repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb | require 'concurrent/synchronization/safe_initialization'
require 'concurrent/utility/native_integer'
module Concurrent
# @!macro atomic_fixnum
# @!visibility private
# @!macro internal_implementation_note
class MutexAtomicFixnum
  extend Concurrent::Synchronization::SafeInitialization

  # @!macro atomic_fixnum_method_initialize
  #
  # Creates the atomic with the given starting value (validated by ns_set).
  def initialize(initial = 0)
    super()
    @Lock = ::Mutex.new
    ns_set(initial)
  end

  # @!macro atomic_fixnum_method_value_get
  def value
    synchronize { @value }
  end

  # @!macro atomic_fixnum_method_value_set
  def value=(value)
    synchronize { ns_set(value) }
  end

  # @!macro atomic_fixnum_method_increment
  def increment(delta = 1)
    synchronize { ns_set(@value + delta.to_i) }
  end

  alias_method :up, :increment

  # @!macro atomic_fixnum_method_decrement
  def decrement(delta = 1)
    synchronize { ns_set(@value - delta.to_i) }
  end

  alias_method :down, :decrement

  # @!macro atomic_fixnum_method_compare_and_set
  #
  # Swaps in `update` only when the stored value equals `expect`;
  # returns whether the swap happened.
  def compare_and_set(expect, update)
    synchronize do
      matched = (@value == expect.to_i)
      @value = update.to_i if matched
      matched
    end
  end

  # @!macro atomic_fixnum_method_update
  def update
    synchronize { @value = yield @value }
  end

  protected

  # @!visibility private
  #
  # Re-entrant: if the calling thread already owns @Lock (e.g. a nested
  # call from within a synchronized block), run the block directly
  # instead of deadlocking on a second acquire.
  def synchronize
    return yield if @Lock.owned?
    @Lock.synchronize { yield }
  end

  private

  # @!visibility private
  #
  # Validates that the new value is a native-sized integer before storing.
  def ns_set(new_value)
    Utility::NativeInteger.ensure_integer_and_bounds new_value
    @value = new_value
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb | require 'concurrent/utility/native_extension_loader' # load native parts first
require 'concurrent/atomic/mutex_atomic_boolean'
module Concurrent
###################################################################
# @!macro atomic_boolean_method_initialize
#
# Creates a new `AtomicBoolean` with the given initial value.
#
# @param [Boolean] initial the initial value
# @!macro atomic_boolean_method_value_get
#
# Retrieves the current `Boolean` value.
#
# @return [Boolean] the current value
# @!macro atomic_boolean_method_value_set
#
# Explicitly sets the value.
#
# @param [Boolean] value the new value to be set
#
# @return [Boolean] the current value
# @!macro atomic_boolean_method_true_question
#
# Is the current value `true`
#
# @return [Boolean] true if the current value is `true`, else false
# @!macro atomic_boolean_method_false_question
#
# Is the current value `false`
#
# @return [Boolean] true if the current value is `false`, else false
# @!macro atomic_boolean_method_make_true
#
# Explicitly sets the value to true.
#
# @return [Boolean] true if value has changed, otherwise false
# @!macro atomic_boolean_method_make_false
#
# Explicitly sets the value to false.
#
# @return [Boolean] true if value has changed, otherwise false
###################################################################
# @!macro atomic_boolean_public_api
#
# @!method initialize(initial = false)
# @!macro atomic_boolean_method_initialize
#
# @!method value
# @!macro atomic_boolean_method_value_get
#
# @!method value=(value)
# @!macro atomic_boolean_method_value_set
#
# @!method true?
# @!macro atomic_boolean_method_true_question
#
# @!method false?
# @!macro atomic_boolean_method_false_question
#
# @!method make_true
# @!macro atomic_boolean_method_make_true
#
# @!method make_false
# @!macro atomic_boolean_method_make_false
###################################################################
# @!visibility private
# @!macro internal_implementation_note
# Pick the fastest available backend: C extension on CRuby, the Java
# implementation on JRuby, and the portable mutex-based fallback elsewhere.
AtomicBooleanImplementation = if Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
                                CAtomicBoolean
                              elsif Concurrent.on_jruby?
                                JavaAtomicBoolean
                              else
                                MutexAtomicBoolean
                              end
private_constant :AtomicBooleanImplementation
# @!macro atomic_boolean
#
# A boolean value that can be updated atomically. Reads and writes to an atomic
# boolean are thread-safe and guaranteed to succeed. Reads and writes may block
# briefly but no explicit locking is required.
#
# @!macro thread_safe_variable_comparison
#
# Performance:
#
# ```
# Testing with ruby 2.1.2
# Testing with Concurrent::MutexAtomicBoolean...
# 2.790000 0.000000 2.790000 ( 2.791454)
# Testing with Concurrent::CAtomicBoolean...
# 0.740000 0.000000 0.740000 ( 0.740206)
#
# Testing with jruby 1.9.3
# Testing with Concurrent::MutexAtomicBoolean...
# 5.240000 2.520000 7.760000 ( 3.683000)
# Testing with Concurrent::JavaAtomicBoolean...
# 3.340000 0.010000 3.350000 ( 0.855000)
# ```
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean
#
# @!macro atomic_boolean_public_api
class AtomicBoolean < AtomicBooleanImplementation
  # @return [String] Short string representation.
  #
  # Reuses the superclass representation, dropping its trailing '>' and
  # appending the current value.
  def to_s
    "#{super[0..-2]} value:#{value}>"
  end

  alias_method :inspect, :to_s
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb | require 'concurrent/constants'
require_relative 'locals'
module Concurrent
# A `ThreadLocalVar` is a variable where the value is different for each thread.
# Each variable may have a default value, but when you modify the variable only
# the current thread will ever see that change.
#
# This is similar to Ruby's built-in thread-local variables (`Thread#thread_variable_get`),
# but with these major advantages:
# * `ThreadLocalVar` has its own identity, it doesn't need a Symbol.
# * Each Ruby's built-in thread-local variable leaks some memory forever (it's a Symbol held forever on the thread),
# so it's only OK to create a small amount of them.
# `ThreadLocalVar` has no such issue and it is fine to create many of them.
# * Ruby's built-in thread-local variables leak forever the value set on each thread (unless set to nil explicitly).
# `ThreadLocalVar` automatically removes the mapping for each thread once the `ThreadLocalVar` instance is GC'd.
#
# @!macro thread_safe_variable_comparison
#
# @example
# v = ThreadLocalVar.new(14)
# v.value #=> 14
# v.value = 2
# v.value #=> 2
#
# @example
# v = ThreadLocalVar.new(14)
#
# t1 = Thread.new do
# v.value #=> 14
# v.value = 1
# v.value #=> 1
# end
#
# t2 = Thread.new do
# v.value #=> 14
# v.value = 2
# v.value #=> 2
# end
#
# v.value #=> 14
class ThreadLocalVar
  LOCALS = ThreadLocals.new

  # Creates a thread local variable.
  #
  # @param [Object] default the default value when otherwise unset
  # @param [Proc] default_block Optional block that gets called to obtain the
  #   default value for each thread
  #
  # @raise [ArgumentError] if both a default value and a default block are given
  def initialize(default = nil, &default_block)
    if default && block_given?
      raise ArgumentError, "Cannot use both value and block as default value"
    end

    # Exactly one default source is kept; the other is explicitly nil.
    @default_block = block_given? ? default_block : nil
    @default       = block_given? ? nil : default
    @index = LOCALS.next_index(self)
  end

  # Returns the value in the current thread's copy of this thread-local variable.
  #
  # @return [Object] the current value
  def value
    LOCALS.fetch(@index) { default }
  end

  # Sets the current thread's copy of this thread-local variable to the specified value.
  #
  # @param [Object] value the value to set
  # @return [Object] the new value
  def value=(value)
    LOCALS.set(@index, value)
  end

  # Bind the given value to thread local storage during
  # execution of the given block.
  #
  # @param [Object] value the value to bind
  # @yield the operation to be performed with the bound variable
  # @return [Object] the value
  def bind(value)
    return unless block_given?

    previous = self.value
    self.value = value
    begin
      yield
    ensure
      # Always restore the prior value, even if the block raises.
      self.value = previous
    end
  end

  protected

  # @!visibility private
  #
  # Resolves the per-thread default. When a default block was supplied, its
  # result is also stored for the current thread so it is computed only once.
  def default
    @default_block ? (self.value = @default_block.call) : @default
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb | require 'concurrent/errors'
require 'concurrent/synchronization/object'
module Concurrent
# An atomic reference which maintains an object reference along with a mark bit
# that can be updated atomically.
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html
# java.util.concurrent.atomic.AtomicMarkableReference
class AtomicMarkableReference < ::Concurrent::Synchronization::Object
  attr_atomic(:reference)
  private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference

  # Creates a new `AtomicMarkableReference` with the given initial value
  # and mark, stored together as one frozen `[value, mark]` pair.
  #
  # @param [Object] value the initial value
  # @param [Boolean] mark the initial mark
  def initialize(value = nil, mark = false)
    super()
    self.reference = immutable_array(value, mark)
  end

  # Atomically sets the value and mark to the given updated value and
  # mark given both:
  #   - the current value == the expected value &&
  #   - the current mark == the expected mark
  #
  # @param [Object] expected_val the expected value
  # @param [Object] new_val the new value
  # @param [Boolean] expected_mark the expected mark
  # @param [Boolean] new_mark the new mark
  #
  # @return [Boolean] `true` if successful. A `false` return indicates
  # that the actual value was not equal to the expected value or the
  # actual mark was not equal to the expected mark
  def compare_and_set(expected_val, new_val, expected_mark, new_mark)
    # Memoize a valid reference to the current AtomicReference for
    # later comparison.
    current = reference
    curr_val, curr_mark = current

    # Ensure that the expected marks match.
    return false unless expected_mark == curr_mark

    if expected_val.is_a? Numeric
      # If the object is a numeric, we need to ensure we are comparing
      # the numerical values
      return false unless expected_val == curr_val
    else
      # Otherwise, we need to ensure we are comparing the object identity.
      # Theoretically, this could be incorrect if a user monkey-patched
      # `Object#equal?`, but they should know that they are playing with
      # fire at that point.
      return false unless expected_val.equal? curr_val
    end

    prospect = immutable_array(new_val, new_mark)

    compare_and_set_reference current, prospect
  end

  alias_method :compare_and_swap, :compare_and_set

  # Gets the current reference and marked values.
  #
  # @return [Array] the current reference and marked values
  def get
    reference
  end

  # Gets the current value of the reference
  #
  # @return [Object] the current value of the reference
  def value
    reference[0]
  end

  # Gets the current marked value
  #
  # @return [Boolean] the current marked value
  def mark
    reference[1]
  end

  alias_method :marked?, :mark

  # _Unconditionally_ sets to the given value of both the reference and
  # the mark.
  #
  # @param [Object] new_val the new value
  # @param [Boolean] new_mark the new mark
  #
  # @return [Array] both the new value and the new mark
  def set(new_val, new_mark)
    self.reference = immutable_array(new_val, new_mark)
  end

  # Pass the current value and marked state to the given block, replacing it
  # with the block's results. May retry if the value changes during the
  # block's execution.
  #
  # @yield [Object] Calculate a new value and marked state for the atomic
  #   reference using given (old) value and (old) marked
  # @yieldparam [Object] old_val the starting value of the atomic reference
  # @yieldparam [Boolean] old_mark the starting state of marked
  #
  # @return [Array] the new value and new mark
  def update
    loop do
      old_val, old_mark = reference
      new_val, new_mark = yield old_val, old_mark

      if compare_and_set old_val, new_val, old_mark, new_mark
        return immutable_array(new_val, new_mark)
      end
    end
  end

  # Pass the current value to the given block, replacing it
  # with the block's result. Raise an exception if the update
  # fails.
  #
  # @yield [Object] Calculate a new value and marked state for the atomic
  #   reference using given (old) value and (old) marked
  # @yieldparam [Object] old_val the starting value of the atomic reference
  # @yieldparam [Boolean] old_mark the starting state of marked
  #
  # @return [Array] the new value and marked state
  #
  # @raise [Concurrent::ConcurrentUpdateError] if the update fails
  def try_update!
    old_val, old_mark = reference
    new_val, new_mark = yield old_val, old_mark

    unless compare_and_set old_val, new_val, old_mark, new_mark
      # BUG FIX: the original passed the "Note: ..." string as a third
      # argument to Kernel#raise, which is the *backtrace* parameter --
      # the hint never appeared in the message and the real backtrace
      # was replaced. Concatenate it into a single message instead.
      fail ::Concurrent::ConcurrentUpdateError,
           'AtomicMarkableReference: Update failed due to race condition. ' \
           'Note: If you would like to guarantee an update, please use ' \
           'the `AtomicMarkableReference#update` method.'
    end

    immutable_array(new_val, new_mark)
  end

  # Pass the current value to the given block, replacing it with the
  # block's result. Simply return nil if update fails.
  #
  # @yield [Object] Calculate a new value and marked state for the atomic
  #   reference using given (old) value and (old) marked
  # @yieldparam [Object] old_val the starting value of the atomic reference
  # @yieldparam [Boolean] old_mark the starting state of marked
  #
  # @return [Array] the new value and marked state, or nil if
  #   the update failed
  def try_update
    old_val, old_mark = reference
    new_val, new_mark = yield old_val, old_mark

    return unless compare_and_set old_val, new_val, old_mark, new_mark

    immutable_array(new_val, new_mark)
  end

  private

  # Builds the frozen `[value, mark]` pair stored in the atomic reference.
  def immutable_array(*args)
    args.freeze
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb | require 'concurrent/utility/native_extension_loader' # load native parts first
require 'concurrent/atomic/mutex_atomic_fixnum'
module Concurrent
###################################################################
# @!macro atomic_fixnum_method_initialize
#
# Creates a new `AtomicFixnum` with the given initial value.
#
# @param [Fixnum] initial the initial value
# @raise [ArgumentError] if the initial value is not a `Fixnum`
# @!macro atomic_fixnum_method_value_get
#
# Retrieves the current `Fixnum` value.
#
# @return [Fixnum] the current value
# @!macro atomic_fixnum_method_value_set
#
# Explicitly sets the value.
#
# @param [Fixnum] value the new value to be set
#
# @return [Fixnum] the current value
#
# @raise [ArgumentError] if the new value is not a `Fixnum`
# @!macro atomic_fixnum_method_increment
#
# Increases the current value by the given amount (defaults to 1).
#
# @param [Fixnum] delta the amount by which to increase the current value
#
# @return [Fixnum] the current value after incrementation
# @!macro atomic_fixnum_method_decrement
#
# Decreases the current value by the given amount (defaults to 1).
#
# @param [Fixnum] delta the amount by which to decrease the current value
#
# @return [Fixnum] the current value after decrementation
# @!macro atomic_fixnum_method_compare_and_set
#
# Atomically sets the value to the given updated value if the current
# value == the expected value.
#
# @param [Fixnum] expect the expected value
# @param [Fixnum] update the new value
#
# @return [Boolean] true if the value was updated else false
# @!macro atomic_fixnum_method_update
#
# Pass the current value to the given block, replacing it
# with the block's result. May retry if the value changes
# during the block's execution.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
#
# @return [Object] the new value
###################################################################
# @!macro atomic_fixnum_public_api
#
# @!method initialize(initial = 0)
# @!macro atomic_fixnum_method_initialize
#
# @!method value
# @!macro atomic_fixnum_method_value_get
#
# @!method value=(value)
# @!macro atomic_fixnum_method_value_set
#
# @!method increment(delta = 1)
# @!macro atomic_fixnum_method_increment
#
# @!method decrement(delta = 1)
# @!macro atomic_fixnum_method_decrement
#
# @!method compare_and_set(expect, update)
# @!macro atomic_fixnum_method_compare_and_set
#
# @!method update
# @!macro atomic_fixnum_method_update
###################################################################
# @!visibility private
# @!macro internal_implementation_note
# Pick the fastest available backend: C extension on CRuby, the Java
# implementation on JRuby, and the portable mutex-based fallback elsewhere.
AtomicFixnumImplementation = if Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
                               CAtomicFixnum
                             elsif Concurrent.on_jruby?
                               JavaAtomicFixnum
                             else
                               MutexAtomicFixnum
                             end
private_constant :AtomicFixnumImplementation
# @!macro atomic_fixnum
#
# A numeric value that can be updated atomically. Reads and writes to an atomic
# fixnum are thread-safe and guaranteed to succeed. Reads and writes may block
# briefly but no explicit locking is required.
#
# @!macro thread_safe_variable_comparison
#
# Performance:
#
# ```
# Testing with ruby 2.1.2
# Testing with Concurrent::MutexAtomicFixnum...
# 3.130000 0.000000 3.130000 ( 3.136505)
# Testing with Concurrent::CAtomicFixnum...
# 0.790000 0.000000 0.790000 ( 0.785550)
#
# Testing with jruby 1.9.3
# Testing with Concurrent::MutexAtomicFixnum...
# 5.460000 2.460000 7.920000 ( 3.715000)
# Testing with Concurrent::JavaAtomicFixnum...
# 4.520000 0.030000 4.550000 ( 1.187000)
# ```
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong
#
# @!macro atomic_fixnum_public_api
class AtomicFixnum < AtomicFixnumImplementation
  # @return [String] Short string representation.
  #
  # Reuses the superclass representation, dropping its trailing '>' and
  # appending the current value.
  def to_s
    "#{super[0..-2]} value:#{value}>"
  end

  alias_method :inspect, :to_s
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb | require 'thread'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/atomic/atomic_fixnum'
require 'concurrent/errors'
require 'concurrent/synchronization/object'
require 'concurrent/synchronization/lock'
require 'concurrent/atomic/lock_local_var'
module Concurrent
# Re-entrant read-write lock implementation
#
# Allows any number of concurrent readers, but only one concurrent writer
# (And while the "write" lock is taken, no read locks can be obtained either.
# Hence, the write lock can also be called an "exclusive" lock.)
#
# If another thread has taken a read lock, any thread which wants a write lock
# will block until all the readers release their locks. However, once a thread
# starts waiting to obtain a write lock, any additional readers that come along
# will also wait (so writers are not starved).
#
# A thread can acquire both a read and write lock at the same time. A thread can
# also acquire a read lock OR a write lock more than once. Only when the read (or
# write) lock is released as many times as it was acquired, will the thread
# actually let it go, allowing other threads which might have been waiting
# to proceed. Therefore the lock can be upgraded by first acquiring
# read lock and then write lock, and the lock can be downgraded by first
# holding both read and write locks and releasing just the write lock.
#
# If both read and write locks are acquired by the same thread, it is not strictly
# necessary to release them in the same order they were acquired. In other words,
# the following code is legal:
#
# @example
# lock = Concurrent::ReentrantReadWriteLock.new
# lock.acquire_write_lock
# lock.acquire_read_lock
# lock.release_write_lock
# # At this point, the current thread is holding only a read lock, not a write
# # lock. So other threads can take read locks, but not a write lock.
# lock.release_read_lock
# # Now the current thread is not holding either a read or write lock, so
# # another thread could potentially acquire a write lock.
#
# This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`.
#
# @example
# lock = Concurrent::ReentrantReadWriteLock.new
# lock.with_read_lock { data.retrieve }
# lock.with_write_lock { data.modify! }
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
class ReentrantReadWriteLock < Synchronization::Object
  # Implementation notes:
  #
  # A goal is to make the uncontended path for both readers/writers mutex-free
  # Only if there is reader-writer or writer-writer contention, should mutexes be used
  # Otherwise, a single CAS operation is all we need to acquire/release a lock
  #
  # Internal state is represented by a single integer ("counter"), and updated
  # using atomic compare-and-swap operations
  # When the counter is 0, the lock is free
  # Each thread which has one OR MORE read locks increments the counter by 1
  # (and decrements by 1 when releasing the read lock)
  # The counter is increased by (1 << 15) for each writer waiting to acquire the
  # write lock, and by (1 << 29) if the write lock is taken
  #
  # Additionally, each thread uses a thread-local variable to count how many times
  # it has acquired a read lock, AND how many times it has acquired a write lock.
  # It uses a similar trick; an increment of 1 means a read lock was taken, and
  # an increment of (1 << 15) means a write lock was taken
  # This is what makes re-entrancy possible
  #
  # 2 rules are followed to ensure good liveness properties:
  # 1) Once a writer has queued up and is waiting for a write lock, no other thread
  #    can take a lock without waiting
  # 2) When a write lock is released, readers are given the "first chance" to wake
  #    up and acquire a read lock
  # Following these rules means readers and writers tend to "take turns", so neither
  # can starve the other, even under heavy contention

  # Bit layout of @Counter: [1 running-writer bit][14 waiting-writer bits][15 reader bits]
  # @!visibility private
  READER_BITS = 15
  # @!visibility private
  WRITER_BITS = 14

  # Used with @Counter:
  # @!visibility private
  WAITING_WRITER  = 1 << READER_BITS
  # @!visibility private
  RUNNING_WRITER  = 1 << (READER_BITS + WRITER_BITS)
  # @!visibility private
  MAX_READERS     = WAITING_WRITER - 1
  # @!visibility private
  MAX_WRITERS     = RUNNING_WRITER - MAX_READERS - 1

  # Used with @HeldCount:
  # @!visibility private
  WRITE_LOCK_HELD = 1 << READER_BITS
  # @!visibility private
  READ_LOCK_MASK  = WRITE_LOCK_HELD - 1
  # @!visibility private
  WRITE_LOCK_MASK = MAX_WRITERS

  safe_initialization!

  # Create a new `ReentrantReadWriteLock` in the unlocked state.
  def initialize
    super()
    @Counter    = AtomicFixnum.new(0)        # single integer which represents lock state
    @ReadQueue  = Synchronization::Lock.new  # used to queue waiting readers
    @WriteQueue = Synchronization::Lock.new  # used to queue waiting writers
    @HeldCount  = LockLocalVar.new(0)        # indicates # of R & W locks held by this thread
  end

  # Execute a block operation within a read lock.
  #
  # @yield the task to be performed within the lock.
  #
  # @return [Object] the result of the block operation.
  #
  # @raise [ArgumentError] when no block is given.
  # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
  #   is exceeded.
  def with_read_lock
    raise ArgumentError.new('no block given') unless block_given?
    acquire_read_lock
    begin
      yield
    ensure
      release_read_lock
    end
  end

  # Execute a block operation within a write lock.
  #
  # @yield the task to be performed within the lock.
  #
  # @return [Object] the result of the block operation.
  #
  # @raise [ArgumentError] when no block is given.
  # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
  #   is exceeded.
  def with_write_lock
    raise ArgumentError.new('no block given') unless block_given?
    acquire_write_lock
    begin
      yield
    ensure
      release_write_lock
    end
  end

  # Acquire a read lock. If a write lock is held by another thread, will block
  # until it is released.
  #
  # @return [Boolean] true if the lock is successfully acquired
  #
  # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
  #   is exceeded.
  def acquire_read_lock
    if (held = @HeldCount.value) > 0
      # If we already have a lock, there's no need to wait
      if held & READ_LOCK_MASK == 0
        # But we do need to update the counter, if we were holding a write
        # lock but not a read lock
        @Counter.update { |c| c + 1 }
      end
      @HeldCount.value = held + 1
      return true
    end

    while true
      c = @Counter.value
      raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)

      # If a writer is waiting OR running when we first queue up, we need to wait
      if waiting_or_running_writer?(c)
        # Before going to sleep, check again with the ReadQueue mutex held
        # (the no-arg predicate re-reads @Counter, so the state is fresh)
        @ReadQueue.synchronize do
          @ReadQueue.ns_wait if waiting_or_running_writer?
        end
        # Note: the above 'synchronize' block could have used #wait_until,
        # but that waits repeatedly in a loop, checking the wait condition
        # each time it wakes up (to protect against spurious wakeups)
        # But we are already in a loop, which is only broken when we successfully
        # acquire the lock! So we don't care about spurious wakeups, and would
        # rather not pay the extra overhead of using #wait_until

        # After a reader has waited once, they are allowed to "barge" ahead of waiting writers
        # But if a writer is *running*, the reader still needs to wait (naturally)
        while true
          c = @Counter.value
          if running_writer?(c)
            @ReadQueue.synchronize do
              @ReadQueue.ns_wait if running_writer?
            end
          elsif @Counter.compare_and_set(c, c+1)
            @HeldCount.value = held + 1
            return true
          end
        end
      elsif @Counter.compare_and_set(c, c+1)
        @HeldCount.value = held + 1
        return true
      end
    end
  end

  # Try to acquire a read lock and return true if we succeed. If it cannot be
  # acquired immediately, return false.
  #
  # @return [Boolean] true if the lock is successfully acquired
  def try_read_lock
    if (held = @HeldCount.value) > 0
      if held & READ_LOCK_MASK == 0
        # If we hold a write lock, but not a read lock...
        @Counter.update { |c| c + 1 }
      end
      @HeldCount.value = held + 1
      return true
    else
      c = @Counter.value
      if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1)
        @HeldCount.value = held + 1
        return true
      end
    end
    false
  end

  # Release a previously acquired read lock.
  #
  # @return [Boolean] true if the lock is successfully released
  #
  # @raise [Concurrent::IllegalOperationError] if the calling thread does not
  #   hold a read lock.
  def release_read_lock
    held = @HeldCount.value = @HeldCount.value - 1
    rlocks_held = held & READ_LOCK_MASK
    if rlocks_held == 0
      c = @Counter.update { |counter| counter - 1 }
      # If one or more writers were waiting, and we were the last reader, wake a writer up
      if waiting_or_running_writer?(c) && running_readers(c) == 0
        @WriteQueue.signal
      end
    elsif rlocks_held == READ_LOCK_MASK
      # The read-lock bits underflowed (were 0 before the decrement):
      # this thread was not actually holding a read lock
      raise IllegalOperationError, "Cannot release a read lock which is not held"
    end
    true
  end

  # Acquire a write lock. Will block and wait for all active readers and writers.
  #
  # @return [Boolean] true if the lock is successfully acquired
  #
  # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
  #   is exceeded.
  def acquire_write_lock
    if (held = @HeldCount.value) >= WRITE_LOCK_HELD
      # if we already have a write (exclusive) lock, there's no need to wait
      @HeldCount.value = held + WRITE_LOCK_HELD
      return true
    end

    while true
      c = @Counter.value
      raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)

      # To go ahead and take the lock without waiting, there must be no writer
      # running right now, AND no writers who came before us still waiting to
      # acquire the lock
      # Additionally, if any read locks have been taken, we must hold all of them
      if held > 0 && @Counter.compare_and_set(1, c+RUNNING_WRITER)
        # If we are the only one reader and successfully swap the RUNNING_WRITER bit on, then we can go ahead
        @HeldCount.value = held + WRITE_LOCK_HELD
        return true
      elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
        while true
          # Now we have successfully incremented, so no more readers will be able to increment
          # (they will wait instead)
          # However, readers OR writers could decrement right here
          @WriteQueue.synchronize do
            # So we have to do another check inside the synchronized section
            # If a writer OR another reader is running, then go to sleep
            c = @Counter.value
            @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held
          end
          # Note: if you are thinking of replacing the above 'synchronize' block
          # with #wait_until, read the comment in #acquire_read_lock first!

          # We just came out of a wait
          # If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
          # then we are OK to stop waiting and go ahead
          # Otherwise go back and wait again
          c = @Counter.value
          if !running_writer?(c) &&
             running_readers(c) == held &&
             @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
            @HeldCount.value = held + WRITE_LOCK_HELD
            return true
          end
        end
      end
    end
  end

  # Try to acquire a write lock and return true if we succeed. If it cannot be
  # acquired immediately, return false.
  #
  # @return [Boolean] true if the lock is successfully acquired
  def try_write_lock
    if (held = @HeldCount.value) >= WRITE_LOCK_HELD
      @HeldCount.value = held + WRITE_LOCK_HELD
      return true
    else
      c = @Counter.value
      if !waiting_or_running_writer?(c) &&
         running_readers(c) == held &&
         @Counter.compare_and_set(c, c+RUNNING_WRITER)
        @HeldCount.value = held + WRITE_LOCK_HELD
        return true
      end
    end
    false
  end

  # Release a previously acquired write lock.
  #
  # @return [Boolean] true if the lock is successfully released
  #
  # @raise [Concurrent::IllegalOperationError] if the calling thread does not
  #   hold a write lock.
  def release_write_lock
    held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD
    wlocks_held = held & WRITE_LOCK_MASK
    if wlocks_held == 0
      c = @Counter.update { |counter| counter - RUNNING_WRITER }
      # Per liveness rule 2, wake ALL waiting readers first, then one writer
      @ReadQueue.broadcast
      @WriteQueue.signal if waiting_writers(c) > 0
    elsif wlocks_held == WRITE_LOCK_MASK
      # The write-lock bits underflowed (were 0 before the decrement):
      # this thread was not actually holding a write lock
      raise IllegalOperationError, "Cannot release a write lock which is not held"
    end
    true
  end

  private

  # @!visibility private
  def running_readers(c = @Counter.value)
    c & MAX_READERS
  end

  # @!visibility private
  def running_readers?(c = @Counter.value)
    (c & MAX_READERS) > 0
  end

  # @!visibility private
  def running_writer?(c = @Counter.value)
    c >= RUNNING_WRITER
  end

  # @!visibility private
  def waiting_writers(c = @Counter.value)
    (c & MAX_WRITERS) >> READER_BITS
  end

  # @!visibility private
  def waiting_or_running_writer?(c = @Counter.value)
    c >= WAITING_WRITER
  end

  # @!visibility private
  def max_readers?(c = @Counter.value)
    (c & MAX_READERS) == MAX_READERS
  end

  # @!visibility private
  def max_writers?(c = @Counter.value)
    (c & MAX_WRITERS) == MAX_WRITERS
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb | require 'concurrent/synchronization/lockable_object'
require 'concurrent/utility/native_integer'
module Concurrent
# @!macro semaphore
# @!visibility private
# @!macro internal_implementation_note
class MutexSemaphore < Synchronization::LockableObject
# @!macro semaphore_method_initialize
def initialize(count)
Utility::NativeInteger.ensure_integer_and_bounds count
super()
synchronize { ns_initialize count }
end
# @!macro semaphore_method_acquire
def acquire(permits = 1)
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
synchronize do
try_acquire_timed(permits, nil)
end
return unless block_given?
begin
yield
ensure
release(permits)
end
end
# @!macro semaphore_method_available_permits
def available_permits
synchronize { @free }
end
# @!macro semaphore_method_drain_permits
#
# Acquires and returns all permits that are immediately available.
#
# @return [Integer]
def drain_permits
synchronize do
@free.tap { |_| @free = 0 }
end
end
# @!macro semaphore_method_try_acquire
def try_acquire(permits = 1, timeout = nil)
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
acquired = synchronize do
if timeout.nil?
try_acquire_now(permits)
else
try_acquire_timed(permits, timeout)
end
end
return acquired unless block_given?
return unless acquired
begin
yield
ensure
release(permits)
end
end
# @!macro semaphore_method_release
def release(permits = 1)
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
synchronize do
@free += permits
permits.times { ns_signal }
end
nil
end
# Shrinks the number of available permits by the indicated reduction.
#
# @param [Fixnum] reduction Number of permits to remove.
#
# @raise [ArgumentError] if `reduction` is not an integer or is negative
#
# @raise [ArgumentError] if `@free` - `@reduction` is less than zero
#
# @return [nil]
#
# @!visibility private
def reduce_permits(reduction)
Utility::NativeInteger.ensure_integer_and_bounds reduction
Utility::NativeInteger.ensure_positive reduction
synchronize { @free -= reduction }
nil
end
protected
# @!visibility private
def ns_initialize(count)
@free = count
end
private
# @!visibility private
def try_acquire_now(permits)
if @free >= permits
@free -= permits
true
else
false
end
end
# @!visibility private
def try_acquire_timed(permits, timeout)
ns_wait_until(timeout) { try_acquire_now(permits) }
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb | require 'concurrent/utility/native_extension_loader' # load native parts first
require 'concurrent/atomic_reference/atomic_direct_update'
require 'concurrent/atomic_reference/numeric_cas_wrapper'
require 'concurrent/atomic_reference/mutex_atomic'
# Shim for TruffleRuby::AtomicReference
if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference)
# @!visibility private
module TruffleRuby
AtomicReference = Truffle::AtomicReference
end
end
module Concurrent
# @!macro internal_implementation_note
AtomicReferenceImplementation = case
when Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
# @!visibility private
# @!macro internal_implementation_note
class CAtomicReference
include AtomicDirectUpdate
include AtomicNumericCompareAndSetWrapper
alias_method :compare_and_swap, :compare_and_set
end
CAtomicReference
when Concurrent.on_jruby?
# @!visibility private
# @!macro internal_implementation_note
class JavaAtomicReference
include AtomicDirectUpdate
end
JavaAtomicReference
when Concurrent.on_truffleruby?
class TruffleRubyAtomicReference < TruffleRuby::AtomicReference
include AtomicDirectUpdate
alias_method :value, :get
alias_method :value=, :set
alias_method :compare_and_swap, :compare_and_set
alias_method :swap, :get_and_set
end
TruffleRubyAtomicReference
else
MutexAtomicReference
end
private_constant :AtomicReferenceImplementation
# An object reference that may be updated atomically. All read and write
# operations have java volatile semantic.
#
# @!macro thread_safe_variable_comparison
#
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html
#
# @!method initialize(value = nil)
# @!macro atomic_reference_method_initialize
# @param [Object] value The initial value.
#
# @!method get
# @!macro atomic_reference_method_get
# Gets the current value.
# @return [Object] the current value
#
# @!method set(new_value)
# @!macro atomic_reference_method_set
# Sets to the given value.
# @param [Object] new_value the new value
# @return [Object] the new value
#
# @!method get_and_set(new_value)
# @!macro atomic_reference_method_get_and_set
# Atomically sets to the given value and returns the old value.
# @param [Object] new_value the new value
# @return [Object] the old value
#
# @!method compare_and_set(old_value, new_value)
# @!macro atomic_reference_method_compare_and_set
#
# Atomically sets the value to the given updated value if
# the current value == the expected value.
#
# @param [Object] old_value the expected value
# @param [Object] new_value the new value
#
# @return [Boolean] `true` if successful. A `false` return indicates
# that the actual value was not equal to the expected value.
#
# @!method update
# Pass the current value to the given block, replacing it
# with the block's result. May retry if the value changes
# during the block's execution.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
# @return [Object] the new value
#
# @!method try_update
# Pass the current value to the given block, replacing it
# with the block's result. Return nil if the update fails.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
# @note This method was altered to avoid raising an exception by default.
# Instead, this method now returns `nil` in case of failure. For more info,
# please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
# @return [Object] the new value, or nil if update failed
#
# @!method try_update!
# Pass the current value to the given block, replacing it
# with the block's result. Raise an exception if the update
# fails.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
# @note This behavior mimics the behavior of the original
# `AtomicReference#try_update` API. The reason this was changed was to
# avoid raising exceptions (which are inherently slow) by default. For more
# info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
# @return [Object] the new value
# @raise [Concurrent::ConcurrentUpdateError] if the update fails
class AtomicReference < AtomicReferenceImplementation
# @return [String] Short string representation.
def to_s
format '%s value:%s>', super[0..-2], get
end
alias_method :inspect, :to_s
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb | require 'concurrent/utility/engine'
require 'concurrent/atomic/mutex_count_down_latch'
require 'concurrent/atomic/java_count_down_latch'
module Concurrent
###################################################################
# @!macro count_down_latch_method_initialize
#
# Create a new `CountDownLatch` with the initial `count`.
#
# @param [new] count the initial count
#
# @raise [ArgumentError] if `count` is not an integer or is less than zero
# @!macro count_down_latch_method_wait
#
# Block on the latch until the counter reaches zero or until `timeout` is reached.
#
# @param [Fixnum] timeout the number of seconds to wait for the counter or `nil`
# to block indefinitely
# @return [Boolean] `true` if the `count` reaches zero else false on `timeout`
# @!macro count_down_latch_method_count_down
#
# Signal the latch to decrement the counter. Will signal all blocked threads when
# the `count` reaches zero.
# @!macro count_down_latch_method_count
#
# The current value of the counter.
#
# @return [Fixnum] the current value of the counter
###################################################################
# @!macro count_down_latch_public_api
#
# @!method initialize(count = 1)
# @!macro count_down_latch_method_initialize
#
# @!method wait(timeout = nil)
# @!macro count_down_latch_method_wait
#
# @!method count_down
# @!macro count_down_latch_method_count_down
#
# @!method count
# @!macro count_down_latch_method_count
###################################################################
# @!visibility private
# @!macro internal_implementation_note
CountDownLatchImplementation = case
when Concurrent.on_jruby?
JavaCountDownLatch
else
MutexCountDownLatch
end
private_constant :CountDownLatchImplementation
# @!macro count_down_latch
#
# A synchronization object that allows one thread to wait on multiple other threads.
# The thread that will wait creates a `CountDownLatch` and sets the initial value
# (normally equal to the number of other threads). The initiating thread passes the
# latch to the other threads then waits for the other threads by calling the `#wait`
# method. Each of the other threads calls `#count_down` when done with its work.
# When the latch counter reaches zero the waiting thread is unblocked and continues
# with its work. A `CountDownLatch` can be used only once. Its value cannot be reset.
#
# @!macro count_down_latch_public_api
# @example Waiter and Decrementer
# latch = Concurrent::CountDownLatch.new(3)
#
# waiter = Thread.new do
# latch.wait()
# puts ("Waiter released")
# end
#
# decrementer = Thread.new do
# sleep(1)
# latch.count_down
# puts latch.count
#
# sleep(1)
# latch.count_down
# puts latch.count
#
# sleep(1)
# latch.count_down
# puts latch.count
# end
#
# [waiter, decrementer].each(&:join)
class CountDownLatch < CountDownLatchImplementation
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb | require 'concurrent/constants'
require_relative 'locals'
module Concurrent
# A `FiberLocalVar` is a variable where the value is different for each fiber.
# Each variable may have a default value, but when you modify the variable only
# the current fiber will ever see that change.
#
# This is similar to Ruby's built-in fiber-local variables (`Thread.current[:name]`),
# but with these major advantages:
# * `FiberLocalVar` has its own identity, it doesn't need a Symbol.
# * Each Ruby's built-in fiber-local variable leaks some memory forever (it's a Symbol held forever on the fiber),
# so it's only OK to create a small amount of them.
# `FiberLocalVar` has no such issue and it is fine to create many of them.
# * Ruby's built-in fiber-local variables leak forever the value set on each fiber (unless set to nil explicitly).
# `FiberLocalVar` automatically removes the mapping for each fiber once the `FiberLocalVar` instance is GC'd.
#
# @example
# v = FiberLocalVar.new(14)
# v.value #=> 14
# v.value = 2
# v.value #=> 2
#
# @example
# v = FiberLocalVar.new(14)
#
# Fiber.new do
# v.value #=> 14
# v.value = 1
# v.value #=> 1
# end.resume
#
# Fiber.new do
# v.value #=> 14
# v.value = 2
# v.value #=> 2
# end.resume
#
# v.value #=> 14
class FiberLocalVar
LOCALS = FiberLocals.new
# Creates a fiber local variable.
#
# @param [Object] default the default value when otherwise unset
# @param [Proc] default_block Optional block that gets called to obtain the
# default value for each fiber
def initialize(default = nil, &default_block)
if default && block_given?
raise ArgumentError, "Cannot use both value and block as default value"
end
if block_given?
@default_block = default_block
@default = nil
else
@default_block = nil
@default = default
end
@index = LOCALS.next_index(self)
end
# Returns the value in the current fiber's copy of this fiber-local variable.
#
# @return [Object] the current value
def value
LOCALS.fetch(@index) { default }
end
# Sets the current fiber's copy of this fiber-local variable to the specified value.
#
# @param [Object] value the value to set
# @return [Object] the new value
def value=(value)
LOCALS.set(@index, value)
end
# Bind the given value to fiber local storage during
# execution of the given block.
#
# @param [Object] value the value to bind
# @yield the operation to be performed with the bound variable
# @return [Object] the value
def bind(value)
if block_given?
old_value = self.value
self.value = value
begin
yield
ensure
self.value = old_value
end
end
end
protected
# @!visibility private
def default
if @default_block
self.value = @default_block.call
else
@default
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb | require 'thread'
require 'concurrent/atomic/atomic_fixnum'
require 'concurrent/errors'
require 'concurrent/synchronization/object'
require 'concurrent/synchronization/lock'
module Concurrent
# Ruby read-write lock implementation
#
# Allows any number of concurrent readers, but only one concurrent writer
# (And if the "write" lock is taken, any readers who come along will have to wait)
#
# If readers are already active when a writer comes along, the writer will wait for
# all the readers to finish before going ahead.
# Any additional readers that come when the writer is already waiting, will also
# wait (so writers are not starved).
#
# This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`.
#
# @example
# lock = Concurrent::ReadWriteLock.new
# lock.with_read_lock { data.retrieve }
# lock.with_write_lock { data.modify! }
#
# @note Do **not** try to acquire the write lock while already holding a read lock
# **or** try to acquire the write lock while you already have it.
# This will lead to deadlock
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
class ReadWriteLock < Synchronization::Object
# @!visibility private
WAITING_WRITER = 1 << 15
# @!visibility private
RUNNING_WRITER = 1 << 29
# @!visibility private
MAX_READERS = WAITING_WRITER - 1
# @!visibility private
MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1
safe_initialization!
# Implementation notes:
# A goal is to make the uncontended path for both readers/writers lock-free
# Only if there is reader-writer or writer-writer contention, should locks be used
# Internal state is represented by a single integer ("counter"), and updated
# using atomic compare-and-swap operations
# When the counter is 0, the lock is free
# Each reader increments the counter by 1 when acquiring a read lock
# (and decrements by 1 when releasing the read lock)
# The counter is increased by (1 << 15) for each writer waiting to acquire the
# write lock, and by (1 << 29) if the write lock is taken
# Create a new `ReadWriteLock` in the unlocked state.
def initialize
super()
@Counter = AtomicFixnum.new(0) # single integer which represents lock state
@ReadLock = Synchronization::Lock.new
@WriteLock = Synchronization::Lock.new
end
# Execute a block operation within a read lock.
#
# @yield the task to be performed within the lock.
#
# @return [Object] the result of the block operation.
#
# @raise [ArgumentError] when no block is given.
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def with_read_lock
raise ArgumentError.new('no block given') unless block_given?
acquire_read_lock
begin
yield
ensure
release_read_lock
end
end
# Execute a block operation within a write lock.
#
# @yield the task to be performed within the lock.
#
# @return [Object] the result of the block operation.
#
# @raise [ArgumentError] when no block is given.
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def with_write_lock
raise ArgumentError.new('no block given') unless block_given?
acquire_write_lock
begin
yield
ensure
release_write_lock
end
end
# Acquire a read lock. If a write lock has been acquired will block until
# it is released. Will not block if other read locks have been acquired.
#
# @return [Boolean] true if the lock is successfully acquired
#
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def acquire_read_lock
while true
c = @Counter.value
raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)
# If a writer is waiting when we first queue up, we need to wait
if waiting_writer?(c)
@ReadLock.wait_until { !waiting_writer? }
# after a reader has waited once, they are allowed to "barge" ahead of waiting writers
# but if a writer is *running*, the reader still needs to wait (naturally)
while true
c = @Counter.value
if running_writer?(c)
@ReadLock.wait_until { !running_writer? }
else
return if @Counter.compare_and_set(c, c+1)
end
end
else
break if @Counter.compare_and_set(c, c+1)
end
end
true
end
# Release a previously acquired read lock.
#
# @return [Boolean] true if the lock is successfully released
def release_read_lock
while true
c = @Counter.value
if @Counter.compare_and_set(c, c-1)
# If one or more writers were waiting, and we were the last reader, wake a writer up
if waiting_writer?(c) && running_readers(c) == 1
@WriteLock.signal
end
break
end
end
true
end
# Acquire a write lock. Will block and wait for all active readers and writers.
#
# @return [Boolean] true if the lock is successfully acquired
#
# @raise [Concurrent::ResourceLimitError] if the maximum number of writers
# is exceeded.
def acquire_write_lock
while true
c = @Counter.value
raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)
if c == 0 # no readers OR writers running
# if we successfully swap the RUNNING_WRITER bit on, then we can go ahead
break if @Counter.compare_and_set(0, RUNNING_WRITER)
elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
while true
# Now we have successfully incremented, so no more readers will be able to increment
# (they will wait instead)
# However, readers OR writers could decrement right here, OR another writer could increment
@WriteLock.wait_until do
# So we have to do another check inside the synchronized section
# If a writer OR reader is running, then go to sleep
c = @Counter.value
!running_writer?(c) && !running_readers?(c)
end
# We just came out of a wait
# If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
# Then we are OK to stop waiting and go ahead
# Otherwise go back and wait again
c = @Counter.value
break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
end
break
end
end
true
end
# Release a previously acquired write lock.
#
# @return [Boolean] true if the lock is successfully released
def release_write_lock
return true unless running_writer?
c = @Counter.update { |counter| counter - RUNNING_WRITER }
@ReadLock.broadcast
@WriteLock.signal if waiting_writers(c) > 0
true
end
# Queries if the write lock is held by any thread.
#
# @return [Boolean] true if the write lock is held else false`
def write_locked?
@Counter.value >= RUNNING_WRITER
end
# Queries whether any threads are waiting to acquire the read or write lock.
#
# @return [Boolean] true if any threads are waiting for a lock else false
def has_waiters?
waiting_writer?(@Counter.value)
end
private
# @!visibility private
def running_readers(c = @Counter.value)
c & MAX_READERS
end
# @!visibility private
def running_readers?(c = @Counter.value)
(c & MAX_READERS) > 0
end
# @!visibility private
def running_writer?(c = @Counter.value)
c >= RUNNING_WRITER
end
# @!visibility private
def waiting_writers(c = @Counter.value)
(c & MAX_WRITERS) / WAITING_WRITER
end
# @!visibility private
def waiting_writer?(c = @Counter.value)
c >= WAITING_WRITER
end
# @!visibility private
def max_readers?(c = @Counter.value)
(c & MAX_READERS) == MAX_READERS
end
# @!visibility private
def max_writers?(c = @Counter.value)
(c & MAX_WRITERS) == MAX_WRITERS
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/locals.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/locals.rb | require 'fiber'
require 'concurrent/utility/engine'
require 'concurrent/constants'
module Concurrent
# @!visibility private
# @!macro internal_implementation_note
#
# An abstract implementation of local storage, with sub-classes for
# per-thread and per-fiber locals.
#
# Each execution context (EC, thread or fiber) has a lazily initialized array
# of local variable values. Each time a new local variable is created, we
# allocate an "index" for it.
#
# For example, if the allocated index is 1, that means slot #1 in EVERY EC's
# locals array will be used for the value of that variable.
#
# The good thing about using a per-EC structure to hold values, rather than
# a global, is that no synchronization is needed when reading and writing
# those values (since the structure is only ever accessed by a single
# thread).
#
# Of course, when a local variable is GC'd, 1) we need to recover its index
# for use by other new local variables (otherwise the locals arrays could
# get bigger and bigger with time), and 2) we need to null out all the
# references held in the now-unused slots (both to avoid blocking GC of those
# objects, and also to prevent "stale" values from being passed on to a new
# local when the index is reused).
#
# Because we need to null out freed slots, we need to keep references to
# ALL the locals arrays, so we can null out the appropriate slots in all of
# them. This is why we need to use a finalizer to clean up the locals array
# when the EC goes out of scope.
class AbstractLocals
def initialize
@free = []
@lock = Mutex.new
@all_arrays = {}
@next = 0
end
def synchronize
@lock.synchronize { yield }
end
if Concurrent.on_cruby?
def weak_synchronize
yield
end
else
alias_method :weak_synchronize, :synchronize
end
def next_index(local)
index = synchronize do
if @free.empty?
@next += 1
else
@free.pop
end
end
# When the local goes out of scope, we should free the associated index
# and all values stored into it.
ObjectSpace.define_finalizer(local, local_finalizer(index))
index
end
def free_index(index)
weak_synchronize do
# The cost of GC'ing a TLV is linear in the number of ECs using local
# variables. But that is natural! More ECs means more storage is used
# per local variable. So naturally more CPU time is required to free
# more storage.
#
# DO NOT use each_value which might conflict with new pair assignment
# into the hash in #set method.
@all_arrays.values.each do |locals|
locals[index] = nil
end
# free index has to be published after the arrays are cleared:
@free << index
end
end
def fetch(index)
locals = self.locals
value = locals ? locals[index] : nil
if nil == value
yield
elsif NULL.equal?(value)
nil
else
value
end
end
def set(index, value)
locals = self.locals!
locals[index] = (nil == value ? NULL : value)
value
end
private
# When the local goes out of scope, clean up that slot across all locals currently assigned.
def local_finalizer(index)
proc do
free_index(index)
end
end
# When a thread/fiber goes out of scope, remove the array from @all_arrays.
def thread_fiber_finalizer(array_object_id)
proc do
weak_synchronize do
@all_arrays.delete(array_object_id)
end
end
end
# Returns the locals for the current scope, or nil if none exist.
def locals
raise NotImplementedError
end
# Returns the locals for the current scope, creating them if necessary.
def locals!
raise NotImplementedError
end
end
# @!visibility private
# @!macro internal_implementation_note
# An array-backed storage of indexed variables per thread.
class ThreadLocals < AbstractLocals
def locals
Thread.current.thread_variable_get(:concurrent_thread_locals)
end
def locals!
thread = Thread.current
locals = thread.thread_variable_get(:concurrent_thread_locals)
unless locals
locals = thread.thread_variable_set(:concurrent_thread_locals, [])
weak_synchronize do
@all_arrays[locals.object_id] = locals
end
# When the thread goes out of scope, we should delete the associated locals:
ObjectSpace.define_finalizer(thread, thread_fiber_finalizer(locals.object_id))
end
locals
end
end
# @!visibility private
# @!macro internal_implementation_note
# An array-backed storage of indexed variables per fiber.
class FiberLocals < AbstractLocals
def locals
Thread.current[:concurrent_fiber_locals]
end
def locals!
thread = Thread.current
locals = thread[:concurrent_fiber_locals]
unless locals
locals = thread[:concurrent_fiber_locals] = []
weak_synchronize do
@all_arrays[locals.object_id] = locals
end
# When the fiber goes out of scope, we should delete the associated locals:
ObjectSpace.define_finalizer(Fiber.current, thread_fiber_finalizer(locals.object_id))
end
locals
end
end
private_constant :AbstractLocals, :ThreadLocals, :FiberLocals
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb | require 'concurrent/utility/engine'
require_relative 'fiber_local_var'
require_relative 'thread_local_var'
module Concurrent
# @!visibility private
def self.mutex_owned_per_thread?
return false if Concurrent.on_jruby? || Concurrent.on_truffleruby?
return RUBY_VERSION < "3.0" if Concurrent.on_cruby?
mutex = Mutex.new
# Lock the mutex:
mutex.synchronize do
# Check if the mutex is still owned in a child fiber:
Fiber.new { mutex.owned? }.resume
end
end
if mutex_owned_per_thread?
LockLocalVar = ThreadLocalVar
else
LockLocalVar = FiberLocalVar
end
# Either {FiberLocalVar} or {ThreadLocalVar} depending on whether Mutex (and Monitor)
# are held, respectively, per Fiber or per Thread.
class LockLocalVar
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb | require 'thread'
require 'concurrent/atomic/event'
require 'concurrent/concern/logging'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/utility/monotonic_time'
require 'concurrent/collection/timeout_queue'
module Concurrent
# @!macro thread_pool_executor
# @!macro thread_pool_options
# @!visibility private
class RubyThreadPoolExecutor < RubyExecutorService
include Concern::Deprecation
# @!macro thread_pool_executor_constant_default_max_pool_size
DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE
# @!macro thread_pool_executor_constant_default_min_pool_size
DEFAULT_MIN_POOL_SIZE = 0
# @!macro thread_pool_executor_constant_default_max_queue_size
DEFAULT_MAX_QUEUE_SIZE = 0
# @!macro thread_pool_executor_constant_default_thread_timeout
DEFAULT_THREAD_IDLETIMEOUT = 60
# @!macro thread_pool_executor_constant_default_synchronous
DEFAULT_SYNCHRONOUS = false
# @!macro thread_pool_executor_attr_reader_max_length
attr_reader :max_length
# @!macro thread_pool_executor_attr_reader_min_length
attr_reader :min_length
# @!macro thread_pool_executor_attr_reader_idletime
attr_reader :idletime
# @!macro thread_pool_executor_attr_reader_max_queue
attr_reader :max_queue
# @!macro thread_pool_executor_attr_reader_synchronous
attr_reader :synchronous
# @!macro thread_pool_executor_method_initialize
def initialize(opts = {})
super(opts)
end
# @!macro thread_pool_executor_attr_reader_largest_length
def largest_length
synchronize { @largest_length }
end
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
def scheduled_task_count
synchronize { @scheduled_task_count }
end
# @!macro thread_pool_executor_attr_reader_completed_task_count
def completed_task_count
synchronize { @completed_task_count }
end
# @!macro thread_pool_executor_method_active_count
def active_count
synchronize do
@pool.length - @ready.length
end
end
# @!macro executor_service_method_can_overflow_question
def can_overflow?
synchronize { ns_limited_queue? }
end
# @!macro thread_pool_executor_attr_reader_length
def length
synchronize { @pool.length }
end
# @!macro thread_pool_executor_attr_reader_queue_length
def queue_length
synchronize { @queue.length }
end
# @!macro thread_pool_executor_attr_reader_remaining_capacity
def remaining_capacity
synchronize do
if ns_limited_queue?
@max_queue - @queue.length
else
-1
end
end
end
# removes the worker if it can be pruned
#
# @return [true, false] if the worker was pruned
#
# @!visibility private
def prune_worker(worker)
synchronize do
if ns_prunable_capacity > 0
remove_worker worker
true
else
false
end
end
end
# @!visibility private
def remove_worker(worker)
synchronize do
ns_remove_ready_worker worker
ns_remove_busy_worker worker
end
end
# @!visibility private
def ready_worker(worker, last_message)
synchronize { ns_ready_worker worker, last_message }
end
# @!visibility private
def worker_died(worker)
synchronize { ns_worker_died worker }
end
# @!visibility private
def worker_task_completed
synchronize { @completed_task_count += 1 }
end
# @!macro thread_pool_executor_method_prune_pool
def prune_pool
deprecated "#prune_pool has no effect and will be removed in next the release, see https://github.com/ruby-concurrency/concurrent-ruby/pull/1082."
end
private
# @!visibility private
def ns_initialize(opts)
@min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
@max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
@idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
@max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
@synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS)
@fallback_policy = opts.fetch(:fallback_policy, :abort)
raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0
raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)
raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length
@pool = [] # all workers
@ready = [] # used as a stash (most idle worker is at the start)
@queue = [] # used as queue
# @ready or @queue is empty at all times
@scheduled_task_count = 0
@completed_task_count = 0
@largest_length = 0
@workers_counter = 0
@ruby_pid = $$ # detects if Ruby has forked
end
# @!visibility private
def ns_limited_queue?
@max_queue != 0
end
# @!visibility private
def ns_execute(*args, &task)
ns_reset_if_forked
if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
@scheduled_task_count += 1
nil
else
fallback_action(*args, &task)
end
end
# @!visibility private
def ns_shutdown_execution
ns_reset_if_forked
if @pool.empty?
# nothing to do
stopped_event.set
end
if @queue.empty?
# no more tasks will be accepted, just stop all workers
@pool.each(&:stop)
end
end
# @!visibility private
def ns_kill_execution
# TODO log out unprocessed tasks in queue
# TODO try to shutdown first?
@pool.each(&:kill)
@pool.clear
@ready.clear
end
# tries to assign task to a worker, tries to get one from @ready or to create new one
# @return [true, false] if task is assigned to a worker
#
# @!visibility private
def ns_assign_worker(*args, &task)
# keep growing if the pool is not at the minimum yet
worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
if worker
worker << [task, args]
true
else
false
end
rescue ThreadError
# Raised when the operating system refuses to create the new thread
return false
end
# tries to enqueue task
# @return [true, false] if enqueued
#
# @!visibility private
def ns_enqueue(*args, &task)
return false if @synchronous
if !ns_limited_queue? || @queue.size < @max_queue
@queue << [task, args]
true
else
false
end
end
# @!visibility private
def ns_worker_died(worker)
ns_remove_busy_worker worker
replacement_worker = ns_add_busy_worker
ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
end
# creates new worker which has to receive work to do after it's added
# @return [nil, Worker] nil of max capacity is reached
#
# @!visibility private
def ns_add_busy_worker
return if @pool.size >= @max_length
@workers_counter += 1
@pool << (worker = Worker.new(self, @workers_counter))
@largest_length = @pool.length if @pool.length > @largest_length
worker
end
# handle ready worker, giving it new job or assigning back to @ready
#
# @!visibility private
def ns_ready_worker(worker, last_message, success = true)
task_and_args = @queue.shift
if task_and_args
worker << task_and_args
else
# stop workers when !running?, do not return them to @ready
if running?
raise unless last_message
@ready.push([worker, last_message])
else
worker.stop
end
end
end
# removes a worker which is not tracked in @ready
#
# @!visibility private
def ns_remove_busy_worker(worker)
@pool.delete(worker)
stopped_event.set if @pool.empty? && !running?
true
end
# @!visibility private
def ns_remove_ready_worker(worker)
if index = @ready.index { |rw, _| rw == worker }
@ready.delete_at(index)
end
true
end
# @return [Integer] number of excess idle workers which can be removed without
# going below min_length, or all workers if not running
#
# @!visibility private
def ns_prunable_capacity
if running?
[@pool.size - @min_length, @ready.size].min
else
@pool.size
end
end
# @!visibility private
def ns_reset_if_forked
if $$ != @ruby_pid
@queue.clear
@ready.clear
@pool.clear
@scheduled_task_count = 0
@completed_task_count = 0
@largest_length = 0
@workers_counter = 0
@ruby_pid = $$
end
end
# @!visibility private
class Worker
include Concern::Logging
def initialize(pool, id)
# instance variables accessed only under pool's lock so no need to sync here again
@queue = Collection::TimeoutQueue.new
@pool = pool
@thread = create_worker @queue, pool, pool.idletime
if @thread.respond_to?(:name=)
@thread.name = [pool.name, 'worker', id].compact.join('-')
end
end
def <<(message)
@queue << message
end
def stop
@queue << :stop
end
def kill
@thread.kill
end
private
def create_worker(queue, pool, idletime)
Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
catch(:stop) do
prunable = true
loop do
timeout = prunable && my_pool.running? ? my_idletime : nil
case message = my_queue.pop(timeout: timeout)
when nil
throw :stop if my_pool.prune_worker(self)
prunable = false
when :stop
my_pool.remove_worker(self)
throw :stop
else
task, args = message
run_task my_pool, task, args
my_pool.ready_worker(self, Concurrent.monotonic_time)
prunable = true
end
end
end
end
end
def run_task(pool, task, args)
task.call(*args)
pool.worker_task_completed
rescue => ex
# let it fail
log DEBUG, ex
rescue Exception => ex
log ERROR, ex
pool.worker_died(self)
throw :stop
end
end
private_constant :Worker
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb | require 'concurrent/atomic/atomic_boolean'
require 'concurrent/atomic/atomic_fixnum'
require 'concurrent/atomic/event'
require 'concurrent/executor/executor_service'
require 'concurrent/executor/ruby_executor_service'
module Concurrent
# An executor service in which every operation spawns a new,
# independently operating thread.
#
# This is perhaps the most inefficient executor service in this
  # library. It exists mainly for testing and debugging. Thread creation
# and management is expensive in Ruby and this executor performs no
# resource pooling. This can be very beneficial during testing and
# debugging because it decouples the using code from the underlying
# executor implementation. In production this executor will likely
# lead to suboptimal performance.
#
# @note Intended for use primarily in testing and debugging.
class SimpleExecutorService < RubyExecutorService
# @!macro executor_service_method_post
def self.post(*args)
raise ArgumentError.new('no block given') unless block_given?
Thread.new(*args) do
Thread.current.abort_on_exception = false
yield(*args)
end
true
end
# @!macro executor_service_method_left_shift
def self.<<(task)
post(&task)
self
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
@count.increment
Thread.new(*args) do
Thread.current.abort_on_exception = false
begin
yield(*args)
ensure
@count.decrement
@stopped.set if @running.false? && @count.value == 0
end
end
end
# @!macro executor_service_method_left_shift
def <<(task)
post(&task)
self
end
# @!macro executor_service_method_running_question
def running?
@running.true?
end
# @!macro executor_service_method_shuttingdown_question
def shuttingdown?
@running.false? && ! @stopped.set?
end
# @!macro executor_service_method_shutdown_question
def shutdown?
@stopped.set?
end
# @!macro executor_service_method_shutdown
def shutdown
@running.make_false
@stopped.set if @count.value == 0
true
end
# @!macro executor_service_method_kill
def kill
@running.make_false
@stopped.set
true
end
# @!macro executor_service_method_wait_for_termination
def wait_for_termination(timeout = nil)
@stopped.wait(timeout)
end
private
def ns_initialize(*args)
@running = Concurrent::AtomicBoolean.new(true)
@stopped = Concurrent::Event.new
@count = Concurrent::AtomicFixnum.new(0)
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb | require 'concurrent/utility/engine'
require 'concurrent/executor/ruby_thread_pool_executor'
module Concurrent
if Concurrent.on_jruby?
require 'concurrent/executor/java_thread_pool_executor'
end
ThreadPoolExecutorImplementation = case
when Concurrent.on_jruby?
JavaThreadPoolExecutor
else
RubyThreadPoolExecutor
end
private_constant :ThreadPoolExecutorImplementation
# @!macro thread_pool_executor
#
# An abstraction composed of one or more threads and a task queue. Tasks
# (blocks or `proc` objects) are submitted to the pool and added to the queue.
# The threads in the pool remove the tasks and execute them in the order
# they were received.
#
# A `ThreadPoolExecutor` will automatically adjust the pool size according
# to the bounds set by `min-threads` and `max-threads`. When a new task is
# submitted and fewer than `min-threads` threads are running, a new thread
# is created to handle the request, even if other worker threads are idle.
# If there are more than `min-threads` but less than `max-threads` threads
# running, a new thread will be created only if the queue is full.
#
# Threads that are idle for too long will be garbage collected, down to the
  # configured minimum. Should a thread crash, it, too, will be garbage collected.
#
# `ThreadPoolExecutor` is based on the Java class of the same name. From
# the official Java documentation;
#
# > Thread pools address two different problems: they usually provide
# > improved performance when executing large numbers of asynchronous tasks,
# > due to reduced per-task invocation overhead, and they provide a means
# > of bounding and managing the resources, including threads, consumed
# > when executing a collection of tasks. Each ThreadPoolExecutor also
# > maintains some basic statistics, such as the number of completed tasks.
# >
# > To be useful across a wide range of contexts, this class provides many
# > adjustable parameters and extensibility hooks. However, programmers are
# > urged to use the more convenient Executors factory methods
# > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
# > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
# > background thread), that preconfigure settings for the most common usage
# > scenarios.
#
# @!macro thread_pool_options
#
# @!macro thread_pool_executor_public_api
class ThreadPoolExecutor < ThreadPoolExecutorImplementation
# @!macro thread_pool_executor_method_initialize
#
# Create a new thread pool.
#
# @param [Hash] opts the options which configure the thread pool.
#
# @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
# number of threads to be created
# @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted
# and fewer than `min_threads` are running, a new thread is created
# @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
# number of seconds a thread may be idle before being reclaimed
# @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
# number of tasks allowed in the work queue at any one time; a value of
# zero means the queue may grow without bound
# @option opts [Symbol] :fallback_policy (:abort) the policy for handling new
# tasks that are received when the queue size has reached
# `max_queue` or the executor has shut down
# @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0
# for :max_queue means the queue must perform direct hand-off rather than unbounded.
# @raise [ArgumentError] if `:max_threads` is less than one
# @raise [ArgumentError] if `:min_threads` is less than zero
# @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
# in `FALLBACK_POLICIES`
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
# @!method initialize(opts = {})
# @!macro thread_pool_executor_method_initialize
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb | require 'concurrent/utility/engine'
require 'concurrent/executor/ruby_single_thread_executor'
module Concurrent
if Concurrent.on_jruby?
require 'concurrent/executor/java_single_thread_executor'
end
SingleThreadExecutorImplementation = case
when Concurrent.on_jruby?
JavaSingleThreadExecutor
else
RubySingleThreadExecutor
end
private_constant :SingleThreadExecutorImplementation
# @!macro single_thread_executor
#
# A thread pool with a single thread an unlimited queue. Should the thread
# die for any reason it will be removed and replaced, thus ensuring that
# the executor will always remain viable and available to process jobs.
#
# A common pattern for background processing is to create a single thread
# on which an infinite loop is run. The thread's loop blocks on an input
# source (perhaps blocking I/O or a queue) and processes each input as it
# is received. This pattern has several issues. The thread itself is highly
# susceptible to errors during processing. Also, the thread itself must be
# constantly monitored and restarted should it die. `SingleThreadExecutor`
# encapsulates all these behaviors. The task processor is highly resilient
# to errors from within tasks. Also, should the thread die it will
# automatically be restarted.
#
# The API and behavior of this class are based on Java's `SingleThreadExecutor`.
#
# @!macro abstract_executor_service_public_api
class SingleThreadExecutor < SingleThreadExecutorImplementation
# @!macro single_thread_executor_method_initialize
#
# Create a new thread pool.
#
# @option opts [Symbol] :fallback_policy (:discard) the policy for handling new
# tasks that are received when the queue size has reached
# `max_queue` or the executor has shut down
#
# @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
# in `FALLBACK_POLICIES`
#
# @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
# @!method initialize(opts = {})
# @!macro single_thread_executor_method_initialize
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb | if Concurrent.on_jruby?
require 'concurrent/executor/java_executor_service'
require 'concurrent/executor/serial_executor_service'
module Concurrent
# @!macro single_thread_executor
# @!macro abstract_executor_service_public_api
# @!visibility private
class JavaSingleThreadExecutor < JavaExecutorService
include SerialExecutorService
# @!macro single_thread_executor_method_initialize
def initialize(opts = {})
super(opts)
end
private
def ns_initialize(opts)
@executor = java.util.concurrent.Executors.newSingleThreadExecutor(
DaemonThreadFactory.new(ns_auto_terminate?)
)
@fallback_policy = opts.fetch(:fallback_policy, :discard)
raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy)
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb | if Concurrent.on_jruby?
require 'concurrent/executor/java_executor_service'
module Concurrent
# @!macro thread_pool_executor
# @!macro thread_pool_options
# @!visibility private
class JavaThreadPoolExecutor < JavaExecutorService
include Concern::Deprecation
# @!macro thread_pool_executor_constant_default_max_pool_size
DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647
# @!macro thread_pool_executor_constant_default_min_pool_size
DEFAULT_MIN_POOL_SIZE = 0
# @!macro thread_pool_executor_constant_default_max_queue_size
DEFAULT_MAX_QUEUE_SIZE = 0
# @!macro thread_pool_executor_constant_default_thread_timeout
DEFAULT_THREAD_IDLETIMEOUT = 60
# @!macro thread_pool_executor_constant_default_synchronous
DEFAULT_SYNCHRONOUS = false
# @!macro thread_pool_executor_attr_reader_max_length
attr_reader :max_length
# @!macro thread_pool_executor_attr_reader_max_queue
attr_reader :max_queue
# @!macro thread_pool_executor_attr_reader_synchronous
attr_reader :synchronous
# @!macro thread_pool_executor_method_initialize
def initialize(opts = {})
super(opts)
end
# @!macro executor_service_method_can_overflow_question
def can_overflow?
@max_queue != 0
end
# @!macro thread_pool_executor_attr_reader_min_length
def min_length
@executor.getCorePoolSize
end
# @!macro thread_pool_executor_attr_reader_max_length
def max_length
@executor.getMaximumPoolSize
end
# @!macro thread_pool_executor_attr_reader_length
def length
@executor.getPoolSize
end
# @!macro thread_pool_executor_attr_reader_largest_length
def largest_length
@executor.getLargestPoolSize
end
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
def scheduled_task_count
@executor.getTaskCount
end
# @!macro thread_pool_executor_attr_reader_completed_task_count
def completed_task_count
@executor.getCompletedTaskCount
end
# @!macro thread_pool_executor_method_active_count
def active_count
@executor.getActiveCount
end
# @!macro thread_pool_executor_attr_reader_idletime
def idletime
@executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS)
end
# @!macro thread_pool_executor_attr_reader_queue_length
def queue_length
@executor.getQueue.size
end
# @!macro thread_pool_executor_attr_reader_remaining_capacity
def remaining_capacity
@max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity
end
# @!macro executor_service_method_running_question
def running?
super && !@executor.isTerminating
end
# @!macro thread_pool_executor_method_prune_pool
def prune_pool
deprecated "#prune_pool has no effect and will be removed in the next release."
end
private
def ns_initialize(opts)
min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
@max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
@synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS)
@fallback_policy = opts.fetch(:fallback_policy, :abort)
raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0
raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length
raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy)
if @max_queue == 0
if @synchronous
queue = java.util.concurrent.SynchronousQueue.new
else
queue = java.util.concurrent.LinkedBlockingQueue.new
end
else
queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue)
end
@executor = java.util.concurrent.ThreadPoolExecutor.new(
min_length,
max_length,
idletime,
java.util.concurrent.TimeUnit::SECONDS,
queue,
DaemonThreadFactory.new(ns_auto_terminate?),
FALLBACK_POLICY_CLASSES[@fallback_policy].new)
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb | require 'concurrent/executor/abstract_executor_service'
require 'concurrent/atomic/event'
module Concurrent
# @!macro abstract_executor_service_public_api
# @!visibility private
class RubyExecutorService < AbstractExecutorService
safe_initialization!
def initialize(*args, &block)
super
@StopEvent = Event.new
@StoppedEvent = Event.new
end
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
deferred_action = synchronize {
if running?
ns_execute(*args, &task)
else
fallback_action(*args, &task)
end
}
if deferred_action
deferred_action.call
else
true
end
end
def shutdown
synchronize do
break unless running?
stop_event.set
ns_shutdown_execution
end
true
end
def kill
synchronize do
break if shutdown?
stop_event.set
ns_kill_execution
stopped_event.set
end
true
end
def wait_for_termination(timeout = nil)
stopped_event.wait(timeout)
end
private
def stop_event
@StopEvent
end
def stopped_event
@StoppedEvent
end
def ns_shutdown_execution
stopped_event.set
end
def ns_running?
!stop_event.set?
end
def ns_shuttingdown?
!(ns_running? || ns_shutdown?)
end
def ns_shutdown?
stopped_event.set?
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb | require 'concurrent/utility/engine'
require 'concurrent/executor/thread_pool_executor'
module Concurrent
# A thread pool that dynamically grows and shrinks to fit the current workload.
# New threads are created as needed, existing threads are reused, and threads
# that remain idle for too long are killed and removed from the pool. These
# pools are particularly suited to applications that perform a high volume of
# short-lived tasks.
#
# On creation a `CachedThreadPool` has zero running threads. New threads are
# created on the pool as new operations are `#post`. The size of the pool
# will grow until `#max_length` threads are in the pool or until the number
# of threads exceeds the number of running and pending operations. When a new
  # operation is posted to the pool, the first available idle thread will be tasked
# with the new operation.
#
# Should a thread crash for any reason the thread will immediately be removed
# from the pool. Similarly, threads which remain idle for an extended period
# of time will be killed and reclaimed. Thus these thread pools are very
# efficient at reclaiming unused resources.
#
# The API and behavior of this class are based on Java's `CachedThreadPool`
#
# @!macro thread_pool_options
class CachedThreadPool < ThreadPoolExecutor
# @!macro cached_thread_pool_method_initialize
#
# Create a new thread pool.
#
# @param [Hash] opts the options defining pool behavior.
# @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
#
# @raise [ArgumentError] if `fallback_policy` is not a known policy
#
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
def initialize(opts = {})
defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT }
overrides = { min_threads: 0,
max_threads: DEFAULT_MAX_POOL_SIZE,
max_queue: DEFAULT_MAX_QUEUE_SIZE }
super(defaults.merge(opts).merge(overrides))
end
private
# @!macro cached_thread_pool_method_initialize
# @!visibility private
def ns_initialize(opts)
super(opts)
if Concurrent.on_jruby?
@max_queue = 0
@executor = java.util.concurrent.Executors.newCachedThreadPool(
DaemonThreadFactory.new(ns_auto_terminate?))
@executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
@executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb | require 'delegate'
require 'concurrent/executor/serial_executor_service'
require 'concurrent/executor/serialized_execution'
module Concurrent
# A wrapper/delegator for any `ExecutorService` that
# guarantees serialized execution of tasks.
#
# @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
# @see Concurrent::SerializedExecution
class SerializedExecutionDelegator < SimpleDelegator
include SerialExecutorService
def initialize(executor)
@executor = executor
@serializer = SerializedExecution.new
super(executor)
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
@serializer.post(@executor, *args, &task)
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb | require 'concurrent/atomic/event'
require 'concurrent/executor/abstract_executor_service'
require 'concurrent/executor/serial_executor_service'
module Concurrent
# An executor service which runs all operations on the current thread,
# blocking as necessary. Operations are performed in the order they are
# received and no two operations can be performed simultaneously.
#
  # This executor service exists mainly for testing and debugging. When used
# it immediately runs every `#post` operation on the current thread, blocking
# that thread until the operation is complete. This can be very beneficial
# during testing because it makes all operations deterministic.
#
# @note Intended for use primarily in testing and debugging.
class ImmediateExecutor < AbstractExecutorService
include SerialExecutorService
# Creates a new executor
def initialize
@stopped = Concurrent::Event.new
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
task.call(*args)
true
end
# @!macro executor_service_method_left_shift
def <<(task)
post(&task)
self
end
# @!macro executor_service_method_running_question
def running?
! shutdown?
end
# @!macro executor_service_method_shuttingdown_question
def shuttingdown?
false
end
# @!macro executor_service_method_shutdown_question
def shutdown?
@stopped.set?
end
# @!macro executor_service_method_shutdown
def shutdown
@stopped.set
true
end
alias_method :kill, :shutdown
# @!macro executor_service_method_wait_for_termination
def wait_for_termination(timeout = nil)
@stopped.wait(timeout)
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb | require 'concurrent/utility/engine'
if Concurrent.on_jruby?
require 'concurrent/errors'
require 'concurrent/executor/abstract_executor_service'
module Concurrent
# @!macro abstract_executor_service_public_api
# @!visibility private
class JavaExecutorService < AbstractExecutorService
java_import 'java.lang.Runnable'
FALLBACK_POLICY_CLASSES = {
abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy,
discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy,
caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy
}.freeze
private_constant :FALLBACK_POLICY_CLASSES
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return fallback_action(*args, &task).call unless running?
@executor.submit Job.new(args, task)
true
rescue Java::JavaUtilConcurrent::RejectedExecutionException
raise RejectedExecutionError
end
def wait_for_termination(timeout = nil)
if timeout.nil?
ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok
true
else
@executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
end
end
def shutdown
synchronize do
@executor.shutdown
nil
end
end
def kill
synchronize do
@executor.shutdownNow
wait_for_termination
nil
end
end
private
def ns_running?
!(ns_shuttingdown? || ns_shutdown?)
end
def ns_shuttingdown?
@executor.isShutdown && !@executor.isTerminated
end
def ns_shutdown?
@executor.isTerminated
end
class Job
include Runnable
def initialize(args, block)
@args = args
@block = block
end
def run
@block.call(*@args)
end
end
private_constant :Job
end
class DaemonThreadFactory
# hide include from YARD
send :include, java.util.concurrent.ThreadFactory
def initialize(daemonize = true)
@daemonize = daemonize
@java_thread_factory = java.util.concurrent.Executors.defaultThreadFactory
end
def newThread(runnable)
thread = @java_thread_factory.newThread(runnable)
thread.setDaemon(@daemonize)
return thread
end
end
private_constant :DaemonThreadFactory
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb | require 'concurrent/synchronization/lockable_object'
module Concurrent
# A simple utility class that executes a callable and returns and array of three elements:
# success - indicating if the callable has been executed without errors
# value - filled by the callable result if it has been executed without errors, nil otherwise
# reason - the error risen by the callable if it has been executed with errors, nil otherwise
# Runs a callable under a lock and reports the outcome as a triple rather
# than raising: [success, value, reason].
class SafeTaskExecutor < Synchronization::LockableObject

  # @param [#call] task the callable to execute
  # @param [Hash] opts `:rescue_exception` — when truthy, rescue `Exception`
  #   (including fatal errors) instead of the default `StandardError`.
  def initialize(task, opts = {})
    @task            = task
    @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
    super() # ensures visibility
  end

  # Execute the task with the given arguments.
  #
  # @return [Array] `[success, value, reason]` — on success `value` holds the
  #   task's result and `reason` is nil; on failure `reason` holds the error.
  def execute(*args)
    success = false
    value   = nil
    reason  = nil

    synchronize do
      begin
        value   = @task.call(*args)
        success = true
      rescue @exception_class => ex
        success = false
        reason  = ex
      end
    end

    [success, value, reason]
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb | require 'concurrent/executor/ruby_thread_pool_executor'
require 'concurrent/executor/serial_executor_service'
module Concurrent
# @!macro single_thread_executor
# @!macro abstract_executor_service_public_api
# @!visibility private
class RubySingleThreadExecutor < RubyThreadPoolExecutor
  include SerialExecutorService

  # @!macro single_thread_executor_method_initialize
  #
  # A degenerate thread pool pinned to exactly one thread with an unbounded
  # queue, which guarantees serial execution of posted tasks.
  def initialize(opts = {})
    pool_opts = {
      min_threads:     1,
      max_threads:     1,
      max_queue:       0,
      idletime:        DEFAULT_THREAD_IDLETIMEOUT,
      fallback_policy: opts.fetch(:fallback_policy, :discard),
    }
    super(pool_opts)
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb | require 'concurrent/errors'
require 'concurrent/concern/logging'
require 'concurrent/synchronization/lockable_object'
module Concurrent
# Ensures passed jobs in a serialized order never running at the same time.
class SerializedExecution < Synchronization::LockableObject
  include Concern::Logging

  # Invariant (guarded by the lock): @being_executed is true iff a job is
  # currently running or scheduled; @stash holds jobs waiting their turn.
  def initialize()
    super()
    synchronize { ns_initialize }
  end

  # Lightweight triple of (executor, args, block); #call runs the block.
  Job = Struct.new(:executor, :args, :block) do
    def call
      block.call(*args)
    end
  end

  # Submit a task to the executor for asynchronous processing.
  #
  # @param [Executor] executor to be used for this job
  #
  # @param [Array] args zero or more arguments to be passed to the task
  #
  # @yield the asynchronous task to perform
  #
  # @return [Boolean] `true` if the task is queued, `false` if the executor
  #   is not running
  #
  # @raise [ArgumentError] if no task is given
  def post(executor, *args, &task)
    posts [[executor, args, task]]
    true
  end

  # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not
  # be interleaved by other tasks.
  #
  # @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
  #   first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
  def posts(posts)
    # if can_overflow?
    #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
    # end
    return nil if posts.empty?
    jobs = posts.map { |executor, args, task| Job.new executor, args, task }
    # Under the lock, either stash everything (a job is already in flight) or
    # claim the in-flight slot and take the first job to run; the actual
    # posting happens outside the lock to avoid holding it across executor calls.
    job_to_post = synchronize do
      if @being_executed
        @stash.push(*jobs)
        nil
      else
        @being_executed = true
        @stash.push(*jobs[1..-1])
        jobs.first
      end
    end
    call_job job_to_post if job_to_post
    true
  end

  private

  def ns_initialize
    @being_executed = false
    @stash = []
  end

  # Post the job's wrapper to its executor; if the executor rejects it, run
  # it inline on the calling thread as a fallback.
  def call_job(job)
    did_it_run = begin
      job.executor.post { work(job) }
      true
    rescue RejectedExecutionError => ex
      false
    end
    # TODO not the best idea to run it myself
    unless did_it_run
      begin
        work job
      rescue => ex
        # let it fail
        log DEBUG, ex
      end
    end
  end

  # ensures next job is executed if any is stashed
  def work(job)
    job.call
  ensure
    # Runs even when the job raised: either pick up the next stashed job or
    # clear the in-flight flag (the `||` assigns false, which is falsy).
    synchronize do
      job = @stash.shift || (@being_executed = false)
    end
    # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end
    # of this block
    call_job job if job
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb | require 'concurrent/executor/executor_service'
module Concurrent
# Indicates that the including `ExecutorService` guarantees
# that all operations will occur in the order they are post and that no
# two operations may occur simultaneously. This module provides no
# functionality and provides no guarantees. That is the responsibility
# of the including class. This module exists solely to allow the including
# object to be interrogated for its serialization status.
#
# @example
# class Foo
# include Concurrent::SerialExecutor
# end
#
# foo = Foo.new
#
# foo.is_a? Concurrent::ExecutorService #=> true
# foo.is_a? Concurrent::SerialExecutor #=> true
# foo.serialized? #=> true
#
# @!visibility private
# Marker mixin: the including executor promises serial, non-overlapping
# execution; this module only flips the #serialized? interrogation flag.
module SerialExecutorService
  include ExecutorService

  # @!macro executor_service_method_serialized_question
  #
  # @note Always returns `true`
  def serialized?
    true
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/executor_service.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/executor_service.rb | require 'concurrent/concern/logging'
module Concurrent
###################################################################
# @!macro executor_service_method_post
#
# Submit a task to the executor for asynchronous processing.
#
# @param [Array] args zero or more arguments to be passed to the task
#
# @yield the asynchronous task to perform
#
# @return [Boolean] `true` if the task is queued, `false` if the executor
# is not running
#
# @raise [ArgumentError] if no task is given
# @!macro executor_service_method_left_shift
#
# Submit a task to the executor for asynchronous processing.
#
# @param [Proc] task the asynchronous task to perform
#
# @return [self] returns itself
# @!macro executor_service_method_can_overflow_question
#
# Does the task queue have a maximum size?
#
# @return [Boolean] True if the task queue has a maximum size else false.
# @!macro executor_service_method_serialized_question
#
# Does this executor guarantee serialization of its operations?
#
# @return [Boolean] True if the executor guarantees that all operations
# will be post in the order they are received and no two operations may
# occur simultaneously. Else false.
###################################################################
# @!macro executor_service_public_api
#
# @!method post(*args, &task)
# @!macro executor_service_method_post
#
# @!method <<(task)
# @!macro executor_service_method_left_shift
#
# @!method can_overflow?
# @!macro executor_service_method_can_overflow_question
#
# @!method serialized?
# @!macro executor_service_method_serialized_question
###################################################################
# @!macro executor_service_attr_reader_fallback_policy
# @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`.
# @!macro executor_service_method_shutdown
#
# Begin an orderly shutdown. Tasks already in the queue will be executed,
# but no new tasks will be accepted. Has no additional effect if the
# thread pool is not running.
# @!macro executor_service_method_kill
#
# Begin an immediate shutdown. In-progress tasks will be allowed to
# complete but enqueued tasks will be dismissed and no new tasks
# will be accepted. Has no additional effect if the thread pool is
# not running.
# @!macro executor_service_method_wait_for_termination
#
# Block until executor shutdown is complete or until `timeout` seconds have
# passed.
#
# @note Does not initiate shutdown or termination. Either `shutdown` or `kill`
# must be called before this method (or on another thread).
#
# @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
#
# @return [Boolean] `true` if shutdown complete or false on `timeout`
# @!macro executor_service_method_running_question
#
# Is the executor running?
#
# @return [Boolean] `true` when running, `false` when shutting down or shutdown
# @!macro executor_service_method_shuttingdown_question
#
# Is the executor shuttingdown?
#
# @return [Boolean] `true` when not running and not shutdown, else `false`
# @!macro executor_service_method_shutdown_question
#
# Is the executor shutdown?
#
# @return [Boolean] `true` when shutdown, `false` when shutting down or running
# @!macro executor_service_method_auto_terminate_question
#
# Is the executor auto-terminate when the application exits?
#
# @return [Boolean] `true` when auto-termination is enabled else `false`.
# @!macro executor_service_method_auto_terminate_setter
#
#
# Set the auto-terminate behavior for this executor.
# @deprecated Has no effect
# @param [Boolean] value The new auto-terminate value to set for this executor.
# @return [Boolean] `true` when auto-termination is enabled else `false`.
###################################################################
# @!macro abstract_executor_service_public_api
#
# @!macro executor_service_public_api
#
# @!attribute [r] fallback_policy
# @!macro executor_service_attr_reader_fallback_policy
#
# @!method shutdown
# @!macro executor_service_method_shutdown
#
# @!method kill
# @!macro executor_service_method_kill
#
# @!method wait_for_termination(timeout = nil)
# @!macro executor_service_method_wait_for_termination
#
# @!method running?
# @!macro executor_service_method_running_question
#
# @!method shuttingdown?
# @!macro executor_service_method_shuttingdown_question
#
# @!method shutdown?
# @!macro executor_service_method_shutdown_question
#
# @!method auto_terminate?
# @!macro executor_service_method_auto_terminate_question
#
# @!method auto_terminate=(value)
# @!macro executor_service_method_auto_terminate_setter
###################################################################
# @!macro executor_service_public_api
# @!visibility private
# Base mixin defining the minimal executor interface: #post must be supplied
# by the includer; #<<, #can_overflow? and #serialized? get safe defaults.
module ExecutorService
  include Concern::Logging

  # @!macro executor_service_method_post
  def post(*args, &task)
    raise NotImplementedError
  end

  # @!macro executor_service_method_left_shift
  def <<(task)
    post(&task)
    self
  end

  # @!macro executor_service_method_serialized_question
  #
  # @note Always returns `false`
  def serialized?
    false
  end

  # @!macro executor_service_method_can_overflow_question
  #
  # @note Always returns `false`
  def can_overflow?
    false
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb | require 'concurrent/errors'
require 'concurrent/concern/deprecation'
require 'concurrent/executor/executor_service'
require 'concurrent/synchronization/lockable_object'
module Concurrent
# @!macro abstract_executor_service_public_api
# @!visibility private
class AbstractExecutorService < Synchronization::LockableObject
  include ExecutorService
  include Concern::Deprecation

  # The set of possible fallback policies that may be set at thread pool creation.
  FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze

  # @!macro executor_service_attr_reader_fallback_policy
  attr_reader :fallback_policy

  # Optional human-readable name, echoed in #to_s.
  attr_reader :name

  # Create a new thread pool.
  #
  # Consumes :auto_terminate (default true) and :name here; everything else
  # is handed to the subclass's ns_initialize, all under the lock so the
  # state is published safely.
  def initialize(opts = {}, &block)
    super(&nil) # NOTE(review): deliberately passes no block up — presumably the block is for ns_initialize only; confirm against LockableObject
    synchronize do
      @auto_terminate = opts.fetch(:auto_terminate, true)
      @name = opts.fetch(:name) if opts.key?(:name)
      ns_initialize(opts, &block)
    end
  end

  # Splice " name: <name>" inside the default `#<Class ...>` representation.
  def to_s
    name ? "#{super[0..-2]} name: #{name}>" : super
  end

  # @!macro executor_service_method_shutdown
  def shutdown
    raise NotImplementedError
  end

  # @!macro executor_service_method_kill
  def kill
    raise NotImplementedError
  end

  # @!macro executor_service_method_wait_for_termination
  def wait_for_termination(timeout = nil)
    raise NotImplementedError
  end

  # @!macro executor_service_method_running_question
  def running?
    synchronize { ns_running? }
  end

  # @!macro executor_service_method_shuttingdown_question
  def shuttingdown?
    synchronize { ns_shuttingdown? }
  end

  # @!macro executor_service_method_shutdown_question
  def shutdown?
    synchronize { ns_shutdown? }
  end

  # @!macro executor_service_method_auto_terminate_question
  def auto_terminate?
    synchronize { @auto_terminate }
  end

  # @!macro executor_service_method_auto_terminate_setter
  def auto_terminate=(value)
    deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized."
  end

  private

  # Returns an action which executes the `fallback_policy` once the queue
  # size reaches `max_queue`. The reason for the indirection of an action
  # is so that the work can be deferred outside of synchronization.
  #
  # @param [Array] args the arguments to the task which is being handled.
  #
  # @!visibility private
  def fallback_action(*args)
    case fallback_policy
    when :abort
      lambda { raise RejectedExecutionError }
    when :discard
      lambda { false }
    when :caller_runs
      # Runs the task inline on the posting thread; errors are logged at
      # DEBUG rather than raised to the caller.
      lambda {
        begin
          yield(*args)
        rescue => ex
          # let it fail
          log DEBUG, ex
        end
        true
      }
    else
      lambda { fail "Unknown fallback policy #{fallback_policy}" }
    end
  end

  def ns_execute(*args, &task)
    raise NotImplementedError
  end

  # @!macro executor_service_method_ns_shutdown_execution
  #
  # Callback method called when an orderly shutdown has completed.
  # The default behavior is to signal all waiting threads.
  def ns_shutdown_execution
    # do nothing
  end

  # @!macro executor_service_method_ns_kill_execution
  #
  # Callback method called when the executor has been killed.
  # The default behavior is to do nothing.
  def ns_kill_execution
    # do nothing
  end

  # Lock-free variant for callers already holding the lock.
  def ns_auto_terminate?
    @auto_terminate
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb | require 'concurrent/utility/engine'
require 'concurrent/executor/thread_pool_executor'
module Concurrent
# @!macro thread_pool_executor_constant_default_max_pool_size
# Default maximum number of threads that will be created in the pool.
# @!macro thread_pool_executor_constant_default_min_pool_size
# Default minimum number of threads that will be retained in the pool.
# @!macro thread_pool_executor_constant_default_max_queue_size
# Default maximum number of tasks that may be added to the task queue.
# @!macro thread_pool_executor_constant_default_thread_timeout
# Default maximum number of seconds a thread in the pool may remain idle
# before being reclaimed.
# @!macro thread_pool_executor_constant_default_synchronous
# Default value of the :synchronous option.
# @!macro thread_pool_executor_attr_reader_max_length
# The maximum number of threads that may be created in the pool.
# @return [Integer] The maximum number of threads that may be created in the pool.
# @!macro thread_pool_executor_attr_reader_min_length
# The minimum number of threads that may be retained in the pool.
# @return [Integer] The minimum number of threads that may be retained in the pool.
# @!macro thread_pool_executor_attr_reader_largest_length
# The largest number of threads that have been created in the pool since construction.
# @return [Integer] The largest number of threads that have been created in the pool since construction.
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
# The number of tasks that have been scheduled for execution on the pool since construction.
# @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction.
# @!macro thread_pool_executor_attr_reader_completed_task_count
# The number of tasks that have been completed by the pool since construction.
# @return [Integer] The number of tasks that have been completed by the pool since construction.
# @!macro thread_pool_executor_method_active_count
# The number of threads that are actively executing tasks.
# @return [Integer] The number of threads that are actively executing tasks.
# @!macro thread_pool_executor_attr_reader_idletime
# The number of seconds that a thread may be idle before being reclaimed.
# @return [Integer] The number of seconds that a thread may be idle before being reclaimed.
# @!macro thread_pool_executor_attr_reader_synchronous
# Whether or not a value of 0 for :max_queue option means the queue must perform direct hand-off or rather unbounded queue.
# @return [true, false]
# @!macro thread_pool_executor_attr_reader_max_queue
# The maximum number of tasks that may be waiting in the work queue at any one time.
# When the queue size reaches `max_queue` subsequent tasks will be rejected in
# accordance with the configured `fallback_policy`.
#
# @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time.
# When the queue size reaches `max_queue` subsequent tasks will be rejected in
# accordance with the configured `fallback_policy`.
# @!macro thread_pool_executor_attr_reader_length
# The number of threads currently in the pool.
# @return [Integer] The number of threads currently in the pool.
# @!macro thread_pool_executor_attr_reader_queue_length
# The number of tasks in the queue awaiting execution.
# @return [Integer] The number of tasks in the queue awaiting execution.
# @!macro thread_pool_executor_attr_reader_remaining_capacity
# Number of tasks that may be enqueued before reaching `max_queue` and rejecting
# new tasks. A value of -1 indicates that the queue may grow without bound.
#
# @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
# new tasks. A value of -1 indicates that the queue may grow without bound.
# @!macro thread_pool_executor_method_prune_pool
# Prune the thread pool of unneeded threads
#
# What is being pruned is controlled by the min_threads and idletime
# parameters passed at pool creation time
#
# This is a no-op on all pool implementations as they prune themselves
# automatically, and has been deprecated.
# @!macro thread_pool_executor_public_api
#
# @!macro abstract_executor_service_public_api
#
# @!attribute [r] max_length
# @!macro thread_pool_executor_attr_reader_max_length
#
# @!attribute [r] min_length
# @!macro thread_pool_executor_attr_reader_min_length
#
# @!attribute [r] largest_length
# @!macro thread_pool_executor_attr_reader_largest_length
#
# @!attribute [r] scheduled_task_count
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
#
# @!attribute [r] completed_task_count
# @!macro thread_pool_executor_attr_reader_completed_task_count
#
# @!attribute [r] idletime
# @!macro thread_pool_executor_attr_reader_idletime
#
# @!attribute [r] max_queue
# @!macro thread_pool_executor_attr_reader_max_queue
#
# @!attribute [r] length
# @!macro thread_pool_executor_attr_reader_length
#
# @!attribute [r] queue_length
# @!macro thread_pool_executor_attr_reader_queue_length
#
# @!attribute [r] remaining_capacity
# @!macro thread_pool_executor_attr_reader_remaining_capacity
#
# @!method can_overflow?
# @!macro executor_service_method_can_overflow_question
#
# @!method prune_pool
# @!macro thread_pool_executor_method_prune_pool
# @!macro thread_pool_options
#
# **Thread Pool Options**
#
# Thread pools support several configuration options:
#
# * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
# * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and
# a `<name>-worker-<id>` name is given to its threads if supported by used Ruby
# implementation. `<id>` is uniq for each thread.
# * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
# any one time. When the queue size reaches `max_queue` and no new threads can be created,
# subsequent tasks will be rejected in accordance with the configured `fallback_policy`.
# * `auto_terminate`: When true (default), the threads started will be marked as daemon.
# * `fallback_policy`: The policy defining how rejected tasks are handled.
#
# Three fallback policies are supported:
#
# * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
# * `:discard`: Discard the task and return false.
# * `:caller_runs`: Execute the task on the calling thread.
#
# **Shutting Down Thread Pools**
#
# Killing a thread pool while tasks are still being processed, either by calling
# the `#kill` method or at application exit, will have unpredictable results. There
# is no way for the thread pool to know what resources are being used by the
# in-progress tasks. When those tasks are killed the impact on those resources
# cannot be predicted. The *best* practice is to explicitly shutdown all thread
# pools using the provided methods:
#
# * Call `#shutdown` to initiate an orderly termination of all in-progress tasks
# * Call `#wait_for_termination` with an appropriate timeout interval an allow
# the orderly shutdown to complete
# * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time
#
# On some runtime platforms (most notably the JVM) the application will not
# exit until all thread pools have been shutdown. To prevent applications from
# "hanging" on exit, all threads can be marked as daemon according to the
# `:auto_terminate` option.
#
# ```ruby
# pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon
# pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon
# ```
#
# @note Failure to properly shutdown a thread pool can lead to unpredictable results.
# Please read *Shutting Down Thread Pools* for more information.
#
# @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
# @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean-
# @!macro fixed_thread_pool
#
# A thread pool that reuses a fixed number of threads operating off an unbounded queue.
# At any point, at most `num_threads` will be active processing tasks. When all threads are busy new
# tasks `#post` to the thread pool are enqueued until a thread becomes available.
# Should a thread crash for any reason the thread will immediately be removed
# from the pool and replaced.
#
# The API and behavior of this class are based on Java's `FixedThreadPool`
#
# @!macro thread_pool_options
class FixedThreadPool < ThreadPoolExecutor

  # @!macro fixed_thread_pool_method_initialize
  #
  # Create a new thread pool pinned to exactly `num_threads` workers.
  #
  # @param [Integer] num_threads the number of threads to allocate
  # @param [Hash] opts the options defining pool behavior.
  # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
  #
  # @raise [ArgumentError] if `num_threads` is less than or equal to zero
  # @raise [ArgumentError] if `fallback_policy` is not a known policy
  #
  # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int-
  def initialize(num_threads, opts = {})
    raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1

    # Caller opts may override the defaults, but never the fixed thread counts.
    pool_opts = { max_queue: DEFAULT_MAX_QUEUE_SIZE, idletime: DEFAULT_THREAD_IDLETIMEOUT }
                .merge(opts)
                .merge(min_threads: num_threads, max_threads: num_threads)
    super(pool_opts)
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/timer_set.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/timer_set.rb | require 'concurrent/scheduled_task'
require 'concurrent/atomic/event'
require 'concurrent/collection/non_concurrent_priority_queue'
require 'concurrent/executor/executor_service'
require 'concurrent/executor/single_thread_executor'
require 'concurrent/errors'
require 'concurrent/options'
module Concurrent
# Executes a collection of tasks, each after a given delay. A master task
# monitors the set and schedules each task for execution at the appropriate
# time. Tasks are run on the global thread pool or on the supplied executor.
# Each task is represented as a `ScheduledTask`.
#
# @see Concurrent::ScheduledTask
#
# @!macro monotonic_clock_warning
class TimerSet < RubyExecutorService
  # Create a new set of timed tasks.
  #
  # @!macro executor_options
  #
  # @param [Hash] opts the options used to specify the executor on which to perform actions
  # @option opts [Executor] :executor when set use the given `Executor` instance.
  #   Three special values are also supported: `:task` returns the global task pool,
  #   `:operation` returns the global operation pool, and `:immediate` returns a new
  #   `ImmediateExecutor` object.
  def initialize(opts = {})
    super(opts)
  end

  # Post a task to be execute run after a given delay (in seconds). If the
  # delay is less than 1/100th of a second the task will be immediately post
  # to the executor.
  #
  # @param [Float] delay the number of seconds to wait for before executing the task.
  # @param [Array<Object>] args the arguments passed to the task on execution.
  #
  # @yield the task to be performed.
  #
  # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post
  #   is successful; false after shutdown.
  #
  # @raise [ArgumentError] if the intended execution time is not in the future.
  # @raise [ArgumentError] if no block is given.
  def post(delay, *args, &task)
    raise ArgumentError.new('no block given') unless block_given?
    return false unless running?
    opts = { executor:  @task_executor,
             args:      args,
             timer_set: self }
    # ScheduledTask calls back into #post_task below to enqueue itself.
    task = ScheduledTask.execute(delay, opts, &task) # may raise exception
    task.unscheduled? ? false : task
  end

  # Begin an immediate shutdown. In-progress tasks will be allowed to
  # complete but enqueued tasks will be dismissed and no new tasks
  # will be accepted. Has no additional effect if the thread pool is
  # not running.
  def kill
    shutdown
    @timer_executor.kill
  end

  private :<<

  private

  # Initialize the object.
  #
  # @param [Hash] opts the options to create the object with.
  # @!visibility private
  def ns_initialize(opts)
    # Min-ordered priority queue: the soonest-due task sits at the head.
    @queue          = Collection::NonConcurrentPriorityQueue.new(order: :min)
    @task_executor  = Options.executor_from_options(opts) || Concurrent.global_io_executor
    # Dedicated single thread runs the #process_tasks loop.
    @timer_executor = SingleThreadExecutor.new
    @condition      = Event.new
    @ruby_pid       = $$ # detects if Ruby has forked
  end

  # Post the task to the internal queue.
  #
  # @note This is intended as a callback method from ScheduledTask
  #   only. It is not intended to be used directly. Post a task
  #   by using the `SchedulesTask#execute` method.
  #
  # @!visibility private
  def post_task(task)
    synchronize { ns_post_task(task) }
  end

  # @!visibility private
  def ns_post_task(task)
    return false unless ns_running?
    ns_reset_if_forked
    # Fast path: delays under 10ms skip the timer queue entirely.
    if (task.initial_delay) <= 0.01
      task.executor.post { task.process_task }
    else
      @queue.push(task)
      # only post the process method when the queue is empty
      @timer_executor.post(&method(:process_tasks)) if @queue.length == 1
      # Wake the timer thread so it re-reads the (possibly sooner) head task.
      @condition.set
    end
    true
  end

  # Remove the given task from the queue.
  #
  # @note This is intended as a callback method from `ScheduledTask`
  #   only. It is not intended to be used directly. Cancel a task
  #   by using the `ScheduledTask#cancel` method.
  #
  # @!visibility private
  def remove_task(task)
    synchronize { @queue.delete(task) }
  end

  # `ExecutorService` callback called during shutdown.
  #
  # @!visibility private
  def ns_shutdown_execution
    ns_reset_if_forked
    @queue.clear
    # set then reset: release any waiter in process_tasks, leave event unset.
    @condition.set
    @condition.reset
    @timer_executor.shutdown
    stopped_event.set
  end

  # After a fork the timer thread no longer exists in the child, so drop
  # inherited queue state rather than act on the parent's schedule.
  def ns_reset_if_forked
    if $$ != @ruby_pid
      @queue.clear
      @condition.reset
      @ruby_pid = $$
    end
  end

  # Run a loop and execute tasks in the scheduled order and at the approximate
  # scheduled time. If no tasks remain the thread will exit gracefully so that
  # garbage collection can occur. If there are no ready tasks it will sleep
  # for up to 60 seconds waiting for the next scheduled task.
  #
  # @!visibility private
  def process_tasks
    loop do
      task = synchronize { @condition.reset; @queue.peek }
      break unless task
      now = Concurrent.monotonic_time
      diff = task.schedule_time - now
      if diff <= 0
        # We need to remove the task from the queue before passing
        # it to the executor, to avoid race conditions where we pass
        # the peek'ed task to the executor and then pop a different
        # one that's been added in the meantime.
        #
        # Note that there's no race condition between the peek and
        # this pop - this pop could retrieve a different task from
        # the peek, but that task would be due to fire now anyway
        # (because @queue is a priority queue, and this thread is
        # the only reader, so whatever timer is at the head of the
        # queue now must have the same pop time, or a closer one, as
        # when we peeked).
        task = synchronize { @queue.pop }
        begin
          task.executor.post { task.process_task }
        rescue RejectedExecutionError
          # ignore and continue
        end
      else
        # Sleep until the head task is due, a new task arrives (@condition
        # is set by ns_post_task), or 60s elapse — whichever comes first.
        @condition.wait([diff, 60].min)
      end
    end
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb | require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/simple_executor_service'
module Concurrent
# An executor service which runs all operations on a new thread, blocking
# until it completes. Operations are performed in the order they are received
# and no two operations can be performed simultaneously.
#
# This executor service exists mainly for testing an debugging. When used it
# immediately runs every `#post` operation on a new thread, blocking the
# current thread until the operation is complete. This is similar to how the
# ImmediateExecutor works, but the operation has the full stack of the new
# thread at its disposal. This can be helpful when the operations will spawn
# more operations on the same executor and so on - such a situation might
# overflow the single stack in case of an ImmediateExecutor, which is
# inconsistent with how it would behave for a threaded executor.
#
# @note Intended for use primarily in testing and debugging.
class IndirectImmediateExecutor < ImmediateExecutor
# Creates a new executor
def initialize
super
@internal_executor = SimpleExecutorService.new
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new("no block given") unless block_given?
return false unless running?
event = Concurrent::Event.new
@internal_executor.post do
begin
task.call(*args)
ensure
event.set
end
end
event.wait
true
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb | module Concurrent
# Special "compare and set" handling of numeric values.
#
# @!visibility private
# @!macro internal_implementation_note
module AtomicNumericCompareAndSetWrapper
# @!macro atomic_reference_method_compare_and_set
def compare_and_set(old_value, new_value)
if old_value.kind_of? Numeric
while true
old = get
return false unless old.kind_of? Numeric
return false unless old == old_value
result = _compare_and_set(old, new_value)
return result if result
end
else
_compare_and_set(old_value, new_value)
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb | require 'concurrent/atomic_reference/atomic_direct_update'
require 'concurrent/atomic_reference/numeric_cas_wrapper'
require 'concurrent/synchronization/safe_initialization'
module Concurrent
# @!visibility private
# @!macro internal_implementation_note
class MutexAtomicReference
extend Concurrent::Synchronization::SafeInitialization
include AtomicDirectUpdate
include AtomicNumericCompareAndSetWrapper
alias_method :compare_and_swap, :compare_and_set
# @!macro atomic_reference_method_initialize
def initialize(value = nil)
super()
@Lock = ::Mutex.new
@value = value
end
# @!macro atomic_reference_method_get
def get
synchronize { @value }
end
alias_method :value, :get
# @!macro atomic_reference_method_set
def set(new_value)
synchronize { @value = new_value }
end
alias_method :value=, :set
# @!macro atomic_reference_method_get_and_set
def get_and_set(new_value)
synchronize do
old_value = @value
@value = new_value
old_value
end
end
alias_method :swap, :get_and_set
# @!macro atomic_reference_method_compare_and_set
def _compare_and_set(old_value, new_value)
synchronize do
if @value.equal? old_value
@value = new_value
true
else
false
end
end
end
protected
# @!visibility private
def synchronize
if @Lock.owned?
yield
else
@Lock.synchronize { yield }
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb | require 'concurrent/errors'
module Concurrent
# Define update methods that use direct paths
#
# @!visibility private
# @!macro internal_implementation_note
module AtomicDirectUpdate
def update
true until compare_and_set(old_value = get, new_value = yield(old_value))
new_value
end
def try_update
old_value = get
new_value = yield old_value
return unless compare_and_set old_value, new_value
new_value
end
def try_update!
old_value = get
new_value = yield old_value
unless compare_and_set(old_value, new_value)
if $VERBOSE
raise ConcurrentUpdateError, "Update failed"
else
raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE
end
end
new_value
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/deprecation.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/deprecation.rb | require 'concurrent/concern/logging'
module Concurrent
module Concern
# @!visibility private
# @!macro internal_implementation_note
module Deprecation
# TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed.
include Concern::Logging
def deprecated(message, strip = 2)
caller_line = caller(strip).first if strip > 0
klass = if Module === self
self
else
self.class
end
message = if strip > 0
format("[DEPRECATED] %s\ncalled on: %s", message, caller_line)
else
format('[DEPRECATED] %s', message)
end
log WARN, klass.to_s, message
end
def deprecated_method(old_name, new_name)
deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3
end
extend self
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/obligation.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/obligation.rb | require 'thread'
require 'timeout'
require 'concurrent/atomic/event'
require 'concurrent/concern/dereferenceable'
module Concurrent
module Concern
module Obligation
include Concern::Dereferenceable
# NOTE: The Dereferenceable module is going away in 2.0. In the mean time
# we need it to place nicely with the synchronization layer. This means
# that the including class SHOULD be synchronized and it MUST implement a
# `#synchronize` method. Not doing so will lead to runtime errors.
# Has the obligation been fulfilled?
#
# @return [Boolean]
def fulfilled?
state == :fulfilled
end
alias_method :realized?, :fulfilled?
# Has the obligation been rejected?
#
# @return [Boolean]
def rejected?
state == :rejected
end
# Is obligation completion still pending?
#
# @return [Boolean]
def pending?
state == :pending
end
# Is the obligation still unscheduled?
#
# @return [Boolean]
def unscheduled?
state == :unscheduled
end
# Has the obligation completed processing?
#
# @return [Boolean]
def complete?
[:fulfilled, :rejected].include? state
end
# Is the obligation still awaiting completion of processing?
#
# @return [Boolean]
def incomplete?
! complete?
end
# The current value of the obligation. Will be `nil` while the state is
# pending or the operation has been rejected.
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Object] see Dereferenceable#deref
def value(timeout = nil)
wait timeout
deref
end
# Wait until obligation is complete or the timeout has been reached.
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Obligation] self
def wait(timeout = nil)
event.wait(timeout) if timeout != 0 && incomplete?
self
end
# Wait until obligation is complete or the timeout is reached. Will re-raise
# any exceptions raised during processing (but will not raise an exception
# on timeout).
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Obligation] self
# @raise [Exception] raises the reason when rejected
def wait!(timeout = nil)
wait(timeout).tap { raise self if rejected? }
end
alias_method :no_error!, :wait!
# The current value of the obligation. Will be `nil` while the state is
# pending or the operation has been rejected. Will re-raise any exceptions
# raised during processing (but will not raise an exception on timeout).
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Object] see Dereferenceable#deref
# @raise [Exception] raises the reason when rejected
def value!(timeout = nil)
wait(timeout)
if rejected?
raise self
else
deref
end
end
# The current state of the obligation.
#
# @return [Symbol] the current state
def state
synchronize { @state }
end
# If an exception was raised during processing this will return the
# exception object. Will return `nil` when the state is pending or if
# the obligation has been successfully fulfilled.
#
# @return [Exception] the exception raised during processing or `nil`
def reason
synchronize { @reason }
end
# @example allows Obligation to be risen
# rejected_ivar = Ivar.new.fail
# raise rejected_ivar
def exception(*args)
raise 'obligation is not rejected' unless rejected?
reason.exception(*args)
end
protected
# @!visibility private
def get_arguments_from(opts = {})
[*opts.fetch(:args, [])]
end
# @!visibility private
def init_obligation
@event = Event.new
@value = @reason = nil
end
# @!visibility private
def event
@event
end
# @!visibility private
def set_state(success, value, reason)
if success
@value = value
@state = :fulfilled
else
@reason = reason
@state = :rejected
end
end
# @!visibility private
def state=(value)
synchronize { ns_set_state(value) }
end
# Atomic compare and set operation
# State is set to `next_state` only if `current state == expected_current`.
#
# @param [Symbol] next_state
# @param [Symbol] expected_current
#
# @return [Boolean] true is state is changed, false otherwise
#
# @!visibility private
def compare_and_set_state(next_state, *expected_current)
synchronize do
if expected_current.include? @state
@state = next_state
true
else
false
end
end
end
# Executes the block within mutex if current state is included in expected_states
#
# @return block value if executed, false otherwise
#
# @!visibility private
def if_state(*expected_states)
synchronize do
raise ArgumentError.new('no block given') unless block_given?
if expected_states.include? @state
yield
else
false
end
end
end
protected
# Am I in the current state?
#
# @param [Symbol] expected The state to check against
# @return [Boolean] true if in the expected state else false
#
# @!visibility private
def ns_check_state?(expected)
@state == expected
end
# @!visibility private
def ns_set_state(value)
@state = value
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/logging.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/logging.rb | require 'concurrent/atomic/atomic_reference'
module Concurrent
module Concern
# Include where logging is needed
#
# @!visibility private
module Logging
# The same as Logger::Severity but we copy it here to avoid a dependency on the logger gem just for these 7 constants
DEBUG, INFO, WARN, ERROR, FATAL, UNKNOWN = 0, 1, 2, 3, 4, 5
SEV_LABEL = %w[DEBUG INFO WARN ERROR FATAL ANY].freeze
# Logs through {Concurrent.global_logger}, it can be overridden by setting @logger
# @param [Integer] level one of Concurrent::Concern::Logging constants
# @param [String] progname e.g. a path of an Actor
# @param [String, nil] message when nil block is used to generate the message
# @yieldreturn [String] a message
def log(level, progname, message = nil, &block)
logger = if defined?(@logger) && @logger
@logger
else
Concurrent.global_logger
end
logger.call level, progname, message, &block
rescue => error
$stderr.puts "`Concurrent.global_logger` failed to log #{[level, progname, message, block]}\n" +
"#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}"
end
end
end
end
module Concurrent
extend Concern::Logging
# Create a simple logger with provided level and output.
def self.create_simple_logger(level = :FATAL, output = $stderr)
level = Concern::Logging.const_get(level) unless level.is_a?(Integer)
# TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking
lambda do |severity, progname, message = nil, &block|
return false if severity < level
message = block ? block.call : message
formatted_message = case message
when String
message
when Exception
format "%s (%s)\n%s",
message.message, message.class, (message.backtrace || []).join("\n")
else
message.inspect
end
output.print format "[%s] %5s -- %s: %s\n",
Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'),
Concern::Logging::SEV_LABEL[severity],
progname,
formatted_message
true
end
end
# Use logger created by #create_simple_logger to log concurrent-ruby messages.
def self.use_simple_logger(level = :FATAL, output = $stderr)
Concurrent.global_logger = create_simple_logger level, output
end
# Create a stdlib logger with provided level and output.
# If you use this deprecated method you might need to add logger to your Gemfile to avoid warnings from Ruby 3.3.5+.
# @deprecated
def self.create_stdlib_logger(level = :FATAL, output = $stderr)
require 'logger'
logger = Logger.new(output)
logger.level = level
logger.formatter = lambda do |severity, datetime, progname, msg|
formatted_message = case msg
when String
msg
when Exception
format "%s (%s)\n%s",
msg.message, msg.class, (msg.backtrace || []).join("\n")
else
msg.inspect
end
format "[%s] %5s -- %s: %s\n",
datetime.strftime('%Y-%m-%d %H:%M:%S.%L'),
severity,
progname,
formatted_message
end
lambda do |loglevel, progname, message = nil, &block|
logger.add loglevel, message, progname, &block
end
end
# Use logger created by #create_stdlib_logger to log concurrent-ruby messages.
# @deprecated
def self.use_stdlib_logger(level = :FATAL, output = $stderr)
Concurrent.global_logger = create_stdlib_logger level, output
end
# TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods
# Suppresses all output when used for logging.
NULL_LOGGER = lambda { |level, progname, message = nil, &block| }
# @!visibility private
GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(:WARN))
private_constant :GLOBAL_LOGGER
def self.global_logger
GLOBAL_LOGGER.value
end
def self.global_logger=(value)
GLOBAL_LOGGER.value = value
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/observable.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/observable.rb | require 'concurrent/collection/copy_on_notify_observer_set'
require 'concurrent/collection/copy_on_write_observer_set'
module Concurrent
module Concern
# The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one
# of the most useful design patterns.
#
# The workflow is very simple:
# - an `observer` can register itself to a `subject` via a callback
# - many `observers` can be registered to the same `subject`
# - the `subject` notifies all registered observers when its status changes
# - an `observer` can deregister itself when is no more interested to receive
# event notifications
#
# In a single threaded environment the whole pattern is very easy: the
# `subject` can use a simple data structure to manage all its subscribed
# `observer`s and every `observer` can react directly to every event without
# caring about synchronization.
#
# In a multi threaded environment things are more complex. The `subject` must
# synchronize the access to its data structure and to do so currently we're
# using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet}
# and {Concurrent::Concern::CopyOnNotifyObserverSet}.
#
# When implementing and `observer` there's a very important rule to remember:
# **there are no guarantees about the thread that will execute the callback**
#
# Let's take this example
# ```
# class Observer
# def initialize
# @count = 0
# end
#
# def update
# @count += 1
# end
# end
#
# obs = Observer.new
# [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) }
# # execute [obj1, obj2, obj3, obj4]
# ```
#
# `obs` is wrong because the variable `@count` can be accessed by different
# threads at the same time, so it should be synchronized (using either a Mutex
# or an AtomicFixum)
module Observable
# @!macro observable_add_observer
#
# Adds an observer to this set. If a block is passed, the observer will be
# created by this method and no other params should be passed.
#
# @param [Object] observer the observer to add
# @param [Symbol] func the function to call on the observer during notification.
# Default is :update
# @return [Object] the added observer
def add_observer(observer = nil, func = :update, &block)
observers.add_observer(observer, func, &block)
end
# As `#add_observer` but can be used for chaining.
#
# @param [Object] observer the observer to add
# @param [Symbol] func the function to call on the observer during notification.
# @return [Observable] self
def with_observer(observer = nil, func = :update, &block)
add_observer(observer, func, &block)
self
end
# @!macro observable_delete_observer
#
# Remove `observer` as an observer on this object so that it will no
# longer receive notifications.
#
# @param [Object] observer the observer to remove
# @return [Object] the deleted observer
def delete_observer(observer)
observers.delete_observer(observer)
end
# @!macro observable_delete_observers
#
# Remove all observers associated with this object.
#
# @return [Observable] self
def delete_observers
observers.delete_observers
self
end
# @!macro observable_count_observers
#
# Return the number of observers associated with this object.
#
# @return [Integer] the observers count
def count_observers
observers.count_observers
end
protected
attr_accessor :observers
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb | module Concurrent
module Concern
# Object references in Ruby are mutable. This can lead to serious problems when
# the `#value` of a concurrent object is a mutable reference. Which is always the
# case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
# Most classes in this library that expose a `#value` getter method do so using the
# `Dereferenceable` mixin module.
#
# @!macro copy_options
module Dereferenceable
# NOTE: This module is going away in 2.0. In the mean time we need it to
# play nicely with the synchronization layer. This means that the
# including class SHOULD be synchronized and it MUST implement a
# `#synchronize` method. Not doing so will lead to runtime errors.
# Return the value this object represents after applying the options specified
# by the `#set_deref_options` method.
#
# @return [Object] the current value of the object
def value
synchronize { apply_deref_options(@value) }
end
alias_method :deref, :value
protected
# Set the internal value of this object
#
# @param [Object] value the new value
def value=(value)
synchronize{ @value = value }
end
# @!macro dereferenceable_set_deref_options
# Set the options which define the operations #value performs before
# returning data to the caller (dereferencing).
#
# @note Most classes that include this module will call `#set_deref_options`
# from within the constructor, thus allowing these options to be set at
# object creation.
#
# @param [Hash] opts the options defining dereference behavior.
# @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
# @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
# @option opts [String] :copy_on_deref (nil) call the given `Proc` passing
# the internal value and returning the value returned from the proc
def set_deref_options(opts = {})
synchronize{ ns_set_deref_options(opts) }
end
# @!macro dereferenceable_set_deref_options
# @!visibility private
def ns_set_deref_options(opts)
@dup_on_deref = opts[:dup_on_deref] || opts[:dup]
@freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze]
@copy_on_deref = opts[:copy_on_deref] || opts[:copy]
@do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref)
nil
end
# @!visibility private
def apply_deref_options(value)
return nil if value.nil?
return value if @do_nothing_on_deref
value = @copy_on_deref.call(value) if @copy_on_deref
value = value.dup if @dup_on_deref
value = value.freeze if @freeze_on_deref
value
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb | require 'concurrent/utility/engine'
# Synchronization::AbstractObject must be defined before loading the extension
require 'concurrent/synchronization/abstract_object'
module Concurrent
# @!visibility private
module Utility
# @!visibility private
module NativeExtensionLoader
def allow_c_extensions?
Concurrent.on_cruby?
end
def c_extensions_loaded?
defined?(@c_extensions_loaded) && @c_extensions_loaded
end
def load_native_extensions
if Concurrent.on_cruby? && !c_extensions_loaded?
['concurrent/concurrent_ruby_ext',
"concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext"
].each { |p| try_load_c_extension p }
end
if Concurrent.on_jruby? && !java_extensions_loaded?
begin
require 'concurrent/concurrent_ruby.jar'
set_java_extensions_loaded
rescue LoadError => e
raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace
end
end
end
private
def load_error_path(error)
if error.respond_to? :path
error.path
else
error.message.split(' -- ').last
end
end
def set_c_extensions_loaded
@c_extensions_loaded = true
end
def java_extensions_loaded?
defined?(@java_extensions_loaded) && @java_extensions_loaded
end
def set_java_extensions_loaded
@java_extensions_loaded = true
end
def try_load_c_extension(path)
require path
set_c_extensions_loaded
rescue LoadError => e
if load_error_path(e) == path
# move on with pure-Ruby implementations
# TODO (pitr-ch 12-Jul-2018): warning on verbose?
else
raise e
end
end
end
end
# @!visibility private
extend Utility::NativeExtensionLoader
end
Concurrent.load_native_extensions
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/processor_counter.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/processor_counter.rb | require 'etc'
require 'rbconfig'
require 'concurrent/delay'
module Concurrent
# @!visibility private
module Utility
# @!visibility private
class ProcessorCounter
def initialize
@processor_count = Delay.new { compute_processor_count }
@physical_processor_count = Delay.new { compute_physical_processor_count }
@cpu_quota = Delay.new { compute_cpu_quota }
@cpu_shares = Delay.new { compute_cpu_shares }
end
def processor_count
@processor_count.value
end
def physical_processor_count
@physical_processor_count.value
end
def available_processor_count
cpu_count = processor_count.to_f
quota = cpu_quota
return cpu_count if quota.nil?
# cgroup cpus quotas have no limits, so they can be set to higher than the
# real count of cores.
if quota > cpu_count
cpu_count
else
quota
end
end
def cpu_quota
@cpu_quota.value
end
def cpu_shares
@cpu_shares.value
end
private
def compute_processor_count
if Concurrent.on_jruby?
java.lang.Runtime.getRuntime.availableProcessors
else
Etc.nprocessors
end
end
def compute_physical_processor_count
ppc = case RbConfig::CONFIG["target_os"]
when /darwin\d\d/
IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i
when /linux/
cores = {} # unique physical ID / core ID combinations
phy = 0
IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln|
if ln.start_with?("physical")
phy = ln[/\d+/]
elsif ln.start_with?("core")
cid = phy + ":" + ln[/\d+/]
cores[cid] = true if not cores[cid]
end
end
cores.count
when /mswin|mingw/
# Get-CimInstance introduced in PowerShell 3 or earlier: https://learn.microsoft.com/en-us/previous-versions/powershell/module/cimcmdlets/get-ciminstance?view=powershell-3.0
result = run('powershell -command "Get-CimInstance -ClassName Win32_Processor -Property NumberOfCores | Select-Object -Property NumberOfCores"')
if !result || $?.exitstatus != 0
# fallback to deprecated wmic for older systems
result = run("wmic cpu get NumberOfCores")
end
if !result || $?.exitstatus != 0
# Bail out if both commands returned something unexpected
processor_count
else
# powershell: "\nNumberOfCores\n-------------\n 4\n\n\n"
# wmic: "NumberOfCores \n\n4 \n\n\n\n"
result.scan(/\d+/).map(&:to_i).reduce(:+)
end
else
processor_count
end
# fall back to logical count if physical info is invalid
ppc > 0 ? ppc : processor_count
rescue
return 1
end
def run(command)
IO.popen(command, &:read)
rescue Errno::ENOENT
end
def compute_cpu_quota
if RbConfig::CONFIG["target_os"].include?("linux")
if File.exist?("/sys/fs/cgroup/cpu.max")
# cgroups v2: https://docs.kernel.org/admin-guide/cgroup-v2.html#cpu-interface-files
cpu_max = File.read("/sys/fs/cgroup/cpu.max")
return nil if cpu_max.start_with?("max ") # no limit
max, period = cpu_max.split.map(&:to_f)
max / period
elsif File.exist?("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us")
# cgroups v1: https://kernel.googlesource.com/pub/scm/linux/kernel/git/glommer/memcg/+/cpu_stat/Documentation/cgroups/cpu.txt
max = File.read("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us").to_i
# If the cpu.cfs_quota_us is -1, cgroup does not adhere to any CPU time restrictions
# https://docs.kernel.org/scheduler/sched-bwc.html#management
return nil if max <= 0
period = File.read("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us").to_f
max / period
end
end
end
def compute_cpu_shares
if RbConfig::CONFIG["target_os"].include?("linux")
if File.exist?("/sys/fs/cgroup/cpu.weight")
# cgroups v2: https://docs.kernel.org/admin-guide/cgroup-v2.html#cpu-interface-files
# Ref: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
weight = File.read("/sys/fs/cgroup/cpu.weight").to_f
((((weight - 1) * 262142) / 9999) + 2) / 1024
elsif File.exist?("/sys/fs/cgroup/cpu/cpu.shares")
# cgroups v1: https://kernel.googlesource.com/pub/scm/linux/kernel/git/glommer/memcg/+/cpu_stat/Documentation/cgroups/cpu.txt
File.read("/sys/fs/cgroup/cpu/cpu.shares").to_f / 1024
end
end
end
end
end
# create the default ProcessorCounter on load
@processor_counter = Utility::ProcessorCounter.new
singleton_class.send :attr_reader, :processor_counter
# Number of processors seen by the OS and used for process scheduling. For
# performance reasons the calculated value will be memoized on the first
# call.
#
# When running under JRuby the Java runtime call
# `java.lang.Runtime.getRuntime.availableProcessors` will be used. According
# to the Java documentation this "value may change during a particular
# invocation of the virtual machine... [applications] should therefore
# occasionally poll this property." We still memoize this value once under
# JRuby.
#
# Otherwise Ruby's Etc.nprocessors will be used.
#
# @return [Integer] number of processors seen by the OS or Java runtime
#
# @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors()
def self.processor_count
processor_counter.processor_count
end
# Number of physical processor cores on the current system. For performance
# reasons the calculated value will be memoized on the first call.
#
# On Windows the Win32 API will be queried for the `NumberOfCores from
# Win32_Processor`. This will return the total number "of cores for the
# current instance of the processor." On Unix-like operating systems either
# the `hwprefs` or `sysctl` utility will be called in a subshell and the
# returned value will be used. In the rare case where none of these methods
# work or an exception is raised the function will simply return 1.
#
# @return [Integer] number physical processor cores on the current system
#
# @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb
#
# @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx
# @see http://www.unix.com/man-page/osx/1/HWPREFS/
# @see http://linux.die.net/man/8/sysctl
def self.physical_processor_count
processor_counter.physical_processor_count
end
# Number of processors cores available for process scheduling.
# This method takes in account the CPU quota if the process is inside a cgroup with a
# dedicated CPU quota (typically Docker).
# Otherwise it returns the same value as #processor_count but as a Float.
#
# For performance reasons the calculated value will be memoized on the first
# call.
#
# @return [Float] number of available processors
def self.available_processor_count
processor_counter.available_processor_count
end
# The maximum number of processors cores available for process scheduling.
# Returns `nil` if there is no enforced limit, or a `Float` if the
# process is inside a cgroup with a dedicated CPU quota (typically Docker).
#
# Note that nothing prevents setting a CPU quota higher than the actual number of
# cores on the system.
#
# For performance reasons the calculated value will be memoized on the first
# call.
#
# @return [nil, Float] Maximum number of available processors as set by a cgroup CPU quota, or nil if none set
def self.cpu_quota
processor_counter.cpu_quota
end
# The CPU shares requested by the process. For performance reasons the calculated
# value will be memoized on the first call.
#
# @return [Float, nil] CPU shares requested by the process, or nil if not set
def self.cpu_shares
processor_counter.cpu_shares
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/native_integer.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/native_integer.rb | module Concurrent
# @!visibility private
module Utility
# @private
module NativeInteger
# http://stackoverflow.com/questions/535721/ruby-max-integer
MIN_VALUE = -(2**(0.size * 8 - 2))
MAX_VALUE = (2**(0.size * 8 - 2) - 1)
def ensure_upper_bound(value)
if value > MAX_VALUE
raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}")
end
value
end
def ensure_lower_bound(value)
if value < MIN_VALUE
raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}")
end
value
end
def ensure_integer(value)
unless value.is_a?(Integer)
raise ArgumentError.new("#{value} is not an Integer")
end
value
end
def ensure_integer_and_bounds(value)
ensure_integer value
ensure_upper_bound value
ensure_lower_bound value
end
def ensure_positive(value)
if value < 0
raise ArgumentError.new("#{value} cannot be negative")
end
value
end
def ensure_positive_and_no_zero(value)
if value < 1
raise ArgumentError.new("#{value} cannot be negative or zero")
end
value
end
extend self
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/engine.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/engine.rb | module Concurrent
# @!visibility private
module Utility
# @!visibility private
module EngineDetector
def on_cruby?
RUBY_ENGINE == 'ruby'
end
def on_jruby?
RUBY_ENGINE == 'jruby'
end
def on_truffleruby?
RUBY_ENGINE == 'truffleruby'
end
def on_windows?
!(RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/).nil?
end
def on_osx?
!(RbConfig::CONFIG['host_os'] =~ /darwin|mac os/).nil?
end
def on_linux?
!(RbConfig::CONFIG['host_os'] =~ /linux/).nil?
end
def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch)
result = (version.split('.').map(&:to_i) <=> [major, minor, patch])
comparisons = { :== => [0],
:>= => [1, 0],
:<= => [-1, 0],
:> => [1],
:< => [-1] }
comparisons.fetch(comparison).include? result
end
end
end
# @!visibility private
extend Utility::EngineDetector
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/concurrent-ruby-1.3.6/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb | module Concurrent
# @!macro monotonic_get_time
#
# Returns the current time as tracked by the application monotonic clock.
#
# @param [Symbol] unit the time unit to be returned, can be either
# :float_second, :float_millisecond, :float_microsecond, :second,
# :millisecond, :microsecond, or :nanosecond default to :float_second.
#
# @return [Float] The current monotonic time since some unspecified
# starting point
#
# @!macro monotonic_clock_warning
def monotonic_time(unit = :float_second)
Process.clock_gettime(Process::CLOCK_MONOTONIC, unit)
end
module_function :monotonic_time
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf.rb | # frozen_string_literal: true
# Main module of patchelf.
#
# @author david942j
module PatchELF
end
require 'patchelf/patcher'
require 'patchelf/version'
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/version.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/version.rb | # frozen_string_literal: true
module PatchELF
# Current gem version.
VERSION = '1.5.2'.freeze
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/mm.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/mm.rb | # frozen_string_literal: true
require 'patchelf/helper'
module PatchELF
# Memory management, provides malloc/free to allocate LOAD segments.
# @private
class MM
attr_reader :extend_size # @return [Integer] The size extended.
attr_reader :threshold # @return [Integer] Where the file start to be extended.
# Instantiate a {MM} object.
# @param [ELFTools::ELFFile] elf
def initialize(elf)
@elf = elf
@request = []
end
# @param [Integer] size
# @return [void]
# @yieldparam [Integer] off
# @yieldparam [Integer] vaddr
# @yieldreturn [void]
# One can only do the following things in the block:
# 1. Set ELF headers' attributes (with ELFTools)
# 2. Invoke {Saver#inline_patch}
def malloc(size, &block)
raise ArgumentError, 'malloc\'s size most be positive.' if size <= 0
@request << [size, block]
end
# Let the malloc / free requests be effective.
# @return [void]
def dispatch!
return if @request.empty?
@request_size = @request.map(&:first).inject(0, :+)
# The malloc-ed area must be 'rw-' since the dynamic table will be modified during runtime.
# Find all LOADs and calculate their f-gaps and m-gaps.
# We prefer f-gap since it doesn't need move the whole binaries.
# 1. Find if any f-gap has enough size, and one of the LOAD next to it is 'rw-'.
# - expand (forwardlly), only need to change the attribute of LOAD.
# 2. Do 1. again but consider m-gaps instead.
# - expand (forwardlly), need to modify all section headers.
# 3. We have to create a new LOAD, now we need to expand the first LOAD for putting new segment header.
# First of all we check if there're less than two LOADs.
abnormal_elf('No LOAD segment found, not an executable.') if load_segments.empty?
# TODO: Handle only one LOAD. (be careful if memsz > filesz)
fgap_method? || mgap_method? || new_load_method
end
# Query if extended.
# @return [Boolean]
def extended?
defined?(@threshold)
end
# Get correct offset after the extension.
#
# @param [Integer] off
# @return [Integer]
# Shifted offset.
def extended_offset(off)
return off unless defined?(@threshold)
return off if off < @threshold
off + @extend_size
end
private
def fgap_method?
idx = find_gap { |prv, nxt| nxt.file_head - prv.file_tail }
return false if idx.nil?
loads = load_segments
# prefer extend backwardly
return extend_backward?(loads[idx - 1]) if writable?(loads[idx - 1])
extend_forward?(loads[idx])
end
def extend_backward?(seg, size = @request_size)
invoke_callbacks(seg, seg.file_tail)
seg.header.p_filesz += size
seg.header.p_memsz += size
true
end
def extend_forward?(seg, size = @request_size)
seg.header.p_offset -= size
seg.header.p_vaddr -= size
seg.header.p_filesz += size
seg.header.p_memsz += size
invoke_callbacks(seg, seg.file_head)
true
end
def mgap_method?
# | 1 | | 2 |
# | 1 | | 2 |
#=>
# | 1 | | 2 |
# | 1 | | 2 |
idx = find_gap(check_sz: false) { |prv, nxt| PatchELF::Helper.aligndown(nxt.mem_head) - prv.mem_tail }
return false if idx.nil?
loads = load_segments
@threshold = loads[idx].file_head
@extend_size = PatchELF::Helper.alignup(@request_size)
shift_attributes
# prefer backward than forward
return extend_backward?(loads[idx - 1]) if writable?(loads[idx - 1])
# NOTE: loads[idx].file_head has been changed in shift_attributes
extend_forward?(loads[idx], @extend_size)
end
def find_gap(check_sz: true)
loads = load_segments
loads.each_with_index do |l, i|
next if i.zero?
next unless writable?(l) || writable?(loads[i - 1])
sz = yield(loads[i - 1], l)
abnormal_elf('LOAD segments are out of order.') if check_sz && sz.negative?
next unless sz >= @request_size
return i
end
nil
end
# TODO
def new_load_method
raise NotImplementedError
end
def writable?(seg)
seg.readable? && seg.writable?
end
# For all attributes >= threshold, += offset
def shift_attributes
# ELFHeader->section_header
# Sections:
# all
# Segments:
# all
# XXX: will be buggy if someday the number of segments can be changed.
# Bottom-up
@elf.each_sections do |sec|
sec.header.sh_offset += extend_size if sec.header.sh_offset >= threshold
end
@elf.each_segments do |seg|
next unless seg.header.p_offset >= threshold
seg.header.p_offset += extend_size
# We have to change align of LOAD segment since ld.so checks it.
seg.header.p_align = Helper.page_size if seg.is_a?(ELFTools::Segments::LoadSegment)
end
@elf.header.e_shoff += extend_size if @elf.header.e_shoff >= threshold
end
def load_segments
@elf.segments_by_type(:load)
end
def invoke_callbacks(seg, start)
cur = start
@request.each do |sz, block|
block.call(cur, seg.offset_to_vma(cur))
cur += sz
end
end
def abnormal_elf(msg)
raise ArgumentError, msg
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/exceptions.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/exceptions.rb | # encoding: ascii-8bit
# frozen_string_literal: true
require 'elftools/exceptions'
module PatchELF
# Raised on an error during ELF modification.
class PatchError < ELFTools::ELFError; end
# Raised when Dynamic Tag is missing
class MissingTagError < PatchError; end
# Raised on missing Program Header(segment)
class MissingSegmentError < PatchError; end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/logger.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/logger.rb | # frozen_string_literal: true
require 'logger'
require 'patchelf/helper'
module PatchELF
# A logger for internal usage.
module Logger
module_function
@logger = ::Logger.new($stderr).tap do |log|
log.formatter = proc do |severity, _datetime, _progname, msg|
"[#{PatchELF::Helper.colorize(severity, severity.downcase.to_sym)}] #{msg}\n"
end
end
%i[debug info warn error level=].each do |sym|
define_method(sym) do |msg|
@logger.__send__(sym, msg)
nil
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/cli.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/cli.rb | # frozen_string_literal: true
require 'optparse'
require 'patchelf/patcher'
require 'patchelf/version'
module PatchELF
# For command line interface to parsing arguments.
module CLI
# Name of binary.
SCRIPT_NAME = 'patchelf.rb'.freeze
# CLI usage string.
USAGE = format('Usage: %s <commands> FILENAME [OUTPUT_FILE]', SCRIPT_NAME).freeze
module_function
# Main method of CLI.
# @param [Array<String>] argv
# Command line arguments.
# @return [void]
# @example
# PatchELF::CLI.work(%w[--help])
# # usage message to stdout
# PatchELF::CLI.work(%w[--version])
# # version message to stdout
def work(argv)
@options = {
set: {},
print: [],
needed: []
}
return $stdout.puts "PatchELF Version #{PatchELF::VERSION}" if argv.include?('--version')
return $stdout.puts option_parser unless parse?(argv)
# Now the options are (hopefully) valid, let's process the ELF file.
begin
@patcher = PatchELF::Patcher.new(@options[:in_file])
rescue ELFTools::ELFError, Errno::ENOENT => e
return PatchELF::Logger.error(e.message)
end
patcher.use_rpath! if @options[:force_rpath]
readonly
patch_requests
patcher.save(@options[:out_file])
end
private
def patcher
@patcher
end
def readonly
@options[:print].uniq.each do |s|
content = patcher.__send__(s)
next if content.nil?
s = :rpath if @options[:force_rpath] && s == :runpath
$stdout.puts "#{s}: #{Array(content).join(' ')}"
end
end
def patch_requests
@options[:set].each do |sym, val|
patcher.__send__(:"#{sym}=", val)
end
@options[:needed].each do |type, val|
patcher.__send__(:"#{type}_needed", *val)
end
end
def parse?(argv)
remain = option_parser.permute(argv)
return false if remain.first.nil?
@options[:in_file] = remain.first
@options[:out_file] = remain[1] # can be nil
true
end
def option_parser
@option_parser ||= OptionParser.new do |opts|
opts.banner = USAGE
opts.on('--print-interpreter', '--pi', 'Show interpreter\'s name.') do
@options[:print] << :interpreter
end
opts.on('--print-needed', '--pn', 'Show needed libraries specified in DT_NEEDED.') do
@options[:print] << :needed
end
opts.on('--print-runpath', '--pr', 'Show the path specified in DT_RUNPATH.') do
@options[:print] << :runpath
end
opts.on('--print-soname', '--ps', 'Show soname specified in DT_SONAME.') do
@options[:print] << :soname
end
opts.on('--set-interpreter INTERP', '--interp INTERP', 'Set interpreter\'s name.') do |interp|
@options[:set][:interpreter] = interp
end
opts.on('--set-needed LIB1,LIB2,LIB3', '--needed LIB1,LIB2,LIB3', Array,
'Set needed libraries, this will remove all existent needed libraries.') do |needs|
@options[:set][:needed] = needs
end
opts.on('--add-needed LIB', 'Append a new needed library.') do |lib|
@options[:needed] << [:add, lib]
end
opts.on('--remove-needed LIB', 'Remove a needed library.') do |lib|
@options[:needed] << [:remove, lib]
end
opts.on('--replace-needed LIB1,LIB2', Array, 'Replace needed library LIB1 as LIB2.') do |libs|
@options[:needed] << [:replace, libs]
end
opts.on('--set-runpath PATH', '--runpath PATH', 'Set the path of runpath.') do |path|
@options[:set][:runpath] = path
end
opts.on(
'--force-rpath',
'According to the ld.so docs, DT_RPATH is obsolete,',
"#{SCRIPT_NAME} will always try to get/set DT_RUNPATH first.",
'Use this option to force every operations related to runpath (e.g. --runpath)',
'to consider \'DT_RPATH\' instead of \'DT_RUNPATH\'.'
) do
@options[:force_rpath] = true
end
opts.on('--set-soname SONAME', '--so SONAME', 'Set name of a shared library.') do |soname|
@options[:set][:soname] = soname
end
opts.on('--version', 'Show current gem\'s version.')
end
end
extend self
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/saver.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/saver.rb | # frozen_string_literal: true
require 'elftools/constants'
require 'elftools/elf_file'
require 'elftools/structs'
require 'elftools/util'
require 'fileutils'
require 'objspace'
require 'patchelf/helper'
require 'patchelf/mm'
module PatchELF
# To mark a not-using tag
IGNORE = ELFTools::Constants::DT_LOOS
# Internal use only.
#
# For {Patcher} to do patching things and save to file.
# @private
class Saver
attr_reader :in_file # @return [String] Input filename.
attr_reader :out_file # @return [String] Output filename.
# Instantiate a {Saver} object.
# @param [String] in_file
# @param [String] out_file
# @param [{Symbol => String, Array}] set
def initialize(in_file, out_file, set)
@in_file = in_file
@out_file = out_file
@set = set
# [{Integer => String}]
@inline_patch = {}
f = File.open(in_file)
@elf = ELFTools::ELFFile.new(f)
@mm = PatchELF::MM.new(@elf)
@strtab_extend_requests = []
@append_dyn = []
# Ensure file is closed when the {Saver} object is garbage collected.
ObjectSpace.define_finalizer(self, Helper.close_file_proc(f))
end
# @return [void]
def save!
# In this method we assume all attributes that should exist do exist.
# e.g. DT_INTERP, DT_DYNAMIC. These should have been checked in the patcher.
patch_interpreter
patch_dynamic
@mm.dispatch!
FileUtils.cp(in_file, out_file) if out_file != in_file
patch_out(@out_file)
# Let output file have the same permission as input.
FileUtils.chmod(File.stat(in_file).mode, out_file)
end
private
def patch_interpreter
return if @set[:interpreter].nil?
new_interp = "#{@set[:interpreter]}\x00"
old_interp = "#{@elf.segment_by_type(:interp).interp_name}\x00"
return if old_interp == new_interp
# These headers must be found here but not in the proc.
seg_header = @elf.segment_by_type(:interp).header
sec_header = section_header('.interp')
patch = proc do |off, vaddr|
# Register an inline patching
inline_patch(off, new_interp)
# The patching feature of ELFTools
seg_header.p_offset = off
seg_header.p_vaddr = seg_header.p_paddr = vaddr
seg_header.p_filesz = seg_header.p_memsz = new_interp.size
if sec_header
sec_header.sh_offset = off
sec_header.sh_size = new_interp.size
end
end
if new_interp.size <= old_interp.size
# easy case
patch.call(seg_header.p_offset.to_i, seg_header.p_vaddr.to_i)
else
# hard case, we have to request a new LOAD area
@mm.malloc(new_interp.size, &patch)
end
end
def patch_dynamic
# We never do inline patching on strtab's string.
# 1. Search if there's useful string exists
# - only need header patching
# 2. Append a new string to the strtab.
# - register strtab extension
dynamic.tags # HACK, force @tags to be defined
patch_soname if @set[:soname]
patch_runpath if @set[:runpath]
patch_runpath(:rpath) if @set[:rpath]
patch_needed if @set[:needed]
malloc_strtab!
expand_dynamic!
end
def patch_soname
# The tag must exist.
so_tag = dynamic.tag_by_type(:soname)
reg_str_table(@set[:soname]) do |idx|
so_tag.header.d_val = idx
end
end
def patch_runpath(sym = :runpath)
tag = dynamic.tag_by_type(sym)
tag = tag.nil? ? lazy_dyn(sym) : tag.header
reg_str_table(@set[sym]) do |idx|
tag.d_val = idx
end
end
def patch_needed
original_needs = dynamic.tags_by_type(:needed)
@set[:needed].uniq!
original = original_needs.map(&:name)
replace = @set[:needed]
# 3 sets:
# 1. in original and in needs - remain unchanged
# 2. in original but not in needs - remove
# 3. not in original and in needs - append
append = replace - original
remove = original - replace
ignored_dyns = remove.each_with_object([]) do |name, ignored|
dyn = original_needs.find { |n| n.name == name }.header
dyn.d_tag = IGNORE
ignored << dyn
end
append.zip(ignored_dyns) do |name, ignored_dyn|
dyn = ignored_dyn || lazy_dyn(:needed)
dyn.d_tag = ELFTools::Constants::DT_NEEDED
reg_str_table(name) { |idx| dyn.d_val = idx }
end
end
# Create a temp tag header.
# @return [ELFTools::Structs::ELF_Dyn]
def lazy_dyn(sym)
ELFTools::Structs::ELF_Dyn.new(endian: @elf.endian).tap do |dyn|
@append_dyn << dyn
dyn.elf_class = @elf.elf_class
dyn.d_tag = ELFTools::Util.to_constant(ELFTools::Constants::DT, sym)
end
end
def expand_dynamic!
return if @append_dyn.empty?
dyn_sec = section_header('.dynamic')
total = dynamic.tags.map(&:header)
# the last must be a null-tag
total = total[0..-2] + @append_dyn + [total.last]
bytes = total.first.num_bytes * total.size
@mm.malloc(bytes) do |off, vaddr|
inline_patch(off, total.map(&:to_binary_s).join)
dynamic.header.p_offset = off
dynamic.header.p_vaddr = dynamic.header.p_paddr = vaddr
dynamic.header.p_filesz = dynamic.header.p_memsz = bytes
if dyn_sec
dyn_sec.sh_offset = off
dyn_sec.sh_addr = vaddr
dyn_sec.sh_size = bytes
end
end
end
def malloc_strtab!
return if @strtab_extend_requests.empty?
strtab = dynamic.tag_by_type(:strtab)
# Process registered requests
need_size = strtab_string.size + @strtab_extend_requests.reduce(0) { |sum, (str, _)| sum + str.size + 1 }
dynstr = section_header('.dynstr')
@mm.malloc(need_size) do |off, vaddr|
new_str = "#{strtab_string}#{@strtab_extend_requests.map(&:first).join("\x00")}\x00"
inline_patch(off, new_str)
cur = strtab_string.size
@strtab_extend_requests.each do |str, block|
block.call(cur)
cur += str.size + 1
end
# Now patching strtab header
strtab.header.d_val = vaddr
# We also need to patch dynstr to let readelf have correct output.
if dynstr
dynstr.sh_size = new_str.size
dynstr.sh_offset = off
dynstr.sh_addr = vaddr
end
end
end
# @param [String] str
# @yieldparam [Integer] idx
# @yieldreturn [void]
def reg_str_table(str, &block)
idx = strtab_string.index("#{str}\x00")
# Request string is already exist
return yield idx if idx
# Record the request
@strtab_extend_requests << [str, block]
end
def strtab_string
return @strtab_string if defined?(@strtab_string)
# TODO: handle no strtab exists..
offset = @elf.offset_from_vma(dynamic.tag_by_type(:strtab).value)
# This is a little tricky since no length information is stored in the tag.
# We first get the file offset of the string then 'guess' where the end is.
@elf.stream.pos = offset
@strtab_string = +''
loop do
c = @elf.stream.read(1)
break unless c =~ /\x00|[[:print:]]/
@strtab_string << c
end
@strtab_string
end
# This can only be used for patching interpreter's name
# or set strings in a malloc-ed area.
# i.e. NEVER intend to change the string defined in strtab
def inline_patch(off, str)
@inline_patch[off] = str
end
# Modify the out_file according to registered patches.
def patch_out(out_file)
File.open(out_file, 'r+') do |f|
if @mm.extended?
original_head = @mm.threshold
extra = {}
# Copy all data after the second load
@elf.stream.pos = original_head
extra[original_head + @mm.extend_size] = @elf.stream.read # read to end
# zero out the 'gap' we created
extra[original_head] = "\x00" * @mm.extend_size
extra.each do |pos, str|
f.pos = pos
f.write(str)
end
end
@elf.patches.each do |pos, str|
f.pos = @mm.extended_offset(pos)
f.write(str)
end
@inline_patch.each do |pos, str|
f.pos = pos
f.write(str)
end
end
end
# @return [ELFTools::Sections::Section?]
def section_header(name)
sec = @elf.section_by_name(name)
return if sec.nil?
sec.header
end
def dynamic
@dynamic ||= @elf.segment_by_type(:dynamic)
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/patcher.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/patcher.rb | # encoding: ascii-8bit
# frozen_string_literal: true
require 'elftools/elf_file'
require 'objspace'
require 'patchelf/exceptions'
require 'patchelf/helper'
require 'patchelf/logger'
require 'patchelf/saver'
module PatchELF
# Class to handle all patching things.
class Patcher
# @!macro [new] note_apply
# @note This setting will be saved after {#save} being invoked.
attr_reader :elf # @return [ELFTools::ELFFile] ELF parser object.
# Instantiate a {Patcher} object.
# @param [String] filename
# Filename of input ELF.
# @param [Boolean] logging
# *deprecated*: use +on_error+ instead
# @param [:log, :silent, :exception] on_error
# action when the desired segment/tag field isn't present
# :log = logs to stderr
# :exception = raise exception related to the error
# :silent = ignore the errors
def initialize(filename, on_error: :log, logging: true)
@in_file = filename
f = File.open(filename)
@elf = ELFTools::ELFFile.new(f)
@set = {}
@rpath_sym = :runpath
@on_error = logging ? on_error : :exception
on_error_syms = %i[exception log silent]
raise ArgumentError, "on_error must be one of #{on_error_syms}" unless on_error_syms.include?(@on_error)
# Ensure file is closed when the {Patcher} object is garbage collected.
ObjectSpace.define_finalizer(self, Helper.close_file_proc(f))
end
# @return [String?]
# Get interpreter's name.
# @example
# PatchELF::Patcher.new('/bin/ls').interpreter
# #=> "/lib64/ld-linux-x86-64.so.2"
def interpreter
@set[:interpreter] || interpreter_
end
# Set interpreter's name.
#
# If the input ELF has no existent interpreter,
# this method will show a warning and has no effect.
# @param [String] interp
# @macro note_apply
def interpreter=(interp)
return if interpreter_.nil? # will also show warning if there's no interp segment.
@set[:interpreter] = interp
end
# Get needed libraries.
# @return [Array<String>]
# @example
# patcher = PatchELF::Patcher.new('/bin/ls')
# patcher.needed
# #=> ["libselinux.so.1", "libc.so.6"]
def needed
@set[:needed] || needed_
end
# Set needed libraries.
# @param [Array<String>] needs
# @macro note_apply
def needed=(needs)
@set[:needed] = needs
end
# Add the needed library.
# @param [String] need
# @return [void]
# @macro note_apply
def add_needed(need)
@set[:needed] ||= needed_
@set[:needed] << need
end
# Remove the needed library.
# @param [String] need
# @return [void]
# @macro note_apply
def remove_needed(need)
@set[:needed] ||= needed_
@set[:needed].delete(need)
end
# Replace needed library +src+ with +tar+.
#
# @param [String] src
# Library to be replaced.
# @param [String] tar
# Library replace with.
# @return [void]
# @macro note_apply
def replace_needed(src, tar)
@set[:needed] ||= needed_
@set[:needed].map! { |v| v == src ? tar : v }
end
# Get the soname of a shared library.
# @return [String?] The name.
# @example
# patcher = PatchELF::Patcher.new('/bin/ls')
# patcher.soname
# # [WARN] Entry DT_SONAME not found, not a shared library?
# #=> nil
# @example
# PatchELF::Patcher.new('/lib/x86_64-linux-gnu/libc.so.6').soname
# #=> "libc.so.6"
def soname
@set[:soname] || soname_
end
# Set soname.
#
# If the input ELF is not a shared library with a soname,
# this method will show a warning and has no effect.
# @param [String] name
# @macro note_apply
def soname=(name)
return if soname_.nil?
@set[:soname] = name
end
# Get runpath.
# @return [String?]
def runpath
@set[@rpath_sym] || runpath_(@rpath_sym)
end
# Get rpath
# return [String?]
def rpath
@set[:rpath] || runpath_(:rpath)
end
# Set rpath
#
# Modify / set DT_RPATH of the given ELF.
# similar to runpath= except DT_RPATH is modifed/created in DYNAMIC segment.
# @param [String] rpath
# @macro note_apply
def rpath=(rpath)
@set[:rpath] = rpath
end
# Set runpath.
#
# If DT_RUNPATH is not presented in the input ELF,
# a new DT_RUNPATH attribute will be inserted into the DYNAMIC segment.
# @param [String] runpath
# @macro note_apply
def runpath=(runpath)
@set[@rpath_sym] = runpath
end
# Set all operations related to DT_RUNPATH to use DT_RPATH.
# @return [self]
def use_rpath!
@rpath_sym = :rpath
self
end
# Save the patched ELF as +out_file+.
# @param [String?] out_file
# If +out_file+ is +nil+, the original input file will be modified.
# @param [Boolean] patchelf_compatible
# When +patchelf_compatible+ is true, tries to produce same ELF as the one produced by NixOS/patchelf.
# @return [void]
def save(out_file = nil, patchelf_compatible: false)
# If nothing is modified, return directly.
return if out_file.nil? && !dirty?
out_file ||= @in_file
saver = if patchelf_compatible
require 'patchelf/alt_saver'
PatchELF::AltSaver.new(@in_file, out_file, @set)
else
PatchELF::Saver.new(@in_file, out_file, @set)
end
saver.save!
end
private
def log_or_raise(msg, exception = PatchELF::PatchError)
raise exception, msg if @on_error == :exception
PatchELF::Logger.warn(msg) if @on_error == :log
end
def interpreter_
segment = @elf.segment_by_type(:interp)
return log_or_raise 'No interpreter found.', PatchELF::MissingSegmentError if segment.nil?
segment.interp_name
end
# @return [Array<String>]
def needed_
segment = dynamic_or_log
return if segment.nil?
segment.tags_by_type(:needed).map(&:name)
end
# @return [String?]
def runpath_(rpath_sym = :runpath)
tag_name_or_log(rpath_sym, "Entry DT_#{rpath_sym.to_s.upcase} not found.")
end
# @return [String?]
def soname_
tag_name_or_log(:soname, 'Entry DT_SONAME not found, not a shared library?')
end
# @return [Boolean]
def dirty?
@set.any?
end
def tag_name_or_log(type, log_msg)
segment = dynamic_or_log
return if segment.nil?
tag = segment.tag_by_type(type)
return log_or_raise log_msg, PatchELF::MissingTagError if tag.nil?
tag.name
end
def dynamic_or_log
@elf.segment_by_type(:dynamic).tap do |s|
if s.nil?
log_or_raise 'DYNAMIC segment not found, might be a statically-linked ELF?', PatchELF::MissingSegmentError
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/helper.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/helper.rb | # frozen_string_literal: true
module PatchELF
# Helper methods for internal usage.
module Helper
module_function
# Color codes for pretty print.
COLOR_CODE = {
esc_m: "\e[0m",
info: "\e[38;5;82m", # light green
warn: "\e[38;5;230m", # light yellow
error: "\e[38;5;196m" # heavy red
}.freeze
# The size of one page.
def page_size(e_machine = nil)
# Different architectures have different minimum section alignments.
case e_machine
when ELFTools::Constants::EM_SPARC,
ELFTools::Constants::EM_MIPS,
ELFTools::Constants::EM_PPC,
ELFTools::Constants::EM_PPC64,
ELFTools::Constants::EM_AARCH64,
ELFTools::Constants::EM_TILEGX,
ELFTools::Constants::EM_LOONGARCH
0x10000
else
0x1000
end
end
# For wrapping string with color codes for prettier inspect.
# @param [String] str
# Content to colorize.
# @param [Symbol] type
# Specify which kind of color to use, valid symbols are defined in {.COLOR_CODE}.
# @return [String]
# String that wrapped with color codes.
def colorize(str, type)
return str unless color_enabled?
cc = COLOR_CODE
color = cc.key?(type) ? cc[type] : ''
"#{color}#{str.sub(COLOR_CODE[:esc_m], color)}#{cc[:esc_m]}"
end
# For {#colorize} to decide if need add color codes.
# @return [Boolean]
def color_enabled?
$stderr.tty?
end
# @param [Integer] val
# @param [Integer] align
# @return [Integer]
# Aligned result.
# @example
# aligndown(0x1234)
# #=> 4096
# aligndown(0x33, 0x20)
# #=> 32
# aligndown(0x10, 0x8)
# #=> 16
def aligndown(val, align = page_size)
val - (val & (align - 1))
end
# @param [Integer] val
# @param [Integer] align
# @return [Integer]
# Aligned result.
# @example
# alignup(0x1234)
# #=> 8192
# alignup(0x33, 0x20)
# #=> 64
# alignup(0x10, 0x8)
# #=> 16
def alignup(val, align = page_size)
val.nobits?(align - 1) ? val : (aligndown(val, align) + align)
end
# @param [File?] file
# @return [Proc]
# A proc that closes the file if it's open.
def close_file_proc(file)
proc { file.close if file && !file.closed? }
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/alt_saver.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/patchelf-1.5.2/lib/patchelf/alt_saver.rb | # frozen_string_literal: true
require 'elftools/constants'
require 'elftools/elf_file'
require 'elftools/structs'
require 'elftools/util'
require 'fileutils'
require 'objspace'
require 'patchelf/helper'
# :nodoc:
module PatchELF
# TODO: refactor buf_* methods here
# TODO: move all refinements into a separate file / helper file.
# refinements for cleaner syntax / speed / memory optimizations
module Refinements
refine StringIO do
# behaves like C memset. Equivalent to calling stream.write(char * nbytes)
# the benefit of preferring this over `stream.write(char * nbytes)` is only when data to be written is large.
# @param [String] char
# @param [Integer] nbytes
# @return[void]
def fill(char, nbytes)
at_once = Helper.page_size
pending = nbytes
if pending > at_once
to_write = char * at_once
while pending >= at_once
write(to_write)
pending -= at_once
end
end
write(char * pending) if pending.positive?
end
end
end
using Refinements
# Internal use only.
# alternative to +Saver+, that aims to be byte to byte equivalent with NixOS/patchelf.
#
# *DISCLAIMER*: This differs from +Saver+ in number of ways. No lazy reading,
# inconsistent use of existing internal API(e.g: manual reading of data instead of calling +section.data+)
# @private
class AltSaver
attr_reader :in_file # @return [String] Input filename.
attr_reader :out_file # @return [String] Output filename.
# Instantiate a {AltSaver} object.
# the params passed are the same as the ones passed to +Saver+
# @param [String] in_file
# @param [String] out_file
# @param [{Symbol => String, Array}] set
def initialize(in_file, out_file, set)
  @in_file = in_file
  @out_file = out_file
  @set = set
  f = File.open(in_file, 'rb')
  # the +@buffer+ and +@elf+ both could work on same +StringIO+ stream,
  # the updating of @buffer in place blocks us from looking up old values.
  # TODO: cache the values needed later, use same stream for +@buffer+ and +@elf+.
  # also be sure to update the stream offset passed to Segments::Segment.
  @elf = ELFTools::ELFFile.new(f)
  @buffer = StringIO.new(f.tap(&:rewind).read) # StringIO makes easier to work with Bindata
  # Ensure file is closed when the {AltSaver} object is garbage collected.
  ObjectSpace.define_finalizer(self, Helper.close_file_proc(f))
  @ehdr = @elf.header
  @endian = @elf.endian
  @elf_class = @elf.elf_class
  @segments = @elf.segments # usage similar to phdrs
  @sections = @elf.sections # usage similar to shdrs
  # build the section-name -> index lookup used by find_section_idx
  update_section_idx!
  # {String => String}
  # section name to its data mapping
  @replaced_sections = {}
  # replaced section blobs are padded to this alignment when written back
  @section_alignment = ehdr.e_phoff.num_bytes
  # using the same environment flag as patchelf, makes it easier for debugging
  Logger.level = ::Logger.const_get(ENV['PATCHELF_DEBUG'] ? :DEBUG : :WARN)
end

# Apply every requested modification (each @set key maps to a modify_*
# method), rewrite the affected sections, and emit the patched ELF.
# @return [void]
def save!
  @set.each { |mtd, val| send(:"modify_#{mtd}") if val }
  rewrite_sections
  FileUtils.cp(in_file, out_file) if out_file != in_file
  patch_out
  # Let output file have the same permission as input.
  FileUtils.chmod(File.stat(in_file).mode, out_file)
end

private

attr_reader :ehdr, :endian, :elf_class

# Sections from the original parse, memoized; used by new_section_idx to
# map pre-sort section indices onto the current @sections layout.
def old_sections
  @old_sections ||= @elf.sections
end
# Read a NUL-terminated C string from the buffer starting at +off+.
# The terminator (or EOF) ends the read and is not included in the result.
def buf_cstr(off)
  str = +''
  with_buf_at(off) do |buf|
    while (byte = buf.read(1)) && byte != "\x00"
      str << byte
    end
  end
  str
end
# Copy +n_bytes+ within the buffer from +src_idx+ to +dst_idx+
# (read first, then write, so a later destination is safe).
def buf_move!(dst_idx, src_idx, n_bytes)
  with_buf_at(src_idx) do |buf|
    chunk = buf.read(n_bytes)
    buf.seek(dst_idx)
    buf.write(chunk)
  end
end
# @return [ELFTools::Sections::Section, nil] the .dynstr section, if any.
def dynstr
  find_section '.dynstr'
end

# yields dynamic tag, and offset in buffer
# Iterates .dynamic entries up to (excluding) the DT_NULL terminator.
# @yieldparam [ELFTools::Structs::ELF_Dyn] dyn the current entry
# @yieldparam [Integer] buf_dyn_offset entry offset within @buffer
def each_dynamic_tags
  return unless block_given?
  sec = find_section '.dynamic'
  return unless sec
  # SHT_NOBITS sections occupy no file space, nothing to iterate
  return if sec.header.sh_type == ELFTools::Constants::SHT_NOBITS
  shdr = sec.header
  with_buf_at(shdr.sh_offset) do |buf|
    dyn = ELFTools::Structs::ELF_Dyn.new(elf_class: elf_class, endian: endian)
    loop do
      buf_dyn_offset = buf.tell
      dyn.clear
      dyn.read(buf)
      break if dyn.d_tag == ELFTools::Constants::DT_NULL
      yield dyn, buf_dyn_offset
      # there's a possibility for caller to modify @buffer.pos, seek to avoid such issues
      buf.seek buf_dyn_offset + dyn.num_bytes
    end
  end
end

# the idea of uniquely identifying section by its name has its problems
# but this is how patchelf operates and is prone to bugs.
# e.g: https://github.com/NixOS/patchelf/issues/197
def find_section(sec_name)
  idx = find_section_idx sec_name
  return unless idx
  @sections[idx]
end

# @return [Integer, nil] index of the named section within @sections.
def find_section_idx(sec_name)
  @section_idx_by_name[sec_name]
end

# Grow the buffer to +newsz+ bytes (StringIO#truncate zero-fills the
# extension); no-op when the buffer is already large enough.
def buf_grow!(newsz)
  bufsz = @buffer.size
  return if newsz <= bufsz
  @buffer.truncate newsz
end
# Queue a replacement .interp section holding the requested interpreter
# path, NUL terminated.
def modify_interpreter
  @replaced_sections['.interp'] = @set[:interpreter] + "\x00"
end
# Placeholder for DT_NEEDED patching; intentionally unimplemented.
def modify_needed
  # due to gsoc time constraints only implementing features used by brew.
  raise NotImplementedError
end

# not checking for nil as modify_rpath is only called if @set[:rpath]
# Force the result to be a DT_RPATH entry.
def modify_rpath
  modify_rpath_helper @set[:rpath], force_rpath: true
end

# not checking for nil as modify_runpath is only called if @set[:runpath]
# Prefer a DT_RUNPATH entry (the modern default).
def modify_runpath
  modify_rpath_helper @set[:runpath]
end
# Scan .dynamic for DT_RPATH / DT_RUNPATH entries.
# @return [{Symbol => {offset: Integer, header: ELFTools::Structs::ELF_Dyn}}]
#   buffer offset and a detached copy of each tag, keyed by :rpath/:runpath.
def collect_runpath_tags
  tags = {}
  each_dynamic_tags do |dyn, off|
    case dyn.d_tag
    when ELFTools::Constants::DT_RPATH
      tag_type = :rpath
    when ELFTools::Constants::DT_RUNPATH
      tag_type = :runpath
    else
      next
    end
    # clone does shallow copy, and for some reason d_tag and d_val can't be pass as argument
    dyn_rpath = ELFTools::Structs::ELF_Dyn.new(endian: endian, elf_class: elf_class)
    dyn_rpath.assign({ d_tag: dyn.d_tag.to_i, d_val: dyn.d_val.to_i })
    tags[tag_type] = { offset: off, header: dyn_rpath }
  end
  tags
end

# Convert between DT_RPATH and DT_RUNPATH in place so only the kind
# requested via +force_rpath+ survives; +dyn_tags+ is updated to match and
# the retagged entry is written back into the buffer.
def resolve_rpath_tag_conflict(dyn_tags, force_rpath: false)
  dyn_runpath, dyn_rpath = dyn_tags.values_at(:runpath, :rpath)
  update_sym =
    if !force_rpath && dyn_rpath && dyn_runpath.nil?
      :runpath
    elsif force_rpath && dyn_runpath
      :rpath
    end
  return unless update_sym
  delete_sym, = %i[rpath runpath] - [update_sym]
  dyn_tag = dyn_tags[update_sym] = dyn_tags[delete_sym]
  dyn = dyn_tag[:header]
  dyn.d_tag = ELFTools::Constants.const_get("DT_#{update_sym.upcase}")
  with_buf_at(dyn_tag[:offset]) { |buf| dyn.write(buf) }
  dyn_tags.delete(delete_sym)
end

# Replace the rpath/runpath string with +new_rpath+, growing .dynstr (and
# appending a DT_RPATH/DT_RUNPATH tag) when it does not fit in place.
def modify_rpath_helper(new_rpath, force_rpath: false)
  shdr_dynstr = dynstr.header
  dyn_tags = collect_runpath_tags
  resolve_rpath_tag_conflict(dyn_tags, force_rpath: force_rpath)
  # (:runpath, :rpath) order_matters.
  resolved_rpath_dyn = dyn_tags.values_at(:runpath, :rpath).compact.first
  old_rpath = ''
  rpath_off = nil
  if resolved_rpath_dyn
    # d_val is an offset into the .dynstr string table
    rpath_off = shdr_dynstr.sh_offset + resolved_rpath_dyn[:header].d_val
    old_rpath = buf_cstr(rpath_off)
  end
  return if old_rpath == new_rpath
  # scrub the old string (matches upstream patchelf behaviour)
  with_buf_at(rpath_off) { |b| b.write('X' * old_rpath.size) } if rpath_off
  if new_rpath.size <= old_rpath.size
    # new path fits in place: overwrite and NUL-terminate
    with_buf_at(rpath_off) { |b| b.write "#{new_rpath}\x00" }
    return
  end
  Logger.debug 'rpath is too long, resizing...'
  new_dynstr = replace_section '.dynstr', shdr_dynstr.sh_size + new_rpath.size + 1
  # append the new string right after the original .dynstr contents
  new_rpath_strtab_idx = shdr_dynstr.sh_size.to_i
  new_dynstr[new_rpath_strtab_idx..(new_rpath_strtab_idx + new_rpath.size)] = "#{new_rpath}\x00"
  dyn_tags.each_value do |dyn|
    dyn[:header].d_val = new_rpath_strtab_idx
    with_buf_at(dyn[:offset]) { |b| dyn[:header].write(b) }
  end
  return unless dyn_tags.empty?
  # no existing tag to reuse: append a fresh one to .dynamic
  add_dt_rpath!(
    d_tag: force_rpath ? ELFTools::Constants::DT_RPATH : ELFTools::Constants::DT_RUNPATH,
    d_val: new_rpath_strtab_idx
  )
end
# Soname patching is only meaningful for shared objects (ET_DYN);
# other ELF types are silently ignored.
def modify_soname
  # due to gsoc time constraints only implementing features used by brew.
  raise NotImplementedError if ehdr.e_type == ELFTools::Constants::ET_DYN
end
# Append a new program header built from +phdr_vals+ and bump e_phnum.
# @return [nil]
def add_segment!(**phdr_vals)
  new_phdr = ELFTools::Structs::ELF_Phdr[elf_class].new(endian: endian, **phdr_vals)
  # nil = no reference to stream; we only want @segments[i].header
  new_segment = ELFTools::Segments::Segment.new(new_phdr, nil)
  @segments.push new_segment
  ehdr.e_phnum += 1
  nil
end

# Prepend a DT_RPATH/DT_RUNPATH entry to .dynamic, growing the section by
# one entry first via replace_section.
def add_dt_rpath!(d_tag: nil, d_val: nil)
  dyn_num_bytes = nil
  dt_null_idx = 0
  # count existing entries and learn the per-entry byte size
  each_dynamic_tags do |dyn|
    dyn_num_bytes ||= dyn.num_bytes
    dt_null_idx += 1
  end
  if dyn_num_bytes.nil?
    Logger.error 'no dynamic tags'
    return
  end
  # allot for new dt_runpath
  shdr_dynamic = find_section('.dynamic').header
  new_dynamic_data = replace_section '.dynamic', shdr_dynamic.sh_size + dyn_num_bytes
  # consider DT_NULL when copying
  replacement_size = (dt_null_idx + 1) * dyn_num_bytes
  # make space for dt_runpath tag at the top, shift data by one tag position
  new_dynamic_data[dyn_num_bytes..(replacement_size + dyn_num_bytes)] = new_dynamic_data[0..replacement_size]
  dyn_rpath = ELFTools::Structs::ELF_Dyn.new endian: endian, elf_class: elf_class
  dyn_rpath.d_tag = d_tag
  dyn_rpath.d_val = d_val
  # serialize the new tag into the vacated slot at the front
  zi = StringIO.new
  dyn_rpath.write zi
  zi.rewind
  new_dynamic_data[0...dyn_num_bytes] = zi.read
end
# given a index into old_sections table
# returns the corresponding section index in @sections
#
# Returns nil for SHN_UNDEF and reserved indices (>= SHN_LORESERVE).
# raises ArgumentError if old_shndx can't be found in old_sections
# TODO: handle case of non existing section in (new) @sections.
def new_section_idx(old_shndx)
  return if old_shndx == ELFTools::Constants::SHN_UNDEF || old_shndx >= ELFTools::Constants::SHN_LORESERVE
  raise ArgumentError if old_shndx >= old_sections.count
  old_sec = old_sections[old_shndx]
  # Fix: the interpolation referenced an undefined local `shndx`, which made
  # this path raise NameError instead of the intended PatchError.
  raise PatchError, "old_sections[#{old_shndx}] is nil" if old_sec.nil?
  # TODO: handle case of non existing section in (new) @sections.
  find_section_idx(old_sec.name)
end
# Machine-dependent page size used for all alignment decisions.
def page_size
  Helper.page_size(ehdr.e_machine)
end

# Flush the (possibly modified) ELF header into @buffer and dump the whole
# buffer to +out_file+.
def patch_out
  with_buf_at(0) { |b| ehdr.write(b) }
  File.open(out_file, 'wb') do |f|
    @buffer.rewind
    f.write @buffer.read
  end
end

# size includes NUL byte
# Register +section_name+ for replacement with a blob of the requested
# +size+ (current content truncated or zero padded) and return that blob.
def replace_section(section_name, size)
  data = @replaced_sections[section_name]
  unless data
    shdr = find_section(section_name).header
    # avoid calling +section.data+ as the @buffer contents may vary from
    # the stream provided to section at initialization.
    # ideally, calling section.data should work, however avoiding it to prevent
    # future traps.
    with_buf_at(shdr.sh_offset) { |b| data = b.read shdr.sh_size }
  end
  rep_data = if data.size == size
               data
             elsif data.size < size
               data.ljust(size, "\x00")
             else
               "#{data[0...size]}\x00"
             end
  @replaced_sections[section_name] = rep_data
end

# Sort program headers and serialize them at e_phoff.
def write_phdrs_to_buf!
  sort_phdrs!
  with_buf_at(ehdr.e_phoff) do |buf|
    @segments.each { |seg| seg.header.write(buf) }
  end
end

# Sort section headers, serialize them at e_shoff, then re-sync the
# .dynamic entries that refer to relocated sections.
def write_shdrs_to_buf!
  raise PatchError, 'ehdr.e_shnum != @sections.count' if ehdr.e_shnum != @sections.count
  sort_shdrs!
  with_buf_at(ehdr.e_shoff) do |buf|
    @sections.each { |section| section.header.write(buf) }
  end
  sync_dyn_tags!
end
# data for manual packing and unpacking of symbols in symtab sections.
# Returns the Array#pack/String#unpack template plus the indices of the
# st_info / st_shndx / st_value fields within the unpacked array.
def meta_sym_pack
  return @meta_sym_pack if @meta_sym_pack
  # resort to manual packing and unpacking of data,
  # as using bindata is painfully slow :(
  if elf_class == 32
    sym_num_bytes = 16 # u32 u32 u32 u8 u8 u16
    pack_code = endian == :little ? 'VVVCCv' : 'NNNCCn'
    pack_st_info = 3
    pack_st_shndx = 5
    pack_st_value = 1
  else # 64
    sym_num_bytes = 24 # u32 u8 u8 u16 u64 u64
    pack_code = endian == :little ? 'VCCvQ<Q<' : 'NCCnQ>Q>'
    pack_st_info = 1
    pack_st_shndx = 3
    pack_st_value = 4
  end
  @meta_sym_pack = {
    num_bytes: sym_num_bytes, code: pack_code,
    st_info: pack_st_info, st_shndx: pack_st_shndx, st_value: pack_st_value
  }
end

# yields +symbol+, +entry+
# Iterate (and optionally rewrite) every symbol of a SHT_SYMTAB/SHT_DYNSYM
# section; a truthy return value from the block writes the symbol back.
def each_symbol(shdr)
  return unless [ELFTools::Constants::SHT_SYMTAB, ELFTools::Constants::SHT_DYNSYM].include?(shdr.sh_type)
  pack_code, sym_num_bytes = meta_sym_pack.values_at(:code, :num_bytes)
  with_buf_at(shdr.sh_offset) do |buf|
    num_symbols = shdr.sh_size / sym_num_bytes
    num_symbols.times do |entry|
      sym = buf.read(sym_num_bytes).unpack(pack_code)
      sym_modified = yield sym, entry
      if sym_modified
        # rewind over the entry just read and write the mutated copy back
        buf.seek buf.tell - sym_num_bytes
        buf.write sym.pack(pack_code)
      end
    end
  end
end

# Rewrite PT_PHDR, the program/section header tables, and fix up symbol
# st_shndx/st_value fields after sections were re-sorted.
def rewrite_headers(phdr_address)
  # there can only be a single program header table according to ELF spec
  @segments.find { |seg| seg.header.p_type == ELFTools::Constants::PT_PHDR }&.tap do |seg|
    phdr = seg.header
    phdr.p_offset = ehdr.e_phoff.to_i
    phdr.p_vaddr = phdr.p_paddr = phdr_address.to_i
    phdr.p_filesz = phdr.p_memsz = phdr.num_bytes * @segments.count # e_phentsize * e_phnum
  end
  write_phdrs_to_buf!
  write_shdrs_to_buf!
  pack = meta_sym_pack
  @sections.each do |sec|
    each_symbol(sec.header) do |sym, entry|
      old_shndx = sym[pack[:st_shndx]]
      begin
        new_index = new_section_idx(old_shndx)
      rescue ArgumentError
        Logger.warn "entry #{entry} in symbol table refers to a non existing section, skipping"
      end
      next unless new_index
      sym[pack[:st_shndx]] = new_index
      # right 4 bits in the st_info field is st_type
      if (sym[pack[:st_info]] & 0xF) == ELFTools::Constants::STT_SECTION
        sym[pack[:st_value]] = @sections[new_index].header.sh_addr.to_i
      end
      true
    end
  end
end
# Dispatch the pending section rewrites according to the ELF object type;
# no-op when nothing was queued for replacement.
def rewrite_sections
  return if @replaced_sections.empty?
  case ehdr.e_type
  when ELFTools::Constants::ET_DYN then rewrite_sections_library
  when ELFTools::Constants::ET_EXEC then rewrite_sections_executable
  else raise PatchError, 'unknown ELF type'
  end
end
# Yield the index of every section scheduled for replacement.
# @raise [PatchError] when nothing was replaced, or when the last section
#   itself was replaced (the rewrite algorithms need a successor section).
def replaced_section_indices
  return enum_for(:replaced_section_indices) unless block_given?
  last_replaced = 0
  @sections.each_with_index do |sec, idx|
    if @replaced_sections[sec.name]
      last_replaced = idx
      yield last_replaced
    end
  end
  raise PatchError, 'last_replaced = 0' if last_replaced.zero?
  raise PatchError, 'last_replaced + 1 >= @sections.size' if last_replaced + 1 >= @sections.size
end

# Pick the section header where rewritten data will start, queueing any
# in-between sections for replacement as a side effect.
def start_replacement_shdr
  last_replaced = replaced_section_indices.max
  start_replacement_hdr = @sections[last_replaced + 1].header
  prev_sec_name = ''
  (1..last_replaced).each do |idx|
    sec = @sections[idx]
    shdr = sec.header
    if (sec.type == ELFTools::Constants::SHT_PROGBITS && sec.name != '.interp') || prev_sec_name == '.dynstr'
      start_replacement_hdr = shdr
      break
    elsif @replaced_sections[sec.name].nil?
      Logger.debug " replacing section #{sec.name} which is in the way"
      replace_section(sec.name, shdr.sh_size)
    end
    prev_sec_name = sec.name
  end
  start_replacement_hdr
end

# Move the section header table to the end of the file.
def copy_shdrs_to_eof
  shoff_new = @buffer.size
  # NOTE(review): upstream patchelf also reserves ehdr.e_shoff bytes here even
  # though only the shdrs move; reason unclear, kept for byte-compatibility.
  sh_size = ehdr.e_shoff + (ehdr.e_shnum * ehdr.e_shentsize)
  buf_grow! @buffer.size + sh_size
  ehdr.e_shoff = shoff_new
  raise PatchError, 'ehdr.e_shnum != @sections.size' if ehdr.e_shnum != @sections.size
  with_buf_at(ehdr.e_shoff + @sections.first.header.num_bytes) do |buf| # skip writing to NULL section
    @sections.each_with_index do |sec, idx|
      next if idx.zero?
      sec.header.write buf
    end
  end
end
# Rewrite replaced sections of an ET_EXEC image, shifting the whole file by
# full pages when the new contents do not fit below the first kept section.
def rewrite_sections_executable
  sort_shdrs!
  shdr = start_replacement_shdr
  start_offset = shdr.sh_offset.to_i
  start_addr = shdr.sh_addr.to_i
  first_page = start_addr - start_offset
  Logger.debug "first reserved offset/addr is 0x#{start_offset.to_s 16}/0x#{start_addr.to_s 16}"
  unless start_addr % page_size == start_offset % page_size
    raise PatchError, 'start_addr != start_offset (mod PAGE_SIZE)'
  end
  Logger.debug "first page is 0x#{first_page.to_i.to_s 16}"
  # keep the section header table out of the region about to be clobbered
  copy_shdrs_to_eof if ehdr.e_shoff < start_offset
  normalize_note_segments
  seg_num_bytes = @segments.first.header.num_bytes
  needed_space = (
    ehdr.num_bytes +
    (@segments.count * seg_num_bytes) +
    @replaced_sections.sum { |_, str| Helper.alignup(str.size, @section_alignment) }
  )
  if needed_space > start_offset
    needed_space += seg_num_bytes # new load segment is required
    extra_bytes = needed_space - start_offset
    needed_pages = Helper.alignup(extra_bytes, page_size) / page_size
    Logger.debug "needed pages is #{needed_pages}"
    raise PatchError, 'virtual address space underrun' if needed_pages * page_size > first_page
    shift_file(needed_pages, start_offset, extra_bytes)
    first_page -= needed_pages * page_size
    start_offset += needed_pages * page_size
  end
  Logger.debug "needed space is #{needed_space}"
  cur_off = ehdr.num_bytes + (@segments.count * seg_num_bytes)
  Logger.debug "clearing first #{start_offset - cur_off} bytes"
  with_buf_at(cur_off) { |buf| buf.fill("\x00", start_offset - cur_off) }
  cur_off = write_replaced_sections cur_off, first_page, 0
  raise PatchError, "cur_off(#{cur_off}) != needed_space" if cur_off != needed_space
  rewrite_headers first_page + ehdr.e_phoff
end

# Queue for replacement every section that would be overwritten by the
# grown program header table (room for split PT_NOTEs plus one PT_LOAD).
def replace_sections_in_the_way_of_phdr!
  num_notes = @sections.count { |sec| sec.type == ELFTools::Constants::SHT_NOTE }
  pht_size = ehdr.num_bytes + ((@segments.count + num_notes + 1) * @segments.first.header.num_bytes)
  # replace sections that may overlap with expanded program header table
  @sections.each_with_index do |sec, idx|
    shdr = sec.header
    next if idx.zero? || @replaced_sections[sec.name]
    break if shdr.sh_offset > pht_size
    replace_section sec.name, shdr.sh_size
  end
end
# Rewrite replaced sections of an ET_DYN image by appending a fresh
# writable PT_LOAD segment past the current end of file/address space.
def rewrite_sections_library
  start_page = 0
  first_page = 0
  @segments.each do |seg|
    phdr = seg.header
    this_page = Helper.alignup(phdr.p_vaddr + phdr.p_memsz, page_size)
    start_page = [start_page, this_page].max
    first_page = phdr.p_vaddr - phdr.p_offset if phdr.p_type == ELFTools::Constants::PT_PHDR
  end
  Logger.debug "Last page is 0x#{start_page.to_s 16}"
  Logger.debug "First page is 0x#{first_page.to_s 16}"
  replace_sections_in_the_way_of_phdr!
  needed_space = @replaced_sections.sum { |_, str| Helper.alignup(str.size, @section_alignment) }
  Logger.debug "needed space = #{needed_space}"
  start_offset = Helper.alignup(@buffer.size, page_size)
  buf_grow! start_offset + needed_space
  # executable shared object
  if start_offset > start_page && @segments.any? { |seg| seg.header.p_type == ELFTools::Constants::PT_INTERP }
    Logger.debug(
      "shifting new PT_LOAD segment by #{start_offset - start_page} bytes to work around a Linux kernel bug"
    )
    start_page = start_offset
  end
  # the program header table is relocated right after the ELF header
  ehdr.e_phoff = ehdr.num_bytes
  add_segment!(
    p_type: ELFTools::Constants::PT_LOAD,
    p_offset: start_offset,
    p_vaddr: start_page,
    p_paddr: start_page,
    p_filesz: needed_space,
    p_memsz: needed_space,
    p_flags: ELFTools::Constants::PF_R | ELFTools::Constants::PF_W,
    p_align: page_size
  )
  normalize_note_segments
  cur_off = write_replaced_sections start_offset, start_page, start_offset
  raise PatchError, 'cur_off != start_offset + needed_space' if cur_off != start_offset + needed_space
  rewrite_headers(first_page + ehdr.e_phoff)
end
# Split PT_NOTE segments so each one covers exactly the SHT_NOTE sections
# it maps; required before note sections can be relocated independently.
def normalize_note_segments
  # nothing to do unless a note section is being replaced
  return if @replaced_sections.none? do |rsec_name, _|
    find_section(rsec_name)&.type == ELFTools::Constants::SHT_NOTE
  end
  new_phdrs = []
  phdrs_by_type(ELFTools::Constants::PT_NOTE) do |phdr|
    # Binaries produced by older patchelf versions may contain empty PT_NOTE segments.
    next if @sections.none? do |sec|
      sec.header.sh_offset >= phdr.p_offset && sec.header.sh_offset < phdr.p_offset + phdr.p_filesz
    end
    new_phdrs += normalize_note_segment(phdr)
  end
  new_phdrs.each { |phdr| add_segment!(**phdr.snapshot) }
end

# Split one PT_NOTE program header into one header per contained SHT_NOTE
# section; the original +phdr+ is reused (mutated) for the first piece.
# @return [Array<ELFTools::Structs::ELF_Phdr>] the extra headers created.
def normalize_note_segment(phdr)
  start_off = phdr.p_offset.to_i
  curr_off = start_off
  end_off = start_off + phdr.p_filesz
  new_phdrs = []
  while curr_off < end_off
    size = 0
    # find the note section that begins at (the aligned) current offset
    sections_at_aligned_offset(curr_off) do |sec|
      next if sec.type != ELFTools::Constants::SHT_NOTE
      size = sec.header.sh_size.to_i
      curr_off = sec.header.sh_offset.to_i
      break
    end
    raise PatchError, 'cannot normalize PT_NOTE segment: non-contiguous SHT_NOTE sections' if size.zero?
    if curr_off + size > end_off
      raise PatchError, 'cannot normalize PT_NOTE segment: partially mapped SHT_NOTE section'
    end
    new_phdr = ELFTools::Structs::ELF_Phdr[elf_class].new(endian: endian, **phdr.snapshot)
    new_phdr.p_offset = curr_off
    new_phdr.p_vaddr = phdr.p_vaddr + (curr_off - start_off)
    new_phdr.p_paddr = phdr.p_paddr + (curr_off - start_off)
    new_phdr.p_filesz = size
    new_phdr.p_memsz = size
    if curr_off == start_off
      # first piece: mutate the original header in place
      phdr.assign(new_phdr)
    else
      new_phdrs << new_phdr
    end
    curr_off += size
  end
  new_phdrs
end
# Yield each section whose sh_offset equals +offset+ rounded up to that
# section's own sh_addralign.
def sections_at_aligned_offset(offset)
  @sections.each do |sec|
    shdr = sec.header
    aligned_offset = Helper.alignup(offset, shdr.sh_addralign)
    next if shdr.sh_offset != aligned_offset
    yield sec
  end
end
# Push every section header (and e_shoff) located at or past
# +start_offset+ forward by +shift+ bytes.
def shift_sections(shift, start_offset)
  ehdr.e_shoff += shift unless ehdr.e_shoff < start_offset
  @sections.each_with_index do |section, index|
    next if index.zero? # never touch the NULL section
    header = section.header
    header.sh_offset += shift unless header.sh_offset < start_offset
  end
end
# Shift a segment's file offset, falling back to page alignment when the
# p_vaddr/p_offset congruence can no longer hold for the old p_align.
def shift_segment_offset(phdr, shift)
  phdr.p_offset += shift
  phdr.p_align = page_size if phdr.p_align != 0 && (phdr.p_vaddr - phdr.p_offset) % phdr.p_align != 0
end

# Lower a segment's physical/virtual addresses by +shift+ (addresses at or
# below +shift+ are left untouched to avoid underflow).
def shift_segment_virtual_address(phdr, shift)
  phdr.p_paddr -= shift if phdr.p_paddr > shift
  phdr.p_vaddr -= shift if phdr.p_vaddr > shift
end

# rubocop:disable Metrics/PerceivedComplexity
# Adjust every program header after the file contents at +start_offset+
# moved forward by +shift+ bytes; the single PT_LOAD segment straddling
# +start_offset+ is split at that point.
# @return [(Integer, Integer)] index of the split segment and the number
#   of bytes trimmed from its front.
def shift_segments(shift, start_offset)
  split_index = -1
  split_shift = 0
  @segments.each_with_index do |seg, idx|
    phdr = seg.header
    p_start = phdr.p_offset
    if p_start <= start_offset && p_start + phdr.p_filesz > start_offset &&
       phdr.p_type == ELFTools::Constants::PT_LOAD
      # only one PT_LOAD may straddle the split point
      raise PatchError, "split_index(#{split_index}) != -1" if split_index != -1
      split_index = idx
      split_shift = start_offset - p_start
      # trim the leading part; the new segment added by shift_file covers it
      phdr.p_offset = start_offset
      phdr.p_memsz -= split_shift
      phdr.p_filesz -= split_shift
      phdr.p_paddr += split_shift
      phdr.p_vaddr += split_shift
      p_start = start_offset
    end
    if p_start >= start_offset
      shift_segment_offset(phdr, shift)
    else
      shift_segment_virtual_address(phdr, shift)
    end
  end
  raise PatchError, "split_index(#{split_index}) == -1" if split_index == -1
  [split_index, split_shift]
end
# rubocop:enable Metrics/PerceivedComplexity

# Grow the file by +extra_pages+ pages at +start_offset+, sliding the tail
# forward, zero-filling the hole, and covering it with a writable PT_LOAD.
def shift_file(extra_pages, start_offset, extra_bytes)
  raise PatchError, "start_offset(#{start_offset}) < ehdr.num_bytes" if start_offset < ehdr.num_bytes
  oldsz = @buffer.size
  raise PatchError, "oldsz <= start_offset(#{start_offset})" if oldsz <= start_offset
  shift = extra_pages * page_size
  buf_grow!(oldsz + shift)
  # slide the tail of the file forward and zero the newly created gap
  buf_move!(start_offset + shift, start_offset, oldsz - start_offset)
  with_buf_at(start_offset) { |buf| buf.write "\x00" * shift }
  ehdr.e_phoff = ehdr.num_bytes
  shift_sections(shift, start_offset)
  split_index, split_shift = shift_segments(shift, start_offset)
  split_phdr = @segments[split_index].header
  add_segment!(
    p_type: ELFTools::Constants::PT_LOAD,
    p_offset: split_phdr.p_offset - split_shift - shift,
    p_vaddr: split_phdr.p_vaddr - split_shift - shift,
    p_paddr: split_phdr.p_paddr - split_shift - shift,
    p_filesz: split_shift + extra_bytes,
    p_memsz: split_shift + extra_bytes,
    p_flags: ELFTools::Constants::PF_R | ELFTools::Constants::PF_W,
    p_align: page_size
  )
end
# Sort program headers by physical address, forcing PT_PHDR to the front.
def sort_phdrs!
  pt_phdr = ELFTools::Constants::PT_PHDR
  @segments.sort! do |me, you|
    next 1 if you.header.p_type == pt_phdr
    next -1 if me.header.p_type == pt_phdr
    me.header.p_paddr.to_i <=> you.header.p_paddr.to_i
  end
end

# section headers may contain sh_info and sh_link values that are
# references to another section
# Capture those references by section *name* so they survive re-sorting.
def collect_section_to_section_refs
  rel_syms = [ELFTools::Constants::SHT_REL, ELFTools::Constants::SHT_RELA]
  # Translate sh_link, sh_info mappings to section names.
  @sections.each_with_object({ linkage: {}, info: {} }) do |s, collected|
    hdr = s.header
    collected[:linkage][s.name] = @sections[hdr.sh_link].name if hdr.sh_link.nonzero?
    collected[:info][s.name] = @sections[hdr.sh_info].name if hdr.sh_info.nonzero? && rel_syms.include?(hdr.sh_type)
  end
end

# @param collected
# this must be the value returned by +collect_section_to_section_refs+
# Translate the captured name references back into (new) section indices.
def restore_section_to_section_refs!(collected)
  rel_syms = [ELFTools::Constants::SHT_REL, ELFTools::Constants::SHT_RELA]
  linkage, info = collected.values_at(:linkage, :info)
  @sections.each do |sec|
    hdr = sec.header
    hdr.sh_link = find_section_idx(linkage[sec.name]) if hdr.sh_link.nonzero?
    hdr.sh_info = find_section_idx(info[sec.name]) if hdr.sh_info.nonzero? && rel_syms.include?(hdr.sh_type)
  end
end

# Sort section headers by file offset, fixing up cross-references and
# e_shstrndx afterwards.
def sort_shdrs!
  return if @sections.empty?
  section_dep_values = collect_section_to_section_refs
  shstrtab = @sections[ehdr.e_shstrndx].header
  @sections.sort! { |me, you| me.header.sh_offset.to_i <=> you.header.sh_offset.to_i }
  update_section_idx!
  restore_section_to_section_refs!(section_dep_values)
  # relocate e_shstrndx to wherever the string table header ended up
  @sections.each_with_index do |sec, idx|
    ehdr.e_shstrndx = idx if sec.header.sh_offset == shstrtab.sh_offset
  end
end
# Resolve which relocation section DT_JMPREL refers to in this binary.
# @raise [PatchError] when none of the known candidates exists.
def jmprel_section_name
  %w[.rel.plt .rela.plt .rela.IA_64.pltoff].each do |candidate|
    return candidate if find_section(candidate)
  end
  raise PatchError, 'cannot find section corresponding to DT_JMPREL'
end
# given a +dyn.d_tag+, returns the section name it must be synced to.
# it may return nil, when given tag maps to no section,
# or when its okay to skip if section is not found.
def dyn_tag_to_section_name(d_tag)
  case d_tag
  when ELFTools::Constants::DT_STRTAB, ELFTools::Constants::DT_STRSZ
    '.dynstr'
  when ELFTools::Constants::DT_SYMTAB
    '.dynsym'
  when ELFTools::Constants::DT_HASH
    '.hash'
  when ELFTools::Constants::DT_GNU_HASH
    # return nil if not found, patchelf claims no problem in skipping
    find_section('.gnu.hash')&.name
  when ELFTools::Constants::DT_MIPS_XHASH
    # this tag is only meaningful on MIPS machines
    return if ehdr.e_machine != ELFTools::Constants::EM_MIPS
    '.MIPS.xhash'
  when ELFTools::Constants::DT_JMPREL
    jmprel_section_name
  when ELFTools::Constants::DT_REL
    # regarding .rel.got, NixOS/patchelf says
    # "no idea if this makes sense, but it was needed for some program"
    #
    # return nil if not found, patchelf claims no problem in skipping
    %w[.rel.dyn .rel.got].find { |s| find_section(s) }
  when ELFTools::Constants::DT_RELA
    # return nil if not found, patchelf claims no problem in skipping
    find_section('.rela.dyn')&.name
  when ELFTools::Constants::DT_VERNEED
    '.gnu.version_r'
  when ELFTools::Constants::DT_VERSYM
    '.gnu.version'
  end
end

# updates dyn tags by syncing it with @section values
# Rewrites each tag's d_val from the (possibly relocated) section headers.
def sync_dyn_tags!
  dyn_table_offset = nil
  each_dynamic_tags do |dyn, buf_off|
    # remember the offset of the first entry (start of the dynamic table)
    dyn_table_offset ||= buf_off
    sec_name = dyn_tag_to_section_name(dyn.d_tag)
    unless sec_name
      # DT_MIPS_RLD_MAP_REL is relative to the tag's own address, handled specially
      if dyn.d_tag == ELFTools::Constants::DT_MIPS_RLD_MAP_REL && ehdr.e_machine == ELFTools::Constants::EM_MIPS
        rld_map = find_section('.rld_map')
        dyn.d_val = if rld_map
                      rld_map.header.sh_addr.to_i - (buf_off - dyn_table_offset) -
                        find_section('.dynamic').header.sh_addr.to_i
                    else
                      Logger.warn 'DT_MIPS_RLD_MAP_REL entry is present, but .rld_map section is not'
                      0
                    end
      end
      next
    end
    shdr = find_section(sec_name).header
    # DT_STRSZ tracks the section size; every other synced tag tracks its address
    dyn.d_val = dyn.d_tag == ELFTools::Constants::DT_STRSZ ? shdr.sh_size.to_i : shdr.sh_addr.to_i
    with_buf_at(buf_off) { |wbuf| dyn.write(wbuf) }
  end
end
# Rebuild the section-name -> index lookup table from @sections
# (later duplicates of a name win, as before).
def update_section_idx!
  @section_idx_by_name = {}
  @sections.each_with_index { |sec, idx| @section_idx_by_name[sec.name] = idx }
end
# Run the block with @buffer positioned at +pos+, restoring the previous
# position afterwards.
# @return [nil]
def with_buf_at(pos)
  return unless block_given?
  opos = @buffer.tell
  @buffer.seek pos
  yield @buffer
  @buffer.seek opos
  nil
end

# Copy a section header's placement (offset / address / size) into a
# program header.
def sync_sec_to_seg(shdr, phdr)
  phdr.p_offset = shdr.sh_offset.to_i
  phdr.p_vaddr = phdr.p_paddr = shdr.sh_addr.to_i
  phdr.p_filesz = phdr.p_memsz = shdr.sh_size.to_i
end
# Yield every program header of the given type together with its index
# in @segments; a nil +seg_type+ is a no-op.
def phdrs_by_type(seg_type)
  return unless seg_type
  @segments.each_with_index do |seg, idx|
    phdr = seg.header
    yield(phdr, idx) if phdr.p_type == seg_type
  end
end
# Returns the named section's header, or a blank shdr when the section
# doesn't exist.
def find_or_create_section_header(rsec_name)
  existing = find_section(rsec_name)
  return existing.header if existing
  ELFTools::Structs::ELF_Shdr.new(endian: endian, elf_class: elf_class)
end
# Fill the original location of every replaced section with 'X' bytes.
def overwrite_replaced_sections
  # the original source says this has to be done separately to
  # prevent clobbering the previously written section contents.
  @replaced_sections.each_key do |rsec_name|
    shdr = find_section(rsec_name)&.header
    next unless shdr
    # SHT_NOBITS sections own no file bytes, nothing to scrub
    next if shdr.sh_type == ELFTools::Constants::SHT_NOBITS
    with_buf_at(shdr.sh_offset) { |b| b.fill('X', shdr.sh_size) }
  end
end

# Force the header onto the table alignment, except for notes that are
# already aligned loosely enough.
def write_section_alignment(shdr)
  return if shdr.sh_type == ELFTools::Constants::SHT_NOTE && shdr.sh_addralign <= @section_alignment
  shdr.sh_addralign = @section_alignment
end
def section_bounds_within_segment?(s_start, s_end, p_start, p_end)
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | true |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools.rb | # frozen_string_literal: true
require 'elftools/constants'
require 'elftools/elf_file'
require 'elftools/version'
# The ELF parsing tools!
# Main entry point is {ELFTools::ELFFile}, see it
# for more information.
module ELFTools
  # Intentionally empty: the implementation lives in the files required above.
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/dynamic.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/dynamic.rb | # frozen_string_literal: true
module ELFTools
# Define common methods for dynamic sections and dynamic segments.
#
# @note
# This module can only be included by {ELFTools::Sections::DynamicSection}
# and {ELFTools::Segments::DynamicSegment} because methods here assume some
# attributes exist.
module Dynamic
# Iterate all tags.
#
# @note
# This method assume the following methods already exist:
# header
# tag_start
# @yieldparam [ELFTools::Dynamic::Tag] tag
# @return [Enumerator<ELFTools::Dynamic::Tag>, Array<ELFTools::Dynamic::Tag>]
# If block is not given, an enumerator will be returned.
# Otherwise, return array of tags.
def each_tags(&block)
  return enum_for(:each_tags) unless block_given?
  collected = []
  index = 0
  # walk entries until (and including) the DT_NULL terminator
  loop do
    tag = tag_at(index)
    block.call(tag)
    collected << tag
    break if tag.header.d_tag == ELFTools::Constants::DT_NULL
    index += 1
  end
  collected
end
# Use {#tags} to get all tags.
# @return [Array<ELFTools::Dynamic::Tag>]
#   Array of tags.
def tags
  # memoized: the underlying stream is only walked once
  @tags ||= each_tags.to_a
end
# Get a tag of specific type.
# @param [Integer, Symbol, String] type
# Constant value, symbol, or string of type
# is acceptable. See examples for more information.
# @return [ELFTools::Dynamic::Tag] The desired tag.
# @example
# dynamic = elf.segment_by_type(:dynamic)
# # type as integer
# dynamic.tag_by_type(0) # the null tag
# #=> #<ELFTools::Dynamic::Tag:0x0055b5a5ecad28 @header={:d_tag=>0, :d_val=>0}>
# dynamic.tag_by_type(ELFTools::Constants::DT_NULL)
# #=> #<ELFTools::Dynamic::Tag:0x0055b5a5ecad28 @header={:d_tag=>0, :d_val=>0}>
#
# # symbol
# dynamic.tag_by_type(:null)
# #=> #<ELFTools::Dynamic::Tag:0x0055b5a5ecad28 @header={:d_tag=>0, :d_val=>0}>
# dynamic.tag_by_type(:pltgot)
# #=> #<ELFTools::Dynamic::Tag:0x0055d3d2d91b28 @header={:d_tag=>3, :d_val=>6295552}>
#
# # string
# dynamic.tag_by_type('null')
# #=> #<ELFTools::Dynamic::Tag:0x0055b5a5ecad28 @header={:d_tag=>0, :d_val=>0}>
# dynamic.tag_by_type('DT_PLTGOT')
# #=> #<ELFTools::Dynamic::Tag:0x0055d3d2d91b28 @header={:d_tag=>3, :d_val=>6295552}>
def tag_by_type(type)
  wanted = Util.to_constant(Constants::DT, type)
  each_tags.find { |tag| tag.header.d_tag == wanted }
end
# Get tags of specific type.
# @param [Integer, Symbol, String] type
# Constant value, symbol, or string of type
# is acceptable. See examples for more information.
# @return [Array<ELFTools::Dynamic::Tag>] The desired tags.
#
# @see #tag_by_type
def tags_by_type(type)
  wanted = Util.to_constant(Constants::DT, type)
  each_tags.select { |tag| tag.header.d_tag == wanted }
end
# Get the +n+-th tag.
#
# Tags are lazy loaded.
# @note
#   This method assume the following methods already exist:
#     header
#     tag_start
# @note
#   We cannot do bound checking of +n+ here since the only way to get size
#   of tags is calling +tags.size+.
# @param [Integer] n The index.
# @return [ELFTools::Dynamic::Tag] The desired tag.
def tag_at(n)
  return if n.negative?
  @tag_at_map ||= {}
  # cache hit: each tag is parsed from the stream at most once
  return @tag_at_map[n] if @tag_at_map[n]
  dyn = Structs::ELF_Dyn.new(endian:)
  dyn.elf_class = header.elf_class
  # entries are fixed-size records laid out back to back after tag_start
  stream.pos = tag_start + n * dyn.num_bytes
  dyn.offset = stream.pos
  @tag_at_map[n] = Tag.new(dyn.read(stream), stream, method(:str_offset))
end

private

# Byte order, inferred from the concrete header structure's class.
def endian
  header.class.self_endian
end

# Get the DT_STRTAB's +d_val+ offset related to file.
def str_offset
  # TODO: handle the case where DT_STRTAB does not exist.
  @str_offset ||= @offset_from_vma.call(tag_by_type(:strtab).header.d_val.to_i)
end
# A tag class.
# Wraps one ELF_Dyn entry and resolves string-valued tags (e.g. DT_NEEDED)
# through the dynamic string table.
class Tag
  attr_reader :header # @return [ELFTools::Structs::ELF_Dyn] The dynamic tag header.
  attr_reader :stream # @return [#pos=, #read] Streaming object.

  # Instantiate a {ELFTools::Dynamic::Tag} object.
  # @param [ELF_Dyn] header The dynamic tag header.
  # @param [#pos=, #read] stream Streaming object.
  # @param [Method] str_offset
  #   Call this method to get the string offset related
  #   to file.
  def initialize(header, stream, str_offset)
    @header = header
    @stream = stream
    @str_offset = str_offset
  end

  # Tag types whose +d_val+ is an offset into the string table.
  TYPE_WITH_NAME = [Constants::DT_NEEDED,
                    Constants::DT_SONAME,
                    Constants::DT_RPATH,
                    Constants::DT_RUNPATH].freeze

  # Return the content of this tag records.
  #
  # For normal tags, this method just return
  # +header.d_val+. For tags with +header.d_val+
  # in meaning of string offset (e.g. DT_NEEDED), this method would
  # return the string it specified.
  # Tags with type in {TYPE_WITH_NAME} are those tags with name.
  # @return [Integer, String] The content this tag records.
  # @example
  #   dynamic = elf.segment_by_type(:dynamic)
  #   dynamic.tag_by_type(:init).value
  #   #=> 4195600 # 0x400510
  #   dynamic.tag_by_type(:needed).value
  #   #=> 'libc.so.6'
  def value
    name || header.d_val.to_i
  end

  # Is this tag has a name?
  #
  # The criteria here is if this tag's type is in {TYPE_WITH_NAME}.
  # @return [Boolean] Is this tag has a name.
  def name?
    TYPE_WITH_NAME.include?(header.d_tag)
  end

  # Return the name of this tag.
  #
  # Only tags with name would return a name.
  # Others would return +nil+.
  # @return [String, nil] The name.
  def name
    return nil unless name?
    # resolve d_val as an offset into the dynamic string table
    Util.cstring(stream, @str_offset.call + header.d_val.to_i)
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/version.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/version.rb | # frozen_string_literal: true
module ELFTools
  # Current gem version.
  # @return [String] Version in +major.minor.patch+ form.
  VERSION = '1.3.1'
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/exceptions.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/exceptions.rb | # frozen_string_literal: true
module ELFTools
  # Base class of all errors raised by this library; raised when an error
  # occurs during parsing.
  class ELFError < StandardError; end

  # Raised on invalid ELF magic.
  class ELFMagicError < ELFError; end

  # Raised on invalid ELF class (EI_CLASS).
  class ELFClassError < ELFError; end

  # Raised on invalid ELF data encoding (EI_DATA).
  class ELFDataError < ELFError; end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/lazy_array.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/lazy_array.rb | # frozen_string_literal: true
require 'delegate'
module ELFTools
# A helper class for {ELFTools} easy to implement
# 'lazy loading' objects.
# Mainly used when loading sections, segments, and
# symbols.
class LazyArray < SimpleDelegator
  # Instantiate a {LazyArray} object.
  # @param [Integer] size
  #   The size of array.
  # @yieldparam [Integer] i
  #   Needs the +i+-th element.
  # @yieldreturn [Object]
  #   Value of the +i+-th element.
  # @example
  #   arr = LazyArray.new(10) { |i| p "calc #{i}"; i * i }
  #   p arr[2]
  #   # "calc 2"
  #   # 4
  #
  #   p arr[3]
  #   # "calc 3"
  #   # 9
  #
  #   p arr[3]
  #   # 9
  def initialize(size, &block)
    super(Array.new(size))
    @block = block
  end

  # To access elements like a normal array.
  #
  # Elements are computed on first access and cached afterwards.
  # @return [Object]
  #   The element; its type is whatever the block given to {#initialize}
  #   returns.
  def [](i)
    # XXX: support negative index?
    backing = __getobj__
    return nil if i.negative? || i >= backing.size

    backing[i] ||= @block.call(i)
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/constants.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/constants.rb | # frozen_string_literal: true
module ELFTools
# Define constants from elf.h.
# Mostly refer from https://github.com/torvalds/linux/blob/master/include/uapi/linux/elf.h
# and binutils/elfcpp/elfcpp.h.
module Constants
# ELF magic header
ELFMAG = "\x7FELF"
# Values of `d_un.d_val' in the DT_FLAGS and DT_FLAGS_1 entry.
module DF
DF_ORIGIN = 0x00000001 # Object may use DF_ORIGIN
DF_SYMBOLIC = 0x00000002 # Symbol resolutions starts here
DF_TEXTREL = 0x00000004 # Object contains text relocations
DF_BIND_NOW = 0x00000008 # No lazy binding for this object
DF_STATIC_TLS = 0x00000010 # Module uses the static TLS model
DF_1_NOW = 0x00000001 # Set RTLD_NOW for this object.
DF_1_GLOBAL = 0x00000002 # Set RTLD_GLOBAL for this object.
DF_1_GROUP = 0x00000004 # Set RTLD_GROUP for this object.
DF_1_NODELETE = 0x00000008 # Set RTLD_NODELETE for this object.
DF_1_LOADFLTR = 0x00000010 # Trigger filtee loading at runtime.
DF_1_INITFIRST = 0x00000020 # Set RTLD_INITFIRST for this object
DF_1_NOOPEN = 0x00000040 # Set RTLD_NOOPEN for this object.
DF_1_ORIGIN = 0x00000080 # $ORIGIN must be handled.
DF_1_DIRECT = 0x00000100 # Direct binding enabled.
DF_1_TRANS = 0x00000200 # :nodoc:
DF_1_INTERPOSE = 0x00000400 # Object is used to interpose.
DF_1_NODEFLIB = 0x00000800 # Ignore default lib search path.
DF_1_NODUMP = 0x00001000 # Object can't be dldump'ed.
DF_1_CONFALT = 0x00002000 # Configuration alternative created.
DF_1_ENDFILTEE = 0x00004000 # Filtee terminates filters search.
DF_1_DISPRELDNE = 0x00008000 # Disp reloc applied at build time.
DF_1_DISPRELPND = 0x00010000 # Disp reloc applied at run-time.
DF_1_NODIRECT = 0x00020000 # Object has no-direct binding.
DF_1_IGNMULDEF = 0x00040000 # :nodoc:
DF_1_NOKSYMS = 0x00080000 # :nodoc:
DF_1_NOHDR = 0x00100000 # :nodoc:
DF_1_EDITED = 0x00200000 # Object is modified after built.
DF_1_NORELOC = 0x00400000 # :nodoc:
DF_1_SYMINTPOSE = 0x00800000 # Object has individual interposers.
DF_1_GLOBAUDIT = 0x01000000 # Global auditing required.
DF_1_SINGLETON = 0x02000000 # Singleton symbols are used.
DF_1_STUB = 0x04000000 # :nodoc:
DF_1_PIE = 0x08000000 # Object is a position-independent executable.
DF_1_KMOD = 0x10000000 # :nodoc:
DF_1_WEAKFILTER = 0x20000000 # :nodoc:
DF_1_NOCOMMON = 0x40000000 # :nodoc:
end
include DF
# Dynamic table types, records in +d_tag+.
module DT
DT_NULL = 0 # marks the end of the _DYNAMIC array
DT_NEEDED = 1 # libraries need to be linked by loader
DT_PLTRELSZ = 2 # total size of relocation entries
DT_PLTGOT = 3 # address of procedure linkage table or global offset table
DT_HASH = 4 # address of symbol hash table
DT_STRTAB = 5 # address of string table
DT_SYMTAB = 6 # address of symbol table
DT_RELA = 7 # address of a relocation table
DT_RELASZ = 8 # total size of the {DT_RELA} table
DT_RELAENT = 9 # size of each entry in the {DT_RELA} table
DT_STRSZ = 10 # total size of {DT_STRTAB}
DT_SYMENT = 11 # size of each entry in {DT_SYMTAB}
DT_INIT = 12 # where the initialization function is
DT_FINI = 13 # where the termination function is
DT_SONAME = 14 # the shared object name
DT_RPATH = 15 # has been superseded by {DT_RUNPATH}
DT_SYMBOLIC = 16 # has been superseded by the DF_SYMBOLIC flag
DT_REL = 17 # similar to {DT_RELA}
DT_RELSZ = 18 # total size of the {DT_REL} table
DT_RELENT = 19 # size of each entry in the {DT_REL} table
DT_PLTREL = 20 # type of relocation entry, either {DT_REL} or {DT_RELA}
DT_DEBUG = 21 # for debugging
DT_TEXTREL = 22 # has been superseded by the DF_TEXTREL flag
DT_JMPREL = 23 # address of relocation entries associated solely with procedure linkage table
DT_BIND_NOW = 24 # if the loader needs to do relocate now, superseded by the DF_BIND_NOW flag
DT_INIT_ARRAY = 25 # address init array
DT_FINI_ARRAY = 26 # address of fini array
DT_INIT_ARRAYSZ = 27 # total size of init array
DT_FINI_ARRAYSZ = 28 # total size of fini array
DT_RUNPATH = 29 # path of libraries for searching
DT_FLAGS = 30 # flags
DT_ENCODING = 32 # just a lower bound
DT_PREINIT_ARRAY = 32 # pre-initialization functions array
DT_PREINIT_ARRAYSZ = 33 # pre-initialization functions array size (bytes)
DT_SYMTAB_SHNDX = 34 # address of the +SHT_SYMTAB_SHNDX+ section associated with {DT_SYMTAB} table
DT_RELRSZ = 35 # :nodoc:
DT_RELR = 36 # :nodoc:
DT_RELRENT = 37 # :nodoc:
# Values between {DT_LOOS} and {DT_HIOS} are reserved for operating system-specific semantics.
DT_LOOS = 0x6000000d
DT_HIOS = 0x6ffff000 # see {DT_LOOS}
# Values between {DT_VALRNGLO} and {DT_VALRNGHI} use the +d_un.d_val+ field of the dynamic structure.
DT_VALRNGLO = 0x6ffffd00
DT_VALRNGHI = 0x6ffffdff # see {DT_VALRNGLO}
# Values between {DT_ADDRRNGLO} and {DT_ADDRRNGHI} use the +d_un.d_ptr+ field of the dynamic structure.
DT_ADDRRNGLO = 0x6ffffe00
DT_GNU_HASH = 0x6ffffef5 # the gnu hash
DT_TLSDESC_PLT = 0x6ffffef6 # :nodoc:
DT_TLSDESC_GOT = 0x6ffffef7 # :nodoc:
DT_GNU_CONFLICT = 0x6ffffef8 # :nodoc:
DT_GNU_LIBLIST = 0x6ffffef9 # :nodoc:
DT_CONFIG = 0x6ffffefa # :nodoc:
DT_DEPAUDIT = 0x6ffffefb # :nodoc:
DT_AUDIT = 0x6ffffefc # :nodoc:
DT_PLTPAD = 0x6ffffefd # :nodoc:
DT_MOVETAB = 0x6ffffefe # :nodoc:
DT_SYMINFO = 0x6ffffeff # :nodoc:
DT_ADDRRNGHI = 0x6ffffeff # see {DT_ADDRRNGLO}
DT_VERSYM = 0x6ffffff0 # section address of .gnu.version
DT_RELACOUNT = 0x6ffffff9 # relative relocation count
DT_RELCOUNT = 0x6ffffffa # relative relocation count
DT_FLAGS_1 = 0x6ffffffb # flags
DT_VERDEF = 0x6ffffffc # address of version definition table
DT_VERDEFNUM = 0x6ffffffd # number of entries in {DT_VERDEF}
DT_VERNEED = 0x6ffffffe # address of version dependency table
DT_VERNEEDNUM = 0x6fffffff # number of entries in {DT_VERNEED}
# Values between {DT_LOPROC} and {DT_HIPROC} are reserved for processor-specific semantics.
DT_LOPROC = 0x70000000
DT_PPC_GOT = 0x70000000 # global offset table
DT_PPC_OPT = 0x70000001 # whether various optimisations are possible
DT_PPC64_GLINK = 0x70000000 # start of the .glink section
DT_PPC64_OPD = 0x70000001 # start of the .opd section
DT_PPC64_OPDSZ = 0x70000002 # size of the .opd section
DT_PPC64_OPT = 0x70000003 # whether various optimisations are possible
DT_SPARC_REGISTER = 0x70000000 # index of an +STT_SPARC_REGISTER+ symbol within the {DT_SYMTAB} table
DT_MIPS_RLD_VERSION = 0x70000001 # 32 bit version number for runtime linker interface
DT_MIPS_TIME_STAMP = 0x70000002 # time stamp
DT_MIPS_ICHECKSUM = 0x70000003 # checksum of external strings and common sizes
DT_MIPS_IVERSION = 0x70000004 # index of version string in string table
DT_MIPS_FLAGS = 0x70000005 # 32 bits of flags
DT_MIPS_BASE_ADDRESS = 0x70000006 # base address of the segment
DT_MIPS_MSYM = 0x70000007 # :nodoc:
DT_MIPS_CONFLICT = 0x70000008 # address of +.conflict+ section
DT_MIPS_LIBLIST = 0x70000009 # address of +.liblist+ section
DT_MIPS_LOCAL_GOTNO = 0x7000000a # number of local global offset table entries
DT_MIPS_CONFLICTNO = 0x7000000b # number of entries in the +.conflict+ section
DT_MIPS_LIBLISTNO = 0x70000010 # number of entries in the +.liblist+ section
DT_MIPS_SYMTABNO = 0x70000011 # number of entries in the +.dynsym+ section
DT_MIPS_UNREFEXTNO = 0x70000012 # index of first external dynamic symbol not referenced locally
DT_MIPS_GOTSYM = 0x70000013 # index of first dynamic symbol in global offset table
DT_MIPS_HIPAGENO = 0x70000014 # number of page table entries in global offset table
DT_MIPS_RLD_MAP = 0x70000016 # address of run time loader map, used for debugging
DT_MIPS_DELTA_CLASS = 0x70000017 # delta C++ class definition
DT_MIPS_DELTA_CLASS_NO = 0x70000018 # number of entries in {DT_MIPS_DELTA_CLASS}
DT_MIPS_DELTA_INSTANCE = 0x70000019 # delta C++ class instances
DT_MIPS_DELTA_INSTANCE_NO = 0x7000001a # number of entries in {DT_MIPS_DELTA_INSTANCE}
DT_MIPS_DELTA_RELOC = 0x7000001b # delta relocations
DT_MIPS_DELTA_RELOC_NO = 0x7000001c # number of entries in {DT_MIPS_DELTA_RELOC}
DT_MIPS_DELTA_SYM = 0x7000001d # delta symbols that Delta relocations refer to
DT_MIPS_DELTA_SYM_NO = 0x7000001e # number of entries in {DT_MIPS_DELTA_SYM}
DT_MIPS_DELTA_CLASSSYM = 0x70000020 # delta symbols that hold class declarations
DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021 # number of entries in {DT_MIPS_DELTA_CLASSSYM}
DT_MIPS_CXX_FLAGS = 0x70000022 # flags indicating information about C++ flavor
DT_MIPS_PIXIE_INIT = 0x70000023 # :nodoc:
DT_MIPS_SYMBOL_LIB = 0x70000024 # address of +.MIPS.symlib+
DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025 # GOT index of the first PTE for a segment
DT_MIPS_LOCAL_GOTIDX = 0x70000026 # GOT index of the first PTE for a local symbol
DT_MIPS_HIDDEN_GOTIDX = 0x70000027 # GOT index of the first PTE for a hidden symbol
DT_MIPS_PROTECTED_GOTIDX = 0x70000028 # GOT index of the first PTE for a protected symbol
DT_MIPS_OPTIONS = 0x70000029 # address of +.MIPS.options+
DT_MIPS_INTERFACE = 0x7000002a # address of +.interface+
DT_MIPS_DYNSTR_ALIGN = 0x7000002b # :nodoc:
DT_MIPS_INTERFACE_SIZE = 0x7000002c # size of the +.interface+ section
DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002d # size of +rld_text_resolve+ function stored in the GOT
DT_MIPS_PERF_SUFFIX = 0x7000002e # default suffix of DSO to be added by rld on +dlopen()+ calls
DT_MIPS_COMPACT_SIZE = 0x7000002f # size of compact relocation section (O32)
DT_MIPS_GP_VALUE = 0x70000030 # GP value for auxiliary GOTs
DT_MIPS_AUX_DYNAMIC = 0x70000031 # address of auxiliary +.dynamic+
DT_MIPS_PLTGOT = 0x70000032 # address of the base of the PLTGOT
DT_MIPS_RWPLT = 0x70000034 # base of a writable PLT
DT_MIPS_RLD_MAP_REL = 0x70000035 # relative offset of run time loader map
DT_MIPS_XHASH = 0x70000036 # GNU-style hash table with xlat
DT_AUXILIARY = 0x7ffffffd # :nodoc:
DT_USED = 0x7ffffffe # :nodoc:
DT_FILTER = 0x7ffffffe # :nodoc:
DT_HIPROC = 0x7fffffff # see {DT_LOPROC}
end
include DT
# These constants define the various ELF target machines.
module EM
EM_NONE = 0 # none
EM_M32 = 1 # AT&T WE 32100
EM_SPARC = 2 # SPARC
EM_386 = 3 # Intel 80386
EM_68K = 4 # Motorola 68000
EM_88K = 5 # Motorola 88000
EM_486 = 6 # Intel 80486
EM_860 = 7 # Intel 80860
EM_MIPS = 8 # MIPS R3000 (officially, big-endian only)
EM_S370 = 9 # IBM System/370
# Next two are historical and binaries and
# modules of these types will be rejected by Linux.
EM_MIPS_RS3_LE = 10 # MIPS R3000 little-endian
EM_MIPS_RS4_BE = 10 # MIPS R4000 big-endian
EM_PARISC = 15 # HPPA
EM_VPP500 = 17 # Fujitsu VPP500 (also some older versions of PowerPC)
EM_SPARC32PLUS = 18 # Sun's "v8plus"
EM_960 = 19 # Intel 80960
EM_PPC = 20 # PowerPC
EM_PPC64 = 21 # PowerPC64
EM_S390 = 22 # IBM S/390
EM_SPU = 23 # Cell BE SPU
EM_V800 = 36 # NEC V800 series
EM_FR20 = 37 # Fujitsu FR20
EM_RH32 = 38 # TRW RH32
EM_RCE = 39 # Motorola M*Core
EM_ARM = 40 # ARM 32 bit
EM_SH = 42 # SuperH
EM_SPARCV9 = 43 # SPARC v9 64-bit
EM_TRICORE = 44 # Siemens Tricore embedded processor
EM_ARC = 45 # ARC Cores
EM_H8_300 = 46 # Renesas H8/300
EM_H8_300H = 47 # Renesas H8/300H
EM_H8S = 48 # Renesas H8S
EM_H8_500 = 49 # Renesas H8/500H
EM_IA_64 = 50 # HP/Intel IA-64
EM_MIPS_X = 51 # Stanford MIPS-X
EM_COLDFIRE = 52 # Motorola Coldfire
EM_68HC12 = 53 # Motorola M68HC12
EM_MMA = 54 # Fujitsu Multimedia Accelerator
EM_PCP = 55 # Siemens PCP
EM_NCPU = 56 # Sony nCPU embedded RISC processor
EM_NDR1 = 57 # Denso NDR1 microprocessor
EM_STARCORE = 58 # Motorola Star*Core processor
EM_ME16 = 59 # Toyota ME16 processor
EM_ST100 = 60 # STMicroelectronics ST100 processor
EM_TINYJ = 61 # Advanced Logic Corp. TinyJ embedded processor
EM_X86_64 = 62 # AMD x86-64
EM_PDSP = 63 # Sony DSP Processor
EM_PDP10 = 64 # Digital Equipment Corp. PDP-10
EM_PDP11 = 65 # Digital Equipment Corp. PDP-11
EM_FX66 = 66 # Siemens FX66 microcontroller
EM_ST9PLUS = 67 # STMicroelectronics ST9+ 8/16 bit microcontroller
EM_ST7 = 68 # STMicroelectronics ST7 8-bit microcontroller
EM_68HC16 = 69 # Motorola MC68HC16 Microcontroller
EM_68HC11 = 70 # Motorola MC68HC11 Microcontroller
EM_68HC08 = 71 # Motorola MC68HC08 Microcontroller
EM_68HC05 = 72 # Motorola MC68HC05 Microcontroller
EM_SVX = 73 # Silicon Graphics SVx
EM_ST19 = 74 # STMicroelectronics ST19 8-bit cpu
EM_VAX = 75 # Digital VAX
EM_CRIS = 76 # Axis Communications 32-bit embedded processor
EM_JAVELIN = 77 # Infineon Technologies 32-bit embedded cpu
EM_FIREPATH = 78 # Element 14 64-bit DSP processor
EM_ZSP = 79 # LSI Logic's 16-bit DSP processor
EM_MMIX = 80 # Donald Knuth's educational 64-bit processor
EM_HUANY = 81 # Harvard's machine-independent format
EM_PRISM = 82 # SiTera Prism
EM_AVR = 83 # Atmel AVR 8-bit microcontroller
EM_FR30 = 84 # Fujitsu FR30
EM_D10V = 85 # Mitsubishi D10V
EM_D30V = 86 # Mitsubishi D30V
EM_V850 = 87 # Renesas V850
EM_M32R = 88 # Renesas M32R
EM_MN10300 = 89 # Matsushita MN10300
EM_MN10200 = 90 # Matsushita MN10200
EM_PJ = 91 # picoJava
EM_OPENRISC = 92 # OpenRISC 32-bit embedded processor
EM_ARC_COMPACT = 93 # ARC International ARCompact processor
EM_XTENSA = 94 # Tensilica Xtensa Architecture
EM_VIDEOCORE = 95 # Alphamosaic VideoCore processor
EM_TMM_GPP = 96 # Thompson Multimedia General Purpose Processor
EM_NS32K = 97 # National Semiconductor 32000 series
EM_TPC = 98 # Tenor Network TPC processor
EM_SNP1K = 99 # Trebia SNP 1000 processor
EM_ST200 = 100 # STMicroelectronics ST200 microcontroller
EM_IP2K = 101 # Ubicom IP2022 micro controller
EM_MAX = 102 # MAX Processor
EM_CR = 103 # National Semiconductor CompactRISC
EM_F2MC16 = 104 # Fujitsu F2MC16
EM_MSP430 = 105 # TI msp430 micro controller
EM_BLACKFIN = 106 # ADI Blackfin Processor
EM_SE_C33 = 107 # S1C33 Family of Seiko Epson processors
EM_SEP = 108 # Sharp embedded microprocessor
EM_ARCA = 109 # Arca RISC Microprocessor
EM_UNICORE = 110 # Microprocessor series from PKU-Unity Ltd. and MPRC of Peking University
EM_EXCESS = 111 # eXcess: 16/32/64-bit configurable embedded CPU
EM_DXP = 112 # Icera Semiconductor Inc. Deep Execution Processor
EM_ALTERA_NIOS2 = 113 # Altera Nios II soft-core processor
EM_CRX = 114 # National Semiconductor CRX
EM_XGATE = 115 # Motorola XGATE embedded processor
EM_C116 = 116 # Infineon C16x/XC16x processor
EM_M16C = 117 # Renesas M16C series microprocessors
EM_DSPIC30F = 118 # Microchip Technology dsPIC30F Digital Signal Controller
EM_CE = 119 # Freescale Communication Engine RISC core
EM_M32C = 120 # Freescale Communication Engine RISC core
EM_TSK3000 = 131 # Altium TSK3000 core
EM_RS08 = 132 # Freescale RS08 embedded processor
EM_SHARC = 133 # Analog Devices SHARC family of 32-bit DSP processors
EM_ECOG2 = 134 # Cyan Technology eCOG2 microprocessor
EM_SCORE7 = 135 # Sunplus S+core7 RISC processor
EM_DSP24 = 136 # New Japan Radio (NJR) 24-bit DSP Processor
EM_VIDEOCORE3 = 137 # Broadcom VideoCore III processor
EM_LATTICEMICO32 = 138 # RISC processor for Lattice FPGA architecture
EM_SE_C17 = 139 # Seiko Epson C17 family
EM_TI_C6000 = 140 # The Texas Instruments TMS320C6000 DSP family
EM_TI_C2000 = 141 # The Texas Instruments TMS320C2000 DSP family
EM_TI_C5500 = 142 # The Texas Instruments TMS320C55x DSP family
EM_TI_ARP32 = 143 # Texas Instruments Application Specific RISC Processor, 32bit fetch
EM_TI_PRU = 144 # Texas Instruments Programmable Realtime Unit
EM_MMDSP_PLUS = 160 # STMicroelectronics 64bit VLIW Data Signal Processor
EM_CYPRESS_M8C = 161 # Cypress M8C microprocessor
EM_R32C = 162 # Renesas R32C series microprocessors
EM_TRIMEDIA = 163 # NXP Semiconductors TriMedia architecture family
EM_QDSP6 = 164 # QUALCOMM DSP6 Processor
EM_8051 = 165 # Intel 8051 and variants
EM_STXP7X = 166 # STMicroelectronics STxP7x family
EM_NDS32 = 167 # Andes Technology compact code size embedded RISC processor family
EM_ECOG1 = 168 # Cyan Technology eCOG1X family
EM_ECOG1X = 168 # Cyan Technology eCOG1X family
EM_MAXQ30 = 169 # Dallas Semiconductor MAXQ30 Core Micro-controllers
EM_XIMO16 = 170 # New Japan Radio (NJR) 16-bit DSP Processor
EM_MANIK = 171 # M2000 Reconfigurable RISC Microprocessor
EM_CRAYNV2 = 172 # Cray Inc. NV2 vector architecture
EM_RX = 173 # Renesas RX family
EM_METAG = 174 # Imagination Technologies Meta processor architecture
EM_MCST_ELBRUS = 175 # MCST Elbrus general purpose hardware architecture
EM_ECOG16 = 176 # Cyan Technology eCOG16 family
EM_CR16 = 177 # National Semiconductor CompactRISC 16-bit processor
EM_ETPU = 178 # Freescale Extended Time Processing Unit
EM_SLE9X = 179 # Infineon Technologies SLE9X core
EM_L1OM = 180 # Intel L1OM
EM_K1OM = 181 # Intel K1OM
EM_AARCH64 = 183 # ARM 64 bit
EM_AVR32 = 185 # Atmel Corporation 32-bit microprocessor family
EM_STM8 = 186 # STMicroeletronics STM8 8-bit microcontroller
EM_TILE64 = 187 # Tilera TILE64 multicore architecture family
EM_TILEPRO = 188 # Tilera TILEPro
EM_MICROBLAZE = 189 # Xilinx MicroBlaze
EM_CUDA = 190 # NVIDIA CUDA architecture
EM_TILEGX = 191 # Tilera TILE-Gx
EM_CLOUDSHIELD = 192 # CloudShield architecture family
EM_COREA_1ST = 193 # KIPO-KAIST Core-A 1st generation processor family
EM_COREA_2ND = 194 # KIPO-KAIST Core-A 2nd generation processor family
EM_ARC_COMPACT2 = 195 # Synopsys ARCompact V2
EM_OPEN8 = 196 # Open8 8-bit RISC soft processor core
EM_RL78 = 197 # Renesas RL78 family
EM_VIDEOCORE5 = 198 # Broadcom VideoCore V processor
EM_78K0R = 199 # Renesas 78K0R
EM_56800EX = 200 # Freescale 56800EX Digital Signal Controller (DSC)
EM_BA1 = 201 # Beyond BA1 CPU architecture
EM_BA2 = 202 # Beyond BA2 CPU architecture
EM_XCORE = 203 # XMOS xCORE processor family
EM_MCHP_PIC = 204 # Microchip 8-bit PIC(r) family
EM_INTELGT = 205 # Intel Graphics Technology
EM_KM32 = 210 # KM211 KM32 32-bit processor
EM_KMX32 = 211 # KM211 KMX32 32-bit processor
EM_KMX16 = 212 # KM211 KMX16 16-bit processor
EM_KMX8 = 213 # KM211 KMX8 8-bit processor
EM_KVARC = 214 # KM211 KVARC processor
EM_CDP = 215 # Paneve CDP architecture family
EM_COGE = 216 # Cognitive Smart Memory Processor
EM_COOL = 217 # Bluechip Systems CoolEngine
EM_NORC = 218 # Nanoradio Optimized RISC
EM_CSR_KALIMBA = 219 # CSR Kalimba architecture family
EM_Z80 = 220 # Zilog Z80
EM_VISIUM = 221 # Controls and Data Services VISIUMcore processor
EM_FT32 = 222 # FTDI Chip FT32 high performance 32-bit RISC architecture
EM_MOXIE = 223 # Moxie processor family
EM_AMDGPU = 224 # AMD GPU architecture
EM_LANAI = 244 # Lanai 32-bit processor
EM_CEVA = 245 # CEVA Processor Architecture Family
EM_CEVA_X2 = 246 # CEVA X2 Processor Family
EM_BPF = 247 # Linux BPF - in-kernel virtual machine
EM_GRAPHCORE_IPU = 248 # Graphcore Intelligent Processing Unit
EM_IMG1 = 249 # Imagination Technologies
EM_NFP = 250 # Netronome Flow Processor (NFP)
EM_VE = 251 # NEC Vector Engine
EM_CSKY = 252 # C-SKY processor family
EM_ARC_COMPACT3_64 = 253 # Synopsys ARCv2.3 64-bit
EM_MCS6502 = 254 # MOS Technology MCS 6502 processor
EM_ARC_COMPACT3 = 255 # Synopsys ARCv2.3 32-bit
EM_KVX = 256 # Kalray VLIW core of the MPPA processor family
EM_65816 = 257 # WDC 65816/65C816
EM_LOONGARCH = 258 # LoongArch
EM_KF32 = 259 # ChipON KungFu32
EM_U16_U8CORE = 260 # LAPIS nX-U16/U8
EM_TACHYUM = 261 # Tachyum
EM_56800EF = 262 # NXP 56800EF Digital Signal Controller (DSC)
EM_FRV = 0x5441 # Fujitsu FR-V
# This is an interim value that we will use until the committee comes up with a final number.
EM_ALPHA = 0x9026
# Bogus old m32r magic number, used by old tools.
EM_CYGNUS_M32R = 0x9041
# This is the old interim value for S/390 architecture
EM_S390_OLD = 0xA390
# Also Panasonic/MEI MN10300, AM33
EM_CYGNUS_MN10300 = 0xbeef
# Return the architecture name according to +val+.
# Used by {ELFTools::ELFFile#machine}.
#
# Only supports famous archs.
# @param [Integer] val Value of +e_machine+.
# @return [String]
# Name of architecture.
# @example
# mapping(3)
# #=> 'Intel 80386'
# mapping(6)
# #=> 'Intel 80386'
# mapping(62)
# #=> 'Advanced Micro Devices X86-64'
# mapping(1337)
# #=> '<unknown>: 0x539'
def self.mapping(val)
  # Table of machine values we can pretty-print; anything else falls
  # back to a hex representation of the raw value.
  known = {
    EM_NONE => 'None',
    EM_386 => 'Intel 80386',
    EM_486 => 'Intel 80386',
    EM_860 => 'Intel 80860',
    EM_MIPS => 'MIPS R3000',
    EM_PPC => 'PowerPC',
    EM_PPC64 => 'PowerPC64',
    EM_ARM => 'ARM',
    EM_IA_64 => 'Intel IA-64',
    EM_AARCH64 => 'AArch64',
    EM_X86_64 => 'Advanced Micro Devices X86-64'
  }
  known.fetch(val) { format('<unknown>: 0x%x', val) }
end
end
include EM
# This module defines ELF file types.
module ET
  ET_NONE = 0 # no file type
  ET_REL = 1 # relocatable file
  ET_EXEC = 2 # executable file
  ET_DYN = 3 # shared object
  ET_CORE = 4 # core file

  # Return the type name according to +e_type+ in ELF file header.
  # @param [Integer] type Value of +e_type+.
  # @return [String] Type in string format, +'<unknown>'+ otherwise.
  def self.mapping(type)
    names = {
      Constants::ET_NONE => 'NONE',
      Constants::ET_REL => 'REL',
      Constants::ET_EXEC => 'EXEC',
      Constants::ET_DYN => 'DYN',
      Constants::ET_CORE => 'CORE'
    }
    names.fetch(type, '<unknown>')
  end
end
include ET
# Program header permission flags, records bitwise OR value in +p_flags+.
module PF
  # Individual permission bits; +p_flags+ holds their bitwise OR.
  PF_X = 1 # executable
  PF_W = 2 # writable
  PF_R = 4 # readable
end
include PF
# Program header types, records in +p_type+.
module PT
PT_NULL = 0 # null segment
PT_LOAD = 1 # segment to be load
PT_DYNAMIC = 2 # dynamic tags
PT_INTERP = 3 # interpreter, same as .interp section
PT_NOTE = 4 # same as .note* section
PT_SHLIB = 5 # reserved
PT_PHDR = 6 # where program header starts
PT_TLS = 7 # thread local storage segment
PT_LOOS = 0x60000000 # OS-specific
PT_GNU_EH_FRAME = 0x6474e550 # for exception handler
PT_GNU_STACK = 0x6474e551 # permission of stack
PT_GNU_RELRO = 0x6474e552 # read only after relocation
PT_GNU_PROPERTY = 0x6474e553 # GNU property
PT_GNU_MBIND_HI = 0x6474f554 # Mbind segments (upper bound)
PT_GNU_MBIND_LO = 0x6474e555 # Mbind segments (lower bound)
PT_OPENBSD_RANDOMIZE = 0x65a3dbe6 # Fill with random data
PT_OPENBSD_WXNEEDED = 0x65a3dbe7 # Program does W^X violations
PT_OPENBSD_BOOTDATA = 0x65a41be6 # Section for boot arguments
PT_HIOS = 0x6fffffff # OS-specific
# Values between {PT_LOPROC} and {PT_HIPROC} are reserved for processor-specific semantics.
PT_LOPROC = 0x70000000
PT_ARM_ARCHEXT = 0x70000000 # platform architecture compatibility information
PT_ARM_EXIDX = 0x70000001 # exception unwind tables
PT_MIPS_REGINFO = 0x70000000 # register usage information
PT_MIPS_RTPROC = 0x70000001 # runtime procedure table
PT_MIPS_OPTIONS = 0x70000002 # +.MIPS.options+ section
PT_MIPS_ABIFLAGS = 0x70000003 # +.MIPS.abiflags+ section
PT_AARCH64_ARCHEXT = 0x70000000 # platform architecture compatibility information
PT_AARCH64_UNWIND = 0x70000001 # exception unwind tables
PT_S390_PGSTE = 0x70000000 # 4k page table size
PT_HIPROC = 0x7fffffff # see {PT_LOPROC}
end
include PT
# Special indices to section. These are used when there is no valid index to section header.
# The meaning of these values is left upto the embedding header.
module SHN
SHN_UNDEF = 0 # undefined section
SHN_LORESERVE = 0xff00 # start of reserved indices
# Values between {SHN_LOPROC} and {SHN_HIPROC} are reserved for processor-specific semantics.
SHN_LOPROC = 0xff00
SHN_MIPS_ACOMMON = 0xff00 # defined and allocated common symbol
SHN_MIPS_TEXT = 0xff01 # defined and allocated text symbol
SHN_MIPS_DATA = 0xff02 # defined and allocated data symbol
SHN_MIPS_SCOMMON = 0xff03 # small common symbol
SHN_MIPS_SUNDEFINED = 0xff04 # small undefined symbol
SHN_X86_64_LCOMMON = 0xff02 # large common symbol
SHN_HIPROC = 0xff1f # see {SHN_LOPROC}
# Values between {SHN_LOOS} and {SHN_HIOS} are reserved for operating system-specific semantics.
SHN_LOOS = 0xff20
SHN_HIOS = 0xff3f # see {SHN_LOOS}
SHN_ABS = 0xfff1 # specifies absolute values for the corresponding reference
SHN_COMMON = 0xfff2 # symbols defined relative to this section are common symbols
SHN_XINDEX = 0xffff # escape value indicating that the actual section header index is too large to fit
SHN_HIRESERVE = 0xffff # end of reserved indices
end
include SHN
# Section flag mask types, records in +sh_flag+.
module SHF
SHF_WRITE = (1 << 0) # Writable
SHF_ALLOC = (1 << 1) # Occupies memory during execution
SHF_EXECINSTR = (1 << 2) # Executable
SHF_MERGE = (1 << 4) # Might be merged
SHF_STRINGS = (1 << 5) # Contains nul-terminated strings
SHF_INFO_LINK = (1 << 6) # `sh_info' contains SHT index
SHF_LINK_ORDER = (1 << 7) # Preserve order after combining
SHF_OS_NONCONFORMING = (1 << 8) # Non-standard OS specific handling required
SHF_GROUP = (1 << 9) # Section is member of a group.
SHF_TLS = (1 << 10) # Section hold thread-local data.
SHF_COMPRESSED = (1 << 11) # Section with compressed data.
SHF_MASKOS = 0x0ff00000 # OS-specific.
SHF_MASKPROC = 0xf0000000 # Processor-specific
SHF_GNU_RETAIN = (1 << 21) # Not to be GCed by linker.
SHF_GNU_MBIND = (1 << 24) # Mbind section
SHF_ORDERED = (1 << 30) # Special ordering requirement
SHF_EXCLUDE = (1 << 31) # Section is excluded unless referenced or allocated (Solaris).
end
include SHF
# Section header types, records in +sh_type+.
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | true |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/structs.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/structs.rb | # frozen_string_literal: true
require 'bindata'
module ELFTools
# Define ELF related structures in this module.
#
# Structures are fetched from https://github.com/torvalds/linux/blob/master/include/uapi/linux/elf.h.
# Use gem +bindata+ to have these structures support 32/64 bits and little/big endian simultaneously.
module Structs
# The base structure to define common methods.
class ELFStruct < BinData::Record
  # DRY. Many fields have different type in different arch.
  # Invoke with the base type name (e.g. 'uint' / 'int') to build the
  # +choice+ options hash that switches on {#elf_class}.
  CHOICE_SIZE_T = proc do |t = 'uint'|
    { selection: :elf_class, choices: { 32 => :"#{t}32", 64 => :"#{t}64" }, copy_on_change: true }
  end

  attr_accessor :elf_class # @return [Integer] 32 or 64.
  attr_accessor :offset # @return [Integer] The file offset of this header.

  # Records which fields have been patched.
  # Keys are absolute file offsets of a field, values the packed
  # replacement bytes produced by {.pack}.
  # @return [Hash{Integer => String}] Patches.
  def patches
    @patches ||= {}
  end

  # BinData hash(Snapshot) that behaves like HashWithIndifferentAccess
  alias to_h snapshot

  class << self
    # Hooks the constructor.
    #
    # +BinData::Record+ doesn't allow us to override +#initialize+, so we hack +new+ here.
    # Strips the +:offset+ keyword before delegating, then wraps every
    # field writer so that assignments are recorded in {#patches}.
    def new(*args)
      # XXX: The better implementation is +new(*args, **kwargs)+, but we can't do this unless bindata changed
      # lib/bindata/dsl.rb#override_new_in_class to invoke +new+ with both +args+ and +kwargs+.
      kwargs = args.last.is_a?(Hash) ? args.last : {}
      # NOTE: this mutates the caller-supplied hash so +super+ never sees :offset.
      offset = kwargs.delete(:offset)
      super.tap do |obj|
        obj.offset = offset
        obj.field_names.each do |f|
          m = "#{f}=".to_sym
          old_method = obj.singleton_method(m)
          obj.singleton_class.send(:undef_method, m)
          # Replace the writer: record the packed bytes at the field's
          # absolute offset, then fall through to the original setter.
          obj.define_singleton_method(m) do |val|
            org = obj.send(f)
            obj.patches[org.abs_offset] = ELFStruct.pack(val, org.num_bytes)
            old_method.call(val)
          end
        end
      end
    end

    # Gets the endianness of current class.
    # Relies on BinData's registered subclass name ending in 'be' / 'le'.
    # @return [:little, :big] The endianness.
    def self_endian
      bindata_name[-2..] == 'be' ? :big : :little
    end

    # Packs an integer to string.
    # Truncates +val+ to +bytes+ bytes and honors {.self_endian}.
    # @param [Integer] val
    # @param [Integer] bytes
    # @return [String]
    # @raise [ArgumentError] If +val+ is not an Integer.
    def pack(val, bytes)
      raise ArgumentError, "Not supported assign type #{val.class}" unless val.is_a?(Integer)

      number = val & ((1 << (8 * bytes)) - 1)
      out = []
      bytes.times do
        out << (number & 0xff)
        number >>= 8
      end
      # Bytes were emitted little-endian first; reverse for big-endian targets.
      out = out.pack('C*')
      self_endian == :little ? out : out.reverse
    end
  end
end
# ELF header structure.
class ELF_Ehdr < ELFStruct
  endian :big_and_little
  # Identification bytes at the very start of the file.
  struct :e_ident do
    string :magic, read_length: 4 # expected to be "\x7FELF" ({Constants::ELFMAG})
    int8 :ei_class # selects 32- or 64-bit layout
    int8 :ei_data # data encoding (endianness)
    int8 :ei_version
    int8 :ei_osabi
    int8 :ei_abiversion
    string :ei_padding, read_length: 7 # no use
  end
  uint16 :e_type # file type, see {Constants::ET}
  uint16 :e_machine # target architecture, see {Constants::EM}
  uint32 :e_version
  # entry point
  choice :e_entry, **CHOICE_SIZE_T['uint']
  choice :e_phoff, **CHOICE_SIZE_T['uint'] # file offset of program headers
  choice :e_shoff, **CHOICE_SIZE_T['uint'] # file offset of section headers
  uint32 :e_flags
  uint16 :e_ehsize # size of this header
  uint16 :e_phentsize # size of each segment
  uint16 :e_phnum # number of segments
  uint16 :e_shentsize # size of each section
  uint16 :e_shnum # number of sections
  uint16 :e_shstrndx # index of string table section
end
# Section header structure.
class ELF_Shdr < ELFStruct
  endian :big_and_little
  uint32 :sh_name # index of the section's name in the section string table
  uint32 :sh_type # section type
  choice :sh_flags, **CHOICE_SIZE_T['uint'] # see {Constants::SHF}
  choice :sh_addr, **CHOICE_SIZE_T['uint']
  choice :sh_offset, **CHOICE_SIZE_T['uint']
  choice :sh_size, **CHOICE_SIZE_T['uint']
  uint32 :sh_link
  uint32 :sh_info
  choice :sh_addralign, **CHOICE_SIZE_T['uint']
  choice :sh_entsize, **CHOICE_SIZE_T['uint']
end
# Program header structure for 32-bit.
class ELF32_Phdr < ELFStruct
  endian :big_and_little
  uint32 :p_type # segment type, see {Constants::PT}
  uint32 :p_offset
  uint32 :p_vaddr
  uint32 :p_paddr
  uint32 :p_filesz
  uint32 :p_memsz
  uint32 :p_flags # permission flags, see {Constants::PF}
  uint32 :p_align
end
# Program header structure for 64-bit.
class ELF64_Phdr < ELFStruct
  endian :big_and_little
  # NOTE: field order differs from {ELF32_Phdr}: +p_flags+ comes second here.
  uint32 :p_type # segment type, see {Constants::PT}
  uint32 :p_flags # permission flags, see {Constants::PF}
  uint64 :p_offset
  uint64 :p_vaddr
  uint64 :p_paddr
  uint64 :p_filesz
  uint64 :p_memsz
  uint64 :p_align
end
# Maps the ELF class (32 / 64 bits) to the matching program header struct.
ELF_Phdr = {
  32 => ELF32_Phdr,
  64 => ELF64_Phdr
}.freeze
# Symbol structure for 32-bit.
class ELF32_sym < ELFStruct
  endian :big_and_little
  uint32 :st_name # Symbol name, index in string tbl
  uint32 :st_value # Value of the symbol
  uint32 :st_size # Associated symbol size
  uint8 :st_info # Type and binding attributes
  uint8 :st_other # No defined meaning, 0
  uint16 :st_shndx # Associated section index
end
# Symbol structure for 64-bit.
class ELF64_sym < ELFStruct
  endian :big_and_little
  # NOTE: field order differs from {ELF32_sym}: +st_value+ / +st_size+ come last.
  uint32 :st_name # Symbol name, index in string tbl
  uint8 :st_info # Type and binding attributes
  uint8 :st_other # No defined meaning, 0
  uint16 :st_shndx # Associated section index
  uint64 :st_value # Value of the symbol
  uint64 :st_size # Associated symbol size
end
# Get symbol header class according to bits.
ELF_sym = {
32 => ELF32_sym,
64 => ELF64_sym
}.freeze
# Note header.
class ELF_Nhdr < ELFStruct
  endian :big_and_little
  # All three fields are fixed 32-bit in both ELF classes.
  uint32 :n_namesz # Name size
  uint32 :n_descsz # Content size
  uint32 :n_type   # Content type
end
# Dynamic tag header.
class ELF_Dyn < ELFStruct
  endian :big_and_little
  choice :d_tag, **CHOICE_SIZE_T['int'] # Signed, word-sized tag (DT_* constants)
  # This is a union type named +d_un+ in the original C source;
  # simplified to a single word-sized +d_val+ here.
  choice :d_val, **CHOICE_SIZE_T['uint']
end
# Rel header in .rel section.
class ELF_Rel < ELFStruct
  endian :big_and_little
  choice :r_offset, **CHOICE_SIZE_T['uint'] # Location to be relocated
  choice :r_info, **CHOICE_SIZE_T['uint']   # Packed symbol index and relocation type
  # REL entries carry no addend; expose +nil+ so {ELF_Rel} and {ELF_Rela}
  # can be used interchangeably by callers.
  def r_addend
    nil
  end
end
# Rela header in .rela section.
class ELF_Rela < ELFStruct
  endian :big_and_little
  choice :r_offset, **CHOICE_SIZE_T['uint'] # Location to be relocated
  choice :r_info, **CHOICE_SIZE_T['uint']   # Packed symbol index and relocation type
  choice :r_addend, **CHOICE_SIZE_T['int']  # Signed addend applied to the relocation
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/note.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/note.rb | # frozen_string_literal: true
require 'elftools/structs'
require 'elftools/util'
module ELFTools
# Since both note sections and note segments refer to notes, this module
# defines common methods for {ELFTools::Sections::NoteSection} and
# {ELFTools::Segments::NoteSegment}.
#
# @note
# This module can only be included in {ELFTools::Sections::NoteSection} and
# {ELFTools::Segments::NoteSegment} since some methods here assume some
# attributes already exist.
module Note
# Since size of {ELFTools::Structs::ELF_Nhdr} will not change no matter in
# what endian and what arch, we can do this here. This value should equal
# to 12.
SIZE_OF_NHDR = Structs::ELF_Nhdr.new(endian: :little).num_bytes
# Iterate all notes in a note section or segment.
#
# Structure of notes are:
# +---------------+
# | Note 1 header |
# +---------------+
# | Note 1 name |
# +---------------+
# | Note 1 desc |
# +---------------+
# | Note 2 header |
# +---------------+
# | ... |
# +---------------+
#
# @note
# This method assume following methods exist:
# stream
# note_start
# note_total_size
# @return [Enumerator<ELFTools::Note::Note>, Array<ELFTools::Note::Note>]
# If block is not given, an enumerator will be returned.
# Otherwise, return the array of notes.
def each_notes
  return enum_for(:each_notes) unless block_given?

  # Cache: one Note object per file offset, reused across repeated iterations.
  @notes_offset_map ||= {}
  cur = note_start
  notes = []
  while cur < note_start + note_total_size
    stream.pos = cur
    @notes_offset_map[cur] ||= create_note(cur)
    note = @notes_offset_map[cur]
    # Name and desc sizes are padded to a 4-byte boundary (align bit = 2).
    name_size = Util.align(note.header.n_namesz, 2)
    desc_size = Util.align(note.header.n_descsz, 2)
    # Advance to the next note: fixed header plus padded name and desc.
    cur += SIZE_OF_NHDR + name_size + desc_size
    notes << note
    yield note
  end
  notes
end
# Simply +#notes+ to get all notes.
# @return [Array<ELFTools::Note::Note>]
# Whole notes.
def notes
  # Materialize the lazy enumerator produced by #each_notes.
  enum_for(:each_notes).to_a
end
private
# Get the endian.
#
# @note This method assume method +header+ exists.
# @return [Symbol] +:little+ or +:big+.
def endian
  # Delegates to the header's struct class, which reports the
  # endianness it was instantiated with (+self_endian+).
  header.class.self_endian
end
def create_note(cur)
  # Parse the fixed-size note header at the current stream position;
  # +cur+ is kept as the note's base offset for lazy name/desc reads.
  nhdr = Structs::ELF_Nhdr.new(endian:, offset: stream.pos).read(stream)
  ELFTools::Note::Note.new(nhdr, stream, cur)
end
# Class of a note.
class Note
  attr_reader :header # @return [ELFTools::Structs::ELF_Nhdr] Note header.
  attr_reader :stream # @return [#pos=, #read] Streaming object.
  attr_reader :offset # @return [Integer] Address of this note start, includes note header.

  # Instantiate a {ELFTools::Note::Note} object.
  # @param [ELF_Nhdr] header The note header.
  # @param [#pos=, #read] stream Streaming object.
  # @param [Integer] offset
  #   Start address of this note, includes the header.
  def initialize(header, stream, offset)
    @header = header
    @stream = stream
    @offset = offset
  end

  # Name of this note. Lazily read from the stream and memoized.
  # @return [String] The name, with the trailing NUL byte removed.
  def name
    return @name if defined?(@name)

    # The name immediately follows the fixed-size note header.
    stream.pos = @offset + SIZE_OF_NHDR
    # [0..-2] drops the terminating NUL byte counted in n_namesz.
    @name = stream.read(header.n_namesz)[0..-2]
  end

  # Description of this note. Lazily read from the stream and memoized.
  # @return [String] The description.
  def desc
    return @desc if instance_variable_defined?(:@desc)

    # The desc follows the name, which is padded to a 4-byte boundary.
    stream.pos = @offset + SIZE_OF_NHDR + Util.align(header.n_namesz, 2)
    @desc = stream.read(header.n_descsz)
  end

  # If someone likes to use the full name.
  alias description desc
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/elf_file.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/elf_file.rb | # frozen_string_literal: true
require 'elftools/constants'
require 'elftools/exceptions'
require 'elftools/lazy_array'
require 'elftools/sections/sections'
require 'elftools/segments/segments'
require 'elftools/structs'
module ELFTools
# The main class for using elftools.
class ELFFile
attr_reader :stream # @return [#pos=, #read] The +File+ object.
attr_reader :elf_class # @return [Integer] 32 or 64.
attr_reader :endian # @return [Symbol] +:little+ or +:big+.
# Instantiate an {ELFFile} object.
#
# @param [#pos=, #read] stream
# The +File+ object to be fetch information from.
# @example
# ELFFile.new(File.open('/bin/cat'))
# #=> #<ELFTools::ELFFile:0x00564b106c32a0 @elf_class=64, @endian=:little, @stream=#<File:/bin/cat>>
def initialize(stream)
  @stream = stream
  # Raw bytes are read throughout, so force binary mode when the stream
  # supports it; stream-likes without #binmode are left untouched.
  @stream.binmode if @stream.respond_to?(:binmode)
  identify # fetch the most basic information (magic, elf_class, endian)
end
# Return the file header.
#
# Lazy loading.
# @return [ELFTools::Structs::ELF_Ehdr] The header.
def header
  return @header if defined?(@header)

  stream.pos = 0
  @header = Structs::ELF_Ehdr.new(endian:, offset: stream.pos)
  # elf_class is assigned before #read so the class-dependent (32/64-bit)
  # choice fields parse with the right width.
  @header.elf_class = elf_class
  @header.read(stream)
end
# Return the BuildID of ELF.
# @return [String, nil]
# BuildID in hex form will be returned.
# +nil+ is returned if the .note.gnu.build-id section
# is not found.
# @example
# elf.build_id
# #=> '73ab62cb7bc9959ce053c2b711322158708cdc07'
# Return the BuildID of ELF.
# @return [String, nil]
#   BuildID in hex form, or +nil+ when the .note.gnu.build-id
#   section (or its first note) is absent.
def build_id
  # The build id is the desc of the first note in .note.gnu.build-id.
  note = section_by_name('.note.gnu.build-id')&.notes&.first
  return nil if note.nil?

  note.desc.unpack1('H*')
end
# Get machine architecture.
#
# Mappings of architecture can be found
# in {ELFTools::Constants::EM.mapping}.
# @return [String]
# Name of architecture.
# @example
# elf.machine
# #=> 'Advanced Micro Devices X86-64'
def machine
ELFTools::Constants::EM.mapping(header.e_machine)
end
# Return the ELF type according to +e_type+.
# @return [String] Type in string format.
# @example
# ELFFile.new(File.open('spec/files/libc.so.6')).elf_type
# #=> 'DYN'
# ELFFile.new(File.open('spec/files/amd64.elf')).elf_type
# #=> 'EXEC'
def elf_type
ELFTools::Constants::ET.mapping(header.e_type)
end
#========= method about sections
# Number of sections in this file.
# @return [Integer] The desired number.
# @example
# elf.num_sections
# #=> 29
def num_sections
header.e_shnum
end
# Acquire the section named as +name+.
# @param [String] name The desired section name.
# @return [ELFTools::Sections::Section, nil] The target section.
# @example
# elf.section_by_name('.note.gnu.build-id')
# #=> #<ELFTools::Sections::Section:0x005647b1282428>
# elf.section_by_name('')
# #=> #<ELFTools::Sections::NullSection:0x005647b11da110>
# elf.section_by_name('no such section')
# #=> nil
def section_by_name(name)
each_sections.find { |sec| sec.name == name }
end
# Iterate all sections.
#
# All sections are lazy loading, the section
# only be created whenever accessing it.
# This method is useful for {#section_by_name}
# since not all sections need to be created.
# @yieldparam [ELFTools::Sections::Section] section A section.
# @yieldreturn [void]
# @return [Enumerator<ELFTools::Sections::Section>, Array<ELFTools::Sections::Section>]
# As +Array#each+, if block is not given, an enumerator will be returned,
# otherwise, the whole sections will be returned.
def each_sections(&block)
  return enum_for(:each_sections) unless block_given?

  # Visit each lazily-created section in index order, yielding it and
  # collecting it, so the whole Array of sections is the return value.
  (0...num_sections).map do |idx|
    section = section_at(idx)
    block.call(section)
    section
  end
end
# Simply use {#sections} to get all sections.
# @return [Array<ELFTools::Sections::Section>]
# Whole sections.
def sections
each_sections.to_a
end
# Acquire the +n+-th section, 0-based.
#
# Sections are lazy loaded.
# @param [Integer] n The index.
# @return [ELFTools::Sections::Section, nil]
# The target section.
# If +n+ is out of bound, +nil+ is returned.
def section_at(n)
  # Sections are memoized in a LazyArray: each entry is built by
  # create_section on first access only.
  @sections ||= LazyArray.new(num_sections, &method(:create_section))
  @sections[n]
end
# Fetch all sections with specific type.
#
# The available types are listed in {ELFTools::Constants::PT}.
# This method accept giving block.
# @param [Integer, Symbol, String] type
# The type needed, similar format as {#segment_by_type}.
# @yieldparam [ELFTools::Sections::Section] section A section in specific type.
# @yieldreturn [void]
# @return [Array<ELFTools::Sections::section>] The target sections.
# @example
# elf = ELFTools::ELFFile.new(File.open('spec/files/amd64.elf'))
# elf.sections_by_type(:rela)
# #=> [#<ELFTools::Sections::RelocationSection:0x00563cd3219970>,
# # #<ELFTools::Sections::RelocationSection:0x00563cd3b89d70>]
def sections_by_type(type, &block)
type = Util.to_constant(Constants::SHT, type)
Util.select_by_type(each_sections, type, &block)
end
# Get the string table section.
#
# This section is acquired by using the +e_shstrndx+
# in ELF header.
# @return [ELFTools::Sections::StrTabSection] The desired section.
def strtab_section
section_at(header.e_shstrndx)
end
#========= method about segments
# Number of segments in this file.
# @return [Integer] The desired number.
def num_segments
header.e_phnum
end
# Iterate all segments.
#
# All segments are lazy loading, the segment
# only be created whenever accessing it.
# This method is useful for {#segment_by_type}
# since not all segments need to be created.
# @yieldparam [ELFTools::Segments::Segment] segment A segment.
# @yieldreturn [void]
# @return [Array<ELFTools::Segments::Segment>]
# Whole segments will be returned.
def each_segments(&block)
  return enum_for(:each_segments) unless block_given?

  # Visit each lazily-created segment in index order, yielding it and
  # collecting it, so the whole Array of segments is the return value.
  (0...num_segments).map do |idx|
    segment = segment_at(idx)
    block.call(segment)
    segment
  end
end
# Simply use {#segments} to get all segments.
# @return [Array<ELFTools::Segments::Segment>]
# Whole segments.
def segments
each_segments.to_a
end
# Get the first segment with +p_type=type+.
# The available types are listed in {ELFTools::Constants::PT}.
#
# @note
# This method will return the first segment found,
# to find all segments with a specific type you can use {#segments_by_type}.
# @param [Integer, Symbol, String] type
# See examples for clear usage.
# @return [ELFTools::Segments::Segment] The target segment.
# @example
# # type as an integer
# elf.segment_by_type(ELFTools::Constants::PT_NOTE)
# #=> #<ELFTools::Segments::NoteSegment:0x005629dda1e4f8>
#
# elf.segment_by_type(4) # PT_NOTE
# #=> #<ELFTools::Segments::NoteSegment:0x005629dda1e4f8>
#
# # type as a symbol
# elf.segment_by_type(:PT_NOTE)
# #=> #<ELFTools::Segments::NoteSegment:0x005629dda1e4f8>
#
# # you can do this
# elf.segment_by_type(:note) # will be transformed into `PT_NOTE`
# #=> #<ELFTools::Segments::NoteSegment:0x005629dda1e4f8>
#
# # type as a string
# elf.segment_by_type('PT_NOTE')
# #=> #<ELFTools::Segments::NoteSegment:0x005629dda1e4f8>
#
# # this is ok
# elf.segment_by_type('note') # will be transformed into `PT_NOTE`
# #=> #<ELFTools::Segments::NoteSegment:0x005629dda1e4f8>
# @example
# elf.segment_by_type(1337)
# # ArgumentError: No constants in Constants::PT is 1337
#
# elf.segment_by_type('oao')
# # ArgumentError: No constants in Constants::PT named "PT_OAO"
# @example
# elf.segment_by_type(0)
# #=> nil # no such segment exists
def segment_by_type(type)
type = Util.to_constant(Constants::PT, type)
each_segments.find { |seg| seg.header.p_type == type }
end
# Fetch all segments with specific type.
#
# If you want to find only one segment,
# use {#segment_by_type} instead.
# This method accept giving block.
# @param [Integer, Symbol, String] type
# The type needed, same format as {#segment_by_type}.
# @yieldparam [ELFTools::Segments::Segment] segment A segment in specific type.
# @yieldreturn [void]
# @return [Array<ELFTools::Segments::Segment>] The target segments.
def segments_by_type(type, &block)
type = Util.to_constant(Constants::PT, type)
Util.select_by_type(each_segments, type, &block)
end
# Acquire the +n+-th segment, 0-based.
#
# Segments are lazy loaded.
# @param [Integer] n The index.
# @return [ELFTools::Segments::Segment, nil]
# The target segment.
# If +n+ is out of bound, +nil+ is returned.
def segment_at(n)
  # Segments are memoized in a LazyArray: each entry is built by
  # create_segment on first access only.
  @segments ||= LazyArray.new(num_segments, &method(:create_segment))
  @segments[n]
end
# Get the offset related to file, given virtual memory address.
#
# This method should work no matter ELF is a PIE or not.
# This method refers from (actually equals to) binutils/readelf.c#offset_from_vma.
# @param [Integer] vma The virtual address to be queried.
# @return [Integer] Related file offset.
# @example
# elf = ELFTools::ELFFile.new(File.open('/bin/cat'))
# elf.offset_from_vma(0x401337)
# #=> 4919 # 0x1337
def offset_from_vma(vma, size = 1)
  # Return via the first PT_LOAD segment whose memory range covers
  # [vma, vma + size).
  # NOTE(review): when no segment matches, execution falls through and the
  # Array returned by #segments_by_type becomes the return value, not an
  # Integer offset — callers should be aware of this quirk.
  segments_by_type(:load) do |seg|
    return seg.vma_to_offset(vma) if seg.vma_in?(vma, size)
  end
end
# The patch status.
# @return [Hash{Integer => String}]
# The patch status.
# Collects pending byte patches from every ELF struct instantiated so far,
# keyed by absolute file offset.
# @return [Hash{Integer => String}]
def patches
  loaded_headers.each_with_object({}) do |hdr, acc|
    hdr.patches.each do |key, val|
      # Struct-local patch offsets are rebased onto the struct's file offset.
      acc[key + hdr.offset] = val
    end
  end
end
# Apply patches and save as +filename+.
#
# @param [String] filename
# @return [void]
# Apply patches and save as +filename+.
#
# Reads the whole stream, splices every pending patch in at its
# absolute offset, then writes the result out in binary mode.
# @param [String] filename
# @return [void]
def save(filename)
  stream.pos = 0
  content = stream.read.force_encoding('ascii-8bit')
  patches.each do |offset, bytes|
    content[offset, bytes.size] = bytes
  end
  File.binwrite(filename, content)
end
private
# bad idea..
# Recursively walks this object's instance variables (and arrays thereof),
# collecting every ELFStruct that has been instantiated — and therefore
# possibly patched — so far. Lazily-unloaded structs are naturally skipped.
def loaded_headers
  explore = lambda do |obj|
    return obj if obj.is_a?(::ELFTools::Structs::ELFStruct)
    return obj.map(&explore) if obj.is_a?(Array)

    obj.instance_variables.map do |s|
      explore.call(obj.instance_variable_get(s))
    end
  end
  # The walk yields nested arrays; flatten to a simple list of structs.
  explore.call(self).flatten
end
# Read the ELF identification bytes: magic, EI_CLASS and EI_DATA.
# Sets @elf_class and @endian; raises on anything unrecognized.
def identify
  stream.pos = 0
  magic = stream.read(4)
  raise ELFMagicError, "Invalid magic number #{magic.inspect}" unless magic == Constants::ELFMAG

  # EI_CLASS byte: 1 => 32-bit, 2 => 64-bit.
  ei_class = stream.read(1).ord
  @elf_class = {
    1 => 32,
    2 => 64
  }[ei_class]
  raise ELFClassError, format('Invalid EI_CLASS "\x%02x"', ei_class) if elf_class.nil?

  # EI_DATA byte: 1 => little-endian, 2 => big-endian.
  ei_data = stream.read(1).ord
  @endian = {
    1 => :little,
    2 => :big
  }[ei_data]
  raise ELFDataError, format('Invalid EI_DATA "\x%02x"', ei_data) if endian.nil?
end
def create_section(n)
  # Section headers form a table at e_shoff, e_shentsize bytes apart.
  stream.pos = header.e_shoff + n * header.e_shentsize
  shdr = Structs::ELF_Shdr.new(endian:, offset: stream.pos)
  shdr.elf_class = elf_class
  shdr.read(stream)
  # Section.create picks the concrete subclass from sh_type; the lambdas
  # let sections resolve names/offsets lazily without holding the ELFFile.
  Sections::Section.create(shdr, stream,
                           offset_from_vma: method(:offset_from_vma),
                           strtab: method(:strtab_section),
                           section_at: method(:section_at))
end
def create_segment(n)
  # Program headers form a table at e_phoff, e_phentsize bytes apart.
  stream.pos = header.e_phoff + n * header.e_phentsize
  # Pick the 32- or 64-bit program header layout for this ELF class.
  phdr = Structs::ELF_Phdr[elf_class].new(endian:, offset: stream.pos)
  phdr.elf_class = elf_class
  # Segment.create picks the concrete subclass from p_type.
  Segments::Segment.create(phdr.read(stream), stream, offset_from_vma: method(:offset_from_vma))
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/util.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/util.rb | # frozen_string_literal: true
module ELFTools
# Define some util methods.
module Util
# Class methods.
module ClassMethods
# Round up the number to be multiple of
# +2**bit+.
# @param [Integer] num Number to be rounded-up.
# @param [Integer] bit How many bit to be aligned.
# @return [Integer] See examples.
# @example
# align(10, 1) #=> 10
# align(10, 2) #=> 12
# align(10, 3) #=> 16
# align(10, 4) #=> 16
# align(10, 5) #=> 32
# Round +num+ up to the next multiple of +2**bit+.
# @param [Integer] num Number to be rounded up.
# @param [Integer] bit How many bits to align to.
# @return [Integer] The smallest multiple of +2**bit+ that is >= +num+.
# @example
#   align(10, 2) #=> 12
#   align(10, 3) #=> 16
def align(num, bit)
  boundary = 1 << bit
  remainder = num % boundary
  remainder.zero? ? num : num + boundary - remainder
end
# Fetch the correct value from module +mod+.
#
# See {ELFTools::ELFFile#segment_by_type} for how to
# use this method.
# @param [Module] mod The module defined constant numbers.
# @param [Integer, Symbol, String] val
# Desired value.
# @return [Integer]
# Currently this method always return a value
# from {ELFTools::Constants}.
# Resolve +val+ to a constant value defined in module +mod+.
#
# Integers are validated against the module's constant values; symbols and
# strings are upcased and prefixed (e.g. +:note+ -> +:PT_NOTE+) before lookup.
# @param [Module] mod The module defining the constants.
# @param [Integer, Symbol, String] val Desired value.
# @return [Integer] The resolved constant value.
# @raise [ArgumentError] When +val+ matches no constant in +mod+.
def to_constant(mod, val)
  # Drop the outermost namespace from the module's name.
  module_name = mod.name.sub('ELFTools::', '')
  if val.is_a?(Integer)
    # Integers pass through verbatim, but only if some constant has that value.
    known = mod.constants.any? { |c| mod.const_get(c) == val }
    raise ArgumentError, "No constants in #{module_name} is #{val}" unless known

    return val
  end

  prefix = module_name.split('::')[-1]
  name = val.to_s.upcase
  name = "#{prefix}_#{name}" unless name.start_with?(prefix)
  const = name.to_sym
  raise ArgumentError, "No constants in #{module_name} named \"#{const}\"" unless mod.const_defined?(const)

  mod.const_get(const)
end
# Read from stream until reach a null-byte.
# @param [#pos=, #read] stream Streaming object
# @param [Integer] offset Start from here.
# @return [String] Result string will never contain null byte.
# @example
# Util.cstring(File.open('/bin/cat'), 0)
# #=> "\x7FELF\x02\x01\x01"
# Read from +stream+ until a NUL byte is reached.
# @param [#pos=, #read] stream Streaming object.
# @param [Integer] offset Start reading from here.
# @return [String, nil]
#   The bytes before the NUL terminator, or +nil+ when EOF is hit first.
def cstring(stream, offset)
  stream.pos = offset
  result = +''
  # Accumulate byte-by-byte until the terminating NUL.
  while (byte = stream.read(1))
    break if byte == "\x00"

    result << byte
  end
  # +byte+ is nil only when the loop ended at EOF without seeing a NUL.
  byte.nil? ? nil : result
end
# Select objects from enumerator with +.type+ property
# equals to +type+.
#
# Different from naive +Array#select+ is this method
# will yield block whenever find a desired object.
#
# This method is used to simplify the same logic in methods
# {ELFFile#sections_by_type}, {ELFFile#segments_by_type}, etc.
# @param [Enumerator] enum An enumerator for further select.
# @param [Object] type The type you want.
# @return [Array<Object>]
# The return value will be objects in +enum+ with attribute
# +.type+ equals to +type+.
# Select objects from +enum+ whose +.type+ equals +type+.
#
# Unlike a plain +Array#select+, the optional block is invoked for every
# matching object as it is found.
# @param [Enumerator] enum An enumerator to filter.
# @param [Object] type The wanted type value.
# @return [Array<Object>] The objects whose +.type+ equals +type+.
def select_by_type(enum, type)
  matched = []
  enum.each do |obj|
    next unless obj.type == type

    yield obj if block_given?
    matched << obj
  end
  matched
end
end
extend ClassMethods
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/sections.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/sections.rb | # frozen_string_literal: true
# Require this file to load all sections classes.
require 'elftools/sections/section'
require 'elftools/sections/dynamic_section'
require 'elftools/sections/note_section'
require 'elftools/sections/null_section'
require 'elftools/sections/relocation_section'
require 'elftools/sections/str_tab_section'
require 'elftools/sections/sym_tab_section'
module ELFTools
# Defines different types of sections in this module.
module Sections
# Class methods of {Sections::Section}.
class << Section
  # Factory: instantiate the section subclass matching +header.sh_type+.
  # @param [ELFTools::Structs::ELF_Shdr] header Section header.
  # @param [#pos=, #read] stream Streaming object.
  # @return [ELFTools::Sections::Section]
  #   An instance of the subclass selected by +header.sh_type+; falls back
  #   to the generic {Section} for unrecognized types.
  def create(header, stream, *args, **kwargs)
    klass = case header.sh_type
            when Constants::SHT_DYNAMIC then DynamicSection
            when Constants::SHT_NULL then NullSection
            when Constants::SHT_NOTE then NoteSection
            when Constants::SHT_RELA, Constants::SHT_REL then RelocationSection
            when Constants::SHT_STRTAB then StrTabSection
            when Constants::SHT_SYMTAB, Constants::SHT_DYNSYM then SymTabSection
            else Section
            end
    klass.new(header, stream, *args, **kwargs)
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/relocation_section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/relocation_section.rb | # frozen_string_literal: true
require 'elftools/constants'
require 'elftools/sections/section'
require 'elftools/structs'
module ELFTools
module Sections
# Class of relocation section.
# Contains relocation entries (REL or RELA) referring to other sections.
class RelocationSection < Section
# Is this relocation a RELA or REL type.
# @return [Boolean] If is RELA.
def rela?
header.sh_type == Constants::SHT_RELA
end
# Number of relocations in this section.
# @return [Integer] The number.
def num_relocations
header.sh_size / header.sh_entsize
end
# Acquire the +n+-th relocation, 0-based.
#
# relocations are lazy loaded.
# @param [Integer] n The index.
# @return [ELFTools::Relocation, nil]
# The target relocation.
# If +n+ is out of bound, +nil+ is returned.
def relocation_at(n)
@relocations ||= LazyArray.new(num_relocations, &method(:create_relocation))
@relocations[n]
end
# Iterate all relocations.
#
# All relocations are lazy loading, the relocation
# only be created whenever accessing it.
# @yieldparam [ELFTools::Relocation] rel A relocation object.
# @yieldreturn [void]
# @return [Enumerator<ELFTools::Relocation>, Array<ELFTools::Relocation>]
# If block is not given, an enumerator will be returned.
# Otherwise, the whole relocations will be returned.
def each_relocations(&block)
  return enum_for(:each_relocations) unless block_given?

  # Visit each lazily-created relocation in index order, yielding it and
  # collecting it, so the whole Array of relocations is the return value.
  (0...num_relocations).map do |idx|
    rel = relocation_at(idx)
    block.call(rel)
    rel
  end
end
# Simply use {#relocations} to get all relocations.
# @return [Array<ELFTools::Relocation>]
# Whole relocations.
def relocations
  # Materialize the lazy enumerator produced by #each_relocations.
  enum_for(:each_relocations).to_a
end
private
def create_relocation(n)
  # Relocation entries form a table at sh_offset, sh_entsize bytes apart.
  stream.pos = header.sh_offset + n * header.sh_entsize
  # RELA sections carry an explicit addend field; REL sections do not.
  klass = rela? ? Structs::ELF_Rela : Structs::ELF_Rel
  rel = klass.new(endian: header.class.self_endian, offset: stream.pos)
  rel.elf_class = header.elf_class
  rel.read(stream)
  Relocation.new(rel, stream)
end
end
end
# A relocation entry.
#
# Can be either a REL or RELA relocation.
# XXX: move this to an independent file?
class Relocation
  attr_reader :header # @return [ELFTools::Structs::ELF_Rel, ELFTools::Structs::ELF_Rela] Rel(a) header.
  attr_reader :stream # @return [#pos=, #read] Streaming object.

  # Instantiate a {Relocation} object.
  # @param [ELFTools::Structs::ELF_Rel, ELFTools::Structs::ELF_Rela] header Parsed entry.
  # @param [#pos=, #read] stream Streaming object.
  def initialize(header, stream)
    @header = header
    @stream = stream
  end

  # +r_info+ packs both the symbol index and the relocation type;
  # this extracts the symbol index (the high bits).
  # @return [Integer] Symbol index.
  def r_info_sym
    header.r_info >> mask_bit
  end
  alias symbol_index r_info_sym

  # Extracts the relocation-type part (the low bits) of +r_info+.
  # @return [Integer] Relocation type.
  def r_info_type
    header.r_info & ((1 << mask_bit) - 1)
  end
  alias type r_info_type

  private

  # Number of low bits holding the type field:
  # 8 on 32-bit ELF, 32 on 64-bit ELF.
  def mask_bit
    header.elf_class == 32 ? 8 : 32
  end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/section.rb | # frozen_string_literal: true
require 'elftools/constants'
module ELFTools
module Sections
# Base class of sections.
class Section
  attr_reader :header # @return [ELFTools::Structs::ELF_Shdr] Section header.
  attr_reader :stream # @return [#pos=, #read] Streaming object.

  # Instantiate a {Section} object.
  # @param [ELFTools::Structs::ELF_Shdr] header
  #   The section header object.
  # @param [#pos=, #read] stream
  #   The streaming object for further dump.
  # @param [ELFTools::Sections::StrTabSection, Proc] strtab
  #   The string table object, for fetching section names.
  #   If a +Proc+ is given, it is called on first access of +#name+.
  # @param [Method] offset_from_vma
  #   The method to get offset of file, given virtual memory address.
  def initialize(header, stream, offset_from_vma: nil, strtab: nil, **_kwargs)
    @header = header
    @stream = stream
    @strtab = strtab
    @offset_from_vma = offset_from_vma
  end

  # Return +header.sh_type+ in a simpler way.
  # @return [Integer]
  #   The type; meanings of types are defined in {Constants::SHT}.
  def type
    header.sh_type.to_i
  end

  # Get name of this section.
  # Lazily resolved through the string-table callback and memoized.
  # @return [String] The name.
  def name
    @name ||= @strtab.call.name_at(header.sh_name)
  end

  # Fetch data of this section.
  # Reads +sh_size+ bytes starting at +sh_offset+; not cached.
  # @return [String] Data.
  def data
    stream.pos = header.sh_offset
    stream.read(header.sh_size)
  end

  # Is this a null section?
  # @return [Boolean] No it's not. Overridden by {NullSection}.
  def null?
    false
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/sym_tab_section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/sym_tab_section.rb | # frozen_string_literal: true
require 'elftools/sections/section'
module ELFTools
module Sections
# Class of symbol table section.
# Usually for section .symtab and .dynsym,
# which will refer to symbols in ELF file.
class SymTabSection < Section
# Instantiate a {SymTabSection} object.
# There's a +section_at+ lambda for {SymTabSection}
# to easily fetch other sections.
# @param [ELFTools::Structs::ELF_Shdr] header
# See {Section#initialize} for more information.
# @param [#pos=, #read] stream
# See {Section#initialize} for more information.
# @param [Proc] section_at
# The method for fetching other sections by index.
# This lambda should be {ELFTools::ELFFile#section_at}.
def initialize(header, stream, section_at: nil, **_kwargs)
  # Callback used to resolve the linked string table (see #symstr).
  @section_at = section_at
  # Remaining setup (header/stream/strtab) is done by Section#initialize.
  super
end
# Number of symbols.
# @return [Integer] The number.
# @example
# symtab.num_symbols
# #=> 75
def num_symbols
header.sh_size / header.sh_entsize
end
# Acquire the +n+-th symbol, 0-based.
#
# Symbols are lazy loaded.
# @param [Integer] n The index.
# @return [ELFTools::Sections::Symbol, nil]
# The target symbol.
# If +n+ is out of bound, +nil+ is returned.
def symbol_at(n)
@symbols ||= LazyArray.new(num_symbols, &method(:create_symbol))
@symbols[n]
end
# Iterate all symbols.
#
# All symbols are lazy loading, the symbol
# only be created whenever accessing it.
# This method is useful for {#symbol_by_name}
# since not all symbols need to be created.
# @yieldparam [ELFTools::Sections::Symbol] sym A symbol object.
# @yieldreturn [void]
# @return [Enumerator<ELFTools::Sections::Symbol>, Array<ELFTools::Sections::Symbol>]
# If block is not given, an enumerator will be returned.
# Otherwise return array of symbols.
def each_symbols(&block)
  return enum_for(:each_symbols) unless block_given?

  # Visit each lazily-created symbol in index order, yielding it and
  # collecting it, so the whole Array of symbols is the return value.
  (0...num_symbols).map do |idx|
    sym = symbol_at(idx)
    block.call(sym)
    sym
  end
end
# Simply use {#symbols} to get all symbols.
# @return [Array<ELFTools::Sections::Symbol>]
# The whole symbols.
def symbols
each_symbols.to_a
end
# Get symbol by its name.
# @param [String] name
# The name of symbol.
# @return [ELFTools::Sections::Symbol] Desired symbol.
# Get a symbol by its name.
# @param [String] name The name of the symbol.
# @return [ELFTools::Sections::Symbol, nil] The first matching symbol.
def symbol_by_name(name)
  # Linear scan over the lazily-created symbols; stops at the first match.
  each_symbols.find { |sym| sym.name == name }
end
# Return the symbol string section.
# Lazy loaded.
# @return [ELFTools::Sections::StrTabSection] The string table section.
def symstr
@symstr ||= @section_at.call(header.sh_link)
end
private
def create_symbol(n)
  # Symbol entries form a table at sh_offset, sh_entsize bytes apart.
  stream.pos = header.sh_offset + n * header.sh_entsize
  # Pick the 32- or 64-bit symbol layout for this ELF class.
  sym = Structs::ELF_sym[header.elf_class].new(endian: header.class.self_endian, offset: stream.pos)
  sym.read(stream)
  # Pass the string-table resolver lazily so it is only fetched on #name.
  Symbol.new(sym, stream, symstr: method(:symstr))
end
end
# Class of symbol.
#
# XXX: Should this class be defined in an independent file?
class Symbol
  # NOTE: this class shadows ::Symbol for unqualified references
  # inside the ELFTools::Sections namespace.
  attr_reader :header # @return [ELFTools::Structs::ELF32_sym, ELFTools::Structs::ELF64_sym] Symbol header.
  attr_reader :stream # @return [#pos=, #read] Streaming object.

  # Instantiate a {ELFTools::Sections::Symbol} object.
  # @param [ELFTools::Structs::ELF32_sym, ELFTools::Structs::ELF64_sym] header
  #   The symbol header.
  # @param [#pos=, #read] stream The streaming object.
  # @param [ELFTools::Sections::StrTabSection, Proc] symstr
  #   The symbol string section.
  #   If a +Proc+ is given, it is called on first access of {Symbol#name}.
  def initialize(header, stream, symstr: nil)
    @header = header
    @stream = stream
    @symstr = symstr
  end

  # Return the symbol name.
  # Resolved via the string-table callback and memoized.
  # @return [String] The name.
  def name
    @name ||= @symstr.call.name_at(header.st_name)
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/str_tab_section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/str_tab_section.rb | # frozen_string_literal: true
require 'elftools/sections/section'
require 'elftools/util'
module ELFTools
module Sections
# Class of string table section.
# Usually for section .strtab and .dynstr,
# which record names.
class StrTabSection < Section
  # Return the section or symbol name.
  # @param [Integer] offset
  #   Usually from +shdr.sh_name+ or +sym.st_name+.
  # @return [String, nil]
  #   The name without null bytes; +nil+ when EOF is reached before a
  #   terminating NUL (see {Util.cstring}).
  def name_at(offset)
    # Names are NUL-terminated strings relative to the table's file offset.
    Util.cstring(stream, header.sh_offset + offset)
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/dynamic_section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/dynamic_section.rb | # frozen_string_literal: true
require 'elftools/dynamic'
require 'elftools/sections/section'
module ELFTools
module Sections
# Class for dynamic table section.
#
# This section should always be named .dynamic.
# This class knows how to get the list of dynamic tags.
class DynamicSection < Section
  include ELFTools::Dynamic

  # Get the start address of tags.
  # Used by {ELFTools::Dynamic} as the base offset for tag parsing.
  # @return [Integer] Start address of tags.
  def tag_start
    header.sh_offset
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/null_section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/null_section.rb | # frozen_string_literal: true
require 'elftools/sections/section'
module ELFTools
module Sections
# Class of null section.
# A null section marks the end of the linked
# list (+sh_link+) between sections.
class NullSection < Section
  # Is this a null section?
  # @return [Boolean] Always +true+ (overrides {Section#null?}).
  def null?
    true
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/note_section.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/sections/note_section.rb | # frozen_string_literal: true
require 'elftools/note'
require 'elftools/sections/section'
module ELFTools
module Sections
# Class of note section.
# Note section records notes
class NoteSection < Section
  # Load note related methods.
  include ELFTools::Note

  # Address offset of notes start.
  # Required by {ELFTools::Note#each_notes}.
  # @return [Integer] The offset.
  def note_start
    header.sh_offset
  end

  # The total size of notes in this section.
  # Required by {ELFTools::Note#each_notes}.
  # @return [Integer] The size.
  def note_total_size
    header.sh_size
  end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/note_segment.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/note_segment.rb | # frozen_string_literal: true
require 'elftools/note'
require 'elftools/segments/segment'
module ELFTools
module Segments
# Class of note segment.
class NoteSegment < Segment
# Load note related methods.
include ELFTools::Note
# Address offset of notes start.
# @return [Integer] The offset.
def note_start
header.p_offset
end
# The total size of notes in this segment.
# @return [Integer] The size.
def note_total_size
header.p_filesz
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/segment.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/segment.rb | # frozen_string_literal: true
module ELFTools
module Segments
# Base class of segments.
class Segment
attr_reader :header # @return [ELFTools::Structs::ELF32_Phdr, ELFTools::Structs::ELF64_Phdr] Program header.
attr_reader :stream # @return [#pos=, #read] Streaming object.
# Instantiate a {Segment} object.
# @param [ELFTools::Structs::ELF32_Phdr, ELFTools::Structs::ELF64_Phdr] header
# Program header.
# @param [#pos=, #read] stream
# Streaming object.
# @param [Method] offset_from_vma
# The method to get offset of file, given virtual memory address.
def initialize(header, stream, offset_from_vma: nil)
@header = header
@stream = stream
@offset_from_vma = offset_from_vma
end
# Return +header.p_type+ in a simpler way.
# @return [Integer]
# The type, meaning of types are defined in {Constants::PT}.
def type
header.p_type
end
# The content in this segment.
# @return [String] The content.
def data
stream.pos = header.p_offset
stream.read(header.p_filesz)
end
# Is this segment readable?
# @return [Boolean] True or false.
def readable?
(header.p_flags & 4) == 4
end
# Is this segment writable?
# @return [Boolean] True or false.
def writable?
(header.p_flags & 2) == 2
end
# Is this segment executable?
# @return [Boolean] True or false.
def executable?
(header.p_flags & 1) == 1
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/load_segment.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/load_segment.rb | # frozen_string_literal: true
require 'elftools/segments/segment'
module ELFTools
module Segments
# For DT_LOAD segment.
# Able to query between file offset and virtual memory address.
class LoadSegment < Segment
# Returns the start of this segment.
# @return [Integer]
# The file offset.
def file_head
header.p_offset.to_i
end
# Returns size in file.
# @return [Integer]
# The size.
def size
header.p_filesz.to_i
end
# Returns the end of this segment.
# @return [Integer]
# The file offset.
def file_tail
file_head + size
end
# Returns the start virtual address of this segment.
# @return [Integer]
# The vma.
def mem_head
header.p_vaddr.to_i
end
# Returns size in memory.
# @return [Integer]
# The size.
def mem_size
header.p_memsz.to_i
end
# Returns the end virtual address of this segment.
# @return [Integer]
# The vma.
def mem_tail
mem_head + mem_size
end
# Query if the given file offset located in this segment.
# @param [Integer] offset
# File offset.
# @param [Integer] size
# Size.
# @return [Boolean]
def offset_in?(offset, size = 0)
file_head <= offset && offset + size < file_tail
end
# Convert file offset into virtual memory address.
# @param [Integer] offset
# File offset.
# @return [Integer]
def offset_to_vma(offset)
# XXX: What if file_head is not aligned with p_vaddr (which is invalid according to ELF spec)?
offset - file_head + header.p_vaddr
end
# Query if the given virtual memory address located in this segment.
# @param [Integer] vma
# Virtual memory address.
# @param [Integer] size
# Size.
# @return [Boolean]
def vma_in?(vma, size = 0)
vma >= (header.p_vaddr & -header.p_align) &&
vma + size <= mem_tail
end
# Convert virtual memory address into file offset.
# @param [Integer] vma
# Virtual memory address.
# @return [Integer]
def vma_to_offset(vma)
vma - header.p_vaddr + header.p_offset
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/segments.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/segments.rb | # frozen_string_literal: true
# Require this file to load all segment classes.
require 'elftools/segments/segment'
require 'elftools/segments/dynamic_segment'
require 'elftools/segments/interp_segment'
require 'elftools/segments/load_segment'
require 'elftools/segments/note_segment'
module ELFTools
# Module for defining different types of segments.
module Segments
# Class methods of {Segments::Segment}.
class << Segment
# Use different class according to +header.p_type+.
# @param [ELFTools::Structs::ELF32_Phdr, ELFTools::Structs::ELF64_Phdr] header Program header of a segment.
# @param [#pos=, #read] stream Streaming object.
# @return [ELFTools::Segments::Segment]
# Return object dependes on +header.p_type+.
def create(header, stream, *args, **kwargs)
klass = case header.p_type
when Constants::PT_DYNAMIC then DynamicSegment
when Constants::PT_INTERP then InterpSegment
when Constants::PT_LOAD then LoadSegment
when Constants::PT_NOTE then NoteSegment
else Segment
end
klass.new(header, stream, *args, **kwargs)
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/dynamic_segment.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/dynamic_segment.rb | # frozen_string_literal: true
require 'elftools/segments/segment'
require 'elftools/dynamic'
module ELFTools
module Segments
# Class for dynamic table segment.
#
# This class knows how to get the list of dynamic tags.
class DynamicSegment < Segment
include Dynamic # rock!
# Get the start address of tags.
# @return [Integer] Start address of tags.
def tag_start
header.p_offset
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/interp_segment.rb | Library/Homebrew/vendor/bundle/ruby/3.4.0/gems/elftools-1.3.1/lib/elftools/segments/interp_segment.rb | # frozen_string_literal: true
require 'elftools/segments/segment'
module ELFTools
module Segments
# For DT_INTERP segment, knows how to get path of
# ELF interpreter.
class InterpSegment < Segment
# Get the path of interpreter.
# @return [String] Path to the interpreter.
# @example
# interp_segment.interp_name
# #=> '/lib64/ld-linux-x86-64.so.2'
def interp_name
data[0..-2] # remove last null byte
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy.rb | Library/Homebrew/livecheck/strategy.rb | # typed: strict
# frozen_string_literal: true
require "utils/curl"
require "livecheck/options"
module Homebrew
module Livecheck
# The `Livecheck::Strategy` module contains the various strategies as well
# as some general-purpose methods for working with them. Within the context
# of the `brew livecheck` command, strategies are established procedures
# for finding new software versions at a given source.
module Strategy
extend Utils::Curl
# {Strategy} priorities informally range from 1 to 10, where 10 is the
# highest priority. 5 is the default priority because it's roughly in
# the middle of this range. Strategies with a priority of 0 (or lower)
# are ignored.
DEFAULT_PRIORITY = 5
# cURL's default `--connect-timeout` value can be up to two minutes, so
# we need to use a more reasonable duration (in seconds) to avoid a
# lengthy wait when a connection can't be established.
CURL_CONNECT_TIMEOUT = 10
# cURL does not set a default `--max-time` value, so we provide a value
# to ensure cURL will time out in a reasonable amount of time.
CURL_MAX_TIME = T.let(CURL_CONNECT_TIMEOUT + 5, Integer)
# The `curl` process will sometimes hang indefinitely (despite setting
# the `--max-time` argument) and it needs to be quit for livecheck to
# continue. This value is used to set the `timeout` argument on
# `Utils::Curl` method calls in {Strategy}.
CURL_PROCESS_TIMEOUT = T.let(CURL_MAX_TIME + 5, Integer)
# The maximum number of redirections that `curl` should allow.
MAX_REDIRECTIONS = 5
# This value is passed to `#parse_curl_output` to ensure that the limit
# for the number of responses it will parse corresponds to the maximum
# number of responses in this context. The `+ 1` here accounts for the
# situation where there are exactly `MAX_REDIRECTIONS` number of
# redirections, followed by a final `200 OK` response.
MAX_PARSE_ITERATIONS = T.let(MAX_REDIRECTIONS + 1, Integer)
# Baseline `curl` arguments used in {Strategy} methods.
DEFAULT_CURL_ARGS = T.let([
# Follow redirections to handle mirrors, relocations, etc.
"--location",
"--max-redirs", MAX_REDIRECTIONS.to_s,
# Avoid progress bar text, so we can reliably identify `curl` error
# messages in output
"--silent"
].freeze, T::Array[String])
# `curl` arguments used in `Strategy#page_content` method.
PAGE_CONTENT_CURL_ARGS = T.let(([
"--compressed",
# Return an error when the HTTP response code is 400 or greater but
# continue to return body content
"--fail-with-body",
# Include HTTP response headers in output, so we can identify the
# final URL after any redirections
"--include",
] + DEFAULT_CURL_ARGS).freeze, T::Array[String])
# Baseline `curl` options used in {Strategy} methods.
DEFAULT_CURL_OPTIONS = T.let({
print_stdout: false,
print_stderr: false,
debug: false,
verbose: false,
timeout: CURL_PROCESS_TIMEOUT,
connect_timeout: CURL_CONNECT_TIMEOUT,
max_time: CURL_MAX_TIME,
retries: 0,
}.freeze, T::Hash[Symbol, T.untyped])
# A regex used to identify a tarball extension at the end of a string.
TARBALL_EXTENSION_REGEX = /
\.t
(?:ar(?:\.(?:bz2|gz|lz|lzma|lzo|xz|Z|zst))?|
b2|bz2?|z2|az|gz|lz|lzma|xz|Z|aZ|zst)
$
/ix
# An error message to use when a `strategy` block returns a value of
# an inappropriate type.
INVALID_BLOCK_RETURN_VALUE_MSG = "Return value of a strategy block must be a string or array of strings."
# Creates and/or returns a `@strategies` `Hash`, which maps a snake
# case strategy name symbol (e.g. `:page_match`) to the associated
# strategy.
#
# At present, this should only be called after tap strategies have been
# loaded, otherwise livecheck won't be able to use them.
# @return [Hash]
sig { returns(T::Hash[Symbol, T.untyped]) }
def self.strategies
@strategies ||= T.let(Strategy.constants.sort.each_with_object({}) do |const_symbol, hash|
constant = Strategy.const_get(const_symbol)
next unless constant.is_a?(Class)
key = Utils.underscore(const_symbol).to_sym
hash[key] = constant
end, T.nilable(T::Hash[Symbol, T.untyped]))
end
private_class_method :strategies
# Returns the strategy that corresponds to the provided `Symbol` (or
# `nil` if there is no matching strategy).
#
# @param symbol [Symbol, nil] the strategy name in snake case as a
# `Symbol` (e.g. `:page_match`)
# @return [Class, nil]
sig { params(symbol: T.nilable(Symbol)).returns(T.untyped) }
def self.from_symbol(symbol)
strategies[symbol] if symbol.present?
end
# Returns an array of strategies that apply to the provided URL.
#
# @param url [String] the URL to check for matching strategies
# @param livecheck_strategy [Symbol] a strategy symbol from the
# `livecheck` block
# @param regex_provided [Boolean] whether a regex is provided in the
# `livecheck` block
# @param block_provided [Boolean] whether a `strategy` block is provided
# in the `livecheck` block
# @return [Array]
sig {
params(
url: String,
livecheck_strategy: T.nilable(Symbol),
regex_provided: T::Boolean,
block_provided: T::Boolean,
).returns(T::Array[T.untyped])
}
def self.from_url(url, livecheck_strategy: nil, regex_provided: false, block_provided: false)
usable_strategies = strategies.select do |strategy_symbol, strategy|
if strategy == PageMatch
# Only treat the strategy as usable if the `livecheck` block
# contains a regex and/or `strategy` block
next if !regex_provided && !block_provided
elsif [Json, Xml, Yaml].include?(strategy)
# Only treat the strategy as usable if the `livecheck` block
# specifies the strategy and contains a `strategy` block
next if (livecheck_strategy != strategy_symbol) || !block_provided
elsif strategy.const_defined?(:PRIORITY) &&
!strategy.const_get(:PRIORITY).positive? &&
livecheck_strategy != strategy_symbol
# Ignore strategies with a priority of 0 or lower, unless the
# strategy is specified in the `livecheck` block
next
end
strategy.respond_to?(:match?) && strategy.match?(url)
end.values
# Sort usable strategies in descending order by priority, using the
# DEFAULT_PRIORITY when a strategy doesn't contain a PRIORITY constant
usable_strategies.sort_by do |strategy|
(strategy.const_defined?(:PRIORITY) ? -strategy.const_get(:PRIORITY) : -DEFAULT_PRIORITY)
end
end
# Creates `curl` `--data` or `--json` arguments (for `POST` requests`)
# from related `livecheck` block `url` options.
#
# @param post_form [Hash, nil] data to encode using `URI::encode_www_form`
# @param post_json [Hash, nil] data to encode using `JSON::generate`
# @return [Array]
sig {
params(
post_form: T.nilable(T::Hash[Symbol, String]),
post_json: T.nilable(T::Hash[Symbol, T.anything]),
).returns(T::Array[String])
}
def self.post_args(post_form: nil, post_json: nil)
args = if post_form.present?
require "uri"
["--data", URI.encode_www_form(post_form)]
elsif post_json.present?
require "json"
["--json", JSON.generate(post_json)]
else
[]
end
if (content_length = args[1]&.length)
args << "--header" << "Content-Length: #{content_length}"
end
args
end
# Collects HTTP response headers, starting with the provided URL.
# Redirections will be followed and all the response headers are
# collected into an array of hashes.
#
# @param url [String] the URL to fetch
# @param options [Options] options to modify behavior
# @return [Array]
sig { params(url: String, options: Options).returns(T::Array[T::Hash[String, String]]) }
def self.page_headers(url, options: Options.new)
headers = []
if options.post_form || options.post_json
curl_post_args = ["--request", "POST", *post_args(
post_form: options.post_form,
post_json: options.post_json,
)]
end
user_agents = if options.user_agent
[options.user_agent]
else
[:default, :browser]
end
user_agents.each do |user_agent|
begin
parsed_output = curl_headers(
*curl_post_args,
"--max-redirs",
MAX_REDIRECTIONS.to_s,
url,
wanted_headers: ["location", "content-disposition"],
use_homebrew_curl: options.homebrew_curl || false,
cookies: options.cookies,
header: options.header,
referer: options.referer,
user_agent:,
**DEFAULT_CURL_OPTIONS,
)
rescue ErrorDuringExecution
next
end
parsed_output[:responses].each { |response| headers << response[:headers] }
break if headers.present?
end
headers
end
# Fetches the content at the URL and returns a hash containing the
# content and, if there are any redirections, the final URL.
# If `curl` encounters an error, the hash will contain a `:messages`
# array with the error message instead.
#
# @param url [String] the URL of the content to check
# @param options [Options] options to modify behavior
# @return [Hash]
sig { params(url: String, options: Options).returns(T::Hash[Symbol, T.untyped]) }
def self.page_content(url, options: Options.new)
if options.post_form || options.post_json
curl_post_args = ["--request", "POST", *post_args(
post_form: options.post_form,
post_json: options.post_json,
)]
end
user_agents = if options.user_agent
[options.user_agent]
else
[:default, :browser]
end
stderr = T.let(nil, T.nilable(String))
user_agents.each do |user_agent|
stdout, stderr, status = curl_output(
*curl_post_args,
*PAGE_CONTENT_CURL_ARGS, url,
**DEFAULT_CURL_OPTIONS,
use_homebrew_curl: options.homebrew_curl ||
!curl_supports_fail_with_body? ||
false,
cookies: options.cookies,
header: options.header,
referer: options.referer,
user_agent:
)
next unless status.success?
# stdout contains the header information followed by the page content.
# We use #scrub here to avoid "invalid byte sequence in UTF-8" errors.
output = stdout.scrub
# Separate the head(s)/body and identify the final URL (after any
# redirections)
parsed_output = parse_curl_output(output, max_iterations: MAX_PARSE_ITERATIONS)
final_url = curl_response_last_location(parsed_output[:responses], absolutize: true, base_url: url)
data = { content: parsed_output[:body] }
data[:final_url] = final_url if final_url.present? && final_url != url
return data
end
error_msgs = stderr&.scan(/^curl:.+$/)
{ messages: error_msgs.presence || ["cURL failed without a detectable error"] }
end
# Handles the return value from a `strategy` block in a `livecheck`
# block.
#
# @param value [] the return value from a `strategy` block
# @return [Array]
sig { params(value: T.untyped).returns(T::Array[String]) }
def self.handle_block_return(value)
case value
when String
[value]
when Array
value.compact.uniq
when nil
[]
else
raise TypeError, INVALID_BLOCK_RETURN_VALUE_MSG
end
end
end
end
end
require_relative "strategy/apache"
require_relative "strategy/bitbucket"
require_relative "strategy/cpan"
require_relative "strategy/crate"
require_relative "strategy/electron_builder"
require_relative "strategy/extract_plist"
require_relative "strategy/git"
require_relative "strategy/github_latest"
require_relative "strategy/github_releases"
require_relative "strategy/gnome"
require_relative "strategy/gnu"
require_relative "strategy/hackage"
require_relative "strategy/header_match"
require_relative "strategy/json"
require_relative "strategy/launchpad"
require_relative "strategy/npm"
require_relative "strategy/page_match"
require_relative "strategy/pypi"
require_relative "strategy/sourceforge"
require_relative "strategy/sparkle"
require_relative "strategy/xml"
require_relative "strategy/xorg"
require_relative "strategy/yaml"
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/options.rb | Library/Homebrew/livecheck/options.rb | # typed: strong
# frozen_string_literal: true
module Homebrew
module Livecheck
# Options to modify livecheck's behavior. These primarily come from
# `livecheck` blocks but they can also be set by livecheck at runtime.
#
# Option values use a `nil` default to indicate that the value has not been
# set.
class Options < T::Struct
# Cookies for curl to use when making a request.
prop :cookies, T.nilable(T::Hash[String, String])
# Header(s) for curl to use when making a request.
prop :header, T.nilable(T.any(String, T::Array[String]))
# Whether to use brewed curl.
prop :homebrew_curl, T.nilable(T::Boolean)
# Form data to use when making a `POST` request.
prop :post_form, T.nilable(T::Hash[Symbol, String])
# JSON data to use when making a `POST` request.
prop :post_json, T.nilable(T::Hash[Symbol, T.anything])
# Referer for curl to use when making a request.
prop :referer, T.nilable(String)
# User agent for curl to use when making a request. Symbol arguments
# should use a value supported by {Utils::Curl.curl_args}.
prop :user_agent, T.nilable(T.any(String, Symbol))
# Returns a `Hash` of options that are provided as arguments to `url`.
sig { returns(T::Hash[Symbol, T.untyped]) }
def url_options
{
cookies:,
header:,
homebrew_curl:,
post_form:,
post_json:,
referer:,
user_agent:,
}
end
# Returns a `Hash` of all instance variables, using `String` keys.
sig { returns(T::Hash[String, T.untyped]) }
def to_hash
T.let(serialize, T::Hash[String, T.untyped])
end
# Returns a `Hash` of all instance variables, using `Symbol` keys.
sig { returns(T::Hash[Symbol, T.untyped]) }
def to_h = to_hash.transform_keys(&:to_sym)
# Returns a new object formed by merging `other` values with a copy of
# `self`.
#
# `nil` values are removed from `other` before merging if it is an
# `Options` object, as these are unitiailized values. This ensures that
# existing values in `self` aren't unexpectedly overwritten with defaults.
sig { params(other: T.any(Options, T::Hash[Symbol, T.untyped])).returns(Options) }
def merge(other)
return dup if other.empty?
this_hash = to_h
other_hash = other.is_a?(Options) ? other.to_h : other
return dup if this_hash == other_hash
new_options = this_hash.merge(other_hash)
Options.new(**new_options)
end
# Merges values from `other` into `self` and returns `self`.
#
# `nil` values are removed from `other` before merging if it is an
# `Options` object, as these are unitiailized values. This ensures that
# existing values in `self` aren't unexpectedly overwritten with defaults.
sig { params(other: T.any(Options, T::Hash[Symbol, T.untyped])).returns(Options) }
def merge!(other)
return self if other.empty?
if other.is_a?(Options)
return self if self == other
other.instance_variables.each do |ivar|
next if (v = T.let(other.instance_variable_get(ivar), Object)).nil?
instance_variable_set(ivar, v)
end
else
other.each do |k, v|
cmd = :"#{k}="
send(cmd, v) if respond_to?(cmd)
end
end
self
end
sig { params(other: Object).returns(T::Boolean) }
def ==(other)
return false unless other.is_a?(Options)
@cookies == other.cookies &&
@header == other.header &&
@homebrew_curl == other.homebrew_curl &&
@post_form == other.post_form &&
@post_json == other.post_json &&
@referer == other.referer &&
@user_agent == other.user_agent
end
alias eql? ==
# Whether the object has only default values.
sig { returns(T::Boolean) }
def empty? = to_hash.empty?
# Whether the object has any non-default values.
sig { returns(T::Boolean) }
def present? = !empty?
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategic.rb | Library/Homebrew/livecheck/strategic.rb | # typed: strong
# frozen_string_literal: true
module Homebrew
module Livecheck
# The interface for livecheck strategies. Because third-party strategies
# are not required to extend this module, we do not provide any default
# method implementations here.
module Strategic
extend T::Helpers
interface!
# Whether the strategy can be applied to the provided URL.
#
# @param url [String] the URL to match against
sig { abstract.params(url: String).returns(T::Boolean) }
def match?(url); end
# Checks the content at the URL for new versions. Implementations may not
# support all options.
#
# @param url the URL of the content to check
# @param regex a regex for matching versions in content
# @param provided_content content to check instead of
# fetching
# @param options options to modify behavior
# @param block a block to match the content
sig {
abstract.params(
url: String,
regex: T.nilable(Regexp),
provided_content: T.nilable(String),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def find_versions(url:, regex: nil, provided_content: nil, options: Options.new, &block); end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/livecheck.rb | Library/Homebrew/livecheck/livecheck.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/constants"
require "livecheck/error"
require "livecheck/livecheck_version"
require "livecheck/skip_conditions"
require "livecheck/strategy"
require "addressable"
require "utils/output"
module Homebrew
# The {Livecheck} module consists of methods used by the `brew livecheck`
# command. These methods print the requested livecheck information
# for formulae.
module Livecheck
extend Utils::Output::Mixin
NO_CURRENT_VERSION_MSG = "Unable to identify current version"
NO_VERSIONS_MSG = "Unable to get versions"
UNSTABLE_VERSION_KEYWORDS = T.let(%w[
alpha
beta
bpo
dev
experimental
prerelease
preview
rc
].freeze, T::Array[String])
private_constant :UNSTABLE_VERSION_KEYWORDS
sig { params(strategy_class: T::Class[Strategic]).returns(String) }
private_class_method def self.livecheck_strategy_names(strategy_class)
@livecheck_strategy_names ||= T.let({}, T.nilable(T::Hash[T::Class[Strategic], String]))
@livecheck_strategy_names[strategy_class] ||= Utils.demodulize(strategy_class.name)
end
sig { params(strategy_class: T::Class[Strategic]).returns(T::Array[Symbol]) }
private_class_method def self.livecheck_find_versions_parameters(strategy_class)
@livecheck_find_versions_parameters ||= T.let({}, T.nilable(T::Hash[T::Class[Strategic], T::Array[Symbol]]))
@livecheck_find_versions_parameters[strategy_class] ||=
T::Utils.signature_for_method(strategy_class.method(:find_versions)).parameters.map(&:second)
end
# Uses `formulae_and_casks_to_check` to identify taps in use other than
# homebrew/core and homebrew/cask and loads strategies from them.
sig { params(formulae_and_casks_to_check: T::Array[T.any(Formula, Cask::Cask)]).void }
def self.load_other_tap_strategies(formulae_and_casks_to_check)
other_taps = {}
formulae_and_casks_to_check.each do |formula_or_cask|
next if formula_or_cask.tap.blank?
next if formula_or_cask.tap.core_tap?
next if formula_or_cask.tap.core_cask_tap?
next if other_taps[formula_or_cask.tap.name]
other_taps[formula_or_cask.tap.name] = formula_or_cask.tap
end
other_taps = other_taps.sort.to_h
other_taps.each_value do |tap|
tap_strategy_path = "#{tap.path}/livecheck/strategy"
Dir["#{tap_strategy_path}/*.rb"].each { require(it) } if Dir.exist?(tap_strategy_path)
end
end
# Resolve formula/cask references in `livecheck` blocks to a final formula
# or cask.
sig {
params(
formula_or_cask: T.any(Formula, Cask::Cask),
first_formula_or_cask: T.any(Formula, Cask::Cask),
references: T::Array[T.any(Formula, Cask::Cask)],
full_name: T::Boolean,
debug: T::Boolean,
).returns(T.nilable(T::Array[T.untyped]))
}
def self.resolve_livecheck_reference(
formula_or_cask,
first_formula_or_cask = formula_or_cask,
references = [],
full_name: false,
debug: false
)
# Check the `livecheck` block for a formula or cask reference
livecheck = formula_or_cask.livecheck
livecheck_formula = livecheck.formula
livecheck_cask = livecheck.cask
return [nil, references] if livecheck_formula.blank? && livecheck_cask.blank?
# Load the referenced formula or cask
referenced_formula_or_cask = Homebrew.with_no_api_env do
if livecheck_formula
Formulary.factory(livecheck_formula)
elsif livecheck_cask
Cask::CaskLoader.load(livecheck_cask)
end
end
# Error if a `livecheck` block references a formula/cask that was already
# referenced (or itself)
if referenced_formula_or_cask == first_formula_or_cask ||
referenced_formula_or_cask == formula_or_cask ||
references.include?(referenced_formula_or_cask)
if debug
# Print the chain of references for debugging
puts "Reference Chain:"
puts package_or_resource_name(first_formula_or_cask, full_name:)
references << referenced_formula_or_cask
references.each do |ref_formula_or_cask|
puts package_or_resource_name(ref_formula_or_cask, full_name:)
end
end
raise "Circular formula/cask reference encountered"
end
references << referenced_formula_or_cask
# Check the referenced formula/cask for a reference
next_referenced_formula_or_cask, next_references = resolve_livecheck_reference(
referenced_formula_or_cask,
first_formula_or_cask,
references,
full_name:,
debug:,
)
# Returning references along with the final referenced formula/cask
# allows us to print the chain of references in the debug output
[
next_referenced_formula_or_cask || referenced_formula_or_cask,
next_references,
]
end
# Executes the livecheck logic for each formula/cask in the
# `formulae_and_casks_to_check` array and prints the results.
#
# With `json: true` a JSON array of per-item result hashes is printed
# (with a progress bar on stderr unless `quiet`); otherwise results are
# printed in human-readable form. With `newer_only: true`, only outdated
# items are reported.
sig {
params(
formulae_and_casks_to_check: T::Array[T.any(Formula, Cask::Cask)],
full_name: T::Boolean,
handle_name_conflict: T::Boolean,
check_resources: T::Boolean,
json: T::Boolean,
newer_only: T::Boolean,
extract_plist: T::Boolean,
debug: T::Boolean,
quiet: T::Boolean,
verbose: T::Boolean,
).void
}
def self.run_checks(
formulae_and_casks_to_check,
full_name: false, handle_name_conflict: false, check_resources: false, json: false, newer_only: false,
extract_plist: false, debug: false, quiet: false, verbose: false
)
load_other_tap_strategies(formulae_and_casks_to_check)
# Casks whose fully-qualified name collides with another checked item;
# these are labeled with a " (cask)" suffix in the output below.
ambiguous_casks = []
if handle_name_conflict
ambiguous_casks = formulae_and_casks_to_check
.group_by { |item| package_or_resource_name(item, full_name: true) }
.values
.select { |items| items.length > 1 }
.flatten
.select { |item| item.is_a?(Cask::Cask) }
end
# Without `full_name`, items whose short names collide are printed using
# their fully-qualified names instead (see `use_full_name` below).
ambiguous_names = []
unless full_name
ambiguous_names =
(formulae_and_casks_to_check - ambiguous_casks).group_by { |item| package_or_resource_name(item) }
.values
.select { |items| items.length > 1 }
.flatten
end
has_a_newer_upstream_version = T.let(false, T::Boolean)
formulae_and_casks_total = formulae_and_casks_to_check.count
# In JSON mode the human-readable output goes to stderr so stdout stays
# valid JSON; the progress bar is only shown on a TTY.
if json && !quiet && $stderr.tty?
Tty.with($stderr) do |stderr|
stderr.puts Formatter.headline("Running checks", color: :blue)
end
require "ruby-progressbar"
progress = ProgressBar.create(
total: formulae_and_casks_total,
progress_mark: "#",
remainder_mark: ".",
format: " %t: [%B] %c/%C ",
output: $stderr,
)
end
# Allow ExtractPlist strategy if only one formula/cask is being checked.
extract_plist = true if formulae_and_casks_total == 1
# Check each item, collecting JSON result hashes (nil entries are
# removed with `compact` before printing at the end).
formulae_checked = formulae_and_casks_to_check.map.with_index do |formula_or_cask, i|
case formula_or_cask
when Formula
formula = formula_or_cask
formula.head&.downloader&.quiet!
when Cask::Cask
cask = formula_or_cask
end
use_full_name = full_name || ambiguous_names.include?(formula_or_cask)
name = package_or_resource_name(formula_or_cask, full_name: use_full_name)
referenced_formula_or_cask, livecheck_references =
resolve_livecheck_reference(formula_or_cask, full_name: use_full_name, debug:)
if debug && i.positive?
puts <<~EOS
----------
EOS
elsif debug
puts
end
# Check skip conditions for a referenced formula/cask
if referenced_formula_or_cask
skip_info = SkipConditions.referenced_skip_information(
referenced_formula_or_cask,
name,
full_name: use_full_name,
verbose:,
extract_plist:,
)
end
skip_info ||= SkipConditions.skip_information(
formula_or_cask,
full_name: use_full_name,
verbose:,
extract_plist:,
)
if skip_info.present?
next skip_info if json && !newer_only
SkipConditions.print_skip_information(skip_info) if !newer_only && !quiet
next
end
# Use the `stable` version for comparison except for installed
# HEAD-only formulae. A formula with `stable` and `head` that's
# installed using `--head` will still use the `stable` version for
# comparison.
current = if formula
if formula.head_only?
formula_commit = formula.any_installed_version&.version&.commit
Version.new(formula_commit) if formula_commit
elsif (stable = formula.stable)
stable.version
end
else
Version.new(formula_or_cask.version)
end
# Without a current version there is nothing to compare against: raise
# (or record an error hash in JSON mode).
unless current
raise Livecheck::Error, NO_CURRENT_VERSION_MSG unless json
next if quiet
next status_hash(formula_or_cask, "error", [NO_CURRENT_VERSION_MSG], full_name: use_full_name, verbose:)
end
current_str = current.to_s
current = LivecheckVersion.create(formula_or_cask, current)
latest = if formula&.head_only?
Version.new(T.must(formula.head).downloader.fetch_last_commit)
else
version_info = latest_version(
formula_or_cask,
referenced_formula_or_cask:,
livecheck_references:,
json:, full_name: use_full_name, verbose:, debug:
)
version_info[:latest] if version_info.present?
end
# Optionally check the formula's resources against the newly-found
# latest formula version.
check_for_resources = check_resources && formula_or_cask.is_a?(Formula) && formula_or_cask.resources.present?
if check_for_resources
resource_version_info = formula_or_cask.resources.map do |resource|
res_skip_info ||= SkipConditions.skip_information(resource, verbose:)
if res_skip_info.present?
res_skip_info
else
res_version_info = resource_version(
resource,
latest.to_s,
json:,
full_name: use_full_name,
debug:,
quiet:,
verbose:,
)
if res_version_info.empty?
status_hash(resource, "error", [NO_VERSIONS_MSG], verbose:)
else
res_version_info
end
end
end.compact_blank
Homebrew.failed = true if resource_version_info.any? { |info| info[:status] == "error" }
end
# No latest version found: raise (or record an error hash in JSON mode).
if latest.blank?
raise Livecheck::Error, NO_VERSIONS_MSG unless json
next if quiet
next version_info if version_info.is_a?(Hash) && version_info[:status] && version_info[:messages]
latest_info = status_hash(formula_or_cask, "error", [NO_VERSIONS_MSG], full_name: use_full_name,
verbose:)
if check_for_resources
unless verbose
resource_version_info.map! do |info|
info.delete(:meta)
info
end
end
latest_info[:resources] = resource_version_info
end
next latest_info
end
# Drop a trailing "-release" suffix from the latest version unless the
# current version also carries one.
if (m = latest.to_s.match(/(.*)-release$/)) && !current.to_s.match(/.*-release$/)
latest = Version.new(m[1])
end
latest_str = latest.to_s
latest = LivecheckVersion.create(formula_or_cask, latest)
is_outdated = if formula&.head_only?
# A HEAD-only formula is considered outdated if the latest upstream
# commit hash is different than the installed version's commit hash
(current != latest)
else
(current < latest)
end
is_newer_than_upstream = (formula&.stable? || cask) && (current > latest)
info = {}
info[:formula] = name if formula
info[:cask] = name if cask
info[:version] = {
current: current_str,
latest: latest_str,
latest_throttled: version_info&.dig(:latest_throttled),
outdated: is_outdated,
newer_than_upstream: is_newer_than_upstream,
}.compact
info[:meta] = {
livecheck_defined: formula_or_cask.livecheck_defined?,
}
info[:meta][:head_only] = true if formula&.head_only?
info[:meta].merge!(version_info[:meta]) if version_info.present? && version_info.key?(:meta)
info[:resources] = resource_version_info if check_for_resources
next if newer_only && !info[:version][:outdated]
has_a_newer_upstream_version ||= true
if json
progress&.increment
info.delete(:meta) unless verbose
if check_for_resources && !verbose
resource_version_info.map! do |resource_info|
resource_info.delete(:meta)
resource_info
end
end
next info
end
puts if debug
print_latest_version(info, verbose:, ambiguous_cask: ambiguous_casks.include?(formula_or_cask))
print_resources_info(resource_version_info, verbose:) if check_for_resources
nil
# An error for one item is reported (and marks the run as failed) but
# doesn't abort the remaining checks.
rescue => e
Homebrew.failed = true
use_full_name = full_name || ambiguous_names.include?(formula_or_cask)
if json
progress&.increment
unless quiet
status_hash(formula_or_cask, "error", [e.to_s], full_name: use_full_name,
verbose:)
end
elsif !quiet
name = package_or_resource_name(formula_or_cask, full_name: use_full_name)
name += " (cask)" if ambiguous_casks.include?(formula_or_cask)
onoe "#{Tty.blue}#{name}#{Tty.reset}: #{e}"
# Livecheck::Error is an expected failure, so no backtrace is shown.
if debug && !e.is_a?(Livecheck::Error)
require "utils/backtrace"
$stderr.puts Utils::Backtrace.clean(e)
end
print_resources_info(resource_version_info, verbose:) if check_for_resources
nil
end
end
puts "No newer upstream versions." if newer_only && !has_a_newer_upstream_version && !debug && !json && !quiet
return unless json
if progress
progress.finish
Tty.with($stderr) do |stderr|
stderr.print "#{Tty.up}#{Tty.erase_line}" * 2
end
end
puts JSON.pretty_generate(formulae_checked.compact)
end
# Returns the display name for a formula, cask or resource. Formulae and
# casks may be fully qualified (tap/name) when `full_name` is true;
# resources only ever have a plain name.
sig { params(package_or_resource: T.any(Formula, Cask::Cask, Resource), full_name: T::Boolean).returns(String) }
def self.package_or_resource_name(package_or_resource, full_name: false)
  case package_or_resource
  when Formula then formula_name(package_or_resource, full_name:)
  when Cask::Cask then cask_name(package_or_resource, full_name:)
  when Resource then package_or_resource.name
  else T.absurd(package_or_resource)
  end
end
# Returns the fully-qualified name of a cask if the `full_name` argument
# is truthy; returns the cask token otherwise.
sig { params(cask: Cask::Cask, full_name: T::Boolean).returns(String) }
private_class_method def self.cask_name(cask, full_name: false)
  return cask.full_name if full_name

  cask.token
end
# Returns the fully-qualified name of a formula if the `full_name`
# argument is truthy; returns the plain name otherwise.
sig { params(formula: Formula, full_name: T::Boolean).returns(String) }
private_class_method def self.formula_name(formula, full_name: false)
  return formula.full_name if full_name

  formula.name
end
sig {
  params(
    package_or_resource: T.any(Formula, Cask::Cask, Resource),
    status_str: String,
    messages: T.nilable(T::Array[String]),
    full_name: T::Boolean,
    verbose: T::Boolean,
  ).returns(T::Hash[Symbol, T.untyped])
}
# Builds a result hash describing a non-version status (e.g. "error",
# "skipped") for a formula, cask or resource. The hash contains the item
# name under a type-specific key, the status string, any messages, and
# basic metadata.
def self.status_hash(package_or_resource, status_str, messages = nil, full_name: false, verbose: false)
  status_hash = {}

  # Record the name under a key matching the item's type.
  case package_or_resource
  when Formula
    status_hash[:formula] = formula_name(package_or_resource, full_name:)
  when Cask::Cask
    status_hash[:cask] = cask_name(package_or_resource, full_name:)
  when Resource
    status_hash[:resource] = package_or_resource.name
  end

  status_hash[:status] = status_str
  status_hash[:messages] = messages if messages.is_a?(Array)
  status_hash[:meta] = {
    livecheck_defined: package_or_resource.livecheck_defined?,
  }
  status_hash[:meta][:head_only] = true if package_or_resource.is_a?(Formula) && package_or_resource.head_only?
  status_hash
end
# Formats and prints the livecheck result for a formula/cask/resource.
# Resources are indented; the current version is highlighted in red when
# it is newer than upstream and the latest version in green when the item
# is outdated.
sig { params(info: T::Hash[Symbol, T.untyped], verbose: T::Boolean, ambiguous_cask: T::Boolean).void }
private_class_method def self.print_latest_version(info, verbose: false, ambiguous_cask: false)
  name_s = +""
  name_s << " " if info[:resource].present?
  name_s << "#{Tty.blue}#{info[:formula] || info[:cask] || info[:resource]}#{Tty.reset}"
  name_s << " (cask)" if ambiguous_cask
  name_s << " (guessed)" if verbose && !info[:meta][:livecheck_defined]

  current_s = info[:version][:current]
  current_s = "#{Tty.red}#{current_s}#{Tty.reset}" if info[:version][:newer_than_upstream]

  latest_s = info[:version][:latest]
  latest_s = "#{Tty.green}#{latest_s}#{Tty.reset}" if info[:version][:outdated]

  puts "#{name_s}: #{current_s} ==> #{latest_s}"
end
# Prints the livecheck result for the resources of a given Formula.
# Entries carrying a status and messages are printed as skip information;
# all others are printed as version results.
sig { params(info: T::Array[T::Hash[Symbol, T.untyped]], verbose: T::Boolean).void }
private_class_method def self.print_resources_info(info, verbose: false)
  info.each do |resource_info|
    skipped = resource_info[:status] && resource_info[:messages]
    if skipped
      SkipConditions.print_skip_information(resource_info)
    else
      print_latest_version(resource_info, verbose:)
    end
  end
end
sig {
  params(
    livecheck_url: T.any(String, Symbol),
    package_or_resource: T.any(Formula, Cask::Cask, Resource),
  ).returns(String)
}
# Resolves a `livecheck` block `url` value to a URL string. A String is
# returned as-is; a supported symbol (`:url`, `:head`, `:stable`,
# `:homepage`) is resolved against the given package or resource.
# Raises ArgumentError when a symbol doesn't map to a usable URL.
def self.livecheck_url_to_string(livecheck_url, package_or_resource)
  return livecheck_url if livecheck_url.is_a?(String)

  livecheck_url_string =
    case livecheck_url
    when :url
      package_or_resource.url&.to_s if package_or_resource.is_a?(Cask::Cask) || package_or_resource.is_a?(Resource)
    when :head, :stable
      package_or_resource.send(livecheck_url)&.url if package_or_resource.is_a?(Formula)
    when :homepage
      package_or_resource.homepage unless package_or_resource.is_a?(Resource)
    end

  if livecheck_url.is_a?(Symbol) && !livecheck_url_string
    raise ArgumentError, "`url #{livecheck_url.inspect}` does not reference a checkable URL"
  end

  livecheck_url_string
end
# Returns an Array containing the formula/cask/resource URLs that can be used by livecheck.
# For formulae this is the stable URL plus mirrors, the head URL, and the
# homepage; for casks the URL and homepage; for resources just the URL.
# `nil` entries and duplicates are removed.
sig { params(package_or_resource: T.any(Formula, Cask::Cask, Resource)).returns(T::Array[String]) }
def self.checkable_urls(package_or_resource)
  urls = []

  case package_or_resource
  when Formula
    if (stable = package_or_resource.stable)
      urls << stable.url
      urls.concat(stable.mirrors)
    end
    if (head = package_or_resource.head)
      urls << head.url
    end
    if (homepage = package_or_resource.homepage)
      urls << homepage
    end
  when Cask::Cask
    urls << package_or_resource.url.to_s if package_or_resource.url
    if (homepage = package_or_resource.homepage)
      urls << homepage
    end
  when Resource
    urls << package_or_resource.url
  else
    T.absurd(package_or_resource)
  end

  urls.compact.uniq
end
# livecheck should fetch a URL using brewed curl if the formula/cask
# contains a `stable`/`url` or `head` URL `using: :homebrew_curl` that
# shares the same root domain.
sig { params(formula_or_cask: T.any(Formula, Cask::Cask), url: String).returns(T::Boolean) }
def self.use_homebrew_curl?(formula_or_cask, url)
  url_root_domain = Addressable::URI.parse(url)&.domain
  return false if url_root_domain.blank?

  # Collect root domains of URLs with `using: :homebrew_curl`.
  homebrew_curl_root_domains =
    case formula_or_cask
    when Formula
      [:stable, :head].filter_map do |spec_name|
        spec = formula_or_cask.send(spec_name)
        next if spec.nil? || spec.using != :homebrew_curl

        domain = Addressable::URI.parse(spec.url)&.domain
        domain if domain.present?
      end
    when Cask::Cask
      return false if formula_or_cask.url&.using != :homebrew_curl

      domain = Addressable::URI.parse(formula_or_cask.url.to_s)&.domain
      domain.present? ? [domain] : []
    else
      []
    end

  homebrew_curl_root_domains.include?(url_root_domain)
end
# Identifies the latest version of the formula/cask and returns a Hash containing
# the version information. Returns nil if a latest version couldn't be found.
#
# `livecheck` block values from a referenced formula/cask act as fallbacks
# for the checked item's own values. Candidate URLs are tried in order
# until a strategy produces usable versions.
sig {
params(
formula_or_cask: T.any(Formula, Cask::Cask),
referenced_formula_or_cask: T.nilable(T.any(Formula, Cask::Cask)),
livecheck_references: T::Array[T.any(Formula, Cask::Cask)],
json: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
debug: T::Boolean,
).returns(T.nilable(T::Hash[Symbol, T.untyped]))
}
def self.latest_version(
formula_or_cask,
referenced_formula_or_cask: nil,
livecheck_references: [],
json: false, full_name: false, verbose: false, debug: false
)
formula = formula_or_cask if formula_or_cask.is_a?(Formula)
cask = formula_or_cask if formula_or_cask.is_a?(Cask::Cask)
livecheck_defined = formula_or_cask.livecheck_defined?
livecheck = formula_or_cask.livecheck
referenced_livecheck = referenced_formula_or_cask&.livecheck
# The item's own livecheck values win; the referenced item's values are
# only used as fallbacks.
livecheck_options = livecheck.options || referenced_livecheck&.options
livecheck_url_options = livecheck_options.url_options.compact
livecheck_url = livecheck.url || referenced_livecheck&.url
livecheck_regex = livecheck.regex || referenced_livecheck&.regex
livecheck_strategy = livecheck.strategy || referenced_livecheck&.strategy
livecheck_strategy_block = livecheck.strategy_block || referenced_livecheck&.strategy_block
livecheck_throttle = livecheck.throttle || referenced_livecheck&.throttle
referenced_package = referenced_formula_or_cask || formula_or_cask
livecheck_url_string = livecheck_url_to_string(livecheck_url, referenced_package) if livecheck_url
# A livecheck-block URL takes precedence; otherwise all checkable URLs
# of the (referenced) package are candidates.
urls = [livecheck_url_string] if livecheck_url_string
urls ||= checkable_urls(referenced_package)
if debug
if formula
puts "Formula: #{formula_name(formula, full_name:)}"
puts "Head only?: true" if formula.head_only?
elsif cask
puts "Cask: #{cask_name(formula_or_cask, full_name:)}"
end
puts "livecheck block?: #{livecheck_defined ? "Yes" : "No"}"
puts "Throttle: #{livecheck_throttle}" if livecheck_throttle
livecheck_references.each do |ref_formula_or_cask|
case ref_formula_or_cask
when Formula
puts "Formula Ref: #{formula_name(ref_formula_or_cask, full_name:)}"
when Cask::Cask
puts "Cask Ref: #{cask_name(ref_formula_or_cask, full_name:)}"
end
end
end
# Try each candidate URL in turn, returning as soon as one produces a
# usable set of versions.
checked_urls = []
urls.each_with_index do |original_url, i|
url = original_url
next if checked_urls.include?(url)
strategies = Strategy.from_url(
url,
livecheck_strategy:,
regex_provided: livecheck_regex.present?,
block_provided: livecheck_strategy_block.present?,
)
# An explicitly-requested strategy wins over the inferred ones.
strategy = Strategy.from_symbol(livecheck_strategy) || strategies.first
next unless strategy
strategy_name = livecheck_strategy_names(strategy)
if strategy.respond_to?(:preprocess_url)
url = strategy.preprocess_url(url)
next if checked_urls.include?(url)
end
if debug
puts
if livecheck_url.is_a?(Symbol)
# This assumes the URL symbol will fit within the available space
puts "URL (#{livecheck_url}):".ljust(18, " ") + original_url
elsif original_url.present? && original_url != "None"
puts "URL: #{original_url}"
end
puts "URL (processed): #{url}" if url != original_url
puts "URL Options: #{livecheck_url_options}" if livecheck_url_options.present?
if strategies.present? && verbose
puts "Strategies: #{strategies.map { |s| livecheck_strategy_names(s) }.join(", ")}"
end
puts "Strategy: #{strategy_name}" if strategy.present?
puts "Regex: #{livecheck_regex.inspect}" if livecheck_regex.present?
end
# Sanity-check an explicitly-requested strategy against this URL.
if livecheck_strategy.present?
if livecheck_url.blank? && strategy.method(:find_versions).parameters.include?([:keyreq, :url])
odebug "#{strategy_name} strategy requires a URL"
next
elsif livecheck_strategy != :page_match && strategies.exclude?(strategy)
odebug "#{strategy_name} strategy does not apply to this URL"
next
end
end
next if strategy.blank?
# Unless explicitly configured, decide whether to use brewed curl based
# on the package's own `using: :homebrew_curl` URLs.
if (livecheck_homebrew_curl = livecheck_options.homebrew_curl).nil?
case strategy_name
when "PageMatch", "HeaderMatch"
if (homebrew_curl = use_homebrew_curl?(referenced_package, url))
livecheck_options = livecheck_options.merge({ homebrew_curl: })
livecheck_homebrew_curl = homebrew_curl
end
end
end
puts "Homebrew curl?: #{livecheck_homebrew_curl ? "Yes" : "No"}" if debug && !livecheck_homebrew_curl.nil?
# Only use arguments that the strategy's `#find_versions` method
# supports
find_versions_parameters = livecheck_find_versions_parameters(strategy)
strategy_args = {}
strategy_args[:cask] = cask if find_versions_parameters.include?(:cask)
strategy_args[:url] = url if find_versions_parameters.include?(:url)
strategy_args[:regex] = livecheck_regex if find_versions_parameters.include?(:regex)
strategy_args[:options] = livecheck_options if find_versions_parameters.include?(:options)
strategy_args.compact!
strategy_data = strategy.find_versions(**strategy_args, &livecheck_strategy_block)
match_version_map = strategy_data[:matches]
regex = strategy_data[:regex]
messages = strategy_data[:messages]
checked_urls << url
# Strategy messages with no matches indicate an error; only give up if
# this was the last candidate URL.
if messages.is_a?(Array) && match_version_map.blank?
puts messages unless json
next if i + 1 < urls.length
return status_hash(formula_or_cask, "error", messages, full_name:, verbose:)
end
if debug
if strategy_data[:url].present? && strategy_data[:url] != url
puts "URL (strategy): #{strategy_data[:url]}"
end
puts "URL (final): #{strategy_data[:final_url]}" if strategy_data[:final_url].present?
if strategy_data[:regex].present? && strategy_data[:regex] != livecheck_regex
puts "Regex (strategy): #{strategy_data[:regex].inspect}"
end
puts "Cached?: Yes" if strategy_data[:cached] == true
end
# Discard blank versions and, unless a livecheck block is defined,
# versions containing unstable keywords (e.g. prerelease markers).
match_version_map.delete_if do |_match, version|
next true if version.blank?
next false if livecheck_defined
UNSTABLE_VERSION_KEYWORDS.any? do |rejection|
version.to_s.include?(rejection)
end
end
next if match_version_map.blank?
if debug
puts
puts "Matched Versions:"
if verbose
match_version_map.each do |match, version|
puts "#{match} => #{version.inspect}"
end
else
puts match_version_map.values.join(", ")
end
end
version_info = {
latest: Version.new(match_version_map.values.max_by { |v| LivecheckVersion.create(formula_or_cask, v) }),
}
# With a throttle, only versions whose patch value is a multiple of the
# throttle are considered for `latest_throttled`.
if livecheck_throttle
match_version_map.keep_if { |_match, version| version.patch.to_i.modulo(livecheck_throttle).zero? }
version_info[:latest_throttled] = if match_version_map.blank?
nil
else
Version.new(match_version_map.values.max_by { |v| LivecheckVersion.create(formula_or_cask, v) })
end
if debug
puts
puts "Matched Throttled Versions:"
if verbose
match_version_map.each do |match, version|
puts "#{match} => #{version.inspect}"
end
else
puts match_version_map.values.join(", ")
end
end
end
# Verbose JSON output includes metadata describing how the version was
# found (references, URLs, strategy, regex, caching, throttle).
if json && verbose
version_info[:meta] = {}
if livecheck_references.present?
version_info[:meta][:references] = livecheck_references.map do |ref_formula_or_cask|
case ref_formula_or_cask
when Formula
{ formula: formula_name(ref_formula_or_cask, full_name:) }
when Cask::Cask
{ cask: cask_name(ref_formula_or_cask, full_name:) }
end
end
end
if url != "None"
version_info[:meta][:url] = {}
version_info[:meta][:url][:symbol] = livecheck_url if livecheck_url.is_a?(Symbol) && livecheck_url_string
version_info[:meta][:url][:original] = original_url
version_info[:meta][:url][:processed] = url if url != original_url
if strategy_data[:url].present? && strategy_data[:url] != url
version_info[:meta][:url][:strategy] = strategy_data[:url]
end
version_info[:meta][:url][:final] = strategy_data[:final_url] if strategy_data[:final_url]
version_info[:meta][:url][:options] = livecheck_url_options if livecheck_url_options.present?
end
version_info[:meta][:strategy] = strategy_name if strategy.present?
version_info[:meta][:strategies] = strategies.map { |s| livecheck_strategy_names(s) } if strategies.present?
version_info[:meta][:regex] = regex.inspect if regex.present?
version_info[:meta][:cached] = true if strategy_data[:cached] == true
version_info[:meta][:throttle] = livecheck_throttle if livecheck_throttle
end
return version_info
end
nil
end
# Identifies the latest version of a resource and returns a Hash containing the
# version information. Returns nil if a latest version couldn't be found.
sig {
params(
resource: Resource,
formula_latest: String,
json: T::Boolean,
full_name: T::Boolean,
debug: T::Boolean,
quiet: T::Boolean,
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | true |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/constants.rb | Library/Homebrew/livecheck/constants.rb | # typed: strict
# frozen_string_literal: true
module Homebrew
module Livecheck
# The {Constants} module provides constants that are intended to be used
# in `livecheck` block values (e.g. `url`, `regex`).
module Constants
# A placeholder string used in resource `livecheck` block URLs that will
# be replaced with the latest version from the main formula check.
# NOTE(review): the substitution itself appears to happen in the resource
# version logic — confirm against `Livecheck.resource_version`.
LATEST_VERSION = "<FORMULA_LATEST_VERSION>"
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/livecheck_version.rb | Library/Homebrew/livecheck/livecheck_version.rb | # typed: strict
# frozen_string_literal: true
module Homebrew
module Livecheck
# A formula or cask version, split into its component sub-versions.
#
# Formula and resource versions consist of a single sub-version, while
# cask versions may contain multiple comma-separated sub-versions, so
# comparisons work element-wise over an array of sub-versions.
class LivecheckVersion
include Comparable
sig {
params(package_or_resource: T.any(Formula, Cask::Cask, Resource), version: Version).returns(LivecheckVersion)
}
# Creates a LivecheckVersion for the given package/resource and version.
def self.create(package_or_resource, version)
versions = case package_or_resource
when Formula, Resource
[version]
when Cask::Cask
# Cask versions may contain comma-separated sub-versions
version.to_s.split(",").map { |s| Version.new(s) }
else
T.absurd(package_or_resource)
end
new(versions)
end
# The component sub-versions that make up this version.
sig { returns(T::Array[Version]) }
attr_reader :versions
sig { params(versions: T::Array[Version]).void }
def initialize(versions)
@versions = versions
end
# Compares sub-versions pairwise. Returns nil for non-LivecheckVersion
# operands. When one side has fewer sub-versions, the missing elements
# compare as Version::NULL: a remaining sub-version greater than NULL
# decides the comparison, otherwise that element is skipped.
sig { params(other: T.untyped).returns(T.nilable(Integer)) }
def <=>(other)
return unless other.is_a?(LivecheckVersion)
lversions = versions
rversions = other.versions
max = [lversions.count, rversions.count].max
l = r = 0
while l < max
# Indexes past the end of either array yield NULL placeholders.
a = lversions[l] || Version::NULL
b = rversions[r] || Version::NULL
if a == b
l += 1
r += 1
next
elsif !a.null? && b.null?
return 1 if a > Version::NULL
l += 1
elsif a.null? && !b.null?
return -1 if b > Version::NULL
r += 1
else
return a <=> b
end
end
# All compared sub-versions were equal (or ignorable).
0
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/skip_conditions.rb | Library/Homebrew/livecheck/skip_conditions.rb | # typed: strict
# frozen_string_literal: true
module Homebrew
module Livecheck
# The `Livecheck::SkipConditions` module primarily contains methods that
# check for various formula/cask/resource conditions where a check should be skipped.
module SkipConditions
sig {
params(
package_or_resource: T.any(Formula, Cask::Cask, Resource),
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Skips items with an explicit `skip` in their `livecheck` block and, for
# formulae without a `livecheck` block, those whose stable URL points at
# an unreliable/archived source (Gist, Google Code Archive, Internet
# Archive). Returns an empty hash when the item shouldn't be skipped.
private_class_method def self.package_or_resource_skip(
package_or_resource,
livecheck_defined,
full_name: false,
verbose: false
)
formula = package_or_resource if package_or_resource.is_a?(Formula)
if (stable_url = formula&.stable&.url)
stable_is_gist = stable_url.match?(%r{https?://gist\.github(?:usercontent)?\.com/}i)
stable_from_google_code_archive = stable_url.match?(
%r{https?://storage\.googleapis\.com/google-code-archive-downloads/}i,
)
stable_from_internet_archive = stable_url.match?(%r{https?://web\.archive\.org/}i)
end
skip_message = if package_or_resource.livecheck.skip_msg.present?
package_or_resource.livecheck.skip_msg
elsif !livecheck_defined
if stable_from_google_code_archive
"Stable URL is from Google Code Archive"
elsif stable_from_internet_archive
"Stable URL is from Internet Archive"
elsif stable_is_gist
"Stable URL is a GitHub Gist"
end
end
return {} if !package_or_resource.livecheck.skip? && skip_message.blank?
skip_messages = skip_message ? [skip_message] : nil
Livecheck.status_hash(package_or_resource, "skipped", skip_messages, full_name:, verbose:)
end
sig {
params(
formula: Formula,
_livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# HEAD-only formulae can only be compared against an installed commit.
private_class_method def self.formula_head_only(formula, _livecheck_defined, full_name: false, verbose: false)
return {} if !formula.head_only? || formula.any_version_installed?
Livecheck.status_hash(
formula,
"error",
["HEAD only formula must be installed to be checkable"],
full_name:,
verbose:,
)
end
sig {
params(
formula: Formula,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Deprecated formulae are skipped unless they define a `livecheck` block.
private_class_method def self.formula_deprecated(formula, livecheck_defined, full_name: false, verbose: false)
return {} if !formula.deprecated? || livecheck_defined
Livecheck.status_hash(formula, "deprecated", full_name:, verbose:)
end
sig {
params(
formula: Formula,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Disabled formulae are skipped unless they define a `livecheck` block.
private_class_method def self.formula_disabled(formula, livecheck_defined, full_name: false, verbose: false)
return {} if !formula.disabled? || livecheck_defined
Livecheck.status_hash(formula, "disabled", full_name:, verbose:)
end
sig {
params(
formula: Formula,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Versioned formulae (e.g. `foo@1`) are skipped unless they define a
# `livecheck` block.
private_class_method def self.formula_versioned(formula, livecheck_defined, full_name: false, verbose: false)
return {} if !formula.versioned_formula? || livecheck_defined
Livecheck.status_hash(formula, "versioned", full_name:, verbose:)
end
sig {
params(
cask: Cask::Cask,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Deprecated casks are skipped unless they define a `livecheck` block.
# Casks deprecated because they fail the Gatekeeper check are still
# checked while a disable date is set.
private_class_method def self.cask_deprecated(cask, livecheck_defined, full_name: false, verbose: false)
return {} if !cask.deprecated? || livecheck_defined
return {} if cask.disable_date && cask.deprecation_reason == :fails_gatekeeper_check
Livecheck.status_hash(cask, "deprecated", full_name:, verbose:)
end
sig {
params(
cask: Cask::Cask,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Disabled casks are skipped unless they define a `livecheck` block.
private_class_method def self.cask_disabled(cask, livecheck_defined, full_name: false, verbose: false)
return {} if !cask.disabled? || livecheck_defined
Livecheck.status_hash(cask, "disabled", full_name:, verbose:)
end
sig {
params(
cask: Cask::Cask,
_livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
extract_plist: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# The ExtractPlist strategy is expensive, so it is skipped unless
# explicitly enabled (or only one cask is being checked).
private_class_method def self.cask_extract_plist(
cask,
_livecheck_defined,
full_name: false,
verbose: false,
extract_plist: false
)
return {} if extract_plist || cask.livecheck.strategy != :extract_plist
Livecheck.status_hash(
cask,
"skipped",
["Use `--extract-plist` to enable checking multiple casks with ExtractPlist strategy"],
full_name:,
verbose:,
)
end
sig {
params(
cask: Cask::Cask,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Casks with `version :latest` have no comparable version.
private_class_method def self.cask_version_latest(cask, livecheck_defined, full_name: false, verbose: false)
return {} if !(cask.present? && cask.version&.latest?) || livecheck_defined
Livecheck.status_hash(cask, "latest", full_name:, verbose:)
end
sig {
params(
cask: Cask::Cask,
livecheck_defined: T::Boolean,
full_name: T::Boolean,
verbose: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
# Casks with an unversioned URL can't be checked without a `livecheck` block.
private_class_method def self.cask_url_unversioned(cask, livecheck_defined, full_name: false, verbose: false)
return {} if !(cask.present? && cask.url&.unversioned?) || livecheck_defined
Livecheck.status_hash(cask, "unversioned", full_name:, verbose:)
end
# Skip conditions for formulae.
FORMULA_CHECKS = T.let([
:package_or_resource_skip,
:formula_head_only,
:formula_disabled,
:formula_deprecated,
:formula_versioned,
].freeze, T::Array[Symbol])
private_constant :FORMULA_CHECKS
# Skip conditions for casks.
CASK_CHECKS = T.let([
:package_or_resource_skip,
:cask_disabled,
:cask_deprecated,
:cask_extract_plist,
:cask_version_latest,
:cask_url_unversioned,
].freeze, T::Array[Symbol])
private_constant :CASK_CHECKS
# Skip conditions for resources.
RESOURCE_CHECKS = T.let([
:package_or_resource_skip,
].freeze, T::Array[Symbol])
private_constant :RESOURCE_CHECKS
# If a formula/cask/resource should be skipped, we return a hash from
# `Livecheck#status_hash`, which contains a `status` type and sometimes
# error `messages`.
sig {
params(
package_or_resource: T.any(Formula, Cask::Cask, Resource),
full_name: T::Boolean,
verbose: T::Boolean,
extract_plist: T::Boolean,
).returns(T::Hash[Symbol, T.untyped])
}
def self.skip_information(package_or_resource, full_name: false, verbose: false, extract_plist: true)
livecheck_defined = package_or_resource.livecheck_defined?
checks = case package_or_resource
when Formula
FORMULA_CHECKS
when Cask::Cask
CASK_CHECKS
when Resource
RESOURCE_CHECKS
end
# Run the type-specific checks in order; the first one that returns a
# non-empty hash wins.
checks.each do |method_name|
skip_hash = case method_name
when :cask_extract_plist
send(method_name, package_or_resource, livecheck_defined, full_name:, verbose:, extract_plist:)
else
send(method_name, package_or_resource, livecheck_defined, full_name:, verbose:)
end
return skip_hash if skip_hash.present?
end
{}
end
# Skip conditions for formulae/casks/resources referenced in a `livecheck` block
# are treated differently than normal. We only respect certain skip
# conditions (returning the related hash) and others are treated as
# errors.
sig {
params(
livecheck_package_or_resource: T.any(Formula, Cask::Cask, Resource),
original_package_or_resource_name: String,
full_name: T::Boolean,
verbose: T::Boolean,
extract_plist: T::Boolean,
).returns(T.nilable(T::Hash[Symbol, T.untyped]))
}
def self.referenced_skip_information(
livecheck_package_or_resource,
original_package_or_resource_name,
full_name: false,
verbose: false,
extract_plist: true
)
skip_info = SkipConditions.skip_information(
livecheck_package_or_resource,
full_name:,
verbose:,
extract_plist:,
)
return if skip_info.blank?
referenced_name = Livecheck.package_or_resource_name(livecheck_package_or_resource, full_name:)
referenced_type = case livecheck_package_or_resource
when Formula
:formula
when Cask::Cask
:cask
when Resource
:resource
end
# Only errors and explicit `skip`s are passed through; any other skip
# condition on a referenced item is raised as an error.
if skip_info[:status] != "error" &&
!(skip_info[:status] == "skipped" && livecheck_package_or_resource.livecheck.skip?)
error_msg_end = if skip_info[:status] == "skipped"
"automatically skipped"
else
"skipped as #{skip_info[:status]}"
end
raise "Referenced #{referenced_type} (#{referenced_name}) is #{error_msg_end}"
end
# Report the skip under the original (referring) item's name.
skip_info[referenced_type] = original_package_or_resource_name
skip_info
end
# Prints default livecheck output in relation to skip conditions.
sig { params(skip_hash: T::Hash[Symbol, T.untyped]).void }
def self.print_skip_information(skip_hash)
return unless skip_hash.is_a?(Hash)
# Resources are indented under their formula in the output.
name = if skip_hash[:formula].is_a?(String)
skip_hash[:formula]
elsif skip_hash[:cask].is_a?(String)
skip_hash[:cask]
elsif skip_hash[:resource].is_a?(String)
" #{skip_hash[:resource]}"
end
return unless name
if skip_hash[:messages].is_a?(Array) && skip_hash[:messages].any?
# TODO: Handle multiple messages, only if needed in the future
if skip_hash[:status] == "skipped"
puts "#{Tty.red}#{name}#{Tty.reset}: skipped - #{skip_hash[:messages][0]}"
else
puts "#{Tty.red}#{name}#{Tty.reset}: #{skip_hash[:messages][0]}"
end
elsif skip_hash[:status].present?
puts "#{Tty.red}#{name}#{Tty.reset}: #{skip_hash[:status]}"
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/error.rb | Library/Homebrew/livecheck/error.rb | # typed: strict
# frozen_string_literal: true
module Homebrew
module Livecheck
# Error during a livecheck run.
class Error < RuntimeError
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/xorg.rb | Library/Homebrew/livecheck/strategy/xorg.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Xorg} strategy identifies versions of software at x.org by
# checking directory listing pages.
#
# X.Org URLs take one of the following formats, among several others:
#
# * `https://www.x.org/archive/individual/app/example-1.2.3.tar.bz2`
# * `https://www.x.org/archive/individual/font/example-1.2.3.tar.bz2`
# * `https://www.x.org/archive/individual/lib/libexample-1.2.3.tar.bz2`
# * `https://ftp.x.org/archive/individual/lib/libexample-1.2.3.tar.bz2`
# * `https://www.x.org/pub/individual/doc/example-1.2.3.tar.gz`
# * `https://xorg.freedesktop.org/archive/individual/util/example-1.2.3.tar.xz`
#
# The notable differences between URLs are as follows:
#
# * `www.x.org` and `ftp.x.org` seem to be interchangeable (we prefer
# `www.x.org`).
# * `/archive/` is the current top-level directory and `/pub/` will
# redirect to the same URL using `/archive/` instead. (The strategy
# handles this replacement to avoid the redirection.)
# * The `/individual/` directory contains a number of directories (e.g.
# app, data, doc, driver, font, lib, etc.) which contain a number of
# different archive files.
#
# Since this strategy ends up checking the same directory listing pages
# for multiple formulae, we've included a simple method of page caching.
# This prevents livecheck from fetching the same page more than once and
# also dramatically speeds up these checks. Eventually we hope to
# implement a more sophisticated page cache that all strategies using
# {PageMatch} can use (allowing us to simplify this strategy accordingly).
#
# The default regex identifies versions in archive files found in `href`
# attributes.
#
# @api public
class Xorg
extend Strategic
NICE_NAME = "X.Org"
# A `Regexp` used in determining if the strategy applies to the URL and
# also as part of extracting the module name from the URL basename.
MODULE_REGEX = /(?<module_name>.+)-\d+/i
# A `Regexp` used to extract the module name from the URL basename.
FILENAME_REGEX = /^#{MODULE_REGEX.source.strip}/i
# The `Regexp` used to determine if the strategy applies to the URL.
URL_MATCH_REGEX = %r{
^https?://(?:[^/]+?\.)* # Scheme and any leading subdomains
(?:x\.org/(?:[^/]+/)*individual
|freedesktop\.org/(?:archive|dist|software)
|archive\.mesa3d\.org)
/(?:[^/]+/)*#{MODULE_REGEX.source.strip}
}ix
# Used to cache page content, so we don't fetch the same pages
# repeatedly.
@page_data = T.let({}, T::Hash[String, String])
# Whether the strategy can be applied to the provided URL.
#
# @param url [String] the URL to match against
# @return [Boolean]
sig { override.params(url: String).returns(T::Boolean) }
def self.match?(url)
URL_MATCH_REGEX.match?(url)
end
# Extracts information from a provided URL and uses it to generate
# various input values used by the strategy to check for new versions.
# Some of these values act as defaults and can be overridden in a
# `livecheck` block.
#
# @param url [String] the URL used to generate values
# @return [Hash]
sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
def self.generate_input_values(url)
values = {}
file_name = File.basename(url)
match = file_name.match(FILENAME_REGEX)
return values if match.blank?
# /pub/ URLs redirect to the same URL with /archive/, so we replace
# it to avoid the redirection. Removing the filename from the end of
# the URL gives us the relevant directory listing page.
values[:url] = url.sub("x.org/pub/", "x.org/archive/").delete_suffix(file_name)
regex_name = Regexp.escape(T.must(match[:module_name])).gsub("\\-", "-")
# Example regex: `/href=.*?example[._-]v?(\d+(?:\.\d+)+)\.t/i`
values[:regex] = /href=.*?#{regex_name}[._-]v?(\d+(?:\.\d+)+)\.t/i
values
end
# Generates a URL and regex (if one isn't provided) and checks the
# content at the URL for new versions (using the regex for matching).
#
# The behavior in this method for matching text in the content using a
# regex is copied and modified from the {PageMatch} strategy, so that
# we can add some simple page caching. If this behavior is expanded to
# apply to all strategies that use {PageMatch} to identify versions,
# then this strategy can be brought in line with the others.
#
# @param url [String] the URL of the content to check
# @param regex [Regexp] a regex used for matching versions in content
# @param options [Options] options to modify behavior
# @return [Hash]
sig {
override(allow_incompatible: true).params(
url: String,
regex: T.nilable(Regexp),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def self.find_versions(url:, regex: nil, options: Options.new, &block)
generated = generate_input_values(url)
generated_url = generated[:url]
# Use the cached page content to avoid duplicate fetches
cached_content = @page_data[generated_url]
match_data = PageMatch.find_versions(
url: generated_url,
regex: regex || generated[:regex],
provided_content: cached_content,
options:,
&block
)
content = match_data[:content]
return match_data if content.blank?
# Cache any new page content
@page_data[generated_url] = content unless cached_content
match_data
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/sparkle.rb | Library/Homebrew/livecheck/strategy/sparkle.rb | # typed: strict
# frozen_string_literal: true
require "bundle_version"
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Sparkle} strategy fetches content at a URL and parses it as a
# Sparkle appcast in XML format.
#
# This strategy is not applied automatically and it's necessary to use
# `strategy :sparkle` in a `livecheck` block to apply it.
class Sparkle
extend Strategic
# A priority of zero causes livecheck to skip the strategy. We do this
# for {Sparkle} so we can selectively apply it when appropriate.
PRIORITY = 0
# The `Regexp` used to determine if the strategy applies to the URL.
URL_MATCH_REGEX = %r{^https?://}i
# Common `os` values used in appcasts to refer to macOS.
APPCAST_MACOS_STRINGS = T.let(["macos", "osx"].freeze, T::Array[String])
# Whether the strategy can be applied to the provided URL.
#
# @param url [String] the URL to match against
# @return [Boolean]
sig { override.params(url: String).returns(T::Boolean) }
def self.match?(url)
URL_MATCH_REGEX.match?(url)
end
Item = Struct.new(
# @api public
:title,
# @api public
:link,
# @api public
:channel,
# @api public
:release_notes_link,
# @api public
:pub_date,
# @api public
:os,
# @api public
:url,
# @api private
:bundle_version,
# @api public
:minimum_system_version,
keyword_init: true,
) do
extend Forwardable
# @!attribute [r] version
# @api public
delegate version: :bundle_version
# @!attribute [r] short_version
# @api public
delegate short_version: :bundle_version
# @!attribute [r] nice_version
# @api public
delegate nice_version: :bundle_version
end
# Identifies version information from a Sparkle appcast.
#
# @param content [String] the text of the Sparkle appcast
# @return [Item, nil]
sig { params(content: String).returns(T::Array[Item]) }
def self.items_from_content(content)
require "rexml/document"
xml = Xml.parse_xml(content)
return [] if xml.blank?
# Remove prefixes, so we can reliably identify elements and attributes
xml.root&.each_recursive do |node|
node.prefix = ""
node.attributes.each_attribute do |attribute|
attribute.prefix = ""
end
end
xml.get_elements("//rss//channel//item").filter_map do |item|
enclosure = item.elements["enclosure"]
if enclosure
url = enclosure["url"].presence
short_version = enclosure["shortVersionString"].presence
version = enclosure["version"].presence
os = enclosure["os"].presence
end
title = Xml.element_text(item, "title")
link = Xml.element_text(item, "link")
url ||= link
channel = Xml.element_text(item, "channel")
release_notes_link = Xml.element_text(item, "releaseNotesLink")
short_version ||= Xml.element_text(item, "shortVersionString")
version ||= Xml.element_text(item, "version")
minimum_system_version_text =
Xml.element_text(item, "minimumSystemVersion")&.gsub(/\A\D+|\D+\z/, "")
if minimum_system_version_text.present?
minimum_system_version = begin
MacOSVersion.new(minimum_system_version_text)
rescue MacOSVersion::Error
nil
end
end
pub_date = Xml.element_text(item, "pubDate")&.then do |date_string|
Time.parse(date_string)
rescue ArgumentError
# Omit unparsable strings (e.g. non-English dates)
nil
end
if (match = title&.match(/(\d+(?:\.\d+)*)\s*(\([^)]+\))?\Z/))
short_version ||= match[1]
version ||= match[2]
end
bundle_version = BundleVersion.new(short_version, version) if short_version || version
data = {
title:,
link:,
channel:,
release_notes_link:,
pub_date:,
os:,
url:,
bundle_version:,
minimum_system_version:,
}.compact
next if data.empty?
# Set a default `pub_date` (for sorting) if one isn't provided
data[:pub_date] ||= Time.new(0)
Item.new(**data)
end
end
# Filters out items that aren't suitable for Homebrew.
#
# @param items [Array] appcast items
# @return [Array]
sig { params(items: T::Array[Item]).returns(T::Array[Item]) }
def self.filter_items(items)
items.select do |item|
# Omit items with an explicit `os` value that isn't macOS
next false if item.os && APPCAST_MACOS_STRINGS.none?(item.os)
# Omit items for prerelease macOS versions
next false if item.minimum_system_version&.strip_patch&.prerelease?
true
end.compact
end
# Sorts items from newest to oldest.
#
# @param items [Array] appcast items
# @return [Array]
sig { params(items: T::Array[Item]).returns(T::Array[Item]) }
def self.sort_items(items)
items.sort_by { |item| [item.pub_date, item.bundle_version] }
.reverse
end
# Uses `#items_from_content` to identify versions from the Sparkle
# appcast content or, if a block is provided, passes the content to
# the block to handle matching.
#
# @param content [String] the content to check
# @param regex [Regexp, nil] a regex for use in a strategy block
# @return [Array]
sig {
params(
content: String,
regex: T.nilable(Regexp),
block: T.nilable(Proc),
).returns(T::Array[String])
}
def self.versions_from_content(content, regex = nil, &block)
items = sort_items(filter_items(items_from_content(content)))
return [] if items.blank?
item = items.first
if block
block_return_value = case block.parameters[0]
when [:opt, :item], [:rest], [:req]
regex.present? ? yield(item, regex) : yield(item)
when [:opt, :items]
regex.present? ? yield(items, regex) : yield(items)
else
raise "First argument of Sparkle `strategy` block must be `item` or `items`"
end
return Strategy.handle_block_return(block_return_value)
end
version = T.must(item).bundle_version&.nice_version
version.present? ? [version] : []
end
# Checks the content at the URL for new versions.
#
# @param url [String] the URL of the content to check
# @param regex [Regexp, nil] a regex for use in a strategy block
# @param options [Options] options to modify behavior
# @return [Hash]
sig {
override(allow_incompatible: true).params(
url: String,
regex: T.nilable(Regexp),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def self.find_versions(url:, regex: nil, options: Options.new, &block)
if regex.present? && !block_given?
raise ArgumentError,
"#{Utils.demodulize(name)} only supports a regex when using a `strategy` block"
end
match_data = { matches: {}, regex:, url: }
match_data.merge!(Strategy.page_content(url, options:))
content = match_data.delete(:content)
return match_data if content.blank?
versions_from_content(content, regex, &block).each do |version_text|
match_data[:matches][version_text] = Version.new(version_text)
end
match_data
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/bitbucket.rb | Library/Homebrew/livecheck/strategy/bitbucket.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Bitbucket} strategy identifies versions of software at
# bitbucket.org by checking a repository's available downloads.
#
# Bitbucket URLs generally take one of the following formats:
#
# * `https://bitbucket.org/example/example/get/1.2.3.tar.gz`
# * `https://bitbucket.org/example/example/downloads/example-1.2.3.tar.gz`
#
# The `/get/` archive files are simply automated snapshots of the files
# for a given tag. The `/downloads/` archive files are files that have
# been uploaded instead.
#
# It's also possible for an archive to come from a repository's wiki,
# like:
# `https://bitbucket.org/example/example/wiki/downloads/example-1.2.3.zip`.
# This scenario is handled by this strategy as well and the `path` in
# this example would be `example/example/wiki` (instead of
# `example/example` with the previous URLs).
#
# The default regex identifies versions in archive files found in `href`
# attributes.
#
# @api public
class Bitbucket
extend Strategic
# The `Regexp` used to determine if the strategy applies to the URL.
URL_MATCH_REGEX = %r{
^https?://bitbucket\.org
/(?<path>.+?) # The path leading up to the get or downloads part
/(?<dl_type>get|downloads) # An indicator of the file download type
/(?<prefix>(?:[^/]+?[_-])?) # Filename text before the version
v?\d+(?:\.\d+)+ # The numeric version
(?<suffix>[^/]+) # Filename text after the version
}ix
# Whether the strategy can be applied to the provided URL.
#
# @param url [String] the URL to match against
sig { override.params(url: String).returns(T::Boolean) }
def self.match?(url)
URL_MATCH_REGEX.match?(url)
end
# Extracts information from a provided URL and uses it to generate
# various input values used by the strategy to check for new versions.
# Some of these values act as defaults and can be overridden in a
# `livecheck` block.
#
# @param url [String] the URL used to generate values
sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
def self.generate_input_values(url)
values = {}
match = url.match(URL_MATCH_REGEX)
return values if match.blank?
regex_prefix = Regexp.escape(T.must(match[:prefix])).gsub("\\-", "-")
# `/get/` archives are Git tag snapshots, so we need to check that tab
# instead of the main `/downloads/` page
if match[:dl_type] == "get"
values[:url] = "https://bitbucket.org/#{match[:path]}/downloads/?tab=tags&iframe=true&spa=0"
# Example tag regexes:
# * `/<td[^>]*?class="name"[^>]*?>\s*v?(\d+(?:\.\d+)+)\s*?</im`
# * `/<td[^>]*?class="name"[^>]*?>\s*abc-v?(\d+(?:\.\d+)+)\s*?</im`
values[:regex] = /<td[^>]*?class="name"[^>]*?>\s*#{regex_prefix}v?(\d+(?:\.\d+)+)\s*?</im
else
values[:url] = "https://bitbucket.org/#{match[:path]}/downloads/?iframe=true&spa=0"
# Use `\.t` instead of specific tarball extensions (e.g. .tar.gz)
suffix = T.must(match[:suffix]).sub(Strategy::TARBALL_EXTENSION_REGEX, ".t")
regex_suffix = Regexp.escape(suffix).gsub("\\-", "-")
# Example file regexes:
# * `/href=.*?v?(\d+(?:\.\d+)+)\.t/i`
# * `/href=.*?abc-v?(\d+(?:\.\d+)+)\.t/i`
values[:regex] = /href=.*?#{regex_prefix}v?(\d+(?:\.\d+)+)#{regex_suffix}/i
end
values
end
# Generates a URL and regex (if one isn't provided) and passes them
# to {PageMatch.find_versions} to identify versions in the content.
#
# @param url [String] the URL of the content to check
# @param regex [Regexp] a regex used for matching versions in content
# @param options [Options] options to modify behavior
# @return [Hash]
sig {
override(allow_incompatible: true).params(
url: String,
regex: T.nilable(Regexp),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def self.find_versions(url:, regex: nil, options: Options.new, &block)
generated = generate_input_values(url)
PageMatch.find_versions(
url: generated[:url],
regex: regex || generated[:regex],
options:,
&block
)
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/xml.rb | Library/Homebrew/livecheck/strategy/xml.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Xml} strategy fetches content at a URL, parses it as XML using
# `REXML` and provides the `REXML::Document` to a `strategy` block.
# If a regex is present in the `livecheck` block, it should be passed
# as the second argument to the `strategy` block.
#
# This is a generic strategy that doesn't contain any logic for finding
# versions, as the structure of XML data varies. Instead, a `strategy`
# block must be used to extract version information from the XML data.
# For more information on how to work with an `REXML::Document` object,
# please refer to the [`REXML::Document`](https://ruby.github.io/rexml/REXML/Document.html)
# and [`REXML::Element`](https://ruby.github.io/rexml/REXML/Element.html)
# documentation.
#
# This strategy is not applied automatically and it is necessary to use
# `strategy :xml` in a `livecheck` block (in conjunction with a
# `strategy` block) to use it.
#
# This strategy's {find_versions} method can be used in other strategies
# that work with XML content, so it should only be necessary to write
# the version-finding logic that works with the parsed XML data.
#
# @api public
class Xml
extend Strategic
NICE_NAME = "XML"
# A priority of zero causes livecheck to skip the strategy. We do this
# for {Xml} so we can selectively apply it only when a strategy block
# is provided in a `livecheck` block.
PRIORITY = 0
# The `Regexp` used to determine if the strategy applies to the URL.
URL_MATCH_REGEX = %r{^https?://}i
# Whether the strategy can be applied to the provided URL.
# {Xml} will technically match any HTTP URL but is only usable with
# a `livecheck` block containing a `strategy` block.
#
# @param url [String] the URL to match against
# @return [Boolean]
sig { override.params(url: String).returns(T::Boolean) }
def self.match?(url)
URL_MATCH_REGEX.match?(url)
end
# Parses XML text and returns an `REXML::Document` object.
# @param content [String] the XML text to parse
# @return [REXML::Document, nil]
sig { params(content: String).returns(T.nilable(REXML::Document)) }
def self.parse_xml(content)
parsing_tries = 0
begin
REXML::Document.new(content)
rescue REXML::UndefinedNamespaceException => e
undefined_prefix = e.to_s[/Undefined prefix ([^ ]+) found/i, 1]
raise "Could not identify undefined prefix." if undefined_prefix.blank?
# Only retry parsing once after removing prefix from content
parsing_tries += 1
raise "Could not parse XML after removing undefined prefix." if parsing_tries > 1
# When an XML document contains a prefix without a corresponding
# namespace, it's necessary to remove the prefix from the content
# to be able to successfully parse it using REXML
content = content.gsub(%r{(</?| )#{Regexp.escape(undefined_prefix)}:}, '\1')
retry
end
end
# Retrieves the stripped inner text of an `REXML` element. Returns
# `nil` if the optional child element doesn't exist or the text is
# blank.
# @param element [REXML::Element] an `REXML` element to retrieve text
# from, either directly or from a child element
# @param child_path [String, nil] the XPath of a child element to
# retrieve text from
# @return [String, nil]
sig {
params(
element: REXML::Element,
child_path: T.nilable(String),
).returns(T.nilable(String))
}
def self.element_text(element, child_path = nil)
element = element.get_elements(child_path).first if child_path.present?
return if element.nil?
text = element.text
return if text.blank?
text.strip
end
# Parses XML text and identifies versions using a `strategy` block.
# If a regex is provided, it will be passed as the second argument to
# the `strategy` block (after the parsed XML data).
# @param content [String] the XML text to parse and check
# @param regex [Regexp, nil] a regex used for matching versions in the
# content
# @return [Array]
sig {
params(
content: String,
regex: T.nilable(Regexp),
block: T.nilable(Proc),
).returns(T::Array[String])
}
def self.versions_from_content(content, regex = nil, &block)
return [] if content.blank? || !block_given?
require "rexml"
xml = parse_xml(content)
return [] if xml.blank?
block_return_value = if regex.present?
yield(xml, regex)
elsif block.arity == 2
raise "Two arguments found in `strategy` block but no regex provided."
else
yield(xml)
end
Strategy.handle_block_return(block_return_value)
end
# Checks the XML content at the URL for versions, using the provided
# `strategy` block to extract version information.
#
# @param url [String] the URL of the content to check
# @param regex [Regexp, nil] a regex used for matching versions
# @param provided_content [String, nil] page content to use in place of
# fetching via `Strategy#page_content`
# @param options [Options] options to modify behavior
# @return [Hash]
sig {
override.params(
url: String,
regex: T.nilable(Regexp),
provided_content: T.nilable(String),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def self.find_versions(url:, regex: nil, provided_content: nil, options: Options.new, &block)
raise ArgumentError, "#{Utils.demodulize(name)} requires a `strategy` block" unless block_given?
match_data = { matches: {}, regex:, url: }
return match_data if url.blank?
content = if provided_content.is_a?(String)
match_data[:cached] = true
provided_content
else
match_data.merge!(Strategy.page_content(url, options:))
match_data[:content]
end
return match_data if content.blank?
versions_from_content(content, regex, &block).each do |match_text|
match_data[:matches][match_text] = Version.new(match_text)
end
match_data
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/sourceforge.rb | Library/Homebrew/livecheck/strategy/sourceforge.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Sourceforge} strategy identifies versions of software at
# sourceforge.net by checking a project's RSS feed.
#
# SourceForge URLs take a few different formats:
#
# * `https://downloads.sourceforge.net/project/example/example-1.2.3.tar.gz`
# * `https://svn.code.sf.net/p/example/code/trunk`
# * `:pserver:anonymous:@example.cvs.sourceforge.net:/cvsroot/example`
#
# The RSS feed for a project contains the most recent release archives
# and while this is fine for most projects, this approach has some
# shortcomings. Some project releases involve so many files that the one
# we're interested in isn't present in the feed content. Some projects
# contain additional software and the archive we're interested in is
# pushed out of the feed (especially if it hasn't been updated recently).
#
# Usually we address this situation by adding a `livecheck` block to
# the formula/cask that checks the page for the relevant directory in the
# project instead. In this situation, it's necessary to use
# `strategy :page_match` to prevent the {Sourceforge} strategy from
# being used.
#
# The default regex matches within `url` attributes in the RSS feed
# and identifies versions within directory names or filenames.
#
# @api public
class Sourceforge
extend Strategic
NICE_NAME = "SourceForge"
# The `Regexp` used to determine if the strategy applies to the URL.
URL_MATCH_REGEX = %r{
^https?://(?:[^/]+?\.)*(?:sourceforge|sf)\.net
(?:/projects?/(?<project_name>[^/]+)/
|/p/(?<project_name>[^/]+)/
|(?::/cvsroot)?/(?<project_name>[^/]+))
}ix
# Whether the strategy can be applied to the provided URL.
#
# @param url [String] the URL to match against
# @return [Boolean]
sig { override.params(url: String).returns(T::Boolean) }
def self.match?(url)
URL_MATCH_REGEX.match?(url)
end
# Extracts information from a provided URL and uses it to generate
# various input values used by the strategy to check for new versions.
# Some of these values act as defaults and can be overridden in a
# `livecheck` block.
#
# @param url [String] the URL used to generate values
# @return [Hash]
sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
def self.generate_input_values(url)
values = {}
match = url.match(URL_MATCH_REGEX)
return values if match.blank?
# Don't generate a URL if the URL already points to the RSS feed
unless url.match?(%r{/rss(?:/?$|\?)})
values[:url] = "https://sourceforge.net/projects/#{match[:project_name]}/rss"
end
regex_name = Regexp.escape(T.must(match[:project_name])).gsub("\\-", "-")
# It may be possible to improve the generated regex but there's quite
# a bit of variation between projects and it can be challenging to
# create something that works for most URLs.
values[:regex] = %r{url=.*?/#{regex_name}/files/.*?[-_/](\d+(?:[-.]\d+)+)[-_/%.]}i
values
end
# Generates a URL and regex (if one isn't provided) and passes them
# to {PageMatch.find_versions} to identify versions in the content.
#
# @param url [String] the URL of the content to check
# @param regex [Regexp] a regex used for matching versions in content
# @param options [Options] options to modify behavior
# @return [Hash]
sig {
override(allow_incompatible: true).params(
url: String,
regex: T.nilable(Regexp),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def self.find_versions(url:, regex: nil, options: Options.new, &block)
generated = generate_input_values(url)
PageMatch.find_versions(
url: generated[:url] || url,
regex: regex || generated[:regex],
options:,
&block
)
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/yaml.rb | Library/Homebrew/livecheck/strategy/yaml.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Yaml} strategy fetches content at a URL, parses it as YAML and
# provides the parsed data to a `strategy` block. If a regex is present
# in the `livecheck` block, it should be passed as the second argument to
# the `strategy` block.
#
# This is a generic strategy that doesn't contain any logic for finding
# versions, as the structure of YAML data varies. Instead, a `strategy`
# block must be used to extract version information from the YAML data.
#
# This strategy is not applied automatically and it is necessary to use
# `strategy :yaml` in a `livecheck` block (in conjunction with a
# `strategy` block) to use it.
#
# This strategy's {find_versions} method can be used in other strategies
# that work with YAML content, so it should only be necessary to write
# the version-finding logic that works with the parsed YAML data.
#
# @api public
class Yaml
extend Strategic
NICE_NAME = "YAML"
# A priority of zero causes livecheck to skip the strategy. We do this
# for {Yaml} so we can selectively apply it only when a strategy block
# is provided in a `livecheck` block.
PRIORITY = 0
# The `Regexp` used to determine if the strategy applies to the URL.
URL_MATCH_REGEX = %r{^https?://}i
# Whether the strategy can be applied to the provided URL.
# {Yaml} will technically match any HTTP URL but is only usable with
# a `livecheck` block containing a `strategy` block.
#
# @param url [String] the URL to match against
# @return [Boolean]
sig { override.params(url: String).returns(T::Boolean) }
def self.match?(url)
URL_MATCH_REGEX.match?(url)
end
# Parses YAML text and returns the parsed data.
# @param content [String] the YAML text to parse
sig { params(content: String).returns(T.untyped) }
def self.parse_yaml(content)
require "yaml"
begin
YAML.safe_load(content, permitted_classes: [Date, Time])
rescue Psych::SyntaxError
raise "Content could not be parsed as YAML."
end
end
# Parses YAML text and identifies versions using a `strategy` block.
# If a regex is provided, it will be passed as the second argument to
# the `strategy` block (after the parsed YAML data).
# @param content [String] the YAML text to parse and check
# @param regex [Regexp, nil] a regex used for matching versions in the
# content
# @return [Array]
sig {
params(
content: String,
regex: T.nilable(Regexp),
block: T.nilable(Proc),
).returns(T::Array[String])
}
def self.versions_from_content(content, regex = nil, &block)
return [] if content.blank? || !block_given?
yaml = parse_yaml(content)
return [] if yaml.blank?
block_return_value = if regex.present?
yield(yaml, regex)
elsif block.arity == 2
raise "Two arguments found in `strategy` block but no regex provided."
else
yield(yaml)
end
Strategy.handle_block_return(block_return_value)
end
# Checks the YAML content at the URL for versions, using the provided
# `strategy` block to extract version information.
#
# @param url [String] the URL of the content to check
# @param regex [Regexp, nil] a regex used for matching versions
# @param provided_content [String, nil] page content to use in place of
# fetching via `Strategy#page_content`
# @param options [Options] options to modify behavior
# @return [Hash]
sig {
override.params(
url: String,
regex: T.nilable(Regexp),
provided_content: T.nilable(String),
options: Options,
block: T.nilable(Proc),
).returns(T::Hash[Symbol, T.anything])
}
def self.find_versions(url:, regex: nil, provided_content: nil, options: Options.new, &block)
raise ArgumentError, "#{Utils.demodulize(name)} requires a `strategy` block" unless block_given?
match_data = { matches: {}, regex:, url: }
return match_data if url.blank?
content = if provided_content.is_a?(String)
match_data[:cached] = true
provided_content
else
match_data.merge!(Strategy.page_content(url, options:))
match_data[:content]
end
return match_data if content.blank?
versions_from_content(content, regex, &block).each do |match_text|
match_data[:matches][match_text] = Version.new(match_text)
end
match_data
end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/gnu.rb | Library/Homebrew/livecheck/strategy/gnu.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Gnu} strategy identifies versions of software at gnu.org by
# checking directory listing pages.
#
# GNU URLs use a variety of formats:
#
# * Archive file URLs:
# * `https://ftp.gnu.org/gnu/example/example-1.2.3.tar.gz`
# * `https://ftp.gnu.org/gnu/example/1.2.3/example-1.2.3.tar.gz`
# * Homepage URLs:
# * `https://www.gnu.org/software/example/`
# * `https://example.gnu.org`
#
# There are other URL formats that this strategy currently doesn't
# support:
#
# * `https://ftp.gnu.org/non-gnu/example/source/feature/1.2.3/example-1.2.3.tar.gz`
# * `https://savannah.nongnu.org/download/example/example-1.2.3.tar.gz`
# * `https://download.savannah.gnu.org/releases/example/example-1.2.3.tar.gz`
# * `https://download.savannah.nongnu.org/releases/example/example-1.2.3.tar.gz`
#
# The default regex identifies versions in archive files found in `href`
# attributes.
#
# @api public
class Gnu
  extend Strategic

  NICE_NAME = "GNU"

  # Matches gnu.org archive and homepage URLs, capturing the project name.
  URL_MATCH_REGEX = T.let(%r{
    ^https?://
    (?:(?:[^/]+?\.)*gnu\.org/(?:gnu|software)/(?<project_name>[^/]+)/
    |(?<project_name>[^/]+)\.gnu\.org/?$)
  }ix, Regexp)

  # Whether the strategy can be applied to the provided URL.
  # Savannah URLs are excluded, as they use a different layout that this
  # strategy doesn't support.
  #
  # @param url [String] the URL to match against
  # @return [Boolean]
  sig { override.params(url: String).returns(T::Boolean) }
  def self.match?(url)
    return false if url.include?("savannah.")

    URL_MATCH_REGEX.match?(url)
  end

  # Extracts the project name from a provided URL and uses it to generate
  # the directory listing URL and default regex used by the strategy.
  # These act as defaults and can be overridden in a `livecheck` block.
  #
  # @param url [String] the URL used to generate values
  # @return [Hash]
  sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
  def self.generate_input_values(url)
    match = url.match(URL_MATCH_REGEX)
    return {} if match.blank?

    project_name = T.must(match[:project_name])
    escaped_name = Regexp.escape(project_name).gsub("\\-", "-")

    {
      # The directory listing page for the project's files
      url:   "https://ftpmirror.gnu.org/gnu/#{project_name}/",
      # Matches numeric versions in `href` attributes, e.g.
      # `%r{href=.*?example[._-]v?(\d+(?:\.\d+)*)(?:\.[a-z]+|/)}i`:
      # `href=.*?` restricts matching to `href` values, `[._-]` is the
      # delimiter after the project name and `(?:\.[a-z]+|/)` is the
      # file extension or trailing directory slash.
      regex: %r{href=.*?#{escaped_name}[._-]v?(\d+(?:\.\d+)*)(?:\.[a-z]+|/)}i,
    }
  end

  # Generates a URL and regex (if one isn't provided) and passes them
  # to {PageMatch.find_versions} to identify versions in the content.
  #
  # @param url [String] the URL of the content to check
  # @param regex [Regexp] a regex used for matching versions in content
  # @param options [Options] options to modify behavior
  # @return [Hash]
  sig {
    override(allow_incompatible: true).params(
      url: String,
      regex: T.nilable(Regexp),
      options: Options,
      block: T.nilable(Proc),
    ).returns(T::Hash[Symbol, T.anything])
  }
  def self.find_versions(url:, regex: nil, options: Options.new, &block)
    generated = generate_input_values(url)

    PageMatch.find_versions(
      url:   generated[:url],
      regex: regex || generated[:regex],
      options:,
      &block
    )
  end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/json.rb | Library/Homebrew/livecheck/strategy/json.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Json} strategy fetches content at a URL, parses it as JSON and
# provides the parsed data to a `strategy` block. If a regex is present
# in the `livecheck` block, it should be passed as the second argument to
# the `strategy` block.
#
# This is a generic strategy that doesn't contain any logic for finding
# versions, as the structure of JSON data varies. Instead, a `strategy`
# block must be used to extract version information from the JSON data.
#
# This strategy is not applied automatically and it is necessary to use
# `strategy :json` in a `livecheck` block (in conjunction with a
# `strategy` block) to use it.
#
# This strategy's {find_versions} method can be used in other strategies
# that work with JSON content, so it should only be necessary to write
# the version-finding logic that works with the parsed JSON data.
#
# @api public
class Json
  extend Strategic

  NICE_NAME = "JSON"

  # A priority of zero causes livecheck to skip the strategy. We do this
  # for {Json} so we can selectively apply it only when a strategy block
  # is provided in a `livecheck` block.
  PRIORITY = 0

  # The `Regexp` used to determine if the strategy applies to the URL.
  URL_MATCH_REGEX = %r{^https?://}i

  # Whether the strategy can be applied to the provided URL.
  # {Json} will technically match any HTTP URL but is only usable with
  # a `livecheck` block containing a `strategy` block.
  #
  # @param url [String] the URL to match against
  # @return [Boolean]
  sig { override.params(url: String).returns(T::Boolean) }
  def self.match?(url)
    URL_MATCH_REGEX.match?(url)
  end

  # Parses JSON text and returns the parsed data.
  #
  # @param content [String] the JSON text to parse
  # @raise [RuntimeError] if the content cannot be parsed as JSON
  sig { params(content: String).returns(T.untyped) }
  def self.parse_json(content)
    require "json"

    JSON.parse(content)
  rescue JSON::ParserError
    raise "Content could not be parsed as JSON."
  end

  # Parses JSON text and identifies versions using a `strategy` block.
  # A two-parameter block receives the parsed JSON data and the regex
  # (if any); a one-parameter block receives only the parsed data.
  #
  # @param content [String] the JSON text to parse and check
  # @param regex [Regexp, nil] a regex used for matching versions in the
  #   content
  # @return [Array]
  sig {
    params(
      content: String,
      regex: T.nilable(Regexp),
      block: T.nilable(Proc),
    ).returns(T::Array[String])
  }
  def self.versions_from_content(content, regex = nil, &block)
    return [] if content.blank? || !block_given?

    json = parse_json(content)
    return [] if json.blank?

    raw_versions = block.arity == 2 ? block.call(json, regex) : block.call(json)
    Strategy.handle_block_return(raw_versions)
  end

  # Checks the JSON content at the URL for versions, using the provided
  # `strategy` block to extract version information.
  #
  # @param url [String] the URL of the content to check
  # @param regex [Regexp, nil] a regex used for matching versions
  # @param provided_content [String, nil] page content to use in place of
  #   fetching via `Strategy#page_content`
  # @param options [Options] options to modify behavior
  # @return [Hash]
  sig {
    override.params(
      url: String,
      regex: T.nilable(Regexp),
      provided_content: T.nilable(String),
      options: Options,
      block: T.nilable(Proc),
    ).returns(T::Hash[Symbol, T.anything])
  }
  def self.find_versions(url:, regex: nil, provided_content: nil, options: Options.new, &block)
    raise ArgumentError, "#{Utils.demodulize(name)} requires a `strategy` block" unless block_given?

    result = { matches: {}, regex:, url: }
    return result if url.blank?

    # Use the caller-supplied content when given; otherwise fetch the page.
    content =
      if provided_content.is_a?(String)
        result[:cached] = true
        provided_content
      else
        result.merge!(Strategy.page_content(url, options:))
        result[:content]
      end
    return result if content.blank?

    versions_from_content(content, regex, &block).each do |version_text|
      result[:matches][version_text] = Version.new(version_text)
    end

    result
  end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/apache.rb | Library/Homebrew/livecheck/strategy/apache.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Apache} strategy identifies versions of software at apache.org
# by checking directory listing pages.
#
# Most Apache URLs start with `https://www.apache.org/dyn/` and include
# a `filename` or `path` query string parameter where the value is a
# path to a file. The path takes one of the following formats:
#
# * `example/1.2.3/example-1.2.3.tar.gz`
# * `example/example-1.2.3/example-1.2.3.tar.gz`
# * `example/example-1.2.3-bin.tar.gz`
#
# This strategy also handles a few common mirror/backup URLs where the
# path is provided outside of a query string parameter (e.g.
# `https://archive.apache.org/dist/example/1.2.3/example-1.2.3.tar.gz`).
#
# When the path contains a version directory (e.g. `/1.2.3/`,
# `/example-1.2.3/`, etc.), the default regex matches numeric versions
# in directory names. Otherwise, the default regex matches numeric
# versions in filenames.
#
# @api public
class Apache
  extend Strategic

  # Matches Apache download URLs (query-string and mirror/backup forms),
  # capturing the directory path plus any text before/after the version.
  URL_MATCH_REGEX = %r{
    ^https?://
    (?:www\.apache\.org/dyn/.+(?:path|filename)=/?|
    archive\.apache\.org/dist/|
    dlcdn\.apache\.org/|
    downloads\.apache\.org/)
    (?<path>.+?)/ # Path to directory of files or version directories
    (?<prefix>[^/]*?) # Any text in filename or directory before version
    v?\d+(?:\.\d+)+ # The numeric version
    (?<suffix>/|[^/]*) # Any text in filename or directory after version
  }ix

  # Whether the strategy can be applied to the provided URL.
  #
  # @param url [String] the URL to match against
  # @return [Boolean]
  sig { override.params(url: String).returns(T::Boolean) }
  def self.match?(url)
    URL_MATCH_REGEX.match?(url)
  end

  # Extracts information from a provided URL and uses it to generate the
  # check URL and default regex. These act as defaults and can be
  # overridden in a `livecheck` block.
  #
  # @param url [String] the URL used to generate values
  # @return [Hash]
  sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
  def self.generate_input_values(url)
    match = url.match(URL_MATCH_REGEX)
    return {} if match.blank?

    regex_prefix = Regexp.escape(match[:prefix] || "").gsub("\\-", "-")

    # Use `\.t` instead of specific tarball extensions (e.g. .tar.gz)
    trimmed_suffix = match[:suffix]&.sub(Strategy::TARBALL_EXTENSION_REGEX, ".t")
    regex_suffix = Regexp.escape(trimmed_suffix || "").gsub("\\-", "-")

    {
      # Example URL: `https://archive.apache.org/dist/example/`
      url:   "https://archive.apache.org/dist/#{match[:path]}/",
      # Example directory regex: `%r{href=["']?v?(\d+(?:\.\d+)+)/}i`
      # Example file regexes:
      # * `/href=["']?example-v?(\d+(?:\.\d+)+)\.t/i`
      # * `/href=["']?example-v?(\d+(?:\.\d+)+)-bin\.zip/i`
      regex: /href=["']?#{regex_prefix}v?(\d+(?:\.\d+)+)#{regex_suffix}/i,
    }
  end

  # Generates a URL and regex (if one isn't provided) and passes them
  # to {PageMatch.find_versions} to identify versions in the content.
  #
  # @param url [String] the URL of the content to check
  # @param regex [Regexp] a regex used for matching versions in content
  # @param options [Options] options to modify behavior
  # @return [Hash]
  sig {
    override(allow_incompatible: true).params(
      url: String,
      regex: T.nilable(Regexp),
      options: Options,
      block: T.nilable(Proc),
    ).returns(T::Hash[Symbol, T.anything])
  }
  def self.find_versions(url:, regex: nil, options: Options.new, &block)
    generated = generate_input_values(url)

    PageMatch.find_versions(
      url:   generated[:url],
      regex: regex || generated[:regex],
      options:,
      &block
    )
  end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/crate.rb | Library/Homebrew/livecheck/strategy/crate.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Crate} strategy identifies versions of a Rust crate by checking
# the information from the `versions` API endpoint.
#
# Crate URLs have the following format:
# `https://static.crates.io/crates/example/example-1.2.3.crate`
#
# The default regex identifies versions like `1.2.3`/`v1.2.3` from the
# version `num` field. This is a common version format but a different
# regex can be provided in a `livecheck` block to override the default
# if a package uses a different format (e.g. `1.2.3d`, `1.2.3-4`, etc.).
#
# @api public
class Crate
  extend Strategic

  # The default regex used to identify versions when a regex isn't
  # provided.
  DEFAULT_REGEX = /^v?(\d+(?:\.\d+)+)$/i

  # The default `strategy` block used to extract version information when
  # a `strategy` block isn't provided. Yanked versions are skipped.
  DEFAULT_BLOCK = T.let(proc do |json, regex|
    json["versions"]&.map do |version|
      next if version["yanked"]
      next unless (match = version["num"]&.match(regex))

      match[1]
    end
  end.freeze, T.proc.params(
    arg0: T::Hash[String, T.anything],
    arg1: Regexp,
  ).returns(T.any(String, T::Array[String])))

  # Matches static.crates.io crate file URLs, capturing the package name.
  URL_MATCH_REGEX = %r{
    ^https?://static\.crates\.io/crates
    /(?<package>[^/]+) # The name of the package
    /.+\.crate # The crate filename
  }ix

  # Whether the strategy can be applied to the provided URL.
  #
  # @param url [String] the URL to match against
  # @return [Boolean]
  sig { override.params(url: String).returns(T::Boolean) }
  def self.match?(url)
    URL_MATCH_REGEX.match?(url)
  end

  # Extracts the package name from a provided URL and uses it to generate
  # the `versions` API endpoint URL checked by the strategy.
  #
  # @param url [String] the URL used to generate values
  # @return [Hash]
  sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
  def self.generate_input_values(url)
    match = url.match(URL_MATCH_REGEX)
    return {} if match.nil?

    { url: "https://crates.io/api/v1/crates/#{match[:package]}/versions" }
  end

  # Generates a URL and checks the content at the URL for new versions
  # using {Json.versions_from_content}.
  #
  # @param url [String] the URL of the content to check
  # @param regex [Regexp, nil] a regex for matching versions in content
  # @param provided_content [String, nil] content to check instead of
  #   fetching
  # @param options [Options] options to modify behavior
  # @return [Hash]
  sig {
    override.params(
      url: String,
      regex: T.nilable(Regexp),
      provided_content: T.nilable(String),
      options: Options,
      block: T.nilable(Proc),
    ).returns(T::Hash[Symbol, T.anything])
  }
  def self.find_versions(url:, regex: nil, provided_content: nil, options: Options.new, &block)
    result = { matches: {}, regex:, url: }
    result[:cached] = true if provided_content.is_a?(String)

    generated = generate_input_values(url)
    return result if generated.blank?

    result[:url] = generated[:url]

    content =
      if provided_content
        provided_content
      else
        result.merge!(Strategy.page_content(result[:url], options:))
        result[:content]
      end
    return result unless content

    Json.versions_from_content(content, regex || DEFAULT_REGEX, &block || DEFAULT_BLOCK).each do |version_text|
      result[:matches][version_text] = Version.new(version_text)
    end

    result
  end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/pypi.rb | Library/Homebrew/livecheck/strategy/pypi.rb | # typed: strict
# frozen_string_literal: true
require "livecheck/strategic"
module Homebrew
module Livecheck
module Strategy
# The {Pypi} strategy identifies the newest version of a PyPI package by
# checking the JSON API endpoint for the project and using the
# `info.version` field from the response.
#
# PyPI URLs have a standard format:
# `https://files.pythonhosted.org/packages/<hex>/<hex>/<long_hex>/example-1.2.3.tar.gz`
#
# Upstream documentation for the PyPI JSON API can be found at:
# https://docs.pypi.org/api/json/#get-a-project
#
# @api public
class Pypi
  extend Strategic

  NICE_NAME = "PyPI"

  # The default `strategy` block used to extract version information when
  # a `strategy` block isn't provided. Uses the `info.version` field from
  # the PyPI JSON API response.
  DEFAULT_BLOCK = T.let(proc do |json, regex|
    version = json.dig("info", "version")
    next if version.blank?

    regex ? version[regex, 1] : version
  end.freeze, T.proc.params(
    json: T::Hash[String, T.anything],
    regex: T.nilable(Regexp),
  ).returns(T.nilable(String)))

  # The `Regexp` used to extract the package name and suffix (e.g. file
  # extension) from the URL basename.
  FILENAME_REGEX = /
    (?<package_name>.+)- # The package name followed by a hyphen
    .*? # The version string
    (?<suffix>\.tar\.[a-z0-9]+|\.[a-z0-9]+)$ # Filename extension
  /ix

  # The `Regexp` used to determine if the strategy applies to the URL.
  URL_MATCH_REGEX = %r{
    ^https?://files\.pythonhosted\.org
    /packages
    (?:/[^/]+)+ # The hexadecimal paths before the filename
    /#{FILENAME_REGEX.source.strip} # The filename
  }ix

  # Whether the strategy can be applied to the provided URL.
  #
  # @param url [String] the URL to match against
  # @return [Boolean]
  sig { override.params(url: String).returns(T::Boolean) }
  def self.match?(url)
    URL_MATCH_REGEX.match?(url)
  end

  # Extracts the package name from the provided URL and uses it to
  # generate the PyPI JSON API URL for the project.
  #
  # @param url [String] the URL used to generate values
  # @return [Hash]
  sig { params(url: String).returns(T::Hash[Symbol, T.untyped]) }
  def self.generate_input_values(url)
    match = File.basename(url).match(FILENAME_REGEX)
    return {} if match.blank?

    # PyPI normalizes `%20` and `_` in package names to `-`.
    normalized_name = T.must(match[:package_name]).gsub(/%20|_/, "-")

    { url: "https://pypi.org/pypi/#{normalized_name}/json" }
  end

  # Generates a PyPI JSON API URL for the project and identifies new
  # versions using {Json#find_versions} with a block.
  #
  # @param url [String] the URL of the content to check
  # @param regex [Regexp] a regex used for matching versions in content
  # @param provided_content [String, nil] content to check instead of
  #   fetching
  # @param options [Options] options to modify behavior
  # @return [Hash]
  sig {
    override.params(
      url: String,
      regex: T.nilable(Regexp),
      provided_content: T.nilable(String),
      options: Options,
      block: T.nilable(Proc),
    ).returns(T::Hash[Symbol, T.anything])
  }
  def self.find_versions(url:, regex: nil, provided_content: nil, options: Options.new, &block)
    generated = generate_input_values(url)
    return { matches: {}, regex:, url: } if generated.blank?

    Json.find_versions(
      url: generated[:url],
      regex:,
      provided_content:,
      options:,
      &block || DEFAULT_BLOCK
    )
  end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Homebrew/brew | https://github.com/Homebrew/brew/blob/fe0a384e3a04605192726c149570fbe33a8996b0/Library/Homebrew/livecheck/strategy/extract_plist.rb | Library/Homebrew/livecheck/strategy/extract_plist.rb | # typed: strict
# frozen_string_literal: true
require "bundle_version"
require "livecheck/strategic"
require "unversioned_cask_checker"
module Homebrew
module Livecheck
module Strategy
# The {ExtractPlist} strategy downloads the file at a URL and extracts
# versions from contained `.plist` files using {UnversionedCaskChecker}.
#
# In practice, this strategy operates by downloading very large files,
# so it's both slow and data-intensive. As such, the {ExtractPlist}
# strategy should only be used as an absolute last resort.
#
# This strategy is not applied automatically and it's necessary to use
# `strategy :extract_plist` in a `livecheck` block to apply it.
class ExtractPlist
  extend Strategic

  # A priority of zero causes livecheck to skip the strategy. We do this
  # for {ExtractPlist} so we can selectively apply it when appropriate.
  PRIORITY = 0

  # The `Regexp` used to determine if the strategy applies to the URL.
  URL_MATCH_REGEX = %r{^https?://}i

  # Whether the strategy can be applied to the provided URL.
  #
  # @param url [String] the URL to match against
  # @return [Boolean]
  sig { override.params(url: String).returns(T::Boolean) }
  def self.match?(url)
    URL_MATCH_REGEX.match?(url)
  end

  # A lightweight value wrapper around a bundle version, exposing its
  # `version` and `short_version` readers for use in `strategy` blocks.
  Item = Struct.new(
    :bundle_version,
    keyword_init: true,
  ) do
    extend Forwardable

    # @!attribute [r] version
    # @api public
    delegate version: :bundle_version

    # @!attribute [r] short_version
    # @api public
    delegate short_version: :bundle_version
  end

  # Identify versions from `Item`s produced using
  # {UnversionedCaskChecker} version information.
  #
  # When a block is given, its return value is normalized via
  # {Strategy.handle_block_return}; otherwise the `nice_version` of each
  # item's bundle version is collected (deduplicated).
  #
  # @param items [Hash] a hash of `Item`s containing version information
  # @param regex [Regexp, nil] a regex for use in a strategy block
  # @return [Array]
  sig {
    params(
      items: T::Hash[String, Item],
      regex: T.nilable(Regexp),
      block: T.nilable(Proc),
    ).returns(T::Array[String])
  }
  def self.versions_from_items(items, regex = nil, &block)
    if block
      # The regex is only passed to the block when one was provided.
      block_return_value = regex.present? ? yield(items, regex) : yield(items)
      return Strategy.handle_block_return(block_return_value)
    end

    items.filter_map do |_key, item|
      item.bundle_version.nice_version
    end.uniq
  end

  # Uses {UnversionedCaskChecker} on the provided cask to identify
  # versions from `plist` files.
  #
  # @param cask [Cask::Cask] the cask to check for version information
  # @param url [String, nil] an alternative URL to check for version
  #   information
  # @param regex [Regexp, nil] a regex for use in a strategy block
  # @param options [Options] options to modify behavior
  # @raise [ArgumentError] if a regex is provided without a `strategy`
  #   block or if no cask is provided
  # @return [Hash]
  sig {
    override(allow_incompatible: true).params(
      cask: Cask::Cask,
      url: T.nilable(String),
      regex: T.nilable(Regexp),
      options: Options,
      block: T.nilable(Proc),
    ).returns(T::Hash[Symbol, T.anything])
  }
  def self.find_versions(cask:, url: nil, regex: nil, options: Options.new, &block)
    if regex.present? && !block_given?
      raise ArgumentError,
            "#{Utils.demodulize(name)} only supports a regex when using a `strategy` block"
    end
    # NOTE(review): runtime guard against a missing cask despite the sig;
    # `T.unsafe` bypasses the static non-nil assumption.
    raise ArgumentError, "The #{Utils.demodulize(name)} strategy only supports casks." unless T.unsafe(cask)

    match_data = { matches: {}, regex:, url: }

    unversioned_cask_checker = if url.present? && url != cask.url.to_s
      # Create a copy of the `cask` that uses the `livecheck` block URL
      cask_copy = Cask::CaskLoader.load(cask.sourcefile_path)
      cask_copy.allow_reassignment = true
      cask_copy.url url
      UnversionedCaskChecker.new(cask_copy)
    else
      UnversionedCaskChecker.new(cask)
    end

    # Wrap each extracted bundle version in an `Item` so `strategy`
    # blocks receive a stable, documented interface.
    items = unversioned_cask_checker.all_versions.transform_values { |v| Item.new(bundle_version: v) }

    versions_from_items(items, regex, &block).each do |version_text|
      match_data[:matches][version_text] = Version.new(version_text)
    end

    match_data
  end
end
end
end
end
| ruby | BSD-2-Clause | fe0a384e3a04605192726c149570fbe33a8996b0 | 2026-01-04T15:37:27.366412Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.