repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/static_config_analysis.rb | lib/fluent/static_config_analysis.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/config'
require 'fluent/plugin'
module Fluent
# Static Analysis means analysing all plugins and Fluent::Element without invoking Plugin#configure
class StaticConfigAnalysis
  # Value objects produced by the analysis. Each pairs an (unconfigured)
  # plugin instance or section name with its raw config element.
  module Elem
    Input = Struct.new(:plugin, :config)
    Output = Struct.new(:plugin, :config)
    Filter = Struct.new(:plugin, :config)
    Label = Struct.new(:name, :config, :nodes)
    Worker = Struct.new(:ids, :config, :nodes)
  end

  # Aggregated result of one analysis run.
  Result = Struct.new(:tree, :outputs, :inputs, :filters, :labels) do
    # @return [Array] every plugin instance referenced by the configuration
    def all_plugins
      (outputs + inputs + filters).map(&:plugin)
    end
  end

  # @param conf [Fluent::Config::Element] parsed configuration root
  # @param workers [Integer] Number of workers
  # @return [Fluent::StaticConfigAnalysis::Result]
  def self.call(conf, workers: 1)
    new(workers).call(conf)
  end

  def initialize(workers)
    @workers = workers
    reset
  end

  # Walks the configuration tree and instantiates (without configuring)
  # every input/filter/output plugin it references.
  def call(config)
    reset

    tree = [
      static_worker_analyse(config),
      static_label_analyse(config),
      static_filter_and_output_analyse(config),
      static_input_analyse(config),
    ].flatten

    Result.new(tree, @outputs, @inputs, @filters, @labels.values)
  end

  private

  def reset
    @outputs = []
    @inputs = []
    @filters = []
    @labels = {}
  end

  # Analyses every <worker> section: validates worker id assignment and the
  # directives it contains, then recurses into its children.
  def static_worker_analyse(conf)
    available_worker_ids = [*0...@workers]
    ret = []
    supported_directives = %w[source match filter label]
    conf.elements(name: 'worker').each do |config|
      ids = parse_worker_id(config)
      # each worker id may be claimed by at most one <worker> section
      ids.each do |id|
        if available_worker_ids.include?(id)
          available_worker_ids.delete(id)
        else
          raise Fluent::ConfigError, "specified worker_id<#{id}> collisions is detected on <worker> directive. Available worker id(s): #{available_worker_ids}"
        end
      end

      config.elements.each do |elem|
        unless supported_directives.include?(elem.name)
          raise Fluent::ConfigError, "<worker> section cannot have <#{elem.name}> directive"
        end
      end

      nodes = [
        static_label_analyse(config),
        static_filter_and_output_analyse(config),
        static_input_analyse(config),
      ].flatten
      ret << Elem::Worker.new(ids, config, nodes)
    end

    ret
  end

  # Parses the <worker> argument ("N" or "N-M") and returns the array of
  # all worker ids it covers.
  # @raise [Fluent::ConfigError] for a missing, non-integer, out-of-range
  #   or inverted (N > M) worker id specification
  def parse_worker_id(conf)
    worker_id_str = conf.arg

    if worker_id_str.empty?
      raise Fluent::ConfigError, 'Missing worker id on <worker> directive'
    end

    l, r =
      begin
        worker_id_str.split('-', 2).map { |v| Integer(v) }
      rescue TypeError, ArgumentError
        raise Fluent::ConfigError, "worker id should be integer: #{worker_id_str}"
      end

    if l < 0 || l >= @workers
      raise Fluent::ConfigError, "worker id #{l} specified by <worker> directive is not allowed. Available worker id is between 0 and #{@workers-1}"
    end

    # e.g. specified one worker id like `<worker 0>`
    if r.nil?
      return [l]
    end

    if r < 0 || r >= @workers
      raise Fluent::ConfigError, "worker id #{r} specified by <worker> directive is not allowed. Available worker id is between 0 and #{@workers-1}"
    end

    if l > r
      raise Fluent::ConfigError, "greater first_worker_id<#{l}> than last_worker_id<#{r}> specified by <worker> directive is not allowed. Available multi worker assign syntax is <smaller_worker_id>-<greater_worker_id>"
    end

    # BUGFIX: "N-M" designates the whole range, so return every id in it.
    # Returning only the endpoints ([l, r]) made Elem::Worker.ids incomplete
    # and let interior-id collisions go undetected above.
    (l..r).to_a
  end

  # Analyses <label> sections; each label name must be unique.
  def static_label_analyse(conf)
    ret = []
    conf.elements(name: 'label').each do |e|
      name = e.arg
      if name.empty?
        raise ConfigError, 'Missing symbol argument on <label> directive'
      end

      if @labels[name]
        raise ConfigError, "Section <label #{name}> appears twice"
      end

      l = Elem::Label.new(name, e, static_filter_and_output_analyse(e))
      ret << l
      @labels[name] = l
    end

    ret
  end

  # Instantiates the plugin for every <filter> and <match> section.
  def static_filter_and_output_analyse(conf)
    ret = []
    conf.elements('filter', 'match').each do |e|
      type = e['@type']
      if type.nil? || type.empty?
        raise Fluent::ConfigError, "Missing '@type' parameter on <#{e.name}> directive"
      end

      if e.name == 'filter'
        f = Elem::Filter.new(Fluent::Plugin.new_filter(type), e)
        ret << f
        @filters << f
      else
        o = Elem::Output.new(Fluent::Plugin.new_output(type), e)
        ret << o
        @outputs << o
      end
    end

    ret
  end

  # Instantiates the plugin for every <source> section.
  def static_input_analyse(conf)
    ret = []
    conf.elements(name: 'source').each do |e|
      type = e['@type']
      if type.nil? || type.empty?
        raise Fluent::ConfigError, "Missing '@type' parameter on <#{e.name}> directive"
      end

      i = Elem::Input.new(Fluent::Plugin.new_input(type), e)
      @inputs << i
      ret << i
    end

    ret
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/capability.rb | lib/fluent/capability.rb | #
# Fluent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "fluent/env"
if Fluent.linux?
begin
require 'capng'
rescue LoadError
end
end
module Fluent
  if defined?(CapNG)
    # Thin delegating wrapper around CapNG for manipulating Linux process
    # capabilities. Only defined when the capng library loaded successfully.
    class Capability
      def initialize(target = nil, pid = nil)
        @capng = CapNG.new(target, pid)
      end

      # Capability handling is available on this platform.
      def usable?
        true
      end

      def apply(select_set)
        @capng.apply(select_set)
      end

      def clear(select_set)
        @capng.clear(select_set)
      end

      def have_capability?(type, capability)
        @capng.have_capability?(type, capability)
      end

      def update(action, type, capability_or_capability_array)
        @capng.update(action, type, capability_or_capability_array)
      end

      def have_capabilities?(select_set)
        @capng.have_capabilities?(select_set)
      end
    end
  else
    # Fallback used when capng is unavailable: identical interface, but
    # every operation is a no-op that reports failure.
    class Capability
      def initialize(target = nil, pid = nil)
      end

      # Capability handling is not available on this platform.
      def usable?
        false
      end

      # Each stub keeps the arity of the real implementation and returns false.
      define_method(:apply) { |_select_set| false }
      define_method(:clear) { |_select_set| false }
      define_method(:have_capability?) { |_type, _capability| false }
      define_method(:update) { |_action, _type, _capability_or_array| false }
      define_method(:have_capabilities?) { |_select_set| false }
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/source_only_buffer_agent.rb | lib/fluent/source_only_buffer_agent.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/agent'
require 'fluent/system_config'
module Fluent
# Agent used by with-source-only mode: it routes everything emitted by the
# <source> plugins into a file-buffer output until the mode is cancelled
# (SIGWINCH to the supervisor, per the log message below).
class SourceOnlyBufferAgent < Agent
# Use INSTANCE_ID to use the same base dir as the other workers.
# This will make recovery easier.
BUFFER_DIR_NAME = Fluent::INSTANCE_ID
# @param log [Fluent::Log]
# @param system_config system configuration; supplies root_dir and the
#   optional <source_only_buffer> overrides used by #configure
def initialize(log:, system_config:)
super(log: log)
@default_buffer_path = File.join(system_config.root_dir || DEFAULT_BACKUP_DIR, 'source-only-buffer', BUFFER_DIR_NAME)
# stringify keys so they can be merged into a Config::Element below
@optional_buffer_config = system_config.source_only_buffer.to_h.transform_keys(&:to_s)
@base_buffer_dir = nil
@actual_buffer_dir = nil
end
# Builds a synthetic configuration: a single <match **> buffer output that
# re-emits to the @ROOT label.
# @param flush [Boolean] when true, buffered data is flushed (at shutdown);
#   when false, flushing is disabled (flush_thread_count 0) and data only
#   accumulates.
def configure(flush: false)
buffer_config = @optional_buffer_config.compact
buffer_config['flush_at_shutdown'] = flush ? 'true' : 'false'
buffer_config['flush_thread_count'] = 0 unless flush
buffer_config['path'] ||= @default_buffer_path
super(
Config::Element.new('SOURCE_ONLY_BUFFER', '', {}, [
Config::Element.new('match', '**', {'@type' => 'buffer', '@label' => '@ROOT'}, [
Config::Element.new('buffer', '', buffer_config, [])
])
])
)
@base_buffer_dir = buffer_config['path']
# It can be "#{@base_buffer_dir}/worker#{fluentd_worker_id}/" when using multiple workers
@actual_buffer_dir = File.dirname(outputs[0].buffer.path)
unless flush
log.info "with-source-only: the emitted data will be stored in the buffer files under" +
" #{@base_buffer_dir}. You can send SIGWINCH to the supervisor process to cancel" +
" with-source-only mode and process data."
end
end
# Removes the buffer directory if it is empty; otherwise warns with recovery
# instructions so remaining buffer files are not silently lost.
def cleanup
# Dir.empty? raises (e.g. Errno::ENOENT) when the dir is gone; treat that
# as "nothing left to warn about".
unless (Dir.empty?(@actual_buffer_dir) rescue true)
log.warn "some buffer files remain in #{@base_buffer_dir}." +
" Please consider recovering or saving the buffer files in the directory." +
" To recover them, you can set the buffer path manually to system config and" +
" retry, i.e., restart Fluentd with with-source-only mode and send SIGWINCH again." +
" Config Example:\n#{config_example_to_recover(@base_buffer_dir)}"
return
end
begin
FileUtils.remove_dir(@base_buffer_dir)
rescue Errno::ENOENT
# This worker doesn't need to do anything. Another worker may remove the dir first.
rescue => e
log.warn "failed to remove the buffer directory: #{@base_buffer_dir}", error: e
end
end
# Error events cannot be routed anywhere in this mode, so just log them.
def emit_error_event(tag, time, record, error)
error_info = {error: error, location: (error.backtrace ? error.backtrace.first : nil), tag: tag, time: time, record: record}
log.warn "SourceOnlyBufferAgent: dump an error event:", error_info
end
def handle_emits_error(tag, es, error)
error_info = {error: error, location: (error.backtrace ? error.backtrace.first : nil), tag: tag}
log.warn "SourceOnlyBufferAgent: emit transaction failed:", error_info
log.warn_backtrace
end
private
# Renders the <system> snippet shown in the cleanup warning above.
def config_example_to_recover(path)
<<~EOC
<system>
<source_only_buffer>
path #{path}
</source_only_buffer>
</system>
EOC
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/label.rb | lib/fluent/label.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/agent'
module Fluent
# A named routing namespace (<label @name>): events emitted into the label
# are processed by its own filter/match pipeline.
class Label < Agent
  # Set by the root agent after construction; error handling is delegated to it.
  attr_accessor :root_agent

  def initialize(name, log:)
    super(log: log)
    @context = name
    @root_agent = nil
  end

  # A label must route its events somewhere, so at least one <match>
  # section is mandatory.
  def configure(conf)
    super
    if conf.elements('match').empty?
      raise ConfigError, "Missing <match> sections in <label #{@context}> section"
    end
  end

  # Error events raised inside a label are escalated to the root agent.
  def emit_error_event(tag, time, record, e)
    @root_agent.emit_error_event(tag, time, record, e)
  end

  def handle_emits_error(tag, es, e)
    @root_agent.handle_emits_error(tag, es, e)
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/set_tag_key_mixin.rb | lib/fluent/compat/set_tag_key_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/config/error'
require 'fluent/config/types'
require 'fluent/compat/record_filter_mixin'
module Fluent
module Compat
# v0.12 compatibility mixin: when `include_tag_key` is enabled, copies the
# event tag into each record under `tag_key` (default: 'tag').
module SetTagKeyMixin
  include RecordFilterMixin

  attr_accessor :include_tag_key, :tag_key

  # Reads the legacy `include_tag_key` / `tag_key` parameters.
  def configure(conf)
    @include_tag_key = false
    super

    raw = conf['include_tag_key']
    unless raw.nil?
      parsed = Fluent::Config.bool_value(raw)
      if parsed.nil?
        raise Fluent::ConfigError, "Invalid boolean expression '#{raw}' for include_tag_key parameter"
      end
      @include_tag_key = parsed
    end

    @tag_key = conf['tag_key'] || 'tag' if @include_tag_key
  end

  # Mutates the record in place, adding the tag when enabled.
  def filter_record(tag, time, record)
    super
    record[@tag_key] = tag if @include_tag_key
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/formatter_utils.rb | lib/fluent/compat/formatter_utils.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin_helper/compat_parameters'
module Fluent
module Compat
# Converts v0.12-style flat formatter/inject parameters into the v1
# <inject> and <format> sections expected by the plugin helpers.
module FormatterUtils
INJECT_PARAMS = Fluent::PluginHelper::CompatParameters::INJECT_PARAMS
FORMATTER_PARAMS = Fluent::PluginHelper::CompatParameters::FORMATTER_PARAMS
# Prepended onto a formatter's singleton class so that the owner plugin's
# inject helper runs on each record before formatting.
module InjectMixin
def format(tag, time, record)
r = owner.inject_values_to_record(tag, time, record)
super(tag, time, r)
end
end
# Rewrites +conf+ in place: maps legacy parameter names to their v1
# equivalents and appends <inject>/<format> elements when any were found.
# No-op if the config already uses v1 sections.
def self.convert_formatter_conf(conf)
return if conf.elements(name: 'inject').first || conf.elements(name: 'format').first
inject_params = {}
# map legacy flat keys to their <inject> equivalents (nil means "drop")
INJECT_PARAMS.each do |older, newer|
next unless newer
if conf.has_key?(older)
inject_params[newer] = conf[older]
end
end
if conf.has_key?('include_time_key') && Fluent::Config.bool_value(conf['include_time_key'])
inject_params['time_key'] ||= 'time'
inject_params['time_type'] ||= 'string'
end
# time_as_epoch overrides the string time_type set above
if conf.has_key?('time_as_epoch') && Fluent::Config.bool_value(conf['time_as_epoch'])
inject_params['time_type'] = 'unixtime'
end
if conf.has_key?('localtime') || conf.has_key?('utc')
if conf.has_key?('localtime') && conf.has_key?('utc')
raise Fluent::ConfigError, "both of utc and localtime are specified, use only one of them"
elsif conf.has_key?('localtime')
inject_params['localtime'] = Fluent::Config.bool_value(conf['localtime'])
elsif conf.has_key?('utc')
inject_params['localtime'] = !(Fluent::Config.bool_value(conf['utc']))
# Specifying "localtime false" means using UTC in TimeFormatter
# And specifying "utc" is different from specifying "timezone +0000"(it's not always UTC).
# There are difference between "Z" and "+0000" in timezone formatting.
# TODO: add kwargs to TimeFormatter to specify "using localtime", "using UTC" or "using specified timezone" in more explicit way
end
end
if conf.has_key?('include_tag_key') && Fluent::Config.bool_value(conf['include_tag_key'])
inject_params['tag_key'] ||= 'tag'
end
unless inject_params.empty?
conf.elements << Fluent::Config::Element.new('inject', '', inject_params, [])
end
formatter_params = {}
FORMATTER_PARAMS.each do |older, newer|
next unless newer
if conf.has_key?(older)
formatter_params[newer] = conf[older]
end
end
unless formatter_params.empty?
conf.elements << Fluent::Config::Element.new('format', '', formatter_params, [])
end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/string_util.rb | lib/fluent/compat/string_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
module StringUtil
  # Matches +regexp+ against +string+, transparently repairing strings that
  # contain byte sequences invalid for their declared encoding: invalid bytes
  # are replaced with '?' (via String#scrub) and the match is retried.
  #
  # @param regexp [Regexp]
  # @param string [String]
  # @return [MatchData, nil] the match result after (possible) scrubbing
  def match_regexp(regexp, string)
    regexp.match(string)
  rescue ArgumentError => e
    # BUGFIX: the original used `e.message.index(...).zero?`, which raised
    # NoMethodError (nil.zero?) whenever the message did not contain the
    # phrase at all. Only the invalid-byte-sequence error is recoverable;
    # everything else is re-raised. The dead `return true` after the match
    # was also removed.
    raise e unless e.message.start_with?("invalid byte sequence in")
    $log.info "invalid byte sequence is replaced in `#{string}`"
    string = string.scrub('?')
    retry
  end
  module_function :match_regexp
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/filter.rb | lib/fluent/compat/filter.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/plugin/filter'
require 'fluent/compat/call_super_mixin'
require 'fluent/compat/formatter_utils'
require 'fluent/compat/parser_utils'
module Fluent
module Compat
# Base class for v0.12-style filter plugins running on the v1 API.
class Filter < Fluent::Plugin::Filter
# TODO: warn when deprecated
helpers_internal :inject
def initialize
super
# Prepend once per class so that 3rd-party plugins which forget to call
# super still get the compat behavior (see CallSuperMixin).
unless self.class.ancestors.include?(Fluent::Compat::CallSuperMixin)
self.class.prepend Fluent::Compat::CallSuperMixin
end
end
# Converts legacy flat parser/formatter parameters into v1 sections before
# the real configure runs.
def configure(conf)
ParserUtils.convert_parser_conf(conf)
FormatterUtils.convert_formatter_conf(conf)
super
end
# These definitions are to get instance methods of superclass of 3rd party plugins
# to make it sure to call super
def start
super
# Wire the inject helper into the formatter (unless the formatter already
# handles tag/time itself via the legacy HandleTagAndTimeMixin).
if instance_variable_defined?(:@formatter) && @inject_config
unless @formatter.class.ancestors.include?(Fluent::Compat::HandleTagAndTimeMixin)
if @formatter.respond_to?(:owner) && !@formatter.owner
@formatter.owner = self
@formatter.singleton_class.prepend FormatterUtils::InjectMixin
end
end
end
end
def before_shutdown
super
end
def shutdown
super
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/output.rb | lib/fluent/compat/output.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/plugin/buffer'
require 'fluent/plugin/output'
require 'fluent/plugin/bare_output'
require 'fluent/compat/call_super_mixin'
require 'fluent/compat/formatter_utils'
require 'fluent/compat/handle_tag_and_time_mixin'
require 'fluent/compat/parser_utils'
require 'fluent/compat/propagate_default'
require 'fluent/compat/record_filter_mixin'
require 'fluent/compat/output_chain'
require 'fluent/timezone'
require 'fluent/mixin'
require 'fluent/process'
require 'fluent/event'
require 'fluent/plugin_helper/compat_parameters'
require 'time'
module Fluent
module Compat
NULL_OUTPUT_CHAIN = NullOutputChain.instance
BufferQueueLimitError = ::Fluent::Plugin::Buffer::BufferOverflowError
# Helpers for locating v1-style sections inside a plugin's configuration.
module CompatOutputUtils
  # @return the first <buffer> section of +conf+, or nil when absent
  def self.buffer_section(conf)
    conf.elements(name: 'buffer')[0]
  end

  # @return the first <secondary> section of +conf+, or nil when absent
  def self.secondary_section(conf)
    conf.elements(name: 'secondary')[0]
  end
end
# Extends a buffer chunk so it quacks like a v0.12 event stream:
# enumeration maps to msgpack_each, the raw stream to #read, and the
# routing key to the chunk metadata's tag.
module BufferedEventStreamMixin
include Enumerable
def repeatable?
true
end
def each(&block)
msgpack_each(&block)
end
def to_msgpack_stream
read
end
def key
metadata.tag
end
end
# Extends a chunk for v0.12 TimeSlicedOutput: derives a timekey for chunks
# that don't have one (e.g. restored from disk) and exposes #key as the
# formatted time slice string.
module AddTimeSliceKeyToChunkMixin
def time_slice_format=(format)
@_time_slice_format = format
end
def timekey=(unit)
@_timekey = unit
end
def timezone=(tz)
@_timezone = tz
end
# Ensures metadata.timekey is set: first by parsing the timestamp embedded
# in a buffered/queued chunk file name, otherwise from the chunk's creation
# time rounded down to the slice boundary.
def assume_timekey!
@_formatter = Fluent::TimeFormatter.new(@_time_slice_format, nil, @_timezone)
return if self.metadata.timekey
if self.respond_to?(:path) && self.path =~ /\.(\d+)\.(?:b|q)(?:[a-z0-9]+)/
begin
# NOTE(review): Time.parse's second argument is the reference "now"
# time, not a format string — this looks like it was meant to be
# Time.strptime($1, @_time_slice_format). Confirm against upstream
# before relying on this path.
self.metadata.timekey = Time.parse($1, @_time_slice_format).to_i
rescue ArgumentError
# unknown format / value as timekey
end
end
unless self.metadata.timekey
# file creation time is assumed in the time range of that time slice
# because the first record should be in that range.
time_int = self.created_at.to_i
self.metadata.timekey = time_int - (time_int % @_timekey)
end
end
def key
@_formatter.call(self.metadata.timekey)
end
end
# Exposes the routing key a v0.12 plugin assigned via Buffer#emit, which is
# stored in the chunk metadata's variables under :key.
module AddKeyToChunkMixin
  def key
    metadata.variables[:key]
  end
end
# v0.12 compatibility: legacy plugins call Chunk#size expecting a byte count.
module ChunkSizeCompatMixin
  def size
    bytesize
  end

  # @size plus @adding_size of the underlying chunk — presumably the stored
  # record count plus records in the in-flight append; confirm against
  # Fluent::Plugin::Buffer::Chunk.
  def size_of_events
    @size + @adding_size
  end
end
# Wraps #write so every chunk handed to a v0.12 BufferedOutput is first
# extended with the compat chunk mixins defined above.
module BufferedChunkMixin
# prepend this module to BufferedOutput (including ObjectBufferedOutput) plugin singleton class
def write(chunk)
chunk.extend(ChunkSizeCompatMixin)
chunk.extend(ChunkMessagePackEventStreamer)
# only chunks written with an explicit key (Buffer#emit) get #key
chunk.extend(AddKeyToChunkMixin) if chunk.metadata.variables && chunk.metadata.variables.has_key?(:key)
super
end
end
# Wraps #write so every chunk handed to a v0.12 TimeSlicedOutput is extended
# with the compat mixins and has its time slice key assigned before writing.
module TimeSliceChunkMixin
# prepend this module to TimeSlicedOutput plugin singleton class
def write(chunk)
chunk.extend(ChunkSizeCompatMixin)
chunk.extend(ChunkMessagePackEventStreamer)
chunk.extend(AddTimeSliceKeyToChunkMixin)
chunk.time_slice_format = @time_slice_format
chunk.timekey = @_timekey
chunk.timezone = @timezone
chunk.assume_timekey!
super
end
end
# Base class for v0.12-style non-buffered (synchronous) output plugins.
class Output < Fluent::Plugin::Output
# TODO: warn when deprecated
helpers_internal :event_emitter, :inject
# Declares which v1 execution styles map onto this v0.12 class.
# Note: returns nil for feature names not listed (case has no else).
def support_in_v12_style?(feature)
case feature
when :synchronous then true
when :buffered then false
when :delayed_commit then false
when :custom_format then false
end
end
# v1 API entry point; forwards to the v0.12-style #emit implemented by
# the concrete plugin.
def process(tag, es)
emit(tag, es, NULL_OUTPUT_CHAIN)
end
def initialize
super
# Prepend once per class so that 3rd-party plugins which forget to call
# super still get the compat behavior (see CallSuperMixin).
unless self.class.ancestors.include?(Fluent::Compat::CallSuperMixin)
self.class.prepend Fluent::Compat::CallSuperMixin
end
end
# Converts legacy flat parser/formatter parameters into v1 sections before
# the real configure runs.
def configure(conf)
ParserUtils.convert_parser_conf(conf)
FormatterUtils.convert_formatter_conf(conf)
super
end
def start
super
# Wire the inject helper into the formatter (unless the formatter already
# handles tag/time itself via the legacy HandleTagAndTimeMixin).
if instance_variable_defined?(:@formatter) && @inject_config
unless @formatter.class.ancestors.include?(Fluent::Compat::HandleTagAndTimeMixin)
if @formatter.respond_to?(:owner) && !@formatter.owner
@formatter.owner = self
@formatter.singleton_class.prepend FormatterUtils::InjectMixin
end
end
end
end
end
# Base class for v0.12 multi-output style plugins; BareOutput provides no
# buffering of its own.
class MultiOutput < Fluent::Plugin::BareOutput
# TODO: warn when deprecated
helpers_internal :event_emitter
# v1 API entry point; forwards to the v0.12-style #emit implemented by
# the concrete plugin.
def process(tag, es)
emit(tag, es, NULL_OUTPUT_CHAIN)
end
end
# Base class emulating the v0.12 BufferedOutput API on top of the v1
# Fluent::Plugin::Output. Legacy flat buffer parameters are translated into
# a v1 <buffer> section, and overridden legacy hooks (#emit, #format_stream)
# are detected and honored in #handle_stream_simple.
class BufferedOutput < Fluent::Plugin::Output
# TODO: warn when deprecated
helpers_internal :event_emitter, :inject
# Declares which v1 execution styles map onto this v0.12 class.
# Note: returns nil for feature names not listed (case has no else).
def support_in_v12_style?(feature)
case feature
when :synchronous then false
when :buffered then true
when :delayed_commit then false
when :custom_format then true
end
end
desc 'The buffer type (memory, file)'
config_param :buffer_type, :string, default: 'memory'
desc 'The interval between data flushes.'
config_param :flush_interval, :time, default: 60
config_param :try_flush_interval, :float, default: 1
# NOTE(review): this desc says `retry_value` but the parameter it disables
# is `retry_limit` below — looks like a typo in the help text.
desc 'If true, the value of `retry_value` is ignored and there is no limit'
config_param :disable_retry_limit, :bool, default: false
desc 'The limit on the number of retries before buffered data is discarded'
config_param :retry_limit, :integer, default: 17
desc 'The initial intervals between write retries.'
config_param :retry_wait, :time, default: 1.0
desc 'The maximum intervals between write retries.'
config_param :max_retry_wait, :time, default: nil
desc 'The number of threads to flush the buffer.'
config_param :num_threads, :integer, default: 1
desc 'The interval between data flushes for queued chunk.'
config_param :queued_chunk_flush_interval, :time, default: 1
desc 'The size of each buffer chunk.'
config_param :buffer_chunk_limit, :size, default: 8*1024*1024
desc 'The length limit of the chunk queue.'
config_param :buffer_queue_limit, :integer, default: 256
desc 'The action when the size of buffer queue exceeds the buffer_queue_limit.'
config_param :buffer_queue_full_action, :enum, list: [:exception, :block, :drop_oldest_chunk], default: :exception
config_param :flush_at_shutdown, :bool, default: true
# mapping of legacy (v0.12) buffer parameter names to v1 names
BUFFER_PARAMS = Fluent::PluginHelper::CompatParameters::BUFFER_PARAMS
def self.propagate_default_params
BUFFER_PARAMS
end
include PropagateDefault
# Detects whether the config is v0 (flat params) or v1 (<buffer> section),
# converts v0 params into a <buffer> element, and records which legacy
# hooks the concrete plugin overrides.
def configure(conf)
bufconf = CompatOutputUtils.buffer_section(conf)
config_style = (bufconf ? :v1 : :v0)
if config_style == :v0
buf_params = {
"flush_mode" => "interval",
"retry_type" => "exponential_backoff",
}
BUFFER_PARAMS.each do |older, newer|
next unless newer
if conf.has_key?(older)
# v0 'exception' was renamed to 'throw_exception' in v1
if older == 'buffer_queue_full_action' && conf[older] == 'exception'
buf_params[newer] = 'throw_exception'
else
buf_params[newer] = conf[older]
end
end
end
conf.elements << Fluent::Config::Element.new('buffer', '', buf_params, [])
end
@includes_record_filter = self.class.ancestors.include?(Fluent::Compat::RecordFilterMixin)
methods_of_plugin = self.class.instance_methods(false)
@overrides_emit = methods_of_plugin.include?(:emit)
# RecordFilter mixin uses its own #format_stream method implementation
@overrides_format_stream = methods_of_plugin.include?(:format_stream) || @includes_record_filter
ParserUtils.convert_parser_conf(conf)
FormatterUtils.convert_formatter_conf(conf)
super
if config_style == :v1
unless @buffer_config.chunk_keys.empty?
raise Fluent::ConfigError, "this plugin '#{self.class}' cannot handle arguments for <buffer ...> section"
end
end
self.extend BufferedChunkMixin
if @overrides_emit
# Capture the key/data the plugin's own #emit pushes into the buffer so
# handle_stream_simple can re-route them through the v1 write path.
self.singleton_class.module_eval do
attr_accessor :last_emit_via_buffer
end
output_plugin = self
m = Module.new do
define_method(:emit) do |key, data, chain|
# receivers of this method are buffer instances
output_plugin.last_emit_via_buffer = [key, data]
end
end
@buffer.extend m
end
end
# original implementation of v0.12 BufferedOutput
def emit(tag, es, chain, key="")
# this method will not be used except for the case that plugin calls super
@emit_count_metrics.inc
data = format_stream(tag, es)
if @buffer.emit(key, data, chain)
submit_flush
end
end
def submit_flush
# nothing todo: blank method to be called from #emit of 3rd party plugins
end
def format_stream(tag, es)
# this method will not be used except for the case that plugin calls super
out = ''
es.each do |time, record|
out << format(tag, time, record)
end
out
end
# #format MUST be implemented in plugin
# #write is also
# This method overrides Fluent::Plugin::Output#handle_stream_simple
# because v0.12 BufferedOutput may overrides #format_stream, but original #handle_stream_simple method doesn't consider about it
def handle_stream_simple(tag, es, enqueue: false)
# Path 1: plugin overrides #emit — run it, capture what it pushed into
# the buffer, then write that payload through the v1 buffer API.
if @overrides_emit
current_emit_count = @emit_count_metrics.get
size = es.size
key = data = nil
begin
emit(tag, es, NULL_OUTPUT_CHAIN)
key, data = self.last_emit_via_buffer
ensure
# the plugin's emit already counted; restore to avoid double counting
@emit_count_metrics.set(current_emit_count)
self.last_emit_via_buffer = nil
end
# on-the-fly key assignment can be done, and it's not configurable if Plugin#emit does it dynamically
meta = @buffer.metadata(variables: (key && !key.empty? ? {key: key} : nil))
write_guard do
@buffer.write({meta => data}, format: ->(_data){ _data }, size: ->(){ size }, enqueue: enqueue)
end
@emit_records_metrics.add(es.size)
@emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
return [meta]
end
# Path 2: plugin overrides #format_stream — format the whole stream at
# once and store the resulting blob.
if @overrides_format_stream
meta = metadata(nil, nil, nil)
size = es.size
bulk = format_stream(tag, es)
write_guard do
@buffer.write({meta => bulk}, format: ->(_data){ _data }, size: ->(){ size }, enqueue: enqueue)
end
@emit_records_metrics.add(es.size)
@emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
return [meta]
end
# Path 3: default — format record by record.
meta = metadata(nil, nil, nil)
size = es.size
data = es.map{|time,record| format(tag, time, record) }
write_guard do
@buffer.write({meta => data}, enqueue: enqueue)
end
@emit_records_metrics.add(es.size)
@emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
[meta]
end
def extract_placeholders(str, metadata)
raise "BUG: compat plugin does not support extract_placeholders: use newer plugin API"
end
def initialize
super
# Prepend once per class so that 3rd-party plugins which forget to call
# super still get the compat behavior (see CallSuperMixin).
unless self.class.ancestors.include?(Fluent::Compat::CallSuperMixin)
self.class.prepend Fluent::Compat::CallSuperMixin
end
end
def start
super
# Wire the inject helper into the formatter (unless the formatter already
# handles tag/time itself via the legacy HandleTagAndTimeMixin).
if instance_variable_defined?(:@formatter) && @inject_config
unless @formatter.class.ancestors.include?(Fluent::Compat::HandleTagAndTimeMixin)
if @formatter.respond_to?(:owner) && !@formatter.owner
@formatter.owner = self
@formatter.singleton_class.prepend FormatterUtils::InjectMixin
end
end
end
end
# Legacy no-op: multi-process detachment is not supported on the v1 core.
def detach_process(&block)
log.warn "detach_process is not supported in this version. ignored."
block.call
end
def detach_multi_process(&block)
log.warn "detach_process is not supported in this version. ignored."
block.call
end
end
# Base class emulating the v0.12 ObjectBufferedOutput API: chunks are
# grouped per tag and handed whole to #write_objects.
class ObjectBufferedOutput < Fluent::Plugin::Output
  # TODO: warn when deprecated
  helpers_internal :event_emitter, :inject

  # This plugin cannot inherit BufferedOutput because #configure sets chunk_key 'tag'
  # to flush chunks per tags, but BufferedOutput#configure doesn't allow setting chunk_key
  # in v1 style configuration

  # Declares which v1 execution styles map onto this v0.12 class.
  # Note: returns nil for feature names not listed (case has no else).
  def support_in_v12_style?(feature)
    case feature
    when :synchronous then false
    when :buffered then true
    when :delayed_commit then false
    when :custom_format then false
    end
  end

  desc 'The buffer type (memory, file)'
  config_param :buffer_type, :string, default: 'memory'
  desc 'The interval between data flushes.'
  config_param :flush_interval, :time, default: 60
  config_param :try_flush_interval, :float, default: 1
  # (fixed desc typo: the parameter being disabled is `retry_limit`)
  desc 'If true, the value of `retry_limit` is ignored and there is no limit'
  config_param :disable_retry_limit, :bool, default: false
  desc 'The limit on the number of retries before buffered data is discarded'
  config_param :retry_limit, :integer, default: 17
  desc 'The initial intervals between write retries.'
  config_param :retry_wait, :time, default: 1.0
  desc 'The maximum intervals between write retries.'
  config_param :max_retry_wait, :time, default: nil
  desc 'The number of threads to flush the buffer.'
  config_param :num_threads, :integer, default: 1
  desc 'The interval between data flushes for queued chunk.'
  config_param :queued_chunk_flush_interval, :time, default: 1
  desc 'The size of each buffer chunk.'
  config_param :buffer_chunk_limit, :size, default: 8*1024*1024
  desc 'The length limit of the chunk queue.'
  config_param :buffer_queue_limit, :integer, default: 256
  desc 'The action when the size of buffer queue exceeds the buffer_queue_limit.'
  config_param :buffer_queue_full_action, :enum, list: [:exception, :block, :drop_oldest_chunk], default: :exception
  config_param :flush_at_shutdown, :bool, default: true

  config_set_default :time_as_integer, true

  # mapping of legacy (v0.12) buffer parameter names to v1 names
  BUFFER_PARAMS = Fluent::PluginHelper::CompatParameters::BUFFER_PARAMS
  def self.propagate_default_params
    BUFFER_PARAMS
  end
  include PropagateDefault

  # Detects v0 vs v1 config style; for v0, converts flat buffer params into
  # a <buffer tag> element (chunking per tag).
  def configure(conf)
    bufconf = CompatOutputUtils.buffer_section(conf)
    config_style = (bufconf ? :v1 : :v0)
    if config_style == :v0
      buf_params = {
        "flush_mode" => "interval",
        "retry_type" => "exponential_backoff",
      }
      BUFFER_PARAMS.each do |older, newer|
        next unless newer
        if conf.has_key?(older)
          # v0 'exception' was renamed to 'throw_exception' in v1
          if older == 'buffer_queue_full_action' && conf[older] == 'exception'
            buf_params[newer] = 'throw_exception'
          else
            buf_params[newer] = conf[older]
          end
        end
      end

      conf.elements << Fluent::Config::Element.new('buffer', 'tag', buf_params, [])
    end

    ParserUtils.convert_parser_conf(conf)
    FormatterUtils.convert_formatter_conf(conf)

    super

    if config_style == :v1
      # BUGFIX: this was `if ... == ['tag']`, which raised exactly when the
      # only supported configuration (<buffer tag>) was used. The error
      # message makes the intent clear: anything OTHER than <buffer tag>
      # must be rejected.
      unless @buffer_config.chunk_keys == ['tag']
        raise Fluent::ConfigError, "this plugin '#{self.class}' allows <buffer tag> only"
      end
    end

    self.extend BufferedChunkMixin
  end

  def format_stream(tag, es) # for BufferedOutputTestDriver
    if @compress == :gzip
      es.to_compressed_msgpack_stream(time_int: @time_as_integer)
    else
      es.to_msgpack_stream(time_int: @time_as_integer)
    end
  end

  # v1 API entry point; hands the whole chunk to the v0.12-style
  # #write_objects implemented by the concrete plugin.
  def write(chunk)
    write_objects(chunk.metadata.tag, chunk)
  end

  def extract_placeholders(str, metadata)
    raise "BUG: compat plugin does not support extract_placeholders: use newer plugin API"
  end

  def initialize
    super
    # Prepend once per class so that 3rd-party plugins which forget to call
    # super still get the compat behavior (see CallSuperMixin).
    unless self.class.ancestors.include?(Fluent::Compat::CallSuperMixin)
      self.class.prepend Fluent::Compat::CallSuperMixin
    end
  end

  def start
    super
    # Wire the inject helper into the formatter (unless the formatter already
    # handles tag/time itself via the legacy HandleTagAndTimeMixin).
    if instance_variable_defined?(:@formatter) && @inject_config
      unless @formatter.class.ancestors.include?(Fluent::Compat::HandleTagAndTimeMixin)
        if @formatter.respond_to?(:owner) && !@formatter.owner
          @formatter.owner = self
          @formatter.singleton_class.prepend FormatterUtils::InjectMixin
        end
      end
    end
  end

  # Legacy no-op: multi-process detachment is not supported on the v1 core.
  def detach_process(&block)
    log.warn "detach_process is not supported in this version. ignored."
    block.call
  end

  def detach_multi_process(&block)
    log.warn "detach_process is not supported in this version. ignored."
    block.call
  end
end
class TimeSlicedOutput < Fluent::Plugin::Output
# TODO: warn when deprecated
helpers_internal :event_emitter, :inject
def support_in_v12_style?(feature)
case feature
when :synchronous then false
when :buffered then true
when :delayed_commit then false
when :custom_format then true
end
end
desc 'The buffer type (memory, file)'
config_param :buffer_type, :string, default: 'file'
desc 'The interval between data flushes.'
config_param :flush_interval, :time, default: nil
config_param :try_flush_interval, :float, default: 1
desc 'If true, the value of `retry_value` is ignored and there is no limit'
config_param :disable_retry_limit, :bool, default: false
desc 'The limit on the number of retries before buffered data is discarded'
config_param :retry_limit, :integer, default: 17
desc 'The initial intervals between write retries.'
config_param :retry_wait, :time, default: 1.0
desc 'The maximum intervals between write retries.'
config_param :max_retry_wait, :time, default: nil
desc 'The number of threads to flush the buffer.'
config_param :num_threads, :integer, default: 1
desc 'The interval between data flushes for queued chunk.'
config_param :queued_chunk_flush_interval, :time, default: 1
desc 'The time format used as part of the file name.'
config_param :time_slice_format, :string, default: '%Y%m%d'
desc 'The amount of time Fluentd will wait for old logs to arrive.'
config_param :time_slice_wait, :time, default: 10*60
desc 'Parse the time value in the specified timezone'
config_param :timezone, :string, default: nil
desc 'The size of each buffer chunk.'
config_param :buffer_chunk_limit, :size, default: 256*1024*1024
desc 'The length limit of the chunk queue.'
config_param :buffer_queue_limit, :integer, default: 256
desc 'The action when the size of buffer queue exceeds the buffer_queue_limit.'
config_param :buffer_queue_full_action, :enum, list: [:exception, :block, :drop_oldest_chunk], default: :exception
config_param :flush_at_shutdown, :bool, default: false
attr_accessor :localtime
config_section :buffer do
config_set_default :@type, 'file'
end
BUFFER_PARAMS = Fluent::PluginHelper::CompatParameters::BUFFER_PARAMS.merge(Fluent::PluginHelper::CompatParameters::BUFFER_TIME_SLICED_PARAMS)
def initialize
super
@localtime = true
unless self.class.ancestors.include?(Fluent::Compat::CallSuperMixin)
self.class.prepend Fluent::Compat::CallSuperMixin
end
end
def self.propagate_default_params
BUFFER_PARAMS
end
include PropagateDefault
def configure(conf)
bufconf = CompatOutputUtils.buffer_section(conf)
config_style = (bufconf ? :v1 : :v0)
if config_style == :v0
buf_params = {
"flush_mode" => (conf['flush_interval'] ? "interval" : "lazy"),
"retry_type" => "exponential_backoff",
}
BUFFER_PARAMS.each do |older, newer|
next unless newer
if conf.has_key?(older)
if older == 'buffer_queue_full_action' && conf[older] == 'exception'
buf_params[newer] = 'throw_exception'
else
buf_params[newer] = conf[older]
end
end
end
if conf['timezone']
Fluent::Timezone.validate!(conf['timezone'])
elsif conf['utc']
# v0.12 assumes UTC without any configuration
# 'localtime=false && no timezone key' means UTC
conf['localtime'] = "false"
conf.delete('utc')
elsif conf['localtime']
conf['timezone'] = Time.now.strftime('%z')
conf['localtime'] = "true"
else
# v0.12 assumes UTC without any configuration
# 'localtime=false && no timezone key' means UTC
conf['localtime'] = "false"
end
@_timekey = case conf['time_slice_format']
when /\%S/ then 1
when /\%M/ then 60
when /\%H/ then 3600
when /\%d/ then 86400
when nil then 86400 # default value of TimeSlicedOutput.time_slice_format is '%Y%m%d'
else
raise Fluent::ConfigError, "time_slice_format only with %Y or %m is too long"
end
buf_params["timekey"] = @_timekey
conf.elements << Fluent::Config::Element.new('buffer', 'time', buf_params, [])
end
ParserUtils.convert_parser_conf(conf)
FormatterUtils.convert_formatter_conf(conf)
super
if config_style == :v1
if @buffer_config.chunk_keys == ['tag']
raise Fluent::ConfigError, "this plugin '#{self.class}' allows <buffer tag> only"
end
end
self.extend TimeSliceChunkMixin
end
def start
super
if instance_variable_defined?(:@formatter) && @inject_config
unless @formatter.class.ancestors.include?(Fluent::Compat::HandleTagAndTimeMixin)
if @formatter.respond_to?(:owner) && !@formatter.owner
@formatter.owner = self
@formatter.singleton_class.prepend FormatterUtils::InjectMixin
end
end
end
end
def detach_process(&block)
log.warn "detach_process is not supported in this version. ignored."
block.call
end
def detach_multi_process(&block)
log.warn "detach_process is not supported in this version. ignored."
block.call
end
# Original TimeSlicedOutput#emit doesn't call #format_stream
# #format MUST be implemented in plugin
# #write is also
def extract_placeholders(str, metadata)
raise "BUG: compat plugin does not support extract_placeholders: use newer plugin API"
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/detach_process_mixin.rb | lib/fluent/compat/detach_process_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
module DetachProcessMixin
def detach_process
log.warn "#{__method__} is not supported in this version. ignored."
yield
end
end
module DetachMultiProcessMixin
def detach_multi_process
log.warn "#{__method__} is not supported in this version. ignored."
yield
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/type_converter.rb | lib/fluent/compat/type_converter.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
module TypeConverter
Converters = {
'string' => lambda { |v| v.to_s },
'integer' => lambda { |v| v.to_i },
'float' => lambda { |v| v.to_f },
'bool' => lambda { |v|
case v.downcase
when 'true', 'yes', '1'
true
else
false
end
},
'time' => lambda { |v, time_parser|
time_parser.parse(v)
},
'array' => lambda { |v, delimiter|
v.to_s.split(delimiter)
}
}
def self.included(klass)
klass.instance_eval {
config_param :types, :string, default: nil
config_param :types_delimiter, :string, default: ','
config_param :types_label_delimiter, :string, default: ':'
}
end
def configure(conf)
super
@type_converters = nil
@type_converters = parse_types_parameter unless @types.nil?
end
private
def convert_type(name, value)
converter = @type_converters[name]
converter.nil? ? value : converter.call(value)
end
def parse_types_parameter
converters = {}
@types.split(@types_delimiter).each { |pattern_name|
name, type, format = pattern_name.split(@types_label_delimiter, 3)
raise ConfigError, "Type is needed" if type.nil?
case type
when 'time'
require 'fluent/parser'
t_parser = Fluent::TextParser::TimeParser.new(format)
converters[name] = lambda { |v|
Converters[type].call(v, t_parser)
}
when 'array'
delimiter = format || ','
converters[name] = lambda { |v|
Converters[type].call(v, delimiter)
}
else
converters[name] = Converters[type]
end
}
converters
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/parser.rb | lib/fluent/compat/parser.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/plugin/parser'
require 'fluent/mixin'
require 'fluent/config'
require 'fluent/compat/type_converter'
require 'fluent/plugin/parser_regexp'
require 'fluent/plugin/parser_json'
require 'fluent/plugin/parser_tsv'
require 'fluent/plugin/parser_ltsv'
require 'fluent/plugin/parser_csv'
require 'fluent/plugin/parser_none'
require 'fluent/plugin/parser_apache2'
require 'fluent/plugin/parser_syslog'
require 'fluent/plugin/parser_multiline'
module Fluent
module Compat
class Parser < Fluent::Plugin::Parser
# TODO: warn when deprecated
end
class TextParser
# Keep backward compatibility for existing plugins
ParserError = Fluent::Plugin::Parser::ParserError
# TODO: will be removed at v1
TypeConverter = Fluent::TypeConverter
def initialize
# TODO: warn when deprecated
@parser = nil
@estimate_current_event = nil
end
attr_reader :parser
# SET false BEFORE CONFIGURE, to return nil when time not parsed
# 'configure()' may raise errors for unexpected configurations
attr_accessor :estimate_current_event
def configure(conf, required=true)
format = conf['format']
@parser = TextParser.lookup(format)
if @parser.respond_to?(:configure)
@parser.configure(conf)
end
if !@estimate_current_event.nil? && @parser.respond_to?(:'estimate_current_event=')
# external code sets parser.estimate_current_event = false
@parser.estimate_current_event = @estimate_current_event
end
return true
end
def parse(text, &block)
if block
@parser.parse(text, &block)
else
@parser.parse(text) { |time, record|
return time, record
}
end
end
def self.register_template(type, template, time_format=nil)
# TODO: warn when deprecated to use Plugin.register_parser directly
if template.is_a?(Class) || template.respond_to?(:call)
Fluent::Plugin.register_parser(type, template)
elsif template.is_a?(Regexp)
Fluent::Plugin.register_parser(type, Proc.new { RegexpParser.new(template, {'time_format' => time_format}) })
else
raise ArgumentError, "Template for parser must be a Class, callable object or regular expression object"
end
end
def self.lookup(format)
# TODO: warn when deprecated to use Plugin.new_parser or RegexpParser.new directly
if format.nil?
raise ConfigError, "'format' parameter is required"
end
if format[0] == ?/ && format[format.length-1] == ?/
# regexp
begin
regexp = Regexp.new(format[1..-2])
if regexp.named_captures.empty?
raise "No named captures"
end
rescue
raise ConfigError, "Invalid regexp '#{format[1..-2]}': #{$!}"
end
RegexpParser.new(regexp)
else
# built-in template
begin
Fluent::Plugin.new_parser(format)
rescue ConfigError # keep same error message
raise ConfigError, "Unknown format template '#{format}'"
end
end
end
module TypeConverterCompatParameters
def convert_type_converter_parameters!(conf)
if conf["types"]
delimiter = conf["types_delimiter"] || ','
label_delimiter = conf["types_label_delimiter"] || ':'
types = {}
conf['types'].split(delimiter).each do |pair|
key, value = pair.split(label_delimiter, 2)
if value.start_with?("time#{label_delimiter}")
value = value.split(label_delimiter, 2).join(':')
elsif value.start_with?("array#{label_delimiter}")
value = value.split(label_delimiter, 2).join(':')
end
types[key] = value
end
conf["types"] = JSON.dump(types)
end
end
end
class TimeParser < Fluent::TimeParser
# TODO: warn when deprecated
end
class RegexpParser < Fluent::Plugin::RegexpParser
include TypeConverterCompatParameters
# TODO: warn when deprecated
def initialize(regexp, conf = {})
super()
@stored_regexp = regexp
@manually_configured = false
unless conf.empty?
conf_init = if conf.is_a?(Fluent::Config::Element)
conf
else
Fluent::Config::Element.new('parse', '', conf, [])
end
self.configure(conf_init)
@manually_configured = true
end
end
def configure(conf)
return if @manually_configured # not to run twice
conf['expression'] ||= @stored_regexp.source
conf['ignorecase'] ||= @stored_regexp.options & Regexp::IGNORECASE != 0
conf['multiline'] ||= @stored_regexp.options & Regexp::MULTILINE != 0
convert_type_converter_parameters!(conf)
super
end
def patterns
{'format' => @regexp, 'time_format' => @time_format}
end
end
class ValuesParser < Parser
include Fluent::Compat::TypeConverter
config_param :keys, :array, default: []
config_param :time_key, :string, default: nil
config_param :null_value_pattern, :string, default: nil
config_param :null_empty_string, :bool, default: false
def configure(conf)
super
if @time_key && !@keys.include?(@time_key) && @estimate_current_event
raise Fluent::ConfigError, "time_key (#{@time_key.inspect}) is not included in keys (#{@keys.inspect})"
end
if @time_format && !@time_key
raise Fluent::ConfigError, "time_format parameter is ignored because time_key parameter is not set. at #{conf.inspect}"
end
@time_parser = time_parser_create
if @null_value_pattern
@null_value_pattern = Regexp.new(@null_value_pattern)
end
@mutex = Mutex.new
end
def values_map(values)
record = Hash[keys.zip(values.map { |value| convert_value_to_nil(value) })]
if @time_key
value = @keep_time_key ? record[@time_key] : record.delete(@time_key)
time = if value.nil?
if @estimate_current_event
Fluent::EventTime.now
else
nil
end
else
@mutex.synchronize { @time_parser.parse(value) }
end
elsif @estimate_current_event
time = Fluent::EventTime.now
else
time = nil
end
convert_field_type!(record) if @type_converters
return time, record
end
private
def convert_field_type!(record)
@type_converters.each_key { |key|
if value = record[key]
record[key] = convert_type(key, value)
end
}
end
def convert_value_to_nil(value)
if value && @null_empty_string
value = (value == '') ? nil : value
end
if value && @null_value_pattern
value = ::Fluent::StringUtil.match_regexp(@null_value_pattern, value) ? nil : value
end
value
end
end
class JSONParser < Fluent::Plugin::JSONParser
include TypeConverterCompatParameters
# TODO: warn when deprecated
def configure(conf)
convert_type_converter_parameters!(conf)
super
end
end
class TSVParser < Fluent::Plugin::TSVParser
include TypeConverterCompatParameters
# TODO: warn when deprecated
def configure(conf)
convert_type_converter_parameters!(conf)
super
end
end
class LabeledTSVParser < Fluent::Plugin::LabeledTSVParser
include TypeConverterCompatParameters
# TODO: warn when deprecated
def configure(conf)
convert_type_converter_parameters!(conf)
super
end
end
class CSVParser < Fluent::Plugin::CSVParser
include TypeConverterCompatParameters
# TODO: warn when deprecated
def configure(conf)
convert_type_converter_parameters!(conf)
super
end
end
class NoneParser < Fluent::Plugin::NoneParser
# TODO: warn when deprecated
end
class ApacheParser < Fluent::Plugin::Apache2Parser
# TODO: warn when deprecated
end
class SyslogParser < Fluent::Plugin::SyslogParser
# TODO: warn when deprecated
end
class MultilineParser < Fluent::Plugin::MultilineParser
# TODO: warn when deprecated
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/parser_utils.rb | lib/fluent/compat/parser_utils.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin_helper/compat_parameters'
module Fluent
module Compat
module ParserUtils
PARSER_PARAMS = Fluent::PluginHelper::CompatParameters::PARSER_PARAMS
def self.convert_parser_conf(conf)
return if conf.elements(name: 'parse').first
parser_params = {}
PARSER_PARAMS.each do |older, newer|
next unless newer
if conf.has_key?(older)
parser_params[newer] = conf[older]
end
end
unless parser_params.empty?
conf.elements << Fluent::Config::Element.new('parse', '', parser_params, [])
end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/record_filter_mixin.rb | lib/fluent/compat/record_filter_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
module RecordFilterMixin
def filter_record(tag, time, record)
end
def format_stream(tag, es)
out = ''
es.each {|time,record|
tag_temp = tag.dup
filter_record(tag_temp, time, record)
out << format(tag_temp, time, record)
}
out
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/file_util.rb | lib/fluent/compat/file_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
module FileUtil
# Check file is writable if file exists
# Check directory is writable if file does not exist
#
# @param [String] path File path
# @return [Boolean] file is writable or not
def writable?(path)
return false if File.directory?(path)
return File.writable?(path) if File.exist?(path)
dirname = File.dirname(path)
return false if !File.directory?(dirname)
File.writable?(dirname)
end
module_function :writable?
# Check file is writable in conjunction with mkdir_p(dirname(path))
#
# @param [String] path File path
# @return [Boolean] file writable or not
def writable_p?(path)
return false if File.directory?(path)
return File.writable?(path) if File.exist?(path)
dirname = File.dirname(path)
until File.exist?(dirname)
dirname = File.dirname(dirname)
end
return false if !File.directory?(dirname)
File.writable?(dirname)
end
module_function :writable_p?
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/handle_tag_and_time_mixin.rb | lib/fluent/compat/handle_tag_and_time_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/time' # TimeFormatter
module Fluent
module Compat
module HandleTagAndTimeMixin
def self.included(klass)
klass.instance_eval {
config_param :include_time_key, :bool, default: false
config_param :time_key, :string, default: 'time'
config_param :time_format, :string, default: nil
config_param :time_as_epoch, :bool, default: false
config_param :include_tag_key, :bool, default: false
config_param :tag_key, :string, default: 'tag'
config_param :localtime, :bool, default: true
config_param :timezone, :string, default: nil
}
end
def configure(conf)
super
if conf['utc']
@localtime = false
end
@timef = Fluent::TimeFormatter.new(@time_format, @localtime, @timezone)
if @time_as_epoch && !@include_time_key
log.warn "time_as_epoch will be ignored because include_time_key is false"
end
end
def filter_record(tag, time, record)
if @include_tag_key
record[@tag_key] = tag
end
if @include_time_key
if @time_as_epoch
record[@time_key] = time.to_i
else
record[@time_key] = @timef.format(time)
end
end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/input.rb | lib/fluent/compat/input.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/plugin/input'
require 'fluent/process'
require 'fluent/compat/call_super_mixin'
module Fluent
module Compat
class Input < Fluent::Plugin::Input
# TODO: warn when deprecated
def initialize
super
unless self.class.ancestors.include?(Fluent::Compat::CallSuperMixin)
self.class.prepend Fluent::Compat::CallSuperMixin
end
end
# These definitions are to get instance methods of superclass of 3rd party plugins
# to make it sure to call super
def start
super
end
def before_shutdown
super
end
def shutdown
super
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/set_time_key_mixin.rb | lib/fluent/compat/set_time_key_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/config/error'
require 'fluent/compat/record_filter_mixin'
require 'fluent/time'
require 'fluent/timezone'
module Fluent
module Compat
module SetTimeKeyMixin
include RecordFilterMixin
attr_accessor :include_time_key, :time_key, :localtime, :timezone
def configure(conf)
@include_time_key = false
@localtime = false
@timezone = nil
super
if s = conf['include_time_key']
include_time_key = Fluent::Config.bool_value(s)
raise Fluent::ConfigError, "Invalid boolean expression '#{s}' for include_time_key parameter" if include_time_key.nil?
@include_time_key = include_time_key
end
if @include_time_key
@time_key = conf['time_key'] || 'time'
@time_format = conf['time_format']
if conf['localtime']
@localtime = true
elsif conf['utc']
@localtime = false
end
if conf['timezone']
@timezone = conf['timezone']
Fluent::Timezone.validate!(@timezone)
end
@timef = Fluent::TimeFormatter.new(@time_format, @localtime, @timezone)
end
end
def filter_record(tag, time, record)
super
record[@time_key] = @timef.format(time) if @include_time_key
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/propagate_default.rb | lib/fluent/compat/propagate_default.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/configurable'
module Fluent
module Compat
module PropagateDefault
# This mixin is to prepend to 3rd party plugins of v0.12 APIs.
# 3rd party plugins may override default values of some parameters, like `buffer_type`.
# But default values of such parameters will NOT used, but defaults of <buffer>@type</buffer>
# will be used in fact. It should bring troubles.
# This mixin defines Class method .config_param and .config_set_default (which should be used by extend)
# to propagate changes of default values to subsections.
def self.included(mod)
mod.extend(ClassMethods)
end
module ClassMethods
CONFIGURABLE_CLASS_METHODS = Fluent::Configurable::ClassMethods
def config_param(name, type = nil, **kwargs, &block)
CONFIGURABLE_CLASS_METHODS.instance_method(:config_param).bind_call(self, name, type, **kwargs, &block)
pparams = propagate_default_params
if kwargs.has_key?(:default) && pparams[name.to_s]
newer = pparams[name.to_s].to_sym
overridden_default_value = kwargs[:default]
CONFIGURABLE_CLASS_METHODS.instance_method(:config_section).bind_call(self, :buffer) do
config_set_default newer, overridden_default_value
end
end
end
def config_set_default(name, defval)
CONFIGURABLE_CLASS_METHODS.instance_method(:config_set_default).bind_call(self, name, defval)
pparams = propagate_default_params
if pparams[name.to_s]
newer = pparams[name.to_s].to_sym
CONFIGURABLE_CLASS_METHODS.instance_method(:config_section).bind_call(self, :buffer) do
self.config_set_default newer, defval
end
end
end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/call_super_mixin.rb | lib/fluent/compat/call_super_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
module CallSuperMixin
  # This mixin is to prepend to 3rd party plugins of v0.12 APIs.
  # In past, there were not strong rule to call super in #start, #before_shutdown and #shutdown.
  # But v0.14 API requires to call super in these methods to setup/teardown plugin helpers and others.
  # This mixin prepends method calls to call super forcedly if checker returns false (it shows Fluent::Plugin::Base#methods wasn't called)

  # Captures, per prepended class, the lifecycle method defined by the
  # nearest ancestor *behind* the plugin class itself, so that method can
  # be invoked directly when the plugin forgot to call super.
  def self.prepended(klass)
    # Class-variable registries shared by all prepended classes:
    # prepended class => UnboundMethod of the ancestor's lifecycle method.
    @@_super_start ||= {}
    @@_super_before_shutdown ||= {}
    @@_super_shutdown ||= {}
    # ancestors[0]: this module
    # ancestors[1]: prepended class (plugin itself)
    method_search = ->(ancestors, method){
      # Index of the closest ancestor above the plugin defining +method+.
      # NOTE(review): assumes such an ancestor always exists (true for the
      # v0.12 compat base classes); +closest+ would be nil otherwise — confirm.
      closest = ancestors[2, ancestors.size - 2].index{|m| m.method_defined?(method) }
      ancestors[2 + closest].instance_method(method)
    }
    @@_super_start[klass] = method_search.call(klass.ancestors, :start) # this returns Fluent::Compat::*#start (or helpers on it)
    @@_super_before_shutdown[klass] = method_search.call(klass.ancestors, :before_shutdown)
    @@_super_shutdown[klass] = method_search.call(klass.ancestors, :shutdown)
  end

  # Runs the plugin's #start, then forces the ancestor's #start when the
  # plugin did not call super (detected via #started? still being false).
  def start
    super
    unless self.started?
      @@_super_start[self.class].bind_call(self)
      # #super will reset logdev (especially in test), so this warn should be after calling it
      log.warn "super was not called in #start: called it forcedly", plugin: self.class
    end
  end

  # Same guard for #before_shutdown, detected via #before_shutdown?.
  def before_shutdown
    super
    unless self.before_shutdown?
      log.warn "super was not called in #before_shutdown: calling it forcedly", plugin: self.class
      @@_super_before_shutdown[self.class].bind_call(self)
    end
  end

  # Drops this class's entries from the registries; no forced super here
  # because #stop has no completion checker to consult.
  def stop
    klass = self.class
    @@_super_start.delete(klass)
    @@_super_before_shutdown.delete(klass)
    @@_super_shutdown.delete(klass)
    super
  end

  # Same guard for #shutdown, detected via #shutdown?.
  def shutdown
    super
    unless self.shutdown?
      log.warn "super was not called in #shutdown: calling it forcedly", plugin: self.class
      @@_super_shutdown[self.class].bind_call(self)
    end
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/socket_util.rb | lib/fluent/compat/socket_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'ipaddr'
require 'cool.io'
require 'fluent/plugin'
require 'fluent/input'
module Fluent
module Compat
module SocketUtil
# Returns a UDP socket whose address family matches the resolved address
# of +host+: IPv4 socket for IPv4 addresses, AF_INET6 socket otherwise.
def create_udp_socket(host)
  resolved = IPSocket.getaddress(host)
  IPAddr.new(resolved).ipv4? ? UDPSocket.new : UDPSocket.new(Socket::AF_INET6)
end
module_function :create_udp_socket
# Cool.io watcher for a bound UDP socket: reads one datagram per
# readable event and hands it to the callback.
class UdpHandler < Coolio::IO
  def initialize(io, log, body_size_limit, callback)
    super(io)
    @io, @log, @body_size_limit, @callback = io, log, body_size_limit, callback
  end

  # Reads a single datagram (up to @body_size_limit bytes), strips the
  # trailing newline, and invokes the callback with payload and sender.
  def on_readable
    payload, sender = @io.recvfrom_nonblock(@body_size_limit)
    payload.chomp!
    @callback.call(payload, sender)
  rescue => e
    @log.error "unexpected error", error: e
  end
end
# Cool.io handler for one accepted TCP connection: buffers incoming
# bytes and invokes the callback once per delimiter-terminated message.
class TcpHandler < Coolio::Socket
  # Placeholder peeraddr tuple used when name resolution fails.
  PEERADDR_FAILED = ["?", "?", "name resolution failed", "?"]

  def initialize(io, log, delimiter, callback)
    super(io)
    @timeout = 0
    if io.is_a?(TCPSocket)
      @addr = begin
        io.peeraddr
      rescue
        PEERADDR_FAILED
      end
      # struct linger { int l_onoff; int l_linger; } — close discards unsent data immediately
      linger = [1, @timeout.to_i].pack('I!I!')
      io.setsockopt(Socket::SOL_SOCKET, Socket::SO_LINGER, linger)
    end
    @delimiter = delimiter
    @callback = callback
    @log = log
    @log.trace { "accepted fluent socket object_id=#{self.object_id}" }
    @buffer = "".force_encoding('ASCII-8BIT')
  end

  def on_connect
  end

  # Appends data and flushes every complete (delimiter-terminated)
  # message to the callback; a trailing partial message stays buffered.
  def on_read(data)
    @buffer << data
    scan_pos = 0
    while (delim_idx = @buffer.index(@delimiter, scan_pos))
      @callback.call(@buffer[scan_pos...delim_idx], @addr)
      scan_pos = delim_idx + @delimiter.length
    end
    @buffer.slice!(0, scan_pos) if scan_pos > 0
  rescue => e
    @log.error "unexpected error", error: e
    close
  end

  def on_close
    @log.trace { "closed fluent socket object_id=#{self.object_id}" }
  end
end
# Common base for compat socket inputs (UDP/TCP): runs a cool.io loop in
# a dedicated thread, parses each received payload with the configured
# parser, and emits the result to the router under @tag.
class BaseInput < Fluent::Input
  def initialize
    super
    require 'fluent/parser' # lazy: only loaded when this input is actually used
  end

  desc 'Tag of output events.'
  config_param :tag, :string
  desc 'The format of the payload.'
  config_param :format, :string
  desc 'The port to listen to.'
  config_param :port, :integer, default: 5150
  desc 'The bind address to listen to.'
  config_param :bind, :string, default: '0.0.0.0'
  desc "The field name of the client's hostname."
  config_param :source_host_key, :string, default: nil
  config_param :blocking_timeout, :time, default: 0.5

  def configure(conf)
    super
    @parser = Plugin.new_parser(@format)
    @parser.configure(conf)
  end

  # #listen must be provided by the concrete subclass and return a
  # cool.io watcher that delivers payloads to the given callback.
  def start
    super
    @loop = Coolio::Loop.new
    @handler = listen(method(:on_message))
    @loop.attach(@handler)
    @thread = Thread.new(&method(:run))
  end

  def shutdown
    @loop.watchers.each { |w| w.detach }
    @loop.stop if @loop.instance_variable_get(:@running) # Coolio::Loop#stop raises unless running
    @handler.close
    @thread.join
    super
  end

  def run
    @loop.run(@blocking_timeout)
  rescue => e
    log.error "unexpected error", error: e
    log.error_backtrace
  end

  private

  # Parses one payload and routes the resulting event(s).
  # The `return` inside the parse block exits on_message entirely for
  # unmatched payloads (block-local `next` would only skip one record).
  def on_message(msg, addr)
    @parser.parse(msg) { |time, record|
      unless time && record
        log.warn { "pattern not matched: #{msg.inspect}" }
        return
      end
      record[@source_host_key] = addr[3] if @source_host_key
      router.emit(@tag, time, record)
    }
  rescue => e
    log.error msg.dump, error: e, host: addr[3]
    log.error_backtrace
  end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/exec_util.rb | lib/fluent/compat/exec_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'msgpack'
require 'json'
require 'yajl'
require 'fluent/engine'
require 'fluent/plugin'
require 'fluent/parser'
module Fluent
module Compat
module ExecUtil
SUPPORTED_FORMAT = {
'tsv' => :tsv,
'json' => :json,
'msgpack' => :msgpack,
}
# Abstract base for ExecUtil parsers. Subclasses implement #call(io)
# and invoke @on_message for each decoded record.
class Parser
  # @param on_message [#call] callback receiving each parsed record
  def initialize(on_message)
    @on_message = on_message
  end
end
# Adapts any registered fluentd text parser (chosen via conf['format'])
# to the ExecUtil parser interface; invokes the callback per line.
class TextParserWrapperParser < Parser
  def initialize(conf, on_message)
    @parser = Plugin.new_parser(conf['format'])
    @parser.configure(conf)
    super(on_message)
  end

  def call(io)
    io.each_line { |line| each_line(line) }
  end

  def each_line(line)
    line.chomp!
    @parser.parse(line) do |time, record|
      @on_message.call(record, time)
    end
  end
end
# Parses tab-separated lines into records keyed by the configured keys;
# invokes the callback once per line.
class TSVParser < Parser
  def initialize(keys, on_message)
    @keys = keys
    super(on_message)
  end

  def call(io)
    io.each_line { |line| each_line(line) }
  end

  def each_line(line)
    line.chomp!
    fields = line.split("\t")
    @on_message.call(@keys.zip(fields).to_h)
  end
end
# Streams JSON documents from the IO via Yajl; each completed document
# is delivered to the callback.
class JSONParser < Parser
  def call(io)
    yajl = Yajl::Parser.new
    yajl.on_parse_complete = @on_message
    yajl.parse(io)
  end
end
# Streams msgpack objects from the IO; EOF terminates the stream quietly.
class MessagePackParser < Parser
  def call(io)
    @u = Fluent::MessagePackFactory.msgpack_unpacker(io)
    begin
      @u.each { |obj| @on_message.call(obj) }
    rescue EOFError
      # end of stream: normal termination
    end
  end
end
# Marker base class for ExecUtil formatters.
class Formatter
end

# Writes records as tab-separated values of the configured keys.
class TSVFormatter < Formatter
  # @param in_keys [Array<String>] record keys emitted, in output order
  def initialize(in_keys)
    @in_keys = in_keys
    super()
  end

  # Appends the record's values joined by tabs plus a trailing newline
  # to +out+. Missing keys serialize as empty strings (nil.to_s), which
  # matches the original for-loop behavior.
  # (Replaces a non-idiomatic `for i in 0..last` index loop, which also
  # leaked the loop variable into the enclosing scope.)
  def call(record, out)
    out << @in_keys.map { |key| record[key].to_s }.join("\t") << "\n"
  end
end
# Writes each record as single-line JSON followed by a newline.
class JSONFormatter < Formatter
  def call(record, out)
    out << "#{JSON.generate(record)}\n"
  end
end
# Appends each record to +out+ as a msgpack object.
class MessagePackFormatter < Formatter
  def call(record, out)
    record.to_msgpack(out)
  end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/formatter.rb | lib/fluent/compat/formatter.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/plugin/formatter'
require 'fluent/compat/handle_tag_and_time_mixin'
require 'fluent/compat/structured_format_mixin'
require 'fluent/plugin/formatter_out_file'
require 'fluent/plugin/formatter_stdout'
require 'fluent/plugin/formatter_json'
require 'fluent/plugin/formatter_hash'
require 'fluent/plugin/formatter_msgpack'
require 'fluent/plugin/formatter_ltsv'
require 'fluent/plugin/formatter_csv'
require 'fluent/plugin/formatter_single_value'
module Fluent
module Compat
# v0.12-compatible alias of the v0.14 formatter base class; kept so old
# plugins inheriting Fluent::Compat::Formatter keep working.
class Formatter < Fluent::Plugin::Formatter
  # TODO: warn when deprecated
end
# v0.12-compatible facade over v0.14 formatter plugins: registration,
# lookup and creation from configuration, plus deprecated constant
# aliases for the built-in formatter classes.
module TextFormatter
  # Registers +template+ as formatter +type+. Accepts a Class, a 3-arity
  # callable (wrapped per-call into ProcWrappedFormatter), or any other
  # callable (registered as-is).
  def self.register_template(type, template)
    # TODO: warn when deprecated to use Plugin.register_formatter directly
    if template.is_a?(Class)
      Fluent::Plugin.register_formatter(type, template)
    elsif template.respond_to?(:call) && template.arity == 3 # Proc.new { |tag, time, record| }
      Fluent::Plugin.register_formatter(type, Proc.new { ProcWrappedFormatter.new(template) })
    elsif template.respond_to?(:call)
      Fluent::Plugin.register_formatter(type, template)
    else
      raise ArgumentError, "Template for formatter must be a Class or callable object"
    end
  end

  # Instantiates a registered formatter by type name.
  def self.lookup(type)
    # TODO: warn when deprecated to use Plugin.new_formatter(type, parent: plugin)
    Fluent::Plugin.new_formatter(type)
  end

  # Keep backward-compatibility
  # Builds and configures a formatter from a config element; the
  # 'format' key selects the formatter type and is mandatory.
  def self.create(conf)
    # TODO: warn when deprecated
    format = conf['format']
    if format.nil?
      raise ConfigError, "'format' parameter is required"
    end
    formatter = lookup(format)
    if formatter.respond_to?(:configure)
      formatter.configure(conf)
    end
    formatter
  end

  HandleTagAndTimeMixin = Fluent::Compat::HandleTagAndTimeMixin
  StructuredFormatMixin = Fluent::Compat::StructuredFormatMixin

  # Deprecated aliases: old v0.12 constant names mapped to v0.14 classes.
  class ProcWrappedFormatter < Fluent::Plugin::ProcWrappedFormatter
    # TODO: warn when deprecated
  end
  class OutFileFormatter < Fluent::Plugin::OutFileFormatter
    # TODO: warn when deprecated
  end
  class StdoutFormatter < Fluent::Plugin::StdoutFormatter
    # TODO: warn when deprecated
  end
  class JSONFormatter < Fluent::Plugin::JSONFormatter
    # TODO: warn when deprecated
  end
  class HashFormatter < Fluent::Plugin::HashFormatter
    # TODO: warn when deprecated
  end
  class MessagePackFormatter < Fluent::Plugin::MessagePackFormatter
    # TODO: warn when deprecated
  end
  class LabeledTSVFormatter < Fluent::Plugin::LabeledTSVFormatter
    # TODO: warn when deprecated
  end
  class CsvFormatter < Fluent::Plugin::CsvFormatter
    # TODO: warn when deprecated
    # Do not cache because it is hard to consider the thread key correctly.
    # (We can try, but it would be low priority.)
    def csv_cacheable?
      false
    end
  end
  class SingleValueFormatter < Fluent::Plugin::SingleValueFormatter
    # TODO: warn when deprecated
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/output_chain.rb | lib/fluent/compat/output_chain.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'singleton'
module Fluent
module Compat
# TODO: remove when old plugin API are removed
# Chain terminator: a shared no-op used as the default tail of an
# OutputChain. #next does nothing and returns nil.
class NullOutputChain
  include Singleton

  def next
    nil
  end
end
# Invokes #emit_events(tag, es) on every output in order; once the list
# is exhausted, delegates to the terminating chain's #next.
class OutputChain
  def initialize(array, tag, es, chain=NullOutputChain.instance)
    @array = array
    @tag = tag
    @es = es
    @offset = 0
    @chain = chain
  end

  # Iterative equivalent of the recursive dispatch: emit to each
  # remaining output, advancing @offset, then return the terminator's
  # #next result.
  def next
    while @offset < @array.length
      target = @array[@offset]
      @offset += 1
      target.emit_events(@tag, @es)
    end
    @chain.next
  end
end
# Like OutputChain, but every output except the last receives a dup of
# the event stream, so downstream mutation cannot leak across outputs.
class CopyOutputChain < OutputChain
  def next
    return @chain.next if @offset >= @array.length
    @offset += 1
    # dup for all but the final target; the last one gets the original
    stream = (@offset < @array.length) ? @es.dup : @es
    @array[@offset - 1].emit_events(@tag, stream)
    self.next
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/handle_tag_name_mixin.rb | lib/fluent/compat/handle_tag_name_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/compat/record_filter_mixin'
module Fluent
module Compat
# Mixin that rewrites the event tag (strip/add prefix and suffix) before
# delegating the event to RecordFilterMixin#filter_record.
module HandleTagNameMixin
  include RecordFilterMixin

  attr_accessor :remove_tag_prefix, :remove_tag_suffix, :add_tag_prefix, :add_tag_suffix

  def configure(conf)
    super

    # An absent key leaves the corresponding regexp nil (if-expression
    # without else evaluates to nil).
    @remove_tag_prefix =
      if conf.has_key?('remove_tag_prefix')
        Regexp.new('^' + Regexp.escape(conf['remove_tag_prefix']))
      end
    @remove_tag_suffix =
      if conf.has_key?('remove_tag_suffix')
        Regexp.new(Regexp.escape(conf['remove_tag_suffix']) + '$')
      end
    @add_tag_prefix = conf['add_tag_prefix']
    @add_tag_suffix = conf['add_tag_suffix']
  end

  # Mutates +tag+ in place (remove, then add), then hands the event on.
  def filter_record(tag, time, record)
    if @remove_tag_prefix
      tag.sub!(@remove_tag_prefix, '')
    end
    if @remove_tag_suffix
      tag.sub!(@remove_tag_suffix, '')
    end
    if @add_tag_prefix
      tag.insert(0, @add_tag_prefix)
    end
    if @add_tag_suffix
      tag << @add_tag_suffix
    end
    super(tag, time, record)
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/compat/structured_format_mixin.rb | lib/fluent/compat/structured_format_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
module Compat
# Formatter mixin: runs the including class's tag/time record filtering,
# then delegates serialization to its #format_record.
module StructuredFormatMixin
  def format(tag, time, record)
    filter_record(tag, time, record)
    format_record(record)
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/storage.rb | lib/fluent/plugin/storage.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/plugin/owned_by_mixin'
module Fluent
module Plugin
# Abstract base class for plugin storages: a key-value store owned by a
# plugin. Concrete implementations override get/fetch/put/delete/update
# and, when they persist data, load/save.
class Storage < Base
  include OwnedByMixin

  DEFAULT_TYPE = 'local'

  configured_in :storage

  config_param :persistent, :bool, default: false # load/save with all operations
  config_param :autosave, :bool, default: true
  config_param :autosave_interval, :time, default: 10
  config_param :save_at_shutdown, :bool, default: true

  # Normalizes a storage key to a String; rejects anything that is not
  # a String or a Symbol.
  def self.validate_key(key)
    raise ArgumentError, "key must be a string (or symbol for to_s)" unless key.is_a?(String) || key.is_a?(Symbol)
    key.to_s
  end

  attr_accessor :log

  # True when every operation goes straight to the backing store
  # (no separate load/save cycle needed).
  def persistent_always?
    false
  end

  # True when the implementation is safe for concurrent access.
  def synchronized?
    false
  end

  # The object actually implementing storage operations; wrappers may
  # delegate, the base returns self.
  def implementation
    self
  end

  def load
    # load storage data from any data source, or initialize storage internally
  end

  def save
    # save internal data store into data source (to be loaded)
  end

  def get(key)
    raise NotImplementedError, "Implement this method in child class"
  end

  def fetch(key, defval)
    raise NotImplementedError, "Implement this method in child class"
  end

  def put(key, value)
    # return value
    raise NotImplementedError, "Implement this method in child class"
  end

  def delete(key)
    # return deleted value
    raise NotImplementedError, "Implement this method in child class"
  end

  def update(key, &block) # transactional get-and-update
    raise NotImplementedError, "Implement this method in child class"
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_unix.rb | lib/fluent/plugin/in_unix.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/env'
require 'fluent/plugin/input'
require 'fluent/msgpack_factory'
require 'cool.io'
require 'yajl'
require 'fileutils'
require 'socket'
module Fluent::Plugin
# TODO: This plugin will be 3rd party plugin
# Input that accepts fluentd's forward-style messages over a Unix domain
# socket, in either JSON or msgpack encoding (sniffed per connection).
class UnixInput < Input
  Fluent::Plugin.register_input('unix', self)

  helpers :event_loop

  def initialize
    super
    @lsock = nil # listening UNIX server socket, created in #start
  end

  desc 'The path to your Unix Domain Socket.'
  config_param :path, :string, default: Fluent::DEFAULT_SOCKET_PATH
  desc 'The backlog of Unix Domain Socket.'
  config_param :backlog, :integer, default: nil
  desc "New tag instead of incoming tag"
  config_param :tag, :string, default: nil

  def configure(conf)
    super
  end

  # Creates the listening socket and attaches it to the event loop helper.
  def start
    super
    @lsock = listen
    event_loop_attach(@lsock)
  end

  def shutdown
    if @lsock
      event_loop_detach(@lsock)
      @lsock.close
    end
    super
  end

  # Binds a cool.io UNIXServer at @path (removing any stale socket file
  # first); each accepted connection is wrapped in a Handler.
  def listen
    if File.exist?(@path)
      log.warn "Found existing '#{@path}'. Remove this file for in_unix plugin"
      File.unlink(@path)
    end
    FileUtils.mkdir_p(File.dirname(@path))
    log.info "listening fluent socket on #{@path}"
    s = Coolio::UNIXServer.new(@path, Handler, log, method(:on_message))
    s.listen(@backlog) unless @backlog.nil?
    s
  end

  # message Entry {
  #   1: long time
  #   2: object record
  # }
  #
  # message Forward {
  #   1: string tag
  #   2: list<Entry> entries
  # }
  #
  # message PackedForward {
  #   1: string tag
  #   2: raw entries # msgpack stream of Entry
  # }
  #
  # message Message {
  #   1: string tag
  #   2: long? time
  #   3: object record
  # }

  # Dispatches one decoded client message. The shape of msg[1] selects
  # the wire format: String => PackedForward, Array => Forward,
  # anything else => single Message. @tag, when set, overrides the
  # incoming tag.
  def on_message(msg)
    unless msg.is_a?(Array)
      log.warn "incoming data is broken:", msg: msg
      return
    end
    tag = @tag || (msg[0].to_s)
    entries = msg[1]
    case entries
    when String
      # PackedForward
      es = Fluent::MessagePackEventStream.new(entries)
      router.emit_stream(tag, es)
    when Array
      # Forward
      es = Fluent::MultiEventStream.new
      entries.each {|e|
        record = e[1]
        next if record.nil?
        time = convert_time(e[0])
        es.add(time, record)
      }
      router.emit_stream(tag, es)
    else
      # Message
      record = msg[2]
      return if record.nil?
      time = convert_time(msg[1])
      router.emit(tag, time, record)
    end
  end

  # Normalizes a wire-format timestamp: nil/0 mean "now", EventTime
  # passes through, anything else is treated as epoch seconds.
  def convert_time(time)
    case time
    when nil, 0
      Fluent::EventTime.now
    when Fluent::EventTime
      time
    else
      Fluent::EventTime.from_time(Time.at(time))
    end
  end

  # Per-connection cool.io handler. The first chunk's first byte decides
  # the codec (JSON for '{'/'[', msgpack otherwise); #on_read is then
  # redefined on this instance's singleton class so sniffing happens
  # only once per connection.
  class Handler < Coolio::Socket
    def initialize(io, log, on_message)
      super(io)
      @on_message = on_message
      @log = log
    end

    def on_connect
    end

    def on_read(data)
      first = data[0]
      if first == '{'.freeze || first == '['.freeze
        m = method(:on_read_json)
        @parser = Yajl::Parser.new
        @parser.on_parse_complete = @on_message
      else
        m = method(:on_read_msgpack)
        @parser = Fluent::MessagePackFactory.msgpack_unpacker
      end
      # Rebind on_read for this connection only; other connections keep
      # sniffing independently.
      singleton_class.module_eval do
        define_method(:on_read, m)
      end
      m.call(data)
    end

    def on_read_json(data)
      @parser << data
    rescue => e
      @log.error "unexpected error in json payload", error: e.to_s
      @log.error_backtrace
      close
    end

    def on_read_msgpack(data)
      @parser.feed_each(data, &@on_message)
    rescue => e
      @log.error "unexpected error in msgpack payload", error: e.to_s
      @log.error_backtrace
      close
    end

    def on_close
      @log.trace { "closed fluent socket object_id=#{self.object_id}" }
    end
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/storage_local.rb | lib/fluent/plugin/storage_local.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/env'
require 'fluent/plugin'
require 'fluent/plugin/storage'
require 'fileutils'
require 'json'
module Fluent
module Plugin
# JSON-file-backed plugin storage. Data lives in an in-memory Hash
# (@store); #load/#save move it to/from @path, or the store stays
# on-memory only when no path can be determined.
class LocalStorage < Storage
  Fluent::Plugin.register_storage('local', self)

  config_param :path, :string, default: nil
  config_param :mode, default: Fluent::DEFAULT_FILE_PERMISSION do |v|
    v.to_i(8) # octal string (e.g. "0644") -> integer file mode
  end
  config_param :dir_mode, default: Fluent::DEFAULT_DIR_PERMISSION do |v|
    v.to_i(8)
  end
  config_param :pretty_print, :bool, default: false

  attr_reader :store # for test

  def initialize
    super
    @store = {}
    @multi_workers_available = nil
  end

  # Resolves the storage path and validates it up front:
  # - existing file                 => single-worker only
  # - existing/implied directory    => per-worker file, multi-worker safe
  # - no path but plugin root dir   => file under the plugin root
  # - neither                       => on-memory store (error if persistent)
  def configure(conf)
    super

    @on_memory = false
    if @path
      if File.exist?(@path) && File.file?(@path)
        @multi_workers_available = false
      elsif File.exist?(@path) && File.directory?(@path)
        @path = File.join(@path, "worker#{fluentd_worker_id}", "storage.json")
        @multi_workers_available = true
      else # path file/directory doesn't exist
        if @path.end_with?('.json') # file
          @multi_workers_available = false
        else # directory
          @path = File.join(@path, "worker#{fluentd_worker_id}", "storage.json")
          @multi_workers_available = true
        end
      end
    elsif root_dir = owner.plugin_root_dir
      basename = (conf.arg && !conf.arg.empty?) ? "storage.#{conf.arg}.json" : "storage.json"
      @path = File.join(root_dir, basename)
      @multi_workers_available = true
    else
      if @persistent
        raise Fluent::ConfigError, "Plugin @id or path for <storage> required when 'persistent' is true"
      else
        if @autosave
          log.warn "both of Plugin @id and path for <storage> are not specified. Using on-memory store."
        else
          log.info "both of Plugin @id and path for <storage> are not specified. Using on-memory store."
        end
        @on_memory = true
        @multi_workers_available = true
      end
    end

    # Pre-flight checks for the file-backed case: create the directory,
    # then verify an existing file is readable/writable and holds a JSON
    # object (the actual content is re-read by #load).
    if !@on_memory
      dir = File.dirname(@path)
      FileUtils.mkdir_p(dir, mode: @dir_mode) unless Dir.exist?(dir)
      if File.exist?(@path)
        raise Fluent::ConfigError, "Plugin storage path '#{@path}' is not readable/writable" unless File.readable?(@path) && File.writable?(@path)
        begin
          data = File.open(@path, 'r:utf-8') { |io| io.read }
          if data.empty?
            log.warn "detect empty plugin storage file during startup. Ignored: #{@path}"
            return
          end
          data = JSON.parse(data)
          raise Fluent::ConfigError, "Invalid contents (not object) in plugin storage file: '#{@path}'" unless data.is_a?(Hash)
        rescue => e
          log.error "failed to read data from plugin storage file", path: @path, error: e
          raise Fluent::ConfigError, "Unexpected error: failed to read data from plugin storage file: '#{@path}'"
        end
      else
        raise Fluent::ConfigError, "Directory is not writable for plugin storage file '#{@path}'" unless File.stat(dir).writable?
      end
    end
  end

  def multi_workers_ready?
    unless @multi_workers_available
      log.error "local plugin storage with multi workers should be configured to use directory 'path', or system root_dir and plugin id"
    end
    @multi_workers_available
  end

  # Loads @store from @path; broken or non-Hash content is logged and
  # ignored, keeping the current in-memory store.
  def load
    return if @on_memory
    return unless File.exist?(@path)
    begin
      json_string = File.open(@path, 'r:utf-8'){ |io| io.read }
      json = JSON.parse(json_string)
      unless json.is_a?(Hash)
        log.error "broken content for plugin storage (Hash required: ignored)", type: json.class
        log.debug "broken content", content: json_string
        return
      end
      @store = json
    rescue => e
      log.error "failed to load data for plugin storage from file", path: @path, error: e
    end
  end

  # Persists @store atomically: write to a uniquely-named temp file,
  # fsync, then rename over @path.
  def save
    return if @on_memory
    tmp_path = @path + '.tmp.' + Fluent::UniqueId.hex(Fluent::UniqueId.generate)
    begin
      if @pretty_print
        json_string = JSON.pretty_generate(@store)
      else
        json_string = JSON.generate(@store)
      end
      File.open(tmp_path, 'w:utf-8', @mode) { |io| io.write json_string; io.fsync }
      File.rename(tmp_path, @path)
    rescue => e
      log.error "failed to save data for plugin storage to file", path: @path, tmp: tmp_path, error: e
    end
  end

  # Keys are normalized with to_s everywhere because JSON round-trips
  # them as strings.
  def get(key)
    @store[key.to_s]
  end

  def fetch(key, defval)
    @store.fetch(key.to_s, defval)
  end

  def put(key, value)
    @store[key.to_s] = value
  end

  def delete(key)
    @store.delete(key.to_s)
  end

  def update(key, &block)
    @store[key.to_s] = block.call(@store[key.to_s])
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_dummy.rb | lib/fluent/plugin/in_dummy.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Remove this file in fluentd v2
require_relative 'in_sample'
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_tcp.rb | lib/fluent/plugin/in_tcp.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
module Fluent::Plugin
# TCP input: accepts delimiter-separated payloads, parses each message
# with the configured <parse> section, and emits events (optionally
# restricted to configured client hosts/networks).
class TcpInput < Input
  Fluent::Plugin.register_input('tcp', self)

  helpers :server, :parser, :extract, :compat_parameters

  desc 'Tag of output events.'
  config_param :tag, :string
  desc 'The port to listen to.'
  config_param :port, :integer, default: 5170
  desc 'The bind address to listen to.'
  config_param :bind, :string, default: '0.0.0.0'
  desc "The field name of the client's hostname."
  config_param :source_host_key, :string, default: nil, deprecated: "use source_hostname_key instead."
  desc "The field name of the client's hostname."
  config_param :source_hostname_key, :string, default: nil
  desc "The field name of the client's address."
  config_param :source_address_key, :string, default: nil
  # Setting default to nil for backward compatibility
  desc "The max bytes of message."
  config_param :message_length_limit, :size, default: nil
  config_param :blocking_timeout, :time, default: 0.5
  desc 'The payload is read up to this character.'
  config_param :delimiter, :string, default: "\n" # syslog family add "\n" to each message and this seems only way to split messages in tcp stream
  desc 'Check the remote connection is still available by sending a keepalive packet if this value is true.'
  config_param :send_keepalive_packet, :bool, default: false

  # in_forward like host/network restriction
  config_section :security, required: false, multi: false do
    config_section :client, param_name: :clients, required: true, multi: true do
      desc 'The IP address or host name of the client'
      config_param :host, :string, default: nil
      desc 'Network address specification'
      config_param :network, :string, default: nil
    end
  end

  # Validates the mandatory <parse> section, resolves <security> client
  # entries into IPAddr objects (@nodes, consulted by #check_client),
  # and builds the parser.
  def configure(conf)
    compat_parameters_convert(conf, :parser)
    parser_config = conf.elements('parse').first
    unless parser_config
      raise Fluent::ConfigError, "<parse> section is required."
    end
    super
    @_event_loop_blocking_timeout = @blocking_timeout
    @source_hostname_key ||= @source_host_key if @source_host_key # deprecated alias only fills an unset new key
    @nodes = nil
    if @security
      @nodes = []
      @security.clients.each do |client|
        if client.host && client.network
          raise Fluent::ConfigError, "both of 'host' and 'network' are specified for client"
        end
        if !client.host && !client.network
          raise Fluent::ConfigError, "Either of 'host' and 'network' must be specified for client"
        end
        source = nil
        if client.host
          begin
            source = IPSocket.getaddress(client.host)
          rescue SocketError
            raise Fluent::ConfigError, "host '#{client.host}' cannot be resolved"
          end
        end
        source_addr = begin
          IPAddr.new(source || client.network)
        rescue ArgumentError
          raise Fluent::ConfigError, "network '#{client.network}' address format is invalid"
        end
        @nodes.push(source_addr)
      end
    end
    @parser = parser_create(conf: parser_config)
  end

  def multi_workers_ready?
    true
  end

  def zero_downtime_restart_ready?
    true
  end

  # Starts the TCP server. Two near-identical callbacks are registered:
  # per-record emit when tag extraction is enabled, batched emit
  # otherwise. Each connection accumulates bytes in conn.buffer and
  # splits complete messages on @delimiter; messages (or buffers) larger
  # than message_length_limit are dropped.
  # NOTE(review): discard_till_next_delimiter is a method-local captured
  # by the server callback, so it is shared across ALL connections of
  # this server — it looks intended as per-connection state; confirm
  # upstream before relying on it under concurrent clients.
  def start
    super

    log.info "listening tcp socket", bind: @bind, port: @port
    del_size = @delimiter.length
    discard_till_next_delimiter = false
    if @_extract_enabled && @_extract_tag_key
      server_create(:in_tcp_server_single_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
        unless check_client(conn)
          conn.close
          next
        end

        conn.buffer << data
        buf = conn.buffer
        pos = 0
        while i = buf.index(@delimiter, pos)
          msg = buf[pos...i]
          pos = i + del_size

          if discard_till_next_delimiter
            discard_till_next_delimiter = false
            next
          end

          if !@message_length_limit.nil? && @message_length_limit < msg.bytesize
            log.info "The received data is larger than 'message_length_limit', dropped:", limit: @message_length_limit, size: msg.bytesize, head: msg[...32]
            next
          end

          @parser.parse(msg) do |time, record|
            unless time && record
              log.on_warn { log.warn "pattern not matched", message: msg }
              next
            end

            tag = extract_tag_from_record(record)
            tag ||= @tag
            time ||= extract_time_from_record(record) || Fluent::EventTime.now
            record[@source_address_key] = conn.remote_addr if @source_address_key
            record[@source_hostname_key] = conn.remote_host if @source_hostname_key
            router.emit(tag, time, record)
          end
        end
        buf.slice!(0, pos) if pos > 0
        # If the buffer size exceeds the limit here, it means that the next message will definitely exceed the limit.
        # So we should clear the buffer here. Otherwise, it will keep storing useless data until the next delimiter comes.
        if !@message_length_limit.nil? && @message_length_limit < buf.bytesize
          log.info "The buffer size exceeds 'message_length_limit', cleared:", limit: @message_length_limit, size: buf.bytesize, head: buf[...32]
          buf.clear
          # We should discard the subsequent data until the next delimiter comes.
          discard_till_next_delimiter = true
          next
        end
      end
    else
      server_create(:in_tcp_server_batch_emit, @port, bind: @bind, resolve_name: !!@source_hostname_key, send_keepalive_packet: @send_keepalive_packet) do |data, conn|
        unless check_client(conn)
          conn.close
          next
        end

        conn.buffer << data
        buf = conn.buffer
        pos = 0
        es = Fluent::MultiEventStream.new
        while i = buf.index(@delimiter, pos)
          msg = buf[pos...i]
          pos = i + del_size

          if discard_till_next_delimiter
            discard_till_next_delimiter = false
            next
          end

          if !@message_length_limit.nil? && @message_length_limit < msg.bytesize
            log.info "The received data is larger than 'message_length_limit', dropped:", limit: @message_length_limit, size: msg.bytesize, head: msg[...32]
            next
          end

          @parser.parse(msg) do |time, record|
            unless time && record
              log.on_warn { log.warn "pattern not matched", message: msg }
              next
            end

            time ||= extract_time_from_record(record) || Fluent::EventTime.now
            record[@source_address_key] = conn.remote_addr if @source_address_key
            record[@source_hostname_key] = conn.remote_host if @source_hostname_key
            es.add(time, record)
          end
        end
        router.emit_stream(@tag, es)
        buf.slice!(0, pos) if pos > 0
        # If the buffer size exceeds the limit here, it means that the next message will definitely exceed the limit.
        # So we should clear the buffer here. Otherwise, it will keep storing useless data until the next delimiter comes.
        if !@message_length_limit.nil? && @message_length_limit < buf.bytesize
          log.info "The buffer size exceeds 'message_length_limit', cleared:", limit: @message_length_limit, size: buf.bytesize, head: buf[...32]
          buf.clear
          # We should discard the subsequent data until the next delimiter comes.
          discard_till_next_delimiter = true
          next
        end
      end
    end
  end

  private

  # Returns true when no <security> section is configured, or when the
  # connection's remote address matches one of the configured
  # hosts/networks; logs and rejects otherwise.
  def check_client(conn)
    if @nodes
      remote_addr = conn.remote_addr
      node = @nodes.find { |n| n.include?(remote_addr) rescue false }
      unless node
        log.warn "anonymous client '#{remote_addr}' denied"
        return false
      end
    end

    true
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_roundrobin.rb | lib/fluent/plugin/out_roundrobin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/multi_output'
require 'fluent/config/error'
module Fluent::Plugin
  # Multi-output plugin ("roundrobin") that hands each incoming event stream
  # to one of its <store> outputs in turn, weighted by each store's "weight"
  # parameter (default 1).
  class RoundRobinOutput < MultiOutput
    Fluent::Plugin.register_output('roundrobin', self)

    config_section :store do
      config_param :weight, :integer, default: 1
    end

    def initialize
      super
      @weights = []
    end

    attr_reader :weights

    def configure(conf)
      super
      # Collect one weight per configured <store>, in declaration order.
      @stores.each { |store| @weights.push(store.weight) }
      @rr = -1 # so the first next_output call selects @weight_array[0]
      @rand_seed = Random.new.seed
    end

    def multi_workers_ready?
      true
    end

    def start
      super
      rebuild_weight_array
    end

    # Emits the whole event stream to the next output in the rotation.
    def process(tag, es)
      next_output.emit_events(tag, es)
    end

    private

    # Advances the round-robin cursor, wrapping at the end of the array.
    def next_output
      @rr += 1
      @rr = 0 if @rr >= @weight_array.size
      @weight_array[@rr]
    end

    # Expands @outputs into an array where each output appears
    # (weight / gcd-of-weights) times, then shuffles it deterministically
    # (seeded) so weighted outputs are interleaved rather than clustered.
    def rebuild_weight_array
      divisor = @weights.inject(0) { |acc, w| acc.gcd(w) }
      expanded = @outputs.zip(@weights).flat_map { |output, weight|
        [output] * (weight / divisor)
      }
      # don't randomize order if all weight is 1 (=default)
      if @weights.any? { |w| w > 1 }
        rng = Random.new(@rand_seed)
        expanded = expanded.sort_by { rng.rand }
      end
      @weight_array = expanded
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/filter_parser.rb | lib/fluent/plugin/filter_parser.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/time'
require 'fluent/config/error'
require 'fluent/plugin/filter'
require 'fluent/plugin_helper/parser'
require 'fluent/plugin_helper/compat_parameters'
module Fluent::Plugin
  # Filter plugin ("parser") that parses the string value stored under
  # `key_name` in each record with the configured <parse> section, and
  # replaces (or merges into) the record with the parsed key-value pairs.
  class ParserFilter < Filter
    Fluent::Plugin.register_filter('parser', self)

    helpers :parser, :record_accessor, :compat_parameters

    desc 'Specify field name in the record to parse.'
    config_param :key_name, :string
    desc 'Keep original key-value pair in parsed result.'
    config_param :reserve_data, :bool, default: false
    desc 'Keep original event time in parsed result.'
    config_param :reserve_time, :bool, default: false
    desc 'Remove "key_name" field from the record when parsing is succeeded'
    config_param :remove_key_name_field, :bool, default: false
    desc 'Store parsed values with specified key name prefix.'
    config_param :inject_key_prefix, :string, default: nil
    desc 'If true, invalid string is replaced with safe characters and re-parse it.'
    config_param :replace_invalid_sequence, :bool, default: false
    desc 'Store parsed values as a hash value in a field.'
    config_param :hash_value_field, :string, default: nil
    desc 'Emit invalid record to @ERROR label'
    config_param :emit_invalid_record_to_error, :bool, default: true

    attr_reader :parser

    def configure(conf)
      compat_parameters_convert(conf, :parser)
      super
      @accessor = record_accessor_create(@key_name)
      @parser = parser_create
    end

    # Character substituted for invalid byte sequences when
    # replace_invalid_sequence is enabled.
    REPLACE_CHAR = '?'.freeze

    # Builds a new event stream of parsed records. Records whose target field
    # is missing, or whose parsing raises, are routed to the @ERROR label
    # when emit_invalid_record_to_error is true.
    def filter_stream(tag, es)
      new_es = Fluent::MultiEventStream.new
      es.each do |time, record|
        begin
          raw_value = @accessor.call(record)
          if raw_value.nil?
            # Field absent: keep the original record only when reserve_data,
            # then report the missing key via the rescue below.
            new_es.add(time, handle_parsed(tag, record, time, {})) if @reserve_data
            raise ArgumentError, "#{@key_name} does not exist"
          else
            filter_one_record(tag, time, record, raw_value) do |result_time, result_record|
              new_es.add(result_time, result_record)
            end
          end
        rescue => e
          router.emit_error_event(tag, time, record, e) if @emit_invalid_record_to_error
        end
      end
      new_es
    end

    private

    # Parses one record's raw field value, yielding (time, record) for each
    # parser result. On "invalid byte sequence" ArgumentError and when
    # replace_invalid_sequence is on, scrubs the value and retries.
    def filter_one_record(tag, time, record, raw_value)
      begin
        @parser.parse(raw_value) do |t, values|
          if values
            # Choose event time: original time if reserve_time, else the
            # parsed time when present.
            t = if @reserve_time
              time
            else
              t.nil? ? time : t
            end
            @accessor.delete(record) if @remove_key_name_field
          else
            # Parser yielded no values: pattern not matched.
            router.emit_error_event(tag, time, record, Fluent::Plugin::Parser::ParserError.new("pattern not matched with data '#{raw_value}'")) if @emit_invalid_record_to_error
            next unless @reserve_data
            t = time
            values = {}
          end
          yield(t, handle_parsed(tag, record, t, values))
        end
      rescue Fluent::Plugin::Parser::ParserError => e
        raise e
      rescue ArgumentError => e
        # Only scrub-and-retry for encoding errors, and only when opted in.
        raise unless @replace_invalid_sequence
        raise unless e.message.index("invalid byte sequence in") == 0
        raw_value = raw_value.scrub(REPLACE_CHAR)
        retry
      rescue => e
        # Wrap any other failure as a ParserError for uniform error routing.
        raise Fluent::Plugin::Parser::ParserError, "parse failed #{e.message}"
      end
    end

    # Applies inject_key_prefix / hash_value_field / reserve_data options to
    # the parsed values and returns the record to emit.
    def handle_parsed(tag, record, t, values)
      if values && @inject_key_prefix
        values = Hash[values.map { |k, v| [@inject_key_prefix + k, v] }]
      end
      r = @hash_value_field ? {@hash_value_field => values} : values
      if @reserve_data
        r = r ? record.merge(r) : record
      end
      r
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_sample.rb | lib/fluent/plugin/in_sample.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'json'
require 'fluent/plugin/input'
require 'fluent/config/error'
module Fluent::Plugin
class SampleInput < Input
Fluent::Plugin.register_input('sample', self)
Fluent::Plugin.register_input('dummy', self)
helpers :thread, :storage
BIN_NUM = 10
DEFAULT_STORAGE_TYPE = 'local'
desc "The value is the tag assigned to the generated events."
config_param :tag, :string
desc "The number of events in event stream of each emits."
config_param :size, :integer, default: 1
desc "It configures how many events to generate per second."
config_param :rate, :integer, default: 1
desc "If specified, each generated event has an auto-incremented key field."
config_param :auto_increment_key, :string, default: nil
desc "The boolean to suspend-and-resume incremental value after restart"
config_param :suspend, :bool, default: false,deprecated: 'This parameters is ignored'
desc "Reuse the sample data to reduce the load when sending large amounts of data. You can enable it if filter does not do destructive change"
config_param :reuse_record, :bool, default: false
desc "The sample data to be generated. An array of JSON hashes or a single JSON hash."
config_param :sample, alias: :dummy, default: [{"message" => "sample"}] do |val|
begin
parsed = JSON.parse(val)
rescue JSON::ParserError => ex
# Fluent::ConfigParseError, "got incomplete JSON" will be raised
# at literal_parser.rb with --use-v1-config, but I had to
# take care at here for the case of --use-v0-config.
raise Fluent::ConfigError, "#{ex.class}: #{ex.message}"
end
sample = parsed.is_a?(Array) ? parsed : [parsed]
sample.each_with_index do |e, i|
raise Fluent::ConfigError, "#{i}th element of sample, #{e}, is not a hash" unless e.is_a?(Hash)
end
sample
end
def initialize
super
@storage = nil
end
def configure(conf)
super
@sample_index = 0
config = conf.elements.find{|e| e.name == 'storage' }
@storage = storage_create(usage: 'suspend', conf: config, default_type: DEFAULT_STORAGE_TYPE)
end
def multi_workers_ready?
true
end
def start
super
@storage.put(:increment_value, 0) unless @storage.get(:increment_value)
# keep 'dummy' to avoid breaking changes for existing environment. Change it in fluentd v2
@storage.put(:dummy_index, 0) unless @storage.get(:dummy_index)
if @auto_increment_key && !@storage.get(:auto_increment_value)
@storage.put(:auto_increment_value, -1)
end
thread_create(:sample_input, &method(:run))
end
def run
batch_num = (@rate / BIN_NUM).to_i
residual_num = (@rate % BIN_NUM)
while thread_current_running?
current_time = Time.now.to_i
BIN_NUM.times do
break unless (thread_current_running? && Time.now.to_i <= current_time)
wait(0.1) { emit(batch_num) }
end
emit(residual_num) if thread_current_running?
# wait for next second
while thread_current_running? && Time.now.to_i <= current_time
sleep 0.01
end
end
end
def emit(num)
begin
if @size > 1
num.times do
router.emit_array(@tag, Array.new(@size) { [Fluent::EventTime.now, generate] })
end
else
num.times { router.emit(@tag, Fluent::EventTime.now, generate) }
end
rescue => _
# ignore all errors not to stop emits by emit errors
end
end
def next_sample
d = @reuse_record ? @sample[@sample_index] : @sample[@sample_index].dup
@sample_index += 1
return d if d
@sample_index = 0
next_sample
end
def generate
d = next_sample
if @auto_increment_key
d = d.dup if @reuse_record
d[@auto_increment_key] = @storage.update(:auto_increment_value){|v| v + 1 }
end
d
end
def wait(time)
start_time = Time.now
yield
sleep_time = time - (Time.now - start_time)
sleep sleep_time if sleep_time > 0
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_csv.rb | lib/fluent/plugin/formatter_csv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin_helper'
require 'fluent/plugin/formatter'
require 'csv'
module Fluent
  module Plugin
    # Formatter plugin ("csv") that serializes selected record fields as one
    # CSV line. Reuses a per-thread CSV writer when owned by a plugin to
    # avoid re-allocating it for every record.
    class CsvFormatter < Formatter
      Plugin.register_formatter('csv', self)
      include PluginHelper::Mixin
      helpers :record_accessor

      config_param :delimiter, default: ',' do |val|
        # Accept literal backslash-t or the word TAB as a tab delimiter.
        ['\t', 'TAB'].include?(val) ? "\t".freeze : val.freeze
      end
      config_param :force_quotes, :bool, default: true
      # "array" looks good for type of :fields, but this implementation removes tailing comma
      # TODO: Is it needed to support tailing comma?
      config_param :fields, :array, value_type: :string
      config_param :add_newline, :bool, default: true

      # The CSV writer can only be cached when this formatter is owned by a
      # plugin (owner provides a stable plugin_id for the thread-local key).
      def csv_cacheable?
        !!owner
      end

      # Thread-local key for the cached CSV writer; nil when not cacheable.
      def csv_thread_key
        csv_cacheable? ? "#{owner.plugin_id}_csv_formatter_#{@usage}_csv" : nil
      end

      # Returns a CSV writer backed by a binary string buffer, cached per
      # thread when possible, otherwise freshly built per call.
      def csv_for_thread
        if csv_cacheable?
          Thread.current[csv_thread_key] ||= CSV.new("".force_encoding(Encoding::ASCII_8BIT), **@generate_opts)
        else
          CSV.new("".force_encoding(Encoding::ASCII_8BIT), **@generate_opts)
        end
      end

      def configure(conf)
        super

        @fields = fields.select{|f| !f.empty? }
        raise ConfigError, "empty value is specified in fields parameter" if @fields.empty?

        # When any field uses nested-accessor syntax, swap #format for the
        # accessor-based variant on this instance only.
        if @fields.any? { |f| record_accessor_nested?(f) }
          @accessors = @fields.map { |f| record_accessor_create(f) }
          mformat = method(:format_with_nested_fields)
          singleton_class.module_eval do
            define_method(:format, mformat)
          end
        end
        @generate_opts = {col_sep: @delimiter, force_quotes: @force_quotes, headers: @fields,
                          row_sep: @add_newline ? :auto : "".force_encoding(Encoding::ASCII_8BIT)}
      end

      # Formats one record into a CSV line using flat field lookup.
      def format(tag, time, record)
        csv = csv_for_thread
        line = (csv << record).string.dup
        # Need manual cleanup because CSV writer doesn't provide such method.
        csv.rewind
        csv.truncate(0)
        line
      end

      # Formats one record into a CSV line using record accessors (nested fields).
      def format_with_nested_fields(tag, time, record)
        csv = csv_for_thread
        values = @accessors.map { |a| a.call(record) }
        line = (csv << values).string.dup
        # Need manual cleanup because CSV writer doesn't provide such method.
        csv.rewind
        csv.truncate(0)
        line
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/bare_output.rb | lib/fluent/plugin/bare_output.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/log'
require 'fluent/plugin_id'
require 'fluent/plugin_helper'
module Fluent
  module Plugin
    # Minimal output base class with emit counters but none of Output's
    # buffering machinery.
    class BareOutput < Base
      include PluginHelper::Mixin # for metrics

      # DO NOT USE THIS plugin for normal output plugin. Use Output instead.
      # This output plugin base class is only for meta-output plugins
      # which cannot be implemented on MultiOutput.
      # E.g,: forest, config-expander

      helpers_internal :metrics

      include PluginId
      include PluginLoggerMixin
      include PluginHelper::Mixin

      # Subclasses MUST override this to consume the event stream.
      def process(tag, es)
        raise NotImplementedError, "BUG: output plugins MUST implement this method"
      end

      def initialize
        super
        @counter_mutex = Mutex.new
        # TODO: well organized counters
        @num_errors_metrics = nil
        @emit_count_metrics = nil
        @emit_records_metrics = nil
        @emit_size_metrics = nil
      end

      def configure(conf)
        super

        @num_errors_metrics = metrics_create(namespace: "fluentd", subsystem: "bare_output", name: "num_errors", help_text: "Number of count num errors")
        @emit_count_metrics = metrics_create(namespace: "fluentd", subsystem: "bare_output", name: "emit_count", help_text: "Number of count emits")
        @emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "bare_output", name: "emit_records", help_text: "Number of emit records")
        @emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "bare_output", name: "emit_size", help_text: "Total size of emit events")
        # Size accounting is opt-in via system config (msgpack serialization
        # per emit is not free).
        @enable_size_metrics = !!system_config.enable_size_metrics
      end

      # Snapshot of the emit counters, namespaced under 'bare_output'.
      def statistics
        stats = {
          'num_errors' => @num_errors_metrics.get,
          'emit_records' => @emit_records_metrics.get,
          'emit_count' => @emit_count_metrics.get,
          'emit_size' => @emit_size_metrics.get,
        }

        { 'bare_output' => stats }
      end

      # Synchronously processes the stream, updating counters; errors bump
      # num_errors and are re-raised.
      def emit_sync(tag, es)
        @emit_count_metrics.inc
        begin
          process(tag, es)
          @emit_records_metrics.add(es.size)
          @emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
        rescue
          @num_errors_metrics.inc
          raise
        end
      end
      alias :emit_events :emit_sync
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_exec.rb | lib/fluent/plugin/in_exec.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
require 'json'
module Fluent::Plugin
  # Input plugin ("exec") that runs an external command (once or on an
  # interval), parses its output with the configured <parse> section, and
  # emits the parsed records.
  class ExecInput < Fluent::Plugin::Input
    Fluent::Plugin.register_input('exec', self)

    helpers :compat_parameters, :extract, :parser, :child_process

    desc 'The command (program) to execute.'
    config_param :command, :string
    desc 'Specify connect mode to executed process'
    config_param :connect_mode, :enum, list: [:read, :read_with_stderr], default: :read
    config_section :parse do
      config_set_default :@type, 'tsv'
      config_set_default :time_type, :float
      config_set_default :time_key, nil
      config_set_default :estimate_current_event, false
    end
    config_section :extract do
      config_set_default :time_type, :float
    end
    desc 'Tag of the output events.'
    config_param :tag, :string, default: nil
    desc 'The interval time between periodic program runs.'
    config_param :run_interval, :time, default: nil
    desc 'The default block size to read if parser requires partial read.'
    config_param :read_block_size, :size, default: 10240 # 10k
    desc 'The encoding to receive the result of the command, especially for non-ascii characters.'
    config_param :encoding, :string, default: nil

    attr_reader :parser

    def configure(conf)
      compat_parameters_convert(conf, :extract, :parser)
      # If a time_format is given without a time_type, default it to string
      # parsing in both subsections.
      ['parse', 'extract'].each do |subsection_name|
        if subsection = conf.elements(subsection_name).first
          if subsection.has_key?('time_format')
            subsection['time_type'] ||= 'string'
          end
        end
      end
      super

      # A destination tag must come from either `tag` or extract's tag_key.
      if !@tag && (!@extract_config || !@extract_config.tag_key)
        raise Fluent::ConfigError, "'tag' or 'tag_key' option is required on exec input"
      end
      validate_encoding(@encoding) if @encoding
      @parser = parser_create
    end

    # Raises Fluent::ConfigError when `encoding` is not a known encoding name.
    def validate_encoding(encoding)
      Encoding.find(encoding)
    rescue ArgumentError => e
      raise Fluent::ConfigError, e.message
    end

    def multi_workers_ready?
      true
    end

    # Spawns the child process: periodically when run_interval is set,
    # otherwise once, immediately.
    def start
      super

      options = { mode: [@connect_mode] }
      options[:external_encoding] = @encoding if @encoding
      if @run_interval
        child_process_execute(:exec_input, @command, interval: @run_interval, **options, &method(:run))
      else
        child_process_execute(:exec_input, @command, immediate: true, **options, &method(:run))
      end
    end

    # Feeds the child process output into the parser, picking the read
    # strategy the parser supports (IO, partial chunks, per-line, or whole).
    def run(io)
      case
      when @parser.implement?(:parse_io)
        @parser.parse_io(io, &method(:on_record))
      when @parser.implement?(:parse_partial_data)
        until io.eof?
          @parser.parse_partial_data(io.readpartial(@read_block_size), &method(:on_record))
        end
      when @parser.parser_type == :text_per_line
        io.each_line do |line|
          @parser.parse(line.chomp, &method(:on_record))
        end
      else
        @parser.parse(io.read, &method(:on_record))
      end
    end

    # Emits one parsed record, resolving tag/time from the record when the
    # extract helper is configured, else falling back to `tag` / now.
    def on_record(time, record)
      tag = extract_tag_from_record(record)
      tag ||= @tag
      time ||= extract_time_from_record(record) || Fluent::EventTime.now
      router.emit(tag, time, record)
    rescue => e
      log.error "exec failed to emit", tag: tag, record: JSON.generate(record), error: e
      router.emit_error_event(tag, time, record, e) if tag && time && record
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_multiline.rb | lib/fluent/plugin/parser_multiline.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
require 'fluent/plugin/parser_regexp'
module Fluent
  module Plugin
    # Parser plugin ("multiline") that stitches format1..formatN regexps into
    # one multiline expression and repeatedly applies it to the input text.
    class MultilineParser < Parser
      Plugin.register_parser('multiline', self)

      desc 'Specify regexp pattern for start line of multiple lines'
      config_param :format_firstline, :string, default: nil
      desc 'Enable an option returning line as unmatched_line'
      config_param :unmatched_lines, :string, default: nil

      # Upper bound for the formatN parameters (format1..format20).
      FORMAT_MAX_NUM = 20

      # Internal regexp parser variant that also returns the MatchData so the
      # caller can continue parsing from match.post_match.
      class MultilineRegexpParser < Fluent::Plugin::RegexpParser
        def parse(text)
          m = @expression.match(text)
          unless m
            yield nil, nil
            return m
          end
          r = {}
          m.names.each do |name|
            if (value = m[name])
              r[name] = value
            end
          end
          time, record = convert_values(parse_time(r), r)
          yield(time, record)
          m
        end
      end

      def configure(conf)
        super

        # Strip the surrounding slashes from each formatN and concatenate
        # them into a single multiline regexp source.
        formats = parse_formats(conf).compact.map { |f| f[1..-2] }.join
        begin
          regexp = Regexp.new(formats, Regexp::MULTILINE)
          if regexp.named_captures.empty?
            raise "No named captures"
          end
          regexp_conf = Fluent::Config::Element.new("", "", { "expression" => "/#{formats}/m" }, [])
          @parser = Fluent::Plugin::MultilineParser::MultilineRegexpParser.new
          @parser.configure(conf + regexp_conf)
        rescue => e
          raise Fluent::ConfigError, "Invalid regexp '#{formats}': #{e}"
        end

        if @format_firstline
          check_format_regexp(@format_firstline, 'format_firstline')
          @firstline_regex = Regexp.new(@format_firstline[1..-2])
        end
      end

      # Applies the combined regexp repeatedly over `text`, yielding each
      # (time, record); with unmatched_lines set, non-matching text is
      # yielded as {'unmatched_line' => text} instead of (nil, nil).
      def parse(text, &block)
        loop do
          m =
            if @unmatched_lines
              @parser.call(text) do |time, record|
                if time && record
                  yield(time, record)
                else
                  yield(Fluent::EventTime.now, { 'unmatched_line' => text })
                end
              end
            else
              @parser.call(text, &block)
            end
          return if m.nil?
          # Continue on whatever follows the match, skipping one leading newline.
          text = m.post_match
          if text.start_with?("\n")
            text = text[1..-1]
          end
          return if text.empty?
        end
      end

      def has_firstline?
        !!@format_firstline
      end

      # True when `text` matches the configured first-line pattern.
      def firstline?(text)
        @firstline_regex.match?(text)
      end

      private

      # Collects format1..formatN in order, rejecting gaps in the sequence
      # and validating each as a /.../-delimited regexp.
      def parse_formats(conf)
        check_format_range(conf)

        prev_format = nil
        (1..FORMAT_MAX_NUM).map { |i|
          format = conf["format#{i}"]
          if (i > 1) && prev_format.nil? && !format.nil?
            raise Fluent::ConfigError, "Jump of format index found. format#{i - 1} is missing."
          end
          prev_format = format
          next if format.nil?

          check_format_regexp(format, "format#{i}")
          format
        }
      end

      # Rejects formatN keys whose N is outside 1..FORMAT_MAX_NUM.
      def check_format_range(conf)
        invalid_formats = conf.keys.select { |k|
          m = k.match(/^format(\d+)$/)
          m ? !((1..FORMAT_MAX_NUM).include?(m[1].to_i)) : false
        }
        unless invalid_formats.empty?
          raise Fluent::ConfigError, "Invalid formatN found. N should be 1 - #{FORMAT_MAX_NUM}: " + invalid_formats.join(",")
        end
      end

      # Requires `format` to be slash-delimited and compilable as a regexp.
      def check_format_regexp(format, key)
        if format[0] == '/' && format[-1] == '/'
          begin
            Regexp.new(format[1..-2], Regexp::MULTILINE)
          rescue => e
            raise Fluent::ConfigError, "Invalid regexp in #{key}: #{e}"
          end
        else
          raise Fluent::ConfigError, "format should be Regexp, need //, in #{key}: '#{format}'"
        end
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_syslog.rb | lib/fluent/plugin/in_syslog.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
require 'fluent/config/error'
require 'fluent/plugin/parser'
require 'json'
module Fluent::Plugin
class SyslogInput < Input
Fluent::Plugin.register_input('syslog', self)
helpers :parser, :compat_parameters, :server
DEFAULT_PARSER = 'syslog'
SYSLOG_REGEXP = /^\<([0-9]+)\>(.*)/
FACILITY_MAP = {
0 => 'kern',
1 => 'user',
2 => 'mail',
3 => 'daemon',
4 => 'auth',
5 => 'syslog',
6 => 'lpr',
7 => 'news',
8 => 'uucp',
9 => 'cron',
10 => 'authpriv',
11 => 'ftp',
12 => 'ntp',
13 => 'audit',
14 => 'alert',
15 => 'at',
16 => 'local0',
17 => 'local1',
18 => 'local2',
19 => 'local3',
20 => 'local4',
21 => 'local5',
22 => 'local6',
23 => 'local7'
}
SEVERITY_MAP = {
0 => 'emerg',
1 => 'alert',
2 => 'crit',
3 => 'err',
4 => 'warn',
5 => 'notice',
6 => 'info',
7 => 'debug'
}
desc 'The port to listen to.'
config_param :port, :integer, default: 5140
desc 'The bind address to listen to.'
config_param :bind, :string, default: '0.0.0.0'
desc 'The prefix of the tag. The tag itself is generated by the tag prefix, facility level, and priority.'
config_param :tag, :string
desc 'The transport protocol used to receive logs.(udp, tcp)'
config_param :protocol_type, :enum, list: [:tcp, :udp], default: nil, deprecated: "use transport directive"
desc 'The message frame type.(traditional, octet_count)'
config_param :frame_type, :enum, list: [:traditional, :octet_count], default: :traditional
desc 'If true, add source host to event record.'
config_param :include_source_host, :bool, default: false, deprecated: 'use "source_hostname_key" or "source_address_key" instead.'
desc 'Specify key of source host when include_source_host is true.'
config_param :source_host_key, :string, default: 'source_host'.freeze
desc 'Enable the option to emit unmatched lines.'
config_param :emit_unmatched_lines, :bool, default: false
desc 'The field name of hostname of sender.'
config_param :source_hostname_key, :string, default: nil
desc 'Try to resolve hostname from IP addresses or not.'
config_param :resolve_hostname, :bool, default: nil
desc 'Check the remote connection is still available by sending a keepalive packet if this value is true.'
config_param :send_keepalive_packet, :bool, default: false
desc 'The field name of source address of sender.'
config_param :source_address_key, :string, default: nil
desc 'The field name of the severity.'
config_param :severity_key, :string, default: nil, alias: :priority_key
desc 'The field name of the facility.'
config_param :facility_key, :string, default: nil
desc "The max bytes of message"
config_param :message_length_limit, :size, default: 2048
config_param :blocking_timeout, :time, default: 0.5
desc 'The delimiter value "\n"'
config_param :delimiter, :string, default: "\n" # syslog family add "\n" to each message
config_section :parse do
config_set_default :@type, DEFAULT_PARSER
config_param :with_priority, :bool, default: true
end
# overwrite server plugin to change default to :udp
config_section :transport, required: false, multi: false, init: true, param_name: :transport_config do
config_argument :protocol, :enum, list: [:tcp, :udp, :tls], default: :udp
end
def configure(conf)
compat_parameters_convert(conf, :parser)
super
if conf.has_key?('priority_key')
log.warn "priority_key is deprecated. Use severity_key instead"
end
@use_default = false
@parser = parser_create
@parser_parse_priority = @parser.respond_to?(:with_priority) && @parser.with_priority
if @include_source_host
if @source_address_key
raise Fluent::ConfigError, "specify either source_address_key or include_source_host"
end
@source_address_key = @source_host_key
end
if @source_hostname_key
if @resolve_hostname.nil?
@resolve_hostname = true
elsif !@resolve_hostname # user specifies "false" in config
raise Fluent::ConfigError, "resolve_hostname must be true with source_hostname_key"
end
end
@_event_loop_run_timeout = @blocking_timeout
protocol = @protocol_type || @transport_config.protocol
if @send_keepalive_packet && protocol == :udp
raise Fluent::ConfigError, "send_keepalive_packet is available for tcp/tls"
end
end
def multi_workers_ready?
true
end
def zero_downtime_restart_ready?
true
end
def start
super
log.info "listening syslog socket on #{@bind}:#{@port} with #{@protocol_type || @transport_config.protocol}"
case @protocol_type || @transport_config.protocol
when :udp then start_udp_server
when :tcp then start_tcp_server
when :tls then start_tcp_server(tls: true)
else
raise "BUG: invalid transport value: #{@protocol_type || @transport_config.protocol}"
end
end
def start_udp_server
server_create_udp(:in_syslog_udp_server, @port, bind: @bind, max_bytes: @message_length_limit, resolve_name: @resolve_hostname) do |data, sock|
message_handler(data.chomp, sock)
end
end
def start_tcp_server(tls: false)
octet_count_frame = @frame_type == :octet_count
delimiter = octet_count_frame ? " " : @delimiter
delimiter_size = delimiter.size
server_create_connection(
tls ? :in_syslog_tls_server : :in_syslog_tcp_server, @port,
bind: @bind,
resolve_name: @resolve_hostname,
send_keepalive_packet: @send_keepalive_packet
) do |conn|
conn.data do |data|
buffer = conn.buffer
buffer << data
pos = 0
if octet_count_frame
while idx = buffer.index(delimiter, pos)
num = Integer(buffer[pos..idx])
msg = buffer[idx + delimiter_size, num]
if msg.size != num
break
end
pos = idx + delimiter_size + num
message_handler(msg, conn)
end
else
while idx = buffer.index(delimiter, pos)
msg = buffer[pos...idx]
pos = idx + delimiter_size
message_handler(msg, conn)
end
end
buffer.slice!(0, pos) if pos > 0
end
end
end
private
def emit_unmatched(data, sock)
record = {"unmatched_line" => data}
record[@source_address_key] = sock.remote_addr if @source_address_key
record[@source_hostname_key] = sock.remote_host if @source_hostname_key
emit("#{@tag}.unmatched", Fluent::EventTime.now, record)
end
def message_handler(data, sock)
pri = nil
text = data
unless @parser_parse_priority
m = SYSLOG_REGEXP.match(data)
unless m
if @emit_unmatched_lines
emit_unmatched(data, sock)
end
log.warn "invalid syslog message: #{data.dump}"
return
end
pri = m[1].to_i
text = m[2]
end
@parser.parse(text) do |time, record|
unless time && record
if @emit_unmatched_lines
emit_unmatched(data, sock)
end
log.warn "failed to parse message", data: data
return
end
pri ||= record.delete('pri')
facility = FACILITY_MAP[pri >> 3]
severity = SEVERITY_MAP[pri & 0b111]
record[@severity_key] = severity if @severity_key
record[@facility_key] = facility if @facility_key
record[@source_address_key] = sock.remote_addr if @source_address_key
record[@source_hostname_key] = sock.remote_host if @source_hostname_key
tag = "#{@tag}.#{facility}.#{severity}"
emit(tag, time, record)
end
rescue => e
if @emit_unmatched_lines
emit_unmatched(data, sock)
end
log.error "invalid input", data: data, error: e
log.error_backtrace
end
# Pushes a single event into the router. Emission failures are logged
# (including the JSON-serialized record for debugging) rather than
# propagated, so the receive loop keeps running.
def emit(tag, time, record)
  begin
    router.emit(tag, time, record)
  rescue => e
    log.error "syslog failed to emit", error: e, tag: tag, record: JSON.generate(record)
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_json.rb | lib/fluent/plugin/formatter_json.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
require 'fluent/oj_options'
module Fluent
module Plugin
# Formats each record as a single JSON document, one per line by default.
# Serialization is delegated to Oj when available (faster), falling back
# to the stdlib JSON generator.
class JSONFormatter < Formatter
  include Fluent::Plugin::Newline::Mixin
  Plugin.register_formatter('json', self)

  # Which serializer to prefer; anything other than 'oj' selects stdlib JSON.
  config_param :json_parser, :string, default: 'oj'
  # Append @newline (from Newline::Mixin) after each document.
  config_param :add_newline, :bool, default: true

  def configure(conf)
    super
    # Resolve the dump callable once here so the per-record hot path is a
    # single Method#call with no branching.
    if @json_parser == 'oj'
      if Fluent::OjOptions.available?
        @dump_proc = Oj.method(:dump)
      else
        log.info "Oj isn't installed, fallback to JSON as json parser"
        @dump_proc = JSON.method(:generate)
      end
    else
      @dump_proc = JSON.method(:generate)
    end
    # format json is used on various highload environment, so re-define method to skip if check
    unless @add_newline
      define_singleton_method(:format, method(:format_without_nl))
    end
  end

  # Serialize one event; tag and time are unused by this formatter.
  def format(tag, time, record)
    json_str = @dump_proc.call(record)
    "#{json_str}#{@newline}"
  ensure
    # Release the intermediate dump string eagerly (high-volume path);
    # the interpolated return value is a separate string.
    json_str&.clear
  end

  # Newline-free variant installed over #format when add_newline is false.
  def format_without_nl(tag, time, record)
    @dump_proc.call(record)
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_file.rb | lib/fluent/plugin/out_file.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fileutils'
require 'zlib'
require 'time'
require 'pathname'
require 'fluent/plugin/output'
require 'fluent/config/error'
# TODO remove ...
require 'fluent/plugin/file_util'
module Fluent::Plugin
# Writes buffered chunks to local files, optionally gzip/zstd compressed,
# with per-timekey path templating and an optional symlink to the chunk
# currently being filled.
class FileOutput < Output
  Fluent::Plugin.register_output('file', self)

  helpers :formatter, :inject, :compat_parameters

  SUPPORTED_COMPRESS = [:text, :gz, :gzip, :zstd]
  # User-facing `compress` value -> internal compression method
  # (nil means "no compression"; :gz is a legacy alias for :gzip).
  SUPPORTED_COMPRESS_MAP = {
    text: nil,
    gz: :gzip,
    gzip: :gzip,
    zstd: :zstd,
  }

  DEFAULT_TIMEKEY = 60 * 60 * 24

  desc "The Path of the file."
  config_param :path, :string
  desc "Specify to add file suffix for bare file path or not."
  config_param :add_path_suffix, :bool, default: true
  desc "The file suffix added to bare file path."
  config_param :path_suffix, :string, default: '.log'
  desc "The flushed chunk is appended to existence file or not."
  config_param :append, :bool, default: false
  desc "Compress flushed file."
  config_param :compress, :enum, list: SUPPORTED_COMPRESS, default: :text
  desc "Execute compression again even when buffer chunk is already compressed."
  config_param :recompress, :bool, default: false
  desc "Create symlink to temporary buffered file when buffer_type is file (disabled on Windows)."
  config_param :symlink_path, :string, default: nil
  desc "Use relative path for symlink target (default: false)"
  config_param :symlink_path_use_relative, :bool, default: false

  config_section :format do
    config_set_default :@type, 'out_file'
  end

  config_section :buffer do
    config_set_default :@type, 'file'
    config_set_default :chunk_keys, ['time']
    config_set_default :timekey, DEFAULT_TIMEKEY
  end

  attr_reader :dir_perm
  attr_accessor :last_written_path # for tests

  # Mixed into the buffer instance when `symlink_path` is set: keeps a
  # symlink pointing at the buffer chunk with the newest timekey.
  module SymlinkBufferMixin
    def metadata(timekey: nil, tag: nil, variables: nil)
      metadata = super
      @latest_metadata ||= new_metadata(timekey: 0)
      # Remember the newest timekey so generate_chunk can decide whether the
      # freshly created chunk is the one the symlink should point at.
      if metadata.timekey && (metadata.timekey >= @latest_metadata.timekey)
        @latest_metadata = metadata
      end
      metadata
    end

    def output_plugin_for_symlink=(output_plugin)
      @_output_plugin_for_symlink = output_plugin
    end

    def symlink_path=(path)
      @_symlink_path = path
    end

    def generate_chunk(metadata)
      chunk = super
      # "symlink" feature is to link from symlink_path to the latest file chunk. Records with latest
      # timekey will be appended into that file chunk. On the other side, resumed file chunks might NOT
      # have timekey, especially in the cases that resumed file chunks are generated by Fluentd v0.12.
      # These chunks will be enqueued immediately, and will be flushed soon.
      if chunk.metadata == @latest_metadata
        sym_path = @_output_plugin_for_symlink.extract_placeholders(@_symlink_path, chunk)
        FileUtils.mkdir_p(File.dirname(sym_path), mode: @_output_plugin_for_symlink.dir_perm)
        if @_output_plugin_for_symlink.symlink_path_use_relative
          relative_path = Pathname.new(chunk.path).relative_path_from(Pathname.new(File.dirname(sym_path)))
          FileUtils.ln_sf(relative_path, sym_path)
        else
          FileUtils.ln_sf(chunk.path, sym_path)
        end
      end
      chunk
    end
  end

  def configure(conf)
    compat_parameters_convert(conf, :formatter, :buffer, :inject, default_chunk_key: "time")

    configured_time_slice_format = conf['time_slice_format']

    if conf.elements(name: 'buffer').empty?
      conf.add_element('buffer', 'time')
    end
    buffer_conf = conf.elements(name: 'buffer').first
    # Fluent::PluginId#configure is not called yet, so we can't use #plugin_root_dir here.
    if !buffer_conf.has_key?('path') && !(conf['@id'] && system_config.root_dir)
      # v0.14 file buffer handles path as directory if '*' is missing
      # 'dummy_path' is not to raise configuration error for 'path' in file buffer plugin,
      # but raise it in this plugin.
      buffer_conf['path'] = conf['path'] || '/tmp/dummy_path'
    end

    if conf.has_key?('utc') || conf.has_key?('localtime')
      param_name = conf.has_key?('utc') ? 'utc' : 'localtime'
      log.warn "'#{param_name}' is deprecated for output plugin. This parameter is used for formatter plugin in compatibility layer. If you want to use same feature, use timekey_use_utc parameter in <buffer> directive instead"
    end

    super

    @compress_method = SUPPORTED_COMPRESS_MAP[@compress]

    if @path.include?('*') && !@buffer_config.timekey
      raise Fluent::ConfigError, "path including '*' must be used with buffer chunk key 'time'"
    end

    path_suffix = @add_path_suffix ? @path_suffix : ''
    path_timekey = if @chunk_key_time
                     @as_secondary ? @primary_instance.buffer_config.timekey : @buffer_config.timekey
                   else
                     nil
                   end
    @path_template = generate_path_template(@path, path_timekey, @append, @compress_method, path_suffix: path_suffix, time_slice_format: configured_time_slice_format)

    if @as_secondary
      # When this plugin is configured as secondary & primary plugin has tag key, but this plugin may not have it.
      # Increment placeholder can make another output file per chunk tag/keys even if original path doesn't include it.
      placeholder_validators(:path, @path_template).select{|v| v.type == :time }.each do |v|
        v.validate!
      end
    else
      placeholder_validate!(:path, @path_template)

      # Probe-writability check: build the most specific dummy tag/record the
      # template can reference and verify the resulting path is writable.
      max_tag_index = get_placeholders_tag(@path_template).max || 1
      max_tag_index = 1 if max_tag_index < 1
      dummy_tag = (['a'] * max_tag_index).join('.')
      dummy_record_keys = get_placeholders_keys(@path_template) || ['message']
      dummy_record = Hash[dummy_record_keys.zip(['data'] * dummy_record_keys.size)]
      test_chunk1 = chunk_for_test(dummy_tag, Fluent::EventTime.now, dummy_record)
      test_path = extract_placeholders(@path_template, test_chunk1)
      unless ::Fluent::FileUtil.writable_p?(test_path)
        raise Fluent::ConfigError, "out_file: `#{test_path}` is not writable"
      end
    end

    @formatter = formatter_create

    if @symlink_path && @buffer.respond_to?(:path)
      if @as_secondary
        raise Fluent::ConfigError, "symlink_path option is unavailable in <secondary>: consider to use secondary_file plugin"
      end
      if Fluent.windows?
        log.warn "symlink_path is unavailable on Windows platform. disabled."
        @symlink_path = nil
      else
        placeholder_validators(:symlink_path, @symlink_path).reject{ |v| v.type == :time }.each do |v|
          begin
            v.validate!
          rescue Fluent::ConfigError => e
            log.warn "#{e}. This means multiple chunks are competing for a single symlink_path, so some logs may not be taken from the symlink."
          end
        end
        @buffer.extend SymlinkBufferMixin
        @buffer.symlink_path = @symlink_path
        @buffer.output_plugin_for_symlink = self
      end
    end

    # FIX: this consistency check was previously nested inside the
    # `symlink_path` branch above, so a buffer compressed with one format and
    # an output configured for another was silently accepted unless
    # symlink_path happened to be set — and write_from_compressed_chunk would
    # then emit bytes in the buffer's format into a file labelled with the
    # output's format. The check has nothing to do with symlinks and must
    # always run.
    if @compress != :text && @buffer.compress != :text && @buffer.compress != @compress_method
      raise Fluent::ConfigError, "You cannot specify different compression formats for Buffer (Buffer: #{@buffer.compress}, Self: #{@compress})"
    end

    @dir_perm = system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION
    @file_perm = system_config.file_permission || Fluent::DEFAULT_FILE_PERMISSION
    @need_lock = system_config.workers > 1
  end

  def multi_workers_ready?
    true
  end

  def format(tag, time, record)
    r = inject_values_to_record(tag, time, record)
    @formatter.format(tag, time, r)
  end

  # Flushes one chunk to disk. Picks a writer depending on whether the data
  # must be compressed here, is already compressed in the buffer, or is plain
  # text; uses a lock directory when multiple workers may race on the path.
  def write(chunk)
    path = extract_placeholders(@path_template, chunk)
    FileUtils.mkdir_p File.dirname(path), mode: @dir_perm

    writer = case
             when @compress_method.nil?
               method(:write_without_compression)
             when @compress_method != :text
               if @buffer.compress == :text || @recompress
                 method(:write_with_compression).curry.call(@compress_method)
               else
                 method(:write_from_compressed_chunk).curry.call(@compress_method)
               end
             else
               raise "BUG: unknown compression method #{@compress_method}"
             end

    if @append
      if @need_lock
        acquire_worker_lock(path) do
          writer.call(path, chunk)
        end
      else
        writer.call(path, chunk)
      end
    else
      find_filepath_available(path, with_lock: @need_lock) do |actual_path|
        writer.call(actual_path, chunk)
        path = actual_path
      end
    end

    @last_written_path = path
  end

  def write_without_compression(path, chunk)
    File.open(path, "ab", @file_perm) do |f|
      chunk.write_to(f)
    end
  end

  # Compress (or re-compress) chunk content while writing.
  def write_with_compression(type, path, chunk)
    File.open(path, "ab", @file_perm) do |f|
      gz = nil
      if type == :gzip
        gz = Zlib::GzipWriter.new(f)
      elsif type == :zstd
        gz = Zstd::StreamWriter.new(f)
      end
      chunk.write_to(gz, compressed: :text)
      gz.close
    end
  end

  # The buffer already holds data compressed in `type`; copy it through.
  def write_from_compressed_chunk(type, path, chunk)
    File.open(path, "ab", @file_perm) do |f|
      chunk.write_to(f, compressed: type)
    end
  end

  # Choose a strftime pattern just fine-grained enough for the timekey.
  def timekey_to_timeformat(timekey)
    case timekey
    when nil then ''
    when 0...60 then '%Y%m%d%H%M%S' # 60 exclusive
    when 60...3600 then '%Y%m%d%H%M'
    when 3600...86400 then '%Y%m%d%H'
    else '%Y%m%d'
    end
  end

  def compression_suffix(compress)
    case compress
    when :gzip then '.gz'
    when :zstd then '.zstd'
    when nil then ''
    else
      raise ArgumentError, "unknown compression type #{compress}"
    end
  end

  # /path/to/dir/file.* -> /path/to/dir/file.%Y%m%d
  # /path/to/dir/file.*.data -> /path/to/dir/file.%Y%m%d.data
  # /path/to/dir/file -> /path/to/dir/file.%Y%m%d.log
  # %Y%m%d -> %Y%m%d_** (non append)
  # + .gz (gzipped)
  ## TODO: remove time_slice_format when end of support of compat_parameters
  def generate_path_template(original, timekey, append, compress, path_suffix: '', time_slice_format: nil)
    comp_suffix = compression_suffix(compress)
    index_placeholder = append ? '' : '_**'
    if original.index('*')
      raise "BUG: configuration error must be raised for path including '*' without timekey" unless timekey
      time_placeholders_part = time_slice_format || timekey_to_timeformat(timekey)
      original.gsub('*', time_placeholders_part + index_placeholder) + comp_suffix
    else
      if timekey
        if time_slice_format
          "#{original}.#{time_slice_format}#{index_placeholder}#{path_suffix}#{comp_suffix}"
        else
          time_placeholders = timekey_to_timeformat(timekey)
          # If the user already embedded some %X placeholders, require ALL of
          # the ones the timekey demands; otherwise append the full pattern.
          if time_placeholders.scan(/../).any?{|ph| original.include?(ph) }
            raise Fluent::ConfigError, "insufficient timestamp placeholders in path" if time_placeholders.scan(/../).any?{|ph| !original.include?(ph) }
            "#{original}#{index_placeholder}#{path_suffix}#{comp_suffix}"
          else
            "#{original}.#{time_placeholders}#{index_placeholder}#{path_suffix}#{comp_suffix}"
          end
        end
      else
        "#{original}#{index_placeholder}#{path_suffix}#{comp_suffix}"
      end
    end
  end

  # Substitutes '_**' with the first free index '_0', '_1', ...; when
  # with_lock is true a lock directory guards against another worker creating
  # the same file between the existence check and the write.
  def find_filepath_available(path_with_placeholder, with_lock: false) # for non-append
    raise "BUG: index placeholder not found in path: #{path_with_placeholder}" unless path_with_placeholder.index('_**')
    i = 0
    dir_path = locked = nil
    while true
      path = path_with_placeholder.sub('_**', "_#{i}")
      i += 1
      next if File.exist?(path)
      if with_lock
        dir_path = path + '.lock'
        locked = Dir.mkdir(dir_path) rescue false
        next unless locked
        # ensure that other worker doesn't create a file (and release lock)
        # between previous File.exist? and Dir.mkdir
        next if File.exist?(path)
      end
      break
    end
    yield path
  ensure
    if dir_path && locked && Dir.exist?(dir_path)
      Dir.rmdir(dir_path) rescue nil
    end
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/filter_record_transformer.rb | lib/fluent/plugin/filter_record_transformer.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'socket'
require 'json'
require 'ostruct'
require 'fluent/plugin/filter'
require 'fluent/config/error'
require 'fluent/event'
require 'fluent/time'
module Fluent::Plugin
# Mutates event records according to a <record> directive: adds/overwrites
# fields whose values may contain ${...} placeholders (tag, time, record
# fields, hostname), optionally deletes or whitelists keys, and can rewrite
# the event time from a record field.
class RecordTransformerFilter < Fluent::Plugin::Filter
  Fluent::Plugin.register_filter('record_transformer', self)

  helpers :record_accessor

  desc 'A comma-delimited list of keys to delete.'
  config_param :remove_keys, :array, default: nil
  desc 'A comma-delimited list of keys to keep.'
  config_param :keep_keys, :array, default: nil
  desc 'Create new Hash to transform incoming data'
  config_param :renew_record, :bool, default: false
  desc 'Specify field name of the record to overwrite the time of events. Its value must be unix time.'
  config_param :renew_time_key, :string, default: nil
  desc 'When set to true, the full Ruby syntax is enabled in the ${...} expression.'
  config_param :enable_ruby, :bool, default: false
  desc 'Use original value type.'
  config_param :auto_typecast, :bool, default: true

  def configure(conf)
    super
    map = {}
    # <record></record> directive
    conf.elements.select { |element| element.name == 'record' }.each do |element|
      element.each_pair do |k, v|
        element.has_key?(k) # to suppress unread configuration warning
        map[k] = parse_value(v)
      end
    end
    if @keep_keys
      raise Fluent::ConfigError, "`renew_record` must be true to use `keep_keys`" unless @renew_record
    end
    # Pre-build record accessors for remove_keys (supports nested syntax).
    @key_deleters = if @remove_keys
                      @remove_keys.map { |k| record_accessor_create(k) }
                    end
    placeholder_expander_params = {
      log: log,
      auto_typecast: @auto_typecast,
    }
    # enable_ruby switches between simple string substitution and full Ruby
    # evaluation of ${...} expressions (see security note on CleanroomExpander).
    @placeholder_expander =
      if @enable_ruby
        # require utilities which would be used in ruby placeholders
        require 'pathname'
        require 'uri'
        require 'cgi'
        RubyPlaceholderExpander.new(placeholder_expander_params)
      else
        PlaceholderExpander.new(placeholder_expander_params)
      end
    @map = @placeholder_expander.preprocess_map(map)
    @hostname = Socket.gethostname
  end

  # Transforms every event in the stream; events whose transformation raises
  # are routed to the error stream instead of being dropped silently.
  def filter_stream(tag, es)
    new_es = Fluent::MultiEventStream.new
    tag_parts = tag.split('.')
    tag_prefix = tag_prefix(tag_parts)
    tag_suffix = tag_suffix(tag_parts)
    # Values shared by all events in this stream; 'time' and 'record' are
    # filled in per event below.
    placeholder_values = {
      'tag' => tag,
      'tag_parts' => tag_parts,
      'tag_prefix' => tag_prefix,
      'tag_suffix' => tag_suffix,
      'hostname' => @hostname,
    }
    es.each do |time, record|
      begin
        placeholder_values['time'] = @placeholder_expander.time_value(time)
        placeholder_values['record'] = record
        new_record = reform(record, placeholder_values)
        if @renew_time_key && new_record.has_key?(@renew_time_key)
          time = Fluent::EventTime.from_time(Time.at(new_record[@renew_time_key].to_f))
        end
        @key_deleters.each { |deleter| deleter.delete(new_record) } if @key_deleters
        new_es.add(time, new_record)
      rescue => e
        router.emit_error_event(tag, time, record, e)
        log.debug { "map:#{@map} record:#{record} placeholder_values:#{placeholder_values}" }
      end
    end
    new_es
  end

  private

  # Values in <record> that look like JSON objects/arrays are parsed so the
  # transformed record gets structured values, not JSON strings.
  def parse_value(value_str)
    if value_str.start_with?('{', '[')
      JSON.parse(value_str)
    else
      value_str
    end
  rescue => e
    log.warn "failed to parse #{value_str} as json. Assuming #{value_str} is a string", error: e
    value_str # emit as string
  end

  # Builds the outgoing record: start from a copy (or an empty hash when
  # renew_record), optionally carry over keep_keys, then merge the expanded
  # <record> map on top.
  def reform(record, placeholder_values)
    placeholders = @placeholder_expander.prepare_placeholders(placeholder_values)
    new_record = @renew_record ? {} : record.dup
    @keep_keys.each do |k|
      new_record[k] = record[k] if record.has_key?(k)
    end if @keep_keys && @renew_record
    new_record.merge!(expand_placeholders(@map, placeholders))
    new_record
  end

  # Recursively expands placeholders in strings, hash keys/values, and arrays.
  def expand_placeholders(value, placeholders)
    if value.is_a?(String)
      new_value = @placeholder_expander.expand(value, placeholders)
    elsif value.is_a?(Hash)
      new_value = {}
      value.each_pair do |k, v|
        # Hash keys are force-stringified so the result stays a valid key.
        new_key = @placeholder_expander.expand(k, placeholders, true)
        new_value[new_key] = expand_placeholders(v, placeholders)
      end
    elsif value.is_a?(Array)
      new_value = []
      value.each_with_index do |v, i|
        new_value[i] = expand_placeholders(v, placeholders)
      end
    else
      new_value = value
    end
    new_value
  end

  # ["a","b","c"] -> ["a", "a.b", "a.b.c"]
  def tag_prefix(tag_parts)
    return [] if tag_parts.empty?
    tag_prefix = [tag_parts.first]
    1.upto(tag_parts.size-1).each do |i|
      tag_prefix[i] = "#{tag_prefix[i-1]}.#{tag_parts[i]}"
    end
    tag_prefix
  end

  # ["a","b","c"] -> ["a.b.c", "b.c", "c"]
  def tag_suffix(tag_parts)
    return [] if tag_parts.empty?
    rev_tag_parts = tag_parts.reverse
    rev_tag_suffix = [rev_tag_parts.first]
    1.upto(tag_parts.size-1).each do |i|
      rev_tag_suffix[i] = "#{rev_tag_parts[i]}.#{rev_tag_suffix[i-1]}"
    end
    rev_tag_suffix.reverse!
  end

  # THIS CLASS MUST BE THREAD-SAFE
  # Non-Ruby expander: ${...} tokens are looked up in a precomputed table;
  # no code is evaluated.
  class PlaceholderExpander
    attr_reader :placeholders, :log

    def initialize(params)
      @log = params[:log]
      @auto_typecast = params[:auto_typecast]
    end

    def time_value(time)
      Time.at(time).to_s
    end

    # No preprocessing needed in the lookup-table implementation.
    def preprocess_map(value, force_stringify = false)
      value
    end

    # Flattens the per-event values into a token -> value lookup table,
    # e.g. "${tag}", "${tag_parts[0]}" (plus negative indices), and
    # %Q[${record["key"]}] entries.
    def prepare_placeholders(placeholder_values)
      placeholders = {}
      placeholder_values.each do |key, value|
        if value.kind_of?(Array) # tag_parts, etc
          size = value.size
          value.each_with_index do |v, idx|
            placeholders.store("${#{key}[#{idx}]}", v)
            placeholders.store("${#{key}[#{idx-size}]}", v) # support [-1]
          end
        elsif value.kind_of?(Hash) # record, etc
          value.each do |k, v|
            placeholders.store(%Q[${#{key}["#{k}"]}], v) # record["foo"]
          end
        else # string, integer, float, and others?
          placeholders.store("${#{key}}", value)
        end
      end
      placeholders
    end

    # Expand string with placeholders
    #
    # @param [String] str
    # @param [Boolean] force_stringify the value must be string, used for hash key
    def expand(str, placeholders, force_stringify = false)
      if @auto_typecast && !force_stringify
        # A value that is exactly one placeholder keeps its original type.
        single_placeholder_matched = str.match(/\A(\${[^}]+}|__[A-Z_]+__)\z/)
        if single_placeholder_matched
          log_if_unknown_placeholder($1, placeholders)
          return placeholders[single_placeholder_matched[1]]
        end
      end
      str.gsub(/(\${[^}]+}|__[A-Z_]+__)/) {
        log_if_unknown_placeholder($1, placeholders)
        placeholders[$1]
      }
    end

    private

    def log_if_unknown_placeholder(placeholder, placeholders)
      unless placeholders.include?(placeholder)
        log.warn "unknown placeholder `#{placeholder}` found"
      end
    end
  end

  # THIS CLASS MUST BE THREAD-SAFE
  # enable_ruby expander: ${...} bodies are evaluated as Ruby code inside a
  # stripped-down "cleanroom" object.
  class RubyPlaceholderExpander
    attr_reader :log

    def initialize(params)
      @log = params[:log]
      @auto_typecast = params[:auto_typecast]
      @cleanroom_expander = CleanroomExpander.new
    end

    def time_value(time)
      Time.at(time)
    end

    # Preprocess record map to convert into ruby string expansion
    #
    # @param [Hash|String|Array] value record map config
    # @param [Boolean] force_stringify the value must be string, used for hash key
    def preprocess_map(value, force_stringify = false)
      new_value = nil
      if value.is_a?(String)
        if @auto_typecast && !force_stringify
          num_placeholders = value.scan('${').size
          if num_placeholders == 1 && value.start_with?('${') && value.end_with?('}')
            new_value = value[2..-2] # ${..} => ..
          end
        end
        unless new_value
          new_value = "%Q[#{value.gsub('${', '#{')}]" # xx${..}xx => %Q[xx#{..}xx]
        end
      elsif value.is_a?(Hash)
        new_value = {}
        value.each_pair do |k, v|
          new_value[preprocess_map(k, true)] = preprocess_map(v)
        end
      elsif value.is_a?(Array)
        new_value = []
        value.each_with_index do |v, i|
          new_value[i] = preprocess_map(v)
        end
      else
        new_value = value
      end
      new_value
    end

    # Values are evaluated lazily by expand; nothing to precompute here.
    def prepare_placeholders(placeholder_values)
      placeholder_values
    end

    # Expand string with placeholders
    #
    # @param [String] str
    def expand(str, placeholders, force_stringify = false)
      @cleanroom_expander.expand(
        str,
        placeholders['tag'],
        placeholders['time'],
        placeholders['record'],
        placeholders['tag_parts'],
        placeholders['tag_prefix'],
        placeholders['tag_suffix'],
        placeholders['hostname'],
      )
    rescue => e
      raise "failed to expand `#{str}` : error = #{e}"
    end

    # SECURITY NOTE: instance_eval runs configuration-supplied Ruby with the
    # deployment's privileges. The undef_method loop below reduces the
    # surface, but enable_ruby must only be used with trusted configuration.
    class CleanroomExpander
      def expand(__str_to_eval__, tag, time, record, tag_parts, tag_prefix, tag_suffix, hostname)
        instance_eval(__str_to_eval__)
      end

      # Strip almost all inherited methods so expressions can't easily reach
      # out of the cleanroom; the allowlist keeps what eval itself needs.
      (Object.instance_methods).each do |m|
        undef_method m unless /^__|respond_to_missing\?|object_id|public_methods|instance_eval|method_missing|define_singleton_method|respond_to\?|new_ostruct_member|^class$/.match?(m.to_s)
      end
    end
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_hash.rb | lib/fluent/plugin/formatter_hash.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
module Fluent
module Plugin
# Serializes every record using Ruby's own Hash#inspect-style
# representation (record.to_s), optionally newline-terminated.
class HashFormatter < Formatter
  include Fluent::Plugin::Newline::Mixin
  Plugin.register_formatter('hash', self)

  config_param :add_newline, :bool, default: true

  # Render one event; `tag` and `time` are part of the formatter contract
  # but unused here.
  def format(tag, time, record)
    text = record.to_s
    return text unless @add_newline
    text << @newline
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_object_space.rb | lib/fluent/plugin/in_object_space.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'json'
require 'fluent/plugin/input'
module Fluent::Plugin
# Periodically walks the Ruby heap with ObjectSpace and emits the classes
# with the most live instances — a lightweight memory-profiling input.
class ObjectSpaceInput < Fluent::Plugin::Input
  Fluent::Plugin.register_input('object_space', self)

  helpers :timer

  # NOTE: the previous no-op `def initialize; super; end` override was removed;
  # the inherited constructor is identical. Config parameters are grouped here.
  desc 'Emit interval in seconds'
  config_param :emit_interval, :time, default: 60
  desc 'The tag of the event'
  config_param :tag, :string
  desc 'Number of classes (by instance count) to include in each record'
  config_param :top, :integer, default: 15

  # Each worker profiles its own process, so multi-worker mode is fine.
  def multi_workers_ready?
    true
  end

  def start
    super
    timer_execute(:object_space_input, @emit_interval, &method(:on_timer))
  end

  # Per-class instance counter used while scanning the heap.
  class Counter
    def initialize(klass, init_count)
      @klass = klass
      @count = init_count
    end

    def incr!
      @count += 1
    end

    def name
      @klass.name
    end

    attr_reader :count
  end

  # Scan the heap and emit { class_name => count } for the @top classes.
  def on_timer
    now = Fluent::EventTime.now

    array = []
    map = {}
    ObjectSpace.each_object {|obj|
      # Some objects (e.g. with a broken singleton) can raise on #class.
      klass = obj.class rescue Object
      if c = map[klass]
        c.incr!
      else
        c = Counter.new(klass, 1)
        array << c
        map[klass] = c
      end
    }

    array.sort_by! {|c| -c.count }

    record = {}
    array.each_with_index {|c,i|
      break if i >= @top
      record[c.name] = c.count
    }

    router.emit(@tag, now, record)
  rescue => e
    log.error "object space failed to emit", error: e, tag: @tag, record: JSON.generate(record)
    log.error_backtrace
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_apache_error.rb | lib/fluent/plugin/parser_apache_error.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser_regexp'
module Fluent
module Plugin
# Parses Apache httpd error_log lines, e.g.
#   [Wed Oct 11 14:32:52 2000] [error] [client 127.0.0.1] denied by rule
# Captures: time, level, message; `pid` (httpd 2.4) and `client` are optional.
class ApacheErrorParser < RegexpParser
  Plugin.register_parser("apache_error", self)
  config_set_default :expression, /^\[[^ ]* (?<time>[^\]]*)\] \[(?<level>[^\]]*)\](?: \[pid (?<pid>[^\]]*)\])?( \[client (?<client>[^\]]*)\])? (?<message>.*)$/
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_relabel.rb | lib/fluent/plugin/out_relabel.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/output'
module Fluent::Plugin
# Forwards every event stream unchanged; its only purpose is to move
# events into the <label> configured on this output (routing is done by
# the event_emitter helper's router).
class RelabelOutput < Output
  Fluent::Plugin.register_output('relabel', self)

  helpers :event_emitter

  # Re-emit the whole stream as-is under the same tag.
  def process(tag, es)
    router.emit_stream(tag, es)
  end

  # Relabeling is stateless, so it works under multi-worker mode.
  def multi_workers_ready?
    true
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_debug_agent.rb | lib/fluent/plugin/in_debug_agent.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
module Fluent::Plugin
# Exposes a Ruby object (Fluent::Engine by default) over dRuby so that a
# developer can attach fluent-debug and poke at a running fluentd.
class DebugAgentInput < Input
  Fluent::Plugin.register_input('debug_agent', self)

  def initialize
    # Deliberate lazy requires: dRuby is only needed when this plugin is used.
    require 'drb/drb'
    require 'fluent/plugin/file_util'
    super
  end

  config_param :bind, :string, default: '0.0.0.0'
  config_param :port, :integer, default: 24230
  config_param :unix_path, :string, default: nil
  #config_param :unix_mode # TODO
  config_param :object, :string, default: 'Fluent::Engine'

  def configure(conf)
    super
    # Each worker needs its own TCP port; offset by worker id.
    if system_config.workers > 1
      @port += fluentd_worker_id
    end
    if @unix_path
      unless ::Fluent::FileUtil.writable?(@unix_path)
        raise Fluent::ConfigError, "in_debug_agent: `#{@unix_path}` is not writable"
      end
    end
  end

  # A single unix socket path cannot be shared across workers.
  def multi_workers_ready?
    @unix_path.nil?
  end

  def start
    super
    if @unix_path
      require 'drb/unix'
      uri = "drbunix:#{@unix_path}"
    else
      uri = "druby://#{@bind}:#{@port}"
    end
    log.info "listening dRuby", uri: uri, object: @object, worker: fluentd_worker_id
    # SECURITY NOTE: eval of a config-supplied constant name, and dRuby itself
    # allows remote code execution by design. This is a debug-only plugin;
    # never expose the bind address/socket to untrusted networks.
    obj = eval(@object)
    @server = DRb::DRbServer.new(uri, obj)
  end

  def shutdown
    @server.stop_service if @server
    super
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/sd_srv.rb | lib/fluent/plugin/sd_srv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'resolv'
require 'fluent/plugin_helper'
require 'fluent/plugin/service_discovery'
module Fluent
module Plugin
# Service discovery via DNS SRV records (RFC 2782): resolves
# _<service>._<proto>.<hostname> periodically and pushes join/leave
# messages onto the discovery queue as the record set changes.
class SrvServiceDiscovery < ServiceDiscovery
  include PluginHelper::Mixin

  Plugin.register_sd('srv', self)

  helpers :timer

  desc 'Service without underscore in RFC2782'
  config_param :service, :string
  desc 'Proto without underscore in RFC2782'
  config_param :proto, :string, default: 'tcp'
  desc 'Name in RFC2782'
  config_param :hostname, :string
  desc 'hostname of DNS server to request the SRV record'
  config_param :dns_server_host, :string, default: nil
  desc 'interval of requesting to DNS server'
  config_param :interval, :integer, default: 60
  desc "resolve hostname to IP addr of SRV's Target"
  config_param :dns_lookup, :bool, default: true
  desc 'The shared key per server'
  config_param :shared_key, :string, default: nil, secret: true
  desc 'The username for authentication'
  config_param :username, :string, default: ''
  desc 'The password for authentication'
  config_param :password, :string, default: '', secret: true

  def initialize
    super
    @target = nil
  end

  def configure(conf)
    super
    # RFC 2782 query name: _service._proto.name
    @target = "_#{@service}._#{@proto}.#{@hostname}"
    @dns_resolve =
      if @dns_server_host.nil?
        Resolv::DNS.new
      elsif @dns_server_host.include?(':') # e.g. 127.0.0.1:8600
        host, port = @dns_server_host.split(':', 2)
        Resolv::DNS.new(nameserver_port: [[host, port.to_i]])
      else
        Resolv::DNS.new(nameserver: @dns_server_host)
      end
    # Initial resolution happens at configure time; failures here abort
    # startup (fail fast on unresolvable targets).
    @services = fetch_srv_record
  end

  def start(queue)
    timer_execute(:"sd_srv_record_#{@target}", @interval) do
      refresh_srv_records(queue)
    end
    super()
  end

  private

  # Re-resolves the SRV record and pushes the diff (joins then leaves) onto
  # the queue. Resolution errors are logged and skipped; an empty result is
  # ignored so a transient DNS outage doesn't drain every server.
  def refresh_srv_records(queue)
    s = begin
      fetch_srv_record
    rescue => e
      @log.error("sd_srv: #{e}")
      return
    end
    if s.nil? || s.empty?
      return
    end
    diff = []
    join = s - @services
    # Need service_in first to guarantee that server exist at least one all time.
    join.each do |j|
      diff << ServiceDiscovery.service_in_msg(j)
    end
    drain = @services - s
    drain.each do |d|
      diff << ServiceDiscovery.service_out_msg(d)
    end
    @services = s
    diff.each do |a|
      queue.push(a)
    end
  end

  # Resolves the SRV record set and returns Services ordered by priority.
  def fetch_srv_record
    adders = @dns_resolve.getresources(@target, Resolv::DNS::Resource::IN::SRV)
    services = []
    adders.each do |addr|
      host = @dns_lookup ? dns_lookup!(addr.target) : addr.target
      services << [
        addr.priority,
        Service.new(:srv, host.to_s, addr.port.to_i, addr.target.to_s, addr.weight, false, @username, @password, @shared_key)
      ]
    end
    services.sort_by(&:first).flat_map { |s| s[1] }
  end

  def dns_lookup!(host)
    # may need to cache the result
    @dns_resolve.getaddress(host) # get first result for now
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/buf_memory.rb | lib/fluent/plugin/buf_memory.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/buffer'
require 'fluent/plugin/buffer/memory_chunk'
module Fluent
  module Plugin
    # In-memory buffer implementation. Chunks exist only in process memory,
    # so nothing persists across restarts and there is nothing to recover.
    class MemoryBuffer < Fluent::Plugin::Buffer
      Plugin.register_buffer('memory', self)

      # Memory buffers are volatile: a fresh process never has staged or
      # queued chunks left over from a previous run.
      def resume
        staged = {}
        queued = []
        return staged, queued
      end

      # Builds a new in-memory chunk honoring the configured compression.
      def generate_chunk(metadata)
        Fluent::Plugin::Buffer::MemoryChunk.new(metadata, compress: @compress)
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/string_util.rb | lib/fluent/plugin/string_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/compat/string_util'
module Fluent
  # obsolete: backward-compatibility alias only.
  # Use Fluent::Compat::StringUtil directly in new code.
  StringUtil = Fluent::Compat::StringUtil
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/service_discovery.rb | lib/fluent/plugin/service_discovery.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/log'
require 'fluent/unique_id'
require 'fluent/plugin_id'
# Fix: Service#discovery_id calls Base64.encode64 but 'base64' was never
# required here; loading relied on another file pulling it in first.
require 'base64'

module Fluent
  module Plugin
    # Base class for service discovery plugins (sd_static, sd_srv, ...).
    # A service discovery plugin maintains a list of Service entries and
    # notifies consumers of joins/leaves via DiscoveryMessage objects.
    class ServiceDiscovery < Base
      include PluginId
      include PluginLoggerMixin
      include UniqueId::Mixin

      configured_in :service_discovery

      # Currently known services (Array of Service).
      attr_reader :services

      Service = Struct.new(:plugin_name, :host, :port, :name, :weight, :standby, :username, :password, :shared_key) do
        # Stable identifier derived from all attributes; memoized because the
        # struct's fields are not expected to change after creation.
        def discovery_id
          @discovery_id ||= Base64.encode64(to_h.to_s)
        end
      end

      SERVICE_IN = :service_in
      SERVICE_OUT = :service_out

      DiscoveryMessage = Struct.new(:type, :service)

      class << self
        def service_in_msg(service)
          DiscoveryMessage.new(SERVICE_IN, service)
        end

        def service_out_msg(service)
          DiscoveryMessage.new(SERVICE_OUT, service)
        end
      end

      def initialize
        @services = []
        super
      end

      # queue is used by subclasses that publish updates asynchronously;
      # the base implementation ignores it.
      def start(queue = nil)
        super()
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_ltsv.rb | lib/fluent/plugin/formatter_ltsv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
module Fluent
  module Plugin
    # Formats a record as a Labeled Tab-Separated Values line (http://ltsv.org/).
    class LabeledTSVFormatter < Formatter
      include Fluent::Plugin::Newline::Mixin

      Plugin.register_formatter('ltsv', self)

      config_param :delimiter, :string, default: "\t".freeze
      config_param :label_delimiter, :string, default: ":".freeze
      config_param :replacement, :string, default: " ".freeze
      config_param :add_newline, :bool, default: true

      # Renders each field as "label<label_delimiter>value" and joins the
      # fields with the delimiter. Any occurrence of the delimiter inside a
      # value is replaced so the output line stays parseable.
      def format(tag, time, record)
        fields = record.map do |label, value|
          "#{label}#{@label_delimiter}#{value.to_s.gsub(@delimiter, @replacement)}"
        end
        line = fields.join(@delimiter)
        line << @newline if @add_newline
        line
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/sd_static.rb | lib/fluent/plugin/sd_static.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/service_discovery'
module Fluent
  module Plugin
    # Service discovery plugin backed by a fixed server list taken from the
    # configuration (<service> sections). The list never changes at runtime.
    class StaticServiceDiscovery < ServiceDiscovery
      Plugin.register_sd('static', self)

      LISTEN_PORT = 24224

      config_section :service, param_name: :service_configs do
        desc 'The IP address or host name of the server.'
        config_param :host, :string
        desc 'The name of the server. Used for logging and certificate verification in TLS transport (when host is address).'
        config_param :name, :string, default: nil
        desc 'The port number of the host.'
        config_param :port, :integer, default: LISTEN_PORT
        desc 'The shared key per server.'
        config_param :shared_key, :string, default: nil, secret: true
        desc 'The username for authentication.'
        config_param :username, :string, default: ''
        desc 'The password for authentication.'
        config_param :password, :string, default: '', secret: true
        desc 'Marks a node as the standby node for an Active-Standby model between Fluentd nodes.'
        config_param :standby, :bool, default: false
        desc 'The load balancing weight.'
        config_param :weight, :integer, default: 60
      end

      # Builds the immutable service list from the configured sections.
      def configure(conf)
        super

        @services = @service_configs.map { |sc|
          ServiceDiscovery::Service.new(:static, sc.host, sc.port, sc.name, sc.weight, sc.standby, sc.username, sc.password, sc.shared_key)
        }
      end

      # Static discovery never publishes updates, so the queue is unused.
      def start(queue = nil)
        super()
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_json.rb | lib/fluent/plugin/parser_json.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
require 'fluent/time'
require 'fluent/oj_options'
require 'yajl'
require 'json'
module Fluent
  module Plugin
    # Parser for JSON payloads. Supports three backend libraries (Oj, Yajl,
    # stdlib JSON) selectable via the json_parser parameter; Oj silently
    # falls back to stdlib JSON when it is not installed.
    class JSONParser < Parser
      Plugin.register_parser('json', self)

      config_set_default :time_key, 'time'
      desc 'Set JSON parser'
      config_param :json_parser, :enum, list: [:oj, :yajl, :json], default: :oj
      # The Yajl library defines a default buffer size of 8KiB when parsing
      # from IO streams, so maintain this for backwards-compatibility.
      # https://www.rubydoc.info/github/brianmario/yajl-ruby/Yajl%2FParser:parse
      desc 'Set the buffer size that Yajl will use when parsing streaming input'
      config_param :stream_buffer_size, :integer, default: 8192
      config_set_default :time_type, :float

      def configure(conf)
        # An explicit time_format implies string-typed time values unless the
        # user configured time_type themselves.
        if conf.has_key?('time_format')
          conf['time_type'] ||= 'string'
        end

        super
        @load_proc, @error_class = configure_json_parser(@json_parser)
      end

      # Returns [load_method, error_class] for the requested backend.
      # Recurses with :json when Oj is requested but unavailable.
      def configure_json_parser(name)
        case name
        when :oj
          return [Oj.method(:load), Oj::ParseError] if Fluent::OjOptions.available?

          log&.info "Oj is not installed, and failing back to JSON for json parser"
          configure_json_parser(:json)
        when :json then [JSON.method(:parse), JSON::ParserError]
        when :yajl then [Yajl.method(:load), Yajl::ParseError]
        else
          raise "BUG: unknown json parser specified: #{name}"
        end
      end

      # Parses text and yields (time, record). A top-level JSON array yields
      # once per element; non-hash elements, non-object documents and parse
      # failures all yield (nil, nil).
      def parse(text)
        parsed_json = @load_proc.call(text)

        if parsed_json.is_a?(Hash)
          time, record = parse_one_record(parsed_json)
          yield time, record
        elsif parsed_json.is_a?(Array)
          parsed_json.each do |record|
            unless record.is_a?(Hash)
              yield nil, nil
              next
            end

            time, parsed_record = parse_one_record(record)
            yield time, parsed_record
          end
        else
          yield nil, nil
        end
      rescue @error_class, EncodingError # EncodingError is for oj 3.x or later
        yield nil, nil
      end

      # Extracts the event time from a single record hash and applies the
      # configured value conversions. Returns [time, record].
      def parse_one_record(record)
        time = parse_time(record)
        convert_values(time, record)
      end

      def parser_type
        :text
      end

      # Streaming parse from an IO object using Yajl, invoking the block with
      # (time, record) for each complete JSON document as it is parsed.
      def parse_io(io, &block)
        y = Yajl::Parser.new
        y.on_parse_complete = ->(record){
          block.call(parse_time(record), record)
        }
        y.parse(io, @stream_buffer_size)
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_stdout.rb | lib/fluent/plugin/out_stdout.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/output'
module Fluent::Plugin
  # Output plugin that prints events to stdout (or to the Fluentd log file
  # when the logger writes to a file). Primarily intended for debugging.
  class StdoutOutput < Output
    Fluent::Plugin.register_output('stdout', self)

    helpers :inject, :formatter, :compat_parameters

    DEFAULT_LINE_FORMAT_TYPE = 'stdout'
    DEFAULT_FORMAT_TYPE = 'json'

    desc "If Fluentd logger outputs logs to a file (with -o option), this plugin outputs events to the file as well."
    config_param :use_logger, :bool, default: true

    config_section :buffer do
      config_set_default :chunk_keys, ['tag']
      config_set_default :flush_at_shutdown, true
      config_set_default :chunk_limit_size, 10 * 1024
    end

    config_section :format do
      config_set_default :@type, DEFAULT_LINE_FORMAT_TYPE
      config_set_default :output_type, DEFAULT_FORMAT_TYPE
    end

    # Prefer the immediate (non-buffered) emission path; buffering is only
    # used when the user explicitly configures it.
    def prefer_buffered_processing
      false
    end

    def multi_workers_ready?
      true
    end

    # Destination IO: the global logger when use_logger is on, else $stdout.
    def dest_io
      @use_logger ? $log : $stdout
    end

    attr_accessor :formatter

    def configure(conf)
      compat_parameters_convert(conf, :inject, :formatter)
      super
      @formatter = formatter_create
    end

    # Non-buffered path: format and write each event, then flush once.
    def process(tag, es)
      es = inject_values_to_event_stream(tag, es)
      es.each {|time,record|
        dest_io.write(format(tag, time, record))
      }
      dest_io.flush
    end

    # Normalizes formatter output to exactly one trailing newline per event.
    def format(tag, time, record)
      record = inject_values_to_record(tag, time, record)
      @formatter.format(tag, time, record).chomp + "\n"
    end

    # Buffered path: the chunk already contains formatted lines.
    def write(chunk)
      chunk.write_to(dest_io)
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/owned_by_mixin.rb | lib/fluent/plugin/owned_by_mixin.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Fluent
  module Plugin
    # Mixin for plugin-owned helper objects (e.g. parsers/formatters created
    # by an owner plugin). Stores a back-reference to the owner and borrows
    # its plugin id and logger.
    module OwnedByMixin
      # Registers the owner plugin and adopts its plugin id and logger.
      def owner=(plugin)
        @_owner = plugin
        @_plugin_id = plugin.plugin_id
        @log = plugin.log
      end

      # The owning plugin, or nil if no owner has been assigned yet.
      def owner
        @_owner if instance_variable_defined?(:@_owner)
      end

      # The owner's logger, or nil before an owner is assigned.
      def log
        @log if instance_variable_defined?(:@log)
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_ltsv.rb | lib/fluent/plugin/parser_ltsv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
module Fluent
  module Plugin
    # Parser for Labeled Tab-Separated Values (http://ltsv.org/).
    class LabeledTSVParser < Parser
      Plugin.register_parser('ltsv', self)

      desc 'The delimiter character (or string) of TSV values'
      config_param :delimiter, :string, default: "\t"
      desc 'The delimiter pattern of TSV values'
      config_param :delimiter_pattern, :regexp, default: nil
      desc 'The delimiter character between field name and value'
      config_param :label_delimiter, :string, default: ":"
      config_set_default :time_key, 'time'

      def configure(conf)
        super
        # A regexp pattern, when given, takes precedence over the plain string.
        @delimiter = @delimiter_pattern || @delimiter
      end

      # Splits the line into "label:value" pairs; tokens without a label
      # delimiter are silently dropped.
      def parse(text)
        fields = text.split(@delimiter).each_with_object({}) do |pair, acc|
          key, value = pair.split(@label_delimiter, 2)
          acc[key] = value unless value.nil?
        end

        time, record = convert_values(parse_time(fields), fields)
        yield time, record
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_http.rb | lib/fluent/plugin/out_http.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'net/http'
require 'uri'
require 'openssl'
require 'fluent/tls'
require 'fluent/plugin/output'
require 'fluent/plugin_helper/socket'
# patch Net::HTTP to support extra_chain_cert which was added in Ruby feature #9758.
# see: https://github.com/ruby/ruby/commit/31af0dafba6d3769d2a39617c0dddedb97883712
# The guard skips the patch entirely on Rubies whose Net::HTTP already
# exposes the attribute, so it is safe on both old and new versions.
unless Net::HTTP::SSL_IVNAMES.include?(:@extra_chain_cert)
  class Net::HTTP
    SSL_IVNAMES << :@extra_chain_cert
    SSL_ATTRIBUTES << :extra_chain_cert
    attr_accessor :extra_chain_cert
  end
end
module Fluent::Plugin
  # Output plugin that POSTs/PUTs chunks to an HTTP(S) endpoint.
  # Supports TLS client auth, basic auth, AWS SigV4 signing, gzip request
  # bodies, and optional per-flush-thread connection reuse.
  class HTTPOutput < Output
    Fluent::Plugin.register_output('http', self)

    # Raised for response codes listed in retryable_response_codes so the
    # buffer retry machinery re-sends the chunk.
    class RetryableResponse < StandardError; end

    # One cached keep-alive connection per flush thread: the endpoint URI
    # string it was opened for, and the Net::HTTP object.
    ConnectionCache = Struct.new(:uri, :conn)

    helpers :formatter

    desc 'The endpoint for HTTP request, e.g. http://example.com/api'
    config_param :endpoint, :string
    desc 'The method for HTTP request'
    config_param :http_method, :enum, list: [:put, :post], default: :post
    desc 'The proxy for HTTP request'
    config_param :proxy, :string, default: ENV['HTTP_PROXY'] || ENV['http_proxy']
    desc 'Content-Type for HTTP request'
    config_param :content_type, :string, default: nil
    desc 'JSON array data format for HTTP request body'
    config_param :json_array, :bool, default: false
    desc 'Additional headers for HTTP request'
    config_param :headers, :hash, default: nil
    desc 'Additional placeholder based headers for HTTP request'
    config_param :headers_from_placeholders, :hash, default: nil
    desc 'Compress HTTP request body'
    config_param :compress, :enum, list: [:text, :gzip], default: :text
    desc 'The connection open timeout in seconds'
    config_param :open_timeout, :integer, default: nil
    desc 'The read timeout in seconds'
    config_param :read_timeout, :integer, default: nil
    desc 'The TLS timeout in seconds'
    config_param :ssl_timeout, :integer, default: nil
    desc 'Try to reuse connections'
    config_param :reuse_connections, :bool, default: false
    desc 'The CA certificate path for TLS'
    config_param :tls_ca_cert_path, :string, default: nil
    desc 'The client certificate path for TLS'
    config_param :tls_client_cert_path, :string, default: nil
    desc 'The client private key path for TLS'
    config_param :tls_private_key_path, :string, default: nil
    desc 'The client private key passphrase for TLS'
    config_param :tls_private_key_passphrase, :string, default: nil, secret: true
    desc 'The verify mode of TLS'
    config_param :tls_verify_mode, :enum, list: [:none, :peer], default: :peer
    desc 'The default version of TLS'
    config_param :tls_version, :enum, list: Fluent::TLS::SUPPORTED_VERSIONS, default: Fluent::TLS::DEFAULT_VERSION
    desc 'The cipher configuration of TLS'
    config_param :tls_ciphers, :string, default: Fluent::TLS::CIPHERS_DEFAULT
    desc 'Raise UnrecoverableError when the response is non success, 4xx/5xx'
    config_param :error_response_as_unrecoverable, :bool, default: true
    desc 'The list of retryable response code'
    config_param :retryable_response_codes, :array, value_type: :integer, default: nil

    config_section :format do
      config_set_default :@type, 'json'
    end

    config_section :auth, required: false, multi: false do
      desc 'The method for HTTP authentication'
      config_param :method, :enum, list: [:basic, :aws_sigv4], default: :basic
      desc 'The username for basic authentication'
      config_param :username, :string, default: nil
      desc 'The password for basic authentication'
      config_param :password, :string, default: nil, secret: true
      desc 'The AWS service to authenticate against'
      config_param :aws_service, :string, default: nil
      desc 'The AWS region to use when authenticating'
      config_param :aws_region, :string, default: nil
      desc 'The AWS role ARN to assume when authenticating'
      config_param :aws_role_arn, :string, default: nil
    end

    # Thread-local key under which this plugin instance stores its
    # connection-cache slot index.
    def connection_cache_id_thread_key
      "#{plugin_id}_connection_cache_id"
    end

    def connection_cache_id_for_thread
      Thread.current[connection_cache_id_thread_key]
    end

    def connection_cache_id_for_thread=(id)
      Thread.current[connection_cache_id_thread_key] = id
    end

    def initialize
      super

      @uri = nil
      @proxy_uri = nil
      @formatter = nil
      @connection_cache = []
      @connection_cache_id_mutex = Mutex.new
      @connection_cache_next_id = 0
    end

    # Closes any cached keep-alive connections.
    def close
      super

      @connection_cache.each {|entry| entry.conn.finish if entry.conn&.started? }
    end

    def configure(conf)
      super

      # One cache slot per flush thread; slots are claimed lazily in
      # make_request_cached via the thread-local id.
      @connection_cache = Array.new(actual_flush_thread_count, ConnectionCache.new("", nil)) if @reuse_connections

      if @retryable_response_codes.nil?
        log.warn('Status code 503 is going to be removed from default `retryable_response_codes` from fluentd v2. Please add it by yourself if you wish')
        @retryable_response_codes = [503]
      end

      @http_opt = setup_http_option
      @proxy_uri = URI.parse(@proxy) if @proxy
      @formatter = formatter_create
      @content_type = setup_content_type unless @content_type

      if @json_array
        if @formatter_configs.first[:@type] != "json"
          raise Fluent::ConfigError, "json_array option could be used with json formatter only"
        end
        # Swap in the comma-appending formatter; create_request strips the
        # trailing comma and wraps the body in [].
        define_singleton_method(:format, method(:format_json_array))
      end

      if @auth and @auth.method == :aws_sigv4
        begin
          require 'aws-sigv4'
          require 'aws-sdk-core'
        rescue LoadError
          raise Fluent::ConfigError, "The aws-sdk-core and aws-sigv4 gems are required for aws_sigv4 auth. Run: gem install aws-sdk-core -v '~> 3.191'"
        end

        raise Fluent::ConfigError, "aws_service is required for aws_sigv4 auth" unless @auth.aws_service != nil
        raise Fluent::ConfigError, "aws_region is required for aws_sigv4 auth" unless @auth.aws_region != nil

        if @auth.aws_role_arn == nil
          aws_credentials = Aws::CredentialProviderChain.new.resolve
        else
          aws_credentials = Aws::AssumeRoleCredentials.new(
            client: Aws::STS::Client.new(
              region: @auth.aws_region
            ),
            role_arn: @auth.aws_role_arn,
            role_session_name: "fluentd"
          )
        end

        @aws_signer = Aws::Sigv4::Signer.new(
          service: @auth.aws_service,
          region: @auth.aws_region,
          credentials_provider: aws_credentials
        )
      end
    end

    def multi_workers_ready?
      true
    end

    def formatted_to_msgpack_binary?
      @formatter_configs.first[:@type] == 'msgpack'
    end

    def format(tag, time, record)
      @formatter.format(tag, time, record)
    end

    # json_array variant: append "," so chunk contents can be joined into a
    # JSON array by create_request (which chops the final comma).
    def format_json_array(tag, time, record)
      @formatter.format(tag, time, record) << ","
    end

    def write(chunk)
      uri = parse_endpoint(chunk)
      req = create_request(chunk, uri)

      log.debug { "#{@http_method.capitalize} data to #{uri.to_s} with chunk(#{dump_unique_id_hex(chunk.unique_id)})" }

      send_request(uri, req)
    end

    private

    # Derives Content-Type from the formatter type when the user did not set
    # content_type explicitly.
    def setup_content_type
      case @formatter_configs.first[:@type]
      when 'json'
        @json_array ? 'application/json' : 'application/x-ndjson'
      when 'csv'
        'text/csv'
      when 'tsv', 'ltsv'
        'text/tab-separated-values'
      when 'msgpack'
        'application/x-msgpack'
      when 'out_file', 'single_value', 'stdout', 'hash'
        'text/plain'
      else
        raise Fluent::ConfigError, "can't determine Content-Type from formatter type. Set content_type parameter explicitly"
      end
    end

    # Builds the option hash passed to Net::HTTP.start, including the TLS
    # certificate/key/verify settings when the endpoint is https.
    def setup_http_option
      use_ssl = @endpoint.start_with?('https')
      opt = {
        open_timeout: @open_timeout,
        read_timeout: @read_timeout,
        ssl_timeout: @ssl_timeout,
        use_ssl: use_ssl
      }

      if use_ssl
        if @tls_ca_cert_path
          raise Fluent::ConfigError, "tls_ca_cert_path is wrong: #{@tls_ca_cert_path}" unless File.file?(@tls_ca_cert_path)

          opt[:ca_file] = @tls_ca_cert_path
        end

        if @tls_client_cert_path
          raise Fluent::ConfigError, "tls_client_cert_path is wrong: #{@tls_client_cert_path}" unless File.file?(@tls_client_cert_path)

          # The cert file may be a bundle: first cert is the client cert,
          # any following certs are sent as the extra chain.
          bundle = File.read(@tls_client_cert_path)
          bundle_certs = bundle.scan(/-----BEGIN CERTIFICATE-----(?:.|\n)+?-----END CERTIFICATE-----/)
          opt[:cert] = OpenSSL::X509::Certificate.new(bundle_certs[0])

          intermediate_certs = bundle_certs[1..-1]
          if intermediate_certs
            opt[:extra_chain_cert] = intermediate_certs.map { |cert| OpenSSL::X509::Certificate.new(cert) }
          end
        end

        if @tls_private_key_path
          raise Fluent::ConfigError, "tls_private_key_path is wrong: #{@tls_private_key_path}" unless File.file?(@tls_private_key_path)

          opt[:key] = OpenSSL::PKey.read(File.read(@tls_private_key_path), @tls_private_key_passphrase)
        end

        opt[:verify_mode] = case @tls_verify_mode
                            when :none
                              OpenSSL::SSL::VERIFY_NONE
                            when :peer
                              OpenSSL::SSL::VERIFY_PEER
                            end
        opt[:ciphers] = @tls_ciphers
        opt = Fluent::TLS.set_version_to_options(opt, @tls_version, nil, nil)
      end

      opt
    end

    # Resolves buffer placeholders in the endpoint per chunk.
    def parse_endpoint(chunk)
      endpoint = extract_placeholders(@endpoint, chunk)
      URI.parse(endpoint)
    end

    def set_headers(req, uri, chunk)
      if @headers
        @headers.each do |k, v|
          req[k] = v
        end
      end

      if @headers_from_placeholders
        @headers_from_placeholders.each do |k, v|
          req[k] = extract_placeholders(v, chunk)
        end
      end

      if @compress == :gzip
        req['Content-Encoding'] = "gzip"
      end

      req['Content-Type'] = @content_type
    end

    def set_auth(req, uri)
      return unless @auth

      if @auth.method == :basic
        req.basic_auth(@auth.username, @auth.password)
      elsif @auth.method == :aws_sigv4
        signature = @aws_signer.sign_request(
          http_method: req.method,
          url: uri.request_uri,
          headers: {
            'Content-Type' => @content_type,
            'Host' => uri.host
          },
          body: req.body
        )
        req.add_field('x-amz-date', signature.headers['x-amz-date'])
        req.add_field('x-amz-security-token', signature.headers['x-amz-security-token'])
        req.add_field('x-amz-content-sha256', signature.headers['x-amz-content-sha256'])
        req.add_field('authorization', signature.headers['authorization'])
      end
    end

    def create_request(chunk, uri)
      req = case @http_method
            when :post
              Net::HTTP::Post.new(uri.request_uri)
            when :put
              Net::HTTP::Put.new(uri.request_uri)
            end
      set_headers(req, uri, chunk)
      # json_array mode: chunk lines each end with ","; chop the last comma
      # and wrap in brackets to form a single JSON array body.
      req.body = @json_array ? "[#{chunk.read.chop}]" : chunk.read

      if @compress == :gzip
        gz = Zlib::GzipWriter.new(StringIO.new)
        gz << req.body
        req.body = gz.close.string
      end

      # At least one authentication method requires the body and other headers, so the order of this call matters
      set_auth(req, uri)
      req
    end

    # Sends the request over a per-thread cached keep-alive connection,
    # (re)opening it when this thread's slot holds a different URI.
    def make_request_cached(uri, req)
      id = self.connection_cache_id_for_thread
      if id.nil?
        @connection_cache_id_mutex.synchronize {
          id = @connection_cache_next_id
          @connection_cache_next_id += 1
        }
        self.connection_cache_id_for_thread = id
      end
      uri_str = uri.to_s
      if @connection_cache[id].uri != uri_str
        @connection_cache[id].conn.finish if @connection_cache[id].conn&.started?
        http = if @proxy_uri
                 Net::HTTP.start(uri.host, uri.port, @proxy_uri.host, @proxy_uri.port, @proxy_uri.user, @proxy_uri.password, @http_opt)
               else
                 Net::HTTP.start(uri.host, uri.port, @http_opt)
               end
        @connection_cache[id] = ConnectionCache.new(uri_str, http)
      end
      @connection_cache[id].conn.request(req)
    end

    # One-shot connection: opened, yielded to the block, closed by the
    # Net::HTTP.start block form.
    def make_request(uri, req, &block)
      if @proxy_uri
        Net::HTTP.start(uri.host, uri.port, @proxy_uri.host, @proxy_uri.port, @proxy_uri.user, @proxy_uri.password, @http_opt, &block)
      else
        Net::HTTP.start(uri.host, uri.port, @http_opt, &block)
      end
    end

    # Performs the request and maps the response: success is logged at debug,
    # retryable codes raise RetryableResponse (so the buffer retries), and
    # other failures either raise UnrecoverableError or are logged, per
    # error_response_as_unrecoverable.
    def send_request(uri, req)
      res = if @reuse_connections
              make_request_cached(uri, req)
            else
              make_request(uri, req) { |http| http.request(req) }
            end

      if res.is_a?(Net::HTTPSuccess)
        log.debug { "#{res.code} #{res.message.rstrip}#{res.body.lstrip}" }
      else
        msg = "#{res.code} #{res.message.rstrip} #{res.body.lstrip}"

        if @retryable_response_codes.include?(res.code.to_i)
          raise RetryableResponse, msg
        end

        if @error_response_as_unrecoverable
          raise Fluent::UnrecoverableError, msg
        else
          log.error "got error response from '#{@http_method.capitalize} #{uri.to_s}' : #{msg}"
        end
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_csv.rb | lib/fluent/plugin/parser_csv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
require 'csv'
module Fluent
  module Plugin
    # Parser for CSV lines. The :normal engine delegates to stdlib CSV;
    # the :fast engine uses a hand-rolled single-line parser that avoids
    # CSV.parse_line's per-call object overhead.
    class CSVParser < Parser
      Plugin.register_parser('csv', self)

      desc 'Names of fields included in each lines'
      config_param :keys, :array, value_type: :string
      desc 'The delimiter character (or string) of CSV values'
      config_param :delimiter, :string, default: ','
      desc 'The parser type used to parse CSV line'
      config_param :parser_engine, :enum, list: [:normal, :fast], default: :normal, alias: :parser_type

      def configure(conf)
        super

        if @parser_engine == :fast
          @quote_char = '"'
          # "" inside a quoted field is an escaped quote character.
          @escape_pattern = Regexp.compile(@quote_char * 2)

          # Replace #parse on this instance only with the fast variant.
          m = method(:parse_fast)
          self.singleton_class.module_eval do
            define_method(:parse, m)
          end
        end
      end

      # Normal engine: stdlib CSV, then zip values with the configured keys.
      def parse(text, &block)
        values = CSV.parse_line(text, col_sep: @delimiter)
        r = Hash[@keys.zip(values)]
        time, record = convert_values(parse_time(r), r)
        yield time, record
      end

      def parse_fast(text, &block)
        r = parse_fast_internal(text)
        time, record = convert_values(parse_time(r), r)
        yield time, record
      end

      # CSV.parse_line is too slow due to initialize lots of object and
      # CSV module doesn't provide the efficient method for parsing single line.
      # This method avoids the overhead of CSV.parse_line for typical patterns
      def parse_fast_internal(text)
        record = {}
        text.chomp!

        return record if text.empty?

        # use while because while is now faster than each_with_index
        columns = text.split(@delimiter, -1)
        num_columns = columns.size
        i = 0     # index into @keys
        j = 0     # index into split columns (may advance faster than i when
                  # quoted fields containing the delimiter are re-merged)
        while j < num_columns
          column = columns[j]

          # Branch on how many quote characters the raw token contains.
          case column.count(@quote_char)
          when 0
            # Unquoted field; empty string means a missing value (nil).
            if column.empty?
              column = nil
            end
          when 1
            # A lone quote at the start means the field was quoted and
            # contained the delimiter, so the split broke it apart: re-join
            # following tokens until the closing quote, then strip quotes.
            if column.start_with?(@quote_char)
              to_merge = [column]
              j += 1
              while j < num_columns
                merged_col = columns[j]
                to_merge << merged_col
                break if merged_col.end_with?(@quote_char)
                j += 1
              end
              column = to_merge.join(@delimiter)[1..-2]
            end
          when 2
            # Simple quoted field: strip the surrounding quotes.
            if column.start_with?(@quote_char) && column.end_with?(@quote_char)
              column = column[1..-2]
            end
          else
            # Quoted field containing escaped quotes (""): strip surrounding
            # quotes, then unescape the doubled quotes in place.
            if column.start_with?(@quote_char) && column.end_with?(@quote_char)
              column = column[1..-2]
            end
            column.gsub!(@escape_pattern, @quote_char)
          end

          record[@keys[i]] = column
          j += 1
          i += 1
        end
        record
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_msgpack.rb | lib/fluent/plugin/formatter_msgpack.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
module Fluent
  module Plugin
    # Formats records as raw MessagePack binary (no newline framing).
    class MessagePackFormatter < Formatter
      Plugin.register_formatter('msgpack', self)

      # Output is binary, not line-oriented text.
      def formatter_type
        :binary
      end

      def format(tag, time, record)
        record.to_msgpack
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_tail.rb | lib/fluent/plugin/in_tail.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'cool.io'
require 'fluent/plugin/input'
require 'fluent/config/error'
require 'fluent/event'
require 'fluent/plugin/buffer'
require 'fluent/plugin/parser_multiline'
require 'fluent/variable_store'
require 'fluent/capability'
require 'fluent/plugin/in_tail/position_file'
require 'fluent/plugin/in_tail/group_watch'
require 'fluent/file_wrapper'
module Fluent::Plugin
class TailInput < Fluent::Plugin::Input
include GroupWatch
Fluent::Plugin.register_input('tail', self)
helpers :timer, :event_loop, :parser, :compat_parameters
RESERVED_CHARS = ['/', '*', '%'].freeze
MetricsInfo = Struct.new(:opened, :closed, :rotated, :throttled, :tracked)
# Raised when a TailWatcher cannot be set up for a path (e.g. its IO
# handler fails to initialize); callers skip the offending file instead
# of aborting the whole plugin.
class WatcherSetupError < StandardError
  # @param msg [String] human-readable reason for the setup failure
  def initialize(msg)
    @message = msg
  end

  # The stored reason doubles as the exception's string form.
  def to_s
    @message
  end
end
# Initializes per-instance state; collections are populated later in
# #configure / #start.
def initialize
super
# Configured tail paths, split and deduplicated in #configure.
@paths = []
# Active TailWatcher objects keyed by path (or by inode when follow_inodes).
@tails = {}
# Watchers waiting out rotate_wait before final detach: tw => { ino:, timer: }.
@tails_rotate_wait = {}
# Open pos_file handle and its parsed PositionFile; both set in #start.
@pf_file = nil
@pf = nil
# Paths already warned about as unreadable (used with
# ignore_repeated_permission_error to avoid repeated warnings).
@ignore_list = []
# Recorded in #shutdown and passed to TailWatcher#detach.
@shutdown_start_time = nil
# MetricsInfo struct, created at the end of #configure.
@metrics = nil
# True until the first refresh completes;影響s read_from_head — NOTE(review):
# see setup_watcher, where files found after startup are read from head.
@startup = true
# Capability probe for reading files via DAC_READ_SEARCH/DAC_OVERRIDE.
@capability = Fluent::Capability.new(:current_process)
end
desc 'The paths to read. Multiple paths can be specified, separated by comma.'
config_param :path, :string
desc 'path delimiter used for splitting path config'
config_param :path_delimiter, :string, default: ','
desc 'Choose using glob patterns. Adding capabilities to handle [] and ?, and {}.'
config_param :glob_policy, :enum, list: [:backward_compatible, :extended, :always], default: :backward_compatible
desc 'The tag of the event.'
config_param :tag, :string
desc 'The paths to exclude the files from watcher list.'
config_param :exclude_path, :array, default: []
desc 'Specify interval to keep reference to old file when rotate a file.'
config_param :rotate_wait, :time, default: 5
desc 'Fluentd will record the position it last read into this file.'
config_param :pos_file, :string, default: nil
desc 'The cleanup interval of pos file'
config_param :pos_file_compaction_interval, :time, default: nil
desc 'Start to read the logs from the head of file, not bottom.'
config_param :read_from_head, :bool, default: false
# When the program deletes log file and re-creates log file with same filename after passed refresh_interval,
# in_tail may raise a pos_file related error. This is a known issue but there is no such program on production.
# If we find such program / application, we will fix the problem.
desc 'The interval of refreshing the list of watch file.'
config_param :refresh_interval, :time, default: 60
desc 'The number of reading lines at each IO.'
config_param :read_lines_limit, :integer, default: 1000
desc 'The number of reading bytes per second'
config_param :read_bytes_limit_per_second, :size, default: -1
desc 'The interval of flushing the buffer for multiline format'
config_param :multiline_flush_interval, :time, default: nil
desc 'Enable the option to emit unmatched lines.'
config_param :emit_unmatched_lines, :bool, default: false
desc 'Enable the additional watch timer.'
config_param :enable_watch_timer, :bool, default: true
desc 'Enable the stat watcher based on inotify.'
config_param :enable_stat_watcher, :bool, default: true
desc 'The encoding of the input.'
config_param :encoding, :string, default: nil
desc "The original encoding of the input. If set, in_tail tries to encode string from this to 'encoding'. Must be set with 'encoding'. "
config_param :from_encoding, :string, default: nil
desc 'Add the log path being tailed to records. Specify the field name to be used.'
config_param :path_key, :string, default: nil
desc 'Open and close the file on every update instead of leaving it open until it gets rotated.'
config_param :open_on_every_update, :bool, default: false
desc 'Limit the watching files that the modification time is within the specified time range (when use \'*\' in path).'
config_param :limit_recently_modified, :time, default: nil
desc 'Enable the option to skip the refresh of watching list on startup.'
config_param :skip_refresh_on_startup, :bool, default: false
desc 'Ignore repeated permission error logs'
config_param :ignore_repeated_permission_error, :bool, default: false
desc 'Format path with the specified timezone'
config_param :path_timezone, :string, default: nil
desc 'Follow inodes instead of following file names. Guarantees more stable delivery and allows to use * in path pattern with rotating files'
config_param :follow_inodes, :bool, default: false
desc 'Maximum length of line. The longer line is just skipped.'
config_param :max_line_size, :size, default: nil
config_section :parse, required: false, multi: true, init: true, param_name: :parser_configs do
config_argument :usage, :string, default: 'in_tail_parser'
end
attr_reader :paths
# Validates configuration and prepares parser, tag, encoding, permissions
# and metrics. Raises Fluent::ConfigError on any invalid combination.
def configure(conf)
@variable_store = Fluent::VariableStore.fetch_or_build(:in_tail)
compat_parameters_convert(conf, :parser)
parser_config = conf.elements('parse').first
unless parser_config
raise Fluent::ConfigError, "<parse> section is required."
end
# Copy legacy top-level format1..formatN params into the <parse> section.
(1..Fluent::Plugin::MultilineParser::FORMAT_MAX_NUM).each do |n|
parser_config["format#{n}"] = conf["format#{n}"] if conf["format#{n}"]
end
parser_config['unmatched_lines'] = conf['emit_unmatched_lines']
super
# At least one change-detection mechanism must remain enabled.
if !@enable_watch_timer && !@enable_stat_watcher
raise Fluent::ConfigError, "either of enable_watch_timer or enable_stat_watcher must be true"
end
# glob_policy :always treats '{a,b}' as glob alternation, which collides
# with the default ',' path delimiter.
if @glob_policy == :always && @path_delimiter == ','
raise Fluent::ConfigError, "cannot use glob_policy as always with the default path_delimiter: `,\""
end
if @glob_policy == :extended && /\{.*,.*\}/.match?(@path) && extended_glob_pattern(@path)
raise Fluent::ConfigError, "cannot include curly braces with glob patterns in `#{@path}\". Use glob_policy always instead."
end
if RESERVED_CHARS.include?(@path_delimiter)
rc = RESERVED_CHARS.join(', ')
raise Fluent::ConfigError, "#{rc} are reserved words: #{@path_delimiter}"
end
@paths = @path.split(@path_delimiter).map(&:strip).uniq
if @paths.empty?
raise Fluent::ConfigError, "tail: 'path' parameter is required on tail input"
end
# Pre-build per-path strftime formatters when a path timezone is given.
if @path_timezone
Fluent::Timezone.validate!(@path_timezone)
@path_formatters = @paths.map{|path| [path, Fluent::Timezone.formatter(@path_timezone, path)]}.to_h
@exclude_path_formatters = @exclude_path.map{|path| [path, Fluent::Timezone.formatter(@path_timezone, path)]}.to_h
end
check_dir_permission unless Fluent.windows?
# TODO: Use plugin_root_dir and storage plugin to store positions if available
if @pos_file
# Two in_tail instances sharing one pos_file would corrupt positions.
if @variable_store.key?(@pos_file) && !called_in_test?
plugin_id_using_this_path = @variable_store[@pos_file]
raise Fluent::ConfigError, "Other 'in_tail' plugin already use same pos_file path: plugin_id = #{plugin_id_using_this_path}, pos_file path = #{@pos_file}"
end
@variable_store[@pos_file] = self.plugin_id
else
if @follow_inodes
raise Fluent::ConfigError, "Can't follow inodes without pos_file configuration parameter"
end
log.warn "'pos_file PATH' parameter is not set to a 'tail' source."
log.warn "this parameter is highly recommended to save the position to resume tailing."
end
configure_tag
configure_encoding
# Multiline parsers buffer partial entries; pick the line handler once here.
@multiline_mode = parser_config["@type"].include?("multiline")
@receive_handler = if @multiline_mode
method(:parse_multilines)
else
method(:parse_singleline)
end
@file_perm = system_config.file_permission || Fluent::DEFAULT_FILE_PERMISSION
@dir_perm = system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION
# parser is already created by parser helper
@parser = parser_create(usage: parser_config['usage'] || @parser_configs.first.usage)
if @read_bytes_limit_per_second > 0
if !@enable_watch_timer
raise Fluent::ConfigError, "Need to enable watch timer when using log throttling feature"
end
# Clamp the throttle to at least one IO read chunk.
min_bytes = TailWatcher::IOHandler::BYTES_TO_READ
if @read_bytes_limit_per_second < min_bytes
log.warn "Should specify greater equal than #{min_bytes}. Use #{min_bytes} for read_bytes_limit_per_second"
@read_bytes_limit_per_second = min_bytes
end
end
opened_file_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "files_opened_total", help_text: "Total number of opened files")
closed_file_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "files_closed_total", help_text: "Total number of closed files")
rotated_file_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "files_rotated_total", help_text: "Total number of rotated files")
throttling_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "files_throttled_total", help_text: "Total number of times throttling occurs per file when throttling enabled")
# The metrics for currently tracking files. Since the value may decrease, it cannot be represented using the counter type, so 'prefer_gauge: true' is used instead.
tracked_file_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "files_tracked_count", help_text: "Number of tracked files", prefer_gauge: true)
@metrics = MetricsInfo.new(opened_file_metrics, closed_file_metrics, rotated_file_metrics, throttling_metrics, tracked_file_metrics)
end
# For each configured path that does not exist yet, walks its ancestor
# directories (root-first) and warns about the first one lacking execute
# (search) permission, since that would prevent tailing the file later.
# Warning-only: does not raise or alter state.
def check_dir_permission
expand_paths_raw.select { |path|
not File.exist?(path)
}.each { |path|
# ascend yields leaf-to-root; reverse_each makes it root-to-leaf so the
# outermost inaccessible directory is reported.
inaccessible_dir = Pathname.new(File.expand_path(path))
.ascend
.reverse_each
.find { |p| p.directory? && !p.executable? }
if inaccessible_dir
log.warn "Skip #{path} because '#{inaccessible_dir}' lacks execute permission."
end
}
end
# Splits a wildcard tag such as "svc.*" into the fragments before and
# after the '*'; these are later wrapped around each watcher's
# path-derived tag. Without a '*', both fragments stay nil and the
# static @tag is emitted unchanged.
def configure_tag
  if @tag.include?('*')
    head, tail = @tag.split('*')
    @tag_prefix = head || ''
    @tag_suffix = tail || ''
  else
    @tag_prefix = nil
    @tag_suffix = nil
  end
end
# Validates and resolves the encoding / from_encoding parameters.
# 'from_encoding' is only meaningful together with 'encoding' (source ->
# destination conversion); specifying it alone is a configuration error.
def configure_encoding
unless @encoding
if @from_encoding
raise Fluent::ConfigError, "tail: 'from_encoding' parameter must be specified with 'encoding' parameter."
end
end
# Replace the string names with Encoding objects in place.
@encoding = parse_encoding_param(@encoding) if @encoding
@from_encoding = parse_encoding_param(@from_encoding) if @from_encoding
# Identical encodings make the conversion a no-op; warn but continue.
if @encoding && (@encoding == @from_encoding)
log.warn "'encoding' and 'from_encoding' are same encoding. No effect"
end
end
# Resolves an encoding name to an Encoding object.
# Returns nil when no name was supplied; an unknown name raises
# Fluent::ConfigError (wrapping Encoding.find's ArgumentError) so that
# configure fails with a clear message.
def parse_encoding_param(encoding_name)
  return nil unless encoding_name
  Encoding.find(encoding_name)
rescue ArgumentError => e
  raise Fluent::ConfigError, e.message
end
# Opens the position file (creating its directory if needed), loads the
# saved positions, schedules optional pos-file compaction, and starts the
# periodic watch-list refresh.
def start
super
if @pos_file
pos_file_dir = File.dirname(@pos_file)
FileUtils.mkdir_p(pos_file_dir, mode: @dir_perm) unless Dir.exist?(pos_file_dir)
@pf_file = File.open(@pos_file, File::RDWR|File::CREAT|File::BINARY, @file_perm)
# sync so positions survive a crash without explicit flushes.
@pf_file.sync = true
@pf = PositionFile.load(@pf_file, @follow_inodes, expand_paths, logger: log)
if @pos_file_compaction_interval
timer_execute(:in_tail_refresh_compact_pos_file, @pos_file_compaction_interval) do
log.info('Clean up the pos file')
@pf.try_compact
end
end
end
# Initial scan can be skipped (skip_refresh_on_startup); the periodic
# timer below will pick files up on its first tick instead.
refresh_watchers unless @skip_refresh_on_startup
timer_execute(:in_tail_refresh_watchers, @refresh_interval, &method(:refresh_watchers))
end
# Releases this plugin's pos_file registration from the shared variable
# store (so another in_tail can later claim the same path), then runs the
# normal plugin stop sequence.
def stop
  @variable_store&.delete(@pos_file)
  super
end
# Begins shutdown: stops all watchers immediately (without closing their
# IO — that happens in #close), detaches watchers still in rotate_wait,
# and closes the position file.
def shutdown
# Recorded so TailWatcher#detach can bound its work against shutdown time.
@shutdown_start_time = Fluent::Clock.now
# during shutdown phase, don't close io. It should be done in close after all threads are stopped. See close.
stop_watchers(existence_path, immediate: true, remove_watcher: false)
@tails_rotate_wait.keys.each do |tw|
detach_watcher(tw, @tails_rotate_wait[tw][:ino], false)
end
@pf_file.close if @pf_file
super
end
# Final cleanup step: closes all remaining file handles once every plugin
# thread has stopped.
def close
super
# close file handles after all threads stopped (in #close of thread plugin helper)
# It may be because we need to wait IOHandler.ready_to_shutdown()
close_watcher_handles
end
# True when the process holds an effective Linux capability that allows
# reading files regardless of permission bits (CAP_DAC_READ_SEARCH or
# CAP_DAC_OVERRIDE).
def have_read_capability?
  %i[dac_read_search dac_override].any? do |cap|
    @capability.have_capability?(:effective, cap)
  end
end
# True when the path uses glob metacharacters: '*', '?', or a character
# class such as '[0-9]'. Brace alternation is deliberately excluded here
# (handled separately by use_glob? under glob_policy :always).
def extended_glob_pattern(path)
  return true if path.include?('*') || path.include?('?')
  /\[.*\]/.match?(path)
end
# Curly braces is not supported with default path_delimiter
# because the default delimiter of path is ",".
# This should be collided for wildcard pattern for curly braces and
# be handled as an error on #configure.
# Decides whether a configured path should be expanded via Dir.glob.
# :backward_compatible keeps the historical behavior ('*' only);
# :extended also honors '?' and '[...]';
# :always additionally treats '{a,b}' alternation as a glob (only safe
# with a non-',' path_delimiter — enforced in #configure).
def use_glob?(path)
  case @glob_policy
  when :always
    extended_glob_pattern(path) || /\{.*,.*\}/.match?(path)
  when :extended
    extended_glob_pattern(path)
  when :backward_compatible
    path.include?('*')
  end
end
# Expands the configured paths into concrete candidate file paths:
# applies strftime / timezone formatting, glob expansion, readability and
# mtime filtering, then removes anything matching exclude_path.
# Non-glob paths are kept even if the file does not exist yet.
def expand_paths_raw
date = Fluent::EventTime.now
paths = []
@paths.each { |path|
path = if @path_timezone
@path_formatters[path].call(date)
else
date.to_time.strftime(path)
end
if use_glob?(path)
paths += Dir.glob(path).select { |p|
begin
is_file = !File.directory?(p)
if (File.readable?(p) || have_read_capability?) && is_file
# Optionally skip files not modified recently enough.
if @limit_recently_modified && File.mtime(p) < (date.to_time - @limit_recently_modified)
false
else
true
end
else
if is_file
# Warn once per path when ignore_repeated_permission_error is set.
unless @ignore_list.include?(p)
log.warn "#{p} unreadable. It is excluded and would be examined next time."
@ignore_list << p if @ignore_repeated_permission_error
end
end
false
end
rescue Errno::ENOENT, Errno::EACCES
# File vanished (or became inaccessible) between glob and stat; drop it.
log.debug { "#{p} is missing after refresh file list" }
false
end
}
else
# When file is not created yet, Dir.glob returns an empty array. So just add when path is static.
paths << path
end
}
# exclude_path entries get the same formatting/glob treatment.
excluded = @exclude_path.map { |path|
path = if @path_timezone
@exclude_path_formatters[path].call(date)
else
date.to_time.strftime(path)
end
use_glob?(path) ? Dir.glob(path) : path
}.flatten.uniq
paths - excluded
end
# Like #expand_paths_raw but only existing files, returned as a hash of
# TargetInfo keyed by inode (follow_inodes) or by path.
def expand_paths
# filter out non existing files, so in case pattern is without '*' we don't do unnecessary work
hash = {}
expand_paths_raw.select { |path|
File.exist?(path)
}.each { |path|
# Even we just checked for existence, there is a race condition here as
# of which stat() might fail with ENOENT. See #3224.
begin
target_info = TargetInfo.new(path, Fluent::FileWrapper.stat(path).ino)
if @follow_inodes
hash[target_info.ino] = target_info
else
hash[target_info.path] = target_info
end
rescue Errno::ENOENT, Errno::EACCES => e
log.warn "expand_paths: stat() for #{path} failed with #{e.class.name}. Skip file."
end
}
hash
end
# Snapshot of currently-watched targets as a hash of TargetInfo values,
# keyed by inode when @follow_inodes is set (rotation-stable identity)
# and by path otherwise — the same keying scheme as #expand_paths, so
# the two hashes can be diffed in #refresh_watchers.
def existence_path
  @tails.each_with_object({}) do |(_path, tw), hash|
    key = @follow_inodes ? tw.ino : tw.path
    hash[key] = TargetInfo.new(tw.path, tw.ino)
  end
end
# in_tail with '*' path doesn't check rotation file equality at refresh phase.
# So you should not use '*' path when your logs will be rotated by another tool.
# It will cause log duplication after updated watch files.
# In such case, you should separate log directory and specify two paths in path parameter.
# e.g. path /path/to/dir/*,/path/to/rotated_logs/target_file
# Periodic reconciliation of the watch list: diffs the freshly expanded
# targets against currently watched ones, stops watchers for removed
# targets, starts watchers for added ones, and updates the tracked-files
# gauge. Runs on start (unless skipped) and every refresh_interval.
def refresh_watchers
target_paths_hash = expand_paths
existence_paths_hash = existence_path
log.debug {
target_paths_str = target_paths_hash.collect { |key, target_info| target_info.path }.join(",")
existence_paths_str = existence_paths_hash.collect { |key, target_info| target_info.path }.join(",")
"tailing paths: target = #{target_paths_str} | existing = #{existence_paths_str}"
}
if !@follow_inodes
need_unwatch_in_stop_watchers = true
else
# When using @follow_inodes, need this to unwatch the rotated old inode when it disappears.
# After `update_watcher` detaches an old TailWatcher, the inode is lost from the `@tails`.
# So that inode can't be contained in `removed_hash`, and can't be unwatched by `stop_watchers`.
#
# This logic may work for `@follow_inodes false` too.
# Just limiting the case to suppress the impact to existing logics.
@pf&.unwatch_removed_targets(target_paths_hash)
need_unwatch_in_stop_watchers = false
end
removed_hash = existence_paths_hash.reject {|key, value| target_paths_hash.key?(key)}
added_hash = target_paths_hash.reject {|key, value| existence_paths_hash.key?(key)}
# If an existing TailWatcher already follows a target path with the different inode,
# it means that the TailWatcher following the rotated file still exists. In this case,
# `refresh_watcher` can't start the new TailWatcher for the new current file. So, we
# should output a warning log in order to prevent silent collection stops.
# (Such as https://github.com/fluent/fluentd/pull/4327)
# (Usually, such a TailWatcher should be removed from `@tails` in `update_watcher`.)
# (The similar warning may work for `@follow_inodes true` too. Just limiting the case
# to suppress the impact to existing logics.)
unless @follow_inodes
target_paths_hash.each do |path, target|
next unless @tails.key?(path)
# We can't use `existence_paths_hash[path].ino` because it is from `TailWatcher.ino`,
# which is very unstable parameter. (It can be `nil` or old).
# So, we need to use `TailWatcher.pe.read_inode`.
existing_watcher_inode = @tails[path].pe.read_inode
if existing_watcher_inode != target.ino
log.warn "Could not follow a file (inode: #{target.ino}) because an existing watcher for that filepath follows a different inode: #{existing_watcher_inode} (e.g. keeps watching a already rotated file). If you keep getting this message, please restart Fluentd.",
filepath: target.path
end
end
end
stop_watchers(removed_hash, unwatched: need_unwatch_in_stop_watchers) unless removed_hash.empty?
start_watchers(added_hash) unless added_hash.empty?
@metrics.tracked.set(@tails.size)
@startup = false if @startup
end
# Builds a TailWatcher for one target and attaches its change-detection
# triggers (1s timer and/or inotify-based stat watcher) to the event
# loop. On any failure the partially-built watcher is torn down before
# re-raising, so no triggers leak into the event loop.
def setup_watcher(target_info, pe)
line_buffer_timer_flusher = @multiline_mode ? TailWatcher::LineBufferTimerFlusher.new(log, @multiline_flush_interval, &method(:flush_buffer)) : nil
# Files discovered after startup are always read from the head (they are new);
# at startup this follows the read_from_head option.
read_from_head = !@startup || @read_from_head
tw = TailWatcher.new(target_info, pe, log, read_from_head, @follow_inodes, method(:update_watcher), line_buffer_timer_flusher, method(:io_handler), @metrics)
if @enable_watch_timer
tt = TimerTrigger.new(1, log) { tw.on_notify }
tw.register_watcher(tt)
end
if @enable_stat_watcher
tt = StatWatcher.new(target_info.path, log) { tw.on_notify }
tw.register_watcher(tt)
end
tw.watchers.each do |watcher|
event_loop_attach(watcher)
end
tw.group_watcher = add_path_to_group_watcher(target_info.path)
tw
rescue => e
# Roll back: detach anything already attached and close the watcher.
if tw
tw.watchers.each do |watcher|
event_loop_detach(watcher)
end
tw.detach(@shutdown_start_time)
tw.close
end
raise e
end
# Creates and registers a watcher for one target, re-checking the inode
# right before setup (the file may have rotated/vanished since path
# expansion). Failures are logged and skipped, never raised.
def construct_watcher(target_info)
path = target_info.path
# The file might be rotated or removed after collecting paths, so check inode again here.
begin
target_info.ino = Fluent::FileWrapper.stat(path).ino
rescue Errno::ENOENT, Errno::EACCES
log.warn "stat() for #{path} failed. Continuing without tailing it."
return
end
pe = nil
if @pf
pe = @pf[target_info]
# Fresh position entry + read_from_head: pin it to this inode at offset 0.
pe.update(target_info.ino, 0) if @read_from_head && pe.read_inode.zero?
end
begin
tw = setup_watcher(target_info, pe)
rescue WatcherSetupError => e
log.warn "Skip #{path} because unexpected setup error happens: #{e}"
return
end
@tails[path] = tw
tw.on_notify
end
# Creates and registers a watcher for every target in the hash, bailing
# out early once shutdown has begun so no new files are opened while the
# plugin is going down.
def start_watchers(targets_info)
  targets_info.each_value do |target_info|
    construct_watcher(target_info)
    break if before_shutdown?
  end
end
# Stops the watchers for the given targets.
#   immediate:      detach now instead of waiting rotate_wait
#   unwatched:      mark the watcher so its pos-file entry is dropped
#   remove_watcher: also delete the watcher from @tails (false during
#                   shutdown, where entries are cleaned up in #close)
def stop_watchers(targets_info, immediate: false, unwatched: false, remove_watcher: true)
targets_info.each_value { |target_info|
remove_path_from_group_watcher(target_info.path)
if remove_watcher
tw = @tails.delete(target_info.path)
else
tw = @tails[target_info.path]
end
if tw
tw.unwatched = unwatched
if immediate
detach_watcher(tw, target_info.ino, false)
else
detach_watcher_after_rotate_wait(tw, target_info.ino)
end
end
}
end
# Closes every remaining file handle: active watchers in @tails (each
# entry is removed as it is closed) plus watchers still sitting out
# rotate_wait. Called from #close after all plugin threads have stopped.
def close_watcher_handles
  @tails.keys.each { |path| @tails.delete(path)&.close }
  @tails_rotate_wait.each_key(&:close)
end
# refresh_watchers calls @tails.keys so we don't use stop_watcher -> start_watcher sequence for safety.
# Rotation callback from TailWatcher: swaps in a watcher for the new
# inode at the same path (unless another event already did), then
# detaches the old watcher after rotate_wait. With follow_inodes and a
# nil new_inode, the file disappeared and only teardown is needed.
def update_watcher(tail_watcher, pe, new_inode)
# TODO we should use another callback for this.
# To suppress impact to existing logics, limit the case to `@follow_inodes`.
# We may not need `@follow_inodes` condition.
if @follow_inodes && new_inode.nil?
# nil inode means the file disappeared, so we only need to stop it.
@tails.delete(tail_watcher.path)
# https://github.com/fluent/fluentd/pull/4237#issuecomment-1633358632
# Because of this problem, log duplication can occur during `rotate_wait`.
# Need to set `rotate_wait 0` for a workaround.
# Duplication will occur if `refresh_watcher` is called during the `rotate_wait`.
# In that case, `refresh_watcher` will add the new TailWatcher to tail the same target,
# and it causes the log duplication.
# (Other `detach_watcher_after_rotate_wait` may have the same problem.
# We need the mechanism not to add duplicated TailWatcher with detaching TailWatcher.)
detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
return
end
path = tail_watcher.path
log.info("detected rotation of #{path}; waiting #{@rotate_wait} seconds")
if @pf
# Guard against racing inotify events: only proceed if the pos-file
# entry still matches this watcher's position entry.
pe_inode = pe.read_inode
target_info_from_position_entry = TargetInfo.new(path, pe_inode)
unless pe_inode == @pf[target_info_from_position_entry].read_inode
log.warn "Skip update_watcher because watcher has been already updated by other inotify event",
path: path, inode: pe.read_inode, inode_in_pos_file: @pf[target_info_from_position_entry].read_inode
return
end
end
new_target_info = TargetInfo.new(path, new_inode)
if @follow_inodes
new_position_entry = @pf[new_target_info]
# If `refresh_watcher` find the new file before, this will not be zero.
# In this case, only we have to do is detaching the current tail_watcher.
if new_position_entry.read_inode == 0
@tails[path] = setup_watcher(new_target_info, new_position_entry)
@tails[path].on_notify
end
else
@tails[path] = setup_watcher(new_target_info, pe)
@tails[path].on_notify
end
detach_watcher_after_rotate_wait(tail_watcher, pe.read_inode)
end
# Detaches a TailWatcher from the event loop and optionally closes its IO.
#
# @param tw [TailWatcher] watcher to tear down
# @param ino [Integer] inode this detach refers to; sanity-checked against
#   tw.ino and used to unwatch the matching position-file entry
# @param close_io [Boolean] whether to close the IO handler now (false
#   during shutdown — IO is closed later in #close)
def detach_watcher(tw, ino, close_io = true)
  if @follow_inodes && tw.ino != ino
    log.warn("detach_watcher could be detaching an unexpected tail_watcher with a different ino.",
             path: tw.path, actual_ino_in_tw: tw.ino, expect_ino_to_close: ino)
  end
  tw.watchers.each do |watcher|
    event_loop_detach(watcher)
  end
  tw.detach(@shutdown_start_time)
  tw.close if close_io
  # Drop the pos-file entry for targets we will no longer watch.
  # Fixed a typo here: the original tested the undefined variable
  # `@follow_inode` (always nil), which silently disabled this branch
  # whenever another watcher still existed for the same path.
  if @pf && tw.unwatched && (@follow_inodes || !@tails[tw.path])
    target_info = TargetInfo.new(tw.path, ino)
    @pf.unwatch(target_info)
  end
end
# Throttling applies when a global read_bytes_limit_per_second is set or
# when the watcher's group carries a non-negative limit.
def throttling_is_enabled?(tw)
  return true if @read_bytes_limit_per_second > 0
  group = tw.group_watcher
  !group.nil? && group.limit >= 0
end
# Schedules detaching of a rotated-away watcher. Normally waits
# rotate_wait seconds; with throttling enabled it polls every second and
# waits until both EOF is reached and rotate_wait has elapsed, so
# throttled files are fully drained before closing.
def detach_watcher_after_rotate_wait(tw, ino)
# Call event_loop_attach/event_loop_detach is high-cost for short-live object.
# If this has a problem with large number of files, use @_event_loop directly instead of timer_execute.
if @open_on_every_update
# Detach now because it's already closed, waiting it doesn't make sense.
detach_watcher(tw, ino)
end
# Already scheduled — don't create a second timer for the same watcher.
return if @tails_rotate_wait[tw]
if throttling_is_enabled?(tw)
# When the throttling feature is enabled, it might not reach EOF yet.
# Should ensure to read all contents before closing it, with keeping throttling.
start_time_to_wait = Fluent::Clock.now
timer = timer_execute(:in_tail_close_watcher, 1, repeat: true) do
elapsed = Fluent::Clock.now - start_time_to_wait
if tw.eof? && elapsed >= @rotate_wait
timer.detach
@tails_rotate_wait.delete(tw)
detach_watcher(tw, ino)
end
end
@tails_rotate_wait[tw] = { ino: ino, timer: timer }
else
# when the throttling feature isn't enabled, just wait @rotate_wait
timer = timer_execute(:in_tail_close_watcher, @rotate_wait, repeat: false) do
@tails_rotate_wait.delete(tw)
detach_watcher(tw, ino)
end
@tails_rotate_wait[tw] = { ino: ino, timer: timer }
end
end
# Flushes a partially-buffered multiline entry (invoked by the
# LineBufferTimerFlusher and at shutdown): parses the buffered text and
# emits it; when the parser cannot match it, optionally emits it as an
# 'unmatched_line' record.
#
# Bug fix: the unmatched branch referenced the undefined local
# `tail_watcher` when applying @path_key (the parameter is `tw`), raising
# NameError whenever emit_unmatched_lines and path_key were both set.
#
# @param tw [TailWatcher] watcher whose line buffer is being flushed
# @param buf [String] buffered data; trailing newline is stripped in place
def flush_buffer(tw, buf)
  buf.chomp!
  @parser.parse(buf) { |time, record|
    if time && record
      tag = if @tag_prefix || @tag_suffix
              @tag_prefix + tw.tag + @tag_suffix
            else
              @tag
            end
      record[@path_key] ||= tw.path unless @path_key.nil?
      router.emit(tag, time, record)
    else
      if @emit_unmatched_lines
        record = { 'unmatched_line' => buf }
        # Fixed: was `tail_watcher.path` (undefined local -> NameError).
        record[@path_key] ||= tw.path unless @path_key.nil?
        tag = if @tag_prefix || @tag_suffix
                @tag_prefix + tw.tag + @tag_suffix
              else
                @tag
              end
        router.emit(tag, Fluent::EventTime.now, record)
      end
      log.warn "got incomplete line at shutdown from #{tw.path}: #{buf.inspect}"
    end
  }
end
# @return true if no error or unrecoverable error happens in emit action. false if got BufferOverflowError
# Converts raw lines into an event stream (single/multiline handler
# chosen in #configure) and emits it with the watcher-specific tag.
# Returns false only on BufferOverflowError so the caller can retry;
# other emit errors are unrecoverable here and reported as success.
def receive_lines(lines, tail_watcher)
es = @receive_handler.call(lines, tail_watcher)
unless es.empty?
tag = if @tag_prefix || @tag_suffix
@tag_prefix + tail_watcher.tag + @tag_suffix
else
@tag
end
begin
router.emit_stream(tag, es)
rescue Fluent::Plugin::Buffer::BufferOverflowError
return false
rescue
# ignore non BufferQueueLimitError errors because in_tail can't recover. Engine shows logs and backtraces.
return true
end
end
return true
end
# Parses one line and appends the resulting event to the stream; on a
# parser miss, optionally emits it as an 'unmatched_line' record. Any
# parse failure is logged and swallowed so one bad line never stops the
# tailing loop.
def convert_line_to_event(line, es, tail_watcher)
begin
line.chomp! # remove \n
@parser.parse(line) { |time, record|
if time && record
record[@path_key] ||= tail_watcher.path unless @path_key.nil?
es.add(time, record)
else
if @emit_unmatched_lines
record = {'unmatched_line' => line}
record[@path_key] ||= tail_watcher.path unless @path_key.nil?
es.add(Fluent::EventTime.now, record)
end
log.warn { "pattern not matched: #{line.inspect}" }
end
}
rescue => e
log.warn 'invalid line found', file: tail_watcher.path, line: line, error: e.to_s
log.debug_backtrace(e.backtrace)
end
end
# Converts each raw line into an event via the configured parser and
# collects the results into a single MultiEventStream.
def parse_singleline(lines, tail_watcher)
  Fluent::MultiEventStream.new.tap do |es|
    lines.each { |line| convert_line_to_event(line, es, tail_watcher) }
  end
end
# No need to check if line_buffer_timer_flusher is nil, since line_buffer_timer_flusher should exist
# Multiline handler: groups incoming lines into logical entries using the
# parser's firstline pattern (or, without one, by feeding an accumulating
# buffer to the parser until it matches). The trailing incomplete entry
# is stashed back on the watcher's line buffer for the next batch or the
# timer flush.
def parse_multilines(lines, tail_watcher)
lb = tail_watcher.line_buffer_timer_flusher.line_buffer
es = Fluent::MultiEventStream.new
if @parser.has_firstline?
tail_watcher.line_buffer_timer_flusher.reset_timer
lines.each { |line|
if @parser.firstline?(line)
# A new entry starts: flush the previously accumulated one.
if lb
convert_line_to_event(lb, es, tail_watcher)
end
lb = line
else
if lb.nil?
if @emit_unmatched_lines
convert_line_to_event(line, es, tail_watcher)
end
log.warn "got incomplete line before first line from #{tail_watcher.path}: #{line.inspect}"
else
lb << line
end
end
}
else
lb ||= ''
lines.each do |line|
lb << line
@parser.parse(lb) { |time, record|
if time && record
convert_line_to_event(lb, es, tail_watcher)
lb = ''
end
}
end
end
tail_watcher.line_buffer_timer_flusher.line_buffer = lb
es
end
# Extends the base plugin statistics with in_tail's file counters taken
# from the plugin metrics.
def statistics
  stats = super
  file_counters = {
    'opened_file_count' => @metrics.opened.get,
    'closed_file_count' => @metrics.closed.get,
    'rotated_file_count' => @metrics.rotated.get,
    'throttled_log_count' => @metrics.throttled.get,
    'tracked_file_count' => @metrics.tracked.get,
  }
  { 'input' => stats["input"].merge(file_counters) }
end
private
def io_handler(watcher, path)
opts = {
path: path,
log: log,
read_lines_limit: @read_lines_limit,
read_bytes_limit_per_second: @read_bytes_limit_per_second,
open_on_every_update: @open_on_every_update,
metrics: @metrics,
max_line_size: @max_line_size,
}
unless @encoding.nil?
if @from_encoding.nil?
opts[:encoding] = @encoding
else
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | true |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/filter.rb | lib/fluent/plugin/filter.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/event'
require 'fluent/log'
require 'fluent/plugin_id'
require 'fluent/plugin_helper'
module Fluent
module Plugin
# Base class for filter plugins: receives the event stream for a tag,
# transforms or drops records, and passes the result downstream.
#
# Subclasses implement exactly one of #filter (record only) or
# #filter_with_time (may also rewrite the event time), or override
# #filter_stream wholesale.
#
# Improvement over the previous revision: the misspelled local variable
# `implmented_methods` in #has_filter_with_time? is renamed to
# `implemented_methods` (behavior unchanged).
class Filter < Base
  include PluginId
  include PluginLoggerMixin
  include PluginHelper::Mixin

  helpers_internal :event_emitter, :metrics

  # True when the subclass implements #filter_with_time, false for
  # #filter, nil when the subclass overrides #filter_stream itself.
  attr_reader :has_filter_with_time

  def initialize
    super
    @has_filter_with_time = has_filter_with_time?
    @emit_records_metrics = nil
    @emit_size_metrics = nil
    @counter_mutex = Mutex.new
    @enable_size_metrics = false
  end

  def configure(conf)
    super

    @emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "filter", name: "emit_records", help_text: "Number of count emit records")
    @emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "filter", name: "emit_size", help_text: "Total size of emit events")
    # Size accounting requires serializing the stream; off unless enabled
    # in system config.
    @enable_size_metrics = !!system_config.enable_size_metrics
  end

  # Snapshot of this filter's emit counters for monitor/statistics APIs.
  def statistics
    stats = {
      'emit_records' => @emit_records_metrics.get,
      'emit_size' => @emit_size_metrics.get,
    }

    { 'filter' => stats }
  end

  # Records how many events (and optionally how many bytes) passed through.
  def measure_metrics(es)
    @emit_records_metrics.add(es.size)
    @emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
  end

  # Transforms a single record; return nil to drop it.
  # Subclasses MUST override this or #filter_with_time.
  def filter(tag, time, record)
    raise NotImplementedError, "BUG: filter plugins MUST implement this method"
  end

  # Like #filter but returns [time, record], allowing the event time to be
  # rewritten; return nil for either to drop the event.
  def filter_with_time(tag, time, record)
    raise NotImplementedError, "BUG: filter plugins MUST implement this method"
  end

  # Applies the subclass's filter method to every event in the stream.
  # Per-event errors are routed to the error stream so one bad record
  # doesn't drop the whole batch.
  def filter_stream(tag, es)
    new_es = MultiEventStream.new
    if @has_filter_with_time
      es.each do |time, record|
        begin
          filtered_time, filtered_record = filter_with_time(tag, time, record)
          new_es.add(filtered_time, filtered_record) if filtered_time && filtered_record
        rescue => e
          router.emit_error_event(tag, time, record, e)
        end
      end
    else
      es.each do |time, record|
        begin
          filtered_record = filter(tag, time, record)
          new_es.add(time, filtered_record) if filtered_record
        rescue => e
          router.emit_error_event(tag, time, record, e)
        end
      end
    end
    new_es
  end

  private

  # Inspects which hook the subclass defined; see #has_filter_with_time.
  def has_filter_with_time?
    implemented_methods = self.class.instance_methods(false)
    # Plugins that override `filter_stream` don't need check,
    # because they may not call `filter` or `filter_with_time`
    # for example fluentd/lib/fluent/plugin/filter_record_transformer.rb
    return nil if implemented_methods.include?(:filter_stream)
    case
    when [:filter, :filter_with_time].all? { |e| implemented_methods.include?(e) }
      raise "BUG: Filter plugins MUST implement either `filter` or `filter_with_time`"
    when implemented_methods.include?(:filter)
      false
    when implemented_methods.include?(:filter_with_time)
      true
    else
      raise NotImplementedError, "BUG: Filter plugins MUST implement either `filter` or `filter_with_time`"
    end
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/output.rb | lib/fluent/plugin/output.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/env'
require 'fluent/error'
require 'fluent/plugin/base'
require 'fluent/plugin/buffer'
require 'fluent/plugin_helper/record_accessor'
require 'fluent/msgpack_factory'
require 'fluent/log'
require 'fluent/plugin_id'
require 'fluent/plugin_helper'
require 'fluent/timezone'
require 'fluent/unique_id'
require 'fluent/clock'
require 'fluent/ext_monitor_require'
require 'time'
module Fluent
module Plugin
class Output < Base
include PluginId
include PluginLoggerMixin
include PluginHelper::Mixin
include UniqueId::Mixin
helpers_internal :thread, :retry_state, :metrics
CHUNK_KEY_PATTERN = /^[-_.@a-zA-Z0-9]+$/
CHUNK_KEY_PLACEHOLDER_PATTERN = /\$\{([-_.@$a-zA-Z0-9]+)\}/
CHUNK_TAG_PLACEHOLDER_PATTERN = /\$\{(tag(?:\[-?\d+\])?)\}/
CHUNK_ID_PLACEHOLDER_PATTERN = /\$\{chunk_id\}/
CHUNKING_FIELD_WARN_NUM = 4
config_param :time_as_integer, :bool, default: false
desc 'The threshold to show slow flush logs'
config_param :slow_flush_log_threshold, :float, default: 20.0
# `<buffer>` and `<secondary>` sections are available only when '#format' and '#write' are implemented
config_section :buffer, param_name: :buffer_config, init: true, required: false, multi: false, final: true do
config_argument :chunk_keys, :array, value_type: :string, default: []
config_param :@type, :string, default: 'memory', alias: :type
config_param :timekey, :time, default: nil # range size to be used: `time.to_i / @timekey`
config_param :timekey_wait, :time, default: 600
# These are for #extract_placeholders
config_param :timekey_use_utc, :bool, default: false # default is localtime
config_param :timekey_zone, :string, default: Time.now.strftime('%z') # e.g., "-0700" or "Asia/Tokyo"
desc 'If true, plugin will try to flush buffer just before shutdown.'
config_param :flush_at_shutdown, :bool, default: nil # change default by buffer_plugin.persistent?
desc 'How to enqueue chunks to be flushed. "interval" flushes per flush_interval, "immediate" flushes just after event arrival.'
config_param :flush_mode, :enum, list: [:default, :lazy, :interval, :immediate], default: :default
config_param :flush_interval, :time, default: 60, desc: 'The interval between buffer chunk flushes.'
config_param :flush_thread_count, :integer, default: 1, desc: 'The number of threads to flush the buffer.'
config_param :flush_thread_interval, :float, default: 1.0, desc: 'Seconds to sleep between checks for buffer flushes in flush threads.'
config_param :flush_thread_burst_interval, :float, default: 1.0, desc: 'Seconds to sleep between flushes when many buffer chunks are queued.'
config_param :delayed_commit_timeout, :time, default: 60, desc: 'Seconds of timeout for buffer chunks to be committed by plugins later.'
config_param :overflow_action, :enum, list: [:throw_exception, :block, :drop_oldest_chunk], default: :throw_exception, desc: 'The action when the size of buffer exceeds the limit.'
config_param :retry_forever, :bool, default: false, desc: 'If true, plugin will ignore retry_timeout and retry_max_times options and retry flushing forever.'
config_param :retry_timeout, :time, default: 72 * 60 * 60, desc: 'The maximum seconds to retry to flush while failing, until plugin discards buffer chunks.'
# 72hours == 17 times with exponential backoff (not to change default behavior)
config_param :retry_max_times, :integer, default: nil, desc: 'The maximum number of times to retry to flush while failing.'
config_param :retry_secondary_threshold, :float, default: 0.8, desc: 'ratio of retry_timeout to switch to use secondary while failing.'
# exponential backoff sequence will be initialized at the time of this threshold
desc 'How to wait next retry to flush buffer.'
config_param :retry_type, :enum, list: [:exponential_backoff, :periodic], default: :exponential_backoff
### Periodic -> fixed :retry_wait
### Exponential backoff: k is number of retry times
# c: constant factor, @retry_wait
# b: base factor, @retry_exponential_backoff_base
# k: times
# total retry time: c + c * b^1 + (...) + c*b^k = c*b^(k+1) - 1
config_param :retry_wait, :time, default: 1, desc: 'Seconds to wait before next retry to flush, or constant factor of exponential backoff.'
config_param :retry_exponential_backoff_base, :float, default: 2, desc: 'The base number of exponential backoff for retries.'
config_param :retry_max_interval, :time, default: nil, desc: 'The maximum interval seconds for exponential backoff between retries while failing.'
config_param :retry_randomize, :bool, default: true, desc: 'If true, output plugin will retry after randomized interval not to do burst retries.'
end
config_section :secondary, param_name: :secondary_config, required: false, multi: false, final: true do
config_param :@type, :string, default: nil, alias: :type
config_section :buffer, required: false, multi: false do
# dummy to detect invalid specification for here
end
config_section :secondary, required: false, multi: false do
# dummy to detect invalid specification for here
end
end
# Synchronous (non-buffered) emission hook; subclasses implementing
# synchronous mode override this to handle an event stream directly.
def process(tag, es)
  raise NotImplementedError, "BUG: output plugins MUST implement this method"
end

# Buffered emission hook; subclasses implementing buffered mode override
# this to write a whole buffer chunk synchronously.
def write(chunk)
  raise NotImplementedError, "BUG: output plugins MUST implement this method"
end

# Delayed-commit emission hook; subclasses override this to start writing
# a chunk and call commit_write later when the write is confirmed.
def try_write(chunk)
  raise NotImplementedError, "BUG: output plugins MUST implement this method"
end

# Custom chunk-format hook; overriding this makes the plugin a
# custom-format plugin (see #implement?).
def format(tag, time, record)
  # standard msgpack_event_stream chunk will be used if this method is not implemented in plugin subclass
  raise NotImplementedError, "BUG: output plugins MUST implement this method"
end

def formatted_to_msgpack_binary?
  # To indicate custom format method (#format) returns msgpack binary or not.
  # If #format returns msgpack binary, override this method to return true.
  false
end

# Compatibility for existing plugins
# (older plugins override/call the name without the trailing `?`).
def formatted_to_msgpack_binary
  formatted_to_msgpack_binary?
end

def prefer_buffered_processing
  # override this method to return false only when all of these are true:
  # * plugin has both implementation for buffered and non-buffered methods
  # * plugin is expected to work as non-buffered plugin if no `<buffer>` sections specified
  true
end

def prefer_delayed_commit
  # override this method to decide which is used of `write` or `try_write` if both are implemented
  true
end

# Whether this plugin can run under multi-worker deployment; plugins that
# are safe to run in parallel override this to return true.
def multi_workers_ready?
  false
end
# Internal states shared between the flush threads and their controller.
FlushThreadState = Struct.new(:thread, :next_clock, :mutex, :cond_var)

# Bookkeeping for a chunk handed out for delayed commit: it becomes
# expired once `timeout` seconds have elapsed since `time`.
DequeuedChunkInfo = Struct.new(:chunk_id, :time, :timeout) do
  def expired?
    Time.now > time + timeout
  end
end
attr_reader :as_secondary, :delayed_commit, :delayed_commit_timeout, :timekey_zone
# for tests
attr_reader :buffer, :retry, :secondary, :chunk_keys, :chunk_key_accessors, :chunk_key_time, :chunk_key_tag
attr_accessor :output_enqueue_thread_waiting, :dequeued_chunks, :dequeued_chunks_mutex
# output_enqueue_thread_waiting: for test of output.rb itself
attr_accessor :retry_for_error_chunk # if true, error flush will be retried even if under_plugin_development is true
# Initializes all internal state. Whether this plugin runs buffered is
# pre-computed here from which API methods the subclass implements
# (@buffering: true / false / nil == decide later in #configure/#start).
def initialize
  super
  @counter_mutex = Mutex.new
  @flush_thread_mutex = Mutex.new
  @buffering = false
  @delayed_commit = false
  @as_secondary = false
  @primary_instance = nil

  # TODO: well organized counters
  @num_errors_metrics = nil
  @emit_count_metrics = nil
  @emit_records_metrics = nil
  @emit_size_metrics = nil
  @write_count_metrics = nil
  @write_secondary_count_metrics = nil
  @rollback_count_metrics = nil
  @flush_time_count_metrics = nil
  @slow_flush_count_metrics = nil
  @drop_oldest_chunk_count_metrics = nil
  @enable_size_metrics = false

  # How to process events is decided here at once, but it will be decided in delayed way on #configure & #start
  if implement?(:synchronous)
    if implement?(:buffered) || implement?(:delayed_commit)
      @buffering = nil # do #configure or #start to determine this for full-featured plugins
    else
      @buffering = false
    end
  else
    @buffering = true
  end
  @custom_format = implement?(:custom_format)
  @enable_msgpack_streamer = false # decided later

  @buffer = nil
  @secondary = nil
  @retry = nil
  @dequeued_chunks = nil
  @dequeued_chunks_mutex = nil
  @output_enqueue_thread = nil
  @output_flush_threads = nil
  @output_flush_thread_current_position = 0

  # chunking-related state; filled in by #configure when buffering
  @simple_chunking = nil
  @chunk_keys = @chunk_key_accessors = @chunk_key_time = @chunk_key_tag = nil
  @flush_mode = nil
  @timekey_zone = nil

  @retry_for_error_chunk = false
end
# Marks this instance as the secondary output of `primary`: chunking
# settings are copied from the primary, and commit/rollback are
# redefined on the singleton class to delegate back to the primary
# (the primary owns the buffer and the retry state).
def acts_as_secondary(primary)
  @as_secondary = true
  @primary_instance = primary
  @chunk_keys = @primary_instance.chunk_keys || []
  @chunk_key_tag = @primary_instance.chunk_key_tag || false
  if @primary_instance.chunk_key_time
    @chunk_key_time = @primary_instance.chunk_key_time
    @timekey_zone = @primary_instance.timekey_zone
    @output_time_formatter_cache = {}
  end
  self.context_router = primary.context_router

  # override commit/rollback on this instance only, routing through the primary
  singleton_class.module_eval do
    define_method(:commit_write){ |chunk_id| @primary_instance.commit_write(chunk_id, delayed: delayed_commit, secondary: true) }
    define_method(:rollback_write){ |chunk_id, update_retry: true| @primary_instance.rollback_write(chunk_id, update_retry) }
  end
end
# Configures the output: decides buffered vs non-buffered mode, sets up
# metrics, chunk keys, flush mode, the buffer instance and the optional
# secondary output. Raises Fluent::ConfigError on invalid combinations.
def configure(conf)
  unless implement?(:synchronous) || implement?(:buffered) || implement?(:delayed_commit)
    raise "BUG: output plugin must implement some methods. see developer documents."
  end

  # capture these before `super` mutates/consumes the configuration
  has_buffer_section = (conf.elements(name: 'buffer').size > 0)
  has_flush_interval = conf.has_key?('flush_interval')

  super

  @num_errors_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "num_errors", help_text: "Number of count num errors")
  @emit_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_count", help_text: "Number of count emits")
  @emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_records", help_text: "Number of emit records")
  @emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "emit_size", help_text: "Total size of emit events")
  @write_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "write_count", help_text: "Number of writing events")
  @write_secondary_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "write_secondary_count", help_text: "Number of writing events in secondary")
  @rollback_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "rollback_count", help_text: "Number of rollbacking operations")
  @flush_time_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "flush_time_count", help_text: "Count of flush time")
  @slow_flush_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "slow_flush_count", help_text: "Count of slow flush occurred time(s)")
  @drop_oldest_chunk_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "drop_oldest_chunk_count", help_text: "Number of count that old chunk were discarded with drop_oldest_chunk")

  # Decide @buffering: an explicit <buffer> section forces buffered mode;
  # otherwise it depends on which API methods the plugin implements.
  if has_buffer_section
    unless implement?(:buffered) || implement?(:delayed_commit)
      raise Fluent::ConfigError, "<buffer> section is configured, but plugin '#{self.class}' doesn't support buffering"
    end
    @buffering = true
  else # no buffer sections
    if implement?(:synchronous)
      if !implement?(:buffered) && !implement?(:delayed_commit)
        if @as_secondary
          raise Fluent::ConfigError, "secondary plugin '#{self.class}' must support buffering, but doesn't."
        end
        @buffering = false
      else
        if @as_secondary
          # secondary plugin always works as buffered plugin without buffer instance
          @buffering = true
        else
          # @buffering.nil? shows that enabling buffering or not will be decided in lazy way in #start
          @buffering = nil
        end
      end
    else # buffered or delayed_commit is supported by `unless` of first line in this method
      @buffering = true
    end
  end
  # Enable to update record size metrics or not
  @enable_size_metrics = !!system_config.enable_size_metrics

  if @as_secondary
    if !@buffering && !@buffering.nil?
      raise Fluent::ConfigError, "secondary plugin '#{self.class}' must support buffering, but doesn't"
    end
  end

  if (@buffering || @buffering.nil?) && !@as_secondary
    # When @buffering.nil?, @buffer_config was initialized with default value for all parameters.
    # If so, this configuration MUST success.
    @chunk_keys = @buffer_config.chunk_keys.dup
    @chunk_key_time = !!@chunk_keys.delete('time')
    @chunk_key_tag = !!@chunk_keys.delete('tag')
    # validate remaining chunk keys: plain keys must match CHUNK_KEY_PATTERN,
    # record_accessor nested keys are allowed, bracket notation is not
    if @chunk_keys.any? { |key|
        begin
          k = Fluent::PluginHelper::RecordAccessor::Accessor.parse_parameter(key)
          if k.is_a?(String)
            k !~ CHUNK_KEY_PATTERN
          else
            if key.start_with?('$[')
              raise Fluent::ConfigError, "in chunk_keys: bracket notation is not allowed"
            else
              false
            end
          end
        rescue => e
          raise Fluent::ConfigError, "in chunk_keys: #{e.message}"
        end
      }
      raise Fluent::ConfigError, "chunk_keys specification includes invalid char"
    else
      @chunk_key_accessors = Hash[@chunk_keys.map { |key| [key.to_sym, Fluent::PluginHelper::RecordAccessor::Accessor.new(key)] }]
    end

    if @chunk_key_time
      raise Fluent::ConfigError, "<buffer ...> argument includes 'time', but timekey is not configured" unless @buffer_config.timekey
      Fluent::Timezone.validate!(@buffer_config.timekey_zone)
      @timekey_zone = @buffer_config.timekey_use_utc ? '+0000' : @buffer_config.timekey_zone
      @timekey = @buffer_config.timekey
      if @timekey <= 0
        raise Fluent::ConfigError, "timekey should be greater than 0. current timekey: #{@timekey}"
      end
      @timekey_use_utc = @buffer_config.timekey_use_utc
      @offset = Fluent::Timezone.utc_offset(@timekey_zone)
      @calculate_offset = @offset.respond_to?(:call) ? @offset : nil
      @output_time_formatter_cache = {}
    end

    if (@chunk_key_tag ? 1 : 0) + @chunk_keys.size >= CHUNKING_FIELD_WARN_NUM
      log.warn "many chunk keys specified, and it may cause too many chunks on your system."
    end

    # no chunk keys or only tags (chunking can be done without iterating event stream)
    @simple_chunking = !@chunk_key_time && @chunk_keys.empty?

    @flush_mode = @buffer_config.flush_mode
    if @flush_mode == :default
      if has_flush_interval
        log.info "'flush_interval' is configured at out side of <buffer>. 'flush_mode' is set to 'interval' to keep existing behaviour"
        @flush_mode = :interval
      else
        @flush_mode = (@chunk_key_time ? :lazy : :interval)
      end
    end

    # instantiate and configure the buffer plugin itself
    buffer_type = @buffer_config[:@type]
    buffer_conf = conf.elements(name: 'buffer').first || Fluent::Config::Element.new('buffer', '', {}, [])
    @buffer = Plugin.new_buffer(buffer_type, parent: self)
    @buffer.configure(buffer_conf)
    keep_buffer_config_compat
    @buffer.enable_update_timekeys if @chunk_key_time

    @flush_at_shutdown = @buffer_config.flush_at_shutdown
    if @flush_at_shutdown.nil?
      @flush_at_shutdown = if @buffer.persistent?
                             false
                           else
                             true # flush_at_shutdown is true in default for on-memory buffer
                           end
    elsif !@flush_at_shutdown && !@buffer.persistent?
      buf_type = Plugin.lookup_type_from_class(@buffer.class)
      log.warn "'flush_at_shutdown' is false, and buffer plugin '#{buf_type}' is not persistent buffer."
      log.warn "your configuration will lose buffered data at shutdown. please confirm your configuration again."
    end

    if (@flush_mode != :interval) && buffer_conf.has_key?('flush_interval')
      if buffer_conf.has_key?('flush_mode')
        raise Fluent::ConfigError, "'flush_interval' can't be specified when 'flush_mode' is not 'interval' explicitly: '#{@flush_mode}'"
      else
        log.warn "'flush_interval' is ignored because default 'flush_mode' is not 'interval': '#{@flush_mode}'"
      end
    end

    if @buffer.queued_chunks_limit_size.nil?
      @buffer.queued_chunks_limit_size = @buffer_config.flush_thread_count
    end
  end

  # set up the optional secondary output
  if @secondary_config
    raise Fluent::ConfigError, "Invalid <secondary> section for non-buffered plugin" unless @buffering
    raise Fluent::ConfigError, "<secondary> section cannot have <buffer> section" if @secondary_config.buffer
    raise Fluent::ConfigError, "<secondary> section cannot have <secondary> section" if @secondary_config.secondary
    if @buffer_config.retry_forever
      log.warn "<secondary> with 'retry_forever', only unrecoverable errors are moved to secondary"
    end

    secondary_type = @secondary_config[:@type]
    unless secondary_type
      secondary_type = conf['@type'] # primary plugin type
    end
    secondary_conf = conf.elements(name: 'secondary').first
    @secondary = Plugin.new_output(secondary_type)
    unless @secondary.respond_to?(:acts_as_secondary)
      raise Fluent::ConfigError, "Failed to setup secondary plugin in '#{conf['@type']}'. '#{secondary_type}' plugin in not allowed due to non buffered output"
    end
    @secondary.acts_as_secondary(self)
    @secondary.configure(secondary_conf)
    if (@secondary.class.to_s != "Fluent::Plugin::SecondaryFileOutput") &&
       (self.class != @secondary.class) &&
       (@custom_format || @secondary.implement?(:custom_format))
      log.warn "Use different plugin for secondary. Check the plugin works with primary like secondary_file", primary: self.class.to_s, secondary: @secondary.class.to_s
    end
  else
    @secondary = nil
  end

  self
end
# Copies the buffer's effective disable_chunk_backup value back into the
# output-side buffer config hash for backward compatibility.
def keep_buffer_config_compat
  # Need this to call `@buffer_config.disable_chunk_backup` just as before,
  # since some plugins may use this option in this way.
  @buffer_config[:disable_chunk_backup] = @buffer.disable_chunk_backup
end
# Starts the output: finalizes the buffered/non-buffered decision left
# open by #configure, wires #emit_events to the right implementation on
# the singleton class, and (for a buffering primary) starts the buffer,
# the flush threads and the enqueue thread.
def start
  super

  # lazy decision deferred from #configure for full-featured plugins
  if @buffering.nil?
    @buffering = prefer_buffered_processing
    if !@buffering && @buffer
      @buffer.terminate # it's not started, so terminate will be enough
      # At here, this plugin works as non-buffered plugin.
      # Un-assign @buffer not to show buffering metrics (e.g., in_monitor_agent)
      @buffer = nil
    end
  end

  if @buffering
    m = method(:emit_buffered)
    singleton_class.module_eval do
      define_method(:emit_events, m)
    end

    @custom_format = implement?(:custom_format)
    @enable_msgpack_streamer = @custom_format ? formatted_to_msgpack_binary : true
    @delayed_commit = if implement?(:buffered) && implement?(:delayed_commit)
                        prefer_delayed_commit
                      else
                        implement?(:delayed_commit)
                      end
    @delayed_commit_timeout = @buffer_config.delayed_commit_timeout
  else # !@buffering
    m = method(:emit_sync)
    singleton_class.module_eval do
      define_method(:emit_events, m)
    end
  end

  # secondary outputs reuse the primary's threads/buffer, so only a
  # buffering primary spawns its own machinery here
  if @buffering && !@as_secondary
    @retry = nil
    @retry_mutex = Mutex.new

    @buffer.start

    @output_enqueue_thread = nil
    @output_enqueue_thread_running = true

    @output_flush_threads = []
    @output_flush_threads_mutex = Mutex.new
    @output_flush_threads_running = true

    # mainly for test: detect enqueue works as code below:
    #   @output.interrupt_flushes
    #   # emits
    #   @output.enqueue_thread_wait
    @output_flush_interrupted = false
    @output_enqueue_thread_mutex = Mutex.new
    @output_enqueue_thread_waiting = false

    @dequeued_chunks = []
    @dequeued_chunks_mutex = Mutex.new

    @output_flush_thread_current_position = 0
    @buffer_config.flush_thread_count.times do |i|
      thread_title = "flush_thread_#{i}".to_sym
      thread_state = FlushThreadState.new(nil, nil, Mutex.new, ConditionVariable.new)
      thread = thread_create(thread_title) do
        flush_thread_run(thread_state)
      end
      thread_state.thread = thread
      @output_flush_threads_mutex.synchronize do
        @output_flush_threads << thread_state
      end
    end

    if !@under_plugin_development && (@flush_mode == :interval || @chunk_key_time)
      @output_enqueue_thread = thread_create(:enqueue_thread, &method(:enqueue_thread_run))
    end
  end
  @secondary.start if @secondary
end
# Lifecycle methods below propagate each phase to the secondary output
# and the buffer; the relative ordering of secondary/buffer/super calls
# differs per phase and is significant.

def after_start
  super
  @secondary.after_start if @secondary
end

def stop
  @secondary.stop if @secondary
  @buffer.stop if @buffering && @buffer
  super
end

# Flushes remaining data (when flush_at_shutdown) and stops the enqueue
# thread before #shutdown makes writes impossible.
def before_shutdown
  @secondary.before_shutdown if @secondary
  if @buffering && @buffer
    if @flush_at_shutdown
      force_flush
    end
    @buffer.before_shutdown
    # Need to ensure to stop enqueueing ... after #shutdown, we cannot write any data
    @output_enqueue_thread_running = false
    if @output_enqueue_thread && @output_enqueue_thread.alive?
      @output_enqueue_thread.wakeup
      @output_enqueue_thread.join
    end
  end
  super
end

def shutdown
  @secondary.shutdown if @secondary
  @buffer.shutdown if @buffering && @buffer
  super
end

# Rolls back uncommitted chunks and stops the flush threads.
def after_shutdown
  try_rollback_all if @buffering && !@as_secondary # rollback regardless with @delayed_commit, because secondary may do it
  @secondary.after_shutdown if @secondary

  if @buffering && @buffer
    @buffer.after_shutdown

    @output_flush_threads_running = false
    if @output_flush_threads && !@output_flush_threads.empty?
      @output_flush_threads.each do |state|
        # to wakeup thread and make it to stop by itself
        state.mutex.synchronize {
          if state.thread&.status
            state.next_clock = 0
            state.cond_var.signal
          end
        }
        Thread.pass
        state.thread.join
      end
    end
  end
  super
end

def close
  @buffer.close if @buffering && @buffer
  @secondary.close if @secondary
  super
end

def terminate
  @buffer.terminate if @buffering && @buffer
  @secondary.terminate if @secondary
  super
end
# Effective number of flush threads for this output: 0 when not
# buffering; a secondary output inherits the primary's buffer config.
def actual_flush_thread_count
  return 0 unless @buffering
  if @as_secondary
    @primary_instance.buffer_config.flush_thread_count
  else
    @buffer_config.flush_thread_count
  end
end
# Ensures `path` (filename or filepath) processable
# only by the current thread in the current process.
# For multiple workers, the lock is shared if `path` is the same value.
# For multiple threads, the lock is shared by all threads in the same process.
def synchronize_path(path)
  # take the inter-worker lock first, then the in-process thread lock
  synchronize_path_in_workers(path) do
    synchronize_in_threads do
      yield
    end
  end
end
# Runs the block under an inter-worker lock named by `path` when more
# than one worker is configured; with a single worker the block runs
# directly (no lock needed).
def synchronize_path_in_workers(path)
  multi_worker = system_config.workers > 1
  return yield unless multi_worker
  acquire_worker_lock(path) { yield }
end
# Runs the block under the in-process flush-thread mutex when more than
# one flush thread exists; otherwise runs it directly.
def synchronize_in_threads
  if actual_flush_thread_count > 1
    @flush_thread_mutex.synchronize { yield }
  else
    yield
  end
end
# Compatibility hook for plugins written in v0.12 styles: the modern
# API base class enables none of these features via the v0.12 path, so
# every known feature answers false here (compat layers override this).
def support_in_v12_style?(feature)
  case feature
  when :synchronous, :buffered, :delayed_commit, :custom_format
    false
  else
    raise ArgumentError, "unknown feature: #{feature}"
  end
end
# Reports whether the plugin subclass implements the given feature, by
# checking which API methods it defines directly (inherited defaults
# don't count), with a fallback to the v0.12 compatibility check.
def implement?(feature)
  own_methods = self.class.instance_methods(false)
  case feature
  when :synchronous
    own_methods.include?(:process) || support_in_v12_style?(:synchronous)
  when :buffered
    own_methods.include?(:write) || support_in_v12_style?(:buffered)
  when :delayed_commit
    own_methods.include?(:try_write)
  when :custom_format
    own_methods.include?(:format) || support_in_v12_style?(:custom_format)
  else
    raise ArgumentError, "Unknown feature for output plugin: #{feature}"
  end
end
# Validates that the placeholders in `str` match the configured chunk
# keys; each failing validator raises Fluent::ConfigError.
def placeholder_validate!(name, str)
  placeholder_validators(name, str).each(&:validate!)
end
# Builds the list of PlaceholderValidator instances applicable to the
# template `str`. A validator is created for a dimension (time/tag/keys)
# when either the template uses that placeholder kind or the chunking
# configuration requires it — so mismatches in both directions are caught.
# Defaults for time_key/tag_key/chunk_keys come from this plugin's
# configured chunking state.
def placeholder_validators(name, str, time_key = (@chunk_key_time && @buffer_config.timekey), tag_key = @chunk_key_tag, chunk_keys = @chunk_keys)
  validators = []

  sec, title, example = get_placeholders_time(str)
  if sec || time_key
    validators << PlaceholderValidator.new(name, str, :time, {sec: sec, title: title, example: example, timekey: time_key})
  end

  parts = get_placeholders_tag(str)
  if tag_key || !parts.empty?
    validators << PlaceholderValidator.new(name, str, :tag, {parts: parts, tagkey: tag_key})
  end

  keys = get_placeholders_keys(str)
  if chunk_keys && !chunk_keys.empty? || !keys.empty?
    validators << PlaceholderValidator.new(name, str, :keys, {keys: keys, chunkkeys: chunk_keys})
  end

  validators
end
# Validates one placeholder dimension (:time, :tag or :keys) of a
# configuration parameter against the chunking configuration, raising
# Fluent::ConfigError with a descriptive message on mismatch.
class PlaceholderValidator
  attr_reader :name, :string, :type, :argument

  VALID_TYPES = [:time, :tag, :keys]

  def initialize(name, str, type, arg)
    raise ArgumentError, "invalid type:#{type}" unless VALID_TYPES.include?(type)
    @name = name
    @string = str
    @type = type
    @argument = arg
  end

  def time?
    type == :time
  end

  def tag?
    type == :tag
  end

  def keys?
    type == :keys
  end

  # Dispatches to the type-specific validation (validate_time!/tag!/keys!).
  def validate!
    send("validate_#{@type}!")
  end

  # Checks that timestamp placeholders exist iff a timekey is configured,
  # and that they are fine-grained enough for the timekey.
  def validate_time!
    sec = @argument[:sec]
    title = @argument[:title]
    example = @argument[:example]
    timekey = @argument[:timekey]
    if timekey && !sec
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have timestamp placeholders for timekey #{timekey.to_i}"
    end
    if sec && !timekey
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' has timestamp placeholders, but chunk key 'time' is not configured"
    end
    if sec && timekey && timekey < sec
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have timestamp placeholder for #{title}('#{example}') for timekey #{timekey.to_i}"
    end
  end

  # Checks that ${tag}/${tag[N]} placeholders exist iff chunking by tag.
  def validate_tag!
    parts = @argument[:parts]
    tagkey = @argument[:tagkey]
    if tagkey && parts.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have tag placeholder"
    end
    if !tagkey && !parts.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' has tag placeholders, but chunk keys doesn't have keys... actually tag is not configured"
    end
  end

  # Checks that ${key} placeholders and configured chunk keys match exactly.
  def validate_keys!
    keys = @argument[:keys]
    chunk_keys = @argument[:chunkkeys]
    missing = (chunk_keys - keys).sort
    unless missing.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' doesn't have enough placeholders for keys #{missing.join(',')}"
    end
    extra = (keys - chunk_keys).sort
    unless extra.empty?
      raise Fluent::ConfigError, "Parameter '#{name}: #{string}' has placeholders, but chunk keys doesn't have keys #{extra.join(',')}"
    end
  end
end
# [seconds, unit name, representative strftime directive], finest first.
TIME_KEY_PLACEHOLDER_THRESHOLDS = [
  [1, :second, '%S'],
  [60, :minute, '%M'],
  [3600, :hour, '%H'],
  [86400, :day, '%d'],
]
TIMESTAMP_CHECK_BASE_TIME = Time.parse("2016-01-01 00:00:00 UTC")
# it's not validated to use timekey larger than 1 day
def get_placeholders_time(str)
  # Render the template at a fixed instant, then probe each time unit:
  # the smallest offset that changes the rendered output reveals the
  # finest timestamp placeholder in use. Returns the matching
  # [sec, title, example] triple, or nil when no time placeholder exists.
  rendered_at_base = TIMESTAMP_CHECK_BASE_TIME.strftime(str)
  TIME_KEY_PLACEHOLDER_THRESHOLDS.find { |sec, _title, _example|
    (TIMESTAMP_CHECK_BASE_TIME + sec).strftime(str) != rendered_at_base
  }
end
# -1 means whole tag
def get_placeholders_tag(str)
  # [["tag"],["tag[0]"]]
  placeholders = str.scan(CHUNK_TAG_PLACEHOLDER_PATTERN).map(&:first)
  placeholders.map { |ph|
    if ph == "tag"
      -1
    elsif ph =~ /^tag\[(-?\d+)\]$/
      $1.to_i
    end
  }.compact.sort
end
# Returns the sorted ${...} placeholder names in `str`, excluding the
# reserved "tag" and "chunk_id" placeholders.
def get_placeholders_keys(str)
  names = str.scan(CHUNK_KEY_PLACEHOLDER_PATTERN).map(&:first)
  names.reject { |name| name == "tag" || name == 'chunk_id' }.sort
end
# TODO: optimize this code
def extract_placeholders(str, chunk)
metadata = if chunk.is_a?(Fluent::Plugin::Buffer::Chunk)
chunk_passed = true
chunk.metadata
else
chunk_passed = false
# For existing plugins. Old plugin passes Chunk.metadata instead of Chunk
chunk
end
if metadata.empty?
str.sub(CHUNK_ID_PLACEHOLDER_PATTERN) {
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | true |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_exec_filter.rb | lib/fluent/plugin/out_exec_filter.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/output'
require 'fluent/env'
require 'fluent/config/error'
require 'json'
module Fluent::Plugin
class ExecFilterOutput < Output
Fluent::Plugin.register_output('exec_filter', self)
helpers :compat_parameters, :inject, :formatter, :parser, :extract, :child_process, :event_emitter
desc 'The command (program) to execute.'
config_param :command, :string
config_param :remove_prefix, :string, default: nil, deprecated: "use @label instead for event routing"
config_param :add_prefix, :string, default: nil, deprecated: "use @label instead for event routing"
config_section :inject do
config_set_default :time_type, :unixtime
end
config_section :format do
config_set_default :@type, 'tsv'
config_set_default :localtime, true
end
config_section :parse do
config_set_default :@type, 'tsv'
config_set_default :time_key, nil
config_set_default :time_format, nil
config_set_default :localtime, true
config_set_default :estimate_current_event, false
end
config_section :extract do
config_set_default :time_type, :float
end
config_section :buffer do
config_set_default :flush_mode, :interval
config_set_default :flush_interval, 1
end
config_param :tag, :string, default: nil
config_param :tag_key, :string, default: nil, deprecated: "use 'tag_key' in <inject>/<extract> instead"
config_param :time_key, :string, default: nil, deprecated: "use 'time_key' in <inject>/<extract> instead"
config_param :time_format, :string, default: nil, deprecated: "use 'time_format' in <inject>/<extract> instead"
desc 'The default block size to read if parser requires partial read.'
config_param :read_block_size, :size, default: 10240 # 10k
desc 'The number of spawned process for command.'
config_param :num_children, :integer, default: 1
desc 'Respawn command when command exit. ["none", "inf" or positive integer for times to respawn (default: none)]'
# nil, 'none' or 0: no respawn, 'inf' or -1: infinite times, positive integer: try to respawn specified times only
config_param :child_respawn, :string, default: nil
# 0: output logs for all of messages to emit
config_param :suppress_error_log_interval, :time, default: 0
attr_reader :formatter, :parser # for tests
KEYS_FOR_IN_AND_OUT = {
'tag_key' => ['in_tag_key', 'out_tag_key'],
'time_key' => ['in_time_key', 'out_time_key'],
'time_format' => ['in_time_format', 'out_time_format'],
}
COMPAT_INJECT_PARAMS = {
'in_tag_key' => 'tag_key',
'in_time_key' => 'time_key',
'in_time_format' => 'time_format',
}
COMPAT_FORMAT_PARAMS = {
'in_format' => '@type',
'in_keys' => 'keys',
}
COMPAT_PARSE_PARAMS = {
'out_format' => '@type',
'out_keys' => 'keys',
'out_stream_buffer_size' => 'stream_buffer_size',
}
COMPAT_EXTRACT_PARAMS = {
'out_tag_key' => 'tag_key',
'out_time_key' => 'time_key',
'out_time_format' => 'time_format',
}
# Migrates legacy top-level parameters into a config subsection: when
# `conf` has no <subsection_name> element yet but carries any of the
# legacy keys in `params` (legacy name => new name), a new subsection is
# appended with the renamed values. No-op otherwise.
def exec_filter_compat_parameters_copy_to_subsection!(conf, subsection_name, params)
  return unless conf.elements(subsection_name).empty?
  return if params.keys.none? { |legacy| conf.has_key?(legacy) }

  migrated = params.each_with_object({}) do |(legacy, current), acc|
    acc[current] = conf[legacy] if conf.has_key?(legacy)
  end
  conf.elements << Fluent::Config::Element.new(subsection_name, '', migrated, [])
end
# Converts all legacy exec_filter parameters in `conf` to the modern
# section-based layout: first fans shared keys (tag_key/time_key/
# time_format) out to their in_*/out_* variants, then migrates the
# variants into <inject>/<format>/<parse>/<extract> subsections.
def exec_filter_compat_parameters_convert!(conf)
  KEYS_FOR_IN_AND_OUT.each_pair do |inout, keys|
    if conf.has_key?(inout)
      keys.each do |k|
        conf[k] = conf[inout]
      end
    end
  end
  exec_filter_compat_parameters_copy_to_subsection!(conf, 'inject', COMPAT_INJECT_PARAMS)
  exec_filter_compat_parameters_copy_to_subsection!(conf, 'format', COMPAT_FORMAT_PARAMS)
  exec_filter_compat_parameters_copy_to_subsection!(conf, 'parse', COMPAT_PARSE_PARAMS)
  exec_filter_compat_parameters_copy_to_subsection!(conf, 'extract', COMPAT_EXTRACT_PARAMS)
end
# Configures exec_filter: migrates compat parameters, requires a tag
# source ('tag' or <extract> tag_key), builds formatter/parser, and
# parses the child_respawn policy into @respawns
# (0 = never, -1 = infinite, N = up to N times).
# Raises Fluent::ConfigError on invalid configuration.
def configure(conf)
  exec_filter_compat_parameters_convert!(conf)
  compat_parameters_convert(conf, :buffer)

  # a bare time_format implies string-typed time in inject/extract
  if inject_section = conf.elements('inject').first
    if inject_section.has_key?('time_format')
      inject_section['time_type'] ||= 'string'
    end
  end
  if extract_section = conf.elements('extract').first
    if extract_section.has_key?('time_format')
      extract_section['time_type'] ||= 'string'
    end
  end

  super

  if !@tag && (!@extract_config || !@extract_config.tag_key)
    raise Fluent::ConfigError, "'tag' or '<extract> tag_key </extract>' option is required on exec_filter output"
  end

  @formatter = formatter_create
  @parser = parser_create

  if @remove_prefix
    @removed_prefix_string = @remove_prefix + '.'
    @removed_length = @removed_prefix_string.length
  end
  if @add_prefix
    @added_prefix_string = @add_prefix + '.'
  end

  @respawns = if @child_respawn.nil? || (@child_respawn == 'none') || (@child_respawn == '0')
                0
              elsif (@child_respawn == 'inf') || (@child_respawn == '-1')
                -1
              elsif /^\d+$/.match?(@child_respawn)
                @child_respawn.to_i
              else
                # Fixed: was a bare `ConfigError`, which does not resolve under
                # `module Fluent::Plugin` lexical scope (would raise NameError);
                # qualify it like the other raise in this method.
                raise Fluent::ConfigError, "child_respawn option argument invalid: none(or 0), inf(or -1) or positive number"
              end

  @suppress_error_log_interval ||= 0
  @next_log_time = Time.now.to_i
end
# exec_filter spawns per-worker child processes, so it is safe under
# multi-worker deployment.
def multi_workers_ready?
  true
end
# Per-child bookkeeping; pid/readio/writeio are nil while the slot is dead.
ExecutedProcess = Struct.new(:mutex, :pid, :respawns, :readio, :writeio)

# Spawns @num_children copies of @command, wiring each child's stdout
# into #run for parsing. When child_respawn is enabled, a monitor thread
# restarts dead children (up to their remaining respawn budget).
def start
  super

  @children_mutex = Mutex.new
  @children = []
  @rr = 0 # round-robin cursor used by #write

  # Marks a child slot dead when its process exits and closes its pipes.
  exit_callback = ->(status){
    c = @children.find{|child| child.pid == status.pid }
    if c
      unless self.stopped?
        log.warn "child process exits with error code", code: status.to_i, status: status.exitstatus, signal: status.termsig
      end
      c.mutex.synchronize do
        c.writeio&.close rescue nil
        c.readio&.close rescue nil
        c.pid = c.readio = c.writeio = nil
      end
    end
  }

  # Records the spawned child's pid/pipes in its slot, then reads its
  # output until EOF (runs inside the child_process helper's thread).
  child_process_callback = ->(index, readio, writeio){
    pid = child_process_id
    c = @children[index]
    writeio.sync = true
    c.mutex.synchronize do
      c.pid = pid
      c.respawns = @respawns
      c.readio = readio
      c.writeio = writeio
    end

    run(readio)
  }

  execute_child_process = ->(index){
    child_process_execute("out_exec_filter_child#{index}".to_sym, @command, on_exit_callback: exit_callback) do |readio, writeio|
      child_process_callback.call(index, readio, writeio)
    end
  }

  @children_mutex.synchronize do
    @num_children.times do |i|
      @children << ExecutedProcess.new(Mutex.new, nil, 0, nil, nil)
      execute_child_process.call(i)
    end
  end

  if @respawns != 0
    thread_create(:out_exec_filter_respawn_monitor) do
      while thread_current_running?
        @children.each_with_index do |c, i|
          # a slot with pid==nil and remaining budget needs a respawn
          if c.mutex && c.mutex.synchronize{ c.pid.nil? && c.respawns != 0 }
            respawns = c.mutex.synchronize do
              c.respawns -= 1 if c.respawns > 0
              c.respawns
            end
            log.info "respawning child process", num: i, respawn_counter: respawns
            execute_child_process.call(i)
          end
        end
        sleep 0.2
      end
    end
  end
end
# Drops the child bookkeeping before the plugin helpers tear down the
# actual child processes in super.
def terminate
  @children = []
  super
end
# Strips the configured prefix (plus its trailing dot) from +tag+.
# "<prefix>.foo" becomes "foo", the bare "<prefix>." becomes "", and
# tags that do not carry the prefix are returned unchanged.
def tag_remove_prefix(tag)
  return tag unless @remove_prefix

  bare_prefix = (tag == @removed_prefix_string)
  has_prefix = tag.length > @removed_length && tag[0, @removed_length] == @removed_prefix_string
  return tag unless bare_prefix || has_prefix

  tag[@removed_length..-1] || ''
end
# Record separator appended when the formatter emits one line per event.
NEWLINE = "\n"

# Formats one event for the child process' stdin: strips/updates the tag,
# injects configured fields, then serializes with the configured formatter.
# Line-oriented formatters get exactly one trailing newline.
def format(tag, time, record)
  tag = tag_remove_prefix(tag)
  record = inject_values_to_record(tag, time, record)
  if @formatter.formatter_type == :text_per_line
    @formatter.format(tag, time, record).chomp + NEWLINE
  else
    @formatter.format(tag, time, record)
  end
end
# Writes a flushed chunk to one child process, chosen round-robin.
# Slots whose child is currently dead are skipped; if every slot has been
# tried without success the write raises, and the buffer retries the
# chunk according to the configured retry settings.
def write(chunk)
  try_times = 0
  while true
    r = @rr = (@rr + 1) % @children.length
    if @children[r].pid && writeio = @children[r].writeio
      chunk.write_to(writeio)
      break
    end
    try_times += 1
    raise "no healthy child processes exist" if try_times >= @children.length
  end
end
# Reads the child's stdout and feeds it to the configured parser, using
# the richest API the parser implements (IO > partial data > per-line >
# whole-buffer). Runs on the per-child helper thread until EOF.
def run(io)
  io.set_encoding(Encoding::ASCII_8BIT)
  case
  when @parser.implement?(:parse_io)
    @parser.parse_io(io, &method(:on_record))
  when @parser.implement?(:parse_partial_data)
    until io.eof?
      @parser.parse_partial_data(io.readpartial(@read_block_size), &method(:on_record))
    end
  when @parser.parser_type == :text_per_line
    io.each_line do |line|
      @parser.parse(line.chomp, &method(:on_record))
    end
  else
    @parser.parse(io.read, &method(:on_record))
  end
end
# Emits one record parsed from a child process' output. Tag and time are
# taken from the record when <extract> is configured, falling back to the
# static @tag / current time. Emission failures are logged (rate-limited
# by @suppress_error_log_interval) and routed to the error stream.
def on_record(time, record)
  tag = extract_tag_from_record(record)
  tag = @added_prefix_string + tag if tag && @add_prefix
  tag ||= @tag
  time ||= extract_time_from_record(record) || Fluent::EventTime.now
  router.emit(tag, time, record)
rescue => e
  if @suppress_error_log_interval == 0 || Time.now.to_i > @next_log_time
    log.error "exec_filter failed to emit", record: JSON.generate(record), error: e
    log.error_backtrace e.backtrace
    @next_log_time = Time.now.to_i + @suppress_error_log_interval
  end
  router.emit_error_event(tag, time, record, e) if tag && time && record
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser.rb | lib/fluent/plugin/parser.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/plugin/owned_by_mixin'
require 'fluent/error'
require 'fluent/mixin' # for TypeConverter
require 'fluent/time'
require 'fluent/plugin/string_util'
require 'serverengine/blocking_flag'
module Fluent
module Plugin
class Parser < Base
# Watchdog that aborts parser invocations running longer than +timeout+
# seconds by raising UncatchableError into the stuck thread.
class TimeoutChecker
  # This implementation now uses mutex because parser is typically used in input.
  # If this has a performance issue under high concurrent, use concurrent-ruby's map instead.
  def initialize(timeout)
    @map = {}                              # thread => time its current parse started
    @flag = ServerEngine::BlockingFlag.new # set to request checker shutdown
    @mutex = Mutex.new
    @timeout = timeout
    @timeout_checker = nil
  end

  # Starts the checker thread; it wakes every 0.5s and raises into any
  # registered thread whose parse started more than @timeout seconds ago.
  def start
    @thread = ::Thread.new {
      until @flag.wait_for_set(0.5)
        now = Time.now
        @mutex.synchronize {
          @map.keys.each { |th|
            time = @map[th]
            if now - time > @timeout
              @map.delete(th)
              th.raise UncatchableError, "parsing timed out"
            end
          }
        }
      end
    }
  end

  def stop
    @flag.set!
    @thread.join
  end

  # Runs the given block under timeout supervision for the current thread.
  def execute
    th = Thread.current
    @mutex.synchronize { @map[th] = Time.now }
    yield
  ensure
    # Need clean up here because if next event is delayed, incorrect exception will be raised in normal flow.
    @mutex.synchronize { @map.delete(th) }
  end
end
include OwnedByMixin
include TimeMixin::Parser

# Raised (wrapping the underlying error) when time extraction fails.
class ParserError < StandardError; end

configured_in :parse

### types can be specified as string-based hash style
# field1:type, field2:type, field3:type:option, field4:type:option
### or, JSON format
# {"field1":"type", "field2":"type", "field3":"type:option", "field4":"type:option"}
config_param :types, :hash, value_type: :string, default: nil

# available options are:
# array: (1st) delimiter
# time : type[, format, timezone] -> type should be a valid "time_type"(string/unixtime/float)
#      : format[, timezone]

config_param :time_key, :string, default: nil
config_param :null_value_pattern, :regexp, default: nil
config_param :null_empty_string, :bool, default: false
config_param :estimate_current_event, :bool, default: true
config_param :keep_time_key, :bool, default: false
config_param :timeout, :time, default: nil

# Value types accepted in the `types` parameter (see build_type_converters).
AVAILABLE_PARSER_VALUE_TYPES = ['string', 'integer', 'float', 'bool', 'time', 'array']

# for tests
attr_reader :type_converters

# How raw input is fed to a parser; subclasses report one of these
# from #parser_type.
PARSER_TYPES = [:text_per_line, :text, :binary]
# Declares how raw input should be fed to this parser; subclasses
# override when they are not line-oriented (see PARSER_TYPES).
def parser_type
  :text_per_line
end
def initialize
  super
  @timeout_checker = nil # created in #configure when `timeout` is set
end
# Builds the time parser and per-field type converters, and arms the
# timeout watchdog when `timeout` is configured.
def configure(conf)
  super

  @time_parser = time_parser_create
  @type_converters = build_type_converters(@types)
  @execute_convert_values = @type_converters || @null_value_pattern || @null_empty_string
  @timeout_checker = if @timeout
                       # Wrap #parse on this instance only: callers keep
                       # calling #parse but get the timeout-guarded path.
                       class << self
                         alias_method :parse_orig, :parse
                         alias_method :parse, :parse_with_timeout
                       end
                       TimeoutChecker.new(@timeout)
                     else
                       nil
                     end
end
# Starts the timeout watchdog thread when one was configured.
def start
  super
  @timeout_checker&.start
end
# Stops the timeout watchdog thread when one was configured.
def stop
  super
  @timeout_checker&.stop
end
# Abstract: subclasses must parse +text+ and yield (time, record) pairs.
def parse(text, &block)
  raise NotImplementedError, "Implement this method in child class"
end
# Timeout-guarded wrapper aliased over #parse in #configure. On timeout
# the event is dropped: (nil, nil) is yielded so callers (e.g. in_tail)
# can emit the unparsed line themselves.
def parse_with_timeout(text, &block)
  @timeout_checker.execute {
    parse_orig(text, &block)
  }
rescue UncatchableError
  log.warn "parsing timed out with #{self.class}: text = #{text}"
  # Return nil instead of raising error. in_tail or other plugin can emit broken line.
  yield nil, nil
end
# Deprecated alias of #parse, kept for old parser plugin API users.
def call(*a, &b)
  # Keep backward compatibility for existing plugins
  # TODO: warn when deprecated
  parse(*a, &b)
end
# True when this parser class itself defines the given optional API
# (:parse_io or :parse_partial_data); raises ArgumentError otherwise.
def implement?(feature)
  case feature
  when :parse_io, :parse_partial_data
    self.class.instance_methods(false).include?(feature)
  else
    raise ArgumentError, "Unknown feature for parser plugin: #{feature}"
  end
end
# Optional API: parse directly from an IO object (see #implement?).
def parse_io(io, &block)
  raise NotImplementedError, "Optional API #parse_io is not implemented"
end

# Optional API: parse a partial chunk of data (see #implement?).
def parse_partial_data(data, &block)
  raise NotImplementedError, "Optional API #parse_partial_data is not implemented"
end
# Extracts the event time from +record+'s time_key field, deleting the
# field unless keep_time_key is set. Falls back to the current time when
# estimate_current_event is enabled, or nil otherwise. Time-parse
# failures are re-raised as ParserError.
def parse_time(record)
  found = @time_key && record.respond_to?(:has_key?) && record.has_key?(@time_key)
  if found
    raw = @keep_time_key ? record[@time_key] : record.delete(@time_key)
    @time_parser.parse(raw)
  elsif @estimate_current_event
    Fluent::EventTime.now
  else
    nil
  end
rescue Fluent::TimeParser::TimeParseError => e
  raise ParserError, e.message
end
# def parse(text, &block)
# time, record = convert_values(time, record)
# yield time, record
# end
# Applies null-value replacement and per-field type conversion to the
# record in place. No-op unless #configure determined any conversion is
# actually needed.
def convert_values(time, record)
  return time, record unless @execute_convert_values

  record.each_key do |key|
    current = record[key]
    next unless current # nil (and false) values are always left as-is
    if current.is_a?(String) && string_like_null(current)
      record[key] = nil
    elsif @type_converters&.has_key?(key)
      record[key] = @type_converters[key].call(current)
    end
  end
  [time, record]
end
# True when +value+ should be treated as null: an empty string (when
# null_empty_string is on) or a match of null_value_pattern.
def string_like_null(value, null_empty_string = @null_empty_string, null_value_regexp = @null_value_pattern)
  (null_empty_string && value.empty?) ||
    (null_value_regexp && string_safe_encoding(value) { |s| null_value_regexp.match?(s) })
end
# String values treated as boolean true by the 'bool' converter.
TRUTHY_VALUES = ['true', 'yes', '1']

# Builds a {field_name => lambda} map from the `types` parameter, or nil
# when no types were configured. Raises Fluent::ConfigError on unknown
# value types.
def build_type_converters(types)
  return nil unless types

  converters = {}

  types.each_pair do |field_name, type_definition|
    type, option = type_definition.split(":", 2)
    unless AVAILABLE_PARSER_VALUE_TYPES.include?(type)
      raise Fluent::ConfigError, "unknown value conversion for key:'#{field_name}', type:'#{type}'"
    end

    conv = case type
           when 'string' then ->(v){ v.to_s }
           when 'integer' then ->(v){ v.to_i rescue v.to_s.to_i }
           when 'float' then ->(v){ v.to_f rescue v.to_s.to_f }
           when 'bool' then ->(v){ TRUTHY_VALUES.include?(v.to_s.downcase) }
           when 'time'
             # colon-separated: time:[timezone:]time_format
             # time_format is unixtime/float/string-time-format
             timep = if option
                       time_type = 'string' # estimate
                       timezone, time_format = option.split(':', 2)
                       unless Fluent::Timezone.validate(timezone)
                         timezone, time_format = nil, option
                       end
                       if Fluent::TimeMixin::TIME_TYPES.include?(time_format)
                         time_type, time_format = time_format, nil # unixtime/float
                       end
                       time_parser_create(type: time_type.to_sym, format: time_format, timezone: timezone)
                     else
                       time_parser_create(type: :string, format: nil, timezone: nil)
                     end
             # Unparseable time values become nil rather than raising.
             ->(v){ timep.parse(v) rescue nil }
           when 'array'
             delimiter = option ? option.to_s : ','
             ->(v){ string_safe_encoding(v.to_s){|s| s.split(delimiter) } }
           else
             raise "BUG: unknown type even after check: #{type}"
           end
    converters[field_name] = conv
  end

  converters
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_syslog.rb | lib/fluent/plugin/parser_syslog.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
require 'fluent/time'
module Fluent
module Plugin
class SyslogParser < Parser
Plugin.register_parser('syslog', self)

# TODO: Remove them since these regexps are no longer needed. but keep them for compatibility for now
# From existing TextParser pattern
REGEXP = /^(?<time>[^ ]*\s*[^ ]* [^ ]*) (?<host>[^ ]*) (?<ident>[^ :\[]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? *(?<message>.*)$/
# From in_syslog default pattern
REGEXP_WITH_PRI = /^\<(?<pri>[0-9]+)\>(?<time>[^ ]* {1,2}[^ ]* [^ ]*) (?<host>[^ ]*) (?<ident>[^ :\[]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? *(?<message>.*)$/

# RFC 5424 message body (after PRI/VERSION); interpolated with %s below.
REGEXP_RFC5424 = <<~'EOS'.chomp
  (?<time>[^ ]+) (?<host>[!-~]{1,255}) (?<ident>[!-~]{1,48}) (?<pid>[!-~]{1,128}) (?<msgid>[!-~]{1,32}) (?<extradata>(?:\-|(?:\[.*?(?<!\\)\])+))(?: (?<message>.+))?
EOS
REGEXP_RFC5424_NO_PRI = Regexp.new(<<~'EOS'.chomp % REGEXP_RFC5424, Regexp::MULTILINE)
  \A%s\z
EOS
REGEXP_RFC5424_WITH_PRI = Regexp.new(<<~'EOS'.chomp % REGEXP_RFC5424, Regexp::MULTILINE)
  \A<(?<pri>[0-9]{1,3})\>[1-9]\d{0,2} %s\z
EOS
# Cheap prefix check used by :auto mode to tell RFC 5424 from RFC 3164.
REGEXP_DETECT_RFC5424 = /^\<[0-9]{1,3}\>[1-9]\d{0,2}/

# Building blocks shared by the regexp and string engines: PRI and
# timestamp are consumed by hand, these regexps cover the remainder.
RFC3164_WITHOUT_TIME_AND_PRI_REGEXP = /(?<host>[^ ]*) (?<ident>[^ :\[]*)(?:\[(?<pid>[0-9]+)\])?(?:[^\:]*\:)? *(?<message>.*)$/
RFC3164_CAPTURES = RFC3164_WITHOUT_TIME_AND_PRI_REGEXP.names.freeze
RFC3164_PRI_REGEXP = /^<(?<pri>[0-9]{1,3})>/
RFC5424_WITHOUT_TIME_AND_PRI_REGEXP = /(?<host>[!-~]{1,255}) (?<ident>[!-~]{1,48}) (?<pid>[!-~]{1,128}) (?<msgid>[!-~]{1,32}) (?<extradata>(?:\-|(?:\[.*?(?<!\\)\])+))(?: (?<message>.+))?\z/m
RFC5424_CAPTURES = RFC5424_WITHOUT_TIME_AND_PRI_REGEXP.names.freeze
RFC5424_PRI_REGEXP = /^<(?<pri>\d{1,3})>\d\d{0,2}\s/

config_set_default :time_format, "%b %d %H:%M:%S"

desc 'If the incoming logs have priority prefix, e.g. <9>, set true'
config_param :with_priority, :bool, default: false
desc 'Specify protocol format'
config_param :message_format, :enum, list: [:rfc3164, :rfc5424, :auto], default: :rfc3164
desc 'Specify time format for event time for rfc5424 protocol'
config_param :rfc5424_time_format, :string, default: "%Y-%m-%dT%H:%M:%S.%L%z"
desc 'The parser type used to parse syslog message'
config_param :parser_engine, :enum, list: [:regexp, :string], default: :regexp, alias: :parser_type
desc 'support colonless ident in string parser'
config_param :support_colonless_ident, :bool, default: true
def initialize
  super
  @mutex = Mutex.new # guards the shared, stateful time parsers
  @regexp = nil
  @regexp3164 = nil
  @regexp5424 = nil
  @regexp_parser = nil
  @time_parser_rfc3164 = nil
  @time_parser_rfc5424 = nil
  @space_count_rfc3164 = nil
  @space_count_rfc5424 = nil
  @skip_space_count_rfc3164 = false
  @skip_space_count_rfc5424 = false
  @time_parser_rfc5424_without_subseconds = nil
end
# Selects the concrete #parse implementation (per message_format and
# parser_engine) by aliasing it onto this instance's singleton class,
# and prepares the matching time parsers.
def configure(conf)
  super

  @regexp_parser = @parser_engine == :regexp
  @regexp = case @message_format
            when :rfc3164
              if @regexp_parser
                class << self
                  alias_method :parse, :parse_rfc3164_regex
                end
              else
                class << self
                  alias_method :parse, :parse_rfc3164
                end
              end
              setup_time_parser_3164(@time_format)
              RFC3164_WITHOUT_TIME_AND_PRI_REGEXP
            when :rfc5424
              if @regexp_parser
                class << self
                  alias_method :parse, :parse_rfc5424_regex
                end
              else
                class << self
                  alias_method :parse, :parse_rfc5424
                end
              end
              # rfc5424_time_format wins unless time_format was set explicitly.
              @time_format = @rfc5424_time_format unless conf.has_key?('time_format')
              setup_time_parser_5424(@time_format)
              RFC5424_WITHOUT_TIME_AND_PRI_REGEXP
            when :auto
              class << self
                alias_method :parse, :parse_auto
              end
              # :auto needs both time parsers ready, format chosen per message.
              setup_time_parser_3164(@time_format)
              setup_time_parser_5424(@rfc5424_time_format)
              nil
            end

  if @regexp_parser
    @regexp3164 = RFC3164_WITHOUT_TIME_AND_PRI_REGEXP
    @regexp5424 = RFC5424_WITHOUT_TIME_AND_PRI_REGEXP
  end
end
# Prepares the RFC 3164 time parser. For the two default formats the
# timestamp has a fixed 15-char prefix, so parsing can skip the
# space-counting scan (@skip_space_count_rfc3164).
def setup_time_parser_3164(time_fmt)
  @time_parser_rfc3164 = time_parser_create(format: time_fmt)
  if ['%b %d %H:%M:%S', '%b %d %H:%M:%S.%N'].include?(time_fmt)
    @skip_space_count_rfc3164 = true
  end
  # Number of space-separated fields the timestamp occupies.
  @space_count_rfc3164 = time_fmt.squeeze(' ').count(' ') + 1
end
# Prepares the RFC 5424 time parsers: the configured format plus a
# subsecond-less fallback. Formats without spaces allow the fast
# single-index scan (@skip_space_count_rfc5424).
def setup_time_parser_5424(time_fmt)
  @time_parser_rfc5424 = time_parser_create(format: time_fmt)
  @time_parser_rfc5424_without_subseconds = time_parser_create(format: "%Y-%m-%dT%H:%M:%S%z")
  @skip_space_count_rfc5424 = time_fmt.count(' ').zero?
  @space_count_rfc5424 = time_fmt.squeeze(' ').count(' ') + 1
end
# Exposes the selected regexp and time format (used by tests only).
def patterns
  info = {}
  info['format'] = @regexp
  info['time_format'] = @time_format
  info
end
# Placeholder: replaced by one of the parse_rfc* implementations via
# alias_method in #configure.
def parse(text)
  # This is overwritten in configure
end
# Per-message dispatch for message_format :auto — RFC 5424 when the
# PRI/VERSION prefix is present, RFC 3164 otherwise, honoring the
# configured parser engine.
def parse_auto(text, &block)
  if REGEXP_DETECT_RFC5424.match?(text)
    @regexp_parser ? parse_rfc5424_regex(text, &block) : parse_rfc5424(text, &block)
  else
    @regexp_parser ? parse_rfc3164_regex(text, &block) : parse_rfc3164(text, &block)
  end
end
SPLIT_CHAR = ' '.freeze

# Regexp-engine RFC 3164 parser: consumes PRI and the timestamp by
# scanning, then hands the remainder to parse_plain.
def parse_rfc3164_regex(text, &block)
  idx = 0
  record = {}
  # PRI part, e.g. "<13>"
  if @with_priority
    if RFC3164_PRI_REGEXP.match?(text)
      v = text.index('>')
      # NOTE: [1..v] still includes '>', but String#to_i stops at it.
      record['pri'] = text[1..v].to_i # trim `<` and `>`
      idx = v + 1
    else
      yield(nil, nil)
      return
    end
  end
  i = idx - 1
  sq = false
  # Walk @space_count_rfc3164 space-separated fields to find where the
  # timestamp ends; sq remembers whether repeated spaces must be squeezed.
  @space_count_rfc3164.times do
    while text[i + 1] == SPLIT_CHAR
      sq = true
      i += 1
    end
    i = text.index(SPLIT_CHAR, i + 1)
  end
  time_str = sq ? text.slice(idx, i - idx).squeeze(SPLIT_CHAR) : text.slice(idx, i - idx)
  # The time parser keeps internal state, so serialize access.
  time = @mutex.synchronize { @time_parser_rfc3164.parse(time_str) }
  if @keep_time_key
    record['time'] = time_str
  end
  parse_plain(@regexp3164, time, text, i + 1, record, RFC3164_CAPTURES, &block)
end
# Regexp-engine RFC 5424 parser: consumes PRI/VERSION and the timestamp
# by scanning, then hands the remainder to parse_plain. Falls back to a
# subsecond-less time format when the primary format fails.
def parse_rfc5424_regex(text, &block)
  idx = 0
  record = {}
  if @with_priority
    if (m = RFC5424_PRI_REGEXP.match(text))
      record['pri'] = m['pri'].to_i
      idx = m.end(0)
    else
      yield(nil, nil)
      return
    end
  end
  i = idx - 1
  sq = false
  # Find the end of the timestamp by counting its space-separated fields.
  @space_count_rfc5424.times {
    while text[i + 1] == SPLIT_CHAR
      sq = true
      i += 1
    end
    i = text.index(SPLIT_CHAR, i + 1)
  }
  time_str = sq ? text.slice(idx, i - idx).squeeze(SPLIT_CHAR) : text.slice(idx, i - idx)
  # The time parsers keep internal state, so serialize access.
  time = @mutex.synchronize do
    begin
      @time_parser_rfc5424.parse(time_str)
    rescue Fluent::TimeParser::TimeParseError => e
      log.trace(e)
      @time_parser_rfc5424_without_subseconds.parse(time_str)
    end
  end
  if @keep_time_key
    record['time'] = time_str
  end
  parse_plain(@regexp5424, time, text, i + 1, record, RFC5424_CAPTURES, &block)
end
# Matches +re+ against +text+ starting at +idx+ and fills +record+ from
# the named captures, then yields (time, record).
# @param time [EventTime, nil] pre-parsed timestamp; estimated when nil
# @param idx [Integer] match start offset (avoids substring allocation)
# @param capture_list [Array<String>] capture names, precomputed for speed
def parse_plain(re, time, text, idx, record, capture_list, &block)
  matched = re.match(text, idx)
  unless matched
    yield nil, nil
    return
  end

  capture_list.each do |name|
    value = matched[name] rescue nil
    next unless value
    value.chomp! if name == "message" # drop the trailing newline only
    record[name] = value
  end

  time ||= Fluent::EventTime.now if @estimate_current_event
  yield time, record
end
def parse_rfc3164(text, &block)
pri = nil
cursor = 0
if @with_priority
if text.start_with?('<'.freeze)
i = text.index('>'.freeze, 1)
if i < 2
yield nil, nil
return
end
pri = text.slice(1, i - 1).to_i
cursor = i + 1
else
yield nil, nil
return
end
end
if @skip_space_count_rfc3164
# header part
time_size = 15 # skip Mmm dd hh:mm:ss
time_end = text[cursor + time_size]
if time_end == SPLIT_CHAR
time_str = text.slice(cursor, time_size)
cursor += 16 # time + ' '
elsif time_end == '.'.freeze
# support subsecond time
i = text.index(SPLIT_CHAR, time_size)
time_str = text.slice(cursor, i - cursor)
cursor = i + 1
else
yield nil, nil
return
end
else
i = cursor - 1
sq = false
@space_count_rfc3164.times do
while text[i + 1] == SPLIT_CHAR
sq = true
i += 1
end
i = text.index(SPLIT_CHAR, i + 1)
end
time_str = sq ? text.slice(idx, i - cursor).squeeze(SPLIT_CHAR) : text.slice(cursor, i - cursor)
cursor = i + 1
end
i = text.index(SPLIT_CHAR, cursor)
if i.nil?
yield nil, nil
return
end
host_size = i - cursor
host = text.slice(cursor, host_size)
cursor += host_size + 1
record = {'host' => host}
record['pri'] = pri if pri
i = text.index(SPLIT_CHAR, cursor)
# message part
msg = if i.nil? # for 'only non-space content case'
text.slice(cursor, text.bytesize)
else
if text[i - 1] == ':'.freeze
if text[i - 2] == ']'.freeze
left_braket_pos = text.index('['.freeze, cursor)
record['ident'] = text.slice(cursor, left_braket_pos - cursor)
record['pid'] = text.slice(left_braket_pos + 1, i - left_braket_pos - 3) # remove '[' / ']:'
else
record['ident'] = text.slice(cursor, i - cursor - 1)
end
text.slice(i + 1, text.bytesize)
else
if @support_colonless_ident
if text[i - 1] == ']'.freeze
left_braket_pos = text.index('['.freeze, cursor)
record['ident'] = text.slice(cursor, left_braket_pos - cursor)
record['pid'] = text.slice(left_braket_pos + 1, i - left_braket_pos - 2) # remove '[' / ']'
else
record['ident'] = text.slice(cursor, i - cursor)
end
text.slice(i + 1, text.bytesize)
else
text.slice(cursor, text.bytesize)
end
end
end
msg.chomp!
record['message'] = msg
time = @time_parser_rfc3164.parse(time_str)
record['time'] = time_str if @keep_time_key
yield time, record
end
NILVALUE = '-'.freeze
def parse_rfc5424(text, &block)
pri = nil
cursor = 0
if @with_priority
if text.start_with?('<'.freeze)
i = text.index('>'.freeze, 1)
if i < 2
yield nil, nil
return
end
pri = text.slice(1, i - 1).to_i
i = text.index(SPLIT_CHAR, i)
cursor = i + 1
else
yield nil, nil
return
end
end
# timestamp part
if @skip_space_count_rfc5424
i = text.index(SPLIT_CHAR, cursor)
time_str = text.slice(cursor, i - cursor)
cursor = i + 1
else
i = cursor - 1
sq = false
@space_count_rfc5424.times do
while text[i + 1] == SPLIT_CHAR
sq = true
i += 1
end
i = text.index(SPLIT_CHAR, i + 1)
end
time_str = sq ? text.slice(idx, i - cursor).squeeze(SPLIT_CHAR) : text.slice(cursor, i - cursor)
cursor = i + 1
end
# Repeat same code for the performance
# host part
i = text.index(SPLIT_CHAR, cursor)
unless i
yield nil, nil
return
end
slice_size = i - cursor
host = text.slice(cursor, slice_size)
cursor += slice_size + 1
# ident part
i = text.index(SPLIT_CHAR, cursor)
unless i
yield nil, nil
return
end
slice_size = i - cursor
ident = text.slice(cursor, slice_size)
cursor += slice_size + 1
# pid part
i = text.index(SPLIT_CHAR, cursor)
unless i
yield nil, nil
return
end
slice_size = i - cursor
pid = text.slice(cursor, slice_size)
cursor += slice_size + 1
# msgid part
i = text.index(SPLIT_CHAR, cursor)
unless i
yield nil, nil
return
end
slice_size = i - cursor
msgid = text.slice(cursor, slice_size)
cursor += slice_size + 1
record = {'host' => host, 'ident' => ident, 'pid' => pid, 'msgid' => msgid}
record['pri'] = pri if pri
# extradata part
ed_start = text[cursor]
if ed_start == NILVALUE
record['extradata'] = NILVALUE
cursor += 1
else
start = cursor
i = text.index('] '.freeze, cursor)
extradata = if i
diff = i + 1 - start # calculate ']' position
cursor += diff
text.slice(start, diff)
else # No message part case
cursor = text.bytesize
text.slice(start, cursor)
end
extradata.tr!("\\".freeze, ''.freeze)
record['extradata'] = extradata
end
# message part
if cursor != text.bytesize
msg = text.slice(cursor + 1, text.bytesize)
msg.chomp!
record['message'] = msg
end
time = begin
@time_parser_rfc5424.parse(time_str)
rescue Fluent::TimeParser::TimeParseError
@time_parser_rfc5424_without_subseconds.parse(time_str)
end
record['time'] = time_str if @keep_time_key
yield time, record
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_secondary_file.rb | lib/fluent/plugin/out_secondary_file.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "fileutils"
require "fluent/plugin/file_util"
require "fluent/plugin/output"
require "fluent/config/error"
module Fluent::Plugin
class SecondaryFileOutput < Output
Fluent::Plugin.register_output("secondary_file", self)
# Matches ${tag}, ${tag[N]} and ${variable} placeholders in paths.
PLACEHOLDER_REGEX = /\${(tag(\[\d+\])?|[\w.@-]+)}/

desc "The directory path of the output file."
config_param :directory, :string
desc "The basename of the output file."
config_param :basename, :string, default: "dump.bin"
desc "The flushed chunk is appended to existence file or not."
config_param :append, :bool, default: false
config_param :compress, :enum, list: [:text, :gzip], default: :text
# Validates placement (must be inside <secondary>), the basename, and
# that path placeholders are compatible with the primary plugin's buffer;
# also resolves permissions and the compression suffix.
def configure(conf)
  super

  unless @as_secondary
    raise Fluent::ConfigError, "This plugin can only be used in the <secondary> section"
  end

  if @basename.include?("/")
    raise Fluent::ConfigError, "basename should not include `/`"
  end

  @path_without_suffix = File.join(@directory, @basename)
  validate_compatible_with_primary_buffer!(@path_without_suffix)

  @suffix = case @compress
            when :text
              ""
            when :gzip
              ".gz"
            end

  # Fail fast at configure time if the target directory is not writable.
  test_path = @path_without_suffix
  unless Fluent::FileUtil.writable_p?(test_path)
    raise Fluent::ConfigError, "out_secondary_file: `#{@directory}` should be writable"
  end

  @dir_perm = system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION
  @file_perm = system_config.file_permission || Fluent::DEFAULT_FILE_PERMISSION
end
# Concurrent workers are safe: generate_path retries on path collisions
# and writes are protected by file locks.
def multi_workers_ready?
  true
end
# Writes (or appends) the chunk to the resolved path under an exclusive
# file lock, gzip-compressing the stream when configured.
def write(chunk)
  path_without_suffix = extract_placeholders(@path_without_suffix, chunk)
  generate_path(path_without_suffix) do |path|
    FileUtils.mkdir_p File.dirname(path), mode: @dir_perm

    case @compress
    when :text
      File.open(path, "ab", @file_perm) {|f|
        f.flock(File::LOCK_EX)
        chunk.write_to(f)
      }
    when :gzip
      File.open(path, "ab", @file_perm) {|f|
        f.flock(File::LOCK_EX)
        gz = Zlib::GzipWriter.new(f)
        chunk.write_to(gz)
        gz.close # flushes the gzip trailer before the File block closes f
      }
    end
  end
end
private
# Rejects path placeholders (time formats, ${tag}, ${var}) for which the
# primary plugin's buffer provides no chunk keys — otherwise
# extract_placeholders would produce broken paths at flush time.
def validate_compatible_with_primary_buffer!(path_without_suffix)
  placeholders = path_without_suffix.scan(PLACEHOLDER_REGEX).flat_map(&:first) # to trim suffix [\d+]

  if !@chunk_key_time && has_time_format?(path_without_suffix)
    raise Fluent::ConfigError, "out_secondary_file: basename or directory has an incompatible placeholder, remove time formats, like `%Y%m%d`, from basename or directory"
  end

  if !@chunk_key_tag && (ph = placeholders.find { |placeholder| placeholder.match?(/tag(\[\d+\])?/) })
    raise Fluent::ConfigError, "out_secondary_file: basename or directory has an incompatible placeholder #{ph}, remove tag placeholder, like `${tag}`, from basename or directory"
  end

  vars = placeholders.reject { |placeholder| placeholder.match?(/tag(\[\d+\])?/) || (placeholder == 'chunk_id') }
  if ph = vars.find { |v| !@chunk_keys.include?(v) }
    raise Fluent::ConfigError, "out_secondary_file: basename or directory has an incompatible placeholder #{ph}, remove variable placeholder, like `${varname}`, from basename or directory"
  end
end
# True when +str+ contains strftime directives — strftime is the
# identity on strings without any % directives.
def has_time_format?(str)
  rendered = Time.now.strftime(str)
  rendered != str
end
# Yields the path to write to and returns it. In append mode the path is
# fixed; otherwise a fresh ".N" index is chosen, retrying when another
# process/thread grabbed the same candidate first.
def generate_path(path_without_suffix)
  if @append
    path = "#{path_without_suffix}#{@suffix}"
    synchronize_path(path) do
      yield path
    end
    return path
  end

  begin
    i = 0
    loop do
      # NOTE: `path` remains visible after the loop because the earlier
      # assignment in the @append branch already introduced it into the
      # method's scope at parse time.
      path = "#{path_without_suffix}.#{i}#{@suffix}"
      break unless File.exist?(path)
      i += 1
    end
    synchronize_path(path) do
      # If multiple processes or threads select the same path and another
      # one entered this locking block first, the file should already
      # exist and this one should retry to find new path.
      raise FileAlreadyExist if File.exist?(path)
      yield path
    end
  rescue FileAlreadyExist
    retry
  end
  path
end

# Raised internally to restart path selection when a concurrent writer
# created the candidate file first.
class FileAlreadyExist < StandardError
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_gc_stat.rb | lib/fluent/plugin/in_gc_stat.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
module Fluent::Plugin
# Input plugin that periodically emits Ruby GC statistics (GC.stat)
# as events tagged with `tag`.
class GCStatInput < Fluent::Plugin::Input
  Fluent::Plugin.register_input('gc_stat', self)

  helpers :timer

  def initialize
    super
    @key_map = nil # symbol => string key map, built when use_symbol_keys is off
  end

  config_param :emit_interval, :time, default: 60
  config_param :use_symbol_keys, :bool, default: true
  config_param :tag, :string

  # Precomputes the key map once so on_timer avoids per-tick to_s calls.
  def configure(conf)
    super
    unless @use_symbol_keys
      @key_map = {}
      GC.stat.each_key { |key|
        @key_map[key] = key.to_s
      }
    end
  end

  # Each worker reports its own process' GC stats.
  def multi_workers_ready?
    true
  end

  def start
    super
    timer_execute(:in_gc_stat, @emit_interval, &method(:on_timer))
  end

  def shutdown
    super
  end

  # Emits one GC.stat snapshot, stringifying keys when configured.
  def on_timer
    now = Fluent::EventTime.now
    record = GC.stat
    unless @use_symbol_keys
      new_record = {}
      record.each_pair { |k, v|
        new_record[@key_map[k]] = v
      }
      record = new_record
    end
    router.emit(@tag, now, record)
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_none.rb | lib/fluent/plugin/parser_none.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
require 'fluent/time'
module Fluent
module Plugin
# Parser that performs no parsing: the raw text is wrapped into a
# single-field record under `message_key`.
class NoneParser < Parser
  Plugin.register_parser('none', self)

  desc 'Field name to contain logs'
  config_param :message_key, :string, default: 'message'

  # Yields (time, record); time is the current time when estimation is
  # enabled, nil otherwise.
  def parse(text)
    event_time = @estimate_current_event ? Fluent::EventTime.now : nil
    yield event_time, { @message_key => text }
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_stream.rb | lib/fluent/plugin/out_stream.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'socket'
require 'fileutils'
require 'fluent/output'
require 'fluent/event'
module Fluent
# obsolete
# obsolete
# Base for the legacy tcp/unix stream outputs: sends buffered chunks in
# PackedForward format over a socket created by subclasses' #connect.
class StreamOutput < BufferedOutput
  config_param :send_timeout, :time, default: 60

  helpers :compat_parameters

  def configure(conf)
    compat_parameters_convert(conf, :buffer)
    super
  end

  # Serializes an event stream as [tag, msgpacked-events] (PackedForward).
  def format_stream(tag, es)
    # use PackedForward
    [tag, es.to_msgpack_stream].to_msgpack
  end

  # Sends one chunk over a fresh socket with linger/send timeouts applied;
  # the socket is always closed afterwards.
  def write(chunk)
    sock = connect
    begin
      opt = [1, @send_timeout.to_i].pack('I!I!') # { int l_onoff; int l_linger; }
      sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_LINGER, opt)

      opt = [@send_timeout.to_i, 0].pack('L!L!') # struct timeval
      sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, opt)

      chunk.write_to(sock)
    ensure
      sock.close
    end
  end

  # Flushes buffered chunks into a secondary output; non-stream
  # secondaries get the chunks unpacked back into event streams.
  def flush_secondary(secondary)
    unless secondary.is_a?(StreamOutput)
      secondary = ReformatWriter.new(secondary)
    end
    @buffer.pop(secondary)
  end

  # Adapter that re-emits PackedForward chunk contents through a
  # conventional (emit-based) output.
  class ReformatWriter
    def initialize(secondary)
      @secondary = secondary
    end

    def write(chunk)
      chain = NullOutputChain.instance
      chunk.open {|io|
        # TODO use MessagePackIoEventStream
        u = Fluent::MessagePackFactory.msgpack_unpacker(io)
        begin
          u.each {|(tag,entries)|
            es = MultiEventStream.new
            entries.each {|o|
              es.add(o[0], o[1])
            }
            @secondary.emit(tag, es, chain)
          }
        rescue EOFError
        end
      }
    end
  end
end
# obsolete
# obsolete
# Legacy raw-TCP stream output; kept only so old configs still load.
class TcpOutput < StreamOutput
  Plugin.register_output('tcp', self)

  LISTEN_PORT = 24224

  def initialize
    super
    log.warn "'tcp' output is obsoleted and will be removed. Use 'forward' instead."
    log.warn "see 'forward' section in https://docs.fluentd.org/ for the high-availability configuration."
  end

  config_param :port, :integer, default: LISTEN_PORT
  config_param :host, :string

  def configure(conf)
    super
  end

  # Opens a fresh TCP connection for each chunk written by StreamOutput#write.
  def connect
    TCPSocket.new(@host, @port)
  end
end
# obsolete
# obsolete
# Legacy unix-domain-socket stream output; kept only so old configs still load.
class UnixOutput < StreamOutput
  Plugin.register_output('unix', self)

  def initialize
    super
    log.warn "'unix' output is obsoleted and will be removed."
  end

  config_param :path, :string

  def configure(conf)
    super
  end

  # Opens a fresh unix domain socket for each chunk written by StreamOutput#write.
  def connect
    UNIXSocket.new(@path)
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_tsv.rb | lib/fluent/plugin/parser_tsv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
module Fluent
module Plugin
# Parses tab (or custom-delimiter) separated lines into records keyed by the
# configured `keys` list, in order.
class TSVParser < Parser
  Plugin.register_parser('tsv', self)

  desc 'Names of fields included in each lines'
  config_param :keys, :array, value_type: :string
  desc 'The delimiter character (or string) of TSV values'
  config_param :delimiter, :string, default: "\t"

  def configure(conf)
    super
    # Cache the field count so each parse splits into at most this many
    # columns (the last column keeps any embedded delimiters).
    @key_num = @keys.length
  end

  # Splits one line, zips values with the configured keys, extracts the time
  # (via Parser#parse_time) and yields `time, record`. Missing trailing
  # columns produce nil values.
  def parse(text)
    values = text.split(@delimiter, @key_num)
    record = @keys.zip(values).to_h # idiomatic replacement for Hash[keys.zip(values)]
    time, record = convert_values(parse_time(record), record)
    yield time, record
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_monitor_agent.rb | lib/fluent/plugin/in_monitor_agent.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'json'
require 'fluent/config/types'
require 'fluent/plugin/input'
require 'fluent/plugin/output'
require 'fluent/plugin/multi_output'
require 'fluent/plugin/filter'
module Fluent::Plugin
# Input plugin exposing Fluentd's internal plugin state over HTTP
# (/api/plugins, /api/config and their .json variants). When `tag` is set it
# also emits the same metrics as events every `emit_interval` seconds.
class MonitorAgentInput < Input
Fluent::Plugin.register_input('monitor_agent', self)
# timer: periodic metrics emission; http_server: serves the API endpoints.
helpers :timer, :thread, :http_server
desc 'The address to bind to.'
config_param :bind, :string, default: '0.0.0.0'
desc 'The port to listen to.'
config_param :port, :integer, default: 24220
desc 'The tag with which internal metrics are emitted.'
config_param :tag, :string, default: nil
desc 'Determine the rate to emit internal metrics as events.'
config_param :emit_interval, :time, default: 60
desc 'Determine whether to include the config information.'
config_param :include_config, :bool, default: true
desc 'Determine whether to include the retry information.'
config_param :include_retry, :bool, default: true
# Request handlers for the monitoring HTTP endpoints. `agent` is the
# MonitorAgentInput instance, used for plugin lookups and option defaults.
class APIHandler
def initialize(agent)
@agent = agent
end
# GET /api/plugins : plugin list rendered as LTSV, one plugin per line.
def plugins_ltsv(req)
list = build_object(build_option(req))
render_ltsv(list)
end
# GET /api/plugins.json : plugin list wrapped in {"plugins": [...]}.
def plugins_json(req)
opts = build_option(req)
obj = build_object(opts)
render_json({ 'plugins' => obj }, pretty_json: opts[:pretty_json])
end
# GET /api/config : process info + supervisor options as one LTSV row.
def config_ltsv(_req)
obj = {
'pid' => Process.pid,
'ppid' => Process.ppid,
'version' => Fluent::VERSION,
}.merge(@agent.fluentd_opts)
render_ltsv([obj])
end
# GET /api/config.json : same data as config_ltsv, rendered as JSON.
def config_json(req)
obj = {
'pid' => Process.pid,
'ppid' => Process.ppid,
'version' => Fluent::VERSION,
}.merge(@agent.fluentd_opts)
opts = build_option(req)
render_json(obj, pretty_json: opts[:pretty_json])
end
private
# Renders an error payload; extra keyword params are merged into the body.
def render_error_json(code:, msg:, pretty_json: nil, **additional_params)
resp = additional_params.merge('message' => msg)
render_json(resp, code: code, pretty_json: pretty_json)
end
# Returns a [status, headers, body] triple with a JSON body.
def render_json(obj, code: 200, pretty_json: nil)
body =
if pretty_json
JSON.pretty_generate(obj)
else
obj.to_json
end
[code, { 'Content-Type' => 'application/json' }, body]
end
# Renders an array of hashes as LTSV. Values are first round-tripped through
# JSON to normalize non-primitive types; arrays are comma-joined and nested
# hashes are dropped from the output.
def render_ltsv(obj, code: 200)
normalized = JSON.parse(obj.to_json)
text = ''
normalized.each do |hash|
row = []
hash.each do |k, v|
if v.is_a?(Array)
row << "#{k}:#{v.join(',')}"
elsif v.is_a?(Hash)
next
else
row << "#{k}:#{v}"
end
end
text << row.join("\t") << "\n"
end
[code, { 'Content-Type' => 'text/plain' }, text]
end
# Selects which plugins to report, based on the query string:
# ?tag= (match pattern) > ?@id=/?id= > ?@type=/?type= > all plugins.
def build_object(opts)
qs = opts[:query]
if tag = qs['tag'.freeze].first
# ?tag= to search an output plugin by match pattern
if obj = @agent.plugin_info_by_tag(tag, opts)
list = [obj]
else
list = []
end
elsif plugin_id = (qs['@id'.freeze].first || qs['id'.freeze].first)
# ?@id= to search a plugin by 'id <plugin_id>' config param
if obj = @agent.plugin_info_by_id(plugin_id, opts)
list = [obj]
else
list = []
end
elsif plugin_type = (qs['@type'.freeze].first || qs['type'.freeze].first)
# ?@type= to search plugins by 'type <type>' config param
list = @agent.plugins_info_by_type(plugin_type, opts)
else
# otherwise show all plugins
list = @agent.plugins_info_all(opts)
end
list
end
# Builds the opts hash consumed by the lookup methods from the query string.
# with_config/with_retry default to the plugin's configured values.
def build_option(req)
qs = Hash.new { |_, _| [] }
# parse ?=query string
qs.merge!(req.query || {})
# if ?debug=1 is set, set :with_debug_info for get_monitor_info
# and :pretty_json for render_json_error
opts = { query: qs }
if qs['debug'.freeze].first
opts[:with_debug_info] = true
opts[:pretty_json] = true
end
if ivars = qs['with_ivars'.freeze].first
opts[:ivars] = ivars.split(',')
end
if with_config = qs['with_config'.freeze].first
opts[:with_config] = Fluent::Config.bool_value(with_config)
else
opts[:with_config] = @agent.include_config
end
if with_retry = qs['with_retry'.freeze].first
opts[:with_retry] = Fluent::Config.bool_value(with_retry)
else
opts[:with_retry] = @agent.include_retry
end
opts
end
end
def initialize
super
# NOTE(review): despite the name, @first_warn becomes true once the first
# NoMethodError has been logged; it is a "warned already" latch (see
# get_monitor_info).
@first_warn = false
end
def configure(conf)
super
# Each worker gets its own listen port: configured base port + worker id.
@port += fluentd_worker_id
end
def multi_workers_ready?
true
end
# Fallback Rack-style app for unknown paths: always answers 404 with a fixed
# JSON body.
class NotFoundJson
  BODY = JSON.generate('message' => 'Not found')

  def self.call(_req)
    [404, { 'Content-Type' => 'application/json' }, BODY]
  end
end
# Boots the monitoring HTTP server and, if `tag` is configured, a timer that
# periodically emits plugin metrics as regular events.
def start
super
log.debug { "listening monitoring http server on http://#{@bind}:#{@port}/api/plugins for worker#{fluentd_worker_id}" }
api_handler = APIHandler.new(self)
http_server_create_http_server(:in_monitor_http_server_helper, addr: @bind, port: @port, logger: log, default_app: NotFoundJson) do |serv|
serv.get('/api/plugins') { |req| api_handler.plugins_ltsv(req) }
serv.get('/api/plugins.json') { |req| api_handler.plugins_json(req) }
serv.get('/api/config') { |req| api_handler.config_ltsv(req) }
serv.get('/api/config.json') { |req| api_handler.config_json(req) }
end
if @tag
log.debug { "tag parameter is specified. Emit plugins info to '#{@tag}'" }
# config/retry details are excluded from the emitted records.
opts = {with_config: false, with_retry: false}
timer_execute(:in_monitor_agent_emit, @emit_interval, repeat: true) {
es = Fluent::MultiEventStream.new
now = Fluent::EventTime.now
plugins_info_all(opts).each { |record|
es.add(now, record)
}
router.emit_stream(@tag, es)
}
end
end
# They are deprecated but remain for compatibility
# Lambdas evaluated via instance_exec in each monitored plugin's context
# (see get_monitor_info); throw(:skip) omits the key for that plugin.
MONITOR_INFO = {
'output_plugin' => ->(){ is_a?(::Fluent::Plugin::Output) },
'buffer_queue_length' => ->(){ throw(:skip) unless instance_variable_defined?(:@buffer) && !@buffer.nil? && @buffer.is_a?(::Fluent::Plugin::Buffer); @buffer.queue.size },
'buffer_timekeys' => ->(){ throw(:skip) unless instance_variable_defined?(:@buffer) && !@buffer.nil? && @buffer.is_a?(::Fluent::Plugin::Buffer); @buffer.timekeys },
'buffer_total_queued_size' => ->(){ throw(:skip) unless instance_variable_defined?(:@buffer) && !@buffer.nil? && @buffer.is_a?(::Fluent::Plugin::Buffer); @buffer.stage_size + @buffer.queue_size },
'retry_count' => ->(){ respond_to?(:num_errors) ? num_errors : nil },
}
# Collects every plugin instance the root agent knows about: inputs, outputs
# and filters at the top level plus the outputs/filters of every <label>.
def all_plugins
  root = Fluent::Engine.root_agent
  plugins = []
  plugins.concat(root.inputs)
  plugins.concat(root.outputs)
  plugins.concat(root.filters)
  # TODO: Add label name to outputs / filters for identifying plugins
  root.labels.each do |_name, label|
    plugins.concat(label.outputs)
    plugins.concat(label.filters)
  end
  plugins
end
# try to match the tag and get the info from the matched output plugin
# TODO: Support output in label
# NOTE(review): peeks into EventRouter's private @match_rules; first rule
# whose collector is an output wins.
def plugin_info_by_tag(tag, opts={})
matches = Fluent::Engine.root_agent.event_router.instance_variable_get(:@match_rules)
matches.each { |rule|
if rule.match?(tag)
if rule.collector.is_a?(Fluent::Plugin::Output) || rule.collector.is_a?(Fluent::Output)
return get_monitor_info(rule.collector, opts)
end
end
}
nil
end
# search a plugin by plugin_id
# Returns the monitor info hash for the plugin with the given '@id', or nil.
def plugin_info_by_id(plugin_id, opts={})
found = all_plugins.find {|pe|
pe.respond_to?(:plugin_id) && pe.plugin_id.to_s == plugin_id
}
if found
get_monitor_info(found, opts)
else
nil
end
end
# This method returns an array because
# multiple plugins could have the same type
# The `rescue nil` guard skips plugins whose config is unavailable.
def plugins_info_by_type(type, opts={})
array = all_plugins.select {|pe|
(pe.config['@type'] == type) rescue nil
}
array.map {|pe|
get_monitor_info(pe, opts)
}
end
# Monitor info for every known plugin (used by /api/plugins and the timer).
def plugins_info_all(opts={})
all_plugins.map {|pe|
get_monitor_info(pe, opts)
}
end
# Instance variables never exposed via the debug/ivars options (bulky or
# sensitive config trees).
IGNORE_ATTRIBUTES = %i(@config_root_section @config @masked_config)
# get monitor info from the plugin `pe` and return a hash object
# opts: :with_config, :with_retry, :with_debug_info, :ivars (see build_option).
def get_monitor_info(pe, opts={})
obj = {}
# Common plugin information
obj['plugin_id'] = pe.plugin_id
obj['plugin_category'] = plugin_category(pe)
obj['type'] = pe.config['@type']
obj['config'] = pe.config if opts[:with_config]
# run MONITOR_INFO in plugins' instance context and store the info to obj
MONITOR_INFO.each_pair {|key,code|
begin
# MONITOR_INFO lambdas throw(:skip) when the metric doesn't apply.
catch(:skip) do
obj[key] = pe.instance_exec(&code)
end
rescue NoMethodError => e
# Log this only once per process (@first_warn is the latch).
unless @first_warn
log.error "NoMethodError in monitoring plugins", key: key, plugin: pe.class, error: e
log.error_backtrace
@first_warn = true
end
rescue => e
log.warn "unexpected error in monitoring plugins", key: key, plugin: pe.class, error: e
end
}
# Newer plugins expose metrics via #statistics; merge whichever section
# ('output'/'filter'/'input') the plugin provides.
if pe.respond_to?(:statistics)
obj.merge!(pe.statistics.dig('output') || {})
obj.merge!(pe.statistics.dig('filter') || {})
obj.merge!(pe.statistics.dig('input') || {})
end
obj['retry'] = get_retry_info(pe.retry) if opts[:with_retry] && pe.instance_variable_defined?(:@retry)
# include all instance variables if :with_debug_info is set
if opts[:with_debug_info]
iv = {}
pe.instance_eval do
instance_variables.each {|sym|
next if IGNORE_ATTRIBUTES.include?(sym)
key = sym.to_s[1..-1] # removes first '@'
iv[key] = instance_variable_get(sym)
}
end
obj['instance_variables'] = iv
elsif ivars = opts[:ivars]
# ?with_ivars=a,b exposes only the requested instance variables.
iv = {}
ivars.each {|name|
iname = "@#{name}"
iv[name] = pe.instance_variable_get(iname) if pe.instance_variable_defined?(iname)
}
obj['instance_variables'] = iv
end
obj
end
# Public key -> instance variable of the retry state object.
RETRY_INFO = {
  'start' => '@start',
  'steps' => '@steps',
  'next_time' => '@next_time',
}

# Snapshots the retry state machine of a plugin: reads each RETRY_INFO
# instance variable off `pe_retry`. Returns an empty hash when no retry is
# in progress (pe_retry is nil).
def get_retry_info(pe_retry)
  return {} unless pe_retry

  RETRY_INFO.each_with_object({}) do |(key, ivar), info|
    info[key] = pe_retry.instance_variable_get(ivar)
  end
end
# Maps a plugin instance to its reported category string.
def plugin_category(pe)
case pe
when Fluent::Plugin::Input
'input'.freeze
when Fluent::Plugin::Output, Fluent::Plugin::MultiOutput, Fluent::Plugin::BareOutput
'output'.freeze
when Fluent::Plugin::Filter
'filter'.freeze
else
'unknown'.freeze
end
end
# Memoized supervisor options (pid-independent process configuration).
def fluentd_opts
@fluentd_opts ||= get_fluentd_opts
end
# Finds the (single) live Fluent::Supervisor instance via an ObjectSpace
# scan and copies its options; returns {} when none exists (e.g. tests).
def get_fluentd_opts
opts = {}
ObjectSpace.each_object(Fluent::Supervisor) { |obj|
opts.merge!(obj.options)
break
}
opts
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_copy.rb | lib/fluent/plugin/out_copy.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/multi_output'
require 'fluent/config/error'
require 'fluent/event'
module Fluent::Plugin
# Fans out every event stream to all configured <store> outputs. Per-store
# modifiers in the <store ARG> argument: 'ignore_error' (don't fail the whole
# emit) and 'ignore_if_prev_success' (skip if the previous store succeeded).
class CopyOutput < MultiOutput
Fluent::Plugin.register_output('copy', self)
desc 'If true, pass different record to each `store` plugin.'
config_param :deep_copy, :bool, default: false, deprecated: "use 'copy_mode' parameter instead"
desc 'Pass different record to each `store` plugin by specified method'
config_param :copy_mode, :enum, list: [:no_copy, :shallow, :deep, :marshal], default: :no_copy
attr_reader :ignore_errors, :ignore_if_prev_successes
def initialize
super
# Parallel arrays, indexed like @stores/outputs.
@ignore_errors = []
@ignore_if_prev_successes = []
end
def configure(conf)
super
# nil when copy_mode is :no_copy; otherwise a proc that clones the stream.
@copy_proc = gen_copy_proc
@stores.each_with_index { |store, i|
# 'ignore_if_prev_success' on the first store can never trigger.
if i == 0 && store.arg.include?('ignore_if_prev_success')
raise Fluent::ConfigError, "ignore_if_prev_success must specify 2nd or later <store> directives"
end
@ignore_errors << (store.arg.include?('ignore_error'))
@ignore_if_prev_successes << (store.arg.include?('ignore_if_prev_success'))
}
# All stores ignore errors but none is conditional: likely a misconfiguration.
if @ignore_errors.uniq.size == 1 && @ignore_errors.include?(true) && !@ignore_if_prev_successes.include?(true)
log.warn "ignore_errors are specified in all <store>, but ignore_if_prev_success is not specified. Is this intended?"
end
end
def multi_workers_ready?
true
end
# Emits `es` to each store in order. Non-repeatable streams are materialized
# first because they are consumed once per store.
def process(tag, es)
unless es.repeatable?
m = Fluent::MultiEventStream.new
es.each {|time,record|
m.add(time, record)
}
es = m
end
success = Array.new(outputs.size)
outputs.each_with_index do |output, i|
begin
if i > 0 && success[i - 1] && @ignore_if_prev_successes[i]
log.debug "ignore copy because prev_success in #{output.plugin_id}", index: i
else
output.emit_events(tag, @copy_proc ? @copy_proc.call(es) : es)
success[i] = true
end
rescue => e
if @ignore_errors[i]
log.error "ignore emit error in #{output.plugin_id}", error: e
else
raise e
end
end
end
end
private
# Builds the stream-cloning proc for the configured copy_mode:
#   :no_copy -> nil (stores share the stream)
#   :shallow -> dup of the stream object (records still shared)
#   :deep    -> records round-tripped through msgpack
#   :marshal -> records round-tripped through Marshal
def gen_copy_proc
# Deprecated deep_copy=true maps onto shallow mode.
@copy_mode = :shallow if @deep_copy
case @copy_mode
when :no_copy
nil
when :shallow
Proc.new { |es| es.dup }
when :deep
Proc.new { |es|
packer = Fluent::MessagePackFactory.msgpack_packer
times = []
records = []
es.each { |time, record|
times << time
packer.pack(record)
}
Fluent::MessagePackFactory.msgpack_unpacker.feed_each(packer.full_pack) { |record|
records << record
}
Fluent::MultiEventStream.new(times, records)
}
when :marshal
Proc.new { |es|
new_es = Fluent::MultiEventStream.new
es.each { |time, record|
new_es.add(time, Marshal.load(Marshal.dump(record)))
}
new_es
}
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/base.rb | lib/fluent/plugin/base.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/configurable'
require 'fluent/system_config'
module Fluent
module Plugin
# Common base class for all plugin types: tracks lifecycle state, worker id,
# per-worker locking and the event router context.
class Base
include Configurable
include SystemConfig::Mixin
# One boolean per lifecycle phase; flipped by the corresponding method below.
State = Struct.new(:configure, :start, :after_start, :stop, :before_shutdown, :shutdown, :after_shutdown, :close, :terminate)
attr_accessor :under_plugin_development
def initialize
@log = nil
super
@fluentd_lock_dir = ENV['FLUENTD_LOCK_DIR']
@_state = State.new(false, false, false, false, false, false, false, false, false)
@_context_router = nil
@_fluentd_worker_id = nil
@under_plugin_development = false
end
# Overridden by plugin types that own an event router (e.g. inputs).
def has_router?
false
end
def plugin_root_dir
nil # override this in plugin_id.rb
end
# Worker id from serverengine's environment; 0 for standalone mode. Memoized.
def fluentd_worker_id
return @_fluentd_worker_id if @_fluentd_worker_id
@_fluentd_worker_id = (ENV['SERVERENGINE_WORKER_ID'] || 0).to_i
@_fluentd_worker_id
end
def configure(conf)
raise ArgumentError, "BUG: type of conf must be Fluent::Config::Element, but #{conf.class} is passed." unless conf.is_a?(Fluent::Config::Element)
# Restrict the visible worker count when this element targets specific workers.
if conf.for_this_worker? || (Fluent::Engine.supervisor_mode && !conf.for_every_workers?)
system_config_override(workers: conf.target_worker_ids.size)
end
super(conf, system_config.strict_config_value)
@_state ||= State.new(false, false, false, false, false, false, false, false, false)
@_state.configure = true
self
end
def multi_workers_ready?
true
end
# Lock-file path for a named cross-worker lock; non-alphanumerics sanitized.
def get_lock_path(name)
name = name.gsub(/[^a-zA-Z0-9]/, "_")
File.join(@fluentd_lock_dir, "fluentd-#{name}.lock")
end
# Runs the block while holding an exclusive flock shared across workers.
def acquire_worker_lock(name)
if @fluentd_lock_dir.nil?
raise InvalidLockDirectory, "can't acquire lock because FLUENTD_LOCK_DIR isn't set"
end
lock_path = get_lock_path(name)
File.open(lock_path, "w") do |f|
f.flock(File::LOCK_EX)
yield
end
# Update access time to prevent tmpwatch from deleting a lock file.
FileUtils.touch(lock_path)
end
# Yields `str` with invalid byte sequences replaced by '?'.
# NOTE(review): the log message interpolates the already-scrubbed string.
def string_safe_encoding(str)
unless str.valid_encoding?
str = str.scrub('?')
log.info "invalid byte sequence is replaced in `#{str}`" if self.respond_to?(:log)
end
yield str
end
def context_router=(router)
@_context_router = router
end
def context_router
@_context_router
end
# Lifecycle methods. Each marks its phase in @_state and returns self so
# subclasses can call super and chain. Order: start -> after_start -> stop ->
# before_shutdown -> shutdown -> after_shutdown -> close -> terminate.
def start
# By initialization order, plugin logger is created before set log_event_enabled.
# It causes '@id' specified plugin, it uses plugin logger instead of global logger, ignores `<label @FLUENT_LOG>` setting.
# This is adhoc approach but impact is minimal.
if @log.is_a?(Fluent::PluginLogger) && $log.respond_to?(:log_event_enabled) # log_event_enabled check for tests
@log.log_event_enabled = $log.log_event_enabled
end
@_state.start = true
self
end
def after_start
@_state.after_start = true
self
end
def stop
@_state.stop = true
self
end
def before_shutdown
@_state.before_shutdown = true
self
end
def shutdown
@_state.shutdown = true
self
end
def after_shutdown
@_state.after_shutdown = true
self
end
def close
@_state.close = true
self
end
def terminate
@_state.terminate = true
self
end
# Predicates mirroring the lifecycle flags above: each reports whether the
# corresponding phase has been entered (flags are never reset).
def configured?
@_state.configure
end
def started?
@_state.start
end
def after_started?
@_state.after_start
end
def stopped?
@_state.stop
end
def before_shutdown?
@_state.before_shutdown
end
def shutdown?
@_state.shutdown
end
def after_shutdown?
@_state.after_shutdown
end
def closed?
@_state.close
end
def terminated?
@_state.terminate
end
# True when any frame in the call stack comes from a test_*.rb file.
def called_in_test?
caller_locations.each do |location|
# Thread::Backtrace::Location#path returns base filename or absolute path.
# #absolute_path returns absolute_path always.
# https://bugs.ruby-lang.org/issues/12159
if /\/test_[^\/]+\.rb$/.match?(location.absolute_path) # location.path =~ /test_.+\.rb$/
return true
end
end
false
end
def inspect
# Plugin instances are sometimes too big to dump because it may have too many thins (buffer,storage, ...)
# Original commit comment says that:
# To emulate normal inspect behavior `ruby -e'o=Object.new;p o;p (o.__id__<<1).to_s(16)'`.
# https://github.com/ruby/ruby/blob/trunk/gc.c#L788
"#<%s:%014x>" % [self.class.name, '0x%014x' % (__id__ << 1)]
end
# A plugin is hot-reloadable only if it defines no class variables.
def reloadable_plugin?
# Engine can't capture all class variables. so it's forbidden to use class variables in each plugins if enabling reload.
self.class.class_variables.empty?
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_apache2.rb | lib/fluent/plugin/parser_apache2.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
module Fluent
module Plugin
# Parses the Apache2 "combined" access log format. '-' fields become nil;
# code and size are converted to integers.
class Apache2Parser < Parser
Plugin.register_parser('apache2', self)
# Captures: host, user, time, method, path, code, size, referer, agent.
REGEXP = /^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>(?:[^\"]|\\")*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>(?:[^\"]|\\")*)" "(?<agent>(?:[^\"]|\\")*)")?$/
# e.g. 10/Oct/2000:13:55:36 -0700
TIME_FORMAT = "%d/%b/%Y:%H:%M:%S %z"
def initialize
super
# Guards @time_parser, which is not thread-safe.
@mutex = Mutex.new
end
def configure(conf)
super
@time_parser = time_parser_create(format: TIME_FORMAT)
end
def patterns
{'format' => REGEXP, 'time_format' => TIME_FORMAT}
end
# Yields `time, record`; yields `nil, nil` when the line doesn't match.
def parse(text)
m = REGEXP.match(text)
unless m
yield nil, nil
return
end
host = m['host']
host = (host == '-') ? nil : host
user = m['user']
user = (user == '-') ? nil : user
time = m['time']
time = @mutex.synchronize { @time_parser.parse(time) }
method = m['method']
path = m['path']
code = m['code'].to_i
# '-' (and any non-numeric status) parses to 0 -> reported as nil.
code = nil if code == 0
size = m['size']
size = (size == '-') ? nil : size.to_i
referer = m['referer']
referer = (referer == '-') ? nil : referer
agent = m['agent']
agent = (agent == '-') ? nil : agent
record = {
"host" => host,
"user" => user,
"method" => method,
"path" => path,
"code" => code,
"size" => size,
"referer" => referer,
"agent" => agent,
}
# Optionally keep the raw time string alongside the parsed event time.
record["time"] = m['time'] if @keep_time_key
yield time, record
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_msgpack.rb | lib/fluent/plugin/parser_msgpack.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
require 'fluent/msgpack_factory'
module Fluent
module Plugin
# Parses msgpack-encoded payloads. A decoded Hash is one record; a decoded
# Array is treated as a batch of record hashes.
class MessagePackParser < Parser
Plugin.register_parser('msgpack', self)
def configure(conf)
super
@unpacker = Fluent::MessagePackFactory.engine_factory.unpacker
end
# Input is raw bytes, not text.
def parser_type
:binary
end
# Feeds the data into the streaming unpacker; handles multiple concatenated
# msgpack objects in one buffer.
def parse(data, &block)
@unpacker.feed_each(data) do |obj|
parse_unpacked_data(obj, &block)
end
end
alias parse_partial_data parse
# Same as #parse but reads objects directly from an IO.
def parse_io(io, &block)
u = Fluent::MessagePackFactory.engine_factory.unpacker(io)
u.each do |obj|
parse_unpacked_data(obj, &block)
end
end
# Dispatches one decoded object: Hash -> single record, Array -> batch of
# hashes; anything else (or a non-hash batch element) yields `nil, nil`.
def parse_unpacked_data(data)
if data.is_a?(Hash)
time, record = convert_values(parse_time(data), data)
yield time, record
return
end
unless data.is_a?(Array)
yield nil, nil
return
end
data.each do |record|
unless record.is_a?(Hash)
yield nil, nil
next
end
time, converted_record = convert_values(parse_time(record), record)
yield time, converted_record
end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/file_util.rb | lib/fluent/plugin/file_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/compat/file_util'
module Fluent
# obsolete
# Deprecated constant alias kept so third-party plugins referring to
# Fluent::FileUtil keep working; the implementation lives in compat.
FileUtil = Fluent::Compat::FileUtil
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_tsv.rb | lib/fluent/plugin/formatter_tsv.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
module Fluent
module Plugin
# Formats records as delimiter-separated values of the configured `keys`,
# in order. Counterpart of the 'tsv' parser.
class TSVFormatter < Formatter
include Fluent::Plugin::Newline::Mixin
Plugin.register_formatter('tsv', self)
desc 'Field names included in each lines'
config_param :keys, :array, value_type: :string
desc 'The delimiter character (or string) of TSV values'
config_param :delimiter, :string, default: "\t".freeze
desc 'The parameter to enable writing to new lines'
config_param :add_newline, :bool, default: true
# Joins the configured keys' values (stringified; missing keys -> "") with
# the delimiter, appending the platform newline when add_newline is on.
def format(tag, time, record)
  line = @keys.map { |key| record[key].to_s }.join(@delimiter)
  line << @newline if @add_newline
  line
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/metrics_local.rb | lib/fluent/plugin/metrics_local.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin'
require 'fluent/plugin/metrics'
module Fluent
module Plugin
# In-process metrics backend: a single integer guarded by a Monitor.
class LocalMetrics < Metrics
Fluent::Plugin.register_metrics('local', self)
def initialize
super
@store = 0
@monitor = Monitor.new
end
def configure(conf)
super
# Wire the public dec/set/sub API to gauge or counter semantics on this
# instance's singleton class, depending on use_gauge_metric.
if use_gauge_metric
class << self
alias_method :dec, :dec_gauge
alias_method :set, :set_gauge
alias_method :sub, :sub_gauge
end
else
class << self
alias_method :set, :set_counter
end
end
end
def multi_workers_ready?
  true
end

# Current value, read under the monitor.
def get
  @monitor.synchronize { @store }
end

def inc
  @monitor.synchronize { @store += 1 }
end

def dec_gauge
  @monitor.synchronize { @store -= 1 }
end

def add(value)
  @monitor.synchronize { @store += value }
end

def sub_gauge(value)
  @monitor.synchronize { @store -= value }
end

# Counter semantics: the value only ever moves forward; smaller updates are
# dropped. (The pre-check is intentionally outside the monitor, as before.)
def set_counter(value)
  return if @store > value
  @monitor.synchronize { @store = value }
end

# Gauge semantics: unconditional overwrite.
def set_gauge(value)
  @monitor.synchronize { @store = value }
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/sd_file.rb | lib/fluent/plugin/sd_file.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'cool.io'
require 'fluent/plugin_helper'
require 'fluent/plugin/service_discovery'
module Fluent
module Plugin
# Service discovery backend that reads server definitions from a local
# YAML/JSON file and watches it for changes.
class FileServiceDiscovery < ServiceDiscovery
include PluginHelper::Mixin
Plugin.register_sd('file', self)
DEFAULT_FILE_TYPE = :yaml
DEFAULT_WEIGHT = 60
DEFAULT_SD_FILE_PATH = ENV['DEFAULT_SD_FILE_PATH'] || '/etc/fluent/sd.yaml'
helpers :event_loop
config_param :path, :string, default: DEFAULT_SD_FILE_PATH
config_param :conf_encoding, :string, default: 'utf-8'
def initialize
super
@file_type = nil
end
def configure(conf)
super
unless File.exist?(@path)
raise Fluent::ConfigError, "sd_file: path=#{@path} not found"
end
# Infer the format from the file extension; unknown extensions fall back
# to YAML.
@file_type = File.basename(@path).split('.', 2).last.to_sym
unless %i[yaml yml json].include?(@file_type)
@file_type = DEFAULT_FILE_TYPE
end
@services = fetch_server_info
end
# Attaches a stat watcher so edits to the file are pushed onto `queue` as
# service_in/service_out messages.
def start(queue)
watcher = StatWatcher.new(@path, @log) do |_prev, _cur|
refresh_file(queue)
end
event_loop_attach(watcher)
super()
end
private

# Returns (and memoizes) a callable that decodes the file body into an array
# of server hashes, based on @file_type.
# FIX: the YAML branch previously ended with a block-less `.map`, which
# returns an Enumerator rather than the parsed Array; the parsed result is
# now returned directly (callers iterate it the same way).
def parser
  @parser ||=
    case @file_type
    when :yaml, :yml
      require 'yaml'
      ->(v) { YAML.safe_load(v) }
    when :json
      require 'json'
      ->(v) { JSON.parse(v) }
    end
end
# Re-reads the discovery file and pushes the diff against the previous
# service list onto `queue`. Any read/parse error skips this refresh.
def refresh_file(queue)
s =
begin
fetch_server_info
rescue => e
@log.error("sd_file: #{e}")
return
end
if s.nil?
# if any error occurs, skip this turn
return
end
diff = []
join = s - @services
# Need service_in first to guarantee that server exist at least one all time.
join.each do |j|
diff << ServiceDiscovery.service_in_msg(j)
end
drain = @services - s
drain.each do |d|
diff << ServiceDiscovery.service_out_msg(d)
end
@services = s
diff.each do |a|
queue.push(a)
end
end
# Reads and parses the file into Service structs. `host` and `port` are
# mandatory; everything else is optional (weight defaults to DEFAULT_WEIGHT).
def fetch_server_info
config_data =
begin
File.open(@path, "r:#{@conf_encoding}:utf-8", &:read)
rescue => e
raise Fluent::ConfigError, "sd_file: path=#{@path} couldn't open #{e}"
end
parser.call(config_data).map do |s|
Service.new(
:file,
s.fetch('host'),
s.fetch('port'),
s['name'],
s.fetch('weight', DEFAULT_WEIGHT),
s['standby'],
s['username'],
s['password'],
s['shared_key'],
)
end
rescue KeyError => e
raise Fluent::ConfigError, "#{e}. Service must have `host` and `port`"
end
# Cool.io stat watcher that invokes the given callback whenever the watched
# file's stat changes; callback errors are logged, not raised into the loop.
class StatWatcher < Coolio::StatWatcher
def initialize(path, log, &callback)
@path = path
@log = log
@callback = callback
super(@path)
end
def on_change(prev_stat, cur_stat)
@callback.call(prev_stat, cur_stat)
rescue => e
@log.error(e)
end
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/input.rb | lib/fluent/plugin/input.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/log'
require 'fluent/plugin_id'
require 'fluent/plugin_helper'
module Fluent
module Plugin
# Base class for input plugins: adds plugin-id, per-plugin logging and the
# emit_records/emit_size metrics updated via #metric_callback.
class Input < Base
include PluginId
include PluginLoggerMixin
include PluginHelper::Mixin
helpers_internal :event_emitter, :metrics
def initialize
super
@emit_records_metrics = nil
@emit_size_metrics = nil
@counter_mutex = Mutex.new
# Size accounting is opt-in (set from system config in #configure).
@enable_size_metrics = false
end
def configure(conf)
super
@emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "emit_records", help_text: "Number of count emit records")
@emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "input", name: "emit_size", help_text: "Total size of emit events")
@enable_size_metrics = !!system_config.enable_size_metrics
end
# Snapshot of this input's metrics, keyed under 'input' for the monitor
# agent's get_monitor_info merge.
def statistics
  {
    'input' => {
      'emit_records' => @emit_records_metrics.get,
      'emit_size' => @emit_size_metrics.get,
    }
  }
end
# Invoked by the event emitter for each emitted stream; counts records and,
# when enabled, the serialized byte size (which costs a msgpack encode).
def metric_callback(es)
@emit_records_metrics.add(es.size)
@emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
end
# Inputs must opt in to multi-worker / zero-downtime-restart support.
def multi_workers_ready?
false
end
def zero_downtime_restart_ready?
false
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_buffer.rb | lib/fluent/plugin/out_buffer.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/output'
module Fluent::Plugin
# Relay output: re-emits buffered chunks back into the event router so
# that buffering behavior (file buffer, interval flushing, ...) can be
# inserted in front of other <match> sections.
class BufferOutput < Output
  Fluent::Plugin.register_output("buffer", self)
  helpers :event_emitter

  # Default to a tag-keyed file buffer flushed every 10 seconds.
  config_section :buffer do
    config_set_default :@type, "file"
    config_set_default :chunk_keys, ["tag"]
    config_set_default :flush_mode, :interval
    config_set_default :flush_interval, 10
  end

  # Every worker owns its own buffer, so parallel operation is safe.
  def multi_workers_ready?
    true
  end

  # Re-emit the chunk payload under its original tag; empty chunks are
  # dropped silently.
  def write(chunk)
    unless chunk.empty?
      stream = Fluent::MessagePackEventStream.new(chunk.read)
      router.emit_stream(chunk.metadata.tag, stream)
    end
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_forward.rb | lib/fluent/plugin/out_forward.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/output'
require 'fluent/config/error'
require 'fluent/clock'
require 'fluent/tls'
require 'base64'
require 'forwardable'
require 'fluent/compat/socket_util'
require 'fluent/plugin/out_forward/handshake_protocol'
require 'fluent/plugin/out_forward/load_balancer'
require 'fluent/plugin/out_forward/socket_cache'
require 'fluent/plugin/out_forward/failure_detector'
require 'fluent/plugin/out_forward/error'
require 'fluent/plugin/out_forward/connection_manager'
require 'fluent/plugin/out_forward/ack_handler'
module Fluent::Plugin
class ForwardOutput < Output
Fluent::Plugin.register_output('forward', self)

helpers :socket, :server, :timer, :thread, :compat_parameters, :service_discovery

# Default forward protocol port (same as in_forward).
LISTEN_PORT = 24224

desc 'The transport protocol.'
config_param :transport, :enum, list: [:tcp, :tls], default: :tcp
# TODO: TLS session cache/tickets
desc 'The timeout time when sending event logs.'
config_param :send_timeout, :time, default: 60
desc 'The timeout time for socket connect'
config_param :connect_timeout, :time, default: nil
# TODO: add linger_timeout, recv_timeout

desc 'The protocol to use for heartbeats (default is the same with "transport").'
config_param :heartbeat_type, :enum, list: [:transport, :tcp, :udp, :none], default: :transport
# NOTE: fixed help-text typo ("packer" -> "packet").
desc 'The interval of the heartbeat packet.'
config_param :heartbeat_interval, :time, default: 1
desc 'The wait time before accepting a server fault recovery.'
config_param :recover_wait, :time, default: 10
desc 'The hard timeout used to detect server failure.'
config_param :hard_timeout, :time, default: 60
desc 'The threshold parameter used to detect server faults.'
config_param :phi_threshold, :integer, default: 16
desc 'Use the "Phi accrual failure detector" to detect server failure.'
config_param :phi_failure_detector, :bool, default: true

desc 'Change the protocol to at-least-once.'
config_param :require_ack_response, :bool, default: false # require in_forward to respond with ack

## The reason of default value of :ack_response_timeout:
# Linux default tcp_syn_retries is 5 (in many environment)
# 3 + 6 + 12 + 24 + 48 + 96 -> 189 (sec)
desc 'This option is used when require_ack_response is true.'
config_param :ack_response_timeout, :time, default: 190

desc 'The interval while reading data from server'
config_param :read_interval_msec, :integer, default: 50 # 50ms
desc 'Reading data size from server'
config_param :read_length, :size, default: 512 # 512bytes

desc 'Set TTL to expire DNS cache in seconds.'
config_param :expire_dns_cache, :time, default: nil # 0 means disable cache
desc 'Enable client-side DNS round robin.'
config_param :dns_round_robin, :bool, default: false # heartbeat_type 'udp' is not available for this

desc 'Ignore DNS resolution and errors at startup time.'
config_param :ignore_network_errors_at_startup, :bool, default: false
desc 'Verify that a connection can be made with one of out_forward nodes at the time of startup.'
config_param :verify_connection_at_startup, :bool, default: false

desc 'Compress buffered data.'
config_param :compress, :enum, list: [:text, :gzip, :zstd], default: :text

desc 'The default version of TLS transport.'
config_param :tls_version, :enum, list: Fluent::TLS::SUPPORTED_VERSIONS, default: Fluent::TLS::DEFAULT_VERSION
desc 'The cipher configuration of TLS transport.'
config_param :tls_ciphers, :string, default: Fluent::TLS::CIPHERS_DEFAULT
desc 'Skip all verification of certificates or not.'
config_param :tls_insecure_mode, :bool, default: false
desc 'Allow self signed certificates or not.'
config_param :tls_allow_self_signed_cert, :bool, default: false
desc 'Verify hostname of servers and certificates or not in TLS transport.'
config_param :tls_verify_hostname, :bool, default: true
desc 'The additional CA certificate path for TLS.'
config_param :tls_ca_cert_path, :array, value_type: :string, default: nil
desc 'The additional certificate path for TLS.'
config_param :tls_cert_path, :array, value_type: :string, default: nil
desc 'The client certificate path for TLS.'
config_param :tls_client_cert_path, :string, default: nil
desc 'The client private key path for TLS.'
config_param :tls_client_private_key_path, :string, default: nil
desc 'The client private key passphrase for TLS.'
config_param :tls_client_private_key_passphrase, :string, default: nil, secret: true
desc 'The certificate thumbprint for searching from Windows system certstore.'
config_param :tls_cert_thumbprint, :string, default: nil, secret: true
desc 'The certificate logical store name on Windows system certstore.'
config_param :tls_cert_logical_store_name, :string, default: nil
desc 'Enable to use certificate enterprise store on Windows system certstore.'
config_param :tls_cert_use_enterprise_store, :bool, default: true

desc "Enable keepalive connection."
config_param :keepalive, :bool, default: false
desc "Expired time of keepalive. Default value is nil, which means to keep connection as long as possible"
config_param :keepalive_timeout, :time, default: nil

# Shared-key authentication settings (forward protocol handshake).
config_section :security, required: false, multi: false do
  desc 'The hostname'
  config_param :self_hostname, :string
  desc 'Shared key for authentication'
  config_param :shared_key, :string, secret: true
end

# One <server> section per destination node.
config_section :server, param_name: :servers do
  desc "The IP address or host name of the server."
  config_param :host, :string
  desc "The name of the server. Used for logging and certificate verification in TLS transport (when host is address)."
  config_param :name, :string, default: nil
  desc "The port number of the host."
  config_param :port, :integer, default: LISTEN_PORT
  desc "The shared key per server."
  config_param :shared_key, :string, default: nil, secret: true
  desc "The username for authentication."
  config_param :username, :string, default: ''
  desc "The password for authentication."
  config_param :password, :string, default: '', secret: true
  desc "Marks a node as the standby node for an Active-Standby model between Fluentd nodes."
  config_param :standby, :bool, default: false
  desc "The load balancing weight."
  config_param :weight, :integer, default: 60
end

attr_reader :nodes

# NOTE: fixed message typo ("User <server>" -> "Use <server>") to match
# the `host` parameter's message below.
config_param :port, :integer, default: LISTEN_PORT, obsoleted: "Use <server> section instead."
config_param :host, :string, default: nil, obsoleted: "Use <server> section instead."

config_section :buffer do
  config_set_default :chunk_keys, ["tag"]
end

attr_reader :read_interval, :recover_sample_size
# Set up runtime state placeholders; the heavy lifting (connection
# manager, service discovery, metrics) happens in #configure / #start.
def initialize
  super
  @nodes = [] #=> [Node]
  @loop = nil
  @thread = nil
  @usock = nil
  @keep_alive_watcher_interval = 5 # TODO
  @suspend_flush = false
  @healthy_nodes_count_metrics = nil
  @registered_nodes_count_metrics = nil
end
# Validates the configuration and builds the runtime collaborators:
# ack handler, connection manager, service discovery, node list and
# node-health metrics. Raises Fluent::ConfigError on invalid settings.
def configure(conf)
  compat_parameters_convert(conf, :buffer, default_chunk_key: 'tag')

  super

  # The forward protocol sends one chunk per destination tag, so the
  # buffer must be chunked by tag.
  unless @chunk_key_tag
    raise Fluent::ConfigError, "buffer chunk key must include 'tag' for forward output"
  end

  @read_interval = @read_interval_msec / 1000.0
  @recover_sample_size = @recover_wait / @heartbeat_interval

  # Legacy alias: 'tcp' heartbeats now mean "same as transport".
  if @heartbeat_type == :tcp
    log.warn "'heartbeat_type tcp' is deprecated. use 'transport' instead."
    @heartbeat_type = :transport
  end

  # UDP heartbeats resolve DNS once per response, which defeats
  # client-side round robin.
  if @dns_round_robin && @heartbeat_type == :udp
    raise Fluent::ConfigError, "forward output heartbeat type must be 'transport' or 'none' to use dns_round_robin option"
  end

  if @transport == :tls
    # socket helper adds CA cert or signed certificate to same cert store internally so unify it in this place.
    if @tls_cert_path && !@tls_cert_path.empty?
      @tls_ca_cert_path = @tls_cert_path
    end
    if @tls_ca_cert_path && !@tls_ca_cert_path.empty?
      @tls_ca_cert_path.each do |path|
        raise Fluent::ConfigError, "specified cert path does not exist:#{path}" unless File.exist?(path)
        raise Fluent::ConfigError, "specified cert path is not readable:#{path}" unless File.readable?(path)
      end
    end

    # Insecure mode implies skipping hostname verification and allowing
    # self-signed certificates.
    if @tls_insecure_mode
      log.warn "TLS transport is configured in insecure way"
      @tls_verify_hostname = false
      @tls_allow_self_signed_cert = true
    end

    if Fluent.windows?
      # Windows certstore lookup and file-based certs are exclusive.
      if (@tls_cert_path || @tls_ca_cert_path) && @tls_cert_logical_store_name
        raise Fluent::ConfigError, "specified both cert path and tls_cert_logical_store_name is not permitted"
      end
    else
      raise Fluent::ConfigError, "This parameter is for only Windows" if @tls_cert_logical_store_name
      raise Fluent::ConfigError, "This parameter is for only Windows" if @tls_cert_thumbprint
    end
  end

  @ack_handler = @require_ack_response ? AckHandler.new(timeout: @ack_response_timeout, log: @log, read_length: @read_length) : nil
  socket_cache = @keepalive ? SocketCache.new(@keepalive_timeout, @log) : nil
  @connection_manager = ConnectionManager.new(
    log: @log,
    secure: !!@security,
    connection_factory: method(:create_transfer_socket),
    socket_cache: socket_cache,
  )

  service_discovery_configure(
    :out_forward_service_discovery_watcher,
    static_default_service_directive: 'server',
    load_balancer: LoadBalancer.new(log),
    custom_build_method: method(:build_node),
  )

  service_discovery_services.each do |server|
    # it's only for test
    @nodes << server
    unless @heartbeat_type == :none
      begin
        server.validate_host_resolution!
      rescue => e
        raise unless @ignore_network_errors_at_startup
        log.warn "failed to resolve node name when configured", server: (server.name || server.host), error: e
        server.disable!
      end
    end
  end

  # Propagate this plugin's compress setting into the buffer unless the
  # buffer explicitly configured its own (conflicting settings raise).
  unless @as_secondary
    if @buffer.compress == :text
      @buffer.compress = @compress
    else
      if @compress == :text
        log.info "buffer is compressed. If you also want to save the bandwidth of a network, Add `compress` configuration in <match>"
      elsif @compress != @buffer.compress
        raise Fluent::ConfigError, "You cannot specify different compression formats for Buffer (Buffer: #{@buffer.compress}, Self: #{@compress})"
      end
    end
  end

  if service_discovery_services.empty?
    raise Fluent::ConfigError, "forward output plugin requires at least one node is required. Add <server> or <service_discovery>"
  end

  if !@keepalive && @keepalive_timeout
    log.warn('The value of keepalive_timeout is ignored. if you want to use keepalive, please add `keepalive true` to your conf.')
  end

  raise Fluent::ConfigError, "ack_response_timeout must be a positive integer" if @ack_response_timeout < 1

  if @compress == :zstd
    log.warn "zstd compression feature is an experimental new feature supported since v1.19.0." +
             " Please make sure that the destination server also supports this feature before using it." +
             " in_forward plugin for Fluentd supports it since v1.19.0."
  end

  @healthy_nodes_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "healthy_nodes_count", help_text: "Number of count healthy nodes", prefer_gauge: true)
  @registered_nodes_count_metrics = metrics_create(namespace: "fluentd", subsystem: "output", name: "registered_nodes_count", help_text: "Number of count registered nodes", prefer_gauge: true)
end
# forward output holds no cross-worker state, so it can run in every
# worker process.
def multi_workers_ready?
  true
end
# Delayed commit (commit on ack) is used only when at-least-once
# delivery was requested via require_ack_response.
def prefer_delayed_commit
  @require_ack_response
end
# Align the delayed-commit timeout with the ack timeout so chunks are
# rolled back once an ack can no longer arrive.
def overwrite_delayed_commit_timeout
  # Output#start sets @delayed_commit_timeout by @buffer_config.delayed_commit_timeout
  # But it should be overwritten by ack_response_timeout to rollback chunks after timeout
  if @delayed_commit_timeout != @ack_response_timeout
    log.info "delayed_commit_timeout is overwritten by ack_response_timeout"
    @delayed_commit_timeout = @ack_response_timeout + 2 # minimum ack_reader IO.select interval is 1s
  end
end
# Starts the heartbeat timer (and UDP receiver when needed), the ack
# reader thread, optional startup connection verification, and the
# keepalive socket watcher.
def start
  super

  unless @heartbeat_type == :none
    if @heartbeat_type == :udp
      # One shared UDP socket: used both to send heartbeats and to
      # receive responses.
      @usock = socket_create_udp(service_discovery_services.first.host, service_discovery_services.first.port, nonblock: true)
      server_create_udp(:out_forward_heartbeat_receiver, 0, socket: @usock, max_bytes: @read_length, &method(:on_udp_heartbeat_response_recv))
    end
    timer_execute(:out_forward_heartbeat_request, @heartbeat_interval, &method(:on_heartbeat_timer))
  end

  if @require_ack_response
    overwrite_delayed_commit_timeout
    thread_create(:out_forward_receiving_ack, &method(:ack_reader))
  end

  if @verify_connection_at_startup
    service_discovery_services.each do |node|
      begin
        node.verify_connection
      rescue StandardError => e
        # A misconfigured destination is fatal when verification is on.
        log.fatal "forward's connection setting error: #{e.message}"
        raise Fluent::UnrecoverableError, e.message
      end
    end
  end

  if @keepalive
    timer_execute(:out_forward_keep_alived_socket_watcher, @keep_alive_watcher_interval, &method(:on_purge_obsolete_socks))
  end
end
def close
  if @usock
    # close socket and ignore errors: this socket will not be used anyway.
    @usock.close rescue nil
  end

  super
end
# Stop the keepalive connection manager (closes cached sockets) before
# the normal shutdown sequence continues.
def stop
  super

  if @keepalive
    @connection_manager.stop
  end
end
# Suspend periodic flushing during shutdown; remaining ack-pending
# chunks are handled by after_shutdown/last_ack.
def before_shutdown
  super
  @suspend_flush = true
end
# Drain outstanding acks once before completing shutdown.
def after_shutdown
  last_ack if @require_ack_response
  super
end
# Skip flushing while shutdown is in progress and acks are required;
# otherwise delegate to the standard flush logic.
def try_flush
  return if @require_ack_response && @suspend_flush
  super
end
# Perform one final ack collection pass (used during shutdown).
def last_ack
  overwrite_delayed_commit_timeout
  ack_check(ack_select_interval)
end
# Synchronous write: pick a node via the load balancer and send the
# chunk. Used when acks are not required.
def write(chunk)
  return if chunk.empty?
  tag = chunk.metadata.tag

  service_discovery_select_service { |node| node.send_data(tag, chunk) }
end
# Delayed-commit write: send the chunk and let the ack reader commit or
# roll it back later. Empty chunks are committed immediately.
def try_write(chunk)
  log.trace "writing a chunk to destination", chunk_id: dump_unique_id_hex(chunk.unique_id)
  if chunk.empty?
    commit_write(chunk.unique_id)
    return
  end
  tag = chunk.metadata.tag
  service_discovery_select_service { |node| node.send_data(tag, chunk) }
  # During shutdown the ack reader thread is stopping, so collect acks
  # inline after each write.
  last_ack if @require_ack_response && @suspend_flush
end
# Connection factory handed to the ConnectionManager: opens a TCP or
# TLS socket to (host, port) with this plugin's timeout/TLS settings.
# `hostname` is used for TLS SNI and certificate verification.
def create_transfer_socket(host, port, hostname, &block)
  case @transport
  when :tls
    socket_create_tls(
      host, port,
      version: @tls_version,
      ciphers: @tls_ciphers,
      insecure: @tls_insecure_mode,
      verify_fqdn: @tls_verify_hostname,
      fqdn: hostname,
      allow_self_signed_cert: @tls_allow_self_signed_cert,
      cert_paths: @tls_ca_cert_path,
      cert_path: @tls_client_cert_path,
      private_key_path: @tls_client_private_key_path,
      private_key_passphrase: @tls_client_private_key_passphrase,
      cert_thumbprint: @tls_cert_thumbprint,
      cert_logical_store_name: @tls_cert_logical_store_name,
      cert_use_enterprise_store: @tls_cert_use_enterprise_store,

      # Enabling SO_LINGER causes tcp port exhaustion on Windows.
      # This is because dynamic ports are only 16384 (from 49152 to 65535) and
      # expiring SO_LINGER enabled ports should wait 4 minutes
      # where set by TcpTimeDelay. Its default value is 4 minutes.
      # So, we should disable SO_LINGER on Windows to prevent flood of waiting ports.
      linger_timeout: Fluent.windows? ? nil : @send_timeout,
      send_timeout: @send_timeout,
      recv_timeout: @ack_response_timeout,
      connect_timeout: @connect_timeout,
      &block
    )
  when :tcp
    socket_create_tcp(
      host, port,
      linger_timeout: @send_timeout,
      send_timeout: @send_timeout,
      recv_timeout: @ack_response_timeout,
      connect_timeout: @connect_timeout,
      &block
    )
  else
    raise "BUG: unknown transport protocol #{@transport}"
  end
end
# Merge node-health gauges (healthy/registered node counts) into the
# standard output statistics returned by super.
def statistics
  stats = super
  services = service_discovery_services
  @healthy_nodes_count_metrics.set(0)
  @registered_nodes_count_metrics.set(services.size)
  services.each do |s|
    if s.available?
      @healthy_nodes_count_metrics.inc
    end
  end

  stats = {
    'output' => stats["output"].merge({
      'healthy_nodes_count' => @healthy_nodes_count_metrics.get,
      'registered_nodes_count' => @registered_nodes_count_metrics.get,
    })
  }
  stats
end
# MessagePack FixArray length is 3
# (the wire format is [tag, entries, option]).
FORWARD_HEADER = [0x93].pack('C').freeze
def forward_header
  FORWARD_HEADER
end
private

# Build a Node for one configured/discovered server entry; uses
# NoneHeartbeatNode when heartbeats are disabled.
def build_node(server)
  name = server.name || "#{server.host}:#{server.port}"
  log.info "adding forwarding server '#{name}'", host: server.host, port: server.port, weight: server.weight, plugin_id: plugin_id

  failure = FailureDetector.new(@heartbeat_interval, @hard_timeout, Time.now.to_i.to_f)
  if @heartbeat_type == :none
    NoneHeartbeatNode.new(self, server, failure: failure, connection_manager: @connection_manager, ack_handler: @ack_handler)
  else
    Node.new(self, server, failure: failure, connection_manager: @connection_manager, ack_handler: @ack_handler)
  end
end
# Timer callback: send a heartbeat to every node, advance each node's
# failure detector, and rebalance the load balancer when any node was
# detached or recovered.
def on_heartbeat_timer
  need_rebuild = false
  service_discovery_services.each do |n|
    begin
      log.trace "sending heartbeat", host: n.host, port: n.port, heartbeat_type: @heartbeat_type
      n.usock = @usock if @usock
      need_rebuild = n.send_heartbeat || need_rebuild
    rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNREFUSED, Errno::ETIMEDOUT => e
      # Transient socket errors are expected; the failure detector
      # handles prolonged unreachability.
      log.debug "failed to send heartbeat packet", host: n.host, port: n.port, heartbeat_type: @heartbeat_type, error: e
    rescue => e
      log.debug "unexpected error happen during heartbeat", host: n.host, port: n.port, heartbeat_type: @heartbeat_type, error: e
    end

    need_rebuild = n.tick || need_rebuild
  end

  if need_rebuild
    service_discovery_rebalance
  end
end
# UDP server callback: match the responding peer to a known node by
# sockaddr and record the heartbeat (rebalancing on recovery).
def on_udp_heartbeat_response_recv(data, sock)
  sockaddr = Socket.pack_sockaddr_in(sock.remote_port, sock.remote_host)
  if node = service_discovery_services.find { |n| n.sockaddr == sockaddr }
    # log.trace "heartbeat arrived", name: node.name, host: node.host, port: node.port
    if node.heartbeat
      service_discovery_rebalance
    end
  else
    log.warn("Unknown heartbeat response received from #{sock.remote_host}:#{sock.remote_port}. It may service out")
  end
end
# Timer callback: drop expired keepalive sockets from the cache.
def on_purge_obsolete_socks
  @connection_manager.purge_obsolete_socks
end
# Interval for the ack reader's IO.select: 1 second normally, or a
# third of the delayed-commit timeout when that timeout is small, so a
# timed-out chunk is noticed promptly.
def ack_select_interval
  @delayed_commit_timeout > 3 ? 1 : @delayed_commit_timeout / 3.0
end
# Body of the dedicated ack-reading thread: poll for ack responses
# until the plugin is stopped.
def ack_reader
  select_interval = ack_select_interval

  while thread_current_running?
    ack_check(select_interval)
  end
end
# Collect ack responses and commit or roll back the corresponding
# chunks. Sockets are returned to the connection manager either way.
def ack_check(select_interval)
  @ack_handler.collect_response(select_interval) do |chunk_id, node, sock, result|
    @connection_manager.close(sock)

    case result
    when AckHandler::Result::SUCCESS
      commit_write(chunk_id)
    when AckHandler::Result::FAILED
      # The node failed to respond in time: mark it down and retry the
      # chunk (without counting toward the retry limit).
      node&.disable!
      rollback_write(chunk_id, update_retry: false) if chunk_id
    when AckHandler::Result::CHUNKID_UNMATCHED
      rollback_write(chunk_id, update_retry: false)
    else
      log.warn("BUG: invalid status #{result} #{chunk_id}")

      if chunk_id
        rollback_write(chunk_id, update_retry: false)
      end
    end
  end
end
# Represents one destination server: wraps connection handling, the
# forward-protocol handshake, heartbeat bookkeeping, DNS caching and
# phi-accrual failure detection for a single <server> (or
# service-discovery) entry.
class Node
  extend Forwardable
  def_delegators :@server, :discovery_id, :host, :port, :name, :weight, :standby

  # @param connection_manager [Fluent::Plugin::ForwardOutput::ConnectionManager]
  # @param ack_handler [Fluent::Plugin::ForwardOutput::AckHandler]
  def initialize(sender, server, failure:, connection_manager:, ack_handler:)
    @sender = sender
    @log = sender.log
    @compress = sender.compress
    @server = server

    @name = server.name
    @host = server.host
    @port = server.port
    @weight = server.weight
    @standby = server.standby
    @failure = failure
    @available = true

    # @hostname is used for certificate verification & TLS SNI
    # (deliberate inline rescue: IPAddr.new raises for non-address hosts)
    host_is_hostname = !(IPAddr.new(@host) rescue false)
    @hostname = case
                when host_is_hostname then @host
                when @name then @name
                else nil
                end

    @usock = nil

    @handshake = HandshakeProtocol.new(
      log: @log,
      hostname: sender.security&.self_hostname,
      # per-server shared key overrides the global one
      shared_key: server.shared_key || sender.security&.shared_key || '',
      password: server.password || '',
      username: server.username || '',
    )

    @resolved_host = nil
    @resolved_time = 0
    @resolved_once = false

    @connection_manager = connection_manager
    @ack_handler = ack_handler
  end

  attr_accessor :usock

  attr_reader :state
  attr_reader :sockaddr # used by on_udp_heartbeat_response_recv
  attr_reader :failure # for test

  # Raises SocketError if the host cannot be resolved (startup check).
  def validate_host_resolution!
    resolved_host
  end

  def available?
    @available
  end

  def disable!
    @available = false
  end

  def standby?
    @standby
  end

  # Open a connection and run the handshake, raising on failure.
  def verify_connection
    connect do |sock, ri|
      ensure_established_connection(sock, ri)
    end
  end

  # Drive the handshake state machine by reading from the socket until
  # the connection is :established, the hard timeout elapses, or an
  # error occurs. On any failure the node is marked unavailable.
  def establish_connection(sock, ri)
    start_time = Fluent::Clock.now
    timeout = @sender.hard_timeout

    while ri.state != :established
      # Check for timeout to prevent infinite loop
      if Fluent::Clock.now - start_time > timeout
        @log.warn "handshake timeout after #{timeout}s", host: @host, port: @port
        disable!
        break
      end

      begin
        # TODO: On Ruby 2.2 or earlier, read_nonblock doesn't work expectedly.
        # We need rewrite around here using new socket/server plugin helper.
        buf = sock.read_nonblock(@sender.read_length)
        if buf.empty?
          sleep @sender.read_interval
          next
        end
        Fluent::MessagePackFactory.msgpack_unpacker.feed_each(buf) do |data|
          if @handshake.invoke(sock, ri, data) == :established
            @log.debug "connection established", host: @host, port: @port
          end
        end
      rescue IO::WaitReadable
        # If the exception is Errno::EWOULDBLOCK or Errno::EAGAIN, it is extended by IO::WaitReadable.
        # So IO::WaitReadable can be used to rescue the exceptions for retrying read_nonblock.
        # https://docs.ruby-lang.org/en/2.3.0/IO.html#method-i-read_nonblock
        sleep @sender.read_interval unless ri.state == :established
      rescue SystemCallError => e
        @log.warn "disconnected by error", host: @host, port: @port, error: e
        disable!
        break
      rescue EOFError
        @log.warn "disconnected", host: @host, port: @port
        disable!
        break
      rescue HeloError => e
        # NOTE(review): `e` is captured but not included in the message.
        @log.warn "received invalid helo message from #{@name}"
        disable!
        break
      rescue PingpongError => e
        @log.warn "connection refused to #{@name || @host}: #{e.message}"
        disable!
        break
      end
    end
  end

  # Serialize one buffer chunk onto the wire in PackedForward format:
  # [tag, entries(str32), option].
  def send_data_actual(sock, tag, chunk)
    option = { 'size' => chunk.size, 'compressed' => @compress }
    option['chunk'] = Base64.encode64(chunk.unique_id) if @ack_handler

    # https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#packedforward-mode
    # out_forward always uses str32 type for entries.
    # str16 can store only 64kbytes, and it should be much smaller than buffer chunk size.

    tag = tag.dup.force_encoding(Encoding::UTF_8)

    sock.write @sender.forward_header # array, size=3
    sock.write tag.to_msgpack # 1. tag: String (str)
    chunk.open(compressed: @compress) do |chunk_io|
      entries = [0xdb, chunk_io.size].pack('CN')
      sock.write entries.force_encoding(Encoding::UTF_8) # 2. entries: String (str32)
      IO.copy_stream(chunk_io, sock) # writeRawBody(packed_es)
    end
    sock.write option.to_msgpack # 3. option: Hash(map)

    # TODO: use bin32 for non-utf8 content(entries) when old msgpack-ruby (0.5.x or earlier) not supported
  end

  # Send one chunk (establishing/reusing a connection); also counts as
  # a heartbeat sample without triggering recovery detection.
  def send_data(tag, chunk)
    ack = @ack_handler && @ack_handler.create_ack(chunk.unique_id, self)
    connect(nil, ack: ack) do |sock, ri|
      ensure_established_connection(sock, ri)
      send_data_actual(sock, tag, chunk)
    end

    heartbeat(false)
    nil
  end

  # FORWARD_TCP_HEARTBEAT_DATA = FORWARD_HEADER + ''.to_msgpack + [].to_msgpack
  #
  # @return [Boolean] return true if it needs to rebuild nodes
  def send_heartbeat
    begin
      dest_addr = resolved_host
      @resolved_once = true
    rescue ::SocketError => e
      if !@resolved_once && @sender.ignore_network_errors_at_startup
        @log.warn "failed to resolve node name in heartbeating", server: @name || @host, error: e
        return false
      end
      raise
    end

    case @sender.heartbeat_type
    when :transport
      connect(dest_addr) do |sock, ri|
        ensure_established_connection(sock, ri)

        ## don't send any data to not cause a compatibility problem
        # sock.write FORWARD_TCP_HEARTBEAT_DATA

        # successful tcp connection establishment is considered as valid heartbeat.
        # When heartbeat is succeeded after detached, return true. It rebuilds weight array.
        heartbeat(true)
      end
    when :udp
      @usock.send "\0", 0, Socket.pack_sockaddr_in(@port, dest_addr)
      # response is going to receive at on_udp_heartbeat_response_recv
      false
    when :none # :none doesn't use this class
      raise "BUG: heartbeat_type none must not use Node"
    else
      raise "BUG: unknown heartbeat_type '#{@sender.heartbeat_type}'"
    end
  end

  # Resolved address honoring expire_dns_cache: 0 disables caching,
  # nil caches forever, any other value expires the cache by TTL.
  def resolved_host
    case @sender.expire_dns_cache
    when 0
      # cache is disabled
      resolve_dns!
    when nil
      # persistent cache
      @resolved_host ||= resolve_dns!
    else
      now = Fluent::EventTime.now
      rh = @resolved_host
      if !rh || now - @resolved_time >= @sender.expire_dns_cache
        rh = @resolved_host = resolve_dns!
        @resolved_time = now
      end
      rh
    end
  end

  def resolve_dns!
    addrinfo_list = Socket.getaddrinfo(@host, @port, nil, Socket::SOCK_STREAM)
    # With dns_round_robin, pick a random address instead of the first.
    addrinfo = @sender.dns_round_robin ? addrinfo_list.sample : addrinfo_list.first
    @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_udp_heartbeat_response_recv
    addrinfo[3]
  end
  private :resolve_dns!

  # Periodic liveness check driven by on_heartbeat_timer. Returns true
  # when the node was just detached (caller rebalances), nil/false
  # otherwise.
  def tick
    now = Time.now.to_f
    unless available?
      if @failure.hard_timeout?(now)
        @failure.clear
      end
      return nil
    end

    if @failure.hard_timeout?(now)
      @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, hard_timeout: true
      disable!
      @resolved_host = nil # expire cached host
      @failure.clear
      return true
    end

    if @sender.phi_failure_detector
      phi = @failure.phi(now)
      if phi > @sender.phi_threshold
        @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, phi: phi, phi_threshold: @sender.phi_threshold
        disable!
        @resolved_host = nil # expire cached host
        @failure.clear
        return true
      end
    end
    false
  end

  # Record a heartbeat sample. With detect=true, returns true when a
  # previously detached node has just recovered (caller rebalances).
  def heartbeat(detect=true)
    now = Time.now.to_f
    @failure.add(now)
    if detect && !available? && @failure.sample_size > @sender.recover_sample_size
      @available = true
      @log.warn "recovered forwarding server '#{@name}'", host: @host, port: @port
      true
    else
      nil
    end
  end

  private

  # Run the handshake if needed; raise when it cannot be completed.
  def ensure_established_connection(sock, request_info)
    if request_info.state != :established
      establish_connection(sock, request_info)

      if request_info.state != :established
        raise ConnectionClosedError, "failed to establish connection with node #{@name}"
      end
    end
  end

  def connect(host = nil, ack: false, &block)
    @connection_manager.connect(host: host || resolved_host, port: port, hostname: @hostname, ack: ack, &block)
  end
end
# Override Node to disable heartbeat: the node is always considered
# available, never ticks into a detached state, and heartbeat samples
# are no-ops.
class NoneHeartbeatNode < Node
  def available?
    true
  end

  def tick
    false
  end

  def heartbeat(detect=true)
    true
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/filter_grep.rb | lib/fluent/plugin/filter_grep.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/filter'
require 'fluent/config/error'
require 'fluent/plugin/string_util'
module Fluent::Plugin
class GrepFilter < Filter
Fluent::Plugin.register_filter('grep', self)
# Condition lists are compiled in #configure; until then they stay nil
# so #filter can cheaply skip unset condition groups.
def initialize
  super
  @_regexp_and_conditions = @_exclude_and_conditions = nil
  @_regexp_or_conditions = @_exclude_or_conditions = nil
end
# for test
attr_reader :_regexp_and_conditions, :_exclude_and_conditions, :_regexp_or_conditions, :_exclude_or_conditions

helpers :record_accessor

# Upper bound for the legacy flat parameters regexp1..regexpN /
# exclude1..excludeN.
REGEXP_MAX_NUM = 20

(1..REGEXP_MAX_NUM).each {|i| config_param :"regexp#{i}", :string, default: nil, deprecated: "Use <regexp> section" }
(1..REGEXP_MAX_NUM).each {|i| config_param :"exclude#{i}", :string, default: nil, deprecated: "Use <exclude> section" }

# Top-level <regexp> sections: records must match (AND semantics).
config_section :regexp, param_name: :regexps, multi: true do
  desc "The field name to which the regular expression is applied."
  config_param :key, :string
  desc "The regular expression."
  config_param :pattern, :regexp
end

# Top-level <exclude> sections: matching records are dropped (OR semantics).
config_section :exclude, param_name: :excludes, multi: true do
  desc "The field name to which the regular expression is applied."
  config_param :key, :string
  desc "The regular expression."
  config_param :pattern, :regexp
end

# <and> groups: all nested conditions must hold.
config_section :and, param_name: :and_conditions, multi: true do
  config_section :regexp, param_name: :regexps, multi: true do
    desc "The field name to which the regular expression is applied."
    config_param :key, :string
    desc "The regular expression."
    config_param :pattern, :regexp
  end
  config_section :exclude, param_name: :excludes, multi: true do
    desc "The field name to which the regular expression is applied."
    config_param :key, :string
    desc "The regular expression."
    config_param :pattern, :regexp
  end
end

# <or> groups: at least one nested condition must hold.
config_section :or, param_name: :or_conditions, multi: true do
  config_section :regexp, param_name: :regexps, multi: true do
    desc "The field name to which the regular expression is applied."
    config_param :key, :string
    desc "The regular expression."
    config_param :pattern, :regexp
  end
  config_section :exclude, param_name: :excludes, multi: true do
    desc "The field name to which the regular expression is applied."
    config_param :key, :string
    desc "The regular expression."
    config_param :pattern, :regexp
  end
end
# Compile all configured conditions (legacy flat params, top-level
# sections, and <and>/<or> groups) into four flat Expression lists
# used by #filter. Duplicate keys within a list raise ConfigError.
def configure(conf)
  super

  regexp_and_conditions = {}
  regexp_or_conditions = {}
  exclude_and_conditions = {}
  exclude_or_conditions = {}

  # Legacy flat parameters: "regexpN <key> <pattern>"
  (1..REGEXP_MAX_NUM).each do |i|
    next unless conf["regexp#{i}"]
    key, regexp = conf["regexp#{i}"].split(/ /, 2)
    raise Fluent::ConfigError, "regexp#{i} does not contain 2 parameters" unless regexp
    raise Fluent::ConfigError, "regexp#{i} contains a duplicated key, #{key}" if regexp_and_conditions[key]
    regexp_and_conditions[key] = Expression.new(record_accessor_create(key), Regexp.compile(regexp))
  end

  # Legacy flat parameters: "excludeN <key> <pattern>"
  (1..REGEXP_MAX_NUM).each do |i|
    next unless conf["exclude#{i}"]
    key, exclude = conf["exclude#{i}"].split(/ /, 2)
    raise Fluent::ConfigError, "exclude#{i} does not contain 2 parameters" unless exclude
    raise Fluent::ConfigError, "exclude#{i} contains a duplicated key, #{key}" if exclude_or_conditions[key]
    exclude_or_conditions[key] = Expression.new(record_accessor_create(key), Regexp.compile(exclude))
  end

  if @regexps.size > 1
    log.info "Top level multiple <regexp> is interpreted as 'and' condition"
  end
  @regexps.each do |e|
    raise Fluent::ConfigError, "Duplicate key: #{e.key}" if regexp_and_conditions.key?(e.key)
    regexp_and_conditions[e.key] = Expression.new(record_accessor_create(e.key), e.pattern)
  end

  if @excludes.size > 1
    log.info "Top level multiple <exclude> is interpreted as 'or' condition"
  end
  @excludes.each do |e|
    raise Fluent::ConfigError, "Duplicate key: #{e.key}" if exclude_or_conditions.key?(e.key)
    exclude_or_conditions[e.key] = Expression.new(record_accessor_create(e.key), e.pattern)
  end

  @and_conditions.each do |and_condition|
    if !and_condition.regexps.empty? && !and_condition.excludes.empty?
      raise Fluent::ConfigError, "Do not specify both <regexp> and <exclude> in <and>"
    end
    and_condition.regexps.each do |e|
      raise Fluent::ConfigError, "Duplicate key in <and>: #{e.key}" if regexp_and_conditions.key?(e.key)
      regexp_and_conditions[e.key] = Expression.new(record_accessor_create(e.key), e.pattern)
    end
    and_condition.excludes.each do |e|
      raise Fluent::ConfigError, "Duplicate key in <and>: #{e.key}" if exclude_and_conditions.key?(e.key)
      exclude_and_conditions[e.key] = Expression.new(record_accessor_create(e.key), e.pattern)
    end
  end

  @or_conditions.each do |or_condition|
    if !or_condition.regexps.empty? && !or_condition.excludes.empty?
      raise Fluent::ConfigError, "Do not specify both <regexp> and <exclude> in <or>"
    end
    or_condition.regexps.each do |e|
      raise Fluent::ConfigError, "Duplicate key in <or>: #{e.key}" if regexp_or_conditions.key?(e.key)
      regexp_or_conditions[e.key] = Expression.new(record_accessor_create(e.key), e.pattern)
    end
    or_condition.excludes.each do |e|
      raise Fluent::ConfigError, "Duplicate key in <or>: #{e.key}" if exclude_or_conditions.key?(e.key)
      exclude_or_conditions[e.key] = Expression.new(record_accessor_create(e.key), e.pattern)
    end
  end

  # Keep nil (not []) when a group is unused so #filter can skip it.
  @_regexp_and_conditions = regexp_and_conditions.values unless regexp_and_conditions.empty?
  @_exclude_and_conditions = exclude_and_conditions.values unless exclude_and_conditions.empty?
  @_regexp_or_conditions = regexp_or_conditions.values unless regexp_or_conditions.empty?
  @_exclude_or_conditions = exclude_or_conditions.values unless exclude_or_conditions.empty?
end
# Decides whether a single event passes the grep conditions built in
# #configure. Returns the record unchanged when it passes, or nil to
# drop it. Errors during matching are logged and the event is let
# through unchanged (best-effort filtering).
def filter(tag, time, record)
  begin
    and_regexps = @_regexp_and_conditions
    return nil if and_regexps && !and_regexps.all? { |expr| expr.match?(record) }

    or_regexps = @_regexp_or_conditions
    return nil if or_regexps && !or_regexps.any? { |expr| expr.match?(record) }

    and_excludes = @_exclude_and_conditions
    return nil if and_excludes && and_excludes.all? { |expr| expr.match?(record) }

    or_excludes = @_exclude_or_conditions
    return nil if or_excludes && or_excludes.any? { |expr| expr.match?(record) }
  rescue => e
    log.warn "failed to grep events", error: e
    log.warn_backtrace
  end
  record
end
# Value object pairing a record accessor (key) with a compiled Regexp
# (pattern). #match? applies the pattern to the stringified field value.
Expression = Struct.new(:key, :pattern) do
  # @param record [Hash] the event record to inspect
  # @return truthy when the accessed field matches pattern
  def match?(record)
    target = key.call(record).to_s
    ::Fluent::StringUtil.match_regexp(pattern, target)
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_nginx.rb | lib/fluent/plugin/parser_nginx.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser_regexp'
module Fluent
  module Plugin
    # Parser for the standard nginx access-log line format. Reuses
    # RegexpParser with a preset expression and time format; users can
    # still override both via configuration.
    class NginxParser < RegexpParser
      Plugin.register_parser("nginx", self)
      # Named captures: remote, host, user, time, method, path, code,
      # size, and optionally referer, agent, http_x_forwarded_for.
      config_set_default :expression, /^(?<remote>[^ ]*) (?<host>[^ ]*) (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*?)(?: +\S*)?)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)"(?:\s+\"?(?<http_x_forwarded_for>[^\"]*)\"?)?)?$/
      # e.g. "28/Feb/2013:12:00:00 +0900"
      config_set_default :time_format, "%d/%b/%Y:%H:%M:%S %z"
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/multi_output.rb | lib/fluent/plugin/multi_output.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/log'
require 'fluent/plugin_id'
require 'fluent/plugin_helper'
module Fluent
module Plugin
# Base class for output plugins that fan events out to multiple child
# outputs declared via <store> sections (e.g. out_copy). Child outputs
# are instantiated in #configure; their lifecycle is normally driven by
# the agent, but is driven here when the plugin is created dynamically
# (see call_lifecycle_method).
class MultiOutput < Base
  include PluginId
  include PluginLoggerMixin
  include PluginHelper::Mixin # for event_emitter

  helpers :event_emitter # to get router from agent, which will be supplied to child plugins
  helpers_internal :metrics

  # Each <store> section configures one child output plugin.
  config_section :store, param_name: :stores, multi: true, required: true do
    config_argument :arg, :string, default: ''
    config_param :@type, :string, default: nil
  end

  attr_reader :outputs, :outputs_statically_created

  # Subclasses must emit the event stream to their child outputs here.
  def process(tag, es)
    raise NotImplementedError, "BUG: output plugins MUST implement this method"
  end

  def initialize
    super
    @outputs = []
    # Set true by #static_outputs when the agent takes over lifecycle.
    @outputs_statically_created = false

    @counter_mutex = Mutex.new
    # TODO: well organized counters
    @num_errors_metrics = nil
    @emit_count_metrics = nil
    @emit_records_metrics = nil
    @emit_size_metrics = nil
    # @write_count = 0
    # @rollback_count = 0
    @enable_size_metrics = false
  end

  # Snapshot of emit/error counters, namespaced under 'multi_output'.
  def statistics
    stats = {
      'num_errors' => @num_errors_metrics.get,
      'emit_records' => @emit_records_metrics.get,
      'emit_count' => @emit_count_metrics.get,
      'emit_size' => @emit_size_metrics.get,
    }

    { 'multi_output' => stats }
  end

  def multi_output?
    true
  end

  # Creates the metrics and instantiates/configures one child output per
  # <store> section. Raises Fluent::ConfigError when '@type' is missing.
  def configure(conf)
    super

    @num_errors_metrics = metrics_create(namespace: "fluentd", subsystem: "multi_output", name: "num_errors", help_text: "Number of count num errors")
    @emit_count_metrics = metrics_create(namespace: "fluentd", subsystem: "multi_output", name: "emit_count", help_text: "Number of count emits")
    @emit_records_metrics = metrics_create(namespace: "fluentd", subsystem: "multi_output", name: "emit_records", help_text: "Number of emit records")
    @emit_size_metrics = metrics_create(namespace: "fluentd", subsystem: "multi_output", name: "emit_size", help_text: "Total size of emit events")
    @enable_size_metrics = !!system_config.enable_size_metrics

    @stores.each do |store|
      store_conf = store.corresponding_config_element
      type = store_conf['@type']
      unless type
        raise Fluent::ConfigError, "Missing '@type' parameter in <store> section"
      end

      log.debug "adding store", type: type

      output = Fluent::Plugin.new_output(type)
      output.context_router = self.context_router
      output.configure(store_conf)
      @outputs << output
    end
  end

  # Called by the agent to enumerate child outputs; marks them as
  # statically created so this plugin skips their lifecycle handling.
  def static_outputs
    @outputs_statically_created = true
    @outputs
  end

  # Child plugin's lifecycles are controlled by agent automatically.
  # It calls `outputs` to traverse plugins, and invoke start/stop/*shutdown/close/terminate on these directly.
  # * `start` of this plugin will be called after child plugins
  # * `stop`, `*shutdown`, `close` and `terminate` of this plugin will be called before child plugins
  # But when MultiOutput plugins are created dynamically (by forest plugin or others), agent cannot find
  # sub-plugins. So child plugins' lifecycles MUST be controlled by MultiOutput plugin itself.
  # TODO: this hack will be removed at v2.
  def call_lifecycle_method(method_name, checker_name)
    return if @outputs_statically_created
    @outputs.each do |o|
      begin
        log.debug "calling #{method_name} on output plugin dynamically created", type: Fluent::Plugin.lookup_type_from_class(o.class), plugin_id: o.plugin_id
        o.__send__(method_name) unless o.__send__(checker_name)
      rescue Exception => e
        # NOTE(review): rescuing Exception is deliberate here so one
        # misbehaving child cannot abort the others' lifecycle calls.
        log.warn "unexpected error while calling #{method_name} on output plugin dynamically created", plugin: o.class, plugin_id: o.plugin_id, error: e
        log.warn_backtrace
      end
    end
  end

  def start
    super
    call_lifecycle_method(:start, :started?)
  end

  def after_start
    super
    call_lifecycle_method(:after_start, :after_started?)
  end

  def stop
    super
    call_lifecycle_method(:stop, :stopped?)
  end

  def before_shutdown
    super
    call_lifecycle_method(:before_shutdown, :before_shutdown?)
  end

  def shutdown
    super
    call_lifecycle_method(:shutdown, :shutdown?)
  end

  def after_shutdown
    super
    call_lifecycle_method(:after_shutdown, :after_shutdown?)
  end

  def close
    super
    call_lifecycle_method(:close, :closed?)
  end

  def terminate
    super
    call_lifecycle_method(:terminate, :terminated?)
  end

  # Synchronous emit path: counts the emit, delegates to #process, and
  # updates record/size metrics; errors bump num_errors and re-raise.
  def emit_sync(tag, es)
    @emit_count_metrics.inc
    begin
      process(tag, es)
      @emit_records_metrics.add(es.size)
      @emit_size_metrics.add(es.to_msgpack_stream.bytesize) if @enable_size_metrics
    rescue
      @num_errors_metrics.inc
      raise
    end
  end
  alias :emit_events :emit_sync
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_single_value.rb | lib/fluent/plugin/formatter_single_value.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
module Fluent
module Plugin
# Formatter that emits only one field of the record (default key:
# "message") as plain text, optionally terminated with a newline
# supplied by the Newline mixin.
class SingleValueFormatter < Formatter
  include Fluent::Plugin::Newline::Mixin
  Plugin.register_formatter('single_value', self)

  config_param :message_key, :string, default: 'message'.freeze
  config_param :add_newline, :bool, default: true

  # Returns the stringified value of @message_key; tag and time are
  # ignored by this formatter.
  def format(tag, time, record)
    out = record[@message_key].to_s.dup
    out << @newline if @add_newline
    out
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter_stdout.rb | lib/fluent/plugin/formatter_stdout.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/formatter'
module Fluent
module Plugin
# Formatter that renders "<time> <tag>: <record>" lines (as used by
# out_stdout). Record serialization is delegated to a sub-formatter
# chosen by output_type (default: json).
class StdoutFormatter < Formatter
  Plugin.register_formatter('stdout', self)

  TIME_FORMAT = '%Y-%m-%d %H:%M:%S.%N %z'

  config_param :output_type, :string, default: 'json'

  def configure(conf)
    super
    # Use @time_format when set (presumably via base-class config —
    # TODO confirm), otherwise the nanosecond-precision default above.
    @time_formatter = Strftime.new(@time_format || TIME_FORMAT)
    @sub_formatter = Plugin.new_formatter(@output_type, parent: self.owner)
    @sub_formatter.configure(conf)
  end

  def start
    super
    @sub_formatter.start
  end

  # Prefixes the sub-formatter's (chomped) output with local time and tag.
  def format(tag, time, record)
    "#{@time_formatter.exec(Time.at(time).localtime)} #{tag}: #{@sub_formatter.format(tag, time, record).chomp}\n"
  end

  # Lifecycle hooks below tear down the sub-formatter first, then this
  # formatter via super.
  def stop
    @sub_formatter.stop
    super
  end

  def before_shutdown
    @sub_formatter.before_shutdown
    super
  end

  def shutdown
    @sub_formatter.shutdown
    super
  end

  def after_shutdown
    @sub_formatter.after_shutdown
    super
  end

  def close
    @sub_formatter.close
    super
  end

  def terminate
    @sub_formatter.terminate
    super
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/buf_file_single.rb | lib/fluent/plugin/buf_file_single.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fileutils'
require 'fluent/plugin/buffer'
require 'fluent/plugin/buffer/file_single_chunk'
require 'fluent/system_config'
require 'fluent/variable_store'
module Fluent
module Plugin
# File-based buffer that stores chunks as single files (no separate
# per-chunk metadata file, unlike buf_file). The chunk path embeds the
# buffer key — the event tag or exactly one chunk key field.
class FileSingleBuffer < Fluent::Plugin::Buffer
  Plugin.register_buffer('file_single', self)

  include SystemConfig::Mixin

  DEFAULT_CHUNK_LIMIT_SIZE = 256 * 1024 * 1024 # 256MB
  DEFAULT_TOTAL_LIMIT_SIZE = 64 * 1024 * 1024 * 1024 # 64GB

  PATH_SUFFIX = ".#{Fluent::Plugin::Buffer::FileSingleChunk::PATH_EXT}"

  desc 'The path where buffer chunks are stored.'
  config_param :path, :string, default: nil
  desc 'Calculate the number of record in chunk during resume'
  config_param :calc_num_records, :bool, default: true
  desc 'The format of chunk. This is used to calculate the number of record'
  config_param :chunk_format, :enum, list: [:msgpack, :text, :auto], default: :auto

  config_set_default :chunk_limit_size, DEFAULT_CHUNK_LIMIT_SIZE
  config_set_default :total_limit_size, DEFAULT_TOTAL_LIMIT_SIZE

  desc 'The permission of chunk file. If no specified, <system> setting or 0644 is used'
  config_param :file_permission, :string, default: nil
  desc 'The permission of chunk directory. If no specified, <system> setting or 0755 is used'
  config_param :dir_permission, :string, default: nil

  def initialize
    super
    # Whether the resolved path layout is safe for multi-worker use.
    @multi_workers_available = false
    # Extra glob used by worker 0 to resume chunks left over from a
    # previous single-worker run.
    @additional_resume_path = nil
    @variable_store = nil
  end

  # Resolves the chunk path pattern, validates chunk-key constraints,
  # and registers the path to detect conflicts with other plugins.
  def configure(conf)
    super

    @variable_store = Fluent::VariableStore.fetch_or_build(:buf_file_single)

    # :auto picks msgpack only when the owner emits msgpack-formatted chunks.
    if @chunk_format == :auto
      @chunk_format = owner.formatted_to_msgpack_binary? ? :msgpack : :text
    end

    # file_single supports buffering by tag, or by exactly one field.
    @key_in_path = nil
    if owner.chunk_keys.empty?
      log.debug "use event tag for buffer key"
    else
      if owner.chunk_key_tag
        raise Fluent::ConfigError, "chunk keys must be tag or one field"
      elsif owner.chunk_keys.size > 1
        raise Fluent::ConfigError, "2 or more chunk keys is not allowed"
      else
        @key_in_path = owner.chunk_keys.first.to_sym
      end
    end

    multi_workers_configured = owner.system_config.workers > 1

    using_plugin_root_dir = false
    unless @path
      if root_dir = owner.plugin_root_dir
        @path = File.join(root_dir, 'buffer')
        using_plugin_root_dir = true # plugin_root_dir path contains worker id
      else
        raise Fluent::ConfigError, "buffer path is not configured. specify 'path' in <buffer>"
      end
    end

    specified_directory_exists = File.exist?(@path) && File.directory?(@path)
    unexisting_path_for_directory = !File.exist?(@path) && !@path.include?('.*')

    if specified_directory_exists || unexisting_path_for_directory # directory
      if using_plugin_root_dir || !multi_workers_configured
        @path = File.join(@path, "fsb.*#{PATH_SUFFIX}")
      else
        # Per-worker subdirectory keeps workers from sharing chunk files.
        @path = File.join(@path, "worker#{fluentd_worker_id}", "fsb.*#{PATH_SUFFIX}")
        if fluentd_worker_id == 0
          # worker 0 always checks unflushed buffer chunks to be resumed (might be created while non-multi-worker configuration)
          @additional_resume_path = File.join(File.expand_path("../../", @path), "fsb.*#{PATH_SUFFIX}")
        end
      end
      @multi_workers_available = true
    else # specified path is file path
      if File.basename(@path).include?('.*.')
        new_path = File.join(File.dirname(@path), "fsb.*#{PATH_SUFFIX}")
        log.warn "file_single doesn't allow user specified 'prefix.*.suffix' style path. Use '#{new_path}' for file instead: #{@path}"
        @path = new_path
      elsif File.basename(@path).end_with?('.*')
        @path = @path + PATH_SUFFIX
      else
        # existing file will be ignored
        @path = @path + ".*#{PATH_SUFFIX}"
      end
      @multi_workers_available = false
    end

    # Guard against two plugin instances sharing one buffer path.
    type_of_owner = Plugin.lookup_type_from_class(@_owner.class)
    if @variable_store.has_key?(@path) && !called_in_test?
      type_using_this_path = @variable_store[@path]
      raise Fluent::ConfigError, "Other '#{type_using_this_path}' plugin already uses same buffer path: type = #{type_of_owner}, buffer path = #{@path}"
    end

    @variable_store[@path] = type_of_owner
    # dir_permission is octal text in config; convert once here.
    @dir_permission = if @dir_permission
                        @dir_permission.to_i(8)
                      else
                        system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION
                      end
  end

  # This method is called only when multi worker is configured
  def multi_workers_ready?
    unless @multi_workers_available
      log.error "file_single buffer with multi workers should be configured to use directory 'path', or system root_dir and plugin id"
    end
    @multi_workers_available
  end

  def start
    FileUtils.mkdir_p(File.dirname(@path), mode: @dir_permission)

    super
  end

  def stop
    if @variable_store
      @variable_store.delete(@path)
    end

    super
  end

  def persistent?
    true
  end

  # Scans the buffer directory for leftover chunk files and rebuilds the
  # staged/queued chunk sets. Broken chunks are backed up and removed.
  # @return [Hash, Array] staged chunks by metadata, and the sorted queue
  def resume
    stage = {}
    queue = []
    exist_broken_file = false

    patterns = [@path]
    patterns.unshift @additional_resume_path if @additional_resume_path
    Dir.glob(escaped_patterns(patterns)) do |path|
      next unless File.file?(path)

      if owner.respond_to?(:buffer_config) && owner.buffer_config&.flush_at_shutdown
        # When `flush_at_shutdown` is `true`, the remaining chunk files during resuming are possibly broken
        # since there may be a power failure or similar failure.
        log.warn { "restoring buffer file: path = #{path}" }
      else
        log.debug { "restoring buffer file: path = #{path}" }
      end

      m = new_metadata() # this metadata will be updated in FileSingleChunk.new
      mode = Fluent::Plugin::Buffer::FileSingleChunk.assume_chunk_state(path)
      if mode == :unknown
        log.debug "unknown state chunk found", path: path
        next
      end

      begin
        chunk = Fluent::Plugin::Buffer::FileSingleChunk.new(m, path, mode, @key_in_path, compress: @compress)
        chunk.restore_size(@chunk_format) if @calc_num_records
      rescue Fluent::Plugin::Buffer::FileSingleChunk::FileChunkError => e
        exist_broken_file = true
        handle_broken_files(path, mode, e)
        next
      end

      case chunk.state
      when :staged
        stage[chunk.metadata] = chunk
      when :queued
        queue << chunk
      end
    end

    queue.sort_by!(&:modified_at)

    # If one of the files is corrupted, other files may also be corrupted and be undetected.
    # The time periods of each chunk are helpful to check the data.
    if exist_broken_file
      log.info "Since a broken chunk file was found, it is possible that other files remaining at the time of resuming were also broken. Here is the list of the files."
      (stage.values + queue).each { |chunk|
        log.info "  #{chunk.path}:", :created_at => chunk.created_at, :modified_at => chunk.modified_at
      }
    end

    return stage, queue
  end

  def generate_chunk(metadata)
    # FileChunk generates real path with unique_id
    perm = @file_permission || system_config.file_permission
    chunk = Fluent::Plugin::Buffer::FileSingleChunk.new(metadata, @path, :create, @key_in_path, perm: perm, compress: @compress)
    log.debug "Created new chunk", chunk_id: dump_unique_id_hex(chunk.unique_id), metadata: metadata

    chunk
  end

  # Copies a broken chunk to the backup area (unless disabled), then
  # deletes the original so resume can continue.
  def handle_broken_files(path, mode, e)
    log.error "found broken chunk file during resume.", :path => path, :mode => mode, :err_msg => e.message
    unique_id, _ = Fluent::Plugin::Buffer::FileSingleChunk.unique_id_and_key_from_path(path)
    backup(unique_id) { |f|
      File.open(path, 'rb') { |chunk|
        chunk.set_encoding(Encoding::ASCII_8BIT)
        chunk.sync = true
        chunk.binmode
        IO.copy_stream(chunk, f)
      }
    }
  rescue => error
    log.error "backup failed. Delete corresponding files.", :err_msg => error.message
  ensure
    log.warn "disable_chunk_backup is true. #{dump_unique_id_hex(unique_id)} chunk is thrown away." if @disable_chunk_backup
    File.unlink(path) rescue nil
  end

  # Copies (not moves) a chunk's file into the backup dir; used when a
  # chunk must be taken out of the normal flush path.
  def evacuate_chunk(chunk)
    unless chunk.is_a?(Fluent::Plugin::Buffer::FileSingleChunk)
      raise ArgumentError, "The chunk must be FileSingleChunk, but it was #{chunk.class}."
    end

    backup_dir = File.join(backup_base_dir, 'buffer', safe_owner_id)
    FileUtils.mkdir_p(backup_dir, mode: system_config.dir_permission || Fluent::DEFAULT_DIR_PERMISSION) unless Dir.exist?(backup_dir)

    FileUtils.copy(chunk.path, backup_dir)
    log.warn "chunk files are evacuated to #{backup_dir}.", chunk_id: dump_unique_id_hex(chunk.unique_id)
  rescue => e
    log.error "unexpected error while evacuating chunk files.", error: e
  end

  private

  def escaped_patterns(patterns)
    patterns.map { |pattern|
      # '{' '}' are special character in Dir.glob
      pattern.gsub(/[\{\}]/) { |c| "\\#{c}" }
    }
  end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/socket_util.rb | lib/fluent/plugin/socket_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/compat/socket_util'
module Fluent
  # obsolete
  # Backward-compatibility alias; new code should reference
  # Fluent::Compat::SocketUtil directly.
  SocketUtil = Fluent::Compat::SocketUtil
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_http.rb | lib/fluent/plugin/in_http.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
require 'fluent/plugin/parser'
require 'fluent/event'
require 'http/parser'
require 'webrick/httputils'
require 'uri'
require 'socket'
require 'json'
module Fluent::Plugin
# Pseudo parser registered as in_http's default <parse> type. It never
# parses text itself (#parse is a no-op yielding nils); in_http only
# consults its time-related settings via #get_time_parser.
class InHttpParser < Parser
  Fluent::Plugin.register_parser('in_http', self)

  config_set_default :time_key, 'time'

  def configure(conf)
    super
    # Build a real time parser only when the user supplied explicit time
    # options; otherwise in_http applies its own time conversion rule.
    time_opts_given = conf.has_key?('time_type') || conf.has_key?('time_format')
    @time_parser = time_opts_given ? time_parser_create : nil
  end

  # this plugin is dummy implementation not to raise error
  def parse(text)
    yield nil, nil
  end

  # @return [Object, nil] the time parser built in #configure, if any
  def get_time_parser
    @time_parser
  end
end
class HttpInput < Input
Fluent::Plugin.register_input('http', self)
helpers :parser, :compat_parameters, :event_loop, :server
EMPTY_GIF_IMAGE = "GIF89a\u0001\u0000\u0001\u0000\x80\xFF\u0000\xFF\xFF\xFF\u0000\u0000\u0000,\u0000\u0000\u0000\u0000\u0001\u0000\u0001\u0000\u0000\u0002\u0002D\u0001\u0000;".force_encoding("UTF-8")
desc 'The port to listen to.'
config_param :port, :integer, default: 9880
desc 'The bind address to listen to.'
config_param :bind, :string, default: '0.0.0.0'
desc 'The size limit of the POSTed element. Default is 32MB.'
config_param :body_size_limit, :size, default: 32*1024*1024 # TODO default
desc 'The timeout limit for keeping the connection alive.'
config_param :keepalive_timeout, :time, default: 10 # TODO default
config_param :backlog, :integer, default: nil
desc 'Add HTTP_ prefix headers to the record.'
config_param :add_http_headers, :bool, default: false
desc 'Add REMOTE_ADDR header to the record.'
config_param :add_remote_addr, :bool, default: false
config_param :blocking_timeout, :time, default: 0.5
desc 'Set a allow list of domains that can do CORS (Cross-Origin Resource Sharing)'
config_param :cors_allow_origins, :array, default: nil
desc 'Tells browsers whether to expose the response to frontend when the credentials mode is "include".'
config_param :cors_allow_credentials, :bool, default: false
desc 'Respond with empty gif image of 1x1 pixel.'
config_param :respond_with_empty_img, :bool, default: false
desc 'Respond status code with 204.'
config_param :use_204_response, :bool, default: false
desc 'Dump error log or not'
config_param :dump_error_log, :bool, default: true
desc 'Add QUERY_ prefix query params to record'
config_param :add_query_params, :bool, default: false
desc "Add prefix to incoming tag"
config_param :add_tag_prefix, :string, default: nil
config_section :parse do
config_set_default :@type, 'in_http'
end
EVENT_RECORD_PARAMETER = '_event_record'
def initialize
  super
  @km = nil          # KeepaliveManager, created in #start
  @format_name = nil # 'default' or the configured <parse> @type
  @parser_time_key = nil

  # default parsers (used when <parse> type is 'in_http')
  @parser_msgpack = nil
  @parser_json = nil
  @default_time_parser = nil
  @default_keep_time_key = nil
  @float_time_parser = nil

  # <parse> configured parser
  @custom_parser = nil
end
# Validates CORS/tag-prefix settings and wires up the parameter-parsing
# strategy: the default 'in_http' pseudo parser handles json/ndjson/
# msgpack form params, while any other <parse> type delegates to a
# user-configured parser. The chosen strategy becomes this instance's
# #parse_params via a singleton-class method definition.
def configure(conf)
  compat_parameters_convert(conf, :parser)
  super

  # Credentials mode requires an explicit origin list (no wildcard).
  if @cors_allow_credentials
    if @cors_allow_origins.nil? || @cors_allow_origins.include?('*')
      raise Fluent::ConfigError, "Cannot enable cors_allow_credentials without specific origins"
    end
  end

  raise Fluent::ConfigError, "'add_tag_prefix' parameter must not be empty" if @add_tag_prefix && @add_tag_prefix.empty?

  m = if @parser_configs.first['@type'] == 'in_http'
        @parser_msgpack = parser_create(usage: 'parser_in_http_msgpack', type: 'msgpack')
        @parser_msgpack.time_key = nil
        @parser_msgpack.estimate_current_event = false
        @parser_json = parser_create(usage: 'parser_in_http_json', type: 'json')
        @parser_json.time_key = nil
        @parser_json.estimate_current_event = false

        # Pseudo parser carrying only time-conversion settings.
        default_parser = parser_create(usage: '')
        @format_name = 'default'
        @parser_time_key = default_parser.time_key
        @default_time_parser = default_parser.get_time_parser
        @default_keep_time_key = default_parser.keep_time_key
        method(:parse_params_default)
      else
        @custom_parser = parser_create
        @format_name = @parser_configs.first['@type']
        @parser_time_key = @custom_parser.time_key
        method(:parse_params_with_parser)
      end
  # Install the chosen strategy as #parse_params on this instance only.
  self.singleton_class.module_eval do
    define_method(:parse_params, m)
  end
end
# Periodic (1-second) watcher that force-closes connections idle longer
# than the keepalive timeout. Connections register themselves via #add
# and deregister via #delete; each tick of their idle counter comes from
# Handler#step_idle.
class KeepaliveManager < Coolio::TimerWatcher
  def initialize(timeout)
    super(1, true) # fire every second, repeating
    @cons = {}
    @timeout = timeout.to_i
  end

  def add(sock)
    @cons[sock] = sock
  end

  def delete(sock)
    @cons.delete(sock)
  end

  # Close every connection whose idle counter exceeded the timeout.
  def on_timer
    @cons.each_key do |sock|
      sock.close if sock.step_idle > @timeout
    end
  end
end
# This input can run in every worker process.
def multi_workers_ready?
  true
end
# Starts the keepalive watcher and the listening server socket, and
# prepares the float time parser used for the 'time' request param.
def start
  @_event_loop_run_timeout = @blocking_timeout
  super
  log.debug "listening http", bind: @bind, port: @port

  @km = KeepaliveManager.new(@keepalive_timeout)
  event_loop_attach(@km)
  server_create_connection(:in_http, @port, bind: @bind, backlog: @backlog, &method(:on_server_connect))
  @float_time_parser = Fluent::NumericTimeParser.new(:float)
end
# Blocks until the server helper has fully stopped before closing.
def close
  server_wait_until_stop
  super
end
RES_TEXT_HEADER = {'Content-Type' => 'text/plain'}.freeze
# Prebuilt immutable [status, headers, body] response triples.
RESPONSE_200 = ["200 OK".freeze, RES_TEXT_HEADER, "".freeze].freeze
RESPONSE_204 = ["204 No Content".freeze, {}.freeze].freeze
RESPONSE_IMG = ["200 OK".freeze, {'Content-Type'=>'image/gif; charset=utf-8'}.freeze, EMPTY_GIF_IMAGE].freeze

RES_400_STATUS = "400 Bad Request".freeze
RES_500_STATUS = "500 Internal Server Error".freeze
# Handles one HTTP request: derives the tag from the URL path, parses
# the request params into events, resolves each event's time, and emits
# the stream to the router.
# @return [Array] a [status, headers, body] response triple
def on_request(path_info, params)
  begin
    path = path_info[1..-1] # remove /
    tag = path.split('/').join('.')
    tag = "#{@add_tag_prefix}.#{tag}" if @add_tag_prefix
    mes = Fluent::MultiEventStream.new
    parse_params(params) do |record_time, record|
      if record.nil?
        log.debug { "incoming event is invalid: path=#{path_info} params=#{params.to_json}" }
        next
      end

      add_params_to_record(record, params)

      # Time precedence: explicit 'time' param (0.0 means "now"),
      # then the parser-supplied time, then the record's time field.
      time = if param_time = params['time']
               param_time = param_time.to_f
               param_time.zero? ? Fluent::EventTime.now : @float_time_parser.parse(param_time)
             else
               record_time.nil? ? convert_time_field(record) : record_time
             end

      mes.add(time, record)
    end
  rescue => e
    if @dump_error_log
      log.error "failed to process request", error: e
    end
    return [RES_400_STATUS, RES_TEXT_HEADER, "400 Bad Request\n#{e}\n"]
  end

  # TODO server error
  begin
    router.emit_stream(tag, mes) unless mes.empty?
  rescue => e
    if @dump_error_log
      log.error "failed to emit data", error: e
    end
    return [RES_500_STATUS, RES_TEXT_HEADER, "500 Internal Server Error\n#{e}\n"]
  end

  if @respond_with_empty_img
    return RESPONSE_IMG
  else
    if @use_204_response
      return RESPONSE_204
    else
      return RESPONSE_200
    end
  end
end
private
# Wraps each accepted connection in a Handler and forwards the server
# helper's data/write_complete/close events to it.
def on_server_connect(conn)
  handler = Handler.new(conn, @km, method(:on_request),
                        @body_size_limit, @format_name, log,
                        @cors_allow_origins, @cors_allow_credentials,
                        @add_query_params)

  conn.on(:data) do |data|
    handler.on_read(data)
  end

  conn.on(:write_complete) do |_|
    handler.on_write_complete
  end

  conn.on(:close) do |_|
    handler.on_close
  end
end
# Default parameter parsing: accepts exactly one of the 'msgpack',
# 'json' or 'ndjson' form parameters and yields [nil, record] for each
# decoded record (time is resolved later by the caller).
# @raise [RuntimeError] when none of the supported parameters is present
def parse_params_default(params)
  if (data = params['msgpack'])
    @parser_msgpack.parse(data) { |_time, record| yield nil, record }
  elsif (data = params['json'])
    @parser_json.parse(data) { |_time, record| yield nil, record }
  elsif (data = params['ndjson'])
    # One JSON document per line; tolerate CRLF line endings.
    data.split(/\r?\n/).each do |line|
      @parser_json.parse(line) { |_time, record| yield nil, record }
    end
  else
    raise "'json', 'ndjson' or 'msgpack' parameter is required"
  end
end
# Parameter parsing via the user-configured <parse> parser: the raw
# event payload must arrive in the '_event_record' form parameter.
# @raise [RuntimeError] when the parameter is missing
def parse_params_with_parser(params)
  content = params[EVENT_RECORD_PARAMETER]
  raise "'#{EVENT_RECORD_PARAMETER}' parameter is required" unless content

  @custom_parser.parse(content) do |time, record|
    yield time, record
  end
end
# Mutates record in place, copying in request metadata according to the
# add_http_headers / add_query_params / add_remote_addr options.
# HTTP_* and QUERY_* entries come from the request params hash.
def add_params_to_record(record, params)
  copy_prefixed = lambda do |prefix|
    params.each_pair do |key, value|
      record[key] = value if key.start_with?(prefix)
    end
  end

  copy_prefixed.call("HTTP_".freeze) if @add_http_headers
  copy_prefixed.call("QUERY_".freeze) if @add_query_params
  record['REMOTE_ADDR'] = params['REMOTE_ADDR'] if @add_remote_addr
end
# Resolves event time from the record's time field (removing it unless
# keep_time_key). Uses the configured time parser when present,
# otherwise treats the value as an epoch timestamp. Falls back to the
# current time when the field is absent.
def convert_time_field(record)
  if t = @default_keep_time_key ? record[@parser_time_key] : record.delete(@parser_time_key)
    if @default_time_parser
      @default_time_parser.parse(t)
    else
      Fluent::EventTime.from_time(Time.at(t))
    end
  else
    Fluent::EventTime.now
  end
end
class Handler
attr_reader :content_type
# @param io        socket wrapper provided by the server helper
# @param km        KeepaliveManager to register with for idle tracking
# @param callback  HttpInput#on_request, invoked per complete request
def initialize(io, km, callback, body_size_limit, format_name, log,
               cors_allow_origins, cors_allow_credentials, add_query_params)
  @io = io
  @km = km
  @callback = callback
  @body_size_limit = body_size_limit
  @next_close = false
  @format_name = format_name
  @log = log
  @cors_allow_origins = cors_allow_origins
  @cors_allow_credentials = cors_allow_credentials
  @idle = 0
  @add_query_params = add_query_params
  @km.add(self)

  @remote_port, @remote_addr = io.remote_port, io.remote_addr
  # http_parser.rb parser; its callbacks (on_message_begin etc.) are
  # methods on this object.
  @parser = Http::Parser.new(self)
end
# Increments the idle counter; polled each second by KeepaliveManager.
def step_idle
  @idle += 1
end

# Deregisters from the keepalive manager when the socket closes.
def on_close
  @km.delete(self)
end

# Feeds raw socket data to the HTTP parser; any parse error closes the
# connection after being logged.
def on_read(data)
  @idle = 0
  @parser << data
rescue
  @log.warn "unexpected error", error: $!.to_s
  @log.warn_backtrace
  @io.close
end

# http-parser callback: reset the body accumulator for a new request.
def on_message_begin
  @body = ''
end
# http-parser callback run once all request headers are in. Populates
# @env (HTTP_* keys), captures content type/encoding, keepalive intent,
# Origin and CORS preflight headers, and answers Expect: 100-continue
# (enforcing body_size_limit early when Content-Length is known).
def on_headers_complete(headers)
  expect = nil
  size = nil

  # HTTP/1.1 defaults to persistent connections; 1.0 does not.
  if @parser.http_version == [1, 1]
    @keep_alive = true
  else
    @keep_alive = false
  end
  @env = {}
  @content_type = ""
  @content_encoding = ""
  headers.each_pair {|k,v|
    @env["HTTP_#{k.tr('-','_').upcase}"] = v
    case k
    when /\AExpect\z/i
      expect = v
    when /\AContent-Length\Z/i
      size = v.to_i
    when /\AContent-Type\Z/i
      @content_type = v
    when /\AContent-Encoding\Z/i
      @content_encoding = v
    when /\AConnection\Z/i
      if /close/i.match?(v)
        @keep_alive = false
      elsif /Keep-alive/i.match?(v)
        @keep_alive = true
      end
    when /\AOrigin\Z/i
      @origin = v
    when /\AX-Forwarded-For\Z/i
      # For multiple X-Forwarded-For headers. Use first header value.
      v = v.first if v.is_a?(Array)
      @remote_addr = v.split(",").first
    when /\AAccess-Control-Request-Method\Z/i
      @access_control_request_method = v
    when /\AAccess-Control-Request-Headers\Z/i
      @access_control_request_headers = v
    end
  }
  if expect
    if expect == '100-continue'.freeze
      if !size || size < @body_size_limit
        send_response_nobody("100 Continue", {})
      else
        send_response_and_close("413 Request Entity Too Large", {}, "Too large")
      end
    else
      send_response_and_close("417 Expectation Failed", {}, "")
    end
  end
end
def on_body(chunk)
if @body.bytesize + chunk.bytesize > @body_size_limit
unless closing?
send_response_and_close("413 Request Entity Too Large", {}, "Too large")
end
return
end
@body << chunk
end
RES_200_STATUS = "200 OK".freeze
RES_403_STATUS = "403 Forbidden".freeze
# Azure App Service sends GET requests for health checking purpose.
# Respond with `200 OK` to accommodate it.
def handle_get_request
return send_response_and_close(RES_200_STATUS, {}, "")
end
# Web browsers can send an OPTIONS request before performing POST
# to check if cross-origin requests are supported.
def handle_options_request
# Is CORS enabled in the first place?
if @cors_allow_origins.nil?
return send_response_and_close(RES_403_STATUS, {}, "")
end
# in_http does not support HTTP methods except POST
if @access_control_request_method != 'POST'
return send_response_and_close(RES_403_STATUS, {}, "")
end
header = {
"Access-Control-Allow-Methods" => "POST",
"Access-Control-Allow-Headers" => @access_control_request_headers || "",
}
# Check the origin and send back a CORS response
if @cors_allow_origins.include?('*')
header["Access-Control-Allow-Origin"] = "*"
send_response_and_close(RES_200_STATUS, header, "")
elsif include_cors_allow_origin
header["Access-Control-Allow-Origin"] = @origin
if @cors_allow_credentials
header["Access-Control-Allow-Credentials"] = true
end
send_response_and_close(RES_200_STATUS, header, "")
else
send_response_and_close(RES_403_STATUS, {}, "")
end
end
def on_message_complete
return if closing?
if @parser.http_method == 'GET'.freeze
return handle_get_request()
end
if @parser.http_method == 'OPTIONS'.freeze
return handle_options_request()
end
# CORS check
# ==========
# For every incoming request, we check if we have some CORS
# restrictions and allow listed origins through @cors_allow_origins.
# If origin is empty, it's likely a server-to-server request and considered safe.
unless @cors_allow_origins.nil?
unless @cors_allow_origins.include?('*') || include_cors_allow_origin || @origin.nil?
send_response_and_close(RES_403_STATUS, {'Connection' => 'close'}, "")
return
end
end
# Content Encoding
# =================
# Decode payload according to the "Content-Encoding" header.
# For now, we only support 'gzip' and 'deflate'.
begin
if @content_encoding == 'gzip'.freeze
@body = Zlib::GzipReader.new(StringIO.new(@body)).read
elsif @content_encoding == 'deflate'.freeze
@body = Zlib::Inflate.inflate(@body)
end
rescue
@log.warn 'fails to decode payload', error: $!.to_s
send_response_and_close(RES_400_STATUS, {}, "")
return
end
@env['REMOTE_ADDR'] = @remote_addr if @remote_addr
uri = URI.parse(@parser.request_url)
params = parse_query(uri.query)
if @format_name != 'default'
params[EVENT_RECORD_PARAMETER] = @body
elsif /^application\/x-www-form-urlencoded/.match?(@content_type)
params.update parse_query(@body)
elsif @content_type =~ /^multipart\/form-data; boundary=(.+)/
boundary = WEBrick::HTTPUtils.dequote($1)
params.update WEBrick::HTTPUtils.parse_form_data(@body, boundary)
elsif /^application\/json/.match?(@content_type)
params['json'] = @body
elsif /^application\/csp-report/.match?(@content_type)
params['json'] = @body
elsif /^application\/msgpack/.match?(@content_type)
params['msgpack'] = @body
elsif /^application\/x-ndjson/.match?(@content_type)
params['ndjson'] = @body
end
path_info = uri.path
if (@add_query_params)
query_params = parse_query(uri.query)
query_params.each_pair {|k,v|
params["QUERY_#{k.tr('-','_').upcase}"] = v
}
end
params.merge!(@env)
@env.clear
code, header, body = @callback.call(path_info, params)
body = body.to_s
header = header.dup if header.frozen?
unless @cors_allow_origins.nil?
if @cors_allow_origins.include?('*')
header['Access-Control-Allow-Origin'] = '*'
elsif include_cors_allow_origin
header['Access-Control-Allow-Origin'] = @origin
if @cors_allow_credentials
header["Access-Control-Allow-Credentials"] = true
end
end
end
if @keep_alive
header['Connection'] = 'Keep-Alive'.freeze
send_response(code, header, body)
else
send_response_and_close(code, header, body)
end
end
def close
@io.close
end
def on_write_complete
@io.close if @next_close
end
def send_response_and_close(code, header, body)
send_response(code, header, body)
@next_close = true
end
def closing?
@next_close
end
def send_response(code, header, body)
header['Content-Length'] ||= body.bytesize
header['Content-Type'] ||= 'text/plain'.freeze
data = %[HTTP/1.1 #{code}\r\n]
header.each_pair {|k,v|
data << "#{k}: #{v}\r\n"
}
data << "\r\n".freeze
@io.write(data)
@io.write(body)
end
def send_response_nobody(code, header)
data = %[HTTP/1.1 #{code}\r\n]
header.each_pair {|k,v|
data << "#{k}: #{v}\r\n"
}
data << "\r\n".freeze
@io.write(data)
end
def include_cors_allow_origin
if @origin.nil?
return false
end
if @cors_allow_origins.include?(@origin)
return true
end
filtered_cors_allow_origins = @cors_allow_origins.select {|origin| origin != ""}
r = filtered_cors_allow_origins.find do |origin|
(start_str, end_str) = origin.split("*", 2)
@origin.start_with?(start_str) && @origin.end_with?(end_str)
end
!r.nil?
end
def parse_query(query)
query.nil? ? {} : Hash[URI.decode_www_form(query, Encoding::ASCII_8BIT)]
end
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/in_forward.rb | lib/fluent/plugin/in_forward.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/input'
require 'fluent/msgpack_factory'
require 'yajl'
require 'digest'
require 'securerandom'
module Fluent::Plugin
# Input plugin implementing the fluentd forward protocol: TCP for events
# (with optional shared-key/user authentication) and UDP for heartbeats.
# See the wiki page below for protocol specification
# https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1
class ForwardInput < Input
  Fluent::Plugin.register_input('forward', self)

  helpers :server

  LISTEN_PORT = 24224

  desc 'The port to listen to.'
  config_param :port, :integer, default: LISTEN_PORT
  desc 'The bind address to listen to.'
  config_param :bind, :string, default: '0.0.0.0'

  config_param :backlog, :integer, default: nil

  # SO_LINGER 0 to send RST rather than FIN to avoid lots of connections sitting in TIME_WAIT at src
  desc 'The timeout time used to set linger option.'
  config_param :linger_timeout, :integer, default: nil, deprecated: "use transport directive"
  # This option is for Cool.io's loop wait timeout to avoid loop stuck at shutdown. Almost users don't need to change this value.
  config_param :blocking_timeout, :time, default: 0.5
  desc 'Try to resolve hostname from IP addresses or not.'
  config_param :resolve_hostname, :bool, default: nil
  desc 'Connections will be disconnected right after receiving first message if this value is true.'
  config_param :deny_keepalive, :bool, default: false
  desc 'Check the remote connection is still available by sending a keepalive packet if this value is true.'
  config_param :send_keepalive_packet, :bool, default: false

  desc 'Log warning if received chunk size is larger than this value.'
  config_param :chunk_size_warn_limit, :size, default: nil
  desc 'Received chunk is dropped if it is larger than this value.'
  config_param :chunk_size_limit, :size, default: nil
  desc 'Skip an event if incoming event is invalid.'
  config_param :skip_invalid_event, :bool, default: true

  desc "The field name of the client's source address."
  config_param :source_address_key, :string, default: nil
  desc "The field name of the client's hostname."
  config_param :source_hostname_key, :string, default: nil

  desc "New tag instead of incoming tag"
  config_param :tag, :string, default: nil
  desc "Add prefix to incoming tag"
  config_param :add_tag_prefix, :string, default: nil

  config_section :security, required: false, multi: false do
    desc 'The hostname'
    config_param :self_hostname, :string
    desc 'Shared key for authentication'
    config_param :shared_key, :string, secret: true
    desc 'If true, use user based authentication'
    config_param :user_auth, :bool, default: false
    desc 'Allow anonymous source. <client> sections required if disabled.'
    config_param :allow_anonymous_source, :bool, default: true

    ### User based authentication
    config_section :user, param_name: :users, required: false, multi: true do
      desc 'The username for authentication'
      config_param :username, :string
      desc 'The password for authentication'
      config_param :password, :string, secret: true
    end

    ### Client ip/network authentication & per_host shared key
    config_section :client, param_name: :clients, required: false, multi: true do
      desc 'The IP address or host name of the client'
      config_param :host, :string, default: nil
      desc 'Network address specification'
      config_param :network, :string, default: nil
      desc 'Shared key per client'
      config_param :shared_key, :string, default: nil, secret: true
      desc 'Array of username.'
      config_param :users, :array, default: []
    end
  end

  # Validates parameter combinations and pre-resolves <client> sections
  # into {address:, shared_key:, users:} nodes consumed by check_ping.
  def configure(conf)
    super

    if @source_hostname_key
      # TODO: add test
      if @resolve_hostname.nil?
        @resolve_hostname = true
      elsif !@resolve_hostname # user specifies "false" in config
        raise Fluent::ConfigError, "resolve_hostname must be true with source_hostname_key"
      end
    end
    @enable_field_injection = @source_address_key || @source_hostname_key

    raise Fluent::ConfigError, "'tag' parameter must not be empty" if @tag && @tag.empty?
    raise Fluent::ConfigError, "'add_tag_prefix' parameter must not be empty" if @add_tag_prefix && @add_tag_prefix.empty?

    if @security
      if @security.user_auth && @security.users.empty?
        raise Fluent::ConfigError, "<user> sections required if user_auth enabled"
      end
      if !@security.allow_anonymous_source && @security.clients.empty?
        raise Fluent::ConfigError, "<client> sections required if allow_anonymous_source disabled"
      end

      @nodes = []
      @security.clients.each do |client|
        if client.host && client.network
          raise Fluent::ConfigError, "both of 'host' and 'network' are specified for client"
        end
        if !client.host && !client.network
          raise Fluent::ConfigError, "Either of 'host' and 'network' must be specified for client"
        end
        source = nil
        if client.host
          begin
            source = IPSocket.getaddress(client.host)
          rescue SocketError
            raise Fluent::ConfigError, "host '#{client.host}' cannot be resolved"
          end
        end
        source_addr = begin
                        IPAddr.new(source || client.network)
                      rescue ArgumentError
                        raise Fluent::ConfigError, "network '#{client.network}' address format is invalid"
                      end
        @nodes.push({
          address: source_addr,
          shared_key: (client.shared_key || @security.shared_key),
          users: client.users
        })
      end
    end

    if @send_keepalive_packet && @deny_keepalive
      raise Fluent::ConfigError, "both 'send_keepalive_packet' and 'deny_keepalive' cannot be set to true"
    end
  end

  def multi_workers_ready?
    true
  end

  HEARTBEAT_UDP_PAYLOAD = "\0"

  # Starts the TCP event server and the UDP heartbeat responder.
  def start
    super

    shared_socket = system_config.workers > 1
    log.info "listening port", port: @port, bind: @bind
    server_create_connection(
      :in_forward_server, @port,
      bind: @bind,
      shared: shared_socket,
      resolve_name: @resolve_hostname,
      linger_timeout: @linger_timeout,
      send_keepalive_packet: @send_keepalive_packet,
      backlog: @backlog,
      &method(:handle_connection)
    )

    server_create(:in_forward_server_udp_heartbeat, @port, shared: shared_socket, proto: :udp, bind: @bind, resolve_name: @resolve_hostname, max_bytes: 128) do |data, sock|
      log.trace "heartbeat udp data arrived", host: sock.remote_host, port: sock.remote_port, data: data
      begin
        sock.write HEARTBEAT_UDP_PAYLOAD
      rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR => e
        # FIX: `=> e` was missing here, so the log call below raised
        # NameError (undefined local `e`) whenever a heartbeat reply failed.
        log.trace "error while heartbeat response", host: sock.remote_host, error: e
      end
    end
  end

  # Drives the per-connection state machine: optional HELO/PING/PONG
  # handshake (when <security> is configured) followed by event exchange.
  def handle_connection(conn)
    send_data = ->(serializer, data){ conn.write serializer.call(data) }

    log.trace "connected fluent socket", addr: conn.remote_addr, port: conn.remote_port
    state = :established
    nonce = nil
    user_auth_salt = nil

    if @security
      # security enabled session MUST use MessagePack as serialization format
      state = :helo
      nonce = generate_salt
      user_auth_salt = generate_salt
      send_data.call(:to_msgpack.to_proc, generate_helo(nonce, user_auth_salt))
      state = :pingpong
    end

    log.trace "accepted fluent socket", addr: conn.remote_addr, port: conn.remote_port

    read_messages(conn) do |msg, chunk_size, serializer|
      case state
      when :pingpong
        success, reason_or_salt, shared_key = check_ping(msg, conn.remote_addr, user_auth_salt, nonce)
        unless success
          conn.on(:write_complete) { |c| c.close_after_write_complete }
          send_data.call(serializer, generate_pong(false, reason_or_salt, nonce, shared_key))
          next
        end
        send_data.call(serializer, generate_pong(true, reason_or_salt, nonce, shared_key))

        log.debug "connection established", address: conn.remote_addr, port: conn.remote_port
        state = :established
      when :established
        options = on_message(msg, chunk_size, conn)
        if options && r = response(options)
          log.trace "sent response to fluent socket", address: conn.remote_addr, response: r
          conn.on(:write_complete) { |c| c.close } if @deny_keepalive
          send_data.call(serializer, r)
        else
          if @deny_keepalive
            conn.close
          end
        end
      else
        raise "BUG: unknown session state: #{state}"
      end
    end
  end

  # Detects the wire format (JSON vs MessagePack) from the first byte of the
  # stream and yields each decoded message together with the number of bytes
  # consumed since the previous message and the matching serializer.
  def read_messages(conn, &block)
    feeder = nil
    serializer = nil
    bytes = 0
    conn.data do |data|
      # only for first call of callback
      unless feeder
        first = data[0]
        if first == '{' || first == '[' # json
          parser = Yajl::Parser.new
          parser.on_parse_complete = ->(obj){
            block.call(obj, bytes, serializer)
            bytes = 0
          }
          serializer = :to_json.to_proc
          feeder = ->(d){ parser << d }
        else # msgpack
          parser = Fluent::MessagePackFactory.msgpack_unpacker
          serializer = :to_msgpack.to_proc
          feeder = ->(d){
            parser.feed_each(d){|obj|
              block.call(obj, bytes, serializer)
              bytes = 0
            }
          }
        end
      end

      bytes += data.bytesize
      feeder.call(data)
    end
  end

  # Builds the at-least-once ack response when the client requested one
  # (i.e. the message carried a 'chunk' option); returns nil otherwise.
  def response(option)
    if option && option['chunk']
      return { 'ack' => option['chunk'] }
    end
    nil
  end

  # Dispatches one decoded message. Three shapes are supported:
  # PackedForward (entries is a String of packed events), Forward (entries
  # is an Array of [time, record] pairs) and Message (a single event).
  # Returns the option hash used for the ack response, or nil.
  def on_message(msg, chunk_size, conn)
    if msg.nil?
      # for future TCP heartbeat_request
      return
    end

    # TODO: raise an exception if broken chunk is generated by recoverable situation
    unless msg.is_a?(Array)
      log.warn "incoming chunk is broken:", host: conn.remote_host, msg: msg
      return
    end

    tag = msg[0]
    entries = msg[1]

    if @chunk_size_limit && (chunk_size > @chunk_size_limit)
      log.warn "Input chunk size is larger than 'chunk_size_limit', dropped:", tag: tag, host: conn.remote_host, limit: @chunk_size_limit, size: chunk_size
      return
    elsif @chunk_size_warn_limit && (chunk_size > @chunk_size_warn_limit)
      log.warn "Input chunk size is larger than 'chunk_size_warn_limit':", tag: tag, host: conn.remote_host, limit: @chunk_size_warn_limit, size: chunk_size
    end

    # Apply tag rewriting configured via 'tag' / 'add_tag_prefix'.
    tag = @tag.dup if @tag
    tag = "#{@add_tag_prefix}.#{tag}" if @add_tag_prefix

    case entries
    when String
      # PackedForward
      option = msg[2] || {}
      size = option['size'] || 0
      if option['compressed'] && option['compressed'] != 'text'
        es = Fluent::CompressedMessagePackEventStream.new(entries, nil, size.to_i, compress: option['compressed'].to_sym)
      else
        es = Fluent::MessagePackEventStream.new(entries, nil, size.to_i)
      end
      es = check_and_skip_invalid_event(tag, es, conn.remote_host) if @skip_invalid_event
      if @enable_field_injection
        es = add_source_info(es, conn)
      end
      router.emit_stream(tag, es)

    when Array
      # Forward
      es = if @skip_invalid_event
             check_and_skip_invalid_event(tag, entries, conn.remote_host)
           else
             es = Fluent::MultiEventStream.new
             entries.each { |e|
               record = e[1]
               next if record.nil?
               time = e[0]
               time = Fluent::EventTime.now if time.nil? || time.to_i == 0 # `to_i == 0` for empty EventTime
               es.add(time, record)
             }
             es
           end
      if @enable_field_injection
        es = add_source_info(es, conn)
      end
      router.emit_stream(tag, es)
      option = msg[2]

    else
      # Message
      time = msg[1]
      record = msg[2]
      if @skip_invalid_event && invalid_event?(tag, time, record)
        log.warn "got invalid event and drop it:", host: conn.remote_host, tag: tag, time: time, record: record
        return msg[3] # retry never succeeded so return ack and drop incoming event.
      end
      return if record.nil?
      time = Fluent::EventTime.now if time.to_i == 0
      if @enable_field_injection
        record[@source_address_key] = conn.remote_addr if @source_address_key
        record[@source_hostname_key] = conn.remote_host if @source_hostname_key
      end
      router.emit(tag, time, record)
      option = msg[3]
    end

    # return option for response
    option
  end

  def invalid_event?(tag, time, record)
    !((time.is_a?(Integer) || time.is_a?(::Fluent::EventTime)) && record.is_a?(Hash) && tag.is_a?(String))
  end

  # Rebuilds the event stream, dropping (and logging) malformed events.
  def check_and_skip_invalid_event(tag, es, remote_host)
    new_es = Fluent::MultiEventStream.new
    es.each { |time, record|
      if invalid_event?(tag, time, record)
        log.warn "skip invalid event:", host: remote_host, tag: tag, time: time, record: record
        next
      end
      new_es.add(time, record)
    }
    new_es
  end

  # Injects source_address_key / source_hostname_key fields into each record.
  # Only called when @enable_field_injection (at least one key configured).
  def add_source_info(es, conn)
    new_es = Fluent::MultiEventStream.new
    if @source_address_key && @source_hostname_key
      address = conn.remote_addr
      hostname = conn.remote_host
      es.each { |time, record|
        record[@source_address_key] = address
        record[@source_hostname_key] = hostname
        new_es.add(time, record)
      }
    elsif @source_address_key
      address = conn.remote_addr
      es.each { |time, record|
        record[@source_address_key] = address
        new_es.add(time, record)
      }
    elsif @source_hostname_key
      hostname = conn.remote_host
      es.each { |time, record|
        record[@source_hostname_key] = hostname
        new_es.add(time, record)
      }
    else
      raise "BUG: don't call this method in this case"
    end
    new_es
  end

  # Candidate <user> entries for this username, optionally restricted to the
  # usernames listed on the matched <client> node.
  def select_authenticate_users(node, username)
    if node.nil? || node[:users].empty?
      @security.users.select{|u| u.username == username}
    else
      @security.users.select{|u| node[:users].include?(u.username) && u.username == username}
    end
  end

  def generate_salt
    ::SecureRandom.random_bytes(16)
  end

  def generate_helo(nonce, user_auth_salt)
    log.debug "generating helo"
    # ['HELO', options(hash)]
    ['HELO', {'nonce' => nonce, 'auth' => (@security ? user_auth_salt : ''), 'keepalive' => !@deny_keepalive}]
  end

  # Verifies a PING message against the shared key (and user credentials if
  # user_auth is enabled). Returns [success, reason_or_salt, shared_key].
  def check_ping(message, remote_addr, user_auth_salt, nonce)
    log.debug "checking ping"
    # ['PING', self_hostname, shared_key_salt, sha512_hex(shared_key_salt + self_hostname + nonce + shared_key), username || '', sha512_hex(auth_salt + username + password) || '']
    unless message.size == 6 && message[0] == 'PING'
      # Third element added for consistency with the other failure returns.
      return false, 'invalid ping message', nil
    end
    _ping, hostname, shared_key_salt, shared_key_hexdigest, username, password_digest = message

    # IPAddr#include? raises for mismatched address families; treat that as
    # "no match" via the inline rescue.
    node = @nodes.find{|n| n[:address].include?(remote_addr) rescue false }
    if !node && !@security.allow_anonymous_source
      log.warn "Anonymous client disallowed", address: remote_addr, hostname: hostname
      return false, "anonymous source host '#{remote_addr}' denied", nil
    end

    shared_key = node ? node[:shared_key] : @security.shared_key
    serverside = Digest::SHA512.new.update(shared_key_salt).update(hostname).update(nonce).update(shared_key).hexdigest

    if shared_key_hexdigest != serverside
      log.warn "Shared key mismatch", address: remote_addr, hostname: hostname
      return false, 'shared_key mismatch', nil
    end

    if @security.user_auth
      users = select_authenticate_users(node, username)
      success = false
      users.each do |user|
        passhash = Digest::SHA512.new.update(user_auth_salt).update(username).update(user[:password]).hexdigest
        success ||= (passhash == password_digest)
      end
      unless success
        log.warn "Authentication failed", address: remote_addr, hostname: hostname, username: username
        return false, 'username/password mismatch', nil
      end
    end

    return true, shared_key_salt, shared_key
  end

  def generate_pong(auth_result, reason_or_salt, nonce, shared_key)
    log.debug "generating pong"
    # ['PONG', bool(authentication result), 'reason if authentication failed', self_hostname, sha512_hex(salt + self_hostname + nonce + sharedkey)]
    unless auth_result
      return ['PONG', false, reason_or_salt, '', '']
    end

    shared_key_digest_hex = Digest::SHA512.new.update(reason_or_salt).update(@security.self_hostname).update(nonce).update(shared_key).hexdigest
    ['PONG', true, '', @security.self_hostname, shared_key_digest_hex]
  end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/exec_util.rb | lib/fluent/plugin/exec_util.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/compat/exec_util'
module Fluent
  # obsolete: backward-compatibility alias. The implementation moved to
  # Fluent::Compat::ExecUtil; this constant is kept so third-party plugins
  # that still reference Fluent::ExecUtil continue to work.
  ExecUtil = Fluent::Compat::ExecUtil
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/parser_regexp.rb | lib/fluent/plugin/parser_regexp.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/parser'
module Fluent
  module Plugin
    # Parser plugin that extracts a record from each line via the named
    # captures of a user-supplied regular expression.
    class RegexpParser < Parser
      Plugin.register_parser("regexp", self)

      desc 'Regular expression for matching logs'
      config_param :expression, :regexp
      desc 'Ignore case in matching'
      config_param :ignorecase, :bool, default: false, deprecated: "Use /pattern/i instead, this option is no longer effective"
      desc 'Build regular expression as a multline mode'
      config_param :multiline, :bool, default: false, deprecated: "Use /pattern/m instead, this option is no longer effective"

      config_set_default :time_key, 'time'

      # Rebuilds the expression when the deprecated ignorecase/multiline
      # flags are given (compat layer) and requires at least one named capture.
      def configure(conf)
        super

        if @ignorecase || @multiline
          flags = 0
          flags |= Regexp::IGNORECASE if @ignorecase
          flags |= Regexp::MULTILINE if @multiline
          @expression = Regexp.compile(@expression.source, flags)
        end
        @regexp = @expression # For backward compatibility

        if @expression.named_captures.empty?
          raise Fluent::ConfigError, "No named captures in 'expression' parameter. The regexp must have at least one named capture"
        end
      end

      # Yields (time, record) built from the named captures, or (nil, nil)
      # when the expression does not match.
      def parse(text)
        matched = @expression.match(text)
        unless matched
          yield nil, nil
          return
        end

        # named_captures maps capture names to values; drop nil (unmatched)
        # groups, exactly like the previous per-name truthiness check.
        fields = matched.named_captures.compact
        time, record = convert_values(parse_time(fields), fields)
        yield time, record
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/formatter.rb | lib/fluent/plugin/formatter.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/env'
require 'fluent/plugin/base'
require 'fluent/plugin/owned_by_mixin'
require 'fluent/time'
module Fluent
  module Plugin
    # Abstract base class for formatter plugins; subclasses implement #format.
    class Formatter < Base
      include OwnedByMixin
      include TimeMixin::Formatter

      configured_in :format

      PARSER_TYPES = [:text_per_line, :text, :binary]

      # Declares how the formatted output is framed (one of PARSER_TYPES).
      def formatter_type
        :text_per_line
      end

      def format(tag, time, record)
        raise NotImplementedError, "Implement this method in child class"
      end
    end

    # Adapter exposing an arbitrary Proc through the Formatter interface.
    class ProcWrappedFormatter < Formatter
      def initialize(proc)
        super()
        @proc = proc
      end

      def format(tag, time, record)
        @proc.call(tag, time, record)
      end
    end

    module Newline
      # Mixin adding a 'newline' config param and resolving it to the
      # concrete line terminator string in #configure.
      module Mixin
        include Fluent::Configurable

        # CRLF on Windows, LF everywhere else.
        DEFAULT_NEWLINE = Fluent.windows? ? :crlf : :lf

        config_param :newline, :enum, list: [:lf, :crlf], default: DEFAULT_NEWLINE

        def configure(conf)
          super
          @newline = case newline
                     when :lf then "\n".freeze
                     when :crlf then "\r\n".freeze
                     end
        end
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/out_null.rb | lib/fluent/plugin/out_null.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/output'
module Fluent::Plugin
  # Output plugin that discards all events.
  # This plugin is for tests of non-buffered/buffered plugins.
  class NullOutput < Output
    Fluent::Plugin.register_output('null', self)

    desc "The parameter for testing to simulate output plugin which never succeed to flush."
    config_param :never_flush, :bool, default: false

    config_section :buffer do
      config_set_default :chunk_keys, ['tag']
      config_set_default :flush_at_shutdown, true
      config_set_default :chunk_limit_size, 10 * 1024
    end

    # Test hooks: feed_proc receives each chunk; delayed toggles delayed commit.
    attr_accessor :feed_proc, :delayed

    def initialize
      super
      @delayed = false
      @feed_proc = nil
    end

    def prefer_buffered_processing
      false
    end

    def prefer_delayed_commit
      @delayed
    end

    def multi_workers_ready?
      true
    end

    def process(tag, es)
      raise "failed to flush" if @never_flush
      # Do nothing: events are dropped on purpose.
    end

    def write(chunk)
      raise "failed to flush" if @never_flush
      @feed_proc&.call(chunk)
    end

    def try_write(chunk)
      raise "failed to flush" if @never_flush
      @feed_proc&.call(chunk)
      # not to commit chunks for testing
      # commit_write(chunk.unique_id)
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/compressable.rb | lib/fluent/plugin/compressable.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'stringio'
require 'zlib'
require 'zstd-ruby'
module Fluent
  module Plugin
    # Gzip/zstd (de)compression helpers shared by buffer and transport code.
    module Compressable
      # Compresses +data+. Returns the compressed String, or writes into
      # kwargs[:output_io] and returns that IO when one is supplied.
      def compress(data, type: :gzip, **kwargs)
        sink = kwargs[:output_io]
        io = sink || StringIO.new
        writer =
          case type
          when :gzip then Zlib::GzipWriter.new(io)
          when :zstd then Zstd::StreamWriter.new(io)
          else raise ArgumentError, "Unknown compression type: #{type}"
          end
        writer.write(data)
        writer.finish
        sink || io.string
      end

      # compressed_data is String like `compress(data1) + compress(data2) + ... + compress(dataN)`
      # https://www.ruby-forum.com/topic/971591#979503
      def decompress(compressed_data = nil, output_io: nil, input_io: nil, type: :gzip)
        if input_io && output_io
          io_decompress(input_io, output_io, type)
        elsif input_io
          buffer = StringIO.new
          io_decompress(input_io, buffer, type).string
        elsif compressed_data.nil? || compressed_data.empty?
          # 0-length (or missing) input decompresses to itself
          compressed_data
        elsif output_io
          # execute after checking compressed_data is empty or not
          io_decompress(StringIO.new(compressed_data), output_io, type)
        else
          string_decompress(compressed_data, type)
        end
      end

      private

      # Handles concatenated gzip members by rewinding past GzipReader's
      # unused trailing bytes and reading again until the input is drained.
      def string_decompress_gzip(compressed_data)
        input = StringIO.new(compressed_data)
        decoded = ''
        loop do
          reader = Zlib::GzipReader.new(input)
          decoded << reader.read
          trailing = reader.unused
          reader.finish
          input.pos -= trailing.length unless trailing.nil?
          break if input.eof?
        end
        decoded
      end

      def string_decompress_zstd(compressed_data)
        input = StringIO.new(compressed_data)
        reader = Zstd::StreamReader.new(input)
        decoded = ''
        # Zstd::StreamReader needs an explicit buffer size and exposes no
        # "unused" bytes, so read in fixed slices until the IO is drained.
        decoded << reader.read(1024) until input.eof?
        decoded
      end

      def string_decompress(compressed_data, type = :gzip)
        case type
        when :gzip then string_decompress_gzip(compressed_data)
        when :zstd then string_decompress_zstd(compressed_data)
        else raise ArgumentError, "Unknown compression type: #{type}"
        end
      end

      # IO-to-IO variant of string_decompress_gzip (same multi-member loop).
      def io_decompress_gzip(input, output)
        loop do
          reader = Zlib::GzipReader.new(input)
          output.write(reader.read)
          trailing = reader.unused
          reader.finish
          input.pos -= trailing.length unless trailing.nil?
          break if input.eof?
        end
        output
      end

      def io_decompress_zstd(input, output)
        reader = Zstd::StreamReader.new(input)
        loop do
          # Zstd::StreamReader needs an explicit buffer size; see above.
          output.write(reader.read(1024))
          break if input.eof?
        end
        output
      end

      def io_decompress(input, output, type = :gzip)
        case type
        when :gzip then io_decompress_gzip(input, output)
        when :zstd then io_decompress_zstd(input, output)
        else raise ArgumentError, "Unknown compression type: #{type}"
        end
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/metrics.rb | lib/fluent/plugin/metrics.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'socket'
require 'fluent/plugin/base'
require 'fluent/log'
require 'fluent/unique_id'
require 'fluent/plugin_id'
module Fluent
  module Plugin
    # Abstract base class for metrics plugins. A backend implements the
    # counter interface (get/inc/add + aliased set) or, when
    # use_gauge_metric is enabled, the full gauge interface
    # (additionally dec/sub/set). #configure verifies the contract.
    class Metrics < Base
      include PluginId
      include PluginLoggerMixin
      include UniqueId::Mixin

      DEFAULT_TYPE = 'local'

      configured_in :metrics

      config_param :default_labels, :hash, default: {agent: "Fluentd", hostname: "#{Socket.gethostname}"}
      config_param :labels, :hash, default: {}

      attr_reader :use_gauge_metric
      attr_reader :has_methods_for_gauge, :has_methods_for_counter

      def initialize
        super
        @has_methods_for_counter = false
        @has_methods_for_gauge = false
        @use_gauge_metric = false
      end

      def configure(conf)
        super

        if use_gauge_metric
          @has_methods_for_gauge = has_methods_for_gauge?
        else
          @has_methods_for_counter = has_methods_for_counter?
        end
      end

      # Some metrics should be counted by gauge.
      # ref: https://prometheus.io/docs/concepts/metric_types/#gauge
      def use_gauge_metric=(use_gauge_metric = false)
        @use_gauge_metric = use_gauge_metric
      end

      # This API is for cmetrics type; other backends may ignore it.
      def create(namespace:, subsystem:, name:, help_text:, labels: {})
      end

      # Abstract accessors/mutators: every backend must override these.
      %i[get inc dec].each do |name|
        define_method(name) do
          raise NotImplementedError, "Implement this method in child class"
        end
      end

      %i[add sub set].each do |name|
        define_method(name) do |value|
          raise NotImplementedError, "Implement this method in child class"
        end
      end

      private

      def has_methods_for_counter?
        own_methods = self.class.instance_methods(false)
        unless %i[get inc add].all? { |m| own_methods.include?(m) } && self.class.method_defined?(:set)
          raise "BUG: metrics plugin on counter mode MUST implement `get`, `inc`, `add` methods. And aliased `set` methods should be aliased from another method"
        end
        true
      end

      def has_methods_for_gauge?
        own_methods = self.class.instance_methods(false)
        unless %i[get inc add].all? { |m| own_methods.include?(m) } && %i[set dec sub].all? { |m| self.class.method_defined?(m) }
          raise "BUG: metrics plugin on gauge mode MUST implement `get`, `inc`, and `add` methods. And `dec`, `sub`, and `set` methods should be aliased from other methods"
        end
        true
      end
    end
  end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/filter_stdout.rb | lib/fluent/plugin/filter_stdout.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/filter'
module Fluent::Plugin
class StdoutFilter < Filter
Fluent::Plugin.register_filter('stdout', self)
helpers :formatter, :compat_parameters, :inject
DEFAULT_FORMAT_TYPE = 'stdout'
config_section :format do
config_set_default :@type, DEFAULT_FORMAT_TYPE
end
# for tests
attr_reader :formatter
def configure(conf)
compat_parameters_convert(conf, :inject, :formatter)
super
@formatter = formatter_create
end
def filter_stream(tag, es)
es.each { |time, record|
begin
r = inject_values_to_record(tag, time, record)
log.write @formatter.format(tag, time, r)
rescue => e
router.emit_error_event(tag, time, record, e)
end
}
log.flush
es
end
end
end
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | false |
fluent/fluentd | https://github.com/fluent/fluentd/blob/088cb0c98b56feeec0e6da70d1314a25ffd19d0a/lib/fluent/plugin/buffer.rb | lib/fluent/plugin/buffer.rb | #
# Fluentd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'fluent/plugin/base'
require 'fluent/plugin/owned_by_mixin'
require 'fluent/plugin_id'
require 'fluent/plugin_helper'
require 'fluent/unique_id'
require 'fluent/ext_monitor_require'
module Fluent
module Plugin
class Buffer < Base
include OwnedByMixin
include UniqueId::Mixin
include PluginId
include MonitorMixin
include PluginHelper::Mixin # for metrics
class BufferError < StandardError; end
class BufferOverflowError < BufferError; end
class BufferChunkOverflowError < BufferError; end # A record size is larger than chunk size limit
MINIMUM_APPEND_ATTEMPT_RECORDS = 10
DEFAULT_CHUNK_LIMIT_SIZE = 8 * 1024 * 1024 # 8MB
DEFAULT_TOTAL_LIMIT_SIZE = 512 * 1024 * 1024 # 512MB, same with v0.12 (BufferedOutput + buf_memory: 64 x 8MB)
DEFAULT_CHUNK_FULL_THRESHOLD = 0.95
configured_in :buffer
helpers_internal :metrics
# TODO: system total buffer limit size in bytes by SystemConfig
config_param :chunk_limit_size, :size, default: DEFAULT_CHUNK_LIMIT_SIZE
config_param :total_limit_size, :size, default: DEFAULT_TOTAL_LIMIT_SIZE
# If user specify this value and (chunk_size * queue_length) is smaller than total_size,
# then total_size is automatically configured to that value
config_param :queue_limit_length, :integer, default: nil
# optional new limitations
config_param :chunk_limit_records, :integer, default: nil
# if chunk size (or records) is 95% or more after #write, then that chunk will be enqueued
config_param :chunk_full_threshold, :float, default: DEFAULT_CHUNK_FULL_THRESHOLD
desc 'The max number of queued chunks.'
config_param :queued_chunks_limit_size, :integer, default: nil
desc 'Compress buffered data.'
config_param :compress, :enum, list: [:text, :gzip, :zstd], default: :text
desc 'If true, chunks are thrown away when unrecoverable error happens'
config_param :disable_chunk_backup, :bool, default: false
Metadata = Struct.new(:timekey, :tag, :variables, :seq) do
def initialize(timekey, tag, variables)
super(timekey, tag, variables, 0)
end
def dup_next
m = dup
m.seq = seq + 1
m
end
def empty?
timekey.nil? && tag.nil? && variables.nil?
end
def cmp_variables(v1, v2)
if v1.nil? && v2.nil?
return 0
elsif v1.nil? # v2 is non-nil
return -1
elsif v2.nil? # v1 is non-nil
return 1
end
# both of v1 and v2 are non-nil
v1_sorted_keys = v1.keys.sort
v2_sorted_keys = v2.keys.sort
if v1_sorted_keys != v2_sorted_keys
if v1_sorted_keys.size == v2_sorted_keys.size
v1_sorted_keys <=> v2_sorted_keys
else
v1_sorted_keys.size <=> v2_sorted_keys.size
end
else
v1_sorted_keys.each do |k|
a = v1[k]
b = v2[k]
if a && b && a != b
return a <=> b
elsif a && b || (!a && !b) # same value (including both are nil)
next
elsif a # b is nil
return 1
else # a is nil (but b is non-nil)
return -1
end
end
0
end
end
def <=>(o)
timekey2 = o.timekey
tag2 = o.tag
variables2 = o.variables
if (!!timekey ^ !!timekey2) || (!!tag ^ !!tag2) || (!!variables ^ !!variables2)
# One has value in a field, but another doesn't have value in same field
# This case occurs very rarely
if timekey == timekey2 # including the case of nil == nil
if tag == tag2
cmp_variables(variables, variables2)
elsif tag.nil?
-1
elsif tag2.nil?
1
else
tag <=> tag2
end
elsif timekey.nil?
-1
elsif timekey2.nil?
1
else
timekey <=> timekey2
end
else
# objects have values in same field pairs (comparison with non-nil and nil doesn't occur here)
(timekey <=> timekey2 || 0).nonzero? || # if `a <=> b` is nil, then both are nil
(tag <=> tag2 || 0).nonzero? ||
cmp_variables(variables, variables2)
end
end
# This is an optimization code. Current Struct's implementation is comparing all data.
# https://github.com/ruby/ruby/blob/0623e2b7cc621b1733a760b72af246b06c30cf96/struct.c#L1200-L1203
# Actually this overhead is very small but this class is generated *per chunk* (and used in hash object).
# This means that this class is one of the most called object in Fluentd.
# See https://github.com/fluent/fluentd/pull/2560
def hash
timekey.hash
end
end
# for metrics
attr_reader :stage_size_metrics, :stage_length_metrics, :queue_size_metrics, :queue_length_metrics
attr_reader :available_buffer_space_ratios_metrics, :total_queued_size_metrics
attr_reader :newest_timekey_metrics, :oldest_timekey_metrics
# for tests
attr_reader :stage, :queue, :dequeued, :queued_num
def initialize
super
@chunk_limit_size = nil
@total_limit_size = nil
@queue_limit_length = nil
@chunk_limit_records = nil
@stage = {} #=> Hash (metadata -> chunk) : not flushed yet
@queue = [] #=> Array (chunks) : already flushed (not written)
@dequeued = {} #=> Hash (unique_id -> chunk): already written (not purged)
@queued_num = {} # metadata => int (number of queued chunks)
@dequeued_num = {} # metadata => int (number of dequeued chunks)
@stage_length_metrics = nil
@stage_size_metrics = nil
@queue_length_metrics = nil
@queue_size_metrics = nil
@available_buffer_space_ratios_metrics = nil
@total_queued_size_metrics = nil
@newest_timekey_metrics = nil
@oldest_timekey_metrics = nil
@timekeys = Hash.new(0)
@enable_update_timekeys = false
@mutex = Mutex.new
end
# The metrics_create method defines getter methods named stage_byte_size and queue_byte_size.
# For compatibility, stage_size, stage_size=, queue_size, and queue_size= are still available.
def stage_size
@stage_size_metrics.get
end
def stage_size=(value)
@stage_size_metrics.set(value)
end
def queue_size
@queue_size_metrics.get
end
def queue_size=(value)
@queue_size_metrics.set(value)
end
def persistent?
false
end
def configure(conf)
super
unless @queue_limit_length.nil?
@total_limit_size = @chunk_limit_size * @queue_limit_length
end
@stage_length_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "stage_length",
help_text: 'Length of stage buffers', prefer_gauge: true)
@stage_length_metrics.set(0)
@stage_size_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "stage_byte_size",
help_text: 'Total size of stage buffers', prefer_gauge: true)
@stage_size_metrics.set(0) # Ensure zero.
@queue_length_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "queue_length",
help_text: 'Length of queue buffers', prefer_gauge: true)
@queue_length_metrics.set(0)
@queue_size_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "queue_byte_size",
help_text: 'Total size of queue buffers', prefer_gauge: true)
@queue_size_metrics.set(0) # Ensure zero.
@available_buffer_space_ratios_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "available_buffer_space_ratios",
help_text: 'Ratio of available space in buffer', prefer_gauge: true)
@available_buffer_space_ratios_metrics.set(100) # Default is 100%.
@total_queued_size_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "total_queued_size",
help_text: 'Total size of stage and queue buffers', prefer_gauge: true)
@total_queued_size_metrics.set(0)
@newest_timekey_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "newest_timekey",
help_text: 'Newest timekey in buffer', prefer_gauge: true)
@oldest_timekey_metrics = metrics_create(namespace: "fluentd", subsystem: "buffer", name: "oldest_timekey",
help_text: 'Oldest timekey in buffer', prefer_gauge: true)
end
def enable_update_timekeys
@enable_update_timekeys = true
end
def start
super
@stage, @queue = resume
@stage.each_pair do |metadata, chunk|
@stage_size_metrics.add(chunk.bytesize)
end
@queue.each do |chunk|
@queued_num[chunk.metadata] ||= 0
@queued_num[chunk.metadata] += 1
@queue_size_metrics.add(chunk.bytesize)
end
update_timekeys
log.debug "buffer started", instance: self.object_id, stage_size: @stage_size_metrics.get, queue_size: @queue_size_metrics.get
end
def close
super
synchronize do
log.debug "closing buffer", instance: self.object_id
@dequeued.each_pair do |chunk_id, chunk|
chunk.close
end
until @queue.empty?
@queue.shift.close
end
@stage.each_pair do |metadata, chunk|
chunk.close
end
end
end
def terminate
super
@dequeued = @stage = @queue = @queued_num = nil
@stage_length_metrics = @stage_size_metrics = @queue_length_metrics = @queue_size_metrics = nil
@available_buffer_space_ratios_metrics = @total_queued_size_metrics = nil
@newest_timekey_metrics = @oldest_timekey_metrics = nil
@timekeys.clear
end
def storable?
@total_limit_size > @stage_size_metrics.get + @queue_size_metrics.get
end
## TODO: for back pressure feature
# def used?(ratio)
# @total_limit_size * ratio > @stage_size_metrics.get + @queue_size_metrics.get
# end
def resume
# return {}, []
raise NotImplementedError, "Implement this method in child class"
end
def generate_chunk(metadata)
raise NotImplementedError, "Implement this method in child class"
end
def new_metadata(timekey: nil, tag: nil, variables: nil)
Metadata.new(timekey, tag, variables)
end
# Keep this method for existing code
def metadata(timekey: nil, tag: nil, variables: nil)
Metadata.new(timekey, tag, variables)
end
def timekeys
@timekeys.keys
end
# metadata MUST have consistent object_id for each variation
# data MUST be Array of serialized events, or EventStream
# metadata_and_data MUST be a hash of { metadata => data }
def write(metadata_and_data, format: nil, size: nil, enqueue: false)
return if metadata_and_data.size < 1
raise BufferOverflowError, "buffer space has too many data" unless storable?
log.on_trace { log.trace "writing events into buffer", instance: self.object_id, metadata_size: metadata_and_data.size }
operated_chunks = []
unstaged_chunks = {} # metadata => [chunk, chunk, ...]
chunks_to_enqueue = []
staged_bytesizes_by_chunk = {}
# track internal BufferChunkOverflowError in write_step_by_step
buffer_chunk_overflow_errors = []
begin
# sort metadata to get lock of chunks in same order with other threads
metadata_and_data.keys.sort.each do |metadata|
data = metadata_and_data[metadata]
write_once(metadata, data, format: format, size: size) do |chunk, adding_bytesize, error|
chunk.mon_enter # add lock to prevent to be committed/rollbacked from other threads
operated_chunks << chunk
if chunk.staged?
#
# https://github.com/fluent/fluentd/issues/2712
# write_once is supposed to write to a chunk only once
# but this block **may** run multiple times from write_step_by_step and previous write may be rollbacked
# So we should be counting the stage_size only for the last successful write
#
staged_bytesizes_by_chunk[chunk] = adding_bytesize
elsif chunk.unstaged?
unstaged_chunks[metadata] ||= []
unstaged_chunks[metadata] << chunk
end
if error && !error.empty?
buffer_chunk_overflow_errors << error
end
end
end
return if operated_chunks.empty?
# Now, this thread acquires many locks of chunks... getting buffer-global lock causes dead lock.
# Any operations needs buffer-global lock (including enqueueing) should be done after releasing locks.
first_chunk = operated_chunks.shift
# Following commits for other chunks also can finish successfully if the first commit operation
# finishes without any exceptions.
# In most cases, #commit just requires very small disk spaces, so major failure reason are
# permission errors, disk failures and other permanent(fatal) errors.
begin
first_chunk.commit
if enqueue || first_chunk.unstaged? || chunk_size_full?(first_chunk)
chunks_to_enqueue << first_chunk
end
first_chunk.mon_exit
rescue
operated_chunks.unshift(first_chunk)
raise
end
errors = []
# Buffer plugin estimates there's no serious error cause: will commit for all chunks either way
operated_chunks.each do |chunk|
begin
chunk.commit
if enqueue || chunk.unstaged? || chunk_size_full?(chunk)
chunks_to_enqueue << chunk
end
chunk.mon_exit
rescue => e
chunk.rollback
chunk.mon_exit
errors << e
end
end
# All locks about chunks are released.
#
# Now update the stage, stage_size with proper locking
# FIX FOR stage_size miscomputation - https://github.com/fluent/fluentd/issues/2712
#
staged_bytesizes_by_chunk.each do |chunk, bytesize|
chunk.synchronize do
synchronize { @stage_size_metrics.add(bytesize) }
log.on_trace { log.trace { "chunk #{chunk.path} size_added: #{bytesize} new_size: #{chunk.bytesize}" } }
end
end
chunks_to_enqueue.each do |c|
if c.staged? && (enqueue || chunk_size_full?(c))
m = c.metadata
enqueue_chunk(m)
if unstaged_chunks[m] && !unstaged_chunks[m].empty?
u = unstaged_chunks[m].pop
u.synchronize do
if u.unstaged? && !chunk_size_full?(u)
# `u.metadata.seq` and `m.seq` can be different but Buffer#enqueue_chunk expect them to be the same value
u.metadata.seq = 0
synchronize {
@stage[m] = u.staged!
@stage_size_metrics.add(u.bytesize)
}
end
end
end
elsif c.unstaged?
enqueue_unstaged_chunk(c)
else
# previously staged chunk is already enqueued, closed or purged.
# no problem.
end
end
operated_chunks.clear if errors.empty?
if errors.size > 0
log.warn "error occurs in committing chunks: only first one raised", errors: errors.map(&:class)
raise errors.first
end
ensure
operated_chunks.each do |chunk|
chunk.rollback rescue nil # nothing possible to do for #rollback failure
if chunk.unstaged?
chunk.purge rescue nil # to prevent leakage of unstaged chunks
end
chunk.mon_exit rescue nil # this may raise ThreadError for chunks already committed
end
unless buffer_chunk_overflow_errors.empty?
# Notify delayed BufferChunkOverflowError here
raise BufferChunkOverflowError, buffer_chunk_overflow_errors.join(", ")
end
end
end
def queue_full?
synchronize { @queue.size } >= @queued_chunks_limit_size
end
def queued_records
synchronize { @queue.reduce(0){|r, chunk| r + chunk.size } }
end
def queued?(metadata = nil, optimistic: false)
if optimistic
optimistic_queued?(metadata)
else
synchronize do
optimistic_queued?(metadata)
end
end
end
def enqueue_chunk(metadata)
log.on_trace { log.trace "enqueueing chunk", instance: self.object_id, metadata: metadata }
chunk = synchronize do
@stage.delete(metadata)
end
return nil unless chunk
chunk.synchronize do
synchronize do
if chunk.empty?
chunk.close
else
chunk.metadata.seq = 0 # metadata.seq should be 0 for counting @queued_num
@queue << chunk
@queued_num[metadata] = @queued_num.fetch(metadata, 0) + 1
chunk.enqueued!
end
bytesize = chunk.bytesize
@stage_size_metrics.sub(bytesize)
@queue_size_metrics.add(bytesize)
end
end
nil
end
def enqueue_unstaged_chunk(chunk)
log.on_trace { log.trace "enqueueing unstaged chunk", instance: self.object_id, metadata: chunk.metadata }
synchronize do
chunk.synchronize do
metadata = chunk.metadata
metadata.seq = 0 # metadata.seq should be 0 for counting @queued_num
@queue << chunk
@queued_num[metadata] = @queued_num.fetch(metadata, 0) + 1
chunk.enqueued!
end
@queue_size_metrics.add(chunk.bytesize)
end
end
def update_timekeys
synchronize do
chunks = @stage.values
chunks.concat(@queue)
@timekeys = chunks.each_with_object({}) do |chunk, keys|
if chunk.metadata&.timekey
t = chunk.metadata.timekey
keys[t] = keys.fetch(t, 0) + 1
end
end
end
end
# At flush_at_shutdown, all staged chunks should be enqueued for buffer flush. Set true to force_enqueue for it.
def enqueue_all(force_enqueue = false)
log.on_trace { log.trace "enqueueing all chunks in buffer", instance: self.object_id }
update_timekeys if @enable_update_timekeys
if block_given?
synchronize{ @stage.keys }.each do |metadata|
return if !force_enqueue && queue_full?
# NOTE: The following line might cause data race depending on Ruby implementations except CRuby
# cf. https://github.com/fluent/fluentd/pull/1721#discussion_r146170251
chunk = @stage[metadata]
next unless chunk
v = yield metadata, chunk
enqueue_chunk(metadata) if v
end
else
synchronize{ @stage.keys }.each do |metadata|
return if !force_enqueue && queue_full?
enqueue_chunk(metadata)
end
end
end
def dequeue_chunk
return nil if @queue.empty?
log.on_trace { log.trace "dequeueing a chunk", instance: self.object_id }
synchronize do
chunk = @queue.shift
# this buffer is dequeued by other thread just before "synchronize" in this thread
return nil unless chunk
@dequeued[chunk.unique_id] = chunk
@queued_num[chunk.metadata] -= 1 # BUG if nil, 0 or subzero
@dequeued_num[chunk.metadata] ||= 0
@dequeued_num[chunk.metadata] += 1
log.trace "chunk dequeued", instance: self.object_id, metadata: chunk.metadata
chunk
end
end
def takeback_chunk(chunk_id)
log.on_trace { log.trace "taking back a chunk", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id) }
synchronize do
chunk = @dequeued.delete(chunk_id)
return false unless chunk # already purged by other thread
@queue.unshift(chunk)
log.on_trace { log.trace "chunk taken back", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: chunk.metadata }
@queued_num[chunk.metadata] += 1 # BUG if nil
@dequeued_num[chunk.metadata] -= 1
end
true
end
def purge_chunk(chunk_id)
metadata = nil
synchronize do
chunk = @dequeued.delete(chunk_id)
return nil unless chunk # purged by other threads
metadata = chunk.metadata
log.on_trace { log.trace "purging a chunk", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: metadata }
begin
bytesize = chunk.bytesize
chunk.purge
@queue_size_metrics.sub(bytesize)
rescue => e
log.error "failed to purge buffer chunk", chunk_id: dump_unique_id_hex(chunk_id), error_class: e.class, error: e
log.error_backtrace
end
@dequeued_num[chunk.metadata] -= 1
if metadata && !@stage[metadata] && (!@queued_num[metadata] || @queued_num[metadata] < 1) && @dequeued_num[metadata].zero?
@queued_num.delete(metadata)
@dequeued_num.delete(metadata)
end
log.on_trace { log.trace "chunk purged", instance: self.object_id, chunk_id: dump_unique_id_hex(chunk_id), metadata: metadata }
end
nil
end
def clear_queue!
log.on_trace { log.trace "clearing queue", instance: self.object_id }
synchronize do
until @queue.empty?
begin
q = @queue.shift
evacuate_chunk(q)
log.trace("purging a chunk in queue"){ {id: dump_unique_id_hex(chunk.unique_id), bytesize: chunk.bytesize, size: chunk.size} }
q.purge
rescue => e
log.error "unexpected error while clearing buffer queue", error_class: e.class, error: e
log.error_backtrace
end
end
@queue_size_metrics.set(0)
end
end
def evacuate_chunk(chunk)
# Overwrite this on demand.
#
# Note: Difference from the `backup` feature.
# The `backup` feature is for unrecoverable errors, mainly for bad chunks.
# On the other hand, this feature is for normal chunks.
# The main motivation for this feature is to enable recovery by evacuating buffer files
# when the retry limit is reached due to external factors such as network issues.
#
# Note: Difference from the `secondary` feature.
# The `secondary` feature is not suitable for recovery.
# It can be difficult to recover files made by `out_secondary_file` because the metadata
# is lost.
# For file buffers, the easiest way for recovery is to evacuate the chunk files as is.
# Once the issue is recovered, we can put back the chunk files, and restart Fluentd to
# load them.
# This feature enables it.
end
def chunk_size_over?(chunk)
chunk.bytesize > @chunk_limit_size || (@chunk_limit_records && chunk.size > @chunk_limit_records)
end
def chunk_size_full?(chunk)
chunk.bytesize >= @chunk_limit_size * @chunk_full_threshold || (@chunk_limit_records && chunk.size >= @chunk_limit_records * @chunk_full_threshold)
end
class ShouldRetry < StandardError; end
# write once into a chunk
# 1. append whole data into existing chunk
# 2. commit it & return unless chunk_size_over?
# 3. enqueue existing chunk & retry whole method if chunk was not empty
# 4. go to step_by_step writing
def write_once(metadata, data, format: nil, size: nil, &block)
return if data.empty?
stored = false
adding_bytesize = nil
chunk = synchronize { @stage[metadata] ||= generate_chunk(metadata).staged! }
enqueue_chunk_before_retry = false
chunk.synchronize do
# retry this method if chunk is already queued (between getting chunk and entering critical section)
raise ShouldRetry unless chunk.staged?
empty_chunk = chunk.empty?
original_bytesize = chunk.bytesize
begin
if format
serialized = format.call(data)
chunk.concat(serialized, size ? size.call : data.size)
else
chunk.append(data, compress: @compress)
end
adding_bytesize = chunk.bytesize - original_bytesize
if chunk_size_over?(chunk)
if format && empty_chunk
if chunk.bytesize > @chunk_limit_size
log.warn "chunk bytes limit exceeds for an emitted event stream: #{adding_bytesize}bytes"
else
log.warn "chunk size limit exceeds for an emitted event stream: #{chunk.size}records"
end
end
chunk.rollback
if format && !empty_chunk
# Event streams should be appended into a chunk at once
# as far as possible, to improve performance of formatting.
# Event stream may be a MessagePackEventStream. We don't want to split it into
# 2 or more chunks (except for a case that the event stream is larger than chunk limit).
enqueue_chunk_before_retry = true
raise ShouldRetry
end
else
stored = true
end
rescue
chunk.rollback
raise
end
if stored
block.call(chunk, adding_bytesize)
end
end
unless stored
# try step-by-step appending if data can't be stored into existing a chunk in non-bulk mode
#
# 1/10 size of original event stream (splits_count == 10) seems enough small
# to try emitting events into existing chunk.
# it does not matter to split event stream into very small splits, because chunks have less
# overhead to write data many times (even about file buffer chunks).
write_step_by_step(metadata, data, format, 10, &block)
end
rescue ShouldRetry
enqueue_chunk(metadata) if enqueue_chunk_before_retry
retry
end
# EventStream can be split into many streams
# because (es1 + es2).to_msgpack_stream == es1.to_msgpack_stream + es2.to_msgpack_stream
# 1. split event streams into many (10 -> 100 -> 1000 -> ...) chunks
# 2. append splits into the staged chunks as much as possible
# 3. create unstaged chunk and append rest splits -> repeat it for all splits
def write_step_by_step(metadata, data, format, splits_count, &block)
splits = []
if splits_count > data.size
splits_count = data.size
end
slice_size = if data.size % splits_count == 0
data.size / splits_count
else
data.size / (splits_count - 1)
end
slice_origin = 0
while slice_origin < data.size
splits << data.slice(slice_origin, slice_size)
slice_origin += slice_size
end
# This method will append events into the staged chunk at first.
# Then, will generate chunks not staged (not queued) to append rest data.
staged_chunk_used = false
modified_chunks = []
modified_metadata = metadata
get_next_chunk = ->(){
if staged_chunk_used
# Staging new chunk here is bad idea:
# Recovering whole state including newly staged chunks is much harder than current implementation.
modified_metadata = modified_metadata.dup_next
generate_chunk(modified_metadata)
else
synchronize { @stage[modified_metadata] ||= generate_chunk(modified_metadata).staged! }
end
}
writing_splits_index = 0
enqueue_chunk_before_retry = false
while writing_splits_index < splits.size
chunk = get_next_chunk.call
errors = []
# The chunk must be locked until being passed to &block.
chunk.mon_enter
modified_chunks << {chunk: chunk, adding_bytesize: 0, errors: errors}
raise ShouldRetry unless chunk.writable?
staged_chunk_used = true if chunk.staged?
original_bytesize = committed_bytesize = chunk.bytesize
begin
while writing_splits_index < splits.size
split = splits[writing_splits_index]
formatted_split = format ? format.call(split) : nil
if split.size == 1 # Check BufferChunkOverflowError
determined_bytesize = nil
if @compress != :text
determined_bytesize = nil
elsif formatted_split
determined_bytesize = formatted_split.bytesize
elsif split.first.respond_to?(:bytesize)
determined_bytesize = split.first.bytesize
end
if determined_bytesize && determined_bytesize > @chunk_limit_size
# It is a obvious case that BufferChunkOverflowError should be raised here.
# But if it raises here, already processed 'split' or
# the proceeding 'split' will be lost completely.
# So it is a last resort to delay raising such a exception
errors << "a #{determined_bytesize} bytes record (nth: #{writing_splits_index}) is larger than buffer chunk limit size (#{@chunk_limit_size})"
writing_splits_index += 1
next
end
if determined_bytesize.nil? || chunk.bytesize + determined_bytesize > @chunk_limit_size
# The split will (might) cause size over so keep already processed
# 'split' content here (allow performance regression a bit).
chunk.commit
committed_bytesize = chunk.bytesize
end
end
if format
chunk.concat(formatted_split, split.size)
else
chunk.append(split, compress: @compress)
end
adding_bytes = chunk.bytesize - committed_bytesize
if chunk_size_over?(chunk) # split size is larger than difference between size_full? and size_over?
chunk.rollback
committed_bytesize = chunk.bytesize
| ruby | Apache-2.0 | 088cb0c98b56feeec0e6da70d1314a25ffd19d0a | 2026-01-04T15:37:30.958053Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.