repo stringlengths 5 92 | file_url stringlengths 80 287 | file_path stringlengths 5 197 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:37:27 2026-01-04 17:58:21 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_websearch.rb | spec/support/adapters/chat_requests_with_websearch.rb | require 'spec_helper'
RSpec.shared_examples 'chat requests with web search' do | options = {} |

  # there is no practical way to confirm that a web search actually happened other
  # than to look for citations, so adapters under test are expected to enable them
  context 'where there is one message which requires a web search' do
    it 'responds with the appropriate generated text' do
      adapter = send( options[ :adapter ] || :adapter )
      conversation = create_conversation( "what is the top news item today?\n" )
      response = create_and_make_chat_request( adapter, conversation )

      expect( response.success? ).to be( true ), response_error_description( response )
      result = response.result
      expect( result ).to be_a( Intelligence::ChatResult )
      expect( result.choices ).not_to be_nil
      expect( result.choices.length ).to eq( 1 )
      expect( result.choices.first ).to be_a( Intelligence::ChatResultChoice )

      choice = result.choices.first
      expect( choice.end_reason ).to eq( :ended )
      expect( choice.message ).to be_a( Intelligence::Message )

      message_contents = choice.message.contents
      expect( message_contents ).not_to be_nil
      expect( message_contents.length ).to be >= 1
      has_web_reference =
        message_contents.any? { | content | content.is_a?( Intelligence::MessageContent::WebReference ) }
      expect( has_web_reference ).to be true
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_tools.rb | spec/support/adapters/stream_requests_with_tools.rb | require 'spec_helper'
require 'debug'
RSpec.shared_examples 'stream requests with tools' do | options = {} |

  # a tool with no arguments; used to verify that the adapter can request a tool call
  let( :get_location_tool ) {
    Intelligence::Tool.build! do
      name :get_location
      description \
        "The get_location tool will return the users city, state or province and country."
    end
  }

  # a tool with required and optional arguments; used to verify that the adapter
  # streams tool parameters
  let( :get_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description "The get_weather tool will return the current weather in a given locality."
      argument name: :city, type: 'string', required: true do
        description "The city or town for which the current weather should be returned."
      end
      argument name: :state, type: 'string' do
        description \
          "The state or province for which the current weather should be returned. If this is " \
          "not provided the largest or most prominent city with the given name, in the given " \
          "country or in the world, will be assumed."
      end
      argument name: :country, type: 'string' do
        description \
          "The country for which the given weather should be returned. If this is not provided " \
          "the largest or most prominent city with the given name will be returned."
      end
    end
  }

  # asserts that an intermediate streamed result is a well formed, single choice
  # chat result carrying a message with contents
  def expect_well_formed_streamed_result( result )
    expect( result ).to be_a( Intelligence::ChatResult )
    expect( result.choices ).not_to be_nil
    expect( result.choices.length ).to eq( 1 )
    expect( result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    choice = result.choices.first
    expect( choice.message ).to be_a( Intelligence::Message )
    expect( choice.message.contents ).not_to be_nil
  end

  # asserts that the final response succeeded and ended because a tool was called,
  # returning the collected ToolCall contents
  def expect_tool_calls( response )
    expect( response.success? ).to be( true ), response_error_description( response )
    choice = response.result.choices.first
    expect( choice.end_reason ).to eq( :tool_called )
    contents = choice.message.contents
    expect( contents.length ).to be > 0
    contents.select { | content | content.is_a?( Intelligence::MessageContent::ToolCall ) }
  end

  context 'where there is a single message and a single tool' do
    it 'streams a tool request' do
      conversation = create_conversation( "Where am I located?\n" )
      adapter = send( options[ :adapter ] || :adapter )
      response = create_and_make_stream_request( adapter, conversation, tools: [ get_location_tool ] ) do | result |
        expect_well_formed_streamed_result( result )
      end
      tool_calls = expect_tool_calls( response )
      expect( tool_calls.length ).to eq( 1 )
      expect( tool_calls.first.tool_name ).to eq( 'get_location' )
    end
  end

  context 'where there are multiple messages and a single tool' do
    it 'streams a tool request' do
      conversation = create_conversation(
        "I am in Seattle, WA\n",
        "Got it! Let me know if you need any local insights or information related to Seattle!\n",
        "What is the current weather?\n"
      )
      adapter = send( options[ :adapter ] || :adapter )
      response = create_and_make_stream_request( adapter, conversation, tools: [ get_weather_tool ] ) do | result |
        expect_well_formed_streamed_result( result )
      end
      tool_calls = expect_tool_calls( response )
      expect( tool_calls.length ).to eq( 1 )
      tool_call = tool_calls.first
      expect( tool_call.tool_name ).to eq( 'get_weather' )
      expect( tool_call.tool_parameters ).to be_a( Hash )
      expect( tool_call.tool_parameters[ :city ] ).to match( /seattle/i )
    end
  end

  context 'where there is a single message and multiple tools' do
    it 'streams the correct tool request' do
      conversation = create_conversation( "Where am I located?" )
      adapter = send( options[ :adapter ] || :adapter )
      tools = [ get_location_tool, get_weather_tool ]
      response = create_and_make_stream_request( adapter, conversation, tools: tools ) do | result |
        expect_well_formed_streamed_result( result )
      end
      tool_calls = expect_tool_calls( response )
      expect( tool_calls.length ).to eq( 1 )
      expect( tool_calls.first.tool_name ).to eq( 'get_location' )
    end
  end

  context 'where there are multiple messages and multiple tools' do
    it 'streams the correct tool request' do
      conversation = create_conversation(
        "I am in Seattle, WA\n",
        "Got it! Let me know if you need any local insights or information related to Seattle!\n",
        "What is the current weather?\n"
      )
      adapter = send( options[ :adapter ] || :adapter )
      tools = [ get_location_tool, get_weather_tool ]
      response = create_and_make_stream_request( adapter, conversation, tools: tools ) do | result |
        expect_well_formed_streamed_result( result )
      end
      tool_calls = expect_tool_calls( response )
      expect( tool_calls.length ).to eq( 1 )
      tool_call = tool_calls.first
      expect( tool_call.tool_name ).to eq( 'get_weather' )
      expect( tool_call.tool_parameters ).to be_a( Hash )
      expect( tool_call.tool_parameters[ :city ] ).to match( /seattle/i )
    end
  end

  context 'where there is a conversation with a single tool' do
    context 'which is composed of a text message, a tool request, and a tool response' do
      it 'streams the appropriate generated text' do
        conversation = Intelligence::Conversation.build do
          system_message do
            content text: "You are a helpful assistant who answers questions succinctly."
          end
          message role: :user do
            content text: 'Where am I located?'
          end
          message role: :assistant do
            content type: :tool_call do
              tool_call_id "MpfiuoRaM"
              tool_name :get_location
            end
          end
          message role: :user do
            content type: :tool_result do
              tool_call_id "MpfiuoRaM"
              tool_name :get_location
              tool_result "Seattle, WA, USA"
            end
          end
        end
        adapter = send( options[ :adapter ] || :adapter )
        tools = [ get_location_tool ]
        response = create_and_make_stream_request( adapter, conversation, tools: tools ) do | result |
          expect_well_formed_streamed_result( result )
        end
        expect( response.success? ).to be( true ), response_error_description( response )
        choice = response.result.choices.first
        # the tool result is already in the conversation, so the model should answer
        # with plain text rather than calling the tool again
        expect( choice.end_reason ).to eq( :ended )
        contents = choice.message.contents
        expect( contents.length ).to be > 0
        expect( contents.last ).to be_a( Intelligence::MessageContent::Text )
        expect( contents.last.text ).to match( /seattle/i )
      end
    end
  end

  context 'where there is a conversation with multiple tools' do
    context 'which is composed of a text message, a tool request, and a tool response' do
      it 'streams the appropriate generated text' do
        conversation = Intelligence::Conversation.build do
          system_message do
            content text: "You are a helpful assistant who answers questions succinctly."
          end
          message role: :user do
            content text: 'What is the current weather?'
          end
          message role: :assistant do
            content type: :tool_call do
              tool_call_id "MpfiuoRaM"
              tool_name "get_location"
            end
          end
          message role: :user do
            content type: :tool_result do
              tool_call_id "MpfiuoRaM"
              tool_name "get_location"
              tool_result "Seattle, WA, USA"
            end
          end
        end
        adapter = send( options[ :adapter ] || :adapter )
        tools = [ get_location_tool, get_weather_tool ]
        response = create_and_make_stream_request( adapter, conversation, tools: tools ) do | result |
          expect_well_formed_streamed_result( result )
        end
        expect( response.success? ).to be( true ), response_error_description( response )
        choice = response.result.choices.first
        # with the location resolved, the model is expected to follow up by calling
        # the weather tool for the resolved city
        expect( choice.end_reason ).to eq( :tool_called )
        contents = choice.message.contents
        expect( contents.length ).to be > 0
        expect( contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        tool_call = contents.last
        expect( tool_call.tool_name ).to eq( 'get_weather' )
        expect( tool_call.tool_parameters ).to be_a( Hash )
        expect( tool_call.tool_parameters[ :city ] ).to match( /seattle/i )
      end
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_tools.rb | spec/support/adapters/chat_requests_with_tools.rb | require 'spec_helper'
require 'debug'
RSpec.shared_examples 'chat requests with tools' do | options = {} |

  # a tool with no arguments; used to verify that the adapter can request a tool call
  let( :get_location_tool ) {
    Intelligence::Tool.build! do
      name :get_location
      description \
        "The get_location tool will return the users city, state or province and country."
    end
  }

  # a tool with required and optional arguments; used to verify that the adapter
  # returns tool parameters
  let( :get_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description "The get_weather tool will return the current weather in a given locality."
      argument name: :city, type: 'string', required: true do
        description "The city or town for which the current weather should be returned."
      end
      argument name: :state, type: 'string' do
        description \
          "The state or province for which the current weather should be returned. If this is " \
          "not provided the largest or most prominent city with the given name, in the given " \
          "country or in the world, will be assumed."
      end
      argument name: :country, type: 'string' do
        description \
          "The country for which the given weather should be returned. If this is not provided " \
          "the largest or most prominent city with the given name will be returned."
      end
    end
  }

  # asserts that the response is a successful, well formed, single choice chat
  # result and returns that single choice
  def expect_successful_choice( response )
    expect( response.success? ).to be( true ), response_error_description( response )
    expect( response.result ).to be_a( Intelligence::ChatResult )
    expect( response.result.choices ).not_to be_nil
    expect( response.result.choices.length ).to eq( 1 )
    expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    response.result.choices.first
  end

  # asserts that the given choice ended because a tool was called and returns the
  # trailing ToolCall content
  def expect_tool_call( choice )
    expect( choice.end_reason ).to eq( :tool_called )
    expect( choice.message ).to be_a( Intelligence::Message )
    expect( choice.message.contents ).not_to be_nil
    expect( choice.message.contents.length ).to be > 0
    expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
    choice.message.contents.last
  end

  context 'where there is a single message and a single tool' do
    it 'responds with a tool request' do
      conversation = create_conversation( "Where am I located?\n" )
      response = create_and_make_chat_request(
        send( options[ :adapter ] || :adapter ),
        conversation,
        tools: [ get_location_tool ]
      )
      choice = expect_successful_choice( response )
      tool_call = expect_tool_call( choice )
      expect( tool_call.tool_name ).to eq( 'get_location' )
    end
  end

  context 'where there are multiple messages and a single tool' do
    it 'responds with a tool request' do
      conversation = create_conversation(
        "I am in Seattle, WA\n",
        "Got it! Let me know if you need any local insights or information related to Seattle!\n",
        "What is the current weather?\n"
      )
      response = create_and_make_chat_request(
        send( options[ :adapter ] || :adapter ),
        conversation,
        tools: [ get_weather_tool ]
      )
      choice = expect_successful_choice( response )
      tool_call = expect_tool_call( choice )
      expect( tool_call.tool_name ).to eq( 'get_weather' )
      expect( tool_call.tool_parameters ).to be_a( Hash )
      expect( tool_call.tool_parameters[ :city ] ).to match( /seattle/i )
    end
  end

  context 'where there is a single message and multiple tools' do
    it 'responds with the correct tool request' do
      conversation = create_conversation( "Where am I located?" )
      response = create_and_make_chat_request(
        send( options[ :adapter ] || :adapter ),
        conversation,
        tools: [ get_location_tool, get_weather_tool ]
      )
      choice = expect_successful_choice( response )
      tool_call = expect_tool_call( choice )
      expect( tool_call.tool_name ).to eq( 'get_location' )
    end
  end

  context 'where there are multiple messages and multiple tools' do
    it 'responds with the correct tool request' do
      conversation = create_conversation(
        "I am in Seattle, WA\n",
        "Got it! Let me know if you need any local insights or information related to Seattle!\n",
        "What is the current weather?\n"
      )
      response = create_and_make_chat_request(
        send( options[ :adapter ] || :adapter ),
        conversation,
        tools: [ get_location_tool, get_weather_tool ]
      )
      choice = expect_successful_choice( response )
      tool_call = expect_tool_call( choice )
      expect( tool_call.tool_name ).to eq( 'get_weather' )
      expect( tool_call.tool_parameters ).to be_a( Hash )
      expect( tool_call.tool_parameters[ :city ] ).to match( /seattle/i )
    end
  end

  context 'where there is a conversation with a single tool' do
    context 'which is composed of a text message, a tool request, and a tool response' do
      it 'responds with the appropriate generated text' do
        conversation = Intelligence::Conversation.build do
          system_message do
            content text: "You are a helpful assistant who answers questions succinctly."
          end
          message role: :user do
            content text: 'Where am I located?'
          end
          message role: :assistant do
            content type: :tool_call do
              tool_call_id "MpfiuoRaM"
              tool_name :get_location
            end
          end
          message role: :user do
            content type: :tool_result do
              tool_call_id "MpfiuoRaM"
              tool_name :get_location
              tool_result "Seattle, WA, USA"
            end
          end
        end
        response = create_and_make_chat_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_location_tool ]
        )
        choice = expect_successful_choice( response )
        # the tool result is already in the conversation, so the model should answer
        # with plain text rather than calling the tool again
        expect( choice.end_reason ).to eq( :ended )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to eq( 1 )
        expect( choice.message.contents[ 0 ] ).to be_a( Intelligence::MessageContent::Text )
        expect( choice.message.contents[ 0 ].text ).to match( /seattle/i )
      end
    end
  end

  context 'where there is a conversation with multiple tools' do
    context 'which is composed of a text message, a tool request, and a tool response' do
      it 'responds with the appropriate generated text' do
        conversation = Intelligence::Conversation.build do
          system_message do
            content text: "You are a helpful assistant who answers questions succinctly."
          end
          message role: :user do
            content text: 'What is the current weather?'
          end
          message role: :assistant do
            content type: :tool_call do
              tool_call_id "MpfiuoRaM"
              tool_name "get_location"
            end
          end
          message role: :user do
            content type: :tool_result do
              tool_call_id "MpfiuoRaM"
              tool_name "get_location"
              tool_result "Seattle, WA, USA"
            end
          end
        end
        response = create_and_make_chat_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_location_tool, get_weather_tool ]
        )
        choice = expect_successful_choice( response )
        # with the location resolved, the model is expected to follow up by calling
        # the weather tool for the resolved city
        tool_call = expect_tool_call( choice )
        expect( tool_call.tool_name ).to eq( 'get_weather' )
        expect( tool_call.tool_parameters ).to be_a( Hash )
        expect( tool_call.tool_parameters[ :city ] ).to match( /seattle/i )
      end
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_stop_sequence.rb | spec/support/adapters/stream_requests_with_stop_sequence.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests with stop sequence' do | options = {} |

  context 'where there is a single message that ends at stop sequence' do
    it 'streams generated text up to the stop sequence' do
      conversation = create_conversation( "count to twenty in words, all lower case, one word per line\n" )
      streamed_text = ''
      response = create_and_make_stream_request( adapter_with_stop_sequence, conversation ) do | result |
        expect( result ).to be_a( Intelligence::ChatResult )
        expect( result.choices ).not_to be_nil
        expect( result.choices.length ).to eq( 1 )
        first_choice = result.choices.first
        expect( first_choice.message ).to be_a( Intelligence::Message )
        expect( first_choice.message.contents ).not_to be_nil
        streamed_text += message_contents_to_text( first_choice.message )
      end

      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).to be_a( Intelligence::ChatResult )
      expect( response.result.choices ).not_to be_nil
      expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
      expected_end_reason = options[ :end_reason ] || :ended
      expect( response.result.choices.first.end_reason ).to eq( expected_end_reason )
      # the stop sequence should cut generation off after "four"
      expect( streamed_text ).to match( /four/i )
      expect( streamed_text ).not_to match( /five/i )
    end
  end

  context 'where there are multiple messages and the last ends at stop sequence' do
    it 'streams generated text up to the stop sequence' do
      conversation = create_conversation(
        "count to five in words, all lower case, one word per line\n",
        "one\ntwo\nthree\nfour\nfive\n",
        "count to twenty in words, all lower case, one word per line\n"
      )
      streamed_text = ''
      response = create_and_make_stream_request( adapter_with_stop_sequence, conversation ) do | result |
        expect( result ).to be_a( Intelligence::ChatResult )
        expect( result.choices ).not_to be_nil
        expect( result.choices.length ).to eq( 1 )
        first_choice = result.choices.first
        expect( first_choice.message ).to be_a( Intelligence::Message )
        expect( first_choice.message.contents ).not_to be_nil
        streamed_text += message_contents_to_text( first_choice.message )
      end

      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).to be_a( Intelligence::ChatResult )
      expect( response.result.choices ).not_to be_nil
      expect( response.result.choices.length ).to eq( 1 )
      expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
      expected_end_reason = options[ :end_reason ] || :ended
      expect( response.result.choices.first.end_reason ).to eq( expected_end_reason )
      # the stop sequence should cut generation off after "four"
      expect( streamed_text ).to match( /four/i )
      expect( streamed_text ).not_to match( /five/i )
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_invalid_key.rb | spec/support/adapters/chat_requests_with_invalid_key.rb | require 'spec_helper'
RSpec.shared_examples 'chat requests with invalid key' do | options = {} |

  context 'where the adapter is configured with an invalid key' do
    it 'responds with an appropriate error' do
      adapter = send( options[ :adapter ] || :adapter_with_invalid_key )
      conversation = create_conversation( "respond only with the word 'hello'\n" )
      response = create_and_make_chat_request( adapter, conversation )

      expect( response.success? ).to be( false )
      expect( response.result ).to be_a( Intelligence::ChatErrorResult )
      expect( response.result.error_type ).not_to be_nil
      expected_error_type = options[ :error_type ] || 'authentication_error'
      expect( response.result.error_type ).to(
        eq( expected_error_type ),
        response.result.error_description
      )
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_binary_encoded_images.rb | spec/support/adapters/stream_requests_with_binary_encoded_images.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests with binary encoded images' do | options = {} |

  let( :binary_content_of_red_balloon ) {
    build_binary_content( fixture_file_path( 'single-red-balloon.png' ) )
  }

  let( :binary_content_of_three_balloons ) {
    build_binary_content( fixture_file_path( 'three-balloons.png' ) )
  }

  # asserts that an intermediate streamed result is well formed and returns the
  # text it carries so callers can accumulate the full streamed output
  def streamed_result_text( result )
    expect( result ).to be_a( Intelligence::ChatResult )
    expect( result.choices ).not_to be_nil
    expect( result.choices.length ).to eq( 1 )
    choice = result.choices.first
    expect( choice.message ).to be_a( Intelligence::Message )
    expect( choice.message.contents ).not_to be_nil
    message_contents_to_text( choice.message )
  end

  # asserts that a completed stream request succeeded and ended with the expected
  # end reason; `options` is not visible inside a def, so the reason is passed in
  def expect_successful_stream_response( response, end_reason )
    expect( response.success? ).to be( true ), response_error_description( response )
    expect( response.result ).to be_a( Intelligence::ChatResult )
    expect( response.result.choices ).not_to be_nil
    expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    expect( response.result.choices.first.end_reason ).to eq( end_reason )
  end

  context 'where there is a single message and a binary encoded image' do
    it 'streams the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( binary_content_of_red_balloon )
      text = ''
      response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
        text += streamed_result_text( result )
      end
      expect_successful_stream_response( response, options[ :end_reason ] || :ended )
      expect( text ).to match( /balloon/i ), "Expected text to include 'balloon' but got '#{text}'."
    end
  end

  context 'where there are multiple messages with the first including a binary encoded image' do
    it 'streams the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( binary_content_of_red_balloon )
      conversation.messages << build_text_message( :assistant, "balloon\n" )
      conversation.messages << build_text_message( :user, "what color?\nrespond in less than 16 words" )
      text = ''
      response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
        text += streamed_result_text( result )
      end
      expect_successful_stream_response( response, options[ :end_reason ] || :ended )
      expect( text ).to match( /red/i ), "Expected text to include 'red' but got '#{text}'."
    end
  end

  context 'where there are multiple messages with each including a binary encoded image' do
    it 'streams the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( binary_content_of_red_balloon )
      conversation.messages << build_text_message( :assistant, "one red balloon\n" )
      # note: prompt typo "respons" corrected to "respond"
      message = build_text_message( :user, "what about this image? respond in less than 16 words\n" )
      message.append_content( binary_content_of_three_balloons )
      conversation.messages << message
      text = ''
      response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
        text += streamed_result_text( result )
      end
      # also include the error description here for consistency with the sibling
      # examples (this example previously omitted it)
      expect_successful_stream_response( response, options[ :end_reason ] || :ended )
      expect( text ).to match( /balloons/i ), "Expected text to include 'balloons' but got '#{text}'."
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_binary_encoded_images.rb | spec/support/adapters/chat_requests_with_binary_encoded_images.rb | require 'spec_helper'
# Shared examples verifying that an adapter can answer chat requests whose
# messages carry images as binary ( base64 ) encoded content.
#
# Options:
#   :adapter    - name of the helper method returning the adapter under test
#                 ( defaults to :adapter )
#   :end_reason - the end reason the provider is expected to report
#                 ( defaults to :ended )
RSpec.shared_examples 'chat requests with binary encoded images' do | options = {} |

  let( :binary_content_of_red_balloon ) {
    build_binary_content( fixture_file_path( 'single-red-balloon.png' ) )
  }
  let( :binary_content_of_three_balloons ) {
    build_binary_content( fixture_file_path( 'three-balloons.png' ) )
  }

  context 'where there is a single message and a binary encoded image' do
    it 'responds with the appropriate generated text' do
      # typo fixed in prompt: 'less that' -> 'less than'
      conversation = create_conversation( "identify this image; respond in less than 16 words\n" )
      conversation.messages.last.append_content( binary_content_of_red_balloon )
      response = create_and_make_chat_request( send( options[ :adapter ] || :adapter ), conversation )
      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).to be_a( Intelligence::ChatResult )
      expect( response.result.choices ).not_to be_nil
      expect( response.result.choices.length ).to eq( 1 )
      expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
      choice = response.result.choices.first
      expect( choice.end_reason ).to eq( options[ :end_reason ] || :ended )
      expect( choice.message ).to be_a( Intelligence::Message )
      expect( choice.message.contents ).not_to be_nil
      expect( choice.message.contents.length ).to eq( 1 )
      expect( message_contents_to_text( choice.message ) ).to( match( /balloon/i ) )
    end
  end

  context 'where there are multiple messages with the first including a binary encoded image' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( binary_content_of_red_balloon )
      conversation.messages << build_text_message( :assistant, "balloon\n" )
      # fixed: the prompt previously ended with '\m' ( a literal 'm' ) where a
      # newline was intended, and misspelled 'less than'
      conversation.messages << build_text_message( :user, "what color?\nrespond in less than 16 words\n" )
      response = create_and_make_chat_request( send( options[ :adapter ] || :adapter ) , conversation )
      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).to be_a( Intelligence::ChatResult )
      expect( response.result.choices ).not_to be_nil
      expect( response.result.choices.length ).to eq( 1 )
      expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
      choice = response.result.choices.first
      expect( choice.end_reason ).to eq( options[ :end_reason ] || :ended )
      expect( choice.message ).to be_a( Intelligence::Message )
      expect( choice.message.contents ).not_to be_nil
      expect( choice.message.contents.length ).to eq( 1 )
      expect( message_contents_to_text( choice.message ) ).to(
        match( /red/i )
      )
    end
  end

  context 'where there are multiple messages with each including a binary encoded image' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( binary_content_of_red_balloon )
      conversation.messages << build_text_message( :assistant, "one red balloon\n" )
      # typo fixed in prompt: 'less that' -> 'less than'
      message = build_text_message( :user, "what about this image?\nrespond in less than 16 words\n" )
      message.append_content( binary_content_of_three_balloons )
      conversation.messages << message
      response = create_and_make_chat_request( send( options[ :adapter ] || :adapter ) , conversation )
      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).to be_a( Intelligence::ChatResult )
      expect( response.result.choices ).not_to be_nil
      expect( response.result.choices.length ).to eq( 1 )
      expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
      choice = response.result.choices.first
      expect( choice.end_reason ).to eq( options[ :end_reason ] || :ended )
      expect( choice.message ).to be_a( Intelligence::Message )
      expect( choice.message.contents ).not_to be_nil
      expect( choice.message.contents.length ).to eq( 1 )
      expect( message_contents_to_text( choice.message ) ).to(
        match( /balloons/i )
      )
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_websearch.rb | spec/support/adapters/stream_requests_with_websearch.rb | require 'spec_helper'
# Shared examples verifying that an adapter surfaces web search results as
# WebReference contents when streaming.
#
# Options:
#   :adapter - name of the helper method returning the adapter under test
#              ( defaults to :adapter )
RSpec.shared_examples 'stream requests with web search' do | options = {} |

  # note that there is no practical way to determine if a web search happened
  # except to verify citations, so the expectation of the adapter is that it
  # will enable citations
  context 'where there is one message which requires a web search' do
    it 'responds with the appropriate generated text' do
      adapter_under_test = send( options[ :adapter ] || :adapter )
      conversation = create_conversation_without_system_message( "what is the top news item today?\n" )

      response = create_and_make_stream_request( adapter_under_test, conversation ) do | partial |
        expect( partial ).to be_a( Intelligence::ChatResult )
        expect( partial.choices ).not_to be_nil
        expect( partial.choices.length ).to eq( 1 )
        partial_choice = partial.choices.first
        expect( partial_choice.message ).to be_a( Intelligence::Message )
        expect( partial_choice.message.contents ).not_to be_nil
      end

      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).not_to be_nil
      expect( response.result.choices.length ).to eq( 1 )

      final_choice = response.result.choices.first
      expect( final_choice.message ).to be_a( Intelligence::Message )
      expect( final_choice.message.contents ).not_to be_nil

      final_contents = final_choice.message.contents
      expect( final_contents.length ).to be >= 1
      # at least one content item must be a citation ( web reference )
      expect(
        final_contents.any? { | content | content.is_a?( Intelligence::MessageContent::WebReference ) }
      ).to be true
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_parallel_tools.rb | spec/support/adapters/stream_requests_with_parallel_tools.rb | require 'spec_helper'
require 'debug'
# Shared examples verifying that an adapter can stream multiple tool calls
# requested in parallel within a single completion.
#
# Options:
#   :adapter - name of the helper method returning the adapter under test
#              ( defaults to :adapter )
RSpec.shared_examples 'stream requests with parallel tools' do | options = {} |

  let( :get_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description "The get_weather tool will return the current weather in a given locality."
      argument name: :city, type: 'string', required: true do
        description "The city or town for which the current weather should be returned."
      end
      argument name: :state, type: 'string' do
        description \
          "The state or province for which the current weather should be returned. If this is " \
          "not provided the largest or most prominent city with the given name, in the given " \
          "country or in the world, will be assumed."
      end
      argument name: :country, type: 'string' do
        description \
          "The country for which the given weather should be returned. If this is not provided " \
          "the largest or most prominent city with the given name will be returned."
      end
    end
  }

  context 'where there is a single message requiring parallel tool calls' do
    it 'streams multiple tool calls' do
      conversation = create_conversation(
        "What is the weather in London, Paris and Rome right now?\n"
      )
      adapter = send( options[ :adapter ] || :adapter )
      tools = [ get_weather_tool ]
      response = create_and_make_stream_request( adapter, conversation, tools: tools ) do | result |
        expect( result ).to be_a( Intelligence::ChatResult )
        expect( result.choices ).not_to be_nil
        expect( result.choices.length ).to eq( 1 )
        expect( result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = result.choices.first
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
      end
      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).not_to be_nil
      expect( response.result ).to respond_to( :message )
      expect( response.result.message ).not_to be_nil
      choice = response.result.choices.first
      expect( choice.end_reason ).to eq( :tool_called )
      contents = response.result.message.contents
      expect( contents ).not_to be_nil
      expect( contents.length ).to be >= 3
      tool_calls = contents.select { | content | content.is_a?( Intelligence::MessageContent::ToolCall ) }
      expect( tool_calls.length ).to be >= 3
      tool_calls.each do | tool_call |
        expect( tool_call.tool_name ).to eq( 'get_weather' )
        expect( tool_call.tool_parameters ).to be_a( Hash )
        # fixed: the regex previously ended with a trailing '|' which created
        # an empty alternation matching ANY string, making this assertion
        # vacuous
        expect( tool_call.tool_parameters[ :city ] ).to match( /london|paris|rome/i )
      end
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests.rb | spec/support/adapters/stream_requests.rb | require 'spec_helper'
# Shared examples exercising basic streaming chat behaviour, with and without
# a system message, for single and multi message conversations.
RSpec.shared_examples 'stream requests' do

  # Streams the given conversation through the adapter under test, asserting
  # the shape of every partial result, and returns the response together with
  # the accumulated streamed text.
  def stream_conversation( conversation )
    streamed_text = +''
    response = create_and_make_stream_request( adapter, conversation ) do | partial |
      expect( partial ).to be_a( Intelligence::ChatResult )
      expect( partial.choices ).not_to be_nil
      expect( partial.choices.length ).to eq( 1 )
      partial_choice = partial.choices.first
      expect( partial_choice.message ).to be_a( Intelligence::Message )
      expect( partial_choice.message.contents ).not_to be_nil
      streamed_text << message_contents_to_text( partial_choice.message )
    end
    [ response, streamed_text ]
  end

  # Asserts that the final response carries a single ended text choice whose
  # text is exactly the streamed text and matches the given pattern. When
  # verify_choice_count is true the number of choices is asserted as well
  # ( preserving the original per-example assertion sets exactly ).
  def expect_streamed_reply( response, streamed_text, pattern, verify_choice_count: false )
    expect( response.success? ).to be( true ), response_error_description( response )
    expect( response.result ).to be_a( Intelligence::ChatResult )
    expect( response.result.choices ).not_to be_nil
    expect( response.result.choices.length ).to eq( 1 ) if verify_choice_count
    expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    expect( response.result.choices.first.end_reason ).to eq( :ended )
    reply = response.result.choices.first.message
    expect( reply ).not_to be_nil
    expect( reply.contents ).not_to be_nil
    expect( reply.contents.length ).to eq( 1 )
    expect( reply.contents.last ).to be_a( Intelligence::MessageContent::Text )
    expect( reply.contents.last.text ).to eq( streamed_text )
    expect( streamed_text ).to match( pattern )
  end

  context 'where there is no system message' do
    context 'where there is a single message' do
      it 'streams the appropriate generated text' do
        conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
        response, streamed_text = stream_conversation( conversation )
        expect_streamed_reply( response, streamed_text, /hello/i )
      end
    end
    context 'where there are multiple messages' do
      it 'streams the appropriate generated text' do
        conversation = create_conversation_without_system_message(
          "the secret word is 'blue'\n",
          "ok\n",
          "what is the secret word?\nrespond with the word only\n"
        )
        response, streamed_text = stream_conversation( conversation )
        expect_streamed_reply( response, streamed_text, /blue/i, verify_choice_count: true )
      end
    end
  end

  context 'where there is a system message' do
    context 'where there is a single message' do
      it 'streams the appropriate generated text' do
        conversation = create_conversation( "respond only with the word 'hello'\n" )
        response, streamed_text = stream_conversation( conversation )
        expect_streamed_reply( response, streamed_text, /hello/i )
      end
    end
    context 'where there are multiple messages' do
      it 'streams the appropriate generated text' do
        conversation = create_conversation(
          "the secret word is 'blue'\n",
          "ok\n",
          "what is the secret word?\nrespond with the word only\n"
        )
        response, streamed_text = stream_conversation( conversation )
        expect_streamed_reply( response, streamed_text, /blue/i, verify_choice_count: true )
      end
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_parallel_tools.rb | spec/support/adapters/chat_requests_with_parallel_tools.rb | require 'spec_helper'
require 'debug'
# Shared examples verifying that an adapter can request multiple tool calls
# in parallel within a single ( non streaming ) chat completion.
#
# Options:
#   :adapter - name of the helper method returning the adapter under test
#              ( defaults to :adapter )
RSpec.shared_examples 'chat requests with parallel tools' do | options = {} |

  let( :get_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description "The get_weather tool will return the current weather in a given locality."
      argument name: :city, type: 'string', required: true do
        description "The city or town for which the current weather should be returned."
      end
      argument name: :state, type: 'string' do
        description \
          "The state or province for which the current weather should be returned. If this is " \
          "not provided the largest or most prominent city with the given name, in the given " \
          "country or in the world, will be assumed."
      end
      argument name: :country, type: 'string' do
        description \
          "The country for which the given weather should be returned. If this is not provided " \
          "the largest or most prominent city with the given name will be returned."
      end
    end
  }

  context 'where there is a single message requiring parallel tool calls' do
    it 'responds with multiple tool requests' do
      conversation = create_conversation(
        "What is the weather in London, Paris and Rome right now?\n"
      )
      response = create_and_make_chat_request(
        send( options[ :adapter ] || :adapter ),
        conversation,
        tools: [ get_weather_tool ]
      )
      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).to be_a( Intelligence::ChatResult )
      expect( response.result.choices ).not_to be_nil
      expect( response.result.choices.length ).to eq( 1 )
      expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
      choice = response.result.choices.first
      expect( choice.end_reason ).to eq( :tool_called )
      expect( choice.message ).to be_a( Intelligence::Message )
      expect( choice.message.contents ).not_to be_nil
      expect( choice.message.contents.length ).to be >= 3
      # the last three contents are expected to be the parallel tool calls,
      # one for each city in the prompt
      choice.message.contents.last( 3 ).each do | content |
        expect( content ).to be_a( Intelligence::MessageContent::ToolCall )
        expect( content.tool_name ).to eq( 'get_weather' )
        expect( content.tool_parameters[ :city ] ).to match( /paris|london|rome/i )
      end
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_tools_multiturn.rb | spec/support/adapters/stream_requests_with_tools_multiturn.rb | require 'spec_helper'
require 'debug'
# Shared examples verifying a multi turn tool use conversation over streaming
# requests: the model must first call get_location, then — once given the
# location as a tool result — call get_weather.
#
# Options:
#   :adapter - name of the helper method returning the adapter under test
#              ( defaults to :adapter )
RSpec.shared_examples 'stream requests with tools multiturn' do | options = {} |

  let( :get_location_tool ) {
    Intelligence::Tool.build! do
      name :get_location
      description \
        "The get_location tool will return the users city, state or province and country."
    end
  }

  let( :get_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description "The get_weather tool will return the current weather in a given locality."
      argument name: :city, type: 'string', required: true do
        description "The city or town for which the current weather should be returned."
      end
      argument name: :state, type: 'string' do
        description \
          "The state or province for which the current weather should be returned. If this is " \
          "not provided the largest or most prominent city with the given name, in the given " \
          "country or in the world, will be assumed."
      end
      argument name: :country, type: 'string' do
        description \
          "The country for which the given weather should be returned. If this is not provided " \
          "the largest or most prominent city with the given name will be returned."
      end
    end
  }

  context 'where there is a conversation with multiple tools' do
    context 'which requires multiple tools to complete' do
      it 'responds with the appropriate generated text' do
        conversation = Intelligence::Conversation.build do
          system_message do
            content text: "You are a helpful assistant who answers questions succinctly."
          end
          message role: :user do
            content text: 'What is the current weather?'
          end
        end

        # first round: the model has no location, so it must call get_location
        response = create_and_make_stream_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_location_tool, get_weather_tool ]
        )
        expect( response.success? ).to be( true ), response_error_description( response )
        expect( response.result ).to be_a( Intelligence::ChatResult )
        expect( response.result.choices ).not_to be_nil
        expect( response.result.choices.length ).to eq( 1 )
        expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = response.result.choices.first
        expect( choice.end_reason ).to eq( :tool_called )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to be > 0
        expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        tool_call = choice.message.contents.last
        expect( tool_call.tool_name ).to eq( 'get_location' )

        # feed the tool result back and continue the conversation
        conversation << choice.message
        conversation << Intelligence::Message.build!( role: :user ) do
          content type: :tool_result do
            tool_call_id tool_call.tool_call_id
            tool_name tool_call.tool_name
            tool_result( "Seattle, WA, USA" )
          end
        end

        # second round: with the location known, the model must call get_weather
        response = create_and_make_stream_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_location_tool, get_weather_tool ]
        )
        expect( response.success? ).to be( true ), response_error_description( response )
        expect( response.result ).to be_a( Intelligence::ChatResult )
        expect( response.result.choices ).not_to be_nil
        expect( response.result.choices.length ).to eq( 1 )
        expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = response.result.choices.first
        expect( choice.end_reason ).to eq( :tool_called )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to be > 0
        expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        tool_call = choice.message.contents.last
        expect( tool_call.tool_name ).to eq( 'get_weather' )
      end
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_binary_encoded_text.rb | spec/support/adapters/chat_requests_with_binary_encoded_text.rb | require 'spec_helper'
# Shared examples verifying that the vision adapter can answer chat requests
# whose messages carry text files as binary encoded content.
RSpec.shared_examples 'chat requests with binary encoded text' do

  let( :binary_content_of_text_file ) {
    build_binary_content( fixture_file_path( 'this-is-a-test.txt' ) )
  }
  let( :binary_content_of_universe_text_file ) {
    build_binary_content( fixture_file_path( 'universe.txt' ) )
  }

  # Issues the chat request against the vision adapter, asserts the response
  # carries exactly one ended text choice and returns that choice's text.
  def request_single_text_reply( conversation )
    response = create_and_make_chat_request( vision_adapter, conversation )
    expect( response.success? ).to be( true ), response_error_description( response )
    expect( response.result ).to be_a( Intelligence::ChatResult )
    expect( response.result.choices ).not_to be_nil
    expect( response.result.choices.length ).to eq( 1 )
    expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    reply_choice = response.result.choices.first
    expect( reply_choice.end_reason ).to eq( :ended )
    expect( reply_choice.message ).to be_a( Intelligence::Message )
    expect( reply_choice.message.contents ).not_to be_nil
    expect( reply_choice.message.contents.length ).to eq( 1 )
    message_contents_to_text( reply_choice.message )
  end

  context 'where there is a single message and binary encoded text' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "what is the text in the attached file?\n" )
      conversation.messages.last.append_content( binary_content_of_text_file )
      text = request_single_text_reply( conversation )
      expect( text ).to(
        match( /this is a test/i ),
        "Expected 'this is a test', received '#{text}'."
      )
    end
  end

  context 'where there are multiple messages with the first including binary encoded text' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "what is the text in the attached file?\n" )
      conversation.messages.last.append_content( binary_content_of_text_file )
      conversation.messages << build_text_message( :assistant, "this is a test\n" )
      conversation.messages << build_text_message( :user, "how many words is that?\nreply with just a number\n" )
      text = request_single_text_reply( conversation )
      expect( text ).to(
        match( /4/i ),
        "Expected '4', received '#{text}'."
      )
    end
  end

  context 'where there are multiple messages with each including a binary encoded text' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "what is the text in the attached file?\n" )
      conversation.messages.last.append_content( binary_content_of_text_file )
      conversation.messages << build_text_message( :assistant, "this is a test\n" )
      message = build_text_message( :user, "what about this file? what is it about?\nrespond in less than 16 words\n" )
      message.append_content( binary_content_of_universe_text_file )
      conversation.messages << message
      text = request_single_text_reply( conversation )
      expect( text ).to(
        match( /universe/i ),
        "Expected 'universe', received '#{text}'."
      )
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_invalid_model.rb | spec/support/adapters/chat_requests_with_invalid_model.rb | require 'spec_helper'
# Shared examples verifying that an adapter configured with a nonexistent
# model reports the expected error rather than succeeding.
#
# Options:
#   :adapter    - name of the helper method returning the misconfigured
#                 adapter ( defaults to :adapter_with_invalid_model )
#   :error_type - the error type the provider is expected to report
#                 ( defaults to 'not_found_error' )
RSpec.shared_examples 'chat requests with invalid model' do | options = {} |

  context 'where the adapter is configured with an invalid model' do
    it 'responds with an appropriate error' do
      misconfigured_adapter = send( options[ :adapter ] || :adapter_with_invalid_model )
      conversation = create_conversation( "respond only with the word 'hello'\n" )
      response = create_and_make_chat_request( misconfigured_adapter, conversation )
      expect( response.success? ).to be( false )
      error_result = response.result
      expect( error_result ).to be_a( Intelligence::ChatErrorResult )
      expect( error_result.error_type ).not_to be_nil
      expected_error_type = options[ :error_type ] || 'not_found_error'
      expect( error_result.error_type ).to(
        eq( expected_error_type ),
        error_result.error_description
      )
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_file_images.rb | spec/support/adapters/chat_requests_with_file_images.rb | require 'spec_helper'
# Shared examples verifying that an adapter can answer chat requests whose
# messages reference images by URI ( file contents ).
#
# Options:
#   :adapter    - name of the helper method returning the adapter under test
#                 ( defaults to :adapter )
#   :end_reason - the end reason the provider is expected to report
#                 ( defaults to :ended )
RSpec.shared_examples 'chat requests with file images' do | options = {} |

  let( :file_content_of_red_balloon ) {
    Intelligence::MessageContent::File.build do
      uri "https://github.com/EndlessInternational/intelligence/blob/main/spec/fixtures/files/single-red-balloon.png?raw=true"
    end
  }
  let( :file_content_of_three_balloons ) {
    Intelligence::MessageContent::File.build do
      uri "https://github.com/EndlessInternational/intelligence/blob/main/spec/fixtures/files/three-balloons.png?raw=true"
    end
  }

  # Issues the chat request against the given adapter, asserts the response
  # carries exactly one text choice ending with the expected end reason and
  # returns that choice's text. The adapter and end reason are passed as
  # arguments because plain def helpers do not close over the shared example
  # block's options parameter.
  def request_image_reply( conversation, adapter_instance, expected_end_reason )
    response = create_and_make_chat_request( adapter_instance, conversation )
    expect( response.success? ).to be( true ), response_error_description( response )
    expect( response.result ).to be_a( Intelligence::ChatResult )
    expect( response.result.choices ).not_to be_nil
    expect( response.result.choices.length ).to eq( 1 )
    expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    reply_choice = response.result.choices.first
    expect( reply_choice.end_reason ).to eq( expected_end_reason )
    expect( reply_choice.message ).to be_a( Intelligence::Message )
    expect( reply_choice.message.contents ).not_to be_nil
    expect( reply_choice.message.contents.length ).to eq( 1 )
    message_contents_to_text( reply_choice.message )
  end

  context 'where there is a single message and a file image' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( file_content_of_red_balloon )
      text = request_image_reply(
        conversation,
        send( options[ :adapter ] || :adapter ),
        options[ :end_reason ] || :ended
      )
      expect( text ).to( match( /balloon/i ) )
    end
  end

  context 'where there are multiple messages with the first including a file image' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( file_content_of_red_balloon )
      conversation.messages << build_text_message( :assistant, "balloon\n" )
      conversation.messages << build_text_message( :user, "what color?\n" )
      text = request_image_reply(
        conversation,
        send( options[ :adapter ] || :adapter ),
        options[ :end_reason ] || :ended
      )
      expect( text ).to(
        match( /red/i )
      )
    end
  end

  context 'where there are multiple messages with each including a file image' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation( "identify this image; all lower case\n" )
      conversation.messages.last.append_content( file_content_of_red_balloon )
      conversation.messages << build_text_message( :assistant, "one red balloon\n" )
      message = build_text_message( :user, "what about this image?\n" )
      message.append_content( file_content_of_three_balloons )
      conversation.messages << message
      text = request_image_reply(
        conversation,
        send( options[ :adapter ] || :adapter ),
        options[ :end_reason ] || :ended
      )
      expect( text ).to(
        match( /balloons/i )
      )
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_token_limit_exceeded.rb | spec/support/adapters/stream_requests_with_token_limit_exceeded.rb | require 'spec_helper'
# Shared examples verifying that streamed responses stop with the expected
# end reason when the configured token limit is exceeded.
#
# Options:
#   :adapter    - name of the helper method returning the adapter under test
#                 ( defaults to :adapter )
#   :end_reason - the end reason expected on truncation
#                 ( defaults to :token_limit_exceeded )
RSpec.shared_examples 'stream requests with token limit exceeded' do | options = {} |

  # Streams the given conversation, asserting the shape of every partial
  # result, then asserts the final choice ended with the expected end reason
  # and that the ( truncated ) text got at least as far as 'five'. The adapter
  # and end reason are passed in because plain def helpers do not close over
  # the shared example block's options parameter.
  def stream_expecting_token_limit( adapter_instance, conversation, expected_end_reason )
    streamed_text = +''
    response = create_and_make_stream_request( adapter_instance, conversation ) do | partial |
      expect( partial ).to be_a( Intelligence::ChatResult )
      expect( partial.choices ).not_to be_nil
      expect( partial.choices.length ).to eq( 1 )
      partial_choice = partial.choices.first
      expect( partial_choice.message ).to be_a( Intelligence::Message )
      expect( partial_choice.message.contents ).not_to be_nil
      streamed_text << message_contents_to_text( partial_choice.message )
    end
    expect( response.success? ).to be( true ), response_error_description( response )
    expect( response.result ).to be_a( Intelligence::ChatResult )
    expect( response.result.choices ).not_to be_nil
    expect( response.result.choices.length ).to eq( 1 )
    expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
    final_choice = response.result.choices.first
    expect( final_choice.end_reason ).to eq( expected_end_reason )
    expect( streamed_text ).to match( /five/i )
  end

  context 'where there is a single message the response to which will exceed the token limit' do
    it 'streams limited text and an end reason which indicates that the token limit was exceeded' do
      conversation = create_conversation( "count to twenty in words, all lower case, one word per line\n" )
      stream_expecting_token_limit(
        send( options[ :adapter ] || :adapter ),
        conversation,
        options[ :end_reason ] || :token_limit_exceeded
      )
    end
  end

  context 'where there are multiple messages the response to which will exceed the token limit' do
    it 'streams limited text and an end reason which indicates that the token limit was exceeded' do
      conversation = create_conversation(
        "count to five in words, all lower case, one word per line\n",
        "one\ntwo\nthree\nfour\nfive\n",
        "count to twenty in words, all lower case, one word per line\n"
      )
      stream_expecting_token_limit(
        send( options[ :adapter ] || :adapter ),
        conversation,
        options[ :end_reason ] || :token_limit_exceeded
      )
    end
  end

end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_without_alternating_roles.rb | spec/support/adapters/stream_requests_without_alternating_roles.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests without alternating roles' do
context 'where there are two messages with the same role in a sequence' do
it 'streams the appropriate generated text' do
conversation = create_conversation(
"the word to remember is 'blue'\n",
"ok\n"
)
conversation.messages << build_text_message( :user, "the word has been changed to 'red'!\n" )
conversation.messages << build_text_message( :user, "what is the word?\n" )
text = ''
response = create_and_make_stream_request( adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( text ).to( match( /red/i ) )
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_binary_encoded_audio.rb | spec/support/adapters/stream_requests_with_binary_encoded_audio.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests with binary encoded audio' do
let( :binary_content_of_test_audio_file ) {
build_binary_content( fixture_file_path( 'this-is-a-test.mp3' ) )
}
let( :binary_content_of_hello_world_audio_file ) {
build_binary_content( fixture_file_path( 'hello-world.mp3' ) )
}
context 'where there is a single message and binary encoded audio' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_test_audio_file )
text = ''
response = create_and_make_stream_request( vision_adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /this is a test/i ),
"Expected 'this is a test', received '#{text}'."
)
end
end
context 'where there are multiple messages with the first including binary encoded audio' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_test_audio_file )
conversation.messages << build_text_message( :assistant, "this is a test\n" )
conversation.messages << build_text_message( :user, "how many words is that?\nreply with just a number\n" )
text = ''
response = create_and_make_stream_request( vision_adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /4/i ),
"Expected '4', received '#{text}'."
)
end
end
context 'where there are multiple messages with each including binary encoded audio' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_test_audio_file )
conversation.messages << build_text_message( :assistant, "this is a test\n" )
message = build_text_message( :user, "what about this file?\n" )
message.append_content( binary_content_of_hello_world_audio_file )
conversation.messages << message
text = ''
response = create_and_make_stream_request( vision_adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /hello world/i ),
"Expected 'hello world', received '#{text}'."
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_token_limit_exceeded.rb | spec/support/adapters/chat_requests_with_token_limit_exceeded.rb | require 'spec_helper'
RSpec.shared_examples 'chat requests with token limit exceeded' do | options = {} |
context 'where there is a single message the response to which will exceed the token limit' do
it 'responds with limited text and an end reason which indicates that the token limit was exceeded' do
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ),
create_conversation( "count to twenty in words, all lower case, one word per line\n" )
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( options[ :end_reason ] || :token_limit_exceeded )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /five/i )
)
end
end
context 'where there are multiple messages the response to which will exceed the token limit' do
it 'responds with limited text and an end reason which indicates that the token limit was exceeded' do
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ),
create_conversation(
"count to five in words, all lower case, one word per line\n",
"one\ntwo\nthree\nfour\nfive\n",
"count to twenty in words, all lower case, one word per line\n"
)
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( options[ :end_reason ] || :token_limit_exceeded )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /five/i )
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_calculator_tool.rb | spec/support/adapters/stream_requests_with_calculator_tool.rb | require 'spec_helper'
require 'debug'
require 'dentaku'
RSpec.shared_examples 'stream requests with calculator tool' do | options = {} |
def calculator( arguments )
if arguments.is_a?( String )
begin
arguments = JSON.parse( arguments )
rescue
raise RuntimeError,
"The parameter format is not valid, it should be \"{expression: '2+2'}\"."
end
end
arguments = arguments.transform_keys( &:to_sym )
expression = arguments.fetch( :expression ) do
raise RuntimeError, "The parameter must include an `expression`."
end
calculator = Dentaku::Calculator.new
calculator.store( { pi: Math::PI, e: Math::E } )
calculator.evaluate!( expression )
end
let ( :get_calculator_tool ) {
Intelligence::Tool.build! do
name 'calculator'
description \
"# Calculator\n" \
"The claculator DSL evaluates math, comparisons, logic, and numeric aggregates.\n" \
"## Numbers\n" \
"- Integers and floats are accepted.\n" \
"- Bitwise operators use integers; non-integers are truncated.\n" \
"- The result of bitwise operators are non-numeric but can be converted to numeric using ROUND.\n" \
"## Arithmetic operators\n" \
"- + (add), - (subtract), * (multiply), / (divide), % (modulo), ^ (power).\n" \
"- Bitwise: | (or), & (and), << (shift left), >> (shift right).\n" \
"## Math Functions\n" \
"- SIN, COS, TAN, ASIN, ACOS, ATAN, ATAN2, SINH, COSH, TANH, ASINH, ACOSH, ATANH, " \
" SQRT, EXP, LOG, LOG10, POW.\n" \
"- Angles are in radians.\n" \
"## Comparison Operators\n" \
"- =, !=, <>, <, >, <=, >= return booleans.\n" \
"## Logical functions\n" \
"- IF(cond, then, else).\n" \
"- AND(a, b, ...), OR(a, b, ...), XOR(a, b), NOT(x).\n" \
"- XOR(a, b) is true if exactly one argument is true.\n" \
"## Numeric Functions\n" \
"- MIN(...), MAX(...), SUM(...), AVG(...), COUNT(...).\n" \
"- ROUND(x, [digits]).\n" \
"- ROUNDDOWN(x, [digits]) rounds toward zero.\n" \
"- ROUNDUP(x, [digits]) rounds away from zero.\n" \
"- ABS(x).\n" \
"## Constants\n" \
"- PI, E" \
"## Aggregates\n" \
"- Functions accept a list of values or an array, e.g., SUM(1,2,3) or\n" \
" SUM(1,2,3).\n" \
"- COUNT counts non-nil numeric arguments.\n" \
"## Operator precedence (high to low)\n" \
"1) Parentheses and functions\n" \
"2) Unary +, unary -, NOT\n" \
"3) ^\n" \
"4) *, /, %\n" \
"5) +, -\n" \
"6) <<, >>\n" \
"7) &\n" \
"8) |\n" \
"9) Comparisons (=, !=, <>, <, >, <=, >=)\n" \
"10) AND, XOR, OR\n" \
"## Errors and Edge Cases\n" \
"- Division by zero raises an error.\n" \
"- Non-numeric values in numeric functions raise an error.\n" \
"- Mismatched list sizes in INTERCEPT raise an error.\n" \
argument name: :expression, type: 'string', required: true do
description \
"Formula to evaluate. Examples: \n"\
"- 1 + 2 * 3 -> 7\n" \
"- ^ is power: 2 ^ 3 -> 8\n" \
"- Bitwise: 5 | 2 -> 7, 5 & 2 -> 0, 5 << 1 -> 10\n" \
"- Trig: SIN(PI/2) -> 1\n" \
"- IF(2 > 1, 10, 0) -> 10\n" \
"- ROUND(3.14159, 2) -> 3.14; ROUNDUP(-1.2) -> -1; ROUNDDOWN(1.9) -> 1\n" \
"- AVG(1,2,3,4) -> 2.5\n" \
end
end
}
context 'where there is a conversation about mathematical questions' do
context 'which requires the calculator tool to complete' do
it 'streams the appropriate generated text' do
response = nil
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a helpfull assistant who answers questions succinctly. " +
"Please use the calculator tool when asked mathematical questions."
end
message role: :user do
content text: 'What is 3 * 5?'
end
end
response = create_and_make_stream_request(
send( options[ :adapter ] || :adapter ),
conversation,
tools: [ get_calculator_tool ]
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :tool_called )
expect( choice.message ).to be_a( Intelligence::Message )
message = choice.message
expect( message.contents ).not_to be_nil
expect( message.contents.length ).to be > 0
expect( message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
tool_call = message.contents.last
expect( tool_call.tool_name ).to eq( 'calculator' )
value = calculator( tool_call.tool_parameters )
expect( value ).not_to be_nil
conversation << message
conversation << Intelligence::Message.build! do
role :user
content do
type :tool_result
tool_name tool_call.tool_name
tool_call_id tool_call.tool_call_id
tool_result value
end
end
response = create_and_make_stream_request(
send( options[ :adapter ] || :adapter ),
conversation,
tools: [ get_calculator_tool ]
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to be > 0
expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::Text )
expect( choice.message.contents.last.text ).to match( /15/i )
end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_without_alternating_roles.rb | spec/support/adapters/chat_requests_without_alternating_roles.rb | require 'spec_helper'
RSpec.shared_examples 'chat requests without alternating roles' do
context 'where there are two messages with the same role in a sequence' do
it 'responds with the appropriate generated text' do
conversation = create_conversation(
"the word you should remember is 'blue'\n",
"ok\n"
)
conversation.messages << build_text_message( :user, "the word has been changed to 'red'!\n" )
conversation.messages << build_text_message( :user, "what is the word?\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /red/i )
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_calculator_tool.rb | spec/support/adapters/chat_requests_with_calculator_tool.rb | require 'spec_helper'
require 'debug'
require 'dentaku'
RSpec.shared_examples 'chat requests with calculator tool' do | options = {} |
def calculator( arguments )
if arguments.is_a?( String )
begin
arguments = JSON.parse( arguments )
rescue
raise RuntimeError,
"The parameter format is not valid, it should be \"{expression: '2+2'}\"."
end
end
arguments = arguments.transform_keys( &:to_sym )
expression = arguments.fetch( :expression ) do
raise RuntimeError, "The parameter must include an `expression`."
end
calculator = Dentaku::Calculator.new
calculator.store( { pi: Math::PI, e: Math::E } )
calculator.evaluate!( expression )
end
let ( :get_calculator_tool ) {
Intelligence::Tool.build! do
name 'calculator'
description \
"# Calculator\n" \
"The claculator DSL evaluates math, comparisons, logic, and numeric aggregates.\n" \
"## Numbers\n" \
"- Integers and floats are accepted.\n" \
"- Bitwise operators use integers; non-integers are truncated.\n" \
"- The result of bitwise operators are non-numeric but can be converted to numeric using ROUND.\n" \
"## Arithmetic operators\n" \
"- + (add), - (subtract), * (multiply), / (divide), % (modulo), ^ (power).\n" \
"- Bitwise: | (or), & (and), << (shift left), >> (shift right).\n" \
"## Math Functions\n" \
"- SIN, COS, TAN, ASIN, ACOS, ATAN, ATAN2, SINH, COSH, TANH, ASINH, ACOSH, ATANH, " \
" SQRT, EXP, LOG, LOG10, POW.\n" \
"- Angles are in radians.\n" \
"## Comparison Operators\n" \
"- =, !=, <>, <, >, <=, >= return booleans.\n" \
"## Logical functions\n" \
"- IF(cond, then, else).\n" \
"- AND(a, b, ...), OR(a, b, ...), XOR(a, b), NOT(x).\n" \
"- XOR(a, b) is true if exactly one argument is true.\n" \
"## Numeric Functions\n" \
"- MIN(...), MAX(...), SUM(...), AVG(...), COUNT(...).\n" \
"- ROUND(x, [digits]).\n" \
"- ROUNDDOWN(x, [digits]) rounds toward zero.\n" \
"- ROUNDUP(x, [digits]) rounds away from zero.\n" \
"- ABS(x).\n" \
"## Constants\n" \
"- PI, E" \
"## Aggregates\n" \
"- Functions accept a list of values or an array, e.g., SUM(1,2,3) or\n" \
" SUM(1,2,3).\n" \
"- COUNT counts non-nil numeric arguments.\n" \
"## Operator precedence (high to low)\n" \
"1) Parentheses and functions\n" \
"2) Unary +, unary -, NOT\n" \
"3) ^\n" \
"4) *, /, %\n" \
"5) +, -\n" \
"6) <<, >>\n" \
"7) &\n" \
"8) |\n" \
"9) Comparisons (=, !=, <>, <, >, <=, >=)\n" \
"10) AND, XOR, OR\n" \
"## Errors and Edge Cases\n" \
"- Division by zero raises an error.\n" \
"- Non-numeric values in numeric functions raise an error.\n" \
"- Mismatched list sizes in INTERCEPT raise an error.\n" \
argument name: :expression, type: 'string', required: true do
description \
"Formula to evaluate. Examples: \n"\
"- 1 + 2 * 3 -> 7\n" \
"- ^ is power: 2 ^ 3 -> 8\n" \
"- Bitwise: 5 | 2 -> 7, 5 & 2 -> 0, 5 << 1 -> 10\n" \
"- Trig: SIN(PI/2) -> 1\n" \
"- IF(2 > 1, 10, 0) -> 10\n" \
"- ROUND(3.14159, 2) -> 3.14; ROUNDUP(-1.2) -> -1; ROUNDDOWN(1.9) -> 1\n" \
"- AVG(1,2,3,4) -> 2.5\n" \
end
end
}
context 'where there is a conversation about mathematical questions' do
context 'which requires the calculator tool to complete' do
it 'responds with the appropriate generated text' do
response = nil
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a helpfull assistant who answers questions succinctly. " +
"Please use the calculator tool when asked mathematical questions."
end
message role: :user do
content text: 'What is 3 * 5?'
end
end
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ),
conversation,
tools: [ get_calculator_tool ]
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :tool_called )
expect( choice.message ).to be_a( Intelligence::Message )
message = choice.message
expect( message.contents ).not_to be_nil
expect( message.contents.length ).to be > 0
expect( message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
tool_call = choice.message.contents.last
expect( tool_call.tool_name ).to eq( 'calculator' )
value = calculator( tool_call.tool_parameters )
expect( value ).not_to be_nil
conversation << message
conversation << Intelligence::Message.build! do
role :user
content do
type :tool_result
tool_name tool_call.tool_name
tool_call_id tool_call.tool_call_id
tool_result value
end
end
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ),
conversation,
tools: [ get_calculator_tool ]
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to be > 0
expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::Text )
expect( choice.message.contents.last.text ).to match( /15/i )
end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_adapter_tools.rb | spec/support/adapters/chat_requests_with_adapter_tools.rb | require 'spec_helper'
require 'debug'
RSpec.shared_examples 'chat requests with adapter tools' do | options = {} |
context 'where there is a single message and a single tool' do
it 'responds with a tool request' do
response = nil
conversation = create_conversation( "Where am I located?\n" )
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter_with_tool ),
conversation
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :tool_called )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to be > 0
expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
tool_call = choice.message.contents.last
expect( tool_call.tool_name ).to eq( 'get_location' )
end
end
context 'where there is a conversation with a single tool' do
context 'which is composed of a text message, a tool request, and a tool response' do
it 'responds with the appropriate generated text' do
response = nil
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a helpfull assistant who answers questions succinctly."
end
message role: :user do
content text: 'Where am I located?'
end
message role: :assistant do
content type: :tool_call do
tool_call_id "MpfiuoRaM"
tool_name :get_location
end
end
message role: :user do
content type: :tool_result do
tool_call_id "MpfiuoRaM"
tool_name :get_location
tool_result "Seattle, WA, USA"
end
end
end
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter_with_tool ),
conversation
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( choice.message.contents[ 0 ] ).to be_a( Intelligence::MessageContent::Text )
content = choice.message.contents[ 0 ]
expect( content.text ).to match( /seattle/i )
end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_binary_encoded_text.rb | spec/support/adapters/stream_requests_with_binary_encoded_text.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests with binary encoded text' do
let( :binary_content_of_text_file ) {
build_binary_content( fixture_file_path( 'this-is-a-test.txt' ) )
}
let( :binary_content_of_universe_text_file ) {
build_binary_content( fixture_file_path( 'universe.txt' ) )
}
context 'where there is a single message and binary encoded text' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_text_file )
text = ''
response = create_and_make_stream_request( vision_adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /this is a test/i ),
"Expected 'this is a test', received '#{text}'."
)
end
end
context 'where there are multiple messages with the first including binary encoded text' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_text_file )
conversation.messages << build_text_message( :assistant, "this is a test\n" )
conversation.messages << build_text_message( :user, "how many words is that?\nreply with just a number\n" )
text = ''
response = create_and_make_stream_request( vision_adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /4/i ),
"Expected '4', received '#{text}'."
)
end
end
context 'where there are multiple messages with each including a binary encoded text' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_text_file )
conversation.messages << build_text_message( :assistant, "this is a test\n" )
message = build_text_message( :user, "what about this file? what is it about?\nrespond in less than 16 words\n" )
message.append_content( binary_content_of_universe_text_file )
conversation.messages << message
text = ''
response = create_and_make_stream_request( vision_adapter, conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /universe/i ),
"Expected 'universe', received '#{text}'."
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_file_images.rb | spec/support/adapters/stream_requests_with_file_images.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests with file images' do | options = {} |
let( :file_content_of_red_balloon ) {
Intelligence::MessageContent::File.build do
uri "https://github.com/EndlessInternational/intelligence/blob/main/spec/fixtures/files/single-red-balloon.png?raw=true"
end
}
let( :file_content_of_three_balloons ) {
Intelligence::MessageContent::File.build do
uri "https://github.com/EndlessInternational/intelligence/blob/main/spec/fixtures/files/three-balloons.png?raw=true"
end
}
context 'where there is a single message and a file image' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "identify this image; all lower case\n" )
conversation.messages.last.append_content( file_content_of_red_balloon )
text = ''
response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( options[ :end_reason ] || :ended )
expect( text ).to match( /balloon/i ), "Expected text to include 'balloon' but got '#{text}'."
end
end
context 'where there are multiple messages with the first including a file image' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "identify this image; all lower case\n" )
conversation.messages.last.append_content( file_content_of_red_balloon )
conversation.messages << build_text_message( :assistant, "balloon\n" )
conversation.messages << build_text_message( :user, "what color?\n" )
text = ''
response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( options[ :end_reason ] || :ended )
expect( text ).to match( /red/i ), "Expected text to include 'red' but got '#{text}'."
end
end
context 'where there are multiple messages with each including a file image' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "identify this image; all lower case\n" )
conversation.messages.last.append_content( file_content_of_red_balloon )
conversation.messages << build_text_message( :assistant, "one red balloon\n" )
message = build_text_message( :user, "what about this image?\n" )
message.append_content( file_content_of_three_balloons )
conversation.messages << message
text = ''
response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be true
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( options[ :end_reason ] || :ended )
expect( text ).to match( /balloons/i ), "Expected text to include 'balloons' but got '#{text}'."
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_tools_multiturn.rb | spec/support/adapters/chat_requests_with_tools_multiturn.rb | require 'spec_helper'
require 'debug'
# Shared examples exercising multi-turn tool use: the model must first call
# get_location (a zero-argument tool) and, after being given its result, make
# a follow-up get_weather call. Hosts must provide create_and_make_chat_request,
# response_error_description, and an :adapter (or options[ :adapter ]) helper.
RSpec.shared_examples 'chat requests with tools multiturn' do | options = {} |

  # zero-argument tool; the model must call it to learn the user's locality
  let( :get_location_tool ) {
    Intelligence::Tool.build! do
      name :get_location
      description \
        "The get_location tool will return the users city, state or province and country."
    end
  }

  # tool with one required and two optional string arguments
  let( :get_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description "The get_weather tool will return the current weather in a given locality."
      argument name: :city, type: 'string', required: true do
        description "The city or town for which the current weather should be returned."
      end
      argument name: :state, type: 'string' do
        description \
          "The state or province for which the current weather should be returned. If this is " \
          "not provided the largest or most prominent city with the given name, in the given " \
          "country or in the world, will be assumed."
      end
      argument name: :country, type: 'string' do
        description \
          "The country for which the given weather should be returned. If this is not provided " \
          "the largest or most prominent city with the given name will be returned."
      end
    end
  }

  context 'where there is a conversation with multiple tools' do
    context 'which requires multiple turns to complete' do
      it 'responds with the appropriate generated text' do
        response = nil
        conversation = Intelligence::Conversation.build do
          system_message do
            content text: "You are a helpful assistant who answers questions succinctly."
          end
          message role: :user do
            content text: 'What is the current weather?'
          end
        end
        # first turn: the model should request the user's location
        response = create_and_make_chat_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_location_tool, get_weather_tool ]
        )
        expect( response.success? ).to be( true ), response_error_description( response )
        expect( response.result ).to be_a( Intelligence::ChatResult )
        expect( response.result.choices ).not_to be_nil
        expect( response.result.choices.length ).to eq( 1 )
        expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = response.result.choices.first
        expect( choice.end_reason ).to eq( :tool_called )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to be > 0
        expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        tool_call = choice.message.contents.last
        expect( tool_call.tool_name ).to eq( 'get_location' )
        # feed the tool result back so the model can take its second turn
        conversation << choice.message
        conversation << Intelligence::Message.build!( role: :user ) do
          content type: :tool_result do
            tool_call_id tool_call.tool_call_id
            tool_name tool_call.tool_name
            tool_result( "Seattle, WA, USA" )
          end
        end
        # second turn: with the location known, the model should call get_weather
        response = create_and_make_chat_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_location_tool, get_weather_tool ]
        )
        expect( response.success? ).to be( true ), response_error_description( response )
        expect( response.result ).to be_a( Intelligence::ChatResult )
        expect( response.result.choices ).not_to be_nil
        expect( response.result.choices.length ).to eq( 1 )
        expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = response.result.choices.first
        expect( choice.end_reason ).to eq( :tool_called )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to be > 0
        expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        tool_call = choice.message.contents.last
        expect( tool_call.tool_name ).to eq( 'get_weather' )
      end
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_thought.rb | spec/support/adapters/chat_requests_with_thought.rb | require 'spec_helper'
# Verifies that a reasoning-capable model surfaces Thought content alongside
# its answer when the prompt requires multi-step reasoning.
RSpec.shared_examples 'chat requests with thought' do | options = {} |
  context 'where there is a message which requires thought' do
    it 'responds with the appropriate generated text' do
      prompt =
        "Given a triangle ABC with angle A 60 degrees and angle B 40 degrees, extend side BC " \
        "through C. Now create triangle DAB such that DC is congruent to AB. What is the angle " \
        "measure of angle ADB?\n"
      chat_adapter = send( options[ :adapter ] || :adapter )
      response = create_and_make_chat_request( chat_adapter, create_conversation( prompt ) )
      expect( response.success? ).to be( true ), response_error_description( response )
      result = response.result
      expect( result ).to be_a( Intelligence::ChatResult )
      expect( result.choices ).not_to be_nil
      expect( result.choices.length ).to eq( 1 )
      choice = result.choices.first
      expect( choice ).to be_a( Intelligence::ChatResultChoice )
      expect( choice.end_reason ).to eq( :ended )
      expect( choice.message ).to be_a( Intelligence::Message )
      expect( choice.message.contents ).not_to be_nil
      expect( choice.message.contents.length ).to be >= 1
      contents = choice.message.contents
      # at least one Thought content must be present, and each Thought must
      # carry non-empty text
      thoughts = contents.select { | content | content.is_a?( Intelligence::MessageContent::Thought ) }
      expect( thoughts.any? ).to be true
      thoughts.each do | thought |
        expect( thought.text ).not_to be_nil
        expect( thought.text.length ).to be >= 1
      end
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_thought.rb | spec/support/adapters/stream_requests_with_thought.rb | require 'spec_helper'
# Shared examples verifying that, when streaming, reasoning models surface
# their reasoning as Thought content in the final assembled result message.
RSpec.shared_examples 'stream requests with thought' do | options = {} |
  context 'where there is a message which requires thought' do
    it 'responds with the appropriate generated text' do
      conversation = create_conversation_without_system_message(
        "In a 30-60-90 triangle, the length of the hypotenuse is 6. " +
        "What is the length of the shortest side?\n"
      )
      # the block is invoked for each streamed result; only basic shape checks
      # are made here, the Thought content is verified on the final result below
      response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
        expect( result ).to be_a( Intelligence::ChatResult )
        expect( result.choices ).not_to be_nil
        expect( result.choices.length ).to eq( 1 )
        choice = result.choices.first
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
      end
      expect( response.success? ).to be( true ), response_error_description( response )
      expect( response.result ).not_to be_nil
      expect( response.result ).to respond_to( :message )
      expect( response.result.message ).not_to be_nil
      contents = response.result.message.contents
      expect( contents ).not_to be_nil
      expect( contents.length ).to be >= 1
      # at least one Thought content must be present, and every Thought must
      # carry non-empty text
      expect( contents.any? { | c | c.is_a?( Intelligence::MessageContent::Thought ) } ).to be true
      contents.each do | content |
        if content.is_a?( Intelligence::MessageContent::Thought )
          expect( content.text ).not_to be_nil
          expect( content.text.length ).to be >= 1
        end
      end
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/stream_requests_with_binary_encoded_pdf.rb | spec/support/adapters/stream_requests_with_binary_encoded_pdf.rb | require 'spec_helper'
RSpec.shared_examples 'stream requests with binary encoded pdf' do | options = {} |
let( :binary_content_of_nasa_pdf_file ) {
build_binary_content( fixture_file_path( 'nasa.pdf' ) )
}
let( :binary_content_of_nasa_mars_curiosity_pdf_file ) {
build_binary_content( fixture_file_path( 'nasa-mars-curiosity.pdf' ) )
}
context 'where there is a single message and an encoded pdf' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the title of the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_nasa_pdf_file )
text = ''
response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /vision for space exploration/i ),
"Expected 'vision for space exploration', received '#{text}'."
)
end
end
context 'where there are multiple messages with the first including a binary encoded pdf' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the title of the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_nasa_pdf_file )
conversation.messages << build_text_message( :assistant, "Vision for Space Exploration\n" )
conversation.messages << build_text_message( :user, "when was it written?\n" )
text = ''
response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /February 2004/i ),
"Expected 'February 2004', received '#{text}'."
)
end
end
context 'where there are multiple messages with each including a binary encoded pdf' do
it 'streams the appropriate generated text' do
conversation = create_conversation( "what is the title of the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_nasa_pdf_file )
conversation.messages << build_text_message( :assistant, "Vision for Space Exploration\n" )
message = build_text_message( :user, "what about this document? describe it in less than 16 words\n" )
message.append_content( binary_content_of_nasa_mars_curiosity_pdf_file )
conversation.messages << message
text = ''
response = create_and_make_stream_request( send( options[ :adapter ] || :adapter ), conversation ) do | result |
expect( result ).to be_a( Intelligence::ChatResult )
expect( result.choices ).not_to be_nil
expect( result.choices.length ).to eq( 1 )
choice = result.choices.first
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
text += message_contents_to_text( choice.message )
end
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
expect( response.result.choices.first.end_reason ).to eq( :ended )
expect( text ).to(
match( /mars/i ),
"Expected 'mars', received '#{text}'."
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_binary_encoded_pdf.rb | spec/support/adapters/chat_requests_with_binary_encoded_pdf.rb | require 'spec_helper'
RSpec.shared_examples 'chat requests with binary encoded pdf' do | options = {} |
let( :binary_content_of_nasa_pdf_file ) {
build_binary_content( fixture_file_path( 'nasa.pdf' ) )
}
let( :binary_content_of_nasa_mars_curiosity_pdf_file ) {
build_binary_content( fixture_file_path( 'nasa-mars-curiosity.pdf' ) )
}
context 'where there is a single message and an encoded pdf' do
it 'responds with the appropriate generated text' do
conversation = create_conversation( "what is the title of the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_nasa_pdf_file )
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ), conversation
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
text = message_contents_to_text( choice.message )
expect( text ).to(
match( /vision\s+for\s+space\s+exploration/i ),
"Expected 'vision for space exploration', received '#{text}'."
)
end
end
context 'where there are multiple messages with the first including a binary encoded pdf' do
it 'responds with the appropriate generated text' do
conversation = create_conversation( "what is the title of the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_nasa_pdf_file )
conversation.messages << build_text_message( :assistant, "Vision for Space Exploration\n" )
conversation.messages << build_text_message( :user, "when was it written?\n" )
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ), conversation
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
text = message_contents_to_text( choice.message )
expect( text ).to(
match( /February 2004/i ),
"Expected 'February 2004', received '#{text}'."
)
end
end
context 'where there are multiple messages with each including a binary encoded pdf' do
it 'responds with the appropriate generated text' do
conversation = create_conversation( "what is the title of the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_nasa_pdf_file )
conversation.messages << build_text_message( :assistant, "Vision for Space Exploration\n" )
message = build_text_message( :user, "what about this document? describe it in less than 16 words\n" )
message.append_content( binary_content_of_nasa_mars_curiosity_pdf_file )
conversation.messages << message
response = create_and_make_chat_request(
send( options[ :adapter ] || :adapter ), conversation
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
text = message_contents_to_text( choice.message )
expect( text ).to(
match( /mars/i ),
"Expected 'mars', received '#{text}'."
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests.rb | spec/support/adapters/chat_requests.rb | require 'spec_helper'
RSpec.shared_examples 'chat requests' do
context 'where there is no system message' do
context 'where there is a single message' do
it 'responds with the appropriate generated text' do
response = nil
conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /hello/i )
)
end
end
context 'where there are multiple messages' do
it 'responds with the appropriate generated text' do
response = create_and_make_chat_request(
adapter,
create_conversation_without_system_message(
"the secret word is 'red'\n",
"ok\n",
"what is the exact secret word? answer with the word only\n"
)
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /red/i )
)
end
end
end
context 'where there is a system message' do
context 'where there is a single message' do
it 'responds with the appropriate generated text' do
response = nil
conversation = create_conversation( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /hello/i )
)
end
end
context 'where there are multiple messages' do
it 'responds with the appropriate generated text' do
response = create_and_make_chat_request(
adapter,
create_conversation(
"the secret word is 'blue'\n",
"ok\n",
"what is the secret word?\nrespond with the word only\n"
)
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /blue/i )
)
end
end
end
context 'where there are muliple chat request in a series' do
context 'where there is no system message' do
context 'where there is a single message' do
it 'responds with the appropriate generated text' do
response = nil
conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.text ).to match( /hello/i )
response = nil
conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /hello/i )
)
end
end
context 'where there are multiple messages' do
it 'responds with the appropriate generated text' do
response = nil
conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.text ).to match( /hello/i )
response = create_and_make_chat_request(
adapter,
create_conversation_without_system_message(
"the secret word is 'blue'\n",
"ok\n",
"what is the secret word?\nrespond with the word only\n"
)
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /blue/i )
)
end
end
end
context 'where there is a system message' do
context 'where there is a single message' do
it 'responds with the appropriate generated text' do
response = nil
conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.text ).to match( /hello/i )
response = nil
conversation = create_conversation( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /hello/i )
)
end
end
context 'where there are multiple messages' do
it 'responds with the appropriate generated text' do
response = nil
conversation = create_conversation_without_system_message( "respond only with the word 'hello'\n" )
response = create_and_make_chat_request( adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.text ).to match( /hello/i )
response = create_and_make_chat_request(
adapter,
create_conversation(
"the secret word is 'blue'\n",
"ok\n",
"what is the secret word?\nrespond with the word only\n"
)
)
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
expect( message_contents_to_text( choice.message ) ).to(
match( /blue/i )
)
end
end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_complex_tools.rb | spec/support/adapters/chat_requests_with_complex_tools.rb | require 'spec_helper'
require 'debug'
# Shared examples exercising tool calls whose arguments include a nested
# object parameter, verifying the adapter round-trips structured tool
# parameters. Hosts must provide create_conversation,
# create_and_make_chat_request, response_error_description, and an :adapter
# (or options[ :adapter ]) helper.
RSpec.shared_examples 'chat requests with complex tools' do | options = {} |

  # a tool whose single required argument is an object with nested properties
  let( :get_complex_weather_tool ) {
    Intelligence::Tool.build! do
      name :get_weather
      description 'The get_weather tool returns the current weather in given locality.'
      argument name: :location, required: true, type: 'object' do
        description "The locality for which the weather will be returned"
        property name: :city, type: 'string', required: true do
          description "The city or town for which the current weather should be returned."
        end
        property name: :state, type: 'string' do
          description \
            "The state or province for which the current weather should be returned. If this is " \
            "not provided the largest or most prominent city with the given name, in the given " \
            "country or in the world, will be assumed."
        end
        property name: :country, type: 'string' do
          description \
            "The country for which the given weather should be returned. If this is not provided " \
            "the largest or most prominent city with the given name will be returned."
        end
      end
    end
  }

  context 'where there is a single message and a single tool' do
    context 'where the tool parameters include an object' do
      it 'responds with a tool request' do
        response = nil
        conversation = create_conversation( "Tell me the weather in Seattle.\n" )
        response = create_and_make_chat_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_complex_weather_tool ]
        )
        expect( response.success? ).to be( true ), response_error_description( response )
        expect( response.result ).to be_a( Intelligence::ChatResult )
        expect( response.result.choices ).not_to be_nil
        expect( response.result.choices.length ).to eq( 1 )
        expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = response.result.choices.first
        expect( choice.end_reason ).to eq( :tool_called )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to be > 0
        expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        # the nested :location object must be parsed back into a Hash
        tool_call = choice.message.contents.last
        expect( tool_call.tool_name ).to eq( 'get_weather' )
        expect( tool_call.tool_parameters ).to be_a( Hash )
        expect( tool_call.tool_parameters[ :location ] ).to be_a( Hash )
        expect( tool_call.tool_parameters[ :location ][ :city ] ).to match( /seattle/i )
      end
    end
  end

  context 'where there are multiple messages and a single tool' do
    context 'where the tool parameters include an object' do
      it 'responds with a tool request' do
        # the locality is established earlier in the conversation history
        conversation = create_conversation(
          "I am in Seattle, WA\n",
          "Got it! Let me know if you need any local insights or information related to Seattle!\n",
          "What is the current weather?\n"
        )
        response = create_and_make_chat_request(
          send( options[ :adapter ] || :adapter ),
          conversation,
          tools: [ get_complex_weather_tool ]
        )
        expect( response.success? ).to be( true ), response_error_description( response )
        expect( response.result ).to be_a( Intelligence::ChatResult )
        expect( response.result.choices ).not_to be_nil
        expect( response.result.choices.length ).to eq( 1 )
        expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
        choice = response.result.choices.first
        expect( choice.end_reason ).to eq( :tool_called )
        expect( choice.message ).to be_a( Intelligence::Message )
        expect( choice.message.contents ).not_to be_nil
        expect( choice.message.contents.length ).to be > 0
        expect( choice.message.contents.last ).to be_a( Intelligence::MessageContent::ToolCall )
        tool_call = choice.message.contents.last
        expect( tool_call.tool_name ).to eq( 'get_weather' )
        expect( tool_call.tool_parameters ).to be_a( Hash )
        expect( tool_call.tool_parameters[ :location ] ).to be_a( Hash )
        expect( tool_call.tool_parameters[ :location ][ :city ] ).to match( /seattle/i )
      end
    end
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/support/adapters/chat_requests_with_binary_encoded_audio.rb | spec/support/adapters/chat_requests_with_binary_encoded_audio.rb | require 'spec_helper'
# Shared examples verifying that an adapter can accept MP3 audio attached to
# messages as binary content and respond with the expected transcription or
# answer. Exercises three shapes: a single message with audio, a multi-turn
# conversation whose first message carried audio, and a multi-turn
# conversation where each user message carries audio.
# NOTE(review): these examples call `vision_adapter` and ignore any options
# hash — presumably the host spec's multimodal adapter also handles audio;
# confirm against the specs that include these examples.
RSpec.shared_examples 'chat requests with binary encoded audio' do
let( :binary_content_of_test_audio_file ) {
build_binary_content( fixture_file_path( 'this-is-a-test.mp3' ) )
}
let( :binary_content_of_hello_world_audio_file ) {
build_binary_content( fixture_file_path( 'hello-world.mp3' ) )
}
context 'where there is a single message and binary encoded audio' do
it 'responds with the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_test_audio_file )
response = create_and_make_chat_request( vision_adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
text = message_contents_to_text( choice.message )
expect( text ).to(
match( /this is a test/i ),
"Expected 'this is a test', received '#{text}'."
)
end
end
context 'where there are multiple messages with the first including binary encoded audio' do
it 'responds with the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_test_audio_file )
conversation.messages << build_text_message( :assistant, "this is a test\n" )
conversation.messages << build_text_message( :user, "how many words is that?\nreply with just a number\n" )
response = create_and_make_chat_request( vision_adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
text = message_contents_to_text( choice.message )
# 'this is a test' has four words, so the model should answer '4'
expect( text ).to(
match( /4/i ),
"Expected '4', received '#{text}'."
)
end
end
context 'where there are multiple messages with each including binary encoded audio' do
it 'responds with the appropriate generated text' do
conversation = create_conversation( "what is the text in the attached file?\n" )
conversation.messages.last.append_content( binary_content_of_test_audio_file )
conversation.messages << build_text_message( :assistant, "this is a test\n" )
message = build_text_message( :user, "what about this file?\n" )
message.append_content( binary_content_of_hello_world_audio_file )
conversation.messages << message
response = create_and_make_chat_request( vision_adapter, conversation )
expect( response.success? ).to be( true ), response_error_description( response )
expect( response.result ).to be_a( Intelligence::ChatResult )
expect( response.result.choices ).not_to be_nil
expect( response.result.choices.length ).to eq( 1 )
expect( response.result.choices.first ).to be_a( Intelligence::ChatResultChoice )
choice = response.result.choices.first
expect( choice.end_reason ).to eq( :ended )
expect( choice.message ).to be_a( Intelligence::Message )
expect( choice.message.contents ).not_to be_nil
expect( choice.message.contents.length ).to eq( 1 )
text = message_contents_to_text( choice.message )
expect( text ).to(
match( /hello world/i ),
"Expected 'hello world', received '#{text}'."
)
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/message_content/file_spec.rb | spec/message_content/file_spec.rb | require 'spec_helper'
require 'uri'
require 'mime/types'
# Unit specs for Intelligence::MessageContent::File covering: construction
# from a uri/content_type attribute hash, lazy MIME-type inference from the
# URI path, URI scheme validation ( http/https supported, ftp rejected ),
# overall validity, and hash serialisation via #to_h.
# NOTE(review): the #initialize examples assert on instance variables via
# instance_variable_get, coupling the specs to internal ivar names rather
# than public readers.
RSpec.describe Intelligence::MessageContent::File do
describe '#initialize' do
context 'when attributes include uri and content_type' do
let( :attributes ) { { uri: 'http://example.com/file.txt', content_type: 'text/plain' } }
subject { described_class.new( attributes ) }
it 'sets @uri to the given URI' do
expect( subject.instance_variable_get( :@uri ) ).to eq( URI( 'http://example.com/file.txt' ) )
end
it 'sets @content_type to the given content_type' do
expect( subject.instance_variable_get( :@content_type ) ).to eq( 'text/plain' )
end
end
context 'when attributes include only uri' do
let( :attributes ) { { uri: 'http://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'sets @uri to the given URI' do
expect( subject.instance_variable_get( :@uri ) ).to eq( URI( 'http://example.com/file.txt' ) )
end
it 'leaves @content_type as nil' do
expect( subject.instance_variable_get( :@content_type ) ).to be_nil
end
end
context 'when attributes include only content_type' do
let( :attributes ) { { content_type: 'text/plain' } }
subject { described_class.new( attributes ) }
it 'leaves @uri as nil' do
expect( subject.instance_variable_get( :@uri ) ).to be_nil
end
it 'sets @content_type to the given content_type' do
expect( subject.instance_variable_get( :@content_type ) ).to eq( 'text/plain' )
end
end
context 'when attributes are empty' do
let( :attributes ) { {} }
subject { described_class.new( attributes ) }
it 'leaves @uri as nil' do
expect( subject.instance_variable_get( :@uri ) ).to be_nil
end
it 'leaves @content_type as nil' do
expect( subject.instance_variable_get( :@content_type ) ).to be_nil
end
end
end
# content_type is expected to fall back to MIME inference from the URI path
# only when no explicit content_type was given and the URI is valid.
describe '#content_type' do
context 'when @content_type is already set' do
let( :attributes ) { { content_type: 'text/plain' } }
subject { described_class.new( attributes ) }
it 'returns the existing @content_type' do
expect( subject.content_type ).to eq( 'text/plain' )
end
end
context 'when @content_type is not set and valid_uri? returns true' do
context 'and MIME type can be determined from @uri.path' do
let( :attributes ) { { uri: 'http://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'sets and returns the MIME type based on @uri.path' do
expect( subject.content_type ).to eq( 'text/plain' )
end
end
context 'and MIME type cannot be determined from @uri.path' do
let( :attributes ) { { uri: 'http://example.com/file.unknown' } }
subject { described_class.new( attributes ) }
it 'returns nil' do
expect( subject.content_type ).to be_nil
end
end
end
context 'when @content_type is not set and valid_uri? returns false' do
let( :attributes ) { { uri: 'ftp://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'returns nil' do
expect( subject.content_type ).to be_nil
end
end
end
describe '#valid_uri?' do
context 'with supported scheme and path' do
let( :attributes ) { { uri: 'https://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'returns true' do
expect( subject.valid_uri? ).to be true
end
end
context 'with unsupported scheme' do
let( :attributes ) { { uri: 'ftp://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'returns false' do
expect( subject.valid_uri? ).to be false
end
end
context 'when @uri is nil' do
let( :attributes ) { {} }
subject { described_class.new( attributes ) }
it 'returns false' do
expect( subject.valid_uri? ).to be false
end
end
end
# valid? requires both a usable URI and a resolvable content type
describe '#valid?' do
context 'when valid_uri? returns true and content_type is known' do
let( :attributes ) { { uri: 'http://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'returns true' do
expect( subject.valid? ).to be true
end
end
context 'when valid_uri? returns true but content_type is nil' do
let( :attributes ) { { uri: 'http://example.com/file.unknown' } }
subject { described_class.new( attributes ) }
it 'returns false' do
expect( subject.valid? ).to be false
end
end
context 'when valid_uri? returns false' do
let( :attributes ) { { uri: 'ftp://example.com/file.txt' } }
subject { described_class.new( attributes ) }
it 'returns false' do
expect( subject.valid? ).to be false
end
end
context 'when content_type is unknown' do
let( :attributes ) { { uri: 'http://example.com/file' } }
subject { described_class.new( attributes ) }
it 'returns false' do
expect( subject.valid? ).to be false
end
end
end
describe '#to_h' do
let( :attributes ) { { uri: 'http://example.com/file.txt', content_type: 'text/plain' } }
subject { described_class.new( attributes ) }
it 'returns the correct hash representation' do
expect( subject.to_h ).to eq( { type: :file, content_type: 'text/plain', uri: 'http://example.com/file.txt' } )
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/message_content/text_spec.rb | spec/message_content/text_spec.rb | require 'spec_helper'
# Unit specs for Intelligence::MessageContent::Text covering: attribute
# assignment in #initialize, validity ( text must be present, respond to
# #empty?, and be non-empty ), and hash serialisation via #to_h.
RSpec.describe Intelligence::MessageContent::Text, :unit do
describe '#initialize' do
it 'sets @text from attributes when provided' do
attributes = { text: 'Hello, world!' }
content = described_class.new( attributes )
expect( content.text ).to eq( 'Hello, world!' )
end
it 'does not set @text when text is not provided' do
content = described_class.new( {} )
expect( content.text ).to be_nil
end
end
describe '#valid?' do
context 'when text is provided and not empty' do
it 'returns true' do
attributes = { text: 'Sample text' }
content = described_class.new( attributes )
expect( content.valid? ).to be true
end
end
context 'when text is nil' do
it 'returns false' do
attributes = { text: nil }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
context 'when text is empty' do
it 'returns false' do
attributes = { text: '' }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
# a non-String value ( Integer has no #empty? ) is treated as invalid
context 'when text does not respond to #empty?' do
it 'returns false' do
attributes = { text: 12345 }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
end
describe '#to_h' do
it 'returns a hash representation with type and text' do
attributes = { text: 'Test content' }
content = described_class.new( attributes )
expected_hash = { type: :text, text: 'Test content' }
expect( content.to_h ).to eq( expected_hash )
end
it 'includes type :text even when text is nil' do
content = described_class.new( {} )
expected_hash = { type: :text, text: nil }
expect( content.to_h ).to eq( expected_hash )
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/message_content/binary_spec.rb | spec/message_content/binary_spec.rb | require 'spec_helper'
# Unit specs for Intelligence::MessageContent::Binary covering: attribute
# assignment in #initialize, validity ( requires a recognised content_type
# and non-empty bytes ), image? detection by MIME type, and hash
# serialisation via #to_h.
RSpec.describe Intelligence::MessageContent::Binary, :unit do
describe '#initialize' do
it 'sets @content_type and @bytes from attributes when provided' do
attributes = { content_type: 'image/png', bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.content_type ).to eq( 'image/png' )
expect( content.bytes ).to eq( 'binary_data_here' )
end
it 'does not set @content_type and @bytes when not provided' do
content = described_class.new( {} )
expect( content.content_type ).to be_nil
expect( content.bytes ).to be_nil
end
end
describe '#valid?' do
context 'when content_type and bytes are valid' do
it 'returns true' do
attributes = { content_type: 'image/png', bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.valid? ).to be true
end
end
# 'invalid/type' is not a registered MIME type, so validity fails even
# though bytes are present
context 'when content_type is invalid' do
it 'returns false' do
attributes = { content_type: 'invalid/type', bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
context 'when bytes are empty' do
it 'returns false' do
attributes = { content_type: 'image/png', bytes: '' }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
context 'when bytes is nil' do
it 'returns false' do
attributes = { content_type: 'image/png', bytes: nil }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
context 'when content_type is missing' do
it 'returns false' do
attributes = { bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
context 'when bytes is missing' do
it 'returns false' do
attributes = { content_type: 'image/png' }
content = described_class.new( attributes )
expect( content.valid? ).to be false
end
end
end
describe '#image?' do
context 'when content_type is an image type' do
it 'returns true' do
attributes = { content_type: 'image/jpeg', bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.image? ).to be true
end
end
context 'when content_type is not an image type' do
it 'returns false' do
attributes = { content_type: 'application/pdf', bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.image? ).to be false
end
end
context 'when content_type is invalid' do
it 'returns false' do
attributes = { content_type: 'invalid/type', bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.image? ).to be false
end
end
context 'when content_type is missing' do
it 'returns false' do
attributes = { bytes: 'binary_data_here' }
content = described_class.new( attributes )
expect( content.image? ).to be false
end
end
end
describe '#to_h' do
it 'returns a hash representation with type, content_type, and bytes' do
attributes = { content_type: 'application/octet-stream', bytes: 'test_bytes' }
content = described_class.new( attributes )
expected_hash = { type: :binary, content_type: 'application/octet-stream', bytes: 'test_bytes' }
expect( content.to_h ).to eq( expected_hash )
end
it 'includes type :binary even when content_type and bytes are nil' do
content = described_class.new( {} )
expected_hash = { type: :binary, content_type: nil, bytes: nil }
expect( content.to_h ).to eq( expected_hash )
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/together_ai/chat_spec.rb | spec/adapters/together_ai/chat_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) chat-request specs for the Together AI adapter,
# exercising the DeepSeek-V3 model plus a Llama vision model, token limits,
# stop sequences, tools, and error handling for bad keys/models.
RSpec.describe "#{Intelligence::Adapter[ :together_ai ]} chat requests", :together_ai do
include_context 'vcr'
before do
raise "A TOGETHERAI_API_KEY must be defined in the environment." unless ENV[ 'TOGETHERAI_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 128
temperature 0
end
end
end
let( :adapter_with_tool ) do
# fixed: this adapter was previously built with Intelligence::Adapter[ :open_ai ],
# which sent the Together AI key and model to the OpenAI endpoint
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 128
temperature 0
tool do
name :get_location
description \
"The get_location tool will return the users city, state or province and country."
end
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 24
temperature 0
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 24
temperature 0
stop 'five'
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo'
max_tokens 24
temperature 0
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :together_ai ].build! do
key 'this-key-is-not-valid'
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 16
temperature 0
end
end
end
let( :adapter_with_invalid_model ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'invalid_model'
max_tokens 16
temperature 0
end
end
end
include_examples 'chat requests'
include_examples 'chat requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'chat requests with stop sequence',
adapter: :adapter_with_stop_sequence
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with binary encoded images',
adapter: :vision_adapter
# tools no longer work with together ai although they did historically; this may
# be a model problem but I cannot find a working model
include_examples 'chat requests with tools'
include_examples 'chat requests with complex tools'
include_examples 'chat requests with parallel tools'
include_examples 'chat requests with invalid key'
include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/together_ai/stream_spec.rb | spec/adapters/together_ai/stream_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) streaming-request specs for the Together AI adapter:
# same coverage as the chat specs but with `stream true` on every adapter.
RSpec.describe "#{Intelligence::Adapter[ :together_ai ]} stream requests", :together_ai do
include_context 'vcr'
before do
# fixed: message previously read "An TOGETHERAI_API_KEY", inconsistent
# with the chat spec's wording
raise "A TOGETHERAI_API_KEY must be defined in the environment." unless ENV[ 'TOGETHERAI_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 128
temperature 0
stream true
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 16
temperature 0
stream true
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'deepseek-ai/DeepSeek-V3'
max_tokens 16
temperature 0
stop 'five'
stream true
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :together_ai ].build! do
key ENV[ 'TOGETHERAI_API_KEY' ]
chat_options do
model 'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo'
max_tokens 16
temperature 0
stream true
end
end
end
include_examples 'stream requests'
include_examples 'stream requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'stream requests with stop sequence',
adapter: :adapter_with_stop_sequence
include_examples 'stream requests without alternating roles'
include_examples 'stream requests with binary encoded images',
adapter: :vision_adapter
# tools no longer work with together ai although they did historically; this may
# be a model problem but I cannot find a working model
include_examples 'stream requests with tools'
include_examples 'stream requests with parallel tools'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/open_ai/chat_spec.rb | spec/adapters/open_ai/chat_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) chat-request specs for the OpenAI adapter: gpt-4o for
# the core examples, gpt-5 for reasoning ( with encrypted reasoning content
# included ) and web search, plus token limits, images, tools, and error
# handling for bad keys/models.
RSpec.describe "#{Intelligence::Adapter[ :open_ai ]} chat requests", :open_ai do
include_context 'vcr'
before do
raise "An OPENAI_API_KEY must be defined in the environment." unless ENV[ 'OPENAI_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-4o'
temperature 0
max_tokens 128
end
end
end
let( :adapter_with_tool ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-4o'
temperature 0
max_tokens 128
tool do
name :get_location
description \
"The get_location tool will return the users city, state or province and country."
end
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-4o'
temperature 0
max_tokens 16
end
end
end
let( :adapter_with_web_search ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-5'
abilities do
web_search
end
end
end
end
# reasoning-enabled adapter; requests detailed reasoning summaries and the
# encrypted reasoning content so thought output can be round-tripped
let( :adapter_with_thought ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-5'
reasoning do
effort :medium
summary :detailed
end
include [ 'reasoning.encrypted_content' ]
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :open_ai ].build! do
key 'this-key-is-not-valid'
chat_options do
model 'gpt-4o'
max_tokens 16
end
end
end
let( :adapter_with_invalid_model ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'invalid_model'
max_tokens 16
end
end
end
include_examples 'chat requests'
include_examples 'chat requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'chat requests with binary encoded images'
include_examples 'chat requests with file images'
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with thought',
adapter: :adapter_with_thought
include_examples 'chat requests with tools'
include_examples 'chat requests with adapter tools'
include_examples 'chat requests with complex tools'
include_examples 'chat requests with parallel tools'
include_examples 'chat requests with tools multiturn'
include_examples 'chat requests with calculator tool',
adapter: :adapter_with_thought
include_examples 'chat requests with web search',
adapter: :adapter_with_web_search
include_examples 'chat requests with invalid key'
include_examples 'chat requests with invalid model',
error_type: 'invalid_request_error'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/open_ai/stream_spec.rb | spec/adapters/open_ai/stream_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) streaming-request specs for the OpenAI adapter: same
# coverage as the chat specs with `stream true`, including gpt-5 reasoning
# and a gpt-4o web-search adapter.
RSpec.describe "#{Intelligence::Adapter[ :open_ai ]} stream requests", :open_ai do
include_context 'vcr'
before do
raise "An OPENAI_API_KEY must be defined in the environment." unless ENV[ 'OPENAI_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-4o'
max_tokens 128
temperature 0
stream true
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-4o'
max_tokens 16
temperature 0
stream true
end
end
end
# reasoning-enabled adapter; requests detailed reasoning summaries and the
# encrypted reasoning content so thought output can be round-tripped
let( :adapter_with_thought ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-5'
reasoning do
effort :medium
summary :detailed
end
include [ 'reasoning.encrypted_content' ]
stream true
end
end
end
let( :adapter_with_web_search ) do
Intelligence::Adapter[ :open_ai ].build! do
key ENV[ 'OPENAI_API_KEY' ]
chat_options do
model 'gpt-4o'
temperature 0
max_tokens 1024
abilities do
web_search
end
stream true
end
end
end
include_examples 'stream requests'
include_examples 'stream requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'stream requests with binary encoded images'
include_examples 'stream requests with file images'
include_examples 'stream requests with thought',
adapter: :adapter_with_thought
include_examples 'stream requests without alternating roles'
include_examples 'stream requests with tools'
include_examples 'stream requests with parallel tools'
include_examples 'stream requests with tools multiturn'
include_examples 'stream requests with web search',
adapter: :adapter_with_web_search
include_examples 'stream requests with calculator tool',
adapter: :adapter_with_thought
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/azure/chat_spec.rb | spec/adapters/azure/chat_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) chat-request specs for the Azure adapter. The model
# is part of the Azure deployment ( encoded in AZURE_BASE_URI ), so no model
# is configured here and the invalid-model examples are not included.
RSpec.describe "#{Intelligence::Adapter[ :azure ]} chat requests", :azure do
include_context 'vcr'
before do
raise "An AZURE_API_KEY must be defined in the environment." unless ENV[ 'AZURE_API_KEY' ]
raise "An AZURE_BASE_URI must be defined in the environment." unless ENV[ 'AZURE_BASE_URI' ]
end
# this is needed for azure test to avoid the rate limit; pause only when the
# example actually hit the live API ( no cassette, or a cassette that
# recorded new interactions ).
# fixed: dropped the unused `example` block parameter and the redundant
# `cassette &&` guard — `!c || ( c && x )` is equivalent to `!c || x`.
after( :each ) do
cassette = VCR.current_cassette
sleep 2 if !cassette || cassette.new_recorded_interactions.any?
end
let( :adapter ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 128
temperature 0
end
end
end
let( :adapter_with_tool ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 128
temperature 0
tool do
name :get_location
description \
"The get_location tool will return the users city, state or province and country."
end
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 24
temperature 0
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 24
temperature 0
stop 'five'
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 32
temperature 0
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :azure ].build! do
key 'invalid key'
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 16
temperature 0
end
end
end
include_examples 'chat requests'
include_examples 'chat requests with token limit exceeded', adapter: :adapter_with_limited_max_tokens
include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with binary encoded images', adapter: :vision_adapter
include_examples 'chat requests with file images', adapter: :vision_adapter
include_examples 'chat requests with tools'
include_examples 'chat requests with adapter tools'
include_examples 'chat requests with complex tools'
include_examples 'chat requests with parallel tools'
include_examples 'chat requests with tools multiturn'
include_examples 'chat requests with invalid key'
# this is not meaningful for Azure because the model is part of the deployment ( in the base uri )
# include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/azure/stream_spec.rb | spec/adapters/azure/stream_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) streaming-request specs for the Azure adapter: same
# coverage as the chat specs with `stream true` on every adapter.
RSpec.describe "#{Intelligence::Adapter[ :azure ]} stream requests", :azure do
include_context 'vcr'
before do
raise "An AZURE_API_KEY must be defined in the environment." unless ENV[ 'AZURE_API_KEY' ]
raise "An AZURE_BASE_URI must be defined in the environment." unless ENV[ 'AZURE_BASE_URI' ]
end
# this is needed for azure test to avoid the rate limit; pause only when the
# example actually hit the live API ( no cassette, or a cassette that
# recorded new interactions ).
# fixed: dropped the unused `example` block parameter and the redundant
# `cassette &&` guard — `!c || ( c && x )` is equivalent to `!c || x`.
after( :each ) do
cassette = VCR.current_cassette
sleep 2 if !cassette || cassette.new_recorded_interactions.any?
end
let( :adapter ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 128
temperature 0
stream true
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 16
temperature 0
stream true
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 16
temperature 0
stop 'five'
stream true
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :azure ].build! do
key ENV[ 'AZURE_API_KEY' ]
base_uri ENV[ 'AZURE_BASE_URI' ]
chat_options do
max_tokens 32
temperature 0
stream true
end
end
end
include_examples 'stream requests'
include_examples 'stream requests without alternating roles'
include_examples 'stream requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'stream requests with stop sequence',
adapter: :adapter_with_stop_sequence
include_examples 'stream requests with binary encoded images',
adapter: :vision_adapter
include_examples 'stream requests with file images',
adapter: :vision_adapter
include_examples 'stream requests with tools'
include_examples 'stream requests with parallel tools'
include_examples 'stream requests with tools multiturn'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/hyperbolic/chat_spec.rb | spec/adapters/hyperbolic/chat_spec.rb | require 'spec_helper'
# Live ( VCR-recorded ) chat-request specs for the Hyperbolic adapter using
# the Qwen2.5-VL vision-language model; covers token limits, stop sequences,
# images, and error handling for bad keys/models.
RSpec.describe "#{Intelligence::Adapter[ :hyperbolic ]} chat requests", :hyperbolic do
include_context 'vcr'
before do
raise "A HYPERBOLIC_API_KEY must be defined in the environment." unless ENV[ 'HYPERBOLIC_API_KEY' ]
end
# max_tokens 16 doubles as the token-limit fixture: the 'token limit
# exceeded' examples below use this default adapter rather than a dedicated
# limited adapter
let( :adapter ) do
Intelligence::Adapter[ :hyperbolic ].build! do
key ENV[ 'HYPERBOLIC_API_KEY' ]
chat_options do
model 'Qwen/Qwen2.5-VL-7B-Instruct'
max_tokens 16
temperature 0
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :hyperbolic ].build! do
key ENV[ 'HYPERBOLIC_API_KEY' ]
chat_options do
model 'Qwen/Qwen2.5-VL-7B-Instruct'
max_tokens 24
temperature 0
stop 'five'
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :hyperbolic ].build! do
key ENV[ 'HYPERBOLIC_API_KEY' ]
chat_options do
model 'Qwen/Qwen2.5-VL-7B-Instruct'
max_tokens 32
temperature 0
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :hyperbolic ].build! do
key 'this-key-is-not-valid'
chat_options do
model 'Qwen/Qwen2.5-VL-7B-Instruct'
max_tokens 16
temperature 0
end
end
end
let( :adapter_with_invalid_model ) do
Intelligence::Adapter[ :hyperbolic ].build! do
key ENV[ 'HYPERBOLIC_API_KEY' ]
chat_options do
model 'invalid-model'
max_tokens 16
temperature 0
end
end
end
include_examples 'chat requests'
include_examples 'chat requests with token limit exceeded'
include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
include_examples 'chat requests with binary encoded images', adapter: :vision_adapter
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with invalid key'
include_examples 'chat requests with invalid model',
error_type: 'invalid_request_error'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/hyperbolic/stream_spec.rb | spec/adapters/hyperbolic/stream_spec.rb | require 'spec_helper'
# Streaming request specs for the Hyperbolic adapter, driven by the shared
# example groups in spec/support/adapters using Qwen2.5-VL-7B-Instruct.
RSpec.describe "#{Intelligence::Adapter[ :hyperbolic ]} stream requests", :hyperbolic do
  before do
    # fixed article: "An HYPERBOLIC_API_KEY" -> "A HYPERBOLIC_API_KEY"
    # (matches the wording in the sibling hyperbolic chat spec)
    raise "A HYPERBOLIC_API_KEY must be defined in the environment." unless ENV[ 'HYPERBOLIC_API_KEY' ]
  end
  include_context 'vcr'
  # baseline streaming adapter; temperature 0 keeps responses deterministic
  # enough for VCR playback
  let( :adapter ) do
    Intelligence::Adapter[ :hyperbolic ].build! do
      key ENV[ 'HYPERBOLIC_API_KEY' ]
      chat_options do
        model 'Qwen/Qwen2.5-VL-7B-Instruct'
        max_tokens 16
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a stop sequence ('five')
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :hyperbolic ].build! do
      key ENV[ 'HYPERBOLIC_API_KEY' ]
      chat_options do
        model 'Qwen/Qwen2.5-VL-7B-Instruct'
        max_tokens 16
        temperature 0
        stop 'five'
        stream true
      end
    end
  end
  # streaming adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :hyperbolic ].build! do
      key ENV[ 'HYPERBOLIC_API_KEY' ]
      chat_options do
        model 'Qwen/Qwen2.5-VL-7B-Instruct'
        max_tokens 24
        temperature 0
        stream true
      end
    end
  end
  include_examples 'stream requests'
  include_examples 'stream requests with token limit exceeded'
  include_examples 'stream requests with stop sequence',
    adapter: :adapter_with_stop_sequence
  include_examples 'stream requests with binary encoded images',
    adapter: :vision_adapter
  include_examples 'stream requests without alternating roles'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/open_ai_legacy/chat_spec.rb | spec/adapters/open_ai_legacy/chat_spec.rb | require 'spec_helper'
# Chat (non-streaming) request specs for the legacy OpenAI adapter, driven by
# the shared example groups in spec/support/adapters using gpt-4o.
RSpec.describe "#{Intelligence::Adapter[ :open_ai_legacy ]} chat requests", :open_ai_legacy do
  include_context 'vcr'
  before do
    raise "An OPENAI_API_KEY must be defined in the environment." unless ENV[ 'OPENAI_API_KEY' ]
  end
  # baseline adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        temperature 0
        max_tokens 128
      end
    end
  end
  # adapter with a tool declared at the adapter level ('adapter tools' examples)
  let( :adapter_with_tool ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        temperature 0
        max_tokens 128
        tool do
          name :get_location
          description \
            "The get_location tool will return the users city, state or province and country."
        end
      end
    end
  end
  # adapter with a tight token budget, used by the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        temperature 0
        max_tokens 16
      end
    end
  end
  # adapter with a stop sequence ('five'), used by the stop sequence examples
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        max_tokens 24
        temperature 0
        stop 'five'
      end
    end
  end
  # adapter with a bad key, used to verify authentication error handling
  let( :adapter_with_invalid_key ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key 'this-key-is-not-valid'
      chat_options do
        model 'gpt-4o'
        max_tokens 16
      end
    end
  end
  # adapter with a nonexistent model, used to verify request error handling
  let( :adapter_with_invalid_model ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'invalid_model'
        max_tokens 16
      end
    end
  end
  include_examples 'chat requests'
  include_examples 'chat requests with token limit exceeded',
    adapter: :adapter_with_limited_max_tokens
  include_examples 'chat requests with stop sequence',
    adapter: :adapter_with_stop_sequence
  include_examples 'chat requests with binary encoded images'
  include_examples 'chat requests with file images'
  include_examples 'chat requests without alternating roles'
  include_examples 'chat requests with tools'
  include_examples 'chat requests with adapter tools'
  include_examples 'chat requests with complex tools'
  include_examples 'chat requests with parallel tools'
  include_examples 'chat requests with invalid key'
  include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/open_ai_legacy/stream_spec.rb | spec/adapters/open_ai_legacy/stream_spec.rb | require 'spec_helper'
# Streaming request specs for the legacy OpenAI adapter, driven by the shared
# example groups in spec/support/adapters using gpt-4o.
RSpec.describe "#{Intelligence::Adapter[ :open_ai_legacy ]} stream requests", :open_ai_legacy do
  include_context 'vcr'
  before do
    raise "An OPENAI_API_KEY must be defined in the environment." unless ENV[ 'OPENAI_API_KEY' ]
  end
  # baseline streaming adapter; include_usage requests usage totals in the
  # final stream chunk
  let( :adapter ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        max_completion_tokens 128
        temperature 0
        stream true
        stream_options do
          include_usage true
        end
      end
    end
  end
  # streaming adapter with a tight token budget, for the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        max_completion_tokens 16
        temperature 0
        stream true
        stream_options do
          include_usage true
        end
      end
    end
  end
  # streaming adapter with a stop sequence ('five')
  # NOTE(review): this let uses max_tokens while the two above use
  # max_completion_tokens — confirm the difference is intentional
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :open_ai_legacy ].build! do
      key ENV[ 'OPENAI_API_KEY' ]
      chat_options do
        model 'gpt-4o'
        max_tokens 16
        temperature 0
        stop 'five'
        stream true
        stream_options do
          include_usage true
        end
      end
    end
  end
  include_examples 'stream requests'
  include_examples 'stream requests with token limit exceeded',
    adapter: :adapter_with_limited_max_tokens
  include_examples 'stream requests with stop sequence',
    adapter: :adapter_with_stop_sequence
  include_examples 'stream requests with binary encoded images'
  include_examples 'stream requests with file images'
  include_examples 'stream requests without alternating roles'
  include_examples 'stream requests with tools'
  include_examples 'stream requests with parallel tools'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/open_router/chat_spec.rb | spec/adapters/open_router/chat_spec.rb | require 'spec_helper'
# Chat (non-streaming) request specs for the OpenRouter adapter, driven by the
# shared example groups in spec/support/adapters. Requests are pinned to the
# Hyperbolic provider with fallbacks disabled so recorded cassettes stay stable.
RSpec.describe "#{Intelligence::Adapter[ :open_router ]} chat requests", :open_router do
  include_context 'vcr'
  before do
    # fixed article: "A OPENROUTER_API_KEY" -> "An OPENROUTER_API_KEY"
    # (matches the wording in the sibling open_router stream spec)
    raise "An OPENROUTER_API_KEY must be defined in the environment." unless ENV[ 'OPENROUTER_API_KEY' ]
  end
  # baseline adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 24
        temperature 0
        provider do
          order [ 'Hyperbolic' ]
          allow_fallbacks false
        end
      end
    end
  end
  # adapter with a stop sequence ('five'), used by the stop sequence examples
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 24
        temperature 0
        stop 'five'
        provider do
          order [ 'Hyperbolic' ]
          allow_fallbacks false
        end
      end
    end
  end
  # adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 24
        temperature 0
        provider do
          order [ 'Hyperbolic' ]
          allow_fallbacks false
        end
      end
    end
  end
  # adapter with a bad key, used to verify authentication error handling
  let( :adapter_with_invalid_key ) do
    Intelligence::Adapter[ :open_router ].build! do
      key 'invalid key'
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 16
        temperature 0
      end
    end
  end
  # adapter with a nonexistent model, used to verify request error handling
  let( :adapter_with_invalid_model ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'invalid_model'
        max_tokens 16
        temperature 0
      end
    end
  end
  include_examples 'chat requests'
  include_examples 'chat requests with token limit exceeded'
  include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
  include_examples 'chat requests with binary encoded images', adapter: :vision_adapter
  include_examples 'chat requests without alternating roles'
  include_examples 'chat requests with invalid key'
  include_examples 'chat requests with invalid model', error_type: 'invalid_request_error'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/open_router/stream_spec.rb | spec/adapters/open_router/stream_spec.rb | require 'spec_helper'
# Streaming request specs for the OpenRouter adapter, driven by the shared
# example groups in spec/support/adapters. Requests are pinned to the
# Hyperbolic provider with fallbacks disabled so recorded cassettes stay stable.
RSpec.describe "#{Intelligence::Adapter[ :open_router ]} stream requests", :open_router do
  before do
    raise "An OPENROUTER_API_KEY must be defined in the environment." unless ENV[ 'OPENROUTER_API_KEY' ]
  end
  include_context 'vcr'
  # baseline streaming adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 32
        temperature 0
        stream true
        provider do
          order [ 'Hyperbolic' ]
          allow_fallbacks false
        end
      end
    end
  end
  # streaming adapter with a stop sequence ('five')
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 16
        temperature 0
        stop 'five'
        stream true
        provider do
          order [ 'Hyperbolic' ]
          allow_fallbacks false
        end
      end
    end
  end
  # streaming adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :open_router ].build! do
      key ENV[ 'OPENROUTER_API_KEY' ]
      chat_options do
        model 'qwen/qwen-2.5-vl-7b-instruct'
        max_tokens 16
        temperature 0
        stream true
        provider do
          order [ 'Hyperbolic' ]
          allow_fallbacks false
        end
      end
    end
  end
  include_examples 'stream requests'
  include_examples 'stream requests with token limit exceeded'
  include_examples 'stream requests with stop sequence', adapter: :adapter_with_stop_sequence
  include_examples 'stream requests with binary encoded images', adapter: :vision_adapter
  include_examples 'stream requests without alternating roles'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/groq/chat_spec.rb | spec/adapters/groq/chat_spec.rb | require 'spec_helper'
# Chat (non-streaming) request specs for the Groq adapter, driven by the shared
# example groups in spec/support/adapters. Most examples use Kimi K2; the
# adapter-level tool examples use gpt-oss-120b.
RSpec.describe "#{Intelligence::Adapter[ :groq ]} chat requests", :groq do
  include_context 'vcr'
  before do
    raise "A GROQ_API_KEY must be defined in the environment." unless ENV[ 'GROQ_API_KEY' ]
  end
  # baseline adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 16
        temperature 0
      end
    end
  end
  # adapter with a stop sequence ('five'), used by the stop sequence examples
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 32
        stop 'five'
        temperature 0
      end
    end
  end
  # adapter with a large token budget, used by the tool-calling examples which
  # need room for complete tool-call responses
  let( :adapter_with_extended_tokens ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 1024
        temperature 0
      end
    end
  end
  # adapter with a tool declared at the adapter level ('adapter tools' examples)
  let( :adapter_with_tool ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'openai/gpt-oss-120b'
        max_tokens 1024
        temperature 0
        tool do
          name :get_location
          description \
            "The get_location tool will return the users city, state or province and country."
        end
      end
    end
  end
  # adapter with a bad key, used to verify authentication error handling
  let( :adapter_with_invalid_key ) do
    Intelligence::Adapter[ :groq ].build! do
      key 'this-key-is-not-valid'
      chat_options do
        model 'mistral-saba-24b'
        max_tokens 16
        temperature 0
      end
    end
  end
  # adapter with a nonexistent model, used to verify request error handling
  let( :adapter_with_invalid_model ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'invalid-model'
        max_tokens 16
        temperature 0
      end
    end
  end
  include_examples 'chat requests'
  include_examples 'chat requests with token limit exceeded'
  include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
  include_examples 'chat requests without alternating roles'
  include_examples 'chat requests with tools', adapter: :adapter_with_extended_tokens
  include_examples 'chat requests with adapter tools', adapter: :adapter_with_tool
  include_examples 'chat requests with complex tools', adapter: :adapter_with_tool
  include_examples 'chat requests with parallel tools', adapter: :adapter_with_extended_tokens
  include_examples 'chat requests with tools multiturn', adapter: :adapter_with_extended_tokens
  include_examples 'chat requests with calculator tool', adapter: :adapter_with_extended_tokens
  include_examples 'chat requests with invalid key'
  include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/groq/stream_spec.rb | spec/adapters/groq/stream_spec.rb | require 'spec_helper'
# Streaming request specs for the Groq adapter, driven by the shared example
# groups in spec/support/adapters using Kimi K2.
RSpec.describe "#{Intelligence::Adapter[ :groq ]} stream requests", :groq do
  include_context 'vcr'
  before do
    # fixed article: "An GROQ_API_KEY" -> "A GROQ_API_KEY"
    # (matches the wording in the sibling groq chat spec)
    raise "A GROQ_API_KEY must be defined in the environment." unless ENV[ 'GROQ_API_KEY' ]
  end
  # baseline streaming adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 24
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a large token budget for tool-calling examples
  let( :adapter_with_extended_tokens ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 1024
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a stop sequence ('five')
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 64
        stop 'five'
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a tool declared at the adapter level
  let( :adapter_with_tool ) do
    Intelligence::Adapter[ :groq ].build! do
      key ENV[ 'GROQ_API_KEY' ]
      chat_options do
        model 'moonshotai/kimi-k2-instruct-0905'
        max_tokens 256
        temperature 0
        tool do
          name :get_location
          description \
            "The get_location tool will return the users city, state or province and country."
        end
        stream true
      end
    end
  end
  include_examples 'stream requests'
  include_examples 'stream requests with token limit exceeded'
  include_examples 'stream requests with stop sequence', adapter: :adapter_with_stop_sequence
  include_examples 'stream requests without alternating roles'
  include_examples 'stream requests with tools', adapter: :adapter_with_tool
  include_examples 'stream requests with parallel tools', adapter: :adapter_with_tool
  include_examples 'stream requests with tools multiturn', adapter: :adapter_with_tool
  include_examples 'stream requests with calculator tool', adapter: :adapter_with_tool
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/x_ai/chat_spec.rb | spec/adapters/x_ai/chat_spec.rb | require 'spec_helper'
# Chat (non-streaming) request specs for the xAI adapter, driven by the shared
# example groups in spec/support/adapters using the Grok model family.
RSpec.describe "#{Intelligence::Adapter[ :x_ai ]} chat requests", :x_ai do
  include_context 'vcr'
  before do
    # fixed article: "A XAI_API_KEY" -> "An XAI_API_KEY"
    # (matches the wording in the sibling x_ai stream spec)
    raise "An XAI_API_KEY must be defined in the environment." unless ENV[ 'XAI_API_KEY' ]
  end
  # this is needed for x-AI test to avoid the rate limit
  after( :each ) do | example |
    cassette = VCR.current_cassette
    sleep 1 if cassette && cassette.new_recorded_interactions.any?
  end
  # baseline adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 128
        temperature 0
      end
    end
  end
  # adapter with a tool declared at the adapter level ('adapter tools' examples)
  let( :adapter_with_tool ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 128
        temperature 0
        tool do
          name :get_location
          description \
            "The get_location tool will return the users city, state or province and country."
        end
      end
    end
  end
  # adapter with a tight token budget, used by the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 24
        temperature 0
      end
    end
  end
  # adapter with a stop sequence ('five'), used by the stop sequence examples
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 24
        temperature 0
        stop 'five'
      end
    end
  end
  # adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-2-vision-1212'
        max_tokens 24
        temperature 0
      end
    end
  end
  # adapter with web search enabled; citations are requested so the shared
  # examples can verify a search actually occurred
  let( :adapter_with_web_search ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3-latest'
        max_tokens 1024
        temperature 0
        abilities do
          web_search do
            return_citations true
          end
        end
      end
    end
  end
  # reasoning-capable adapter used by the 'with thought' examples
  let( :adapter_with_thought ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3-mini'
        max_tokens 4096
        temperature 0
        reasoning_effort :low
      end
    end
  end
  # adapter with a bad key, used to verify authentication error handling
  let( :adapter_with_invalid_key ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key 'this-key-is-not-valid'
      chat_options do
        model 'grok-beta'
        max_tokens 16
        temperature 0
      end
    end
  end
  # adapter with a nonexistent model, used to verify request error handling
  let( :adapter_with_invalid_model ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'invalid_model'
        max_tokens 16
        temperature 0
      end
    end
  end
  include_examples 'chat requests'
  include_examples 'chat requests with token limit exceeded',
    adapter: :adapter_with_limited_max_tokens
  include_examples 'chat requests with stop sequence',
    adapter: :adapter_with_stop_sequence
  # removed a duplicate inclusion of 'chat requests without alternating roles';
  # the group was previously included twice, running its examples twice
  include_examples 'chat requests without alternating roles'
  include_examples 'chat requests with binary encoded images',
    adapter: :vision_adapter
  include_examples 'chat requests with file images',
    adapter: :vision_adapter
  include_examples 'chat requests with web search',
    adapter: :adapter_with_web_search
  include_examples 'chat requests with thought',
    adapter: :adapter_with_thought
  include_examples 'chat requests with tools'
  include_examples 'chat requests with adapter tools'
  include_examples 'chat requests with complex tools'
  include_examples 'chat requests with parallel tools'
  include_examples 'chat requests with tools multiturn'
  include_examples 'chat requests with invalid key',
    error_type: 'invalid_request_error'
  include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/x_ai/stream_spec.rb | spec/adapters/x_ai/stream_spec.rb | require 'spec_helper'
# Streaming request specs for the xAI adapter, driven by the shared example
# groups in spec/support/adapters using the Grok model family.
RSpec.describe "#{Intelligence::Adapter[ :x_ai ]} stream requests", :x_ai do
  include_context 'vcr'
  before do
    raise "An XAI_API_KEY must be defined in the environment." unless ENV[ 'XAI_API_KEY' ]
  end
  # this is needed for x-AI test to avoid the rate limit
  after( :each ) do | example |
    cassette = VCR.current_cassette
    sleep 1 if cassette && cassette.new_recorded_interactions.any?
  end
  # baseline streaming adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 128
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a tight token budget, for the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 16
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a stop sequence ('five')
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3'
        max_tokens 16
        temperature 0
        stop 'five'
        stream true
      end
    end
  end
  # streaming adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-2-vision-1212'
        max_tokens 16
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with web search enabled; citations are requested so the
  # shared examples can verify a search actually occurred
  let( :adapter_with_web_search ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3-latest'
        max_tokens 1024
        temperature 0
        abilities do
          web_search do
            return_citations true
          end
        end
        stream true
      end
    end
  end
  # reasoning-capable streaming adapter used by the 'with thought' examples
  let( :adapter_with_thought ) do
    Intelligence::Adapter[ :x_ai ].build! do
      key ENV[ 'XAI_API_KEY' ]
      chat_options do
        model 'grok-3-mini'
        max_tokens 1024
        temperature 0
        reasoning_effort :high
        stream true
      end
    end
  end
  include_examples 'stream requests'
  include_examples 'stream requests with token limit exceeded',
    adapter: :adapter_with_limited_max_tokens
  include_examples 'stream requests with stop sequence',
    adapter: :adapter_with_stop_sequence
  include_examples 'stream requests without alternating roles'
  include_examples 'stream requests with binary encoded images',
    adapter: :vision_adapter
  include_examples 'stream requests with file images',
    adapter: :vision_adapter
  include_examples 'stream requests with thought',
    adapter: :adapter_with_thought
  include_examples 'stream requests with tools'
  include_examples 'stream requests with parallel tools'
  include_examples 'stream requests with tools multiturn'
  include_examples 'stream requests with web search',
    adapter: :adapter_with_web_search
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/mistral/chat_spec.rb | spec/adapters/mistral/chat_spec.rb | require 'spec_helper'
# Chat (non-streaming) request specs for the Mistral adapter, driven by the
# shared example groups in spec/support/adapters; vision examples use Pixtral.
RSpec.describe "#{Intelligence::Adapter[ :mistral ]} chat requests", :mistral do
  include_context 'vcr'
  before do
    raise "A MISTRAL_API_KEY must be defined in the environment." unless ENV[ 'MISTRAL_API_KEY' ]
  end
  # baseline adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 128
        temperature 0
      end
    end
  end
  # adapter with a tool declared at the adapter level ('adapter tools' examples)
  let( :adapter_with_tool ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 128
        temperature 0
        tool do
          name :get_location
          description \
            "The get_location tool will return the users city, state or province and country."
        end
      end
    end
  end
  # adapter with a tight token budget, used by the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 24
        temperature 0
      end
    end
  end
  # adapter with a stop sequence ('five'), used by the stop sequence examples
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 24
        temperature 0
        stop 'five'
      end
    end
  end
  # adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'pixtral-12b-2409'
        max_tokens 32
        temperature 0
      end
    end
  end
  # adapter with a bad key, used to verify authentication error handling
  let( :adapter_with_invalid_key ) do
    Intelligence::Adapter[ :mistral ].build! do
      key 'invalid key'
      chat_options do
        model 'pixtral-12b-2409'
        max_tokens 16
        temperature 0
      end
    end
  end
  # adapter with a nonexistent model, used to verify request error handling
  let( :adapter_with_invalid_model ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'invalid'
        max_tokens 16
        temperature 0
      end
    end
  end
  include_examples 'chat requests'
  include_examples 'chat requests with token limit exceeded', adapter: :adapter_with_limited_max_tokens
  include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
  include_examples 'chat requests without alternating roles'
  include_examples 'chat requests with binary encoded images', adapter: :vision_adapter
  include_examples 'chat requests with file images', adapter: :vision_adapter
  include_examples 'chat requests with tools'
  include_examples 'chat requests with adapter tools'
  include_examples 'chat requests with complex tools'
  include_examples 'chat requests with parallel tools'
  include_examples 'chat requests with tools multiturn'
  include_examples 'chat requests with invalid key'
  include_examples 'chat requests with invalid model', error_type: 'invalid_request_error'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/mistral/stream_spec.rb | spec/adapters/mistral/stream_spec.rb | require 'spec_helper'
# Streaming request specs for the Mistral adapter, driven by the shared example
# groups in spec/support/adapters; vision examples use Pixtral.
RSpec.describe "#{Intelligence::Adapter[ :mistral ]} stream requests", :mistral do
  include_context 'vcr'
  before do
    # fixed article: "An MISTRAL_API_KEY" -> "A MISTRAL_API_KEY"
    # (matches the wording in the sibling mistral chat spec)
    raise "A MISTRAL_API_KEY must be defined in the environment." unless ENV[ 'MISTRAL_API_KEY' ]
  end
  # baseline streaming adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 128
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a tight token budget, for the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 16
        temperature 0
        stream true
      end
    end
  end
  # streaming adapter with a stop sequence ('five')
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'mistral-large-latest'
        max_tokens 16
        temperature 0
        stop 'five'
        stream true
      end
    end
  end
  # streaming adapter used by the image (vision) examples
  let( :vision_adapter ) do
    Intelligence::Adapter[ :mistral ].build! do
      key ENV[ 'MISTRAL_API_KEY' ]
      chat_options do
        model 'pixtral-12b-2409'
        max_tokens 32
        temperature 0
        stream true
      end
    end
  end
  include_examples 'stream requests'
  include_examples 'stream requests without alternating roles'
  include_examples 'stream requests with token limit exceeded',
    adapter: :adapter_with_limited_max_tokens
  include_examples 'stream requests with stop sequence',
    adapter: :adapter_with_stop_sequence
  include_examples 'stream requests with binary encoded images',
    adapter: :vision_adapter
  include_examples 'stream requests with file images',
    adapter: :vision_adapter
  include_examples 'stream requests with tools'
  include_examples 'stream requests with parallel tools'
  include_examples 'stream requests with tools multiturn'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/anthropic/chat_spec.rb | spec/adapters/anthropic/chat_spec.rb | require 'spec_helper'
# Chat (non-streaming) request specs for the Anthropic adapter, driven by the
# shared example groups in spec/support/adapters using the Claude Sonnet models.
RSpec.describe "#{Intelligence::Adapter[ :anthropic ]} chat requests", :anthropic do
  include_context 'vcr'
  before do
    raise "An ANTHROPIC_API_KEY must be defined in the environment." unless ENV[ 'ANTHROPIC_API_KEY' ]
  end
  # baseline adapter used by the generic shared examples
  let( :adapter ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key ENV[ 'ANTHROPIC_API_KEY' ]
      chat_options do
        model 'claude-sonnet-4-20250514'
        max_tokens 2048
        temperature 0
      end
    end
  end
  # adapter with a tight token budget, used by the token-limit examples
  let( :adapter_with_limited_max_tokens ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key ENV[ 'ANTHROPIC_API_KEY' ]
      chat_options do
        model 'claude-sonnet-4-5-20250929'
        max_tokens 24
        temperature 0
      end
    end
  end
  # adapter with a stop sequence ('five'); Anthropic reports stops with the
  # :end_sequence_encountered end reason (see include_examples below)
  let( :adapter_with_stop_sequence ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key ENV[ 'ANTHROPIC_API_KEY' ]
      chat_options do
        model 'claude-sonnet-4-5-20250929'
        max_tokens 24
        temperature 0
        stop 'five'
      end
    end
  end
  # extended-thinking adapter used by the 'with thought' examples; budget_tokens
  # must be below max_tokens
  let( :adapter_with_thought ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key ENV[ 'ANTHROPIC_API_KEY' ]
      chat_options do
        model 'claude-sonnet-4-5-20250929'
        max_tokens 8192
        reasoning do
          budget_tokens 8191
        end
      end
    end
  end
  # adapter with a tool declared at the adapter level ('adapter tools' examples);
  # fixed: removed the space between `let` and `(` for consistency with the
  # other let definitions in this file
  let( :adapter_with_tool ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key ENV[ 'ANTHROPIC_API_KEY' ]
      chat_options do
        model 'claude-sonnet-4-5-20250929'
        max_tokens 128
        temperature 0
        tool do
          name :get_location
          description \
            "The get_location tool will return the users city, state or province and country."
        end
      end
    end
  end
  # adapter with a bad key, used to verify authentication error handling
  let( :adapter_with_invalid_key ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key 'this-key-is-not-valid'
      chat_options do
        model 'claude-sonnet-4-5-20250929'
        max_tokens 24
        temperature 0
      end
    end
  end
  # adapter with a nonexistent model, used to verify request error handling
  let( :adapter_with_invalid_model ) do
    Intelligence::Adapter[ :anthropic ].build! do
      key ENV[ 'ANTHROPIC_API_KEY' ]
      chat_options do
        model 'invalid-model'
        max_tokens 24
        temperature 0
      end
    end
  end
  include_examples 'chat requests'
  include_examples 'chat requests with token limit exceeded',
    adapter: :adapter_with_limited_max_tokens
  include_examples 'chat requests with stop sequence',
    adapter: :adapter_with_stop_sequence,
    end_reason: :end_sequence_encountered
  include_examples 'chat requests without alternating roles'
  include_examples 'chat requests with binary encoded images'
  include_examples 'chat requests with file images'
  include_examples 'chat requests with thought',
    adapter: :adapter_with_thought
  # include_examples 'chat requests with binary encoded pdf'
  include_examples 'chat requests with tools'
  include_examples 'chat requests with adapter tools'
  include_examples 'chat requests with complex tools'
  include_examples 'chat requests with parallel tools'
  include_examples 'chat requests with tools multiturn'
  include_examples 'chat requests with calculator tool',
    adapter: :adapter_with_thought
  include_examples 'chat requests with invalid key'
  include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/anthropic/stream_spec.rb | spec/adapters/anthropic/stream_spec.rb | require 'spec_helper'
RSpec.describe "#{Intelligence::Adapter[ :anthropic ]} stream requests", :anthropic do
include_context 'vcr'
before do
raise "An ANTHROPIC_API_KEY must be defined in the environment." unless ENV[ 'ANTHROPIC_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :anthropic ].build! do
key ENV[ 'ANTHROPIC_API_KEY' ]
chat_options do
model 'claude-sonnet-4-20250514'
temperature 0
max_tokens 4096
stream true
end
end
end
let( :adapter_with_thought ) do
Intelligence::Adapter[ :anthropic ].build! do
key ENV[ 'ANTHROPIC_API_KEY' ]
chat_options do
model 'claude-sonnet-4-5-20250929'
max_tokens 8192
reasoning do
budget_tokens 8191
end
stream true
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :anthropic ].build! do
key ENV[ 'ANTHROPIC_API_KEY' ]
chat_options do
model 'claude-sonnet-4-5-20250929'
max_tokens 24
temperature 0
stream true
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :anthropic ].build! do
key ENV[ 'ANTHROPIC_API_KEY' ]
chat_options do
model 'claude-sonnet-4-5-20250929'
max_tokens 16
stop 'five'
temperature 0
stream true
end
end
end
include_examples 'stream requests'
include_examples 'stream requests without alternating roles'
include_examples 'stream requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'stream requests with stop sequence',
adapter: :adapter_with_stop_sequence,
end_reason: :end_sequence_encountered
include_examples 'stream requests with binary encoded images'
include_examples 'stream requests with file images'
# include_examples 'stream requests with binary encoded pdf'
include_examples 'stream requests with thought',
adapter: :adapter_with_thought
include_examples 'stream requests with tools'
include_examples 'stream requests with parallel tools'
include_examples 'stream requests with tools multiturn'
include_examples 'stream requests with calculator tool',
adapter: :adapter_with_thought
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/cerebras/chat_spec.rb | spec/adapters/cerebras/chat_spec.rb | require 'spec_helper'
RSpec.describe Intelligence::Adapter[ :cerebras ], :cerebras do
include_context 'vcr'
# this is needed for cerebras test to avoid the rate limit
after( :each ) do | example |
cassette = VCR.current_cassette
sleep 10 if cassette && cassette.new_recorded_interactions.any?
end
before do
raise "A CEREBRAS_API_KEY must be defined in the environment." unless ENV[ 'CEREBRAS_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :cerebras ].build! do
key ENV[ 'CEREBRAS_API_KEY' ]
chat_options do
model 'llama-3.3-70b'
max_tokens 16
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :cerebras ].build! do
key ENV[ 'CEREBRAS_API_KEY' ]
chat_options do
model 'llama-3.3-70b'
max_tokens 24
stop 'five'
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :cerebras ].build! do
key 'this-key-is-not-valid'
chat_options do
model 'llama-3.3-70b'
max_tokens 16
end
end
end
let( :adapter_with_invalid_model ) do
Intelligence::Adapter[ :cerebras ].build! do
key ENV[ 'CEREBRAS_API_KEY' ]
chat_options do
model 'invalid model'
max_tokens 16
end
end
end
include_examples 'chat requests'
include_examples 'chat requests with token limit exceeded'
include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with invalid key'
include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/cerebras/stream_spec.rb | spec/adapters/cerebras/stream_spec.rb | require 'spec_helper'
RSpec.describe "#{Intelligence::Adapter[ :cerebras ]} stream requests", :cerebras do
include_context 'vcr'
# this is needed for cerebras test to avoid the rate limit
after( :each ) do | example |
cassette = VCR.current_cassette
sleep 10 if cassette && cassette.new_recorded_interactions.any?
end
before do
raise "An CEREBRAS_API_KEY must be defined in the environment." unless ENV[ 'CEREBRAS_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :cerebras ].build! do
key ENV[ 'CEREBRAS_API_KEY' ]
chat_options do
model 'llama-3.3-70b'
max_tokens 16
stream true
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :cerebras ].build! do
key ENV[ 'CEREBRAS_API_KEY' ]
chat_options do
model 'llama-3.3-70b'
max_tokens 64
stop 'five'
stream true
end
end
end
include_examples 'stream requests'
include_examples 'stream requests with token limit exceeded'
include_examples 'stream requests with stop sequence', adapter: :adapter_with_stop_sequence
include_examples 'stream requests without alternating roles'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/google/chat_spec.rb | spec/adapters/google/chat_spec.rb | require 'spec_helper'
RSpec.describe "#{Intelligence::Adapter[ :google ]} chat requests", :google do
include_context 'vcr'
before do
raise "An GOOGLE_API_KEY must be defined in the environment." unless ENV[ 'GOOGLE_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 256
temperature 0
reasoning do
budget 0
end
end
end
end
let( :adapter_with_thought ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 16384
temperature 0
reasoning do
budget 8192
summary true
end
end
end
end
let( :adapter_with_adapter_tool ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 128
temperature 0
reasoning do
budget 0
end
tool do
name :get_location
description \
"The get_location tool will return the users city, state or province and country."
end
end
end
end
let( :adapter_with_tools ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 4096
temperature 0
reasoning do
budget 0
end
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 16
temperature 0
reasoning do
budget 0
end
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 196
temperature 0
stop 'five'
reasoning do
budget 0
end
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 256
temperature 0
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :google ].build! do
key 'this-key-is-not-valid'
chat_options do
model 'gemini-2.5-flash'
max_tokens 32
temperature 0
end
end
end
let( :adapter_with_invalid_model ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'invalid-model'
max_tokens 32
temperature 0
end
end
end
include_examples 'chat requests'
# the google api currently doesn not respect the max token limit
include_examples 'chat requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'chat requests with stop sequence',
adapter: :adapter_with_stop_sequence
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with binary encoded images'
include_examples 'chat requests with binary encoded text'
include_examples 'chat requests with binary encoded pdf'
include_examples 'chat requests with binary encoded audio'
include_examples 'chat requests with thought',
adapter: :adapter_with_thought
include_examples 'chat requests with tools',
adapter: :adapter_with_tools
include_examples 'chat requests with adapter tools',
adapter: :adapter_with_adapter_tool
include_examples 'chat requests with parallel tools',
adapter: :adapter_with_tools
include_examples 'chat requests with complex tools',
adapter: :adapter_with_tools
include_examples 'chat requests with tools multiturn',
adapter: :adapter_with_tools
include_examples 'chat requests with calculator tool',
adapter: :adapter_with_tools
include_examples 'chat requests with invalid key'
include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/google/stream_spec.rb | spec/adapters/google/stream_spec.rb | require 'spec_helper'
RSpec.describe "#{Intelligence::Adapter[ :google ]} stream requests", :google do
include_context 'vcr'
before do
raise "An GOOGLE_API_KEY must be defined in the environment." unless ENV[ 'GOOGLE_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 256
temperature 0
reasoning do
budget 0
end
stream true
end
end
end
let( :adapter_with_thought ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 16384
temperature 0
reasoning do
budget 8192
summary true
end
stream true
end
end
end
let( :adapter_with_tools ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 1024
temperature 0
reasoning do
budget 0
end
stream true
end
end
end
let( :adapter_with_limited_max_tokens ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 32
temperature 0
reasoning do
budget 0
end
stream true
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 32
temperature 0
stop 'five'
reasoning do
budget 0
end
stream true
end
end
end
let( :vision_adapter ) do
Intelligence::Adapter[ :google ].build! do
key ENV[ 'GOOGLE_API_KEY' ]
chat_options do
model 'gemini-2.5-flash'
max_tokens 256
temperature 0
reasoning do
budget 0
end
stream true
end
end
end
include_examples 'stream requests'
# the google api currently doesn not respect the max token limit
#
include_examples 'stream requests with token limit exceeded',
adapter: :adapter_with_limited_max_tokens
include_examples 'stream requests with stop sequence',
adapter: :adapter_with_limited_max_tokens
include_examples 'stream requests without alternating roles'
include_examples 'stream requests with binary encoded images'
include_examples 'stream requests with binary encoded audio'
include_examples 'stream requests with binary encoded pdf'
include_examples 'stream requests with binary encoded text'
include_examples 'stream requests with thought',
adapter: :adapter_with_thought
include_examples 'stream requests with tools',
adapter: :adapter_with_tools
include_examples 'stream requests with parallel tools',
adapter: :adapter_with_tools
include_examples 'stream requests with tools multiturn',
adapter: :adapter_with_tools
include_examples 'stream requests with calculator tool',
adapter: :adapter_with_tools
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/samba_nova/chat_spec.rb | spec/adapters/samba_nova/chat_spec.rb | require 'spec_helper'
RSpec.describe "#{Intelligence::Adapter[ :samba_nova ]} chat requests", :samba_nova do
include_context 'vcr'
before do
raise "A SAMBANOVA_API_KEY must be defined in the environment." unless ENV[ 'SAMBANOVA_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :samba_nova ].build! do
key ENV[ 'SAMBANOVA_API_KEY' ]
chat_options do
model 'Meta-Llama-3.3-70B-Instruct'
max_tokens 24
temperature 0
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :samba_nova ].build! do
key ENV[ 'SAMBANOVA_API_KEY' ]
chat_options do
model 'Meta-Llama-3.3-70B-Instruct'
max_tokens 24
temperature 0
stop 'five'
end
end
end
let( :adapter_with_invalid_key ) do
Intelligence::Adapter[ :samba_nova ].build! do
key 'this-key-is-not-valid'
chat_options do
model 'Meta-Llama-3.3-70B-Instruct'
max_tokens 16
temperature 0
end
end
end
let( :adapter_with_invalid_model ) do
Intelligence::Adapter[ :samba_nova ].build! do
key ENV[ 'SAMBANOVA_API_KEY' ]
chat_options do
model 'invalid_model'
max_tokens 16
temperature 0
end
end
end
include_examples 'chat requests'
include_examples 'chat requests with token limit exceeded'
include_examples 'chat requests with stop sequence', adapter: :adapter_with_stop_sequence
include_examples 'chat requests without alternating roles'
include_examples 'chat requests with invalid key'
include_examples 'chat requests with invalid model'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/spec/adapters/samba_nova/stream_spec.rb | spec/adapters/samba_nova/stream_spec.rb | require 'spec_helper'
RSpec.describe "#{Intelligence::Adapter[ :samba_nova ]} stream requests", :samba_nova do
include_context 'vcr'
before do
raise "An SAMBANOVA_API_KEY must be defined in the environment." unless ENV[ 'SAMBANOVA_API_KEY' ]
end
let( :adapter ) do
Intelligence::Adapter[ :samba_nova ].build! do
key ENV[ 'SAMBANOVA_API_KEY' ]
chat_options do
model 'Meta-Llama-3.3-70B-Instruct'
max_tokens 16
temperature 0
stream true
end
end
end
let( :adapter_with_stop_sequence ) do
Intelligence::Adapter[ :samba_nova ].build! do
key ENV[ 'SAMBANOVA_API_KEY' ]
chat_options do
model 'Meta-Llama-3.3-70B-Instruct'
max_tokens 16
temperature 0
stop 'five'
stream true
end
end
end
include_examples 'stream requests'
include_examples 'stream requests with token limit exceeded'
include_examples 'stream requests with stop sequence', adapter: :adapter_with_stop_sequence
include_examples 'stream requests without alternating roles'
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/google_builder.rb | examples/google_builder.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
require 'debug'
# this block of code builds and configures your adapter; in this case we have chosen google
adapter = Intelligence::Adapter.build :google do
key ENV[ 'GOOGLE_API_KEY' ] # this is the google api key; here it is
# retrieved from the envionment
chat_options do
model 'gemini-1.5-pro' # this is the google model we want to use
max_tokens 512 # this is the maximum number of tokens we
# want the model to generate
abilities do
google_search_retrieval # the grounding feature is
# google_search_retrieval if you use a 1.5
# model but just google_search if you
# use a 2.0 model
end
end
end
# this line constructs a new request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line builds a conversation with a system and user message
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a highly efficient AI assistant. Provide clear, concise responses. " \
"There is no need to caveat your answers."
end
message role: :user do
content text: ARGV[ 0 ] || 'Hello!'
end
end
# this line makes the actual request to the api passing the conversaion we've built
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/parallax_minimal.rb | examples/parallax_minimal.rb | require_relative '../lib/intelligence'
adapter_options = {
base_uri: 'http://localhost:4001/v1',
chat_options: {
max_tokens: 2048
}
}
adapter = Intelligence::Adapter[ :parallax ].new( adapter_options )
request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.chat( ARGV[ 0 ] || 'Hello!' )
if response.success?
puts response.result.text
else
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/openai_minimal_stream.rb | examples/openai_minimal_stream.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this code defines the adapter options; we're adding the open ai api key from the environment
# plus a gtp-4o model and a limiting tokens per response to 1024; we're also going to stream so
# we have to enable the stream option
adapter_options = {
key: ENV[ 'OPENAI_API_KEY' ],
chat_options: { model: 'gpt-4o', max_tokens: 1024, stream: true }
}
# this code builds and configures your adapter; in this case we have chosen open_ai to match
# our options
adapter = Intelligence::Adapter[ :open_ai ].new( adapter_options )
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this lines makes the actual request to the api with the 'conversation' as input; as a
# convenience you can also pass a string which will be convered to a conversation;
# the stream call also requries a block which receives the in-progress (Faraday) request
response = request.stream( ARGV[ 0 ] || 'Hello!' ) do | request |
# the request, in turn, is used to receive the results as these arrive
request.receive_result do | result |
# the result will be an instance of a ChatResult which has a convenience text method
# that you can use to print the text received in the first choice of that result
print result.text
# if we want to check if the stream is complete we can check the end_reason from the
# first choice ( any value that is not nil in end_reason means that this is the last
# result with content we will receive )
if result.choices.first.end_reason
print "\n"
end
end
end
# this line checks if the request was successful; either way it will inclue a 'result'
unless response.success?
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/google_minimal.rb | examples/google_minimal.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this code defines the adapter options; we're adding the google api key from the environment
# plus setting the model, in this case gemini flash, and limiting tokens to 256
adapter_options = {
key: ENV[ 'GOOGLE_API_KEY' ],
chat_options: { model: 'gemini-1.5-pro', max_tokens: 256 }
}
# this code builds and configures your adapter; in this case we have chosen google to match
# our options
adapter = Intelligence::Adapter[ :google ].new( adapter_options )
# this line creates the request instance; you can reuse the request instance
request = Intelligence::ChatRequest.new( adapter: adapter )
# this makes the actual request to the api, taking in a 'conversation' and returning a response;
# as a convenience you can pass a string to this method and conversation will be created for you
response = request.chat( ARGV[ 0 ] || 'Hello!' )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/google_vision.rb | examples/google_vision.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this line will include the mime types gem; be sure to install it: `gem install "mime-types"`
require 'mime-types'
file_path = ARGV[ 0 ]
if file_path.nil? || !File.exist?( file_path )
puts "Error: You have not specified a file or the file could not be found"
exit
end
file_mime_type = MIME::Types.type_for( file_path )&.first
if file_mime_type.nil?
puts "Error: You have not specified a recognizable file."
exit
end
file_content_type = file_mime_type.content_type
# this block of code builds and configures your adapter; in this case we have chosen google
adapter = Intelligence::Adapter.build :google do
key ENV[ 'GOOGLE_API_KEY' ] # this is the google api key; here it is
# retrieved from the envionment
chat_options do
model 'gemini-1.5-flash-8b' # this is the open ai model we want to use
max_tokens 8192 # this is the maximum number of tokens we
# want the model to generate
end
end
# this line constructs a new request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line builds a conversation with a system and user message
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
end
message role: :user do
content text: ARGV[ 1 ] || 'What do you see in this image?'
content do
type :binary
content_type file_content_type
bytes File.binread( file_path )
end
end
end
# this line makes the actual request to the api passing the conversaion we've built
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/ollama_minimal.rb | examples/ollama_minimal.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this code defines the adapter options; we're not using a key as this is a local model but we
# have specified an explicit base_uri which you may wish to change if you are runnin ollama on
# another port ( note that it's not base_url )
adapter_options = {
base_uri: 'http://localhost:11434/v1',
chat_options: {
model: 'qwen2.5vl:7b',
max_tokens: 1024
}
}
# this code builds and configures your adapter; in this case we have chosen ollama to match
# our options
adapter = Intelligence::Adapter[ :ollama ].new( adapter_options )
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this makes the actual request to the api, taking in a 'conversation' and returning a response;
# as a convenience you can pass a string to this method and conversation will be created for you
response = request.chat( ARGV[ 0 ] || 'Hello!' )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/anthropic_expanded_stream.rb | examples/anthropic_expanded_stream.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this block of code builds and configures your adapter; in this case we have chosen anthropic
adapter = Intelligence::Adapter.build! :anthropic do
key ENV[ 'ANTHROPIC_API_KEY' ] # this is the anthropic api key; here
# it is retrieved from the envionment
chat_options do
model 'claude-3-5-sonnet-20240620' # this is the anthropic model we want
max_tokens 1024 # this is the maximum number of tokens
# we want the model to generate
stream true # the stream option is required when
# calling the stream method
end
end
# this line creates an conversation, which is a collection of messages we want to send to the
# model together
conversation = Intelligence::Conversation.new
# here we are creating a new system message we will add to the conversation
message = Intelligence::Message.new( :system )
# here we are creating a new text content item
content = Intelligence::MessageContent.build( :text, text: 'You are a helpful assistant.' )
# here we add the content to the message
message << content
# and finally we will set the system message for the conversation
conversation.system_message = message
# here we will add a user message, this time using a builder to make the code less verbose
conversation << Intelligence::Message.build! do
role :user
content text: ARGV[ 0 ] || 'Hello!'
end
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this lines makes the actual request to the api with the 'conversation' as input;
# the stream call also requires a block which receives the in-progress (Faraday) request
response = request.stream( ARGV[ 0 ] || 'Hello!' ) do | request |
# the request, in turn, is used to receive the results as these arrive
request.receive_result do | result |
# the result will be an instance of a ChatResult; every result has a collection of choices
# ( typically there is only one but some adapters can return n result choices simultanously );
# a choice is an instance of ChatResultChoice
choices = result.choices
# we will select the first choice, as we expect only one
choice = choices.first
# every choice has a message
message = choice.message
# every message has a number of content items we can iterate through
message.each_content do | content |
# finally we can output the text of the content; although we only expect text back from
# the model in some cases ( such as when tools are used ) it may respond with other content
# so we check to make sure we are only displaying text content; remember this content is
# only a fragment of the response and in some cases the fragment will be entirelly empty
print content.text || '' if content.is_a?( Intelligence::MessageContent::Text )
end
# if we want to check if the stream is complete we can check the end_reason from the
# first choice ( any value that is not nil in end_reason means that this is the last
# result with content we will receive )
if result.choices.first.end_reason
print "\n"
end
end
end
# this line checks if the request was successful; either way it will inclue a 'result'
unless response.success?
# if not successful, the result will be a ChatErrorResult instance which includes error
# information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/openai_minimal.rb | examples/openai_minimal.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this code defines the adapter options; we're adding the open ai api key from the environment
# plus a gtp-4o model and a limiting tokens per response to 256
adapter_options = {
key: ENV[ 'OPENAI_API_KEY' ],
chat_options: { model: 'gpt-4o', max_tokens: 256 }
}
# this code builds and configures your adapter; in this case we have chosen open_ai to match
# our options
adapter = Intelligence::Adapter[ :open_ai ].new( adapter_options )
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this makes the actual request to the api, taking in a 'conversation' and returning a response;
# as a convenience you can pass a string to this method and conversation will be created for you
response = request.chat( ARGV[ 0 ] || 'Hello!' )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/openai_builders.rb | examples/openai_builders.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this block of code builds and configures your adapter; in this case we have chosen open_ai
adapter = Intelligence::Adapter.build :open_ai do
key ENV[ 'OPENAI_API_KEY' ] # this is the open ai api key; here it is
# retrieved from the envionment
chat_options do
model 'gpt-4o' # this is the open ai model we want to use
max_tokens 256 # this is the maximum number of tokens we
# want the model to generate
end
end
# this line constructs a new request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line builds a conversation with a system and user message
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
end
message role: :user do
content text: ARGV[ 0 ] || 'Hello!'
end
end
# this line makes the actual request to the api passing the conversaion we've built
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/hyperbolic_builders.rb | examples/hyperbolic_builders.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this block of code builds and configures your adapter; in this case we have chosen hyperbolic
adapter = Intelligence::Adapter.build :hyperbolic do
key ENV[ 'HYPERBOLIC_API_KEY' ] # this is the hyperbolic api key; here it
# is retrieved from the envionment
chat_options do
model 'Qwen/Qwen2.5-72B-Instruct' # this is the hyperbolic model we want
max_tokens 256 # this is the maximum number of tokens
# we want the model to generate
end
end
# this line constructs a new request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line builds a conversation with a system and user message
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
end
message role: :user do
content text: ARGV[ 0 ] || 'Hello!'
end
end
# this line makes the actual request to the api passing the conversaion we've built
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/anthropic_minimal_stream.rb | examples/anthropic_minimal_stream.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this code defines the adapter options; we're adding the anthropic api key from the environment
# plus a claude model and a limiting tokens per response to 1024; we're also going to stream so
# we have to enable the stream option
adapter_options = {
key: ENV[ 'ANTHROPIC_API_KEY' ],
chat_options: { model: 'claude-3-5-sonnet-20240620', max_tokens: 1024, stream: true }
}
# this code builds and configures your adapter; in this case we have chosen anthropic to match
# our options
adapter = Intelligence::Adapter[ :anthropic ].new( adapter_options )
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this lines makes the actual request to the api with the 'conversation' as input; as a
# convenience you can also pass a string which will be convered to a conversation;
# the stream call also requries a block which receives the in-progress (Faraday) request
response = request.stream( ARGV[ 0 ] || 'Hello!' ) do | request |
# the request, in turn, is used to receive the results as these arrive
request.receive_result do | result |
# the result will be an instance of a ChatResult which has a convenience text method
# that you can use to print the text received in the first choice of that result
print result.text
# if we want to check if the stream is complete we can check the end_reason from the
# first choice ( any value that is not nil in end_reason means that this is the last
# result with content we will receive )
if result.choices.first.end_reason
print "\n"
end
end
end
# this line checks if the request was successful; either way it will inclue a 'result'
unless response.success?
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/google_expanded.rb | examples/google_expanded.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this block of code builds and configures your adapter; in this case we have chosen google
adapter = Intelligence::Adapter.build! :google do
key ENV[ 'GOOGLE_API_KEY' ] # this is the google api key; here it is
# retrieved from the envionment
chat_options do
model 'gemini-1.5-pro' # this is the google model we want to use
max_tokens 256 # this is the maximum number of tokens we
# want the model to generate
end
end
# this line creates an conversation, which is a collection of messages we want to send to the
# model together
conversation = Intelligence::Conversation.new
# here we are creating a new system message we will add to the conversation
message = Intelligence::Message.new( :system )
# here we are creating a new text content item
content = Intelligence::MessageContent.build( :text, text: 'You are a helpful assistant.' )
# here we add the content to the message
message << content
# and finally we will set the system message for the conversation
conversation.system_message = message
# here we will add a user message, this time using a builder to make the code less verbose
conversation << Intelligence::Message.build! do
role :user
content text: ARGV[ 0 ] || 'Hello!'
end
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line makes the actual request to the api and returns a response
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance
result = response.result
# every successful result has a collection of choices ( typically there is only one but some
# adapters can return n result choices simultanously ); a choice is an instance of
# ChatResultChoice
choices = result.choices
# we will select the first choice, as we expect only one
choice = choices.first
# every choice has a message
message = choice.message
# every message has a number of content items we can iterate through
message.each_content do | content |
# finally we can output the text of the content; although we only expect text back from
# the model in some cases ( such as when tools are used ) it may respond with other content
# so we check to make sure we are only displaying text content
puts content.text if content.is_a?( Intelligence::MessageContent::Text )
end
else
# if not successful, the result will be a ChatErrorResult instance which includes error
# information
error_result = response.error_result
puts "Error: " + error_result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/openai_vision.rb | examples/openai_vision.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this line will include the mime types gem; be sure to install it: `gem install "mime-types"`
require 'mime-types'
require 'debug'
file_path = ARGV[ 0 ]
if file_path.nil? || !File.exist?( file_path )
puts "Error: You have not specified a file or the file could not be found"
exit
end
file_mime_type = MIME::Types.type_for( file_path )&.first
if file_mime_type.nil? || file_mime_type.media_type != 'image'
puts "Error: You have not specified an image file."
exit
end
file_content_type = file_mime_type.content_type
# this block of code builds and configures your adapter; in this case we have chosen open_ai
adapter = Intelligence::Adapter.build :open_ai do
key ENV[ 'OPENAI_API_KEY' ] # this is the open ai api key; here it is
# retrieved from the envionment
chat_options do
model 'gpt-4o' # this is the open ai model we want to use
max_tokens 256 # this is the maximum number of tokens we
# want the model to generate
end
end
# this line constructs a new request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line builds a conversation with a system and user message
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
end
message role: :user do
content text: ARGV[ 1 ] || 'What do you see in this image?'
content do
type :binary
content_type file_content_type
bytes File.binread( file_path )
end
end
end
# this line makes the actual request to the api passing the conversaion we've built
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/parallax_minimal_stream.rb | examples/parallax_minimal_stream.rb | require_relative '../lib/intelligence'
adapter_options = {
base_uri: 'http://localhost:4001/v1',
chat_options: {
max_tokens: 2048,
stream: true
}
}
adapter = Intelligence::Adapter[ :parallax ].new( adapter_options )
request = Intelligence::ChatRequest.new( adapter: adapter )
response = request.stream( ARGV[ 0 ] || 'Hello!' ) do | request |
request.receive_result do | result |
print result.text
if result.choices.first.end_reason
print "\n"
end
end
end
unless response.success?
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/google_expanded_stream.rb | examples/google_expanded_stream.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this block of code builds and configures your adapter; in this case we have chosen google
adapter = Intelligence::Adapter.build! :google do
key ENV[ 'GOOGLE_API_KEY' ] # this is the google api key; here it is
# retrieved from the envionment
chat_options do
model 'gemini-1.5-pro' # this is the google model we want to use
max_tokens 256 # this is the maximum number of tokens we
# want the model to generate
stream true
end
end
# this line creates an conversation, which is a collection of messages we want to send to the
# model together
conversation = Intelligence::Conversation.new
# here we are creating a new system message we will add to the conversation
message = Intelligence::Message.new( :system )
# here we are creating a new text content item
content = Intelligence::MessageContent.build( :text, text: 'You are a helpful assistant.' )
# here we add the content to the message
message << content
# and finally we will set the system message for the conversation
conversation.system_message = message
# here we will add a user message, this time using a builder to make the code less verbose
conversation << Intelligence::Message.build! do
role :user
content text: ARGV[ 0 ] || 'Hello!'
end
# this line creates the request instance; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this lines makes the actual request to the api with the 'conversation' as input; as a
# convenience you can also pass a string which will be convered to a conversation;
# the stream call also requries a block which receives the in-progress (Faraday) request
response = request.stream( ARGV[ 0 ] || 'Hello!' ) do | request |
# the request, in turn, is used to receive the results as these arrive
request.receive_result do | result |
# the result will be an instance of a ChatResult which has a convenience text method
# that you can use to print the text received in the first choice of that result
print result.text
# if we want to check if the stream is complete we can check the end_reason from the
# first choice ( any value that is not nil in end_reason means that this is the last
# result with content we will receive )
if result.choices.first.end_reason
print "\n"
end
end
end
# this line checks if the request was successful; either way it will inclue a 'result'
unless response.success?
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/examples/hyperbolic_vision.rb | examples/hyperbolic_vision.rb | # this line will ensure your code can see the intelligence gem; if you have copied this example
# to another directory be sure to change the next line to: require 'intelligence'
require_relative '../lib/intelligence'
# this line will include the mime types gem; be sure to install it: `gem install "mime-types"`
require 'mime-types'
# this block of code checks that you've provided a file path and that the file exists
file_path = ARGV[ 0 ]
if file_path.nil? || !File.exist?( file_path )
puts "Error: You have not specified a file or the file could not be found"
exit
end
# this block of code obtains the file mime type and verfies that it is an image
file_mime_type = MIME::Types.type_for( file_path )&.first
if file_mime_type.nil? || file_mime_type.media_type != 'image'
puts "Error: You have not specified an image file."
exit
end
file_content_type = file_mime_type.content_type
# this block of code constructs a new Faraday connection
connection = Faraday.new do | faraday |
faraday.options.timeout = 100 # read timout set to 60 seconds
faraday.options.open_timeout = 2 # connection open timeout set to 1 seconds
# add other middleware or options as needed
# create a default adapter
faraday.adapter Faraday.default_adapter
end
# this block of code builds and configures your adapter; in this case we have chosen hyperbolic
adapter = Intelligence::Adapter.build :hyperbolic do
key ENV[ 'HYPERBOLIC_API_KEY' ] # this is the hyperbolic api key; here
# it is retrieved from the envionment
chat_options do
model 'Qwen/Qwen2-VL-7B-Instruct' # this is the open ai model we want to use
max_tokens 256 # this is the maximum number of tokens we
# want the model to generate
end
end
# this line constructs a new request instance with the connection and adapter, the adapter is
# required but you can omit the connection; you can reuse the request
request = Intelligence::ChatRequest.new( adapter: adapter )
# this line builds a conversation with a system and user message
conversation = Intelligence::Conversation.build do
system_message do
content text: "You are a highly efficient AI assistant. Provide clear, concise responses."
end
message role: :user do
content text: ARGV[ 1 ] || 'What do you see in this image?'
content do
type :binary
content_type file_content_type
bytes File.binread( file_path )
end
end
end
# this line makes the actual request to the api passing the conversaion we've built
response = request.chat( conversation )
# this line checks if the request was successful; either way it will inclue a 'result'
if response.success?
# if successful, the result will be a ChatResult instance which has a convenience text method
# to easilly retrieve the response text
puts response.result.text
else
# if not successful, the result be a ChatErrorResult instance which includes error information
puts "Error: " + response.result.error_description
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence.rb | lib/intelligence.rb | require 'json'
require 'base64'
require 'faraday'
require 'dynamic_schema'
require 'mime-types'
require 'json/repair'
require 'intelligence/version'
require 'intelligence/utilities/deep_dup'
require 'intelligence/error'
require 'intelligence/adapter_error'
require 'intelligence/unsupported_content_error'
require 'intelligence/error_result'
require 'intelligence/chat_error_result'
require 'intelligence/tool'
require 'intelligence/message_content'
require 'intelligence/message'
require 'intelligence/conversation'
require 'intelligence/adapter'
require 'intelligence/chat_request'
require 'intelligence/chat_result'
require 'intelligence/chat_result_choice'
require 'intelligence/chat_metrics'
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/chat_request.rb | lib/intelligence/chat_request.rb | module Intelligence
#
# The ChatRequestMethods module extends a Faraday request, adding the +receive_result+ method.
#
module ChatRequestMethods
def receive_result( &block )
@_intelligence_result_callback = block
end
end
#
# The ChatResponseMethods module extends a Faraday reponse, adding the +result+ method.
#
module ChatResponseMethods
def result
@_intelligence_result
end
end
##
# The +ChatRequest+ class encapsulates a request to an LLM. After creating a new +ChatRequest+
# instance you can make the actual request by calling the +chat+ or +stream+ methods. In order
# to construct a +ChatRequest+ you must first construct and configure an adapter.
#
# === example
#
# adapter = Intelligence::Adapter.build( :open_ai ) do
# key ENV[ 'OPENAI_API_KEY' ]
# chat_options do
# model 'gpt-4o'
# max_tokens 512
# end
# end
#
# request = Intelligence::ChatRequest.new( adapter: adapter )
# response = request.chat( 'Hello!' )
#
# if response.success?
# puts response.result.text
# else
# puts response.result.error_description
# end
#
class ChatRequest
DEFAULT_CONNECTION = Faraday.new { | builder | builder.adapter Faraday.default_adapter }
##
# The +initialize+ method initializes the +ChatRequest+ instance. You MUST pass a previously
# constructed and configured +adapter+ and optionally a (Faraday) +connection+.
#
def initialize( connection: nil, adapter: , **options )
@connection = connection || DEFAULT_CONNECTION
@adapter = adapter
@options = options || {}
raise ArgumentError.new( 'An adapter must be configured before a request is constructed.' ) \
if @adapter.nil?
end
##
# The +chat+ method leverages the adapter associated with this +ChatRequest+ instance to
# construct and make an HTTP request - through Faraday - to an LLM service. The +chat+ method
# always returns a +Faraday::Respose+ which is augmented with a +result+ method.
#
# If the response is successful ( if +response.success?+ returns true ) the +result+ method
# returns a +ChatResponse+ instance. If the response is not successful a +ChatErrorResult+
# instance is returned.
#
# === arguments
# * +conversation+ - an instance of +Intelligence::Conversation+ or String; this encapsulates
# the content to be sent to the LLM
# * +options+ - one or more Hashes with options; these options overide any of the
# configuration options used to configure the adapter; you can, for
# example, pass +{ chat_options: { max_tokens: 1024 }+ to limit the
# response to 1024 tokens.
def chat( conversation, *options )
conversation = build_quick_conversation( conversation ) if conversation.is_a?( String )
options = options.compact.reduce( {} ) { | accumulator, o | accumulator.merge( o ) }
options = @options.merge( options )
# conversation and tools are presented as simple Hashes to the adapter
conversation = conversation.to_h
options[ :tools ] = options[ :tools ].to_a.map!( &:to_h ) if options[ :tools ]
uri = @adapter.chat_request_uri( options )
headers = @adapter.chat_request_headers( options )
payload = @adapter.chat_request_body( conversation, options )
result_callback = nil
response = @connection.post( uri ) do | request |
headers.each { | key, value | request.headers[ key ] = value }
request.body = payload
yield request.extend( ChatRequestMethods ) if block_given?
result_callback = request.instance_variable_get( "@_intelligence_result_callback" )
end
result = nil
if response.success?
chat_result_attributes = @adapter.chat_result_attributes( response )
result = ChatResult.new( chat_result_attributes )
else
error_result_attributes = @adapter.chat_result_error_attributes( response )
result = ChatErrorResult.new( error_result_attributes )
end
response.instance_variable_set( "@_intelligence_result", result )
response.extend( ChatResponseMethods )
end
# Sends a streaming chat request to the configured LLM provider.
#
# Behaves like +chat+ but reads the response incrementally: as each chunk arrives,
# the adapter folds it into an opaque +context+ and may yield partial result
# attributes, which are forwarded as ChatResult instances to the result callback
# registered by the caller's block ( via ChatRequestMethods ). When the stream
# completes the accumulated context is converted into the final ChatResult; on
# failure a ChatErrorResult is built instead. The returned Faraday response is
# extended with ChatResponseMethods so +response.result+ exposes that result.
def stream( conversation, *options )
  conversation = build_quick_conversation( conversation ) if conversation.is_a?( String )
  # later option hashes win over earlier ones; instance-level @options are the base
  options = options.compact.reduce( {} ) { | accumulator, o | accumulator.merge( o ) }
  options = @options.merge( options )
  # conversation and tools are presented as simple Hashes to the adapter
  conversation = conversation.to_h
  options[ :tools ] = options[ :tools ].to_a.map!( &:to_h ) if options[ :tools ]
  uri = @adapter.chat_request_uri( options )
  headers = @adapter.chat_request_headers( options )
  payload = @adapter.chat_request_body( conversation, options )
  context = nil
  response = @connection.post( uri ) do | request |
    headers.each { | key, value | request.headers[ key ] = value }
    request.body = payload
    # guard the yield ( as +chat+ does ) so calling +stream+ without a block no
    # longer raises LocalJumpError; without a block there is simply no incremental
    # callback and only the final result is built
    yield request.extend( ChatRequestMethods ) if block_given?
    result_callback = request.instance_variable_get( "@_intelligence_result_callback" )
    request.options.on_data = Proc.new do | chunk, received_bytes |
      context, attributes = @adapter.stream_result_chunk_attributes( context, chunk )
      result_callback.call( ChatResult.new( attributes ) ) \
        unless result_callback.nil? || attributes.nil?
    end
  end
  result = nil
  if response.success?
    stream_result_attributes = @adapter.stream_result_attributes( context )
    result = ChatResult.new( stream_result_attributes )
  else
    error_result_attributes = @adapter.stream_result_error_attributes( response )
    result = ChatErrorResult.new( error_result_attributes )
  end
  response.instance_variable_set( "@_intelligence_result", result )
  response.extend( ChatResponseMethods )
end
private
# Wraps a bare String in a Conversation holding a single user text message, so
# the public chat / stream API can accept plain text in place of a Conversation.
def build_quick_conversation( text )
  message = Message.build! do
    role :user
    content text: text
  end
  conversation = Conversation.new
  conversation.messages << message
  conversation
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/version.rb | lib/intelligence/version.rb | module Intelligence
VERSION = "1.0.0"
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/adapter_error.rb | lib/intelligence/adapter_error.rb | module Intelligence
# Base class for adapter specific errors. The snake_case adapter type is
# camel-cased into the adapter class name and prefixed onto the message text.
class AdapterError < Error
  def initialize( adapter_type, text )
    camelized = adapter_type.to_s.split( '_' ).map( &:capitalize ).join
    super( "The #{camelized} adapter #{text}." )
  end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/unsupported_content_error.rb | lib/intelligence/unsupported_content_error.rb | module Intelligence
# Raised when a message contains content that the target adapter / provider
# cannot handle.
class UnsupportedContentError < AdapterError; end
end | ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/invalid_content_error.rb | lib/intelligence/invalid_content_error.rb | module Intelligence
# Raised when message content is malformed or incomplete for the target adapter.
class InvalidContentError < AdapterError; end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/adapter.rb | lib/intelligence/adapter.rb | require_relative 'adapter/module_methods'
require_relative 'adapter/base'
module Intelligence
  # Namespace for provider adapters; extends ModuleMethods so its methods
  # ( defined in adapter/module_methods.rb ) become module-level methods of
  # Adapter.
  module Adapter
    extend ModuleMethods
  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message_content.rb | lib/intelligence/message_content.rb | Dir[ File.join( __dir__, 'message_content', '*.rb' ) ].each do | file |
require_relative file
end
module Intelligence
  module MessageContent

    # Resolves a content type symbol ( :text, :web_reference, ... ) to the
    # corresponding MessageContent class ( Text, WebReference, ... ).
    # Raises TypeError when no matching class exists.
    def self.[]( type )
      type_name = type.to_s.split( '_' ).map { | word | word.capitalize }.join
      begin
        Intelligence.const_get( "MessageContent::#{type_name}" )
      rescue NameError
        # rescue only NameError ( previously a bare `rescue nil` modifier, which
        # hid any StandardError raised while resolving the constant )
        raise TypeError, "An unknown content type '#{type}' was given."
      end
    end

    # Builds content of the given type; build! enforces required attributes.
    def self.build!( type, attributes = nil, &block )
      self[ type ].build!( attributes, &block )
    end

    # Builds content of the given type without strict validation.
    def self.build( type, attributes = nil, &block )
      self[ type ].build( attributes, &block )
    end

  end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message.rb | lib/intelligence/message.rb | module Intelligence
class Message
  include DynamicSchema::Definable

  # the only roles a message may carry; enforced by #initialize
  ROLES = [ :system, :user, :assistant ]

  schema do
    role Symbol, required: true
    content array: true, as: :contents do
      type Symbol, default: :text
      # note: we replicate these schema elements of the individual content types here to
      # provide more semantic flexibility when building a message; we don't delegate to the
      # individual content type schemas because, unlike for a specific content type, not all
      # attributes are required
      # text
      text String
      # binary and file
      content_type String
      bytes String
      uri URI
      # tool call and tool result
      tool_call_id String
      tool_name String
      tool_parameters [ Hash, String ]
      tool_result [ Hash, String ]
    end
  end

  attr_reader :role
  attr_reader :contents

  ##
  # The +build!+ class method constructs and returns a new +Message+ instance. The +build!+
  # method accepts message +attributes+ and a block, which may be combined when constructing
  # a +Message+. The +role+ is required, either as an attribute or in the block. If the +role+
  # is not present, an exception will be raised.
  #
  # The block offers the +role+ method, as well as a +content+ method permitting the caller to
  # set the role and add content respectively.
  #
  # Note that there is no corresponding +build+ method because a +Message+ strictly requires a
  # +role+. It cannot be constructed without one.
  #
  # === examples
  #
  #   message = Message.build!( role: :user )
  #
  #   message = Message.build! do
  #     role :user
  #     content do
  #       type :text
  #       text 'this is a user message'
  #     end
  #   end
  #
  #   message = Message.build!( role: :user ) do
  #     content text: 'this is a user message'
  #   end
  #
  #   message = Message.build!( role: :user ) do
  #     content text: 'what do you see in this image?'
  #     content type: :binary do
  #       content_type 'image/png'
  #       bytes File.binread( '99_red_balloons.png' )
  #     end
  #   end
  #
  def self.build!( attributes = nil, &block )
    attributes = self.builder.build!( attributes, &block )
    self.new( attributes[ :role ], attributes )
  end

  ##
  # Constructs a message with the given role ( raises ArgumentError unless the role is one
  # of ROLES ) and builds typed content instances from any :contents attributes.
  #
  def initialize( role, attributes = nil )
    @role = role&.to_sym
    @contents = []
    raise ArgumentError.new( "The role is invalid. It must be one of #{ROLES.join( ', ' )}." ) \
      unless ROLES.include?( @role )
    if attributes && attributes[ :contents ]
      attributes[ :contents ].each do | content |
        @contents << MessageContent.build!( content[ :type ], content )
      end
    end
  end

  ##
  # The empty? method returns true if the message has no content.
  #
  def empty?
    @contents.empty?
  end

  ##
  # The valid? method returns true if the message has a valid role, has content, and the
  # content is valid.
  #
  def valid?
    ROLES.include?( @role ) && !@contents.empty? && @contents.all?{ | contents | contents.valid? }
  end

  ##
  # The text method is a convenience that returns all text content in the message joined with
  # a newline. Any non-text content is skipped. If there is no text content an empty string is
  # returned.
  #
  def text
    result = []
    each_content do | content |
      result << content.text if content.is_a?( MessageContent::Text ) && content.text
    end
    result.join( "\n" )
  end

  # yields each content item in insertion order
  def each_content( &block )
    @contents.each( &block )
  end

  # appends a content instance ( silently ignored when nil ); returns self for chaining
  def append_content( content )
    @contents.push( content ) unless content.nil?
    self
  end
  alias :<< :append_content

  # serializes to a plain Hash ( the form handed to adapters )
  def to_h
    {
      role: @role,
      contents: @contents.map { | c | c.to_h }
    }
  end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/chat_error_result.rb | lib/intelligence/chat_error_result.rb | module Intelligence
#
# ChatErrorResult
#
# Encapsulates the error result of a failed chat request; inherits all behavior
# ( error_type, error, error_description, empty? ) from ErrorResult.
#
class ChatErrorResult < ErrorResult
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/conversation.rb | lib/intelligence/conversation.rb | module Intelligence
class Conversation
  include DynamicSchema::Definable
  include DynamicSchema::Buildable

  schema do
    # the system message defaults its role so a built system_message needs no
    # explicit role
    system_message default: { role: :system }, &Message.schema
    message as: :messages, array: true, &Message.schema
  end

  attr_reader :system_message
  attr_reader :messages

  # Builds a conversation from schema attributes; an empty system message is
  # discarded rather than stored.
  def initialize( attributes = nil )
    @messages = []
    if attributes
      if attributes[ :system_message ]&.any?
        system_message = Message.new(
          attributes[ :system_message ][ :role ] || :system,
          attributes[ :system_message ]
        )
        @system_message = system_message unless system_message.empty?
      end
      attributes[ :messages ]&.each do | message_attributes |
        @messages << Message.new( message_attributes[ :role ], message_attributes )
      end
    end
  end

  # true when a non-empty system message is present
  def has_system_message?
    ( @system_message || false ) && !@system_message.empty?
  end

  def has_messages?
    !@messages.empty?
  end

  # Replaces the system message; raises ArgumentError unless given an
  # Intelligence::Message with the :system role.
  def system_message=( message )
    raise ArgumentError, "The system message must be a Intelligence::Message." \
      unless message.is_a?( Intelligence::Message )
    raise ArgumentError, "The system message MUST have a role of 'system'." \
      unless message.role == :system
    @system_message = message
  end

  # Appends one or more messages ( nested arrays are flattened ); returns self
  # so appends can be chained. Aliased as <<.
  def append_message( *messages )
    @messages.concat( messages.flatten )
    self
  end
  alias :<< :append_message

  # serializes to a plain Hash ( the form handed to adapters )
  def to_h
    result = {}
    result[ :system_message ] = @system_message.to_h if @system_message
    result[ :messages ] = @messages.map { | m | m.to_h }
    result
  end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/chat_metrics.rb | lib/intelligence/chat_metrics.rb | module Intelligence
#
# ChatMetrics
#
# Encapsulates metrics information for a chat request: the number of input
# tokens consumed, the number of output tokens generated, cache read / write
# input token counts, and the duration of the request in milliseconds.
#
class ChatMetrics

  attr_reader :duration
  attr_reader :input_tokens
  attr_reader :cache_read_input_tokens
  attr_reader :cache_write_input_tokens
  attr_reader :output_tokens

  # Accepts a Hash of metric attributes; only keys with a matching reader are
  # assigned, unknown keys are silently ignored.
  def initialize( attributes )
    attributes.each do | key, value |
      instance_variable_set( "@#{key}", value ) if self.respond_to?( "#{key}" )
    end
  end

  # input + output tokens, memoized; nil until both counts are known
  def total_tokens
    @total_tokens = @input_tokens + @output_tokens \
      if @total_tokens.nil? && @input_tokens && @output_tokens
    @total_tokens
  end

  # NOTE(review): duration and total_tokens are not included here — confirm
  # that is intentional before relying on round-tripping metrics through to_h
  def to_h
    {
      input_tokens: @input_tokens,
      output_tokens: @output_tokens,
      cache_read_input_tokens: @cache_read_input_tokens,
      cache_write_input_tokens: @cache_write_input_tokens
    }.compact
  end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/error_result.rb | lib/intelligence/error_result.rb | module Intelligence
# Base class for error results. Exposes the error type, the error itself and a
# human readable description; attribute assignment is driven by whichever keys
# in the given Hash have a matching reader ( unknown keys are ignored ).
class ErrorResult

  attr_reader :error_type
  attr_reader :error
  attr_reader :error_description

  def initialize( error_attributes )
    error_attributes.each_pair do | name, value |
      instance_variable_set( "@#{name}", value ) if respond_to?( name.to_s )
    end
  end

  # true when no error information was provided at all
  def empty?
    [ @error_type, @error, @error_description ].all?( &:nil? )
  end

end
end | ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/chat_result_choice.rb | lib/intelligence/chat_result_choice.rb | module Intelligence
class ChatResultChoice

  # the assistant Message built from the choice payload ( nil when absent )
  attr_reader :message
  # why generation stopped ( e.g. :ended ) and the stop sequence, if any
  attr_reader :end_reason
  attr_reader :end_sequence

  # Extracts end_reason, end_sequence and message from the attributes; any
  # remaining ( provider specific ) attributes stay readable via [] / each.
  def initialize( chat_choice_attributes )
    @attributes = chat_choice_attributes.dup
    @end_reason = @attributes.delete( :end_reason )
    @end_sequence = @attributes.delete( :end_sequence )
    message = @attributes.delete( :message )
    @message = build_message( message ) if message
  end

  # Hash-like read access to the remaining attributes.
  def key?(key) = @attributes.key?(key)
  alias include? key?
  def size = @attributes.size
  alias length size
  alias count size
  def empty? = @attributes.empty?
  def each( &block) = @attributes.each( &block )
  def []( key ) = @attributes[ key ]

  private

  # Builds a Message ( default role :assistant ) and appends each content entry
  # using the lenient MessageContent.build ( no required-attribute enforcement ).
  def build_message( json_message )
    message = Message.new( json_message[ :role ]&.to_sym || :assistant )
    json_message[ :contents ]&.each do | json_content |
      message << MessageContent.build( json_content[ :type ], json_content )
    end
    message
  end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/chat_result.rb | lib/intelligence/chat_result.rb | module Intelligence
#
# ChatResult
#
# Encapsulates a successful result of a chat or stream request. A result
# includes an array of choices ( an array even when there is a single choice )
# and the metrics associated with the generation of all result choices.
#
class ChatResult

  attr_reader :choices
  attr_reader :metrics

  # Extracts :choices and :metrics; any remaining ( provider specific )
  # attributes stay readable via [] / each. Raises when given nil.
  def initialize( chat_attributes )
    raise 'A ChatResult must be initialized with attributes but got nil.' unless chat_attributes
    @attributes = chat_attributes.dup
    @choices = ( @attributes.delete( :choices ) || [] ).map do | json_choice |
      ChatResultChoice.new( json_choice )
    end
    json_metrics = @attributes.delete( :metrics )
    @metrics = ChatMetrics.new( json_metrics ) if json_metrics
  end

  def id = @attributes[ :id ]
  def user = @attributes[ :user ]
  # convenience accessors that delegate to the first choice
  def message = @choices.empty? ? nil : @choices.first.message
  def text = self.message&.text || ''
  def end_reason = @choices.empty? ? nil : @choices.first.end_reason

  # Hash-like read access to the remaining attributes.
  def key?(key) = @attributes.key?(key)
  alias include? key?
  def size = @attributes.size
  alias length size
  alias count size
  def empty? = @attributes.empty?
  def each( &block) = @attributes.each( &block )
  def []( key ) = @attributes[ key ]
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/tool.rb | lib/intelligence/tool.rb | module Intelligence
##
# The +Tool+ class instance encapsulates a definition of a tool that may be executed by a
# model. The properties of a tool include a unique name, a comprehensive description of what
# the tool does and a set of properties that describe the arguments the tool accepts, each
# with its own type and validation rules.
#
# A +Tool+ instance does not implement the tool, only the definition of the tool, which is
# presented to the model to allow it to make a tool call.
#
# == Configuration
#
# A tool MUST include the +name+ and +description+ and may include any number of
# +arguments+ as well as a +required+ attribute with a list of the required arguments.
# Each argument must include a +name+ and may include +description+, +type+, a list of
# acceptable values through +enum+ and a +minimum+ and +maximum+ for numerical
# arguments.
#
# === Tool Attributes
#
# - +name+: A +String+ representing the unique name of the tool. *required*
# - +description+: A +String+ describing it's purpose and functionality. *required*
#
# === Argument Attributes
#
# An argument is defined through the +argument+ block. The block MUST include the +name+
# and +description+, while the other attributes, such as the +type+, are optional.
# - +type+:
# A +String+ indicating the data type of the property. Accepted values include +string+,
# +number+, +integer+, +array+, +boolean+, +object+.
# - +name+: A +String+ representing the name of the property. *required*
# - +description+: A +String+ that describes the property and its purpose. *required*
# - +minimum+: An +Integer+ or +Float+ specifying the minimum value for numerical properties.
# - +maximum+: An +Integer+ or +Float+ specifying the maximum value for numerical properties.
# - +enum+:
# An +Array+ of acceptable values for the property. Note that ( as per the JSON schema
# specification ) an enum could be composed of mixed types but this is discouraged.
# - +required:
# A boolean ( +TrueClass+, +FalseClass+ ) that specifying if the argument is required.
#
# === Examples
#
# weather_tool = Tool.build! do
# name :get_weather_by_locality
# description \
# "The get_weather_by_locality tool will return the current weather in a given locality " \
# "( city, state or province, and country )."
# argument name: 'city', type: 'string', required: true do
# description "The city or town for which the current weather should be returned."
# end
# argument name: 'state', type: 'string' do
# description \
# "The state or province for which the current weather should be returned. If this is " \
# "not provided the largest or most prominent city with the given name, in the given " \
# "country, will be assumed."
# end
# argument name: 'country', type: 'string', required: true do
#     description "The country for which the current weather should be returned."
# end
# end
#
#   web_browser_tool = Tool.build! do
# name :get_web_page
# description "The get_web_page tool will return the content of a web page, given a page url."
# argument name: :url, type: 'string', required: true do
#       description \
#         "The url of the page including the scheme.\n" \
#         "Examples:\n" \
#         "  https://example.com\n" \
#         "  https://www.iana.org/help/example-domains\n"
# end
# argument name: :format do
# description \
# "The format of the returned content. By default you will receive 'markdown'. You " \
#         "should specify 'html' only if it is specifically required to fulfill the user " \
# "request."
# enum [ 'html', 'markdown' ]
# end
# argument name: :content do
# description \
# "The content of the page to be returned. By default you will receive only the 'main' " \
#         "content, excluding header, footer, menu, advertising and other miscellaneous elements. " \
#         "You should request 'full' only if absolutely necessary to fulfill the user request. " \
# enum [ 'main', 'full' ]
# end
# argument name: :include_tags do
# description "If content is set to html some tags will."
# end
# end
#
class Tool
  include DynamicSchema::Definable
  include DynamicSchema::Buildable

  # argument types mirror the JSON Schema primitive types
  ARGUMENT_TYPES = [ 'string', 'number', 'integer', 'array', 'boolean', 'object' ]

  # schema fragment for a single argument; defined as a proc so it can be
  # applied recursively for array :items and object :properties below
  ARGUMENT_SCHEMA = proc do
    type String, in: ARGUMENT_TYPES
    name String, required: true
    description String, required: true
    required [ TrueClass, FalseClass ]
    # note that an enum does not require a type as it implicitly restricts the argument
    # to specific values
    enum array: true
    # for arguments of type number and integer
    minimum [ Integer, Float ]
    maximum [ Integer, Float ]
    # for arguments of type array
    maximum_items Integer, as: :maxItems
    minimum_items Integer, as: :minItems
    unique_items [ TrueClass, FalseClass ]
    items do
      type in: ARGUMENT_TYPES
      property array: true, as: :properties, &ARGUMENT_SCHEMA
    end
    # for arguments of type object
    property array: true, as: :properties, &ARGUMENT_SCHEMA
  end

  schema do
    name String, required: true
    description String, required: true
    argument array: true, as: :properties do
      self.instance_eval( &ARGUMENT_SCHEMA )
    end
  end

  # stores the schema-validated attributes verbatim
  def initialize( attributes = nil )
    @properties = attributes
  end

  # serializes the tool definition to a Hash ( the form presented to providers )
  def to_h
    @properties.to_h
  end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/error.rb | lib/intelligence/error.rb | module Intelligence
# Root error class for the Intelligence library; rescue this to catch any
# library-raised error.
class Error < StandardError; end
end | ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message_content/web_reference.rb | lib/intelligence/message_content/web_reference.rb | module Intelligence
module MessageContent
# A citation-style reference to a web resource returned by a model
# ( e.g. produced by a provider web search ).
class WebReference < Base
  schema do
    uri URI
    title String
    summary String
    access_date Date
  end

  attribute :uri, :title, :summary, :access_date

  # valid when a non-empty uri is present. The uri may be a URI instance
  # ( per the schema ) or a String, so compare via to_s — URI instances do
  # not respond to empty?, which previously raised NoMethodError here.
  def valid?
    !@uri.nil? && !@uri.to_s.empty?
  end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message_content/binary.rb | lib/intelligence/message_content/binary.rb | module Intelligence
module MessageContent
# Inline binary message content ( e.g. an image ) identified by MIME type.
class Binary < Base
  schema do
    content_type String, required: true
    bytes String, required: true
  end

  attribute :content_type, :bytes

  # valid when the content type is a registered MIME type and bytes are
  # present and non-empty
  def valid?
    ( @content_type || false ) && !MIME::Types[ @content_type ].empty? &&
      ( @bytes || false ) && bytes.respond_to?( :empty? ) && !bytes.empty?
  end

  # true when the content type's media type is 'image' ( e.g. image/png )
  def image?
    ( @content_type || false ) &&
      ( MIME::Types[ @content_type ]&.first&.media_type == 'image' )
  end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message_content/thought.rb | lib/intelligence/message_content/thought.rb | module Intelligence
module MessageContent
# A model 'thinking' / reasoning trace emitted alongside regular content.
class Thought < Base
  schema do
    text String, required: true
  end

  attribute :text

  # NOTE(review): always reports valid, even with nil / empty text — unlike
  # Text#valid?; confirm this asymmetry is intentional
  def valid?
    true
  end
end
end
end
| ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message_content/text.rb | lib/intelligence/message_content/text.rb | module Intelligence
module MessageContent
# Plain text message content.
class Text < Base
  schema do
    text String, required: true
  end

  attribute :text

  # valid when text is present and non-empty
  def valid?
    ( text || false ) && text.respond_to?( :empty? ) && !text.empty?
  end
end
end
end | ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
EndlessInternational/intelligence | https://github.com/EndlessInternational/intelligence/blob/2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a/lib/intelligence/message_content/file.rb | lib/intelligence/message_content/file.rb | module Intelligence
module MessageContent
# Message content referencing a remote file by uri; the content type is
# derived from the uri path's extension when not supplied explicitly.
class File < Base
  schema do
    content_type String
    uri URI, required: true
  end

  attribute :uri

  def initialize( attributes = nil )
    # coerce a string uri into a URI instance; use [] rather than fetch so a
    # missing :uri key no longer raises KeyError here ( the schema, not the
    # constructor, is responsible for enforcing presence )
    if attributes && attributes[ :uri ]
      attributes = attributes.dup
      attributes[ :uri ] = URI( attributes[ :uri ] ) unless attributes[ :uri ].is_a?( URI )
    end
    super( attributes )
  end

  # lazily computes ( and caches ) the content type from the uri path's
  # extension when it was not provided
  def content_type
    @attributes[ :content_type ] ||= begin
      computed = valid_uri? ? MIME::Types.type_for( uri.path )&.first&.content_type : nil
      computed&.freeze
    end
  end

  # true when the uri uses one of the given schemes and has a non-empty path
  def valid_uri?( schemes = [ 'http', 'https' ] )
    !!( uri && schemes.include?( uri.scheme ) && uri.path && !uri.path.empty? )
  end

  # valid when the uri is usable and resolves to a registered MIME type
  def valid?
    valid_uri? && !MIME::Types[ content_type ].empty?
  end

  def to_h
    hash = super
    hash[ :uri ] = uri.to_s if hash[ :uri ]
    hash[ :content_type ] = content_type
    hash
  end
end
end
end | ruby | MIT | 2ea7d4c0121a2c3eb962bb6199df67d8a38cb50a | 2026-01-04T17:53:00.421273Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.