file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
lua/avante/ui/selector/providers/mini_pick.lua | Lua | local Utils = require("avante.utils")
local M = {}
---Show the selector using mini.pick's `ui_select` implementation.
---Items already present in `selector.selected_item_ids` are filtered out.
---@param selector avante.ui.Selector
function M.show(selector)
  -- luacheck: globals MiniPick
  ---@diagnostic disable-next-line: undefined-field
  if not _G.MiniPick then
    Utils.error("mini.pick is not set up. Please install and set up mini.pick to use it as a file selector.")
    return
  end
  local items = {}
  -- Map titles back to ids so the chosen entry can be resolved in `choose`.
  local title_to_id = {}
  for _, item in ipairs(selector.items) do
    title_to_id[item.title] = item.id
    if not vim.list_contains(selector.selected_item_ids, item.id) then table.insert(items, item) end
  end
  ---Callback mini.pick invokes with the chosen item, or nil on cancel.
  local function choose(item)
    if not item then
      selector.on_select(nil)
      return
    end
    local item_ids = {}
    ---item is not a list
    -- NOTE(review): `item` is a single item table; iterating its field values
    -- and looking each up in `title_to_id` only resolves the `title` field
    -- (other field values miss the map and are dropped) — confirm this is the
    -- intended way to extract the id.
    for _, item_ in pairs(item) do
      table.insert(item_ids, title_to_id[item_])
    end
    selector.on_select(item_ids)
  end
  ---@diagnostic disable-next-line: undefined-global
  MiniPick.ui_select(items, {
    prompt = selector.title,
    format_item = function(item) return item.title end,
  }, choose)
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/ui/selector/providers/native.lua | Lua | local M = {}
---Show the selector using the built-in `vim.ui.select` UI.
---When `selector.on_delete_item` is provided, a follow-up prompt offers
---open / delete / cancel actions for the chosen item.
---@param selector avante.ui.Selector
function M.show(selector)
  local items = {}
  -- Hide items that are already selected.
  for _, item in ipairs(selector.items) do
    if not vim.list_contains(selector.selected_item_ids, item.id) then table.insert(items, item) end
  end
  vim.ui.select(items, {
    prompt = selector.title,
    format_item = function(item)
      local title = item.title
      -- Mark the default item with a bullet so it stands out in the list.
      if item.id == selector.default_item_id then title = "● " .. title end
      return title
    end,
  }, function(item)
    if not item then
      selector.on_select(nil)
      return
    end
    -- If on_delete_item callback is provided, prompt for action
    if type(selector.on_delete_item) == "function" then
      vim.ui.input(
        { prompt = "Action for '" .. item.title .. "': (o)pen, (d)elete, (c)ancel?", default = "" },
        function(input)
          if not input then -- User cancelled input
            selector.on_select(nil) -- Treat as cancellation of selection
            return
          end
          local choice = input:lower()
          if choice == "d" or choice == "delete" then
            selector.on_delete_item(item.id)
            -- The native provider handles the UI flow; we just need to refresh.
            selector.on_open() -- Re-open the selector to refresh the list
          elseif choice == "" or choice == "o" or choice == "open" then
            -- Empty input defaults to "open".
            selector.on_select({ item.id })
          elseif choice == "c" or choice == "cancel" then
            -- Explicit cancel: re-open the selector when possible so the user
            -- can pick something else instead of aborting outright.
            if type(selector.on_open) == "function" then
              selector.on_open()
            else
              selector.on_select(nil) -- Fallback if on_open is not defined
            end
          else -- any other (unrecognized) input: treat as cancellation
            selector.on_select(nil)
          end
        end
      )
    else
      -- Default behavior: directly select the item
      selector.on_select({ item.id })
    end
  end)
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/ui/selector/providers/snacks.lua | Lua | local Utils = require("avante.utils")
local M = {}
---Show the selector using folke's Snacks picker.
---@param selector avante.ui.Selector
function M.show(selector)
  ---@diagnostic disable-next-line: undefined-field
  if not _G.Snacks then
    Utils.error("Snacks is not set up. Please install and set up Snacks to use it as a file selector.")
    return
  end

  ---Finder invoked by Snacks: filters out already-selected items and applies
  ---the live query, escaped so it is matched literally rather than as a pattern.
  local function snacks_finder(_opts, ctx)
    local query = ctx.filter.search or ""
    local items = {}
    for i, item in ipairs(selector.items) do
      if not vim.list_contains(selector.selected_item_ids, item.id) then
        if query == "" or item.title:match(query:gsub("[%(%)%.%%%+%-%*%?%[%]%^%$]", "%%%1")) then
          table.insert(items, {
            formatted = item.title,
            text = item.title,
            item = item,
            idx = i,
            preview = selector.get_preview_content and (function()
              local content, filetype = selector.get_preview_content(item.id)
              return {
                text = content,
                ft = filetype,
              }
            end)() or nil,
          })
        end
      end
    end
    return items
  end

  -- Guard so on_select fires exactly once (confirm vs. on_close can race).
  local completed = false

  ---@diagnostic disable-next-line: undefined-global
  Snacks.picker.pick(vim.tbl_deep_extend("force", {
    source = "select",
    live = true,
    finder = snacks_finder,
    ---@diagnostic disable-next-line: undefined-global
    format = Snacks.picker.format.ui_select({ format_item = function(item, _) return item.title end }),
    title = selector.title,
    preview = selector.get_preview_content and "preview" or nil,
    layout = {
      preset = "default",
    },
    confirm = function(picker)
      if completed then return end
      completed = true
      picker:close()
      local items = picker:selected({ fallback = true })
      local selected_item_ids = vim.tbl_map(function(item) return item.item.id end, items)
      selector.on_select(selected_item_ids)
    end,
    on_close = function()
      if completed then return end
      completed = true
      vim.schedule(function() selector.on_select(nil) end)
    end,
    actions = {
      delete_selection = function(picker)
        local selections = picker:selected({ fallback = true })
        if #selections == 0 then return end
        -- FIX: the prompt previously contained U+00B7 middle dots instead of
        -- spaces ("Remove·selection?·(").
        vim.ui.input({ prompt = "Remove selection? (" .. #selections .. " items) [y/N]" }, function(input)
          if input and input:lower() == "y" then
            for _, selection in ipairs(selections) do
              selector.on_delete_item(selection.item.id)
              for i, item in ipairs(selector.items) do
                if item.id == selection.item.id then
                  -- Stop after removing: ids are unique, and removing while
                  -- iterating forward would skip the element after `i`.
                  table.remove(selector.items, i)
                  break
                end
              end
            end
            picker:refresh()
          end
        end)
      end,
    },
    win = {
      input = {
        keys = {
          ["<C-DEL>"] = { "delete_selection", mode = { "i", "n" } },
        },
      },
    },
  }, selector.provider_opts))
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/ui/selector/providers/telescope.lua | Lua | local Utils = require("avante.utils")
local M = {}
---Show the selector using telescope.nvim.
---@param selector avante.ui.Selector
function M.show(selector)
  local success, _ = pcall(require, "telescope")
  if not success then
    Utils.error("telescope is not installed. Please install telescope to use it as a file selector.")
    return
  end
  local pickers = require("telescope.pickers")
  local finders = require("telescope.finders")
  local conf = require("telescope.config").values
  local actions = require("telescope.actions")
  local action_state = require("telescope.actions.state")
  local previewers = require("telescope.previewers")

  -- Hide items that are already selected.
  local items = {}
  for _, item in ipairs(selector.items) do
    if not vim.list_contains(selector.selected_item_ids, item.id) then table.insert(items, item) end
  end

  ---Shared entry maker (previously duplicated for the initial and refreshed
  ---finders): telescope entry carrying the item id as its value.
  local function entry_maker(entry)
    return {
      value = entry.id,
      display = entry.title,
      ordinal = entry.title,
    }
  end

  ---Collect the item ids of the current (multi-)selection.
  ---@param prompt_bufnr integer
  ---@return string[]
  local function get_selected_item_ids(prompt_bufnr)
    local picker = action_state.get_current_picker(prompt_bufnr)
    local selections
    local multi_selection = picker:get_multi_selection()
    if #multi_selection ~= 0 then
      selections = multi_selection
    else
      selections = action_state.get_selected_entry()
      selections = vim.islist(selections) and selections or { selections }
    end
    return vim.iter(selections):map(function(selection) return selection.value end):totable()
  end

  pickers
    .new(
      {},
      vim.tbl_extend("force", {
        prompt_title = selector.title,
        finder = finders.new_table({
          results = items,
          entry_maker = entry_maker,
        }),
        sorter = conf.file_sorter(),
        previewer = selector.get_preview_content and previewers.new_buffer_previewer({
          title = "Preview",
          define_preview = function(self, entry)
            if not entry then return end
            local content, filetype = selector.get_preview_content(entry.value)
            local lines = vim.split(content or "", "\n")
            -- Ensure the buffer exists and is valid before setting lines
            if vim.api.nvim_buf_is_valid(self.state.bufnr) then
              vim.api.nvim_buf_set_lines(self.state.bufnr, 0, -1, false, lines)
              -- Set filetype after content is loaded
              vim.api.nvim_set_option_value("filetype", filetype, { buf = self.state.bufnr })
              -- Reset the preview cursor to the first line (pcall guards
              -- against an invalid window or an empty buffer).
              vim.schedule(function()
                if vim.api.nvim_buf_is_valid(self.state.bufnr) then
                  local row = math.min(vim.api.nvim_buf_line_count(self.state.bufnr), 1)
                  pcall(vim.api.nvim_win_set_cursor, self.state.winnr, { row, 0 })
                end
              end)
            end
          end,
        }),
        attach_mappings = function(prompt_bufnr, map)
          map("i", "<esc>", require("telescope.actions").close)
          map("i", "<c-del>", function()
            local selected_item_ids = get_selected_item_ids(prompt_bufnr)
            -- FIX: the prompt previously contained U+00B7 middle dots instead
            -- of spaces ("Remove·selection?·(").
            vim.ui.input({ prompt = "Remove selection? (" .. #selected_item_ids .. " items) [y/N]" }, function(input)
              if input and input:lower() == "y" then
                for _, item_id in ipairs(selected_item_ids) do
                  selector.on_delete_item(item_id)
                end
                local new_items = {}
                for _, item in ipairs(items) do
                  if not vim.list_contains(selected_item_ids, item.id) then table.insert(new_items, item) end
                end
                local new_finder = finders.new_table({
                  results = new_items,
                  entry_maker = entry_maker,
                })
                local picker = action_state.get_current_picker(prompt_bufnr)
                picker:refresh(new_finder, { reset_prompt = true })
              end
            end)
          end, { desc = "delete_selection" })
          actions.select_default:replace(function()
            selector.on_select(get_selected_item_ids(prompt_bufnr))
            pcall(actions.close, prompt_bufnr)
          end)
          return true
        end,
      }, selector.provider_opts)
    )
    :find()
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/diff2search_replace.lua | Lua | local function trim(s) return s:gsub("^%s+", ""):gsub("%s+$", "") end
---Split `text` on line breaks (\r and/or \n). Empty segments are skipped,
---so runs of consecutive line breaks collapse.
---@param text string
---@return string[]
local function split_lines(text)
  local result = {}
  local count = 0
  for segment in text:gmatch("[^\r\n]+") do
    count = count + 1
    result[count] = segment
  end
  return result
end
---Convert a unified-diff style string (with `@@ -a,b +c,d @@` hunk headers)
---into SEARCH/REPLACE blocks. Input without any hunk header is returned as-is.
---NOTE(review): the pattern requires `-N,M` with a comma, so single-line hunk
---headers like `@@ -3 +3 @@` are not recognized — confirm that is acceptable
---for the diffs this receives.
---@param diff_text string
---@return string
local function diff2search_replace(diff_text)
  if not diff_text:match("@@%s*%-%d+,%d+%s%+") then return diff_text end
  local blocks = {}
  local pos = 1
  local len = #diff_text
  -- Parse each @@ hunk in turn.
  while pos <= len do
    -- Find the start of the next @@ hunk header.
    local start_at = diff_text:find("@@%s*%-%d+,%d+%s%+", pos)
    if not start_at then break end
    -- The hunk ends at the next @@ header, or at the end of the text.
    local next_at = diff_text:find("@@%s*%-%d+,%d+%s%+", start_at + 1)
    local block_end = next_at and (next_at - 1) or len
    local block = diff_text:sub(start_at, block_end)
    -- Drop the leading `@@ ... @@` header line itself.
    local first_nl = block:find("\n")
    if first_nl then block = block:sub(first_nl + 1) end
    local search_lines, replace_lines = {}, {}
    for _, line in ipairs(split_lines(block)) do
      local first = line:sub(1, 1)
      if first == "-" then
        table.insert(search_lines, line:sub(2))
      elseif first == "+" then
        table.insert(replace_lines, line:sub(2))
      elseif first == " " then
        -- Context lines belong to both the search and the replace text.
        table.insert(search_lines, line:sub(2))
        table.insert(replace_lines, line:sub(2))
      end
    end
    local search = table.concat(search_lines, "\n")
    local replace = table.concat(replace_lines, "\n")
    table.insert(blocks, "------- SEARCH\n" .. trim(search) .. "\n=======\n" .. trim(replace) .. "\n+++++++ REPLACE")
    pos = block_end + 1
  end
  return table.concat(blocks, "\n\n")
end
return diff2search_replace
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/environment.lua | Lua | local Utils = require("avante.utils")
---@class avante.utils.environment
local M = {}
---@private
---@type table<string, string>
M.cache = {}
---Parse an environment variable, with support for a `cmd:` prefix (or a list
---of command parts) that resolves the value by running a shell command.
---Successful lookups are memoized in `M.cache`.
---@param key_name string | string[] env var name, "cmd:<shell command>", or a list of command parts
---@param override? string env var name that, when set, takes precedence over running the command
---@return string | nil
function M.parse(key_name, override)
  if key_name == nil then error("Requires key_name") end
  local cache_key = type(key_name) == "table" and table.concat(key_name, "__") or key_name
  if M.cache[cache_key] ~= nil then return M.cache[cache_key] end
  -- A table key_name is itself the command; a string may carry a "cmd:" prefix.
  local cmd = type(key_name) == "table" and key_name or key_name:match("^cmd:(.*)")
  local value = nil
  if cmd ~= nil then
    -- The override env var, when present and non-empty, short-circuits the command.
    if override ~= nil and override ~= "" then
      value = os.getenv(override)
      if value ~= nil then
        M.cache[cache_key] = value
        return value
      end
    end
    if type(cmd) == "table" then cmd = table.concat(cmd, " ") end
    Utils.debug("running command:", cmd)
    local exit_codes = { 0 }
    local result = Utils.shell_run(cmd)
    local code = result.code
    local stdout = result.stdout and vim.split(result.stdout, "\n") or {}
    if vim.tbl_contains(exit_codes, code) then
      -- Only the first line of output is used as the value.
      value = stdout[1]
    else
      -- FIX: the message previously read "(error code<N>)" with no space.
      Utils.error(
        "failed to get key: (error code " .. code .. ")\n" .. result.stdout,
        { once = true, title = "Avante" }
      )
    end
  else
    value = os.getenv(key_name)
  end
  if value ~= nil then M.cache[cache_key] = value end
  return value
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/file.lua | Lua | local LRUCache = require("avante.utils.lru_cache")
local Filetype = require("plenary.filetype")
---@class avante.utils.file
local M = {}
local api = vim.api
local fn = vim.fn
local _file_content_lru_cache = LRUCache:new(60)
-- Keep the file-content cache coherent: whenever a buffer is written, refresh
-- the cached content for that path (only if the path is already cached).
api.nvim_create_autocmd("BufWritePost", {
  callback = function()
    local filepath = api.nvim_buf_get_name(0)
    local keys = _file_content_lru_cache:keys()
    if vim.tbl_contains(keys, filepath) then
      local content = table.concat(api.nvim_buf_get_lines(0, 0, -1, false), "\n")
      _file_content_lru_cache:set(filepath, content)
    end
  end,
})
---Read a file's content as a single string, with LRU caching.
---Returns nil when the file cannot be read (e.g. it does not exist).
---@param filepath string
---@return string | nil
function M.read_content(filepath)
  local cached_content = _file_content_lru_cache:get(filepath)
  if cached_content then return cached_content end
  -- fn.readfile() raises on unreadable paths; pcall turns that into nil.
  local ok, lines = pcall(fn.readfile, filepath)
  if ok and lines then
    local content = table.concat(lines, "\n")
    _file_content_lru_cache:set(filepath, content)
    return content
  end
  return nil
end
---Whether a path exists on disk (anything stat-able: file, directory, link).
---@param filepath string
---@return boolean
function M.exists(filepath) return vim.uv.fs_stat(filepath) ~= nil end
---Whether `filepath` lives inside the current project root.
---A path-separator boundary is required so that a sibling directory whose
---name merely starts with the root (e.g. "/proj-old" vs "/proj") does not match.
---@param filepath string
---@return boolean
function M.is_in_project(filepath)
  local Root = require("avante.utils.root")
  local project_root = Root.get()
  if project_root == nil or project_root == "" then return true end
  local abs_filepath = vim.fn.fnamemodify(filepath, ":p")
  if abs_filepath:sub(1, #project_root) ~= project_root then return false end
  -- Accept: exact match, root already ends with a separator, or the next
  -- character after the prefix is a separator.
  local next_char = abs_filepath:sub(#project_root + 1, #project_root + 1)
  return next_char == "" or next_char == "/" or next_char == "\\" or project_root:sub(-1):match("[/\\]") ~= nil
end
---Resolve an icon (and its highlight group) for a file path.
---Prefers mini.icons when available, then nvim-web-devicons, otherwise
---falls back to an empty icon and no highlight.
---@param filepath string
---@return string icon
---@return string|nil hl highlight group (nil when no provider supplied one)
function M.get_file_icon(filepath)
  local filetype = Filetype.detect(filepath, {}) or "unknown"
  ---@type string
  local icon, hl
  ---@diagnostic disable-next-line: undefined-field
  if _G.MiniIcons ~= nil then
    ---@diagnostic disable-next-line: undefined-global
    icon, hl, _ = MiniIcons.get("filetype", filetype) -- luacheck: ignore
  else
    local ok, devicons = pcall(require, "nvim-web-devicons")
    if ok then
      -- First try a filetype-aware lookup without a default, then retry with
      -- the default icon so unknown types still get a placeholder glyph.
      icon, hl = devicons.get_icon(filepath, filetype, { default = false })
      if not icon then
        icon, hl = devicons.get_icon(filepath, nil, { default = true })
        icon = icon or " "
      end
    else
      icon = ""
    end
  end
  return icon, hl
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/init.lua | Lua | local api = vim.api
local fn = vim.fn
local lsp = vim.lsp
local LRUCache = require("avante.utils.lru_cache")
local diff2search_replace = require("avante.utils.diff2search_replace")
---@class avante.utils: LazyUtilCore
---@field tokens avante.utils.tokens
---@field root avante.utils.root
---@field file avante.utils.file
---@field path avante.utils.path
---@field environment avante.utils.environment
---@field lsp avante.utils.lsp
---@field logger avante.utils.promptLogger
local M = {}
-- Lazy-load submodules: unknown keys first defer to lazy.nvim's util module
-- (so M behaves as a LazyUtilCore superset), then fall back to requiring
-- `avante.utils.<key>` and caching the result on M.
setmetatable(M, {
  __index = function(t, k)
    local ok, lazyutil = pcall(require, "lazy.core.util")
    if ok and lazyutil[k] then return lazyutil[k] end
    ---@diagnostic disable-next-line: no-unknown
    t[k] = require("avante.utils." .. k)
    return t[k]
  end,
})
---Check if a plugin is installed
---@param plugin string
---@return boolean
function M.has(plugin)
  -- Prefer lazy.nvim's plugin registry when it is available.
  local has_lazy, LazyConfig = pcall(require, "lazy.core.config")
  if has_lazy then return LazyConfig.plugins[plugin] ~= nil end
  -- Otherwise fall back to attempting to require the module directly
  -- (parentheses keep only pcall's boolean result).
  return (pcall(require, plugin))
end

---Whether the current platform is Windows.
---@return boolean
function M.is_win() return M.path.is_win() end

-- Platform path separator, re-exported from the path submodule.
M.path_sep = M.path.SEP
---Normalized operating system identifier; raises for unrecognized platforms.
---@return "linux" | "darwin" | "windows"
function M.get_os_name()
  local sysname = vim.uv.os_uname().sysname
  -- Map uv's sysname values onto the identifiers the rest of avante expects.
  local mapping = {
    Linux = "linux",
    Darwin = "darwin",
    Windows_NT = "windows",
  }
  local normalized = mapping[sysname]
  if not normalized then error("Unsupported operating system: " .. sysname) end
  return normalized
end
---Build a human-readable summary of the host system for prompt context.
---@return string
function M.get_system_info()
  local uname = vim.uv.os_uname()
  local os_name = uname.sysname
  local os_version = uname.release
  local os_machine = uname.machine
  -- These env vars may be unset (e.g. SHELL/LANG on Windows); %s errors on
  -- nil under LuaJIT, so substitute a placeholder instead.
  local lang = os.getenv("LANG") or "unknown"
  local shell = os.getenv("SHELL") or "unknown"
  local res = string.format(
    "- Platform: %s-%s-%s\n- Shell: %s\n- Language: %s\n- Current date: %s",
    os_name,
    os_version,
    os_machine,
    shell,
    lang,
    os.date("%Y-%m-%d")
  )
  local project_root = M.root.get()
  if project_root then res = res .. string.format("\n- Project root: %s", project_root) end
  local is_git_repo = vim.fn.isdirectory(".git") == 1
  if is_git_repo then res = res .. "\n- The user is operating inside a git repository" end
  return res
end
---Build the argv list used to run `input_cmd` through the user's shell.
---@param input_cmd string
---@param shell_cmd string? shell invocation used on POSIX (defaults to "sh -c")
---@return string[]
local function get_cmd_for_shell(input_cmd, shell_cmd)
  local shell = vim.o.shell:lower()
  local cmd = {}
  -- PowerShell: swap double quotes for singles before passing the command.
  -- FIX: gsub returns (string, count); as the LAST element of a table
  -- constructor both values expand, so the substitution count used to be
  -- appended to argv and passed to the shell as an extra argument. The
  -- parentheses truncate to the string only.
  if shell:match("powershell") then
    cmd = { "powershell.exe", "-NoProfile", "-Command", (input_cmd:gsub('"', "'")) }
  elseif shell:match("pwsh") then
    cmd = { "pwsh.exe", "-NoProfile", "-Command", (input_cmd:gsub('"', "'")) }
  elseif fn.has("win32") > 0 then
    cmd = { "powershell.exe", "-NoProfile", "-Command", (input_cmd:gsub('"', "'")) }
  else
    -- linux and macos we will just do sh -c
    shell_cmd = shell_cmd or "sh -c"
    for _, cmd_part in ipairs(vim.split(shell_cmd, " ")) do
      table.insert(cmd, cmd_part)
    end
    table.insert(cmd, input_cmd)
  end
  return cmd
end
--- This function will run given shell command synchronously.
---@param input_cmd string
---@param shell_cmd string?
---@return vim.SystemCompleted
function M.shell_run(input_cmd, shell_cmd)
  local argv = get_cmd_for_shell(input_cmd, shell_cmd)
  -- Block until the process finishes; only stdout and the exit code are kept.
  local completed = vim.system(argv, { text = true }):wait()
  return { stdout = completed.stdout, code = completed.code }
end
---Run a shell command asynchronously via jobstart, invoking `on_complete`
---exactly once with the combined stdout+stderr output and the exit code.
---@param input_cmd string
---@param shell_cmd string?
---@param on_complete fun(output: string, code: integer)
---@param cwd? string
---@param timeout? integer Timeout in milliseconds
function M.shell_run_async(input_cmd, shell_cmd, on_complete, cwd, timeout)
  local cmd = get_cmd_for_shell(input_cmd, shell_cmd)
  ---@type string[]
  local output = {}
  local timer = nil
  local completed = false
  -- Create a wrapper for on_complete to ensure it's only called once
  -- (on_exit and the timeout timer may race to complete).
  local function complete_once(out, code)
    if completed then return end
    completed = true
    -- Clean up timer if it exists
    if timer then
      timer:stop()
      timer:close()
      timer = nil
    end
    on_complete(out, code)
  end
  -- Start the job; stdout and stderr are interleaved into one buffer.
  local job_id = fn.jobstart(cmd, {
    on_stdout = function(_, data)
      if not data then return end
      vim.list_extend(output, data)
    end,
    on_stderr = function(_, data)
      if not data then return end
      vim.list_extend(output, data)
    end,
    on_exit = function(_, exit_code) complete_once(table.concat(output, "\n"), exit_code) end,
    cwd = cwd,
  })
  -- Set up timeout if specified
  if timeout and timeout > 0 then
    timer = vim.uv.new_timer()
    if timer then
      timer:start(timeout, 0, function()
        -- The timer fires on the uv loop; hop to the main loop before
        -- touching jobs or invoking user callbacks.
        vim.schedule(function()
          if not completed and job_id then
            -- Kill the job
            fn.jobstop(job_id)
            -- Complete with timeout error (124, the conventional shell
            -- timeout exit status)
            complete_once("Command timed out after " .. timeout .. "ms", 124)
          end
        end)
      end)
    end
  end
end
---@see https://github.com/LazyVim/LazyVim/blob/main/lua/lazyvim/util/toggle.lua
---
---@alias _ToggleSet fun(state: boolean): nil
---@alias _ToggleGet fun(): boolean
---
---@class ToggleBind
---@field name string
---@field set _ToggleSet
---@field get _ToggleGet
---
---@class ToggleBind.wrap: ToggleBind
---@operator call:boolean
---@param toggle ToggleBind
function M.toggle_wrap(toggle)
  -- Make the binding callable: each call flips the state, reports the new
  -- state to the user, and returns it.
  local mt = {
    __call = function()
      toggle.set(not toggle.get())
      local new_state = toggle.get()
      if new_state then
        M.info("enabled: " .. toggle.name)
      else
        M.warn("disabled: " .. toggle.name)
      end
      return new_state
    end,
  }
  return setmetatable(toggle, mt) --[[@as ToggleBind.wrap]]
end
-- Wrapper around vim.keymap.set that will
-- not create a keymap if a lazy key handler exists.
-- It will also set `silent` to true by default.
--
---@param mode string|string[] Mode short-name, see |nvim_set_keymap()|.
--- Can also be list of modes to create mapping on multiple modes.
---@param lhs string Left-hand side |{lhs}| of the mapping.
---@param rhs string|function Right-hand side |{rhs}| of the mapping, can be a Lua function.
---
---@param opts? vim.keymap.set.Opts
function M.safe_keymap_set(mode, lhs, rhs, opts)
  ---@type boolean
  local ok
  ---@module "lazy.core.handler"
  local H
  ok, H = pcall(require, "lazy.core.handler")
  if not ok then
    -- Without lazy.nvim there is nothing to conflict with; map directly.
    M.debug("lazy.nvim is not available. Avante will use vim.keymap.set")
    vim.keymap.set(mode, lhs, rhs, opts)
    return
  end
  local Keys = H.handlers.keys
  ---@cast Keys LazyKeysHandler
  local modes = type(mode) == "string" and { mode } or mode
  ---@cast modes -string
  ---@param m string
  ---@diagnostic disable-next-line: undefined-field
  modes = vim.tbl_filter(function(m) return not (Keys and Keys.have and Keys:have(lhs, m)) end, modes)
  -- don't create keymap if a lazy keys handler exists
  if #modes > 0 then
    opts = opts or {}
    -- Default to silent unless the caller explicitly opted out.
    opts.silent = opts.silent ~= false
    if opts.remap and not vim.g.vscode then
      ---@diagnostic disable-next-line: no-unknown
      opts.remap = nil
    end
    vim.keymap.set(mode, lhs, rhs, opts)
  end
end
---Strip an exact prefix and/or suffix from `str` when present.
---@param str string
---@param opts? {suffix?: string, prefix?: string}
function M.trim(str, opts)
  if not opts then return str end
  local result = str
  if opts.suffix then
    local tail = result:sub(#result - #opts.suffix + 1)
    if tail == opts.suffix then result = result:sub(1, #result - #opts.suffix) end
  end
  if opts.prefix then
    local head = result:sub(1, #opts.prefix)
    if head == opts.prefix then result = result:sub(#opts.prefix + 1) end
  end
  return result
end
---Whether the editor is in a character-, line-, or block-wise visual mode.
---@return boolean
function M.in_visual_mode()
  local current_mode = fn.mode()
  -- "\22" is CTRL-V, the mode character fn.mode() reports for visual-block
  -- mode; spelled as an escape so the comparison is visible in source (the
  -- raw control byte previously used here is invisible in most editors).
  return current_mode == "v" or current_mode == "V" or current_mode == "\22"
end
---Get the selected content and range in Visual mode
---@return avante.SelectionResult | nil Selected content and range
function M.get_visual_selection_and_range()
  if not M.in_visual_mode() then return nil end
  local Range = require("avante.range")
  local SelectionResult = require("avante.selection_result")
  -- Get the start and end positions of Visual mode
  local start_pos = fn.getpos("v")
  local end_pos = fn.getpos(".")
  -- Get the start and end line and column numbers
  local start_line = start_pos[2]
  local start_col = start_pos[3]
  local end_line = end_pos[2]
  local end_col = end_pos[3]
  -- If the start point is after the end point, swap them
  if start_line > end_line or (start_line == end_line and start_col > end_col) then
    start_line, end_line = end_line, start_line
    start_col, end_col = end_col, start_col
  end
  local content = "" -- luacheck: ignore
  local range = Range:new({ lnum = start_line, col = start_col }, { lnum = end_line, col = end_col })
  -- Check if it's a single-line selection
  if start_line == end_line then
    -- Get partial content of a single line
    -- NOTE(review): despite the comment above, whole lines are returned —
    -- the column-based slicing is deliberately commented out here and below.
    local line = fn.getline(start_line)
    -- content = string.sub(line, start_col, end_col)
    content = line
  else
    -- Multi-line selection: Get all lines in the selection
    local lines = fn.getline(start_line, end_line)
    -- Extract partial content of the first line
    -- lines[1] = string.sub(lines[1], start_col)
    -- Extract partial content of the last line
    -- lines[#lines] = string.sub(lines[#lines], 1, end_col)
    -- Concatenate all lines in the selection into a string
    if type(lines) == "table" then
      content = table.concat(lines, "\n")
    else
      content = lines
    end
  end
  if not content then return nil end
  local filepath = fn.expand("%:p")
  local filetype = M.get_filetype(filepath)
  -- Return the selected content and range
  return SelectionResult:new(filepath, filetype, content, range)
end
---Wrapper around `api.nvim_buf_get_lines` which defaults to the current buffer
---@param start integer
---@param end_ integer
---@param buf integer?
---@return string[]
function M.get_buf_lines(start, end_, buf)
  local bufnr = buf or 0
  return api.nvim_buf_get_lines(bufnr, start, end_, false)
end

---Get cursor row and column as (1, 0) based
---@param win_id integer?
---@return integer, integer
function M.get_cursor_pos(win_id)
  local pos = api.nvim_win_get_cursor(win_id or 0)
  return pos[1], pos[2]
end
---Check if the buffer is likely to have actionable conflict markers
---@param bufnr integer?
---@return boolean
function M.is_valid_buf(bufnr)
  local buf = bufnr or 0
  -- A "normal" buffer (empty buftype) that can be edited.
  return vim.bo[buf].buftype == "" and vim.bo[buf].modifiable
end
--- Check if a NUI container is valid:
--- 1. Container must exist
--- 2. Container must have a valid buffer number
--- 3. Container must have a valid window ID (only when check_winid is true)
--- Always returns a boolean value
---@param container NuiSplit | nil
---@param check_winid boolean? Whether to check window validity, defaults to false
---@return boolean
function M.is_valid_container(container, check_winid)
  if container == nil then return false end
  local has_valid_buf = container.bufnr ~= nil and api.nvim_buf_is_valid(container.bufnr)
  if not has_valid_buf then return false end
  -- Window validity is only enforced on request (defaults to off).
  if check_winid then return container.winid ~= nil and api.nvim_win_is_valid(container.winid) end
  return true
end
---Fetch a highlight group definition (without following links).
---@param name string?
---@return table
function M.get_hl(name)
  if name then return api.nvim_get_hl(0, { name = name, link = false }) end
  return {}
end
--- vendor from lazy.nvim for early access and override
---@param path string
---@return string
function M.norm(path)
  return M.path.normalize(path)
end
---Display a notification (vendored from lazy.nvim). Table messages are
---joined with newlines; calls from fast-event contexts are rescheduled.
---@param msg string|string[]
---@param opts? LazyNotifyOpts
function M.notify(msg, opts)
  if msg == nil then return end
  -- vim.notify is not safe inside fast events; defer to the main loop.
  if vim.in_fast_event() then
    return vim.schedule(function() M.notify(msg, opts) end)
  end
  opts = opts or {}
  if type(msg) == "table" then
    ---@diagnostic disable-next-line: no-unknown
    msg = table.concat(vim.tbl_filter(function(line) return line or false end, msg), "\n")
  end
  ---@diagnostic disable-next-line: undefined-field
  if opts.stacktrace then
    ---@diagnostic disable-next-line: undefined-field
    msg = msg .. M.pretty_trace({ level = opts.stacklevel or 2 })
  end
  local lang = opts.lang or "markdown"
  ---@diagnostic disable-next-line: undefined-field
  local n = opts.once and vim.notify_once or vim.notify
  n(msg, opts.level or vim.log.levels.INFO, {
    on_open = function(win)
      -- Render the notification body as markdown (conceal + treesitter).
      pcall(function() vim.treesitter.language.add("markdown") end)
      vim.wo[win].conceallevel = 3
      vim.wo[win].concealcursor = ""
      vim.wo[win].spell = false
      local buf = api.nvim_win_get_buf(win)
      if not pcall(vim.treesitter.start, buf, lang) then
        -- Fall back to regex syntax highlighting when treesitter fails.
        vim.bo[buf].filetype = lang
        vim.bo[buf].syntax = lang
      end
    end,
    title = opts.title or "Avante",
  })
end
---Notify at ERROR level.
---@param msg string|string[]
---@param opts? LazyNotifyOpts
function M.error(msg, opts)
  local o = opts or {}
  o.level = vim.log.levels.ERROR
  M.notify(msg, o)
end

---Notify at INFO level.
---@param msg string|string[]
---@param opts? LazyNotifyOpts
function M.info(msg, opts)
  local o = opts or {}
  o.level = vim.log.levels.INFO
  M.notify(msg, o)
end

---Notify at WARN level.
---@param msg string|string[]
---@param opts? LazyNotifyOpts
function M.warn(msg, opts)
  local o = opts or {}
  o.level = vim.log.levels.WARN
  M.notify(msg, o)
end
---Print a debug line; no-op unless avante's debug config flag is enabled.
---Non-string arguments are rendered with vim.inspect, and the output is
---prefixed with a timestamp and the caller's module:line location.
function M.debug(...)
  if not require("avante.config").debug then return end
  local args = { ... }
  if #args == 0 then return end
  -- Resolve caller info (stack level 2 = whoever called M.debug).
  local info = debug.getinfo(2, "Sl")
  local caller_source = info.source:match("@(.+)$") or "unknown"
  local caller_module = caller_source:gsub("^.*/lua/", ""):gsub("%.lua$", ""):gsub("/", ".")
  local timestamp = M.get_timestamp()
  local parts = {
    "[" .. timestamp .. "] [AVANTE] [DEBUG] [" .. caller_module .. ":" .. info.currentline .. "]",
  }
  for _, arg in ipairs(args) do
    table.insert(parts, type(arg) == "string" and arg or vim.inspect(arg))
  end
  print(unpack(parts))
end
---Return the 1-based index of `value` in the list `tbl`, or nil when absent.
function M.tbl_indexof(tbl, value)
  for index = 1, #tbl do
    if tbl[index] == value then return index end
  end
  return nil
end
---Update one `key:value` entry inside a comma-separated window option
---(e.g. 'winhighlight', 'fillchars'): replaces an existing entry for `key`
---or appends a new one.
---@param winid integer
---@param opt_name string
---@param key string
---@param value string
function M.update_win_options(winid, opt_name, key, value)
  local cur_opt_value = api.nvim_get_option_value(opt_name, { win = winid })
  -- Escape pattern magic characters so keys containing '-', '.', etc. are
  -- matched literally rather than interpreted as Lua patterns.
  local key_pat = key:gsub("(%W)", "%%%1")
  if cur_opt_value:find(key_pat .. ":") then
    -- Escape '%' in the replacement so a literal percent in value survives gsub.
    local replacement = (key .. ":" .. value):gsub("%%", "%%%%")
    cur_opt_value = cur_opt_value:gsub(key_pat .. ":[^,]*", replacement)
  else
    if #cur_opt_value > 0 then cur_opt_value = cur_opt_value .. "," end
    cur_opt_value = cur_opt_value .. key .. ":" .. value
  end
  api.nvim_set_option_value(opt_name, cur_opt_value, { win = winid })
end
---Read one `key:value` entry from a comma-separated window option.
---Returns nil when the option is empty or the key is absent.
function M.get_win_options(winid, opt_name, key)
  local opt_value = api.nvim_get_option_value(opt_name, { win = winid })
  if not opt_value then return end
  for _, entry in ipairs(vim.split(opt_value, ",")) do
    local pair = vim.split(entry, ":")
    if pair[1] == key then return pair[2] end
  end
end
---Find the first window currently displaying `bufnr`, or nil if there is none.
function M.get_winid(bufnr)
  for _, win in ipairs(api.nvim_list_wins()) do
    if api.nvim_win_get_buf(win) == bufnr then return win end
  end
end
---Make a buffer writable again (clears readonly and the modified flag).
function M.unlock_buf(bufnr)
  local bo = vim.bo[bufnr]
  bo.readonly = false
  bo.modified = false
  bo.modifiable = true
end

---Make a buffer read-only; leaves insert mode first if it is the current one.
function M.lock_buf(bufnr)
  if bufnr == api.nvim_get_current_buf() then vim.cmd("noautocmd stopinsert") end
  local bo = vim.bo[bufnr]
  bo.readonly = true
  bo.modified = false
  bo.modifiable = false
end
---Move the cursor of `winnr` (default: current window) to the very end of
---its buffer (last line, past the last character).
---@param winnr? number
---@return nil
function M.scroll_to_end(winnr)
  local win = winnr or 0
  local bufnr = api.nvim_win_get_buf(win)
  local last_lnum = api.nvim_buf_line_count(bufnr)
  local last_line = api.nvim_buf_get_lines(bufnr, -2, -1, true)[1]
  api.nvim_win_set_cursor(win, { last_lnum, api.nvim_strwidth(last_line) })
end
---Scroll every window that shows `bufnr` (default: current buffer) to the end.
---@param bufnr nil|integer
---@return nil
function M.buf_scroll_to_end(bufnr)
  local wins = M.buf_list_wins(bufnr or 0)
  for _, win in ipairs(wins) do
    M.scroll_to_end(win)
  end
end
---List every valid window currently displaying `bufnr`
---(0 or nil means the current buffer).
---@param bufnr nil|integer
---@return integer[]
function M.buf_list_wins(bufnr)
  local target = (not bufnr or bufnr == 0) and api.nvim_get_current_buf() or bufnr
  local wins = {}
  for _, win in ipairs(api.nvim_list_wins()) do
    if api.nvim_win_is_valid(win) and api.nvim_win_get_buf(win) == target then table.insert(wins, win) end
  end
  return wins
end
-- Buffer-local variable used to tag buffers belonging to the avante sidebar.
local sidebar_buffer_var_name = "is_avante_sidebar_buffer"

---Tag `bufnr` as an avante sidebar buffer.
function M.mark_as_sidebar_buffer(bufnr) api.nvim_buf_set_var(bufnr, sidebar_buffer_var_name, true) end

---Whether `bufnr` has been tagged as an avante sidebar buffer.
function M.is_sidebar_buffer(bufnr)
  local ok, value = pcall(api.nvim_buf_get_var, bufnr, sidebar_buffer_var_name)
  return ok and value == true
end
---Strip leading and trailing whitespace from `s`.
function M.trim_spaces(s)
  return s:match("^%s*(.-)%s*$")
end
---Remove trailing spaces from each line in a string
---@param content string The content to process
---@return string The content with trailing spaces removed from each line
function M.remove_trailing_spaces(content)
  if not content then return content end
  local cleaned = {}
  for i, line in ipairs(vim.split(content, "\n")) do
    -- Parentheses drop gsub's second (count) return value.
    cleaned[i] = (line:gsub("%s+$", ""))
  end
  return table.concat(cleaned, "\n")
end
---Return `default_value` when `v` is nil, otherwise `v` itself.
---FIX: the previous and/or ternary (`type(v) == "nil" and default_value or v`)
---returned nil instead of the default when `default_value` was `false`.
function M.fallback(v, default_value)
  if v == nil then return default_value end
  return v
end
---Join URL parts together, handling slashes correctly
---@param ... string URL parts to join
---@return string Joined URL
function M.url_join(...)
  local parts = { ... }
  local joined = parts[1] or ""
  for i = 2, #parts do
    local piece = parts[i]
    -- Skip nil/empty parts entirely.
    if piece and piece ~= "" then
      -- Normalize the seam so there is exactly one slash between the sides.
      if joined:sub(-1) == "/" then joined = joined:sub(1, -2) end
      if piece:sub(1, 1) == "/" then piece = piece:sub(2) end
      joined = joined .. "/" .. piece
    end
  end
  -- Drop a single trailing slash from the final result.
  if joined:sub(-1) == "/" then joined = joined:sub(1, -2) end
  return joined
end
-- luacheck: push no max comment line length
---Extended type check: supports the pseudo-types "list" and "map" in
---addition to the builtin type() names.
---@param type_name "'nil'" | "'number'" | "'string'" | "'boolean'" | "'table'" | "'function'" | "'thread'" | "'userdata'" | "'list'" | '"map"'
---@return boolean
function M.is_type(type_name, v)
  ---@diagnostic disable-next-line: deprecated
  local islist = vim.islist or vim.tbl_islist
  if type_name == "list" then
    return islist(v)
  elseif type_name == "map" then
    return type(v) == "table" and not islist(v)
  else
    return type(v) == type_name
  end
end
-- luacheck: pop
---Leading whitespace of `text` ("" when nil or when there is none).
---@param text string
---@return string
function M.get_indentation(text)
  if not text then return "" end
  local leading = text:match("^%s*")
  return leading or ""
end
---Remove ALL whitespace characters from `text` (not just leading/trailing).
---@param text string|nil
---@return string|nil
function M.trim_space(text)
  if not text then return text end
  -- Parenthesize to drop gsub's second return value (the substitution
  -- count), which previously leaked to callers as an extra return value.
  return (text:gsub("%s*", ""))
end
---Undo common over-escaping produced by LLM output: collapses doubled
---slashes before n/r/t, stray escapes before quotes, and literal
---backslash-escapes into their real control characters.
function M.trim_escapes(text)
  if not text then return text end
  -- Order matters: the doubled-slash forms must be collapsed before the
  -- single-backslash forms are expanded.
  local replacements = {
    { "//n", "/n" },
    { "//r", "/r" },
    { "//t", "/t" },
    { '/"', '"' },
    { '\\"', '"' },
    { "\\n", "\n" },
    { "\\r", "\r" },
    { "\\t", "\t" },
  }
  local out = text
  for _, pair in ipairs(replacements) do
    out = out:gsub(pair[1], pair[2])
  end
  return out
end
---Find the first window of `original_lines` where every line satisfies
---`compare_fn` against the corresponding line of `target_lines`.
---@param original_lines string[]
---@param target_lines string[]
---@param compare_fn fun(line_a: string, line_b: string): boolean
---@return integer | nil start_line
---@return integer | nil end_line
function M.try_find_match(original_lines, target_lines, compare_fn)
  for start = 1, #original_lines - #target_lines + 1 do
    local all_equal = true
    for offset = 0, #target_lines - 1 do
      if not compare_fn(original_lines[start + offset], target_lines[offset + 1]) then
        all_equal = false
        break
      end
    end
    if all_equal then return start, start + #target_lines - 1 end
  end
  return nil, nil
end
---Locate `target_lines` inside `original_lines`, relaxing the comparison
---progressively; the first strategy that produces a match wins.
---@param original_lines string[]
---@param target_lines string[]
---@return integer | nil start_line
---@return integer | nil end_line
function M.fuzzy_match(original_lines, target_lines)
  local strategies = {
    -- exact match
    function(a, b) return a == b end,
    -- ignore trailing spaces/tabs
    function(a, b) return M.trim(a, { suffix = " \t" }) == M.trim(b, { suffix = " \t" }) end,
    -- ignore all whitespace
    function(a, b) return M.trim_space(a) == M.trim_space(b) end,
    -- tolerate escaped characters in the target
    function(a, b) return a == M.trim_escapes(b) end,
    -- tolerate escapes AND ignore all whitespace
    function(a, b) return M.trim_space(a) == M.trim_space(M.trim_escapes(b)) end,
  }
  for _, compare in ipairs(strategies) do
    local start_line, end_line = M.try_find_match(original_lines, target_lines, compare)
    if start_line ~= nil and end_line ~= nil then return start_line, end_line end
  end
  return nil, nil
end
---Convert an absolute path into a path relative to the project root.
function M.relative_path(absolute) return M.make_relative_path(absolute, M.get_project_root()) end
---Build a document descriptor for the current buffer and cursor position,
---mirroring an LSP text document (URI, version, indentation settings).
---Row is converted to 1-based; col stays 0-based as LSP reports it.
function M.get_doc()
  local absolute = api.nvim_buf_get_name(0)
  local params = lsp.util.make_position_params(0, "utf-8")
  local position = {
    row = params.position.line + 1, -- LSP line is 0-based; expose 1-based row
    col = params.position.character,
  }
  local doc = {
    uri = params.textDocument.uri,
    -- changedtick serves as the document version (bumps on every edit)
    version = api.nvim_buf_get_var(0, "changedtick"),
    relativePath = M.relative_path(absolute),
    insertSpaces = vim.o.expandtab,
    tabSize = fn.shiftwidth(),
    indentSize = fn.shiftwidth(),
    position = position,
  }
  return doc
end
---Prepends line numbers to each line in a list of strings.
---@param lines string[] The lines of content to prepend line numbers to.
---@param start_line? integer The starting line number. Defaults to 1.
---@return string[] A new list of strings with line numbers prepended.
function M.prepend_line_numbers(lines, start_line)
  start_line = start_line or 1
  -- NOTE(review): the first emitted label is "L<start_line + 1>" because i
  -- starts at 1 and is ADDED to start_line (not start_line - 1). That reads
  -- as an off-by-one against the "Defaults to 1" doc above unless callers
  -- pass the 0-based line preceding the content — confirm with call sites.
  return vim
    .iter(lines)
    :enumerate()
    :map(function(i, line) return string.format("L%d: %s", i + start_line, line) end)
    :totable()
end
---Iterates through a list of strings and removes prefixes in form of "L<number>: " from them
---@param content string[]
---@return string[]
function M.trim_line_numbers(content)
  local stripped = {}
  for i, line in ipairs(content) do
    stripped[i] = (line:gsub("^L%d+: ", ""))
  end
  return stripped
end
---Debounce a function call
---Trailing-edge debounce: every call cancels the pending timer and schedules
---a fresh one, so `func` only runs after `delay` ms without further calls.
---@param func fun(...) function to debounce
---@param delay integer delay in milliseconds
---@return fun(...): uv.uv_timer_t debounced function
function M.debounce(func, delay)
  local timer = nil
  return function(...)
    local args = { ... }
    -- Cancel the previously scheduled invocation, if one is still pending.
    if timer and not timer:is_closing() then
      timer:stop()
      timer:close()
    end
    timer = vim.defer_fn(function()
      func(unpack(args))
      timer = nil
    end, delay)
    return timer
  end
end
---Throttle a function call
---Trailing-edge throttle: the first call schedules `func` after `delay` ms
---with the FIRST call's arguments; calls arriving while the timer is
---pending are dropped entirely.
---@param func fun(...) function to throttle
---@param delay integer delay in milliseconds
---@return fun(...): nil throttled function
function M.throttle(func, delay)
  local timer = nil
  return function(...)
    -- A pending timer means we are inside the throttle window: drop the call.
    if timer then return end
    local args = { ... }
    timer = vim.defer_fn(function()
      func(unpack(args))
      timer = nil
    end, delay)
  end
end
---Screen line of the cursor inside window `winid` (1 when invalid).
function M.winline(winid)
  -- An invalid window reports line 1 so callers land on the first line.
  if not vim.api.nvim_win_is_valid(winid) then return 1 end
  local screen_line = 1
  vim.api.nvim_win_call(winid, function() screen_line = fn.winline() end)
  return screen_line
end
function M.get_project_root() return M.root.get() end
---Whether `filepath`'s extension matches `target_ext`, treating the
---ts/tsx and js/jsx pairs as interchangeable.
function M.is_same_file_ext(target_ext, filepath)
  local ext = fn.fnamemodify(filepath, ":e")
  local interchangeable = { ts = "tsx", tsx = "ts", js = "jsx", jsx = "js" }
  if interchangeable[target_ext] == ext then return true end
  return ext == target_ext
end
-- Get recent filepaths in the same project and same file ext
---@param limit? integer maximum number of results (defaults to 10)
---@param filenames? string[] optional substrings a path must contain
---@param same_file_ext? boolean restrict to the current buffer's extension
---@return string[]
function M.get_recent_filepaths(limit, filenames, same_file_ext)
  local project_root = M.get_project_root()
  local current_ext = fn.expand("%:e")
  local oldfiles = vim.v.oldfiles
  local recent_files = {}
  for _, file in ipairs(oldfiles) do
    if vim.startswith(file, project_root) then
      -- Skip extension-less entries (directories, special buffers, etc.)
      local has_ext = file:match("%.%w+$")
      if not has_ext then goto continue end
      if same_file_ext then
        if not M.is_same_file_ext(current_ext, file) then goto continue end
      end
      if filenames and #filenames > 0 then
        -- NOTE(review): a file matching several `filenames` entries is
        -- inserted once per match, so duplicates are possible — confirm
        -- callers tolerate that.
        for _, filename in ipairs(filenames) do
          if file:find(filename) then table.insert(recent_files, file) end
        end
      else
        table.insert(recent_files, file)
      end
      if #recent_files >= (limit or 10) then break end
    end
    ::continue::
  end
  return recent_files
end
---Translate a gitignore-style glob into a Lua pattern.
---Fix: the previous version escaped ALL magic characters first — including
---the glob wildcards `*` and `?` — so the subsequent `**/`, `*` and `?`
---translations operated on already-escaped text and never matched
---(e.g. "*.log" became the broken pattern "%[^/]*%.log").
local function pattern_to_lua(pattern)
  -- Escape Lua magic characters, but leave the glob wildcards * and ? intact.
  local lua_pattern = pattern:gsub("[%(%)%.%%%+%-%[%]%^%$]", "%%%1")
  -- Protect "**/" with a placeholder so the single-star rule cannot eat it.
  lua_pattern = lua_pattern:gsub("%*%*/", "\1")
  lua_pattern = lua_pattern:gsub("%*", "[^/]*")
  lua_pattern = lua_pattern:gsub("%?", ".")
  lua_pattern = lua_pattern:gsub("\1", ".-/")
  -- A directory pattern ("dir/") matches everything underneath it.
  if lua_pattern:sub(-1) == "/" then lua_pattern = lua_pattern .. ".*" end
  return lua_pattern
end
---Parse a .gitignore file into Lua patterns.
---@return string[] ignore_patterns patterns to exclude (plus builtin defaults)
---@return string[] negate_patterns patterns from "!" entries that re-include files
function M.parse_gitignore(gitignore_path)
  local ignore_patterns, negate_patterns = {}, {}
  local fh = io.open(gitignore_path, "r")
  if not fh then return ignore_patterns, negate_patterns end
  for raw in fh:lines() do
    local is_blank_or_comment = not raw:match("%S") or raw:match("^#")
    if not is_blank_or_comment then
      local entry = raw:match("^%s*(.-)%s*$")
      if entry:sub(1, 1) == "!" then
        negate_patterns[#negate_patterns + 1] = pattern_to_lua(entry:sub(2))
      else
        ignore_patterns[#ignore_patterns + 1] = pattern_to_lua(entry)
      end
    end
  end
  fh:close()
  -- Always ignore VCS/build artifacts regardless of the .gitignore content.
  vim.list_extend(ignore_patterns, { "%.git", "%.worktree", "__pycache__", "node_modules" })
  return ignore_patterns, negate_patterns
end
---Whether `file` matches any ignore pattern and no negate ("!") pattern.
---Negate patterns take precedence over ignore patterns.
---@param file string
---@param ignore_patterns string[] Lua patterns from parse_gitignore
---@param negate_patterns string[] Lua patterns from "!" gitignore entries
---@return boolean
function M.is_ignored(file, ignore_patterns, negate_patterns)
  for _, pattern in ipairs(negate_patterns) do
    if file:match(pattern) then return false end
  end
  for _, pattern in ipairs(ignore_patterns) do
    -- Match either a directory component (pattern followed by "/") or the
    -- end of the path.
    if file:match(pattern .. "/") or file:match(pattern .. "$") then return true end
  end
  return false
end
---Scan a directory for files using the fastest available external tool:
---rg, then fd/fdfind, then (inside a git repo) git ls-files. Errors when
---none is available. Results are absolute paths.
---@param options { directory: string, add_dirs?: boolean, max_depth?: integer }
---@return string[]
function M.scan_directory(options)
  -- git ls-files cannot limit depth, so depth is post-filtered in that case.
  local cmd_supports_max_depth = true
  local cmd = (function()
    if vim.fn.executable("rg") == 1 then
      local cmd = {
        "rg",
        "--files",
        "--color",
        "never",
        "--no-require-git",
        "--no-ignore-parent",
        "--hidden",
        "--glob",
        "!.git/",
      }
      if options.max_depth ~= nil then vim.list_extend(cmd, { "--max-depth", options.max_depth }) end
      table.insert(cmd, options.directory)
      return cmd
    end
    -- fd is called 'fdfind' on Debian/Ubuntu due to naming conflicts
    local fd_executable = vim.fn.executable("fd") == 1 and "fd"
      or (vim.fn.executable("fdfind") == 1 and "fdfind" or nil)
    if fd_executable then
      local cmd = {
        fd_executable,
        "--type",
        "f",
        "--color",
        "never",
        "--no-require-git",
        "--hidden",
        "--exclude",
        ".git",
      }
      if options.max_depth ~= nil then vim.list_extend(cmd, { "--max-depth", options.max_depth }) end
      vim.list_extend(cmd, { "--base-directory", options.directory })
      return cmd
    end
  end)()
  if not cmd then
    -- Fallback: list tracked + untracked (non-ignored) files via git.
    if M.path_exists(M.join_paths(options.directory, ".git")) and vim.fn.executable("git") == 1 then
      if vim.fn.has("win32") == 1 then
        cmd = {
          "powershell",
          "-NoProfile",
          "-NonInteractive",
          "-Command",
          string.format(
            "Push-Location '%s'; (git ls-files --exclude-standard), (git ls-files --exclude-standard --others)",
            options.directory:gsub("/", "\\")
          ),
        }
      else
        cmd = {
          "bash",
          "-c",
          string.format("cd %s && git ls-files -co --exclude-standard", options.directory),
        }
      end
      cmd_supports_max_depth = false
    else
      M.error("No search command found, please install fd or fdfind or rg")
      return {}
    end
  end
  local files = vim.fn.systemlist(cmd)
  -- Normalize relative results (fd/git emit paths relative to the directory).
  files = vim
    .iter(files)
    :map(function(file)
      if not M.is_absolute_path(file) then return M.join_paths(options.directory, file) end
      return file
    end)
    :totable()
  -- Emulate --max-depth for commands that cannot do it natively.
  if options.max_depth ~= nil and not cmd_supports_max_depth then
    files = vim
      .iter(files)
      :filter(function(file)
        local base_dir = options.directory
        if base_dir:sub(-2) == "/." then base_dir = base_dir:sub(1, -3) end
        local rel_path = M.make_relative_path(file, base_dir)
        local pieces = vim.split(rel_path, "/")
        return #pieces <= options.max_depth
      end)
      :totable()
  end
  -- Optionally prepend the (deduplicated) parent directories of all results.
  if options.add_dirs then
    local dirs = {}
    local dirs_seen = {}
    for _, file in ipairs(files) do
      local dir = M.get_parent_path(file)
      if not dirs_seen[dir] then
        table.insert(dirs, dir)
        dirs_seen[dir] = true
      end
    end
    files = vim.list_extend(dirs, files)
  end
  return files
end
---Parent directory of `filepath`. Returns "" for "", M.path_sep for the
---parent of a top-level absolute entry, and "." for a top-level relative
---entry. A single trailing separator is ignored.
function M.get_parent_path(filepath)
  if filepath == nil then error("filepath cannot be nil") end
  if filepath == "" then return "" end
  local is_abs = M.is_absolute_path(filepath)
  -- Drop one trailing separator so "a/b/" behaves like "a/b".
  if filepath:sub(-1) == M.path_sep then filepath = filepath:sub(1, -2) end
  if filepath == "" then return "" end
  local parts = vim.split(filepath, M.path_sep)
  local parent_parts = vim.list_slice(parts, 1, #parts - 1)
  local res = table.concat(parent_parts, M.path_sep)
  if res == "" then
    if is_abs then return M.path_sep end
    return "."
  end
  return res
end
---Make `filepath` relative to `base_dir` (delegates to the path helper).
function M.make_relative_path(filepath, base_dir) return M.path.relative(base_dir, filepath, false) end
---Whether `path` is an absolute path (delegates to the path helper).
function M.is_absolute_path(path) return M.path.is_absolute(path) end
---Resolve `path` against the project root unless it is already absolute
---or a term:// URI (which must not be joined onto a directory).
function M.to_absolute_path(path)
  if not path or path == "" then return path end
  if path:sub(1, 1) == "/" or vim.startswith(path, "term://") then return path end
  return M.join_paths(M.get_project_root(), path)
end
---Join path segments into a normalized path. nil/empty segments are
---skipped, and an absolute segment resets the accumulated result.
function M.join_paths(...)
  local segments = { ... }
  local acc = segments[1] or ""
  for idx = 2, #segments do
    local segment = segments[idx]
    if segment ~= nil and segment ~= "" then
      if M.is_absolute_path(segment) then
        acc = segment
      elseif acc == "" then
        acc = segment
      else
        acc = M.path.join(acc, segment)
      end
    end
  end
  return M.norm(acc)
end
---Whether a file or directory exists at `path` (delegates to the path helper).
function M.path_exists(path) return M.path.is_exist(path) end
---Whether `str` begins with an uppercase ASCII letter.
function M.is_first_letter_uppercase(str) return string.match(str, "^[A-Z]") ~= nil end
---Scan `content` for @-mentions: "@codebase" enables project context (and
---is stripped from the text); "@diagnostics" enables diagnostics (kept).
---@param content string
---@return { new_content: string, enable_project_context: boolean, enable_diagnostics: boolean }
function M.extract_mentions(content)
  local result = {
    new_content = content,
    enable_project_context = false,
    enable_diagnostics = false,
  }
  if content:match("@codebase") then
    result.enable_project_context = true
    result.new_content = content:gsub("@codebase", "")
  end
  result.enable_diagnostics = content:match("@diagnostics") ~= nil
  return result
end
---Built-in @-mentions available everywhere (no sidebar callback needed).
---@return AvanteMention[]
function M.get_mentions()
  return {
    {
      description = "codebase",
      command = "codebase",
      details = "repo map",
    },
    {
      description = "diagnostics",
      command = "diagnostics",
      details = "diagnostics",
    },
  }
end
---Mentions for the chat input: the base mentions plus sidebar-backed
---entries (file picker, quickfix list, open buffers).
---@return AvanteMention[]
function M.get_chat_mentions()
  local mentions = M.get_mentions()
  local chat_only = {
    {
      description = "file",
      command = "file",
      details = "add files...",
      callback = function(sidebar) sidebar.file_selector:open() end,
    },
    {
      description = "quickfix",
      command = "quickfix",
      details = "add files in quickfix list to chat context",
      callback = function(sidebar) sidebar.file_selector:add_quickfix_files() end,
    },
    {
      description = "buffers",
      command = "buffers",
      details = "add open buffers to the chat context",
      callback = function(sidebar) sidebar.file_selector:add_buffer_files() end,
    },
  }
  for _, mention in ipairs(chat_only) do
    table.insert(mentions, mention)
  end
  return mentions
end
---Merge user-configured shortcuts over the built-in ones. A user shortcut
---with the same name replaces the builtin; remaining builtins are appended.
---@return AvanteShortcut[]
function M.get_shortcuts()
  local Config = require("avante.config")
  -- Built-in shortcuts
  local builtin_shortcuts = {
    {
      name = "refactor",
      description = "Refactor code with best practices",
      details = "Automatically refactor code to improve readability, maintainability, and follow best practices while preserving functionality",
      prompt = "Please refactor this code following best practices, improving readability and maintainability while preserving functionality.",
    },
    {
      name = "test",
      description = "Generate unit tests",
      details = "Create comprehensive unit tests covering edge cases, error scenarios, and various input conditions",
      prompt = "Please generate comprehensive unit tests for this code, covering edge cases and error scenarios.",
    },
    {
      name = "document",
      description = "Add documentation",
      details = "Add clear and comprehensive documentation including function descriptions, parameter explanations, and usage examples",
      prompt = "Please add clear and comprehensive documentation to this code, including function descriptions, parameter explanations, and usage examples.",
    },
    {
      name = "debug",
      description = "Add debugging information",
      details = "Add comprehensive debugging information including logging statements, error handling, and debugging utilities",
      prompt = "Please add comprehensive debugging information to this code, including logging statements, error handling, and debugging utilities.",
    },
    {
      name = "optimize",
      description = "Optimize performance",
      details = "Analyze and optimize code for better performance considering time complexity, memory usage, and algorithmic improvements",
      prompt = "Please analyze and optimize this code for better performance, considering time complexity, memory usage, and algorithmic improvements.",
    },
    {
      name = "security",
      description = "Security review",
      details = "Perform a security review identifying potential vulnerabilities, security best practices, and recommendations for improvement",
      prompt = "Please perform a security review of this code, identifying potential vulnerabilities, security best practices, and recommendations for improvement.",
    },
  }
  local user_shortcuts = Config.shortcuts or {}
  local result = {}
  -- Create a map of builtin shortcuts by name for quick lookup
  local builtin_map = {}
  for _, shortcut in ipairs(builtin_shortcuts) do
    builtin_map[shortcut.name] = shortcut
  end
  -- Process user shortcuts first (they take precedence)
  for _, user_shortcut in ipairs(user_shortcuts) do
    if builtin_map[user_shortcut.name] then
      -- User has overridden a builtin shortcut
      table.insert(result, user_shortcut)
      builtin_map[user_shortcut.name] = nil -- Remove from builtin map
    else
      -- User has added a new shortcut
      table.insert(result, user_shortcut)
    end
  end
  -- Add remaining builtin shortcuts that weren't overridden
  -- NOTE(review): pairs() order is unspecified, so non-overridden builtins
  -- may appear in varying order between calls — confirm this is acceptable.
  for _, builtin_shortcut in pairs(builtin_map) do
    table.insert(result, builtin_shortcut)
  end
  return result
end
---Replace "#<shortcut-name>" references in `content` with the shortcut's
---prompt text.
---@param content string
---@return string new_content content with shortcut references expanded
---@return boolean has_shortcuts whether at least one shortcut was found
function M.extract_shortcuts(content)
  local shortcuts = M.get_shortcuts()
  local new_content = content
  local has_shortcuts = false
  for _, shortcut in ipairs(shortcuts) do
    -- Escape Lua pattern magic characters so user-defined shortcut names
    -- containing e.g. '-' or '+' cannot break or subvert the match.
    local escaped_name = shortcut.name:gsub("([%(%)%.%%%+%-%*%?%[%]%^%$])", "%%%1")
    local pattern = "#" .. escaped_name
    if content:match(pattern) then
      has_shortcuts = true
      -- Escape '%' in the replacement so prompts containing '%' are
      -- inserted literally instead of being treated as capture references.
      local literal_prompt = shortcut.prompt:gsub("%%", "%%%%")
      new_content = new_content:gsub(pattern, literal_prompt)
    end
  end
  return new_content, has_shortcuts
end
---Open (or background-load) the buffer for a project-relative path.
---@param path string
---@param set_current_buf? boolean focus the buffer in the current window (defaults to true)
---@return integer bufnr
function M.open_buffer(path, set_current_buf)
  if set_current_buf == nil then set_current_buf = true end
  local abs_path = M.join_paths(M.get_project_root(), path)
  local bufnr ---@type integer
  if set_current_buf then
    bufnr = vim.fn.bufnr(abs_path)
    -- Persist unsaved edits before :edit reloads the file from disk.
    if bufnr ~= -1 and vim.api.nvim_buf_is_loaded(bufnr) and vim.bo[bufnr].modified then
      vim.api.nvim_buf_call(bufnr, function() vim.cmd("noautocmd write") end)
    end
    -- NOTE(review): abs_path is not fnameescape()d; paths with spaces or
    -- special characters may break this :edit — confirm against callers.
    vim.cmd("noautocmd edit " .. abs_path)
    bufnr = vim.api.nvim_get_current_buf()
  else
    -- Load the buffer without switching to it; bufload may fail for
    -- special buffers, hence the pcall.
    bufnr = vim.fn.bufnr(abs_path, true)
    pcall(vim.fn.bufload, bufnr)
  end
  vim.cmd("filetype detect")
  return bufnr
end
---Compute the hunks where the buffer's current lines differ from
---`new_lines`. start_line is 1-based inclusive; end_line is exclusive.
---Hunks are sorted by DESCENDING start_line so they can be applied in
---order without invalidating later line numbers.
---@param bufnr integer
---@param new_lines string[]
---@return { start_line: integer, end_line: integer, content: string[] }[]
local function get_buffer_content_diffs(bufnr, new_lines)
  local old_lines = api.nvim_buf_get_lines(bufnr, 0, -1, false)
  local diffs = {}
  local prev_diff_idx = nil -- start of the hunk currently being accumulated
  for i, line in ipairs(new_lines) do
    if line ~= old_lines[i] then
      if prev_diff_idx == nil then prev_diff_idx = i end
    else
      if prev_diff_idx ~= nil then
        local content = vim.list_slice(new_lines, prev_diff_idx, i - 1)
        table.insert(diffs, { start_line = prev_diff_idx, end_line = i, content = content })
        prev_diff_idx = nil
      end
    end
  end
  -- Flush a hunk that runs to the end of new_lines.
  if prev_diff_idx ~= nil then
    table.insert(
      diffs,
      { start_line = prev_diff_idx, end_line = #new_lines + 1, content = vim.list_slice(new_lines, prev_diff_idx) }
    )
  end
  -- The buffer shrank: emit a deletion hunk for the surplus trailing lines.
  if #new_lines < #old_lines then
    table.insert(diffs, { start_line = #new_lines + 1, end_line = #old_lines + 1, content = {} })
  end
  table.sort(diffs, function(a, b) return a.start_line > b.start_line end)
  return diffs
end
--- Update the buffer content more efficiently by only updating the changed lines
---@param bufnr integer
---@param new_lines string[]
function M.update_buffer_content(bufnr, new_lines)
  local changed_regions = get_buffer_content_diffs(bufnr, new_lines)
  -- Regions arrive sorted bottom-up, so applying them in order never
  -- shifts the line numbers of regions that are still pending.
  for _, region in ipairs(changed_regions) do
    api.nvim_buf_set_lines(bufnr, region.start_line - 1, region.end_line - 1, false, region.content)
  end
end
---Rewrite the dynamic portion of a buffer (everything below
---`skip_line_count`) from avante.ui.Line objects: unbinds events from the
---old lines, replaces the text, then re-applies highlights and re-binds
---events at each new line's position.
---@param ns_id number
---@param bufnr integer
---@param old_lines avante.ui.Line[]
---@param new_lines avante.ui.Line[]
---@param skip_line_count? integer number of leading buffer lines to leave untouched
function M.update_buffer_lines(ns_id, bufnr, old_lines, new_lines, skip_line_count)
  skip_line_count = skip_line_count or 0
  old_lines = old_lines or {}
  new_lines = new_lines or {}
  -- Unbind events from existing lines before rewriting the buffer section.
  for i, old_line in ipairs(old_lines) do
    if old_line and type(old_line.unbind_events) == "function" then
      local line_1b = skip_line_count + i
      pcall(old_line.unbind_events, old_line, bufnr, line_1b)
    end
  end
  -- Collect the text representation of each line and track their positions.
  -- A single Line may stringify to multiple physical lines (embedded "\n").
  local cleaned_text_lines = {}
  local line_positions = {}
  local current_line_0b = skip_line_count
  for idx, line in ipairs(new_lines) do
    local pieces = vim.split(tostring(line), "\n")
    line_positions[idx] = current_line_0b
    vim.list_extend(cleaned_text_lines, pieces)
    current_line_0b = current_line_0b + #pieces
  end
  -- Replace the entire dynamic portion of the buffer.
  vim.api.nvim_buf_set_lines(bufnr, skip_line_count, -1, false, cleaned_text_lines)
  -- Re-apply highlights and bind events for the new lines.
  for i, line in ipairs(new_lines) do
    local line_pos_0b = line_positions[i] or (skip_line_count + i - 1)
    if type(line.set_highlights) == "function" then line:set_highlights(ns_id, bufnr, line_pos_0b) end
    if type(line.bind_events) == "function" then
      local line_1b = line_pos_0b + 1
      pcall(line.bind_events, line, ns_id, bufnr, line_1b)
    end
  end
  vim.cmd("redraw")
  -- Legacy incremental-diff implementation kept for reference:
  -- local diffs = get_lines_diff(old_lines, new_lines)
  -- if #diffs == 0 then return end
  -- for _, diff in ipairs(diffs) do
  --   local lines = diff.content
  --   local text_lines = vim.tbl_map(function(line) return tostring(line) end, lines)
  --   --- remove newlines from text_lines
  --   local cleaned_lines = {}
  --   for _, line in ipairs(text_lines) do
  --     local lines_ = vim.split(line, "\n")
  --     cleaned_lines = vim.list_extend(cleaned_lines, lines_)
  --   end
  --   vim.api.nvim_buf_set_lines(
  --     bufnr,
  --     skip_line_count + diff.start_line - 1,
  --     skip_line_count + diff.end_line - 1,
  --     false,
  --     cleaned_lines
  --   )
  --   for i, line in ipairs(lines) do
  --     line:set_highlights(ns_id, bufnr, skip_line_count + diff.start_line + i - 2)
  --   end
  --   vim.cmd("redraw")
  -- end
end
---Normalize a path for comparison: paths inside the project become
---project-relative; paths outside the project are returned untouched.
function M.uniform_path(path)
  if type(path) ~= "string" then path = tostring(path) end
  if not M.file.is_in_project(path) then return path end
  local root = M.get_project_root()
  local absolute = M.is_absolute_path(path) and path or M.join_paths(root, path)
  return M.make_relative_path(absolute, root)
end
---Whether two paths refer to the same file after normalizing both with
---M.uniform_path.
function M.is_same_file(filepath_a, filepath_b) return M.uniform_path(filepath_a) == M.uniform_path(filepath_b) end
---Strip one leading <think>...</think> block (model reasoning) from the
---content, returning only the text that follows it.
---@param content string
---@return string
function M.trim_think_content(content) return (content:gsub("^<think>.-</think>", "", 1)) end
-- Small cache: filetype detection spins up a scratch buffer, so avoid
-- repeating it for recently-seen paths.
local _filetype_lru_cache = LRUCache:new(60)
---Detect the filetype for `filepath` (memoized).
function M.get_filetype(filepath)
  local cached_filetype = _filetype_lru_cache:get(filepath)
  if cached_filetype then return cached_filetype end
  -- Some files are sometimes not detected correctly when buffer is not included
  -- https://github.com/neovim/neovim/issues/27265
  local buf = vim.api.nvim_create_buf(false, true)
  local filetype = vim.filetype.match({ filename = filepath, buf = buf }) or ""
  vim.api.nvim_buf_delete(buf, { force = true })
  -- Parse the first filetype from a multifiltype file
  filetype = filetype:gsub("%..*$", "")
  _filetype_lru_cache:set(filepath, filetype)
  return filetype
end
---Read a file's lines, preferring the loaded buffer (so unsaved edits are
---included) and falling back to disk.
---@param filepath string
---@return string[]|nil lines
---@return string|nil error
---@return string|nil errname
function M.read_file_from_buf_or_disk(filepath)
  -- term:// paths are virtual; never join them onto the project root.
  local abs_path = filepath:sub(1, 7) == "term://" and filepath or M.join_paths(M.get_project_root(), filepath)
  --- Lookup if the file is loaded in a buffer
  local ok, bufnr = pcall(vim.fn.bufnr, abs_path)
  if ok and bufnr ~= -1 and vim.api.nvim_buf_is_loaded(bufnr) then
    -- Buffer exists and is loaded: return its (possibly unsaved) content.
    return vim.api.nvim_buf_get_lines(bufnr, 0, -1, false), nil, nil
  end
  local stat, stat_err, stat_errname = vim.uv.fs_stat(abs_path)
  if not stat then return {}, stat_err, stat_errname end
  -- Fix: separate the message from the path (was "...as file<path>").
  if stat.type == "directory" then return {}, "Cannot read a directory as file: " .. filepath, nil end
  -- Fallback: read file from disk
  local file, open_err = io.open(abs_path, "r")
  if not file then return {}, open_err, nil end
  local content = file:read("*all")
  file:close()
  -- Normalize CRLF so callers always receive LF-separated lines.
  content = content:gsub("\r\n", "\n")
  return vim.split(content, "\n"), nil, nil
end
---Check if an icon plugin is installed
---(nvim-web-devicons, mini.icons, or the bundled mini.nvim).
---@return boolean
function M.icons_enabled() return M.has("nvim-web-devicons") or M.has("mini.icons") or M.has("mini.nvim") end
---Display an string with icon, if an icon plugin is available.
---Dev icons are an optional install for avante, this function prevents ugly chars
---being displayed by displaying fallback options or nothing at all.
---@param string_with_icon string
---@param utf8_fallback string|nil
---@return string
function M.icon(string_with_icon, utf8_fallback)
  if not M.icons_enabled() then return utf8_fallback or "" end
  return string_with_icon
end
---Like vim.tbl_deep_extend, but preserves a metatable on the result.
---NOTE(review): for behavior "keep" the metatable is taken from the LAST
---table, otherwise from the first — confirm that asymmetry is intentional
---against the callers.
function M.deep_extend_with_metatable(behavior, ...)
  local tables = { ... }
  local base = tables[1]
  if behavior == "keep" then base = tables[#tables] end
  local mt = getmetatable(base)
  local result = vim.tbl_deep_extend(behavior, ...)
  if mt then setmetatable(result, mt) end
  return result
end
---Current UTC wall-clock time formatted "YYYY-MM-DD HH:MM:SS".
function M.utc_now()
  -- "!*t" yields a broken-down table of the current UTC time.
  local utc_date = os.date("!*t")
  -- NOTE(review): os.time() interprets the table as LOCAL time and os.date()
  -- below formats in local time again, so the two conversions cancel out and
  -- the UTC fields are rendered as-is — confirm behavior around DST changes.
  ---@diagnostic disable-next-line: param-type-mismatch
  local utc_time = os.time(utc_date)
  return os.date("%Y-%m-%d %H:%M:%S", utc_time)
end
---Seconds elapsed from `dt1` to `dt2`; both must be "YYYY-MM-DD HH:MM:SS".
---@param dt1 string
---@param dt2 string
---@return integer delta_seconds
function M.datetime_diff(dt1, dt2)
  local pattern = "(%d+)-(%d+)-(%d+) (%d+):(%d+):(%d+)"
  local function to_time(dt)
    local y, mo, d, h, mi, s = dt:match(pattern)
    return os.time({ year = y, month = mo, day = d, hour = h, min = mi, sec = s })
  end
  return os.difftime(to_time(dt2), to_time(dt1))
end
---Convert a UTC ISO-8601 timestamp ("YYYY-MM-DDTHH:MM:SSZ") into the
---"YYYY-MM-DD HH:MM:SS" format used throughout this module.
---@param iso_str string
---@return string|nil formatted timestamp, nil on parse failure
---@return string|nil error
function M.parse_iso8601_date(iso_str)
  local y, mo, d, h, mi, s = iso_str:match("(%d+)-(%d+)-(%d+)T(%d+):(%d+):(%d+)Z")
  if not y then return nil, "Invalid ISO 8601 format" end
  local timestamp = os.time({
    year = tonumber(y),
    month = tonumber(mo),
    day = tonumber(d),
    hour = tonumber(h),
    min = tonumber(mi),
    sec = tonumber(s),
    isdst = false,
  })
  return tostring(os.date("%Y-%m-%d %H:%M:%S", timestamp)), nil
end
---Generate a random alphanumeric string of the requested length.
function M.random_string(length)
  local charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
  local chars = {}
  for i = 1, length do
    local pos = math.random(#charset)
    chars[i] = charset:sub(pos, pos)
  end
  return table.concat(chars)
end
---True when win_a's right edge directly touches win_b's left edge on screen.
function M.is_left_adjacent(win_a, win_b)
  if not vim.api.nvim_win_is_valid(win_a) or not vim.api.nvim_win_is_valid(win_b) then return false end
  local _, left_col_a = unpack(vim.fn.win_screenpos(win_a))
  local _, left_col_b = unpack(vim.fn.win_screenpos(win_b))
  -- +1 accounts for the vertical separator column between the two windows.
  return left_col_a + vim.api.nvim_win_get_width(win_a) + 1 == left_col_b
end
---True when win_a's bottom edge directly touches win_b's top edge on screen.
---Fix: guard against invalid windows (consistent with M.is_left_adjacent);
---previously nvim_win_get_height could throw for a closed window handle.
function M.is_top_adjacent(win_a, win_b)
  if not vim.api.nvim_win_is_valid(win_a) or not vim.api.nvim_win_is_valid(win_b) then return false end
  local row_a, _ = unpack(vim.fn.win_screenpos(win_a))
  local row_b, _ = unpack(vim.fn.win_screenpos(win_b))
  local height_a = vim.api.nvim_win_get_height(win_a)
  -- +1 accounts for the horizontal separator/statusline row between windows.
  return row_a + height_a + 1 == row_b
end
---Whether the border shared by two windows should be hidden (the windows
---are directly adjacent horizontally or vertically).
function M.should_hidden_border(win_a, win_b)
  return M.is_left_adjacent(win_a, win_b) or M.is_top_adjacent(win_a, win_b)
end
---Convert avante tool parameter field descriptors into JSON-Schema
---`properties` and `required` tables, recursing into nested objects/arrays.
---@param fields AvanteLLMToolParamField[]
---@return table[] properties
---@return string[] required
function M.llm_tool_param_fields_to_json_schema(fields)
  local properties = {}
  local required = {}
  for _, field in ipairs(fields) do
    if field.type == "object" and field.fields then
      local properties_, required_ = M.llm_tool_param_fields_to_json_schema(field.fields)
      properties[field.name] = {
        type = field.type,
        description = field.get_description and field.get_description() or field.description,
        properties = properties_,
        required = required_,
      }
    elseif field.type == "array" and field.items then
      -- Wrap the single item descriptor, then unwrap its generated schema.
      local properties_ = M.llm_tool_param_fields_to_json_schema({ field.items })
      local _, obj = next(properties_)
      properties[field.name] = {
        type = field.type,
        description = field.get_description and field.get_description() or field.description,
        items = obj,
      }
    else
      properties[field.name] = {
        type = field.type,
        description = field.get_description and field.get_description() or field.description,
      }
      if field.choices then properties[field.name].enum = field.choices end
    end
    if not field.optional then table.insert(required, field.name) end
  end
  -- Ensure an empty properties table encodes to "{}" rather than "[]".
  if vim.tbl_isempty(properties) then properties = vim.empty_dict() end
  return properties, required
end
---Build the list of available slash commands: user-configured commands
---first (they shadow builtins by name), then the remaining builtins.
---@return AvanteSlashCommand[]
function M.get_commands()
  local Config = require("avante.config")
  ---Render a "- name: description" help listing for the given items.
  ---@param items_ {name: string, description: string, shorthelp?: string}[]
  ---@return string
  local function get_help_text(items_)
    local help_text = ""
    for _, item in ipairs(items_) do
      help_text = help_text .. "- " .. item.name .. ": " .. (item.shorthelp or item.description) .. "\n"
    end
    return help_text
  end
  local builtin_items = {
    { description = "Show help message", name = "help" },
    { description = "Init AGENTS.md based on the current project", name = "init" },
    { description = "Clear chat history", name = "clear" },
    { description = "New chat", name = "new" },
    { description = "Compact history messages to save tokens", name = "compact" },
    {
      shorthelp = "Ask a question about specific lines",
      description = "/lines <start>-<end> <question>",
      name = "lines",
    },
    { description = "Commit the changes", name = "commit" },
  }
  -- Callbacks keyed by builtin command name; most delegate to the sidebar.
  ---@type {[AvanteSlashCommandBuiltInName]: AvanteSlashCommandCallback}
  local builtin_cbs = {
    help = function(sidebar, args, cb)
      local help_text = get_help_text(builtin_items)
      sidebar:update_content(help_text, { focus = false, scroll = false })
      if cb then cb(args) end
    end,
    clear = function(sidebar, args, cb) sidebar:clear_history(args, cb) end,
    new = function(sidebar, args, cb) sidebar:new_chat(args, cb) end,
    compact = function(sidebar, args, cb) sidebar:compact_history_messages(args, cb) end,
    init = function(sidebar, args, cb) sidebar:init_current_project(args, cb) end,
    lines = function(_, args, cb)
      if cb then cb(args) end
    end,
    commit = function(_, _, cb)
      local question = "Please commit the changes"
      if cb then cb(question) end
    end,
  }
  -- Materialize builtin items into full command records.
  local builtin_commands = vim
    .iter(builtin_items)
    :map(
      ---@param item AvanteSlashCommand
      function(item)
        return {
          name = item.name,
          description = item.description,
          callback = builtin_cbs[item.name],
          details = item.shorthelp and table.concat({ item.shorthelp, item.description }, "\n") or item.description,
        }
      end
    )
    :totable()
  -- Deduplicate by name; user commands come first, so they win.
  local commands = {}
  local seen = {}
  for _, command in ipairs(Config.slash_commands) do
    if not seen[command.name] then
      table.insert(commands, command)
      seen[command.name] = true
    end
  end
  for _, command in ipairs(builtin_commands) do
    if not seen[command.name] then
      table.insert(commands, command)
      seen[command.name] = true
    end
  end
  return commands
end
function M.get_timestamp() return tostring(os.date("%Y-%m-%d %H:%M:%S")) end
---Generate an RFC-4122-style version-4 UUID string.
---@return string
function M.uuid()
  local template = "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"
  -- Parenthesized so only the substituted string is returned; previously
  -- string.gsub's second return value (the match count, 30) leaked to
  -- callers whenever M.uuid() appeared last in an expression list.
  return (template:gsub("[xy]", function(c)
    local v = (c == "x") and math.random(0, 0xf) or math.random(8, 0xb)
    return string.format("%x", v)
  end))
end
---Parse command arguments (fargs) into a structured format
---@param fargs string[] Command arguments
---@param options? {collect_remaining?: boolean, boolean_keys?: string[]} Options for parsing
---@return table parsed_args Key-value pairs from arguments
---@return string|nil remaining_text Concatenated remaining arguments (if collect_remaining is true)
function M.parse_args(fargs, options)
  options = options or {}
  local parsed_args = {}
  local leftovers = {}
  -- Lookup set for keys that must be coerced to boolean.
  local is_boolean_key = {}
  for _, key in ipairs(options.boolean_keys or {}) do
    is_boolean_key[key] = true
  end
  for _, arg in ipairs(fargs) do
    local key, value = arg:match("([%w_]+)=(.+)")
    if key and value then
      -- "true"/"false" strings (or declared boolean keys) become booleans.
      if is_boolean_key[key] or value == "true" or value == "false" then
        parsed_args[key] = value == "true"
      else
        parsed_args[key] = value
      end
    elseif options.collect_remaining then
      leftovers[#leftovers + 1] = arg
    end
  end
  if options.collect_remaining and #leftovers > 0 then
    return parsed_args, table.concat(leftovers, " ")
  end
  return parsed_args
end
---Serialize a tool-use record as JSON wrapped in a <tool_use> XML tag.
---@param tool_use AvanteLLMToolUse
function M.tool_use_to_xml(tool_use)
  local payload = vim.json.encode({
    name = tool_use.name,
    input = tool_use.input,
  })
  return "<tool_use>" .. payload .. "</tool_use>"
end
---Whether the given tool use performs a file edit.
---@param tool_use AvanteLLMToolUse
---@return boolean
function M.is_edit_tool_use(tool_use)
  local name = tool_use.name
  if name == "str_replace" or name == "edit_file" then return true end
  if name == "str_replace_editor" or name == "str_replace_based_edit_tool" then
    -- These editor-style tools only edit when running a str_replace command.
    return tool_use.input.command == "str_replace"
  end
  return false
end
---Counts number of strings in text, accounting for possibility of a trailing newline
---@param str string | nil
---@return integer
function M.count_lines(str)
  if str == nil or str == "" then return 0 end
  local newlines = select(2, str:gsub("\n", "\n"))
  -- A trailing newline does not start an additional line.
  if str:sub(-1) == "\n" then return newlines end
  return newlines + 1
end
---Apply an override to `value`: a function override is called with the value
---(falsy results fall back to the original); a table override is merged on
---top; a nil override is treated as an empty table (no change).
function M.tbl_override(value, override)
  override = override or {}
  if type(override) ~= "function" then return vim.tbl_extend("force", value, override) end
  -- A falsy result from the override function keeps the original value.
  return override(value) or value
end
---Wrap `func` so that only the first invocation actually runs; every later
---call is a no-op returning nothing.
function M.call_once(func)
  local pending = func
  return function(...)
    if not pending then return end
    local f = pending
    pending = nil -- also releases the reference to func
    return f(...)
  end
end
--- Some models (e.g., gpt-4o) cannot correctly return diff content and often miss the SEARCH line, so this needs to be manually fixed in such cases.
---@param diff string
---@return string
function M.fix_diff(diff)
  -- First convert unified-diff style output into SEARCH/REPLACE blocks
  -- (diff2search_replace is a local helper defined earlier in this file).
  diff = diff2search_replace(diff)
  -- Normalize block headers to the expected ones (fix for some LLMs output)
  -- NOTE(review): "-" and "+" are magic characters in Lua patterns, so these
  -- marker "literals" actually match via (char, quantifier) pairs; they do
  -- match the intended 7-character markers but also some shorter runs
  -- (e.g. 4+ dashes) -- confirm that looseness is intended.
  diff = diff:gsub("<<<<<<<%s*SEARCH", "------- SEARCH")
  diff = diff:gsub(">>>>>>>%s*REPLACE", "+++++++ REPLACE")
  diff = diff:gsub("-------%s*REPLACE", "+++++++ REPLACE")
  diff = diff:gsub("------- ", "------- SEARCH\n")
  diff = diff:gsub("======= ", "=======\n")
  local fixed_diff_lines = {}
  local lines = vim.split(diff, "\n")
  local first_line = lines[1]
  if first_line and first_line:match("^%s*```") then
    -- Keep a leading code-fence line and force a SEARCH header right after it.
    table.insert(fixed_diff_lines, first_line)
    table.insert(fixed_diff_lines, "------- SEARCH")
    fixed_diff_lines = vim.list_extend(fixed_diff_lines, lines, 2)
  else
    -- Always start the result with a SEARCH header; skip the original first
    -- line when it already is one, to avoid a duplicate.
    table.insert(fixed_diff_lines, "------- SEARCH")
    if first_line:match("------- SEARCH") then
      fixed_diff_lines = vim.list_extend(fixed_diff_lines, lines, 2)
    else
      fixed_diff_lines = vim.list_extend(fixed_diff_lines, lines, 1)
    end
  end
  -- Second pass: enforce exactly one "=======" separator per block and a
  -- closing "+++++++ REPLACE" marker.
  local the_final_diff_lines = {}
  local has_split_line = false
  local replace_block_closed = false
  local should_delete_following_lines = false
  for _, line in ipairs(fixed_diff_lines) do
    if should_delete_following_lines then goto continue end
    if line:match("^-------%s*SEARCH") then has_split_line = false end
    if line:match("^=======") then
      if has_split_line then
        -- A second separator inside the same block: drop everything after it.
        should_delete_following_lines = true
        goto continue
      end
      has_split_line = true
    end
    if line:match("^+++++++%s*REPLACE") then
      if not has_split_line then
        -- REPLACE marker with no separator before it: synthesize the
        -- separator and drop the marker (it is re-appended at the end).
        table.insert(the_final_diff_lines, "=======")
        has_split_line = true
        goto continue
      else
        replace_block_closed = true
      end
    end
    table.insert(the_final_diff_lines, line)
    ::continue::
  end
  if not replace_block_closed then table.insert(the_final_diff_lines, "+++++++ REPLACE") end
  return table.concat(the_final_diff_lines, "\n")
end
--- Compute a unified diff between two strings via vim.diff().
--- result_type is always forced to "unified" and ctxlen defaults to 3.
---@param text1 string
---@param text2 string
---@param opts? table extra options forwarded to vim.diff()
---@return string
function M.get_unified_diff(text1, text2, opts)
  -- Work on a shallow copy so the forced options do not leak back into the
  -- caller's table (the previous implementation mutated `opts` in place).
  local diff_opts = {}
  for k, v in pairs(opts or {}) do
    diff_opts[k] = v
  end
  diff_opts.result_type = "unified"
  diff_opts.ctxlen = diff_opts.ctxlen or 3
  return vim.diff(text1, text2, diff_opts)
end
---Whether the given window (default: the current window) is floating.
---@param win_id? integer
---@return boolean
function M.is_floating_window(win_id)
  win_id = win_id or 0
  if not vim.api.nvim_win_is_valid(win_id) then return false end
  -- Floating windows have a non-empty `relative` field in their config.
  return vim.api.nvim_win_get_config(win_id).relative ~= ""
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/logo.lua | Lua | local logo = [[
⠀⢠⣤⣤⣤⡄⣷⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⣿⣿⣿⣿⢁⣿⣿⣷⡶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⠶⣤⡀⠀⠀
⣀⣛⣛⣛⣛⣸⣿⣿⣿⢁⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⢸⣿⣿⣦⡀
⠈⠻⣿⣿⣿⣿⣿⣿⣿⢸⣿⣿⣿⡿⠿⠿⠿⠿⠿⠿⢿⣿⣿⣿⡟⢸⣿⣿⣿⠇
⠀⠀⢸⢛⣛⣛⣛⣛⣃⣼⣿⣿⣿⠁⣿⣿⣿⣿⣿⣿⢠⣿⣿⣿⡇⣿⣿⣿⣿⠀
⠀⠀⡇⣾⣿⣿⣿⣿⣿⣿⣿⣿⣏⠸⠿⠿⠿⠿⠿⠏⣼⣿⣿⣿⢸⣿⣿⣿⡇⠀
⠀⢸⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⣾⣿⣿⣿⠀⠀
⠀⡼⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⢰⣶⣶⣶⣶⣶⣶⢸⣿⣿⣿⠃⣿⣿⣿⡿⠀⠀
⢸⣀⣉⣉⣉⣉⣉⣉⣉⣉⣉⣡⣿⣿⣿⡿⠀⢸⣅⣉⣉⣉⣁⣿⣿⣿⣿⣿⣦⡀
⠈⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⠀⠙⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿
⠀⠀⠈⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠃⠀⠀⠀⠈⠻⣿⣿⣿⣿⣿⣿⣿⣿⡇
]]
local logo_ = [[
⡉⢭⣭⣭⣭⠅⣿⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠌⣾⢲⡳⣞⢄⡿⣪⢷⠖⢖⠖⢖⠖⢖⠖⢖⠖⢖⠖⢖⠖⢖⠖⠖⠶⣦⡀⠀⠀
⣇⣉⣃⣙⣈⣸⡳⣝⢷⢑⣟⢽⡫⣟⢽⡫⣟⢽⡫⣟⢽⡫⣟⢽⡻⢸⣏⢿⣦⠀
⠈⠻⣿⡿⣿⣟⣿⣜⠯⢰⣏⢷⡝⡮⠳⠝⠮⠳⠝⠮⢳⣝⢮⡳⡏⣸⢯⢷⣽⠁
⠀⠀⣸⢛⣑⣛⣊⣛⣃⡼⣧⢻⣺⠠⣿⣟⣿⣻⣟⣿⢠⡯⣳⢽⠅⣿⡹⣗⣽⠀
⠀⠀⡇⣻⢺⢮⡺⣕⢯⡺⣕⢯⡇⠼⠫⠻⠕⠷⠵⠏⣼⢫⣞⡽⢰⡯⣻⡺⡇⠀
⠀⢨⢃⡿⣕⢯⡞⣵⡫⣯⢺⡵⣫⡻⣛⢟⣝⢟⣝⢯⡺⣵⢣⡏⢸⣯⣫⢟⠅⠀
⠀⡞⢸⡗⣽⢺⡵⣫⢞⡵⣫⠾⢰⣶⣶⣶⣶⣶⡶⢸⡳⣝⡽⢂⣿⡺⣳⡏⠀⠀
⣸⣈⣃⣋⣊⣑⣉⣊⣣⣙⣘⣤⣟⢷⣫⡗⠀⢸⣄⣋⣜⣙⣢⡽⣳⢯⢾⡽⣦⡀
⠈⠛⣿⢿⡿⣿⢿⣿⣻⣟⣿⣳⣟⣷⢝⡇⠀⠀⠹⢿⣻⢿⣽⣻⣗⢿⡵⣻⡺⣏
⠀⠀⠈⠛⠙⠛⠓⠛⠚⠙⠚⠓⠛⠚⠛⠁⠀⠀⠀⠈⠹⣟⣾⣳⣟⢿⣞⣽⣝⡇
]]
-- Export only the primary Braille-art logo; `logo_` above is an unused
-- alternative rendering kept as an asset.
return logo
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/lru_cache.lua | Lua | local LRUCache = {}
LRUCache.__index = LRUCache
--- Create a new LRU cache that holds at most `capacity` entries.
---@param capacity integer maximum number of entries before eviction kicks in
function LRUCache:new(capacity)
  local instance = {
    capacity = capacity,
    cache = {}, -- key -> doubly-linked-list node {key, value, prev, next}
    head = nil, -- most recently used node
    tail = nil, -- least recently used node
    size = 0,
  }
  return setmetatable(instance, LRUCache)
end
-- Internal: promote `node` to the head of the list (most recently used).
function LRUCache:_move_to_head(node)
  if node == self.head then return end
  -- Unlink the node from its current position.
  local before, after = node.prev, node.next
  if before then before.next = after end
  if after then after.prev = before end
  if node == self.tail then self.tail = before end
  -- Splice it in front of the current head.
  local old_head = self.head
  node.prev = nil
  node.next = old_head
  if old_head then old_head.prev = node end
  self.head = node
  -- First insertion into an empty list: the node is the tail as well.
  if self.tail == nil then self.tail = node end
end
-- Look up `key`, marking the entry as most recently used.
-- Returns nil when the key is absent.
function LRUCache:get(key)
  local entry = self.cache[key]
  if entry == nil then return nil end
  self:_move_to_head(entry)
  return entry.value
end
-- Insert or update `key` with `value`; evicts the least recently used entry
-- when the cache grows beyond its capacity.
function LRUCache:set(key, value)
  local entry = self.cache[key]
  if entry ~= nil then
    -- Existing key: refresh the value and its recency only.
    entry.value = value
    self:_move_to_head(entry)
    return
  end
  entry = { key = key, value = value }
  self.cache[key] = entry
  self.size = self.size + 1
  self:_move_to_head(entry)
  if self.size > self.capacity then
    -- Drop the tail (least recently used) node.
    local evicted = self.tail
    self.tail = evicted.prev
    if self.tail then self.tail.next = nil end
    self.cache[evicted.key] = nil
    self.size = self.size - 1
  end
end
-- Remove `key` from the cache; no-op when the key is absent.
function LRUCache:remove(key)
  local entry = self.cache[key]
  if entry == nil then return end
  local before, after = entry.prev, entry.next
  -- Relink neighbours, updating head/tail when the node was at an end.
  if before then before.next = after else self.head = after end
  if after then after.prev = before else self.tail = before end
  self.cache[key] = nil
  self.size = self.size - 1
end
-- Number of entries currently stored in the cache.
function LRUCache:get_size()
  return self.size
end
-- Maximum number of entries the cache may hold.
function LRUCache:get_capacity()
  return self.capacity
end
-- Debug helper: dump key/value pairs from most to least recently used.
function LRUCache:print_cache()
  local cursor = self.head
  while cursor ~= nil do
    print(cursor.key, cursor.value)
    cursor = cursor.next
  end
end
-- List all keys ordered from most to least recently used.
function LRUCache:keys()
  local result = {}
  local cursor = self.head
  while cursor do
    result[#result + 1] = cursor.key
    cursor = cursor.next
  end
  return result
end
return LRUCache
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/lsp.lua | Lua | ---@class avante.utils.lsp
local M = {}
local LspMethod = vim.lsp.protocol.Methods
---@alias vim.lsp.Client.filter {id?: number, bufnr?: number, name?: string, method?: string, filter?:fun(client: vim.lsp.Client):boolean}
---Get LSP clients matching `opts`, optionally narrowed by the extra
---predicate `opts.filter`.
---@param opts? vim.lsp.Client.filter
---@return vim.lsp.Client[]
function M.get_clients(opts)
  local clients = vim.lsp.get_clients(opts) ---@type vim.lsp.Client[]
  if opts and opts.filter then return vim.tbl_filter(opts.filter, clients) end
  return clients
end
-- Substrings that mark a treesitter node type as a definition-like construct.
local ts_definition_keywords = {
  "function",
  "method",
  "variable",
  "class",
  "type",
  "parameter",
  "field",
  "property",
  "enum",
  "assignment",
  "struct",
  "declaration",
}

--- Walk up from `node` to the nearest ancestor (or the node itself) whose
--- treesitter type contains one of the definition-like keywords above
--- (function, variable, class, ...). Returns nil when none is found.
---@param node TSNode|nil
---@return TSNode|nil
local function get_ts_node_parent(node)
  -- Iterative form of the original tail recursion; `node_type` avoids
  -- shadowing the builtin `type` function.
  while node do
    local node_type = node:type()
    for _, keyword in ipairs(ts_definition_keywords) do
      if node_type:find(keyword, 1, true) then return node end
    end
    node = node:parent()
  end
  return nil
end
--- Extract the full source text of the definition at `location` (an LSP
--- Location). Loads the target file into a scratch buffer, parses it with
--- treesitter, widens the position to its enclosing definition-like node, and
--- returns that node's text as a list of lines ({} on failure).
local function get_full_definition(location)
  local uri = location.uri
  -- NOTE(review): assumes a file:// URI; other schemes would leave a bogus
  -- path here -- confirm callers only pass file URIs.
  local filepath = uri:gsub("^file://", "")
  local full_lines = vim.fn.readfile(filepath)
  local buf = vim.api.nvim_create_buf(false, true) -- unlisted scratch buffer
  vim.api.nvim_buf_set_lines(buf, 0, -1, false, full_lines)
  local filetype = vim.filetype.match({ filename = filepath, buf = buf }) or ""
  --- use tree-sitter to get the full definition
  local lang = vim.treesitter.language.get_lang(filetype)
  local parser = vim.treesitter.get_parser(buf, lang)
  if not parser then
    -- Always delete the scratch buffer before bailing out.
    vim.api.nvim_buf_delete(buf, { force = true })
    return {}
  end
  local tree = parser:parse()[1]
  local root = tree:root()
  -- Zero-width range at the definition's start position.
  local node = root:named_descendant_for_range(
    location.range.start.line,
    location.range.start.character,
    location.range.start.line,
    location.range.start.character
  )
  if not node then
    vim.api.nvim_buf_delete(buf, { force = true })
    return {}
  end
  -- Widen to the nearest definition-like ancestor; fall back to the node itself.
  local parent = get_ts_node_parent(node)
  if not parent then parent = node end
  local text = vim.treesitter.get_node_text(parent, buf)
  vim.api.nvim_buf_delete(buf, { force = true })
  return vim.split(text, "\n")
end
---Find definitions of `symbol_name` via the LSP "workspace/symbol" request
---and resolve each exact-name hit to its full source text. Asynchronous:
---either a list of definitions or an error string is passed to `on_complete`.
---@param bufnr number
---@param symbol_name string
---@param show_line_numbers boolean
---@param on_complete fun(definitions: avante.lsp.Definition[] | nil, error: string | nil)
function M.read_definitions(bufnr, symbol_name, show_line_numbers, on_complete)
  local clients = vim.lsp.get_clients({ bufnr = bufnr })
  if #clients == 0 then
    on_complete(nil, "No LSP client found")
    return
  end
  -- At least one attached client must support workspace/symbol.
  local supports_workspace_symbol = false
  for _, client in ipairs(clients) do
    if client:supports_method(LspMethod.workspace_symbol) then
      supports_workspace_symbol = true
      break
    end
  end
  if not supports_workspace_symbol then
    on_complete(nil, "Cannot read definitions.")
    return
  end
  local params = { query = symbol_name }
  vim.lsp.buf_request_all(bufnr, LspMethod.workspace_symbol, params, function(results)
    -- NOTE(review): buf_request_all delivers results keyed by client id;
    -- the `#`/`ipairs` usage below assumes contiguous ids starting at 1 --
    -- verify against the Neovim API contract.
    if not results or #results == 0 then
      on_complete(nil, "No results")
      return
    end
    ---@type avante.lsp.Definition[]
    local res = {}
    for _, result in ipairs(results) do
      -- Abort on the first client error, whichever field shape it uses.
      if result.err then
        on_complete(nil, result.err.message)
        return
      end
      ---@diagnostic disable-next-line: undefined-field
      if result.error then
        ---@diagnostic disable-next-line: undefined-field
        on_complete(nil, result.error.message)
        return
      end
      if not result.result then goto continue end
      -- Keep only exact name matches; servers may return fuzzy hits.
      local definitions = vim.tbl_filter(function(d) return d.name == symbol_name end, result.result)
      if #definitions == 0 then
        on_complete(nil, "No definition found")
        return
      end
      for _, definition in ipairs(definitions) do
        local lines = get_full_definition(definition.location)
        if show_line_numbers then
          -- NOTE(review): `start_line + i` numbers the first line as
          -- start_line + 1 (0-based LSP start + 1-based loop index) --
          -- confirm consumers expect this numbering.
          local start_line = definition.location.range.start.line
          local new_lines = {}
          for i, line_ in ipairs(lines) do
            table.insert(new_lines, tostring(start_line + i) .. ": " .. line_)
          end
          lines = new_lines
        end
        local uri = definition.location.uri
        table.insert(res, { content = table.concat(lines, "\n"), uri = uri })
      end
      ::continue::
    end
    on_complete(res, nil)
  end)
end
-- Maps vim.diagnostic.severity integer codes to human-readable names.
local severity = {
  [1] = "ERROR",
  [2] = "WARNING",
  [3] = "INFORMATION",
  [4] = "HINT",
}
---@class AvanteDiagnostic
---@field content string
---@field start_line number
---@field end_line number
---@field severity string
---@field source string
---Collect all diagnostics of a buffer as AvanteDiagnostic records
---(1-indexed line numbers, severity as a readable name).
---@param bufnr integer
---@return AvanteDiagnostic[]
function M.get_diagnostics(bufnr)
  if bufnr == nil then bufnr = vim.api.nvim_get_current_buf() end
  -- Explicitly request every severity level (equivalent to no filter, but
  -- keeps the intent visible).
  local raw = vim.diagnostic.get(bufnr, {
    severity = {
      vim.diagnostic.severity.ERROR,
      vim.diagnostic.severity.WARN,
      vim.diagnostic.severity.INFO,
      vim.diagnostic.severity.HINT,
    },
  }) ---@type vim.Diagnostic[]
  local result = {}
  for _, diag in ipairs(raw) do
    result[#result + 1] = {
      content = diag.message,
      start_line = diag.lnum + 1, -- diagnostics are 0-indexed; convert to 1-indexed
      end_line = diag.end_lnum and diag.end_lnum + 1 or diag.lnum + 1,
      severity = severity[diag.severity],
      source = diag.source,
    }
  end
  return result
end
---Collect diagnostics for a file by opening it in a (non-focused) buffer.
---@param filepath string
---@return AvanteDiagnostic[]
function M.get_diagnostics_from_filepath(filepath)
  local bufnr = require("avante.utils").open_buffer(filepath, false)
  return M.get_diagnostics(bufnr)
end
---Diagnostics of `bufnr` that fall entirely within the selection's line range.
---@param bufnr integer
---@param selection avante.SelectionResult
function M.get_current_selection_diagnostics(bufnr, selection)
  local result = {}
  local first = selection.range.start.lnum
  local last = selection.range.finish.lnum
  for _, diagnostic in ipairs(M.get_diagnostics(bufnr)) do
    if first <= diagnostic.start_line and diagnostic.end_line <= last then
      result[#result + 1] = diagnostic
    end
  end
  return result
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/path.lua | Lua | local OS_NAME = vim.uv.os_uname().sysname ---@type string|nil
local IS_WIN = OS_NAME == "Windows_NT" ---@type boolean
local SEP = IS_WIN and "\\" or "/" ---@type string
local BYTE_SLASH = 0x2f ---@type integer '/'
local BYTE_BACKSLASH = 0x5c ---@type integer '\\'
local BYTE_COLON = 0x3a ---@type integer ':'
local BYTE_PATHSEP = string.byte(SEP) ---@type integer
---@class avante.utils.path
local M = {}
M.SEP = SEP
---Whether the host operating system is Windows.
---@return boolean
function M.is_win()
  return IS_WIN
end
---Return the last path component of `filepath`, ignoring any run of trailing
---separators (both "/" and "\"): "a/b", "a/b/" and "a\\b" all yield "b".
---Returns "" for "".
---@param filepath string
---@return string
function M.basename(filepath)
  if filepath == "" then return "" end
  -- pos_invalid: one past the end of the basename (shrinks through the run of
  -- trailing separators); pos_sep: index of the separator just before it.
  local pos_invalid = #filepath + 1 ---@type integer
  local pos_sep = 0 ---@type integer
  for i = #filepath, 1, -1 do
    local byte = string.byte(filepath, i, i) ---@type integer
    if byte == BYTE_SLASH or byte == BYTE_BACKSLASH then
      if i + 1 == pos_invalid then
        -- Still inside the trailing-separator run; keep trimming.
        pos_invalid = i
      else
        -- First separator left of the basename: stop scanning.
        pos_sep = i
        break
      end
    end
  end
  -- No separator found and nothing trimmed: the whole string is the basename.
  if pos_sep == 0 and pos_invalid == #filepath + 1 then return filepath end
  return string.sub(filepath, pos_sep + 1, pos_invalid - 1)
end
---Return the directory part of `filepath`: all normalized components (as
---produced by M.split) except the last one. A root-anchored path with a
---single component yields "/".
---@param filepath string
---@return string
function M.dirname(filepath)
  local pieces = M.split(filepath)
  if #pieces == 1 then
    local piece = pieces[1] ---@type string
    -- An empty piece plus a leading "/" means the path is the root itself.
    return piece == "" and string.byte(filepath, 1, 1) == BYTE_SLASH and "/" or piece
  end
  -- Join everything but the last component; empty result on an absolute path
  -- collapses back to "/".
  local dirpath = #pieces > 0 and table.concat(pieces, SEP, 1, #pieces - 1) or "" ---@type string
  return dirpath == "" and string.byte(filepath, 1, 1) == BYTE_SLASH and "/" or dirpath
end
---Return the file extension of `filename`, dot included ("" when none).
---@param filename string
---@return string
function M.extname(filename)
  local ext = filename:match("%.[^.]+$")
  return ext or ""
end
---Whether `filepath` is absolute: on Windows a drive-letter path ("C:...");
---elsewhere a path starting with the platform separator.
---@param filepath string
---@return boolean
function M.is_absolute(filepath)
  if IS_WIN then return #filepath > 1 and filepath:byte(2) == BYTE_COLON end
  return filepath:byte(1) == BYTE_PATHSEP
end
---Whether a filesystem entry of any type exists at `filepath`.
---@param filepath string
---@return boolean
function M.is_exist(filepath)
  local stat = vim.uv.fs_stat(filepath)
  if stat == nil then return false end
  return not vim.tbl_isempty(stat)
end
---Whether `dirpath` exists and is a directory.
---@param dirpath string
---@return boolean
function M.is_exist_dirpath(dirpath)
  local stat = vim.uv.fs_stat(dirpath)
  if not stat then return false end
  return stat.type == "directory"
end
---Whether `filepath` exists and is a regular file.
---@param filepath string
---@return boolean
function M.is_exist_filepath(filepath)
  local stat = vim.uv.fs_stat(filepath)
  if not stat then return false end
  return stat.type == "file"
end
---Join two path segments with the platform separator and normalize the result.
---@param from string
---@param to string
---@return string
function M.join(from, to)
  return M.normalize(from .. SEP .. to)
end
---Recursively create `dirpath` unless something already exists there.
function M.mkdir_if_nonexist(dirpath)
  if M.is_exist(dirpath) then return end
  vim.fn.mkdir(dirpath, "p")
end
---Normalize `filepath`: resolve "." and ".." segments via M.split and rejoin
---with the platform separator. "%XX" hex escapes are decoded first.
---@param filepath string
---@return string
function M.normalize(filepath)
  if filepath == "/" and not IS_WIN then return "/" end
  if filepath == "" then return "." end
  -- NOTE(review): this URL-style percent-decoding suggests callers may pass
  -- URI-escaped paths -- confirm, since it would also mangle real file names
  -- containing literal "%41"-like sequences.
  filepath = filepath:gsub("%%(%x%x)", function(hex) return string.char(tonumber(hex, 16)) end)
  return table.concat(M.split(filepath), SEP)
end
---Compute the path of `to` relative to `from`. When exactly one of the two is
---absolute they cannot be related, so `to` is returned normalized instead.
---@param from string
---@param to string
---@param prefer_slash boolean use "/" as separator regardless of platform
---@return string
function M.relative(from, to, prefer_slash)
  local is_from_absolute = M.is_absolute(from) ---@type boolean
  local is_to_absolute = M.is_absolute(to) ---@type boolean
  -- Mixed absolute/relative inputs: fall back to `to` as-is.
  if is_from_absolute and not is_to_absolute then return M.normalize(to) end
  if is_to_absolute and not is_from_absolute then return M.normalize(to) end
  local from_pieces = M.split(from) ---@type string[]
  local to_pieces = M.split(to) ---@type string[]
  -- Length of the common prefix of both component lists.
  local L = #from_pieces < #to_pieces and #from_pieces or #to_pieces
  local i = 1
  while i <= L do
    if from_pieces[i] ~= to_pieces[i] then break end
    i = i + 1
  end
  -- Absolute paths that only share the root sentinel (pieces[1] == "") have
  -- no useful common prefix; return `to` normalized.
  if i == 2 and is_to_absolute then return M.normalize(to) end
  local sep = prefer_slash and "/" or SEP
  -- One ".." per remaining `from` component, then the remaining `to` pieces.
  local p = "" ---@type string
  for _ = i, #from_pieces do
    p = p .. sep .. ".." ---@type string
  end
  for j = i, #to_pieces do
    p = p .. sep .. to_pieces[j] ---@type string
  end
  if p == "" then return "." end
  -- Strip the separator prepended by the concatenation loops above.
  return #p > 1 and string.sub(p, 2) or p
end
---Resolve `to` against `cwd`: absolute paths are normalized as-is; relative
---paths are joined onto `cwd` first.
---@param cwd string
---@param to string
function M.resolve(cwd, to)
  if M.is_absolute(to) then return M.normalize(to) end
  return M.normalize(cwd .. SEP .. to)
end
---Split `filepath` into components, resolving "." and ".." segments. An
---absolute POSIX path contributes a leading "" sentinel and a trailing
---separator a trailing "" sentinel; on Windows the drive letter is
---upper-cased.
---@param filepath string
---@return string[]
function M.split(filepath)
  local pieces = {} ---@type string[]
  local pattern = "([^/\\]+)" ---@type string
  local has_sep_prefix = SEP == "/" and string.byte(filepath, 1, 1) == BYTE_PATHSEP ---@type boolean
  local has_sep_suffix = #filepath > 1 and string.byte(filepath, #filepath, #filepath) == BYTE_PATHSEP ---@type boolean
  if has_sep_prefix then pieces[1] = "" end
  for piece in string.gmatch(filepath, pattern) do
    if piece ~= "" and piece ~= "." then
      if piece == ".." and (has_sep_prefix or #pieces > 0) then
        -- ".." pops the previous component; an unmatched leading ".." on a
        -- relative path falls through to the else branch and is kept.
        pieces[#pieces] = nil
      else
        pieces[#pieces + 1] = piece
      end
    end
  end
  if has_sep_suffix then pieces[#pieces + 1] = "" end
  -- NOTE(review): ".." popping can also consume the "" root sentinel (e.g.
  -- "/.."), and has_sep_prefix requires SEP == "/" so "\\"-rooted Windows
  -- paths get no sentinel -- confirm these edges are intended.
  if IS_WIN and #filepath > 1 and string.byte(filepath, 2, 2) == BYTE_COLON then pieces[1] = pieces[1]:upper() end
  return pieces
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/promptLogger.lua | Lua | local Config = require("avante.config")
local Utils = require("avante.utils")
-- Highlight group used to mark the filter text inside the prompt buffer.
local AVANTE_PROMPT_INPUT_HL = "AvantePromptInputHL"
-- last one in entries is always to hold current input
-- `idx` is a 0-based cursor into `filtered_entries`, the view of `entries`
-- matching the user's current input filter (or all of them).
local entries, idx = {}, 0
local filtered_entries = {}
---@class avante.utils.promptLogger
local M = {}
--- (Re)load the prompt history from disk into module state. The log file
--- holds one JSON entry per line; a trailing empty entry is appended to hold
--- the user's in-progress input, and `idx` is left pointing at it (0-based).
function M.init()
  vim.api.nvim_set_hl(0, AVANTE_PROMPT_INPUT_HL, {
    fg = "#ff7700",
    bg = "#333333",
    bold = true,
    italic = true,
    underline = true,
  })
  entries = {}
  local dir = Config.prompt_logger.log_dir
  local log_file = Utils.join_paths(dir, "avante_prompts.log")
  local file = io.open(log_file, "r")
  if file then
    -- Strip one trailing newline so split() does not yield an empty tail.
    local content = file:read("*a"):gsub("\n$", "")
    file:close()
    local lines = vim.split(content, "\n", { plain = true })
    for _, line in ipairs(lines) do
      -- Lines that fail to decode or miss required fields are skipped
      -- (tolerates a corrupt or hand-edited log).
      local ok, entry = pcall(vim.fn.json_decode, line)
      if ok and entry and entry.time and entry.input then table.insert(entries, entry) end
    end
  end
  table.insert(entries, { input = "" })
  idx = #entries - 1
  filtered_entries = entries
end
---Append `request` to the on-disk prompt log and the in-memory history,
---de-duplicating earlier identical entries and trimming the history to
---Config.prompt_logger.max_entries. Empty requests are ignored.
function M.log_prompt(request)
  if request == "" then return end
  local log_dir = Config.prompt_logger.log_dir
  local log_file = Utils.join_paths(log_dir, "avante_prompts.log")
  if vim.fn.isdirectory(log_dir) == 0 then vim.fn.mkdir(log_dir, "p") end
  local entry = {
    time = Utils.get_timestamp(),
    input = request,
  }
  -- Remove any existing entries with the same input
  -- (downward iteration so removals don't skip elements; the reserved last
  -- slot holding the in-progress input is excluded).
  for i = #entries - 1, 1, -1 do
    if entries[i].input == entry.input then table.remove(entries, i) end
  end
  -- Add the new entry
  if #entries > 0 then
    -- Insert just before the reserved in-progress slot.
    table.insert(entries, #entries, entry)
    idx = #entries - 1
    filtered_entries = entries
  else
    table.insert(entries, entry)
  end
  local max = Config.prompt_logger.max_entries
  -- Left trim entries if the count exceeds max_entries
  -- We need to keep the last entry (current input) and trim from the beginning
  if max > 0 and #entries > max + 1 then
    -- Calculate how many entries to remove
    local to_remove = #entries - max - 1
    -- Remove oldest entries from the beginning
    for _ = 1, to_remove do
      table.remove(entries, 1)
    end
  end
  local file = io.open(log_file, "w")
  if file then
    -- Write all entries to the log file, except the last one
    -- (the reserved in-progress slot is never persisted).
    for i = 1, #entries - 1, 1 do
      file:write(vim.fn.json_encode(entries[i]) .. "\n")
    end
    file:close()
  else
    vim.notify("Failed to log prompt", vim.log.levels.ERROR)
  end
end
-- Step `delta` entries back through the filtered history, wrapping around.
-- `idx` is kept 0-based while Lua arrays are 1-based, hence the +1 on return.
local function _read_log(delta)
  local count = #filtered_entries
  -- index of array starts from 1 in lua, while this idx starts from 0
  idx = ((idx - delta) % count + count) % count
  return filtered_entries[idx + 1]
end
-- Sync the prompt buffer's contents into the reserved last history slot and
-- rebuild `filtered_entries` as the case-insensitive substring matches of
-- that input (plus the reserved slot). Resets `idx` to the reserved slot.
local function update_current_input()
  local user_input = table.concat(vim.api.nvim_buf_get_lines(0, 0, -1, false), "\n")
  -- Only refilter when we're on the reserved slot or the displayed entry no
  -- longer matches the buffer (i.e. the user actually typed something new).
  if idx == #filtered_entries - 1 or filtered_entries[idx + 1].input ~= user_input then
    entries[#entries].input = user_input
    vim.fn.clearmatches()
    -- Apply filtering if there's user input
    if user_input and user_input ~= "" then
      filtered_entries = {}
      for i = 1, #entries - 1 do
        if entries[i].input:lower():find(user_input:lower(), 1, true) then
          table.insert(filtered_entries, entries[i])
        end
      end
      -- Add the current input as the last entry
      table.insert(filtered_entries, entries[#entries])
      -- NOTE(review): matchadd() interprets `user_input` as a regex pattern;
      -- special characters in the prompt may mis-highlight or error -- verify.
      vim.fn.matchadd(AVANTE_PROMPT_INPUT_HL, user_input)
    else
      filtered_entries = entries
    end
    idx = #filtered_entries - 1
  end
end
---Build a callback (for a keymap) that replaces the prompt buffer with the
---history entry `delta` steps away (after syncing/filtering against the
---current input) and moves the cursor to the end of the restored text.
function M.on_log_retrieve(delta)
  return function()
    update_current_input()
    local res = _read_log(delta)
    if not res or not res.input then
      vim.notify("No log entry found.", vim.log.levels.WARN)
      return
    end
    vim.api.nvim_buf_set_lines(0, 0, -1, false, vim.split(res.input, "\n", { plain = true }))
    -- Cursor to the last line, at the byte length of that line's text.
    vim.api.nvim_win_set_cursor(
      0,
      { vim.api.nvim_buf_line_count(0), #vim.api.nvim_buf_get_lines(0, -2, -1, false)[1] }
    )
  end
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/prompts.lua | Lua | local Config = require("avante.config")
local M = {}
---Build the ReAct-style system prompt: the caller's base system prompt plus
---(unless tools are disabled) the XML tool-use protocol description, a
---rendered catalog of every available tool, and worked examples whose file
---editing flavor depends on Config.behaviour.enable_fastapply.
---@param provider_conf AvanteDefaultBaseProvider
---@param opts AvantePromptOptions
---@return string
function M.get_ReAct_system_prompt(provider_conf, opts)
  local system_prompt = opts.system_prompt
  local disable_tools = provider_conf.disable_tools or false
  if not disable_tools and opts.tools then
    local tools_prompts = [[
====
TOOL USE
You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
Tool use is formatted using XML-style tags. Each tool use is wrapped in a <tool_use> tag. The tool use content is a valid JSON with tool name and tool input. Here's the structure:
<tool_use>
{
"name": "tool_name",
"input": {
"parameter1_name": "value1",
"parameter2_name": "value2",
...
}
}
</tool_use>
For example:
<tool_use>
{"name": "attempt_completion", "input": {"result": "I have completed the task..."}}
</tool_use>
<tool_use>
{"name": "bash", "input": {"path": "./src", "command": "npm run dev"}}
</tool_use>
ALWAYS ADHERE TO this format for the tool use to ensure proper parsing and execution.
## RULES
- When outputting the JSON for tool_use, you MUST first output the "name" field and then the "input" field.
- The value of "input" MUST be VALID JSON.
- If the "input" JSON object contains a "path" field, you MUST output the "path" field before any other fields.
## OUTPUT FORMAT
Please remember you are not allowed to use any format related to function calling or fc or tool_code.
# Tools
]]
    -- Render "## name / Description / Parameters" for each tool; a dynamic
    -- get_description() wins over the static description field.
    for _, tool in ipairs(opts.tools) do
      local tool_prompt = ([[
## {{name}}
Description: {{description}}
Parameters:
]]):gsub("{{name}}", tool.name):gsub(
        "{{description}}",
        tool.get_description and tool.get_description() or (tool.description or "")
      )
      for _, field in ipairs(tool.param.fields) do
        if field.optional then
          tool_prompt = tool_prompt .. string.format(" - %s: %s\n", field.name, field.description)
        else
          tool_prompt = tool_prompt
            .. string.format(
              " - %s: (required) %s\n",
              field.name,
              field.get_description and field.get_description() or (field.description or "")
            )
        end
        if field.choices then
          -- Enumerate allowed values as a comma-separated list.
          tool_prompt = tool_prompt .. " - Choices: "
          for i, choice in ipairs(field.choices) do
            tool_prompt = tool_prompt .. string.format("%s", choice)
            if i ~= #field.choices then tool_prompt = tool_prompt .. ", " end
          end
          tool_prompt = tool_prompt .. "\n"
        end
      end
      if tool.param.usage then
        -- Append a concrete usage example encoded as tool-use JSON.
        tool_prompt = tool_prompt .. "Usage:\n<tool_use>"
        local tool_use = {
          name = tool.name,
          input = tool.param.usage,
        }
        local tool_use_json = vim.json.encode(tool_use)
        tool_prompt = tool_prompt .. tool_use_json .. "</tool_use>\n"
      end
      tools_prompts = tools_prompts .. tool_prompt .. "\n"
    end
    system_prompt = system_prompt .. tools_prompts
    system_prompt = system_prompt
      .. [[
# Tool Use Examples
## Example 1: Requesting to execute a command
<tool_use>{"name": "bash", "input": {"path": "./src", "command": "npm run dev"}}</tool_use>
]]
    if Config.behaviour.enable_fastapply then
      -- Fast-apply mode: file editing examples use the edit_file tool.
      system_prompt = system_prompt
        .. [[
## Example 2: Requesting to create a new file
<tool_use>{"name": "edit_file", "input": {"path": "src/frontend-config.json", "instructions": "write the following content to the file", "code_edit": "// ... existing code ...\nFIRST_EDIT\n// ... existing code ...\nSECOND_EDIT\n// ... existing code ...\nTHIRD_EDIT\n// ... existing code ...\n\n"}}</tool_use>
## Example 3: Requesting to make targeted edits to a file
<tool_use>{"name": "edit_file", "input": {"path": "src/frontend-config.json", "instructions": "write the following content to the file", "code_edit": "// ... existing code ...\nFIRST_EDIT\n// ... existing code ...\nSECOND_EDIT\n// ... existing code ...\nTHIRD_EDIT\n// ... existing code ...\n\n"}}</tool_use>
]]
    else
      -- Classic mode: examples use write_to_file / str_replace instead.
      system_prompt = system_prompt
        .. [[
## Example 2: Requesting to create a new file
<tool_use>{"name": "write_to_file", "input": {"path": "src/frontend-config.json", "the_content": "{\n \"apiEndpoint\": \"https://api.example.com\",\n \"theme\": {\n \"primaryColor\": \"#007bff\",\n \"secondaryColor\": \"#6c757d\",\n \"fontFamily\": \"Arial, sans-serif\"\n },\n \"features\": {\n \"darkMode\": true,\n \"notifications\": true,\n \"analytics\": false\n },\n \"version\": \"1.0.0\"\n}"}}</tool_use>
## Example 3: Requesting to make targeted edits to a file
<tool_use>{"name": "str_replace", "input": {"path": "src/components/App.tsx", "old_str": "import React from 'react';", "new_str": "import React, { useState } from 'react';"}}</tool_use>
]]
    end
    system_prompt = system_prompt
      .. [[
## Example 4: Complete current task
<tool_use>{"name": "attempt_completion", "input": {"result": "I've successfully created the requested React component with the following features:\n- Responsive layout\n- Dark/light mode toggle\n- Form validation\n- API integration"}}</tool_use>
## Example 5: Write todos
<tool_use>{"name": "write_todos", "input": {"todos": [{"id": "1", "content": "Implement a responsive layout", "status": "todo", "priority": "low"}, {"id": "2", "content": "Add dark/light mode toggle", "status": "todo", "priority": "medium"}]}}</tool_use>
]]
  end
  return system_prompt
end
--- Get the content of AGENTS.md or CLAUDE.md or OPENCODE.md
--- (and other well-known agent rule files); the first readable one wins.
---@return string | nil
function M.get_agents_rules_prompt()
  local Utils = require("avante.utils")
  local project_root = Utils.get_project_root()
  -- Candidate rule files, checked in priority order.
  local candidates = {
    "AGENTS.md",
    "CLAUDE.md",
    "OPENCODE.md",
    ".cursorrules",
    ".windsurfrules",
    Utils.join_paths(".github", "copilot-instructions.md"),
  }
  for _, name in ipairs(candidates) do
    local path = Utils.join_paths(project_root, name)
    if vim.fn.filereadable(path) == 1 then
      local lines = vim.fn.readfile(path)
      if lines then return table.concat(lines, "\n") end
    end
  end
  return nil
end
---Collect applicable Cursor rules (.cursor/rules/*.mdc) for the given files.
---Rules with alwaysApply=true are always included; otherwise a rule's body is
---included when any selected file path matches one of the rule's globs.
---@param selected_files AvanteSelectedFile[]
---@return string | nil concatenated rule bodies, or nil when none apply
function M.get_cursor_rules_prompt(selected_files)
  local Utils = require("avante.utils")
  local project_root = Utils.get_project_root()
  local accumulated_content = ""
  ---@type string[]
  local mdc_files = vim.fn.globpath(Utils.join_paths(project_root, ".cursor/rules"), "*.mdc", false, true)
  for _, file_path in ipairs(mdc_files) do
    ---@type string[]
    local content = vim.fn.readfile(file_path)
    -- NOTE(review): assumes a frontmatter of exactly three fields delimited
    -- by "---" on lines 1 and 5; files with any other header layout are
    -- silently skipped -- confirm this rigidity is acceptable.
    if content[1] ~= "---" or content[5] ~= "---" then goto continue end
    local header, body = table.concat(content, "\n", 2, 4), table.concat(content, "\n", 6)
    local _description, globs, alwaysApply = header:match("description:%s*(.*)\nglobs:%s*(.*)\nalwaysApply:%s*(.*)")
    if not globs then goto continue end
    globs = vim.trim(globs)
    -- TODO: When empty string, this means the agent should request for this rule ad-hoc.
    if globs == "" then goto continue end
    local globs_array = vim.split(globs, ",%s*")
    -- Translate each glob into two Lua patterns: one keeping "**" as a deep
    -- wildcard and one with the "**/" prefix stripped (to match bare names).
    local path_regexes = {} ---@type string[]
    for _, glob in ipairs(globs_array) do
      path_regexes[#path_regexes + 1] = glob:gsub("%*%*", ".+"):gsub("%*", "[^/]*")
      path_regexes[#path_regexes + 1] = glob:gsub("%*%*/", ""):gsub("%*", "[^/]*")
    end
    local always_apply = alwaysApply == "true"
    if always_apply then
      accumulated_content = accumulated_content .. "\n" .. body
    else
      -- Include the rule body at most once, on the first match.
      local matched = false
      for _, selected_file in ipairs(selected_files) do
        for _, path_regex in ipairs(path_regexes) do
          if string.match(selected_file.path, path_regex) then
            accumulated_content = accumulated_content .. "\n" .. body
            matched = true
            break
          end
        end
        if matched then break end
      end
    end
    ::continue::
  end
  return accumulated_content ~= "" and accumulated_content or nil
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/root.lua | Lua | -- COPIED and MODIFIED from https://github.com/LazyVim/LazyVim/blob/main/lua/lazyvim/util/root.lua
local Utils = require("avante.utils")
local Config = require("avante.config")
---@class avante.utils.root
---@overload fun(): string
-- Calling the module table itself (e.g. `Root()`) delegates to M.get().
local M = setmetatable({}, {
  __call = function(m) return m.get() end,
})
---@class AvanteRoot
---@field paths string[]
---@field spec AvanteRootSpec
---@alias AvanteRootFn fun(buf: number): (string|string[])
---@alias AvanteRootSpec string|string[]|AvanteRootFn
---@type AvanteRootSpec[]
-- Ordered root-detection strategies: LSP workspace first, then well-known
-- project marker files/directories, then the cwd as a last resort. Names
-- like "lsp"/"cwd" refer to M.detectors entries; tables are pattern lists.
M.spec = {
  "lsp",
  {
    -- Version Control
    ".git", -- Git repository folder
    ".svn", -- Subversion repository folder
    ".hg", -- Mercurial repository folder
    ".bzr", -- Bazaar repository folder
    -- Package Management
    "package.json", -- Node.js/JavaScript projects
    "composer.json", -- PHP projects
    "Gemfile", -- Ruby projects
    "requirements.txt", -- Python projects
    "setup.py", -- Python projects
    "pom.xml", -- Maven (Java) projects
    "build.gradle", -- Gradle (Java) projects
    "Cargo.toml", -- Rust projects
    "go.mod", -- Go projects
    "*.csproj", -- .NET projects
    "*.sln", -- .NET solution files
    -- Build Configuration
    "Makefile", -- Make build system
    "CMakeLists.txt", -- CMake build system
    "build.xml", -- Ant build system
    "Rakefile", -- Ruby build tasks
    "gulpfile.js", -- Gulp build system
    "Gruntfile.js", -- Grunt build system
    "webpack.config.js", -- Webpack configuration
    -- Project Configuration
    ".editorconfig", -- Editor configuration
    ".eslintrc", -- ESLint configuration
    ".prettierrc", -- Prettier configuration
    "tsconfig.json", -- TypeScript configuration
    "tox.ini", -- Python testing configuration
    "pyproject.toml", -- Python project configuration
    ".gitlab-ci.yml", -- GitLab CI configuration
    ".github", -- GitHub configuration folder
    ".travis.yml", -- Travis CI configuration
    "Jenkinsfile", -- Jenkins pipeline configuration
    "docker-compose.yml", -- Docker Compose configuration
    "Dockerfile", -- Docker configuration
    -- Framework-specific
    "angular.json", -- Angular projects
    "ionic.config.json", -- Ionic projects
    "config.xml", -- Cordova projects
    "pubspec.yaml", -- Flutter/Dart projects
    "mix.exs", -- Elixir projects
    "project.clj", -- Clojure projects
    "build.sbt", -- Scala projects
    "stack.yaml", -- Haskell projects
  },
  "cwd",
}
-- Root-detection strategies, keyed by the names used in M.spec.
M.detectors = {}

-- Fallback detector: the current working directory.
function M.detectors.cwd()
  return { vim.uv.cwd() }
end
---Detect roots via LSP clients attached to `buf`: collects every client's
---workspace folders and root_dir, keeping only those containing the buffer's
---path.
---@param buf number
function M.detectors.lsp(buf)
  local bufpath = M.bufpath(buf)
  if not bufpath then return {} end
  local roots = {} ---@type string[]
  for _, client in ipairs(Utils.lsp.get_clients({ bufnr = buf })) do
    for _, ws in ipairs(client.config.workspace_folders or {}) do
      roots[#roots + 1] = vim.uri_to_fname(ws.uri)
    end
    if client.root_dir then roots[#roots + 1] = client.root_dir end
  end
  -- A root qualifies only when the buffer path starts with it.
  return vim.tbl_filter(function(path)
    path = Utils.norm(path)
    return path and bufpath:find(path, 1, true) == 1
  end, roots)
end
---Detect a root by searching upward from the buffer's path (or cwd) for any
---file or directory matching one of `patterns`; "*"-prefixed patterns match
---by suffix. Returns zero or one directory.
---@param buf number
---@param patterns string[]|string
function M.detectors.pattern(buf, patterns)
  local patterns_ = type(patterns) == "string" and { patterns } or patterns
  ---@cast patterns_ string[]
  local path = M.bufpath(buf) or vim.uv.cwd()
  local pattern = vim.fs.find(function(name)
    for _, p in ipairs(patterns_) do
      if name == p then return true end
      -- "*.csproj"-style pattern: escape the suffix and anchor it at the end.
      if p:sub(1, 1) == "*" and name:find(vim.pesc(p:sub(2)) .. "$") then return true end
    end
    return false
  end, { path = path, upward = true })[1]
  return pattern and { vim.fs.dirname(pattern) } or {}
end
---Return the normalized, resolved path of buffer *buf*, or nil when the
---buffer handle is invalid or its name cannot be read.
function M.bufpath(buf)
  if buf == nil or type(buf) ~= "number" then
    -- Defensive guard: callers should always hand us a buffer number.
    vim.notify("avante: M.bufpath received invalid buffer: " .. tostring(buf), vim.log.levels.WARN)
    return nil
  end
  local ok, name_or_err = pcall(vim.api.nvim_buf_get_name, buf)
  if not ok then
    vim.notify(
      "avante: nvim_buf_get_name failed for buffer " .. tostring(buf) .. ": " .. tostring(name_or_err),
      vim.log.levels.WARN
    )
    return nil
  end
  -- M.realpath maps "" (unnamed buffer) to nil for us.
  return M.realpath(name_or_err)
end
-- Normalized current working directory ("" when cwd cannot be resolved).
function M.cwd() return M.realpath(vim.uv.cwd()) or "" end
---Resolve *path* to its canonical form and normalize separators.
---Returns nil for nil/empty input; falls back to the raw path when
---fs_realpath fails (e.g. the path does not exist yet).
function M.realpath(path)
  if path == nil or path == "" then return nil end
  local resolved = vim.uv.fs_realpath(path) or path
  return Utils.norm(resolved)
end
---Turn a root spec into a detector function. A spec may be the name of a
---built-in detector, a custom function, or a pattern (list) for
---M.detectors.pattern.
---@param spec AvanteRootSpec
---@return AvanteRootFn
function M.resolve(spec)
  if M.detectors[spec] then return M.detectors[spec] end
  if type(spec) == "function" then return spec end
  return function(buf) return M.detectors.pattern(buf, spec) end
end
---Run each spec's detector against the buffer and collect matching roots.
---Stops at the first spec that yields roots when opts.all == false.
---@param opts? { buf?: number, spec?: AvanteRootSpec[], all?: boolean }
function M.detect(opts)
  opts = opts or {}
  -- Operator precedence: vim.g.root_spec is used only when it is a table.
  opts.spec = opts.spec or type(vim.g.root_spec) == "table" and vim.g.root_spec or M.spec
  opts.buf = (opts.buf == nil or opts.buf == 0) and vim.api.nvim_get_current_buf() or opts.buf
  local ret = {} ---@type AvanteRoot[]
  for _, spec in ipairs(opts.spec) do
    local paths = M.resolve(spec)(opts.buf)
    paths = paths or {}
    paths = type(paths) == "table" and paths or { paths }
    local roots = {} ---@type string[]
    for _, p in ipairs(paths) do
      local pp = M.realpath(p)
      if pp and not vim.tbl_contains(roots, pp) then roots[#roots + 1] = pp end
    end
    -- Longest (most specific) path first.
    table.sort(roots, function(a, b) return #a > #b end)
    if #roots > 0 then
      ret[#ret + 1] = { spec = spec, paths = roots }
      if opts.all == false then break end
    end
  end
  return ret
end
-- Per-buffer cache of detected roots, keyed by buffer number.
---@type table<number, string>
M.cache = {}
-- Buffer names at cache time; a rename invalidates the cached root.
local buf_names = {}
-- returns the root directory based on:
-- * lsp workspace folders
-- * lsp root_dir
-- * root pattern of filename of the current buffer
-- * root pattern of cwd
---@param opts? {normalize?:boolean, buf?:number}
---@return string
function M.get(opts)
  -- Explicit override from ask options takes precedence over detection.
  if Config.ask_opts.project_root then return Config.ask_opts.project_root end
  local cwd = vim.uv.cwd()
  if Config.behaviour.use_cwd_as_project_root then
    if cwd and cwd ~= "" then return cwd end
  end
  opts = opts or {}
  local buf = opts.buf or vim.api.nvim_get_current_buf()
  local buf_name = vim.api.nvim_buf_get_name(buf)
  -- Reuse the cached root only while the buffer keeps the same name.
  local ret = buf_names[buf] == buf_name and M.cache[buf] or nil
  if not ret then
    local roots = M.detect({ all = false, buf = buf })
    ret = roots[1] and roots[1].paths[1] or vim.uv.cwd()
    buf_names[buf] = buf_name
    M.cache[buf] = ret
  end
  -- NOTE(review): this compares string *lengths*, preferring cwd when the
  -- detected root is a longer path — presumably to keep the root within the
  -- working directory; confirm it is not meant to be a prefix check.
  if cwd ~= nil and #ret > #cwd then ret = cwd end
  if opts and opts.normalize then return ret end
  -- On Windows, convert to backslashes unless the caller asked for normalized paths.
  return Utils.is_win() and ret:gsub("/", "\\") or ret
end
---Return the git repository root containing the project root, falling back
---to the project root itself when no ".git" entry is found.
function M.git()
  local project_root = M.get()
  local dot_git = vim.fs.find(".git", { path = project_root, upward = true })[1]
  if dot_git then return vim.fn.fnamemodify(dot_git, ":h") end
  return project_root
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/streaming_json_parser.lua | Lua | -- StreamingJSONParser: 一个能够处理不完整 JSON 流的解析器
-- Incremental JSON parser: tolerates incomplete chunks and exposes the
-- partial value parsed so far.
local StreamingJSONParser = {}
StreamingJSONParser.__index = StreamingJSONParser
-- Create a new StreamingJSONParser instance with a fresh, empty state.
function StreamingJSONParser:new()
  local obj = setmetatable({}, StreamingJSONParser)
  obj:reset()
  return obj
end
-- Reset the parser: drop buffered input and return to the initial state.
function StreamingJSONParser:reset()
  self.buffer = ""
  self.state = {
    inString = false, -- currently inside a JSON string literal
    escaping = false, -- previous char was a backslash inside a string
    stack = {}, -- open containers (object/array frames with expectations)
    result = nil, -- root value parsed so far (possibly partial)
    currentKey = nil, -- pending object key awaiting its value
    current = nil, -- innermost container being filled
    parentKeys = {}, -- saved (container, key) pairs for nesting
    stringBuffer = "", -- accumulator for the string literal in progress
  }
end
-- Return the (possibly incomplete) root value parsed so far, or nil.
function StreamingJSONParser:getCurrentPartial() return self.state.result end
-- Attach *value* to the container on top of the stack: keyed into the
-- current object (under state.currentKey) or appended to the current array.
function StreamingJSONParser:addValue(value)
  local top = self.state.stack[#self.state.stack]
  top.expectingValue = false
  if top.type == "object" then
    if self.state.current == nil then
      -- Lazily materialize the container (it also becomes the root result).
      self.state.current = {}
      if self.state.result == nil then self.state.result = self.state.current end
    end
    self.state.current[self.state.currentKey] = value
    top.expectingComma = true
  elseif top.type == "array" then
    if self.state.current == nil then
      self.state.current = {}
      if self.state.result == nil then self.state.result = self.state.current end
    end
    table.insert(self.state.current, value)
    top.expectingComma = true
  end
end
-- Decode a bare JSON literal: true/false/null, a number, or (as a
-- fallback) the raw text itself. "null" maps to Lua nil.
local function parseLiteral(buffer)
  if buffer == "true" then return true end
  if buffer == "false" then return false end
  if buffer == "null" then return nil end
  -- tonumber yields nil on failure, so non-numeric text falls through.
  return tonumber(buffer) or buffer
end
-- Parse a chunk of JSON data.
-- Appends *chunk* to any leftover buffer and advances the state machine one
-- character at a time. Returns (result, done): `result` is the root value
-- parsed so far (possibly partial), `done` is true once the top-level
-- object/array has been closed. Unconsumed bytes stay in self.buffer.
function StreamingJSONParser:parse(chunk)
  self.buffer = self.buffer .. chunk
  local i = 1
  local len = #self.buffer
  while i <= len do
    local char = self.buffer:sub(i, i)
    -- Handle strings specially (they can contain JSON control characters)
    if self.state.inString then
      if self.state.escaping then
        local escapeMap = {
          ['"'] = '"',
          ["\\"] = "\\",
          ["/"] = "/",
          ["b"] = "\b",
          ["f"] = "\f",
          ["n"] = "\n",
          ["r"] = "\r",
          ["t"] = "\t",
        }
        local escapedChar = escapeMap[char]
        if escapedChar then
          self.state.stringBuffer = self.state.stringBuffer .. escapedChar
        else
          -- NOTE(review): \uXXXX escapes are passed through verbatim here
          -- rather than decoded — confirm downstream consumers expect that.
          self.state.stringBuffer = self.state.stringBuffer .. char
        end
        self.state.escaping = false
      elseif char == "\\" then
        self.state.escaping = true
      elseif char == '"' then
        -- End of string
        self.state.inString = false
        -- If expecting a key in an object
        if #self.state.stack > 0 and self.state.stack[#self.state.stack].expectingKey then
          self.state.currentKey = self.state.stringBuffer
          self.state.stack[#self.state.stack].expectingKey = false
          self.state.stack[#self.state.stack].expectingColon = true
        -- If expecting a value
        elseif #self.state.stack > 0 and self.state.stack[#self.state.stack].expectingValue then
          self:addValue(self.state.stringBuffer)
        end
        self.state.stringBuffer = ""
      else
        self.state.stringBuffer = self.state.stringBuffer .. char
        -- For partial string handling, update the current object with the partial string value
        if #self.state.stack > 0 and self.state.stack[#self.state.stack].expectingValue and i == len then
          -- If we're at the end of the buffer and still in a string, store the partial value
          if self.state.current and self.state.currentKey then
            self.state.current[self.state.currentKey] = self.state.stringBuffer
          end
        end
      end
      i = i + 1
      goto continue
    end
    -- Skip whitespace when not in a string
    if string.match(char, "%s") then
      i = i + 1
      goto continue
    end
    -- Start of an object
    if char == "{" then
      local newObject = {
        type = "object",
        expectingKey = true,
        expectingComma = false,
        expectingValue = false,
        expectingColon = false,
      }
      table.insert(self.state.stack, newObject)
      -- If we're already in an object/array, save the current state
      if self.state.current then
        table.insert(self.state.parentKeys, { current = self.state.current, key = self.state.currentKey })
      end
      -- Create a new current object
      self.state.current = {}
      -- If this is the root, set result directly
      if self.state.result == nil then
        self.state.result = self.state.current
      elseif #self.state.parentKeys > 0 then
        -- Set as child of the parent
        local parent = self.state.parentKeys[#self.state.parentKeys].current
        local key = self.state.parentKeys[#self.state.parentKeys].key
        if self.state.stack[#self.state.stack - 1].type == "array" then
          table.insert(parent, self.state.current)
        else
          parent[key] = self.state.current
        end
      end
      i = i + 1
      goto continue
    end
    -- End of an object
    if char == "}" then
      table.remove(self.state.stack)
      -- Move back to parent if there is one
      if #self.state.parentKeys > 0 then
        local parentInfo = table.remove(self.state.parentKeys)
        self.state.current = parentInfo.current
        self.state.currentKey = parentInfo.key
      end
      -- If this was the last item on stack, we're complete
      if #self.state.stack == 0 then
        i = i + 1
        self.buffer = self.buffer:sub(i)
        return self.state.result, true
      else
        -- Update parent's expectations
        self.state.stack[#self.state.stack].expectingComma = true
        self.state.stack[#self.state.stack].expectingValue = false
      end
      i = i + 1
      goto continue
    end
    -- Start of an array
    if char == "[" then
      local newArray = { type = "array", expectingValue = true, expectingComma = false }
      table.insert(self.state.stack, newArray)
      -- If we're already in an object/array, save the current state
      if self.state.current then
        table.insert(self.state.parentKeys, { current = self.state.current, key = self.state.currentKey })
      end
      -- Create a new current array
      self.state.current = {}
      -- If this is the root, set result directly
      if self.state.result == nil then
        self.state.result = self.state.current
      elseif #self.state.parentKeys > 0 then
        -- Set as child of the parent
        local parent = self.state.parentKeys[#self.state.parentKeys].current
        local key = self.state.parentKeys[#self.state.parentKeys].key
        if self.state.stack[#self.state.stack - 1].type == "array" then
          table.insert(parent, self.state.current)
        else
          parent[key] = self.state.current
        end
      end
      i = i + 1
      goto continue
    end
    -- End of an array
    if char == "]" then
      table.remove(self.state.stack)
      -- Move back to parent if there is one
      if #self.state.parentKeys > 0 then
        local parentInfo = table.remove(self.state.parentKeys)
        self.state.current = parentInfo.current
        self.state.currentKey = parentInfo.key
      end
      -- If this was the last item on stack, we're complete
      if #self.state.stack == 0 then
        i = i + 1
        self.buffer = self.buffer:sub(i)
        return self.state.result, true
      else
        -- Update parent's expectations
        self.state.stack[#self.state.stack].expectingComma = true
        self.state.stack[#self.state.stack].expectingValue = false
      end
      i = i + 1
      goto continue
    end
    -- Colon between key and value
    if char == ":" then
      if #self.state.stack > 0 and self.state.stack[#self.state.stack].expectingColon then
        self.state.stack[#self.state.stack].expectingColon = false
        self.state.stack[#self.state.stack].expectingValue = true
        i = i + 1
        goto continue
      end
    end
    -- Comma between items
    if char == "," then
      if #self.state.stack > 0 and self.state.stack[#self.state.stack].expectingComma then
        self.state.stack[#self.state.stack].expectingComma = false
        if self.state.stack[#self.state.stack].type == "object" then
          self.state.stack[#self.state.stack].expectingKey = true
        else -- array
          self.state.stack[#self.state.stack].expectingValue = true
        end
        i = i + 1
        goto continue
      end
    end
    -- Start of a key or string value
    if char == '"' then
      self.state.inString = true
      self.state.stringBuffer = ""
      i = i + 1
      goto continue
    end
    -- Start of a non-string value (number, boolean, null)
    if #self.state.stack > 0 and self.state.stack[#self.state.stack].expectingValue then
      local valueBuffer = ""
      local j = i
      -- Collect until we hit a comma, closing bracket, or brace
      while j <= len do
        local currentChar = self.buffer:sub(j, j)
        if currentChar:match("[%s,}%]]") then break end
        valueBuffer = valueBuffer .. currentChar
        j = j + 1
      end
      -- Only process if we have a complete value
      if j <= len and self.buffer:sub(j, j):match("[,}%]]") then
        local value = parseLiteral(valueBuffer)
        self:addValue(value)
        i = j
        goto continue
      end
      -- If we reached the end but didn't hit a delimiter, wait for more input
      break
    end
    i = i + 1
    ::continue::
  end
  -- Update the buffer to remove processed characters
  self.buffer = self.buffer:sub(i)
  -- Return partial result if available, but indicate parsing is incomplete
  return self.state.result, false
end
return StreamingJSONParser
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/test.lua | Lua | -- This is a helper for unit tests.
local M = {}
-- Read file *fn* (relative to the current working directory) and return its
-- contents as a string.
-- NOTE(review): when the file cannot be opened this returns the resolved
-- path string rather than nil — looks intentional for test helpers; confirm
-- callers rely on always receiving a string.
function M.read_file(fn)
  fn = vim.uv.cwd() .. "/" .. fn
  local file = io.open(fn, "r")
  if file then
    local data = file:read("*all")
    file:close()
    return data
  end
  return fn
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante/utils/tokens.lua | Lua | --Taken from https://github.com/jackMort/ChatGPT.nvim/blob/main/lua/chatgpt/flows/chat/tokens.lua
local Tokenizer = require("avante.tokenizers")
---@class avante.utils.tokens
local Tokens = {}
-- USD cost per token, keyed by model name.
---@type table<string, number>
local cost_per_token = {
  davinci = 0.000002,
}
--- Calculate the number of tokens in a given text.
--- Structured message content (text parts, image data, string tool results)
--- is flattened into one string first. When the native tokenizer is not
--- available, tokens are approximated as runs of characters separated by
--- spaces or newlines.
---@param content AvanteLLMMessageContent The text to calculate the number of tokens in.
---@return integer The number of tokens in the given text.
function Tokens.calculate_tokens(content)
  local text = ""
  if type(content) == "string" then
    text = content
  elseif type(content) == "table" then
    -- Collect pieces and join once: repeated `..` in a loop is O(n^2).
    local parts = {}
    for _, item in ipairs(content) do
      if type(item) == "string" then
        parts[#parts + 1] = item
      elseif type(item) == "table" and item.type == "text" then
        parts[#parts + 1] = item.text
      elseif type(item) == "table" and item.type == "image" then
        parts[#parts + 1] = item.source.data
      elseif type(item) == "table" and item.type == "tool_result" then
        if type(item.content) == "string" then parts[#parts + 1] = item.content end
      end
    end
    text = table.concat(parts)
  end
  if Tokenizer.available() then return Tokenizer.count(text) end
  -- Fallback word count. "[^ \n]+" matches exactly the delimiter set the
  -- previous char-by-char loop used (space and newline), without building
  -- intermediate strings one character at a time.
  local tokens = 0
  for _ in text:gmatch("[^ \n]+") do
    tokens = tokens + 1
  end
  return tokens
end
--- Calculate the cost of a given text in dollars.
---@param text AvanteLLMMessageContent The text to calculate the cost of.
---@param model string? The model used for pricing (defaults to "davinci").
---@return number The cost of the given text in dollars.
function Tokens.calculate_usage_in_dollars(text, model)
  local tokens = Tokens.calculate_tokens(text)
  return Tokens.usage_in_dollars(tokens, model)
end
--- Calculate the cost of a given number of tokens in dollars.
--- NOTE(review): only "davinci" is priced; an unknown model name indexes nil
--- and raises an arithmetic error — confirm callers always pass a known model.
---@param tokens integer The number of tokens to calculate the cost of.
---@param model string? The model used for pricing (defaults to "davinci").
---@return number The cost of the given number of tokens in dollars.
function Tokens.usage_in_dollars(tokens, model) return tokens * cost_per_token[model or "davinci"] end
return Tokens
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/avante_lib.lua | Lua | local M = {}
-- Compute the package.cpath search entry for avante's compiled library.
local function get_library_path()
  local os_name = require("avante.utils").get_os_name()
  -- Shared-library extension per platform: .so (Linux), .dylib (macOS), .dll (other/Windows).
  local ext = os_name == "linux" and "so" or (os_name == "darwin" and "dylib" or "dll")
  -- debug.getinfo(1).source is "@<path-to-this-file>": strip the leading "@"
  -- (start at 2) and the trailing "/avante_lib.lua" (negative end index).
  local dirname = string.sub(debug.getinfo(1).source, 2, #"/avante_lib.lua" * -1)
  return dirname .. ("../build/?.%s"):format(ext)
end
-- Drop a single trailing ";" so a separator can be appended safely.
---@type fun(s: string): string
local function trim_semicolon(s)
  if s:sub(-1) == ";" then return s:sub(1, -2) end
  return s
end
-- Append the native-library search path to package.cpath.
-- Idempotent: the plain-text find skips the append when already present.
function M.load()
  local library_path = get_library_path()
  if not string.find(package.cpath, library_path, 1, true) then
    package.cpath = trim_semicolon(package.cpath) .. ";" .. library_path
  end
end
return M
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/cmp_avante/commands.lua | Lua | local api = vim.api
-- nvim-cmp completion source for avante slash-commands ("/name").
---@class CommandsSource : cmp.Source
local CommandsSource = {}
CommandsSource.__index = CommandsSource
-- Construct a new slash-command completion source.
function CommandsSource:new()
  local instance = setmetatable({}, CommandsSource)
  return instance
end
-- Only complete inside avante's input buffer.
function CommandsSource:is_available() return vim.bo.filetype == "AvanteInput" end
function CommandsSource.get_position_encoding_kind() return "utf-8" end
-- Completion is triggered by typing "/".
function CommandsSource:get_trigger_characters() return { "/" } end
function CommandsSource:get_keyword_pattern() return [[\%(@\|#\|/\)\k*]] end
-- Offer one completion item per registered avante command, labelled "/name".
---@param params cmp.SourceCompletionApiParams
function CommandsSource:complete(params, callback)
  ---@type string?
  local trigger_character
  -- triggerKind 1 = manually invoked: infer the trigger from the line text.
  if params.completion_context.triggerKind == 1 then
    trigger_character = string.match(params.context.cursor_before_line, "%s*(/)%S*$")
  -- triggerKind 2 = invoked by typing a registered trigger character.
  elseif params.completion_context.triggerKind == 2 then
    trigger_character = params.completion_context.triggerCharacter
  end
  if not trigger_character or trigger_character ~= "/" then return callback({ items = {}, isIncomplete = false }) end
  local Utils = require("avante.utils")
  local kind = require("cmp").lsp.CompletionItemKind.Variable
  local commands = Utils.get_commands()
  local items = {}
  for _, command in ipairs(commands) do
    table.insert(items, {
      label = "/" .. command.name,
      kind = kind,
      detail = command.details,
      data = {
        name = command.name,
      },
    })
  end
  callback({
    items = items,
    isIncomplete = false,
  })
end
-- Execute the selected command: look it up by name, run its callback (if
-- any), then strip the typed "/name" label from the input buffer.
function CommandsSource:execute(item, callback)
  local Utils = require("avante.utils")
  local commands = Utils.get_commands()
  local command = vim.iter(commands):find(function(command) return command.name == item.data.name end)
  if not command then
    callback()
    return
  end
  local sidebar = require("avante").get()
  if not command.callback then
    -- Commands without a callback simply submit the current input.
    if sidebar then sidebar:submit_input() end
    callback()
    return
  end
  command.callback(sidebar, nil, function()
    local bufnr = sidebar.containers.input.bufnr ---@type integer
    local content = table.concat(api.nvim_buf_get_lines(bufnr, 0, -1, false), "\n")
    vim.defer_fn(function()
      if vim.api.nvim_buf_is_valid(bufnr) then
        -- Escape the label before gsub: its first argument is a Lua pattern,
        -- so a command name containing magic characters (e.g. "-") would
        -- otherwise corrupt the removal. mentions.lua already escapes this way.
        local lines = vim.split(content:gsub(vim.pesc(item.label), ""), "\n") ---@type string[]
        vim.api.nvim_buf_set_lines(bufnr, 0, -1, false, lines)
      end
    end, 100)
    callback()
  end)
end
return CommandsSource
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/cmp_avante/mentions.lua | Lua | local api = vim.api
-- nvim-cmp completion source for avante mentions ("@command").
---@class mentions_source : cmp.Source
---@field get_mentions fun(): AvanteMention[]
local MentionsSource = {}
MentionsSource.__index = MentionsSource
-- Construct a mentions source backed by a provider function.
---@param get_mentions fun(): AvanteMention[]
function MentionsSource:new(get_mentions)
  local instance = setmetatable({}, MentionsSource)
  instance.get_mentions = get_mentions
  return instance
end
-- Mentions are available in both the main input and the prompt-input buffers.
function MentionsSource:is_available()
  return vim.bo.filetype == "AvanteInput" or vim.bo.filetype == "AvantePromptInput"
end
function MentionsSource.get_position_encoding_kind() return "utf-8" end
-- Completion is triggered by typing "@".
function MentionsSource:get_trigger_characters() return { "@" } end
function MentionsSource:get_keyword_pattern() return [[\%(@\|#\|/\)\k*]] end
-- Offer one completion item per mention; labels carry a trailing space so
-- the cursor lands after the inserted mention.
---@param params cmp.SourceCompletionApiParams
function MentionsSource:complete(params, callback)
  ---@type string?
  local trigger_character
  local kind = require("cmp").lsp.CompletionItemKind.Variable
  -- triggerKind 1 = manual invocation; 2 = typed trigger character.
  if params.completion_context.triggerKind == 1 then
    trigger_character = string.match(params.context.cursor_before_line, "%s*(@)%S*$")
  elseif params.completion_context.triggerKind == 2 then
    trigger_character = params.completion_context.triggerCharacter
  end
  if not trigger_character or trigger_character ~= "@" then return callback({ items = {}, isIncomplete = false }) end
  local items = {}
  local mentions = self.get_mentions()
  for _, mention in ipairs(mentions) do
    table.insert(items, {
      label = "@" .. mention.command .. " ",
      kind = kind,
      detail = mention.details,
    })
  end
  callback({
    items = items,
    isIncomplete = false,
  })
end
-- Run the selected mention's callback and remove the "@command " text from
-- the current line, keeping the cursor within bounds.
---@param completion_item table
---@param callback fun(response: {behavior: number})
function MentionsSource:execute(completion_item, callback)
  local current_line = api.nvim_get_current_line()
  local label = completion_item.label:match("^@(%S+)") -- Extract mention command without '@' and space
  local mentions = self.get_mentions()
  -- Find the corresponding mention
  local selected_mention
  for _, mention in ipairs(mentions) do
    if mention.command == label then
      selected_mention = mention
      break
    end
  end
  local sidebar = require("avante").get()
  -- Execute the mention's callback if it exists
  if selected_mention and type(selected_mention.callback) == "function" then
    selected_mention.callback(sidebar)
    -- Get the current cursor position
    local row, col = unpack(api.nvim_win_get_cursor(0))
    -- Replace the current line with the new line (removing the mention;
    -- vim.pesc escapes pattern magic characters in the label)
    local new_line = current_line:gsub(vim.pesc(completion_item.label), "")
    api.nvim_buf_set_lines(0, row - 1, row, false, { new_line })
    -- Adjust the cursor position if needed
    local new_col = math.min(col, #new_line)
    api.nvim_win_set_cursor(0, { row, new_col })
  end
  callback({ behavior = require("cmp").ConfirmBehavior.Insert })
end
return MentionsSource
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
lua/cmp_avante/shortcuts.lua | Lua | local api = vim.api
-- nvim-cmp completion source for avante shortcuts ("#name").
---@class ShortcutsSource : cmp.Source
local ShortcutsSource = {}
ShortcutsSource.__index = ShortcutsSource
-- Construct a new shortcut completion source.
function ShortcutsSource:new()
  local instance = setmetatable({}, ShortcutsSource)
  return instance
end
-- Only complete inside avante's input buffer.
function ShortcutsSource:is_available() return vim.bo.filetype == "AvanteInput" end
function ShortcutsSource.get_position_encoding_kind() return "utf-8" end
-- Completion is triggered by typing "#".
function ShortcutsSource:get_trigger_characters() return { "#" } end
function ShortcutsSource:get_keyword_pattern() return [[\%(@\|#\|/\)\k*]] end
-- Offer one completion item per registered shortcut, labelled "#name".
---@param params cmp.SourceCompletionApiParams
function ShortcutsSource:complete(params, callback)
  local ctx = params.completion_context
  -- Determine which character triggered completion:
  -- triggerKind 1 = manual invocation, 2 = typed trigger character.
  local trigger ---@type string?
  if ctx.triggerKind == 1 then
    trigger = params.context.cursor_before_line:match("%s*(#)%S*$")
  elseif ctx.triggerKind == 2 then
    trigger = ctx.triggerCharacter
  end
  if trigger ~= "#" then return callback({ items = {}, isIncomplete = false }) end
  local Utils = require("avante.utils")
  local kind = require("cmp").lsp.CompletionItemKind.Variable
  local items = {}
  for _, shortcut in ipairs(Utils.get_shortcuts()) do
    items[#items + 1] = {
      label = "#" .. shortcut.name,
      kind = kind,
      detail = shortcut.details,
      data = {
        name = shortcut.name,
        prompt = shortcut.prompt,
        details = shortcut.details,
      },
    }
  end
  callback({ items = items, isIncomplete = false })
end
-- No-op on selection: this source only provides completion items.
function ShortcutsSource:execute(item, callback)
  -- ShortcutsSource should only provide completion, not perform replacement.
  -- The actual shortcut replacement is handled in sidebar.lua's handle_submit.
  callback()
end
return ShortcutsSource
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
plugin/avante.lua | Lua | if vim.fn.has("nvim-0.10") == 0 then
vim.api.nvim_echo({
{ "Avante requires at least nvim-0.10", "ErrorMsg" },
{ "Please upgrade your neovim version", "WarningMsg" },
{ "Press any key to exit", "ErrorMsg" },
}, true, {})
vim.fn.getchar()
vim.cmd([[quit]])
end
-- Guard against double-sourcing this plugin file.
if vim.g.avante ~= nil then return end
vim.g.avante = 1
--- NOTE: We will override vim.paste if img-clip.nvim is available to work with avante.nvim internal logic paste
local Clipboard = require("avante.clipboard")
local Config = require("avante.config")
local Utils = require("avante.utils")
local P = require("avante.path")
local api = vim.api
-- Wrap vim.paste so that pasting into the AvanteInput buffer routes image
-- data through Clipboard.paste_image; everything else falls through to the
-- original implementation.
if Config.support_paste_image() then
  vim.paste = (function(overridden)
    ---@param lines string[]
    ---@param phase -1|1|2|3
    return function(lines, phase)
      require("img-clip.util").verbose = false
      local bufnr = vim.api.nvim_get_current_buf()
      local filetype = vim.api.nvim_get_option_value("filetype", { buf = bufnr })
      if filetype ~= "AvanteInput" then return overridden(lines, phase) end
      ---@type string
      local line = lines[1]
      local ok = Clipboard.paste_image(line)
      if not ok then return overridden(lines, phase) end
      -- After pasting, insert a new line and set cursor to this line
      vim.api.nvim_buf_set_lines(bufnr, -1, -1, false, { "" })
      local last_line = vim.api.nvim_buf_line_count(bufnr)
      vim.api.nvim_win_set_cursor(0, { last_line, 0 })
      -- NOTE(review): this success path returns nothing while the fall-through
      -- paths return overridden(...)'s value — confirm vim.paste's boolean
      -- return is not needed here.
    end
  end)(vim.paste)
end
-- Register a user command named "Avante" .. n with nargs defaulting to 0.
---@param n string
---@param c vim.api.keyset.user_command.callback
---@param o vim.api.keyset.user_command.opts
local function cmd(n, c, o)
  o = vim.tbl_extend("force", { nargs = 0 }, o or {})
  api.nvim_create_user_command("Avante" .. n, c, o)
end
-- Command-line completion for :AvanteAsk-style commands: offers
-- "position=<side>" and "project_root=<path>" key/value candidates.
local function ask_complete(prefix, _, _)
  local candidates = {} ---@type string[]
  vim.list_extend(
    candidates,
    ---@param x string
    vim.tbl_map(function(x) return "position=" .. x end, { "left", "right", "top", "bottom" })
  )
  vim.list_extend(
    candidates,
    ---@param x {root: string}
    vim.tbl_map(function(x) return "project_root=" .. x.root end, P.list_projects())
  )
  return vim.tbl_filter(function(candidate) return vim.startswith(candidate, prefix) end, candidates)
end
cmd("Ask", function(opts)
---@type AskOptions
local args = { question = nil, win = {} }
local parsed_args, question = Utils.parse_args(opts.fargs, {
collect_remaining = true,
boolean_keys = { "ask" },
})
if parsed_args.position then args.win.position = parsed_args.position end
require("avante.api").ask(vim.tbl_deep_extend("force", args, {
ask = parsed_args.ask,
project_root = parsed_args.project_root,
question = question or nil,
}))
end, {
desc = "avante: ask AI for code suggestions",
nargs = "*",
complete = ask_complete,
})
cmd("Chat", function(opts)
local args = Utils.parse_args(opts.fargs)
args.ask = false
require("avante.api").ask(args)
end, {
desc = "avante: chat with the codebase",
nargs = "*",
complete = ask_complete,
})
cmd("ChatNew", function(opts)
local args = Utils.parse_args(opts.fargs)
args.ask = false
args.new_chat = true
require("avante.api").ask(args)
end, { desc = "avante: create new chat", nargs = "*", complete = ask_complete })
cmd("Toggle", function() require("avante").toggle() end, { desc = "avante: toggle AI panel" })
cmd("Build", function(opts)
local args = Utils.parse_args(opts.fargs)
if args.source == nil then args.source = false end
require("avante.api").build(args)
end, {
desc = "avante: build dependencies",
nargs = "*",
complete = function(_, _, _) return { "source=true", "source=false" } end,
})
cmd(
"Edit",
function(opts) require("avante.api").edit(vim.trim(opts.args), opts.line1, opts.line2) end,
{ desc = "avante: edit selected block", nargs = "*", range = 2 }
)
cmd("Refresh", function() require("avante.api").refresh() end, { desc = "avante: refresh windows" })
cmd("Focus", function() require("avante.api").focus() end, { desc = "avante: switch focus windows" })
cmd("SwitchProvider", function(opts) require("avante.api").switch_provider(vim.trim(opts.args or "")) end, {
nargs = 1,
desc = "avante: switch provider",
complete = function(_, line, _)
local prefix = line:match("AvanteSwitchProvider%s*(.*)$") or ""
local providers = vim.tbl_filter(
---@param key string
function(key) return key:find(prefix, 1, true) == 1 end,
vim.tbl_keys(Config.providers)
)
for acp_provider_name, _ in pairs(Config.acp_providers) do
if acp_provider_name:find(prefix, 1, true) == 1 then providers[#providers + 1] = acp_provider_name end
end
return providers
end,
})
cmd(
"SwitchSelectorProvider",
function(opts) require("avante.api").switch_selector_provider(vim.trim(opts.args or "")) end,
{
nargs = 1,
desc = "avante: switch selector provider",
}
)
cmd("SwitchInputProvider", function(opts) require("avante.api").switch_input_provider(vim.trim(opts.args or "")) end, {
nargs = 1,
desc = "avante: switch input provider",
complete = function(_, line, _)
local prefix = line:match("AvanteSwitchInputProvider%s*(.*)$") or ""
local providers = { "native", "dressing", "snacks" }
return vim.tbl_filter(function(key) return key:find(prefix, 1, true) == 1 end, providers)
end,
})
cmd("Clear", function(opts)
local arg = vim.trim(opts.args or "")
arg = arg == "" and "history" or arg
if arg == "history" then
local sidebar = require("avante").get()
if not sidebar then
Utils.error("No sidebar found")
return
end
sidebar:clear_history()
elseif arg == "cache" then
local history_path = P.history_path:absolute()
local cache_path = P.cache_path:absolute()
local prompt = string.format("Recursively delete %s and %s?", history_path, cache_path)
if vim.fn.confirm(prompt, "&Yes\n&No", 2) == 1 then P.clear() end
else
Utils.error("Invalid argument. Valid arguments: 'history', 'memory', 'cache'")
return
end
end, {
desc = "avante: clear history, memory or cache",
nargs = "?",
complete = function(_, _, _) return { "history", "cache" } end,
})
cmd("ShowRepoMap", function() require("avante.repo_map").show() end, { desc = "avante: show repo map" })
cmd("Models", function() require("avante.model_selector").open() end, { desc = "avante: show models" })
cmd("History", function() require("avante.api").select_history() end, { desc = "avante: show histories" })
cmd("Stop", function() require("avante.api").stop() end, { desc = "avante: stop current AI request" })
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/run.sh | Shell | #!/usr/bin/env bash
# Stage the RAG service files into a state directory and launch it.
# Usage: run.sh [target-dir]   (defaults to ~/.local/state/avante-rag-service)

# Set the target directory (use the first argument or default to a local state directory)
TARGET_DIR=$1
if [ -z "$TARGET_DIR" ]; then
  TARGET_DIR="$HOME/.local/state/avante-rag-service"
fi

# Create the target directory if it doesn't exist
mkdir -p "$TARGET_DIR"

# Copy the required files to the target directory
cp -r src/ "$TARGET_DIR"
cp requirements.txt "$TARGET_DIR"
cp shell.nix "$TARGET_DIR"

echo "Files have been copied to $TARGET_DIR"

# Change to the target directory; abort rather than launch nix-shell from the
# wrong directory if the cd fails (shellcheck SC2164).
cd "$TARGET_DIR" || exit 1

# Run the RAG service using nix-shell
# The environment variables (PORT, DATA_DIR, OPENAI_API_KEY, OPENAI_BASE_URL) are passed from the parent process
nix-shell
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/libs/configs.py | Python | import os
from pathlib import Path
# Configuration: all service state lives under $DATA_DIR (default: "data").
BASE_DATA_DIR = Path(os.environ.get("DATA_DIR", "data"))
CHROMA_PERSIST_DIR = BASE_DATA_DIR / "chroma_db"  # Chroma vector-store files
LOG_DIR = BASE_DATA_DIR / "logs"  # service log files
DB_FILE = BASE_DATA_DIR / "sqlite" / "indexing_history.db"  # SQLite database
# Configure directories (created eagerly at import time; mkdir is idempotent)
BASE_DATA_DIR.mkdir(parents=True, exist_ok=True)
LOG_DIR.mkdir(parents=True, exist_ok=True)
DB_FILE.parent.mkdir(parents=True, exist_ok=True)  # Create sqlite directory
CHROMA_PERSIST_DIR.mkdir(parents=True, exist_ok=True)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/libs/db.py | Python | import sqlite3
from collections.abc import Generator
from contextlib import contextmanager
from libs.configs import DB_FILE
# SQLite table schemas
CREATE_TABLES_SQL = """
CREATE TABLE IF NOT EXISTS indexing_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
uri TEXT NOT NULL,
content_hash TEXT NOT NULL,
status TEXT NOT NULL,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
error_message TEXT,
document_id TEXT,
metadata TEXT
);
CREATE INDEX IF NOT EXISTS idx_uri ON indexing_history(uri);
CREATE INDEX IF NOT EXISTS idx_document_id ON indexing_history(document_id);
CREATE INDEX IF NOT EXISTS idx_content_hash ON indexing_history(content_hash);
CREATE TABLE IF NOT EXISTS resources (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
uri TEXT NOT NULL UNIQUE,
type TEXT NOT NULL, -- 'path' or 'https'
status TEXT NOT NULL DEFAULT 'active', -- 'active' or 'inactive'
indexing_status TEXT NOT NULL DEFAULT 'pending', -- 'pending', 'indexing', 'indexed', 'failed'
indexing_status_message TEXT,
indexing_started_at DATETIME,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
last_indexed_at DATETIME,
last_error TEXT
);
CREATE INDEX IF NOT EXISTS idx_resources_name ON resources(name);
CREATE INDEX IF NOT EXISTS idx_resources_uri ON resources(uri);
CREATE INDEX IF NOT EXISTS idx_resources_status ON resources(status);
CREATE INDEX IF NOT EXISTS idx_status ON indexing_history(status);
"""
@contextmanager
def get_db_connection() -> Generator[sqlite3.Connection, None, None]:
    """Yield a SQLite connection with name-based row access, closing it on exit."""
    connection = sqlite3.connect(DB_FILE)
    # sqlite3.Row lets callers read columns by name instead of index.
    connection.row_factory = sqlite3.Row
    try:
        yield connection
    finally:
        connection.close()
def init_db() -> None:
    """Create all tables and indexes if they do not already exist."""
    with get_db_connection() as connection:
        # executescript runs the full multi-statement DDL in one call.
        connection.executescript(CREATE_TABLES_SQL)
        connection.commit()
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/libs/logger.py | Python | import logging
from datetime import datetime
from libs.configs import LOG_DIR
# Root logging configuration: INFO level, mirrored to stderr and to a
# per-day file under LOG_DIR (a new file is opened each calendar day
# the process starts; this is not a rotating handler).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler(
            LOG_DIR / f"rag_service_{datetime.now().astimezone().strftime('%Y%m%d')}.log",
        ),
        logging.StreamHandler(),
    ],
)
# Module-level logger shared by the rest of the service.
logger = logging.getLogger(__name__)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/libs/utils.py | Python | from __future__ import annotations
import re
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from llama_index.core.schema import BaseNode
# Matches doc_ids of split code chunks of the form "<uri>__part_<n>"
# (produced by split_documents); the "uri" group recovers the source URI.
PATTERN_URI_PART = re.compile(r"(?P<uri>.+)__part_\d+")
# Metadata key under which a node stores its source URI.
METADATA_KEY_URI = "uri"
def uri_to_path(uri: str) -> Path:
    """Convert a file:// URI to a filesystem path.

    NOTE(review): plain prefix stripping; percent-escapes in the URI are not
    decoded — confirm callers never produce encoded URIs.
    """
    stripped = uri.replace("file://", "")
    return Path(stripped)
def path_to_uri(file_path: Path) -> str:
    """Convert an absolute path to a file:// URI.

    Directories get a trailing slash so their URIs are distinguishable
    from file URIs.
    """
    result = file_path.as_uri()
    return result + "/" if file_path.is_dir() else result
def is_local_uri(uri: str) -> bool:
    """Return True when the URI uses the file:// scheme."""
    # Fixed-length slice compare; equivalent to startswith("file://").
    return uri[:7] == "file://"
def is_remote_uri(uri: str) -> bool:
    """Return True when the URI uses the http:// or https:// scheme."""
    for prefix in ("https://", "http://"):
        if uri.startswith(prefix):
            return True
    return False
def is_path_node(node: BaseNode) -> bool:
    """Return True when the node's resolved URI points at a local file."""
    node_uri = get_node_uri(node)
    # A node with no resolvable URI is never a file node.
    return bool(node_uri) and is_local_uri(node_uri)
def get_node_uri(node: BaseNode) -> str | None:
    """Get URI from node metadata.

    Resolution order:
      1. the explicit METADATA_KEY_URI metadata entry;
      2. the node's doc_id, reducing split-chunk ids "<uri>__part_<n>"
         back to "<uri>" via PATTERN_URI_PART.
    Bare absolute paths are normalized to file:// URIs.
    Returns None when no URI can be determined.
    """
    uri = node.metadata.get(METADATA_KEY_URI)
    if not uri:
        doc_id = getattr(node, "doc_id", None)
        if doc_id:
            match = PATTERN_URI_PART.match(doc_id)
            uri = match.group("uri") if match else doc_id
    if uri:
        # Paths coming from SimpleDirectoryReader are plain absolute paths.
        if uri.startswith("/"):
            uri = f"file://{uri}"
        return uri
    return None
def inject_uri_to_node(node: BaseNode) -> None:
    """Ensure the node's metadata carries its source URI (no-op if present)."""
    if METADATA_KEY_URI in node.metadata:
        return  # already injected
    resolved = get_node_uri(node)
    if resolved:
        node.metadata[METADATA_KEY_URI] = resolved
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/main.py | Python | """RAG Service API for managing document indexing and retrieval.""" # noqa: INP001
from __future__ import annotations
# Standard library imports
import asyncio
import fcntl
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager
from pathlib import Path
from typing import TYPE_CHECKING
from urllib.parse import urljoin, urlparse
# Third-party imports
import chromadb
import httpx
import pathspec
from fastapi import BackgroundTasks, FastAPI, HTTPException
# Local application imports
from libs.configs import BASE_DATA_DIR, CHROMA_PERSIST_DIR
from libs.db import init_db
from libs.logger import logger
from libs.utils import (
get_node_uri,
inject_uri_to_node,
is_local_uri,
is_path_node,
is_remote_uri,
path_to_uri,
uri_to_path,
)
from llama_index.core import (
Settings,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
from llama_index.core.node_parser import CodeSplitter
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
from llama_index.core.schema import Document
from llama_index.vector_stores.chroma import ChromaVectorStore
from markdownify import markdownify as md
from models.resource import Resource
from providers.factory import initialize_embed_model, initialize_llm_model
from pydantic import BaseModel, Field
from services.indexing_history import indexing_history_service
from services.resource import resource_service
from tree_sitter_language_pack import SupportedLanguage, get_parser
from watchdog.events import FileSystemEvent, FileSystemEventHandler
from watchdog.observers import Observer
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
from llama_index.core.schema import NodeWithScore, QueryBundle
from models.indexing_history import IndexingHistory
from watchdog.observers.api import BaseObserver
# Lock file for leader election
LOCK_FILE = BASE_DATA_DIR / "leader.lock"
def try_acquire_leadership() -> bool:
    """Try to acquire leadership using file lock.

    Takes a non-blocking exclusive flock on LOCK_FILE so that when several
    worker processes start, exactly one becomes the leader.

    Returns:
        True if this process acquired the lock, False otherwise.
    """
    try:
        # Ensure the lock file exists
        LOCK_FILE.parent.mkdir(parents=True, exist_ok=True)
        LOCK_FILE.touch(exist_ok=True)
        # Try to acquire an exclusive lock
        lock_fd = os.open(str(LOCK_FILE), os.O_RDWR)
        fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        # Write current process ID to lock file
        # NOTE: lock_fd is intentionally never closed — the flock must be
        # held for the lifetime of the leader process.
        os.truncate(lock_fd, 0)
        os.write(lock_fd, str(os.getpid()).encode())
        return True
    except OSError:
        # Lock already held by another process (or I/O failure): not leader.
        return False
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:  # noqa: ARG001
    """Initialize services on startup.

    Only the elected leader process re-syncs active resources: it starts a
    filesystem watcher per local resource and (re)indexes each resource.
    Per-resource failures are recorded on the resource and do not abort
    startup. On shutdown the leader stops all watchers.
    """
    # Try to become leader if no worker_id is set
    is_leader = try_acquire_leadership()
    # Only run initialization in the leader
    if is_leader:
        logger.info("Starting RAG service as leader (PID: %d)...", os.getpid())
        # Get all active resources
        active_resources = [r for r in resource_service.get_all_resources() if r.status == "active"]
        logger.info("Found %d active resources to sync", len(active_resources))
        for resource in active_resources:
            try:
                if is_local_uri(resource.uri):
                    directory = uri_to_path(resource.uri)
                    if not directory.exists():
                        error_msg = f"Directory not found: {directory}"
                        logger.error(error_msg)
                        resource_service.update_resource_status(resource.uri, "error", error_msg)
                        continue
                    # Start file system watcher
                    event_handler = FileSystemHandler(directory=directory)
                    observer = Observer()
                    observer.schedule(event_handler, str(directory), recursive=True)
                    observer.start()
                    watched_resources[resource.uri] = observer
                    # Start indexing
                    await index_local_resource_async(resource)
                elif is_remote_uri(resource.uri):
                    if not is_remote_resource_exists(resource.uri):
                        error_msg = "HTTPS resource not found"
                        logger.error("%s: %s", error_msg, resource.uri)
                        resource_service.update_resource_status(resource.uri, "error", error_msg)
                        continue
                    # Start indexing
                    await index_remote_resource_async(resource)
                logger.debug("Successfully synced resource: %s", resource.uri)
            except (OSError, ValueError, RuntimeError) as e:
                error_msg = f"Failed to sync resource {resource.uri}: {e}"
                logger.exception(error_msg)
                resource_service.update_resource_status(resource.uri, "error", error_msg)
    # Application serves requests between startup and shutdown.
    yield
    # Cleanup on shutdown (only in leader)
    if is_leader:
        for observer in watched_resources.values():
            observer.stop()
            observer.join()
app = FastAPI(
title="RAG Service API",
description="""
RAG (Retrieval-Augmented Generation) Service API for managing document indexing and retrieval.
## Features
* Add resources for document watching and indexing
* Remove watched resources
* Retrieve relevant information from indexed resources
* Monitor indexing status
""",
version="1.0.0",
docs_url="/docs",
lifespan=lifespan,
redoc_url="/redoc",
)
# Constants
SIMILARITY_THRESHOLD = 0.95  # min printable-character ratio for valid text (see is_valid_text)
MAX_SAMPLE_SIZE = 100  # max chars of a bad document logged for diagnosis
BATCH_PROCESSING_DELAY = 1  # seconds; debounce window for filesystem events
# number of cpu cores to use for parallel processing
MAX_WORKERS = multiprocessing.cpu_count()
BATCH_SIZE = 40  # Number of documents to process per batch
logger.info("data dir: %s", BASE_DATA_DIR.resolve())
# Global variables
watched_resources: dict[str, BaseObserver] = {}  # Directory path -> Observer instance mapping
file_last_modified: dict[Path, float] = {}  # File path -> Last modified time mapping
index_lock = threading.Lock()  # guards mutations of the shared vector index
# Extension -> tree-sitter language name, used by split_documents to pick
# a CodeSplitter grammar. Note ".h" is parsed as C++ and ".m" as Perl here.
code_ext_map: dict[str, SupportedLanguage] = {
    ".py": "python",
    ".js": "javascript",
    ".ts": "typescript",
    ".jsx": "javascript",
    ".tsx": "typescript",
    ".vue": "vue",
    ".go": "go",
    ".java": "java",
    ".cpp": "cpp",
    ".c": "c",
    ".h": "cpp",
    ".rs": "rust",
    ".rb": "ruby",
    ".php": "php",
    ".scala": "scala",
    ".kt": "kotlin",
    ".swift": "swift",
    ".lua": "lua",
    ".pl": "perl",
    ".pm": "perl",
    ".t": "perl",
    ".pm6": "perl",
    ".m": "perl",
}
# Whitelist of file extensions SimpleDirectoryReader is allowed to load.
required_exts = [
    ".txt",
    ".pdf",
    ".docx",
    ".xlsx",
    ".pptx",
    ".rst",
    ".json",
    ".ini",
    ".conf",
    ".toml",
    ".md",
    ".markdown",
    ".csv",
    ".tsv",
    ".html",
    ".htm",
    ".xml",
    ".yaml",
    ".yml",
    ".css",
    ".scss",
    ".less",
    ".sass",
    ".styl",
    ".sh",
    ".bash",
    ".zsh",
    ".fish",
    ".rb",
    ".java",
    ".go",
    ".ts",
    ".tsx",
    ".js",
    ".jsx",
    ".vue",
    ".py",
    ".php",
    ".c",
    ".cpp",
    ".h",
    ".rs",
    ".swift",
    ".kt",
    ".lua",
    ".perl",
    ".pl",
    ".pm",
    ".t",
    ".pm6",
    ".m",
]
# Browser-like User-Agent: some sites reject requests from default HTTP
# client user agents.
http_headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}
def is_remote_resource_exists(url: str) -> bool:
    """Check whether a remote URL is reachable via an HTTP HEAD request.

    Returns:
        True for 200 / 301 / 302 responses, False for any other status
        or on a request error.
    """
    try:
        response = httpx.head(url, headers=http_headers)
    except (httpx.HTTPError, OSError, ValueError, RuntimeError) as e:
        # httpx transport/request errors (DNS failure, timeout, bad URL)
        # derive from httpx.HTTPError, NOT OSError, so they must be caught
        # explicitly or a connection failure would crash the caller.
        logger.error("Error checking if URL exists %s: %s", url, e)
        return False
    return response.status_code in {
        httpx.codes.OK,
        httpx.codes.MOVED_PERMANENTLY,
        httpx.codes.FOUND,
    }
def fetch_markdown(url: str) -> str:
    """Fetch a URL and convert its HTML body to markdown.

    Returns:
        The markdown text, or an empty string on a non-200 response or
        any request error.
    """
    try:
        logger.info("Fetching markdown content from %s", url)
        response = httpx.get(url, headers=http_headers)
        if response.status_code == httpx.codes.OK:
            return md(response.text)
    except (httpx.HTTPError, OSError, ValueError, RuntimeError) as e:
        # httpx request errors are not OSError subclasses; without this the
        # original except clause let connection failures propagate.
        logger.error("Error fetching markdown content %s: %s", url, e)
    return ""
def markdown_to_links(base_url: str, markdown: str) -> list[str]:
    """Extract same-domain absolute links from markdown content.

    Relative link targets are resolved against ``base_url``. Links on other
    domains are dropped; duplicates (including ``base_url`` itself) appear
    only once, in order of first occurrence.
    """
    base_parts = urlparse(base_url)
    seen = {base_url}
    result: list[str] = []
    for hit in re.finditer(r"\[(.*?)\]\((.*?)\)", markdown):
        target = hit.group(2)
        # Resolve relative URLs against the page they were found on.
        if not target.startswith(base_parts.scheme):
            target = urljoin(base_url, target)
        if urlparse(target).netloc != base_parts.netloc:
            continue  # external domain
        if target not in seen:
            seen.add(target)
            result.append(target)
    return result
# Initialize database
init_db()
# Initialize ChromaDB and LlamaIndex services
chroma_client = chromadb.PersistentClient(path=str(CHROMA_PERSIST_DIR))
# # Check if provider or model has changed
# Embedding/LLM provider configuration is taken entirely from environment
# variables, with OpenAI defaults.
rag_embed_provider = os.getenv("RAG_EMBED_PROVIDER", "openai")
rag_embed_endpoint = os.getenv("RAG_EMBED_ENDPOINT", "https://api.openai.com/v1")
rag_embed_model = os.getenv("RAG_EMBED_MODEL", "text-embedding-3-large")
rag_embed_api_key = os.getenv("RAG_EMBED_API_KEY", None)
rag_embed_extra = os.getenv("RAG_EMBED_EXTRA", None)
rag_llm_provider = os.getenv("RAG_LLM_PROVIDER", "openai")
rag_llm_endpoint = os.getenv("RAG_LLM_ENDPOINT", "https://api.openai.com/v1")
rag_llm_model = os.getenv("RAG_LLM_MODEL", "gpt-4o-mini")
rag_llm_api_key = os.getenv("RAG_LLM_API_KEY", None)
rag_llm_extra = os.getenv("RAG_LLM_EXTRA", None)
# Try to read previous config
# Embeddings from different providers/models are incompatible, so a config
# change invalidates the whole vector store.
config_file = BASE_DATA_DIR / "rag_config.json"
if config_file.exists():
    with Path.open(config_file, "r") as f:
        prev_config = json.load(f)
    if prev_config.get("provider") != rag_embed_provider or prev_config.get("embed_model") != rag_embed_model:
        # Clear existing data if config changed
        logger.info("Detected config change, clearing existing data...")
        chroma_client.reset()
# Save current config
with Path.open(config_file, "w") as f:
    json.dump({"provider": rag_embed_provider, "embed_model": rag_embed_model}, f)
chroma_collection = chroma_client.get_or_create_collection("documents")  # pyright: ignore
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Extra provider kwargs arrive as JSON strings; malformed JSON degrades to {}.
try:
    embed_extra = json.loads(rag_embed_extra) if rag_embed_extra is not None else {}
except json.JSONDecodeError:
    logger.error("Failed to decode RAG_EMBED_EXTRA, defaulting to empty dict.")
    embed_extra = {}
try:
    llm_extra = json.loads(rag_llm_extra) if rag_llm_extra is not None else {}
except json.JSONDecodeError:
    logger.error("Failed to decode RAG_LLM_EXTRA, defaulting to empty dict.")
    llm_extra = {}
# Initialize embedding model and LLM based on provider using the factory
# Model initialization is fail-fast: the service cannot run without them.
try:
    embed_model = initialize_embed_model(
        embed_provider=rag_embed_provider,
        embed_model=rag_embed_model,
        embed_endpoint=rag_embed_endpoint,
        embed_api_key=rag_embed_api_key,
        embed_extra=embed_extra,
    )
    logger.info("Embedding model initialized successfully.")
except (ValueError, RuntimeError) as e:
    error_msg = f"Failed to initialize embedding model: {e}"
    logger.error(error_msg, exc_info=True)
    raise RuntimeError(error_msg) from e
try:
    llm_model = initialize_llm_model(
        llm_provider=rag_llm_provider,
        llm_model=rag_llm_model,
        llm_endpoint=rag_llm_endpoint,
        llm_api_key=rag_llm_api_key,
        llm_extra=llm_extra,
    )
    logger.info("LLM model initialized successfully.")
except (ValueError, RuntimeError) as e:
    error_msg = f"Failed to initialize LLM model: {e}"
    logger.error(error_msg, exc_info=True)
    raise RuntimeError(error_msg) from e
Settings.embed_model = embed_model
Settings.llm = llm_model
# Load the persisted index; fall back to an empty index on first run or
# when the storage is unreadable.
try:
    index = load_index_from_storage(storage_context)
except (OSError, ValueError) as e:
    logger.error("Failed to load index from storage: %s", e)
    index = VectorStoreIndex([], storage_context=storage_context)
class ResourceURIRequest(BaseModel):
    """Request body identifying a resource by URI (file:// or http(s)://)."""
    uri: str = Field(..., description="URI of the resource to watch and index")
class ResourceRequest(ResourceURIRequest):
    """Resource request carrying a unique human-readable name plus the URI."""
    name: str = Field(..., description="Name of the resource to watch and index")
class SourceDocument(BaseModel):
    """A retrieved source snippet with its origin URI and relevance score."""
    uri: str = Field(..., description="URI of the source")
    content: str = Field(..., description="Content snippet from the document")
    score: float | None = Field(None, description="Relevance score of the document")
class RetrieveRequest(BaseModel):
    """Request model for information retrieval."""
    query: str = Field(
        ...,
        description="The query text to search for in the indexed documents",
    )
    # Restricts retrieval to documents whose URI falls under this base.
    base_uri: str = Field(..., description="The base URI to search in")
    top_k: int | None = Field(5, description="Number of top results to return", ge=1, le=20)
class RetrieveResponse(BaseModel):
    """Response model for information retrieval."""
    response: str = Field(..., description="Generated response to the query")
    sources: list[SourceDocument] = Field(..., description="List of source documents used")
class FileSystemHandler(FileSystemEventHandler):
    """Watchdog handler that re-indexes files when they are created/modified.

    Events are debounced per file via the module-level ``file_last_modified``
    map (BATCH_PROCESSING_DELAY seconds), and each accepted change is indexed
    on its own daemon-less thread so the watchdog loop is never blocked.
    """
    def __init__(self: FileSystemHandler, directory: Path) -> None:
        """Initialize the handler for one watched root directory."""
        self.directory = directory
    def on_modified(self: FileSystemHandler, event: FileSystemEvent) -> None:
        """Handle file modification events (directories and .tmp files ignored)."""
        if not event.is_directory and not str(event.src_path).endswith(".tmp"):
            self.handle_file_change(Path(str(event.src_path)))
    def on_created(self: FileSystemHandler, event: FileSystemEvent) -> None:
        """Handle file creation events (directories and .tmp files ignored)."""
        if not event.is_directory and not str(event.src_path).endswith(".tmp"):
            self.handle_file_change(Path(str(event.src_path)))
    def handle_file_change(self: FileSystemHandler, file_path: Path) -> None:
        """Debounce and dispatch indexing for a single changed file."""
        current_time = time.time()
        abs_file_path = file_path
        if not Path(abs_file_path).is_absolute():
            abs_file_path = Path(self.directory, file_path)
        # Check if the file was recently processed
        if abs_file_path in file_last_modified and current_time - file_last_modified[abs_file_path] < BATCH_PROCESSING_DELAY:
            return
        file_last_modified[abs_file_path] = current_time
        # Index in the background so the watchdog dispatcher stays responsive.
        threading.Thread(target=update_index_for_file, args=(self.directory, abs_file_path)).start()
def is_valid_text(text: str) -> bool:
    """Return True when the text is readable (mostly printable characters).

    A text passes when the ratio of printable characters (plus \\n \\r \\t)
    exceeds SIMILARITY_THRESHOLD; empty text always fails.
    """
    if not text:
        logger.debug("Text content is empty")
        return False
    printable_count = sum(1 for ch in text if ch.isprintable() or ch in "\n\r\t")
    ratio = printable_count / len(text)
    if ratio <= SIMILARITY_THRESHOLD:
        logger.debug("Printable character ratio too low: %.2f%%", ratio * 100)
        # Log a short sample to help diagnose the offending content.
        logger.debug("Text sample: %r", text[:MAX_SAMPLE_SIZE])
    return ratio > SIMILARITY_THRESHOLD
def clean_text(text: str) -> str:
    """Strip non-printable characters, preserving newlines, CRs and tabs."""
    kept = [ch for ch in text if ch.isprintable() or ch in "\n\r\t"]
    return "".join(kept)
def process_document_batch(documents: list[Document]) -> bool:  # noqa: PLR0915, C901, PLR0912, RUF100
    """Process a batch of documents for embedding.

    Pipeline per document: skip if an identical content hash already
    completed; decode/validate/clean the text; rebuild a fresh Document
    with the URI injected; then refresh the shared index under index_lock.
    Indexing-history status transitions: -> indexing -> completed/failed.

    Returns:
        True when every document in the batch was valid and indexed,
        False when any document was rejected or the index update failed.
    """
    try:
        # Filter out invalid and already processed documents
        valid_documents = []
        invalid_documents = []
        for doc in documents:
            doc_id = doc.doc_id
            # Check if document with same hash has already been successfully processed
            status_records = indexing_history_service.get_indexing_status(doc=doc)
            if status_records and status_records[0].status == "completed":
                logger.debug(
                    "Document with same hash already processed, skipping: %s",
                    doc.doc_id,
                )
                continue
            logger.debug("Processing document: %s", doc.doc_id)
            try:
                content = doc.get_content()
                # If content is bytes type, try to decode
                if isinstance(content, bytes):
                    try:
                        content = content.decode("utf-8", errors="replace")
                    except (UnicodeDecodeError, OSError) as e:
                        error_msg = f"Unable to decode document content: {doc_id}, error: {e!s}"
                        logger.warning(error_msg)
                        indexing_history_service.update_indexing_status(doc, "failed", error_message=error_msg)
                        invalid_documents.append(doc_id)
                        continue
                # Ensure content is string type
                content = str(content)
                if not is_valid_text(content):
                    error_msg = f"Invalid document content: {doc_id}"
                    logger.warning(error_msg)
                    indexing_history_service.update_indexing_status(doc, "failed", error_message=error_msg)
                    invalid_documents.append(doc_id)
                    continue
                cleaned_content = clean_text(content)
                metadata = getattr(doc, "metadata", {}).copy()
                new_doc = Document(
                    text=cleaned_content,
                    doc_id=doc_id,
                    metadata=metadata,
                )
                inject_uri_to_node(new_doc)
                valid_documents.append(new_doc)
                # Update status to indexing for valid documents
                indexing_history_service.update_indexing_status(doc, "indexing")
            except OSError as e:
                error_msg = f"Document processing failed: {doc_id}, error: {e!s}"
                logger.exception(error_msg)
                indexing_history_service.update_indexing_status(doc, "failed", error_message=error_msg)
                invalid_documents.append(doc_id)
        try:
            if valid_documents:
                # index_lock serializes index mutations across worker threads.
                with index_lock:
                    index.refresh_ref_docs(valid_documents)
                # Update status to completed for successfully processed documents
                for doc in valid_documents:
                    indexing_history_service.update_indexing_status(
                        doc,
                        "completed",
                        metadata=doc.metadata,
                    )
            # True only when no document in this batch was rejected.
            return not invalid_documents
        except OSError as e:
            error_msg = f"Batch indexing failed: {e!s}"
            logger.exception(error_msg)
            # Update status to failed for all documents in the batch
            for doc in valid_documents:
                indexing_history_service.update_indexing_status(doc, "failed", error_message=error_msg)
            return False
    except OSError as e:
        error_msg = f"Batch processing failed: {e!s}"
        logger.exception(error_msg)
        # Update status to failed for all documents in the batch
        for doc in documents:
            indexing_history_service.update_indexing_status(doc, "failed", error_message=error_msg)
        return False
def get_gitignore_files(directory: Path) -> list[str]:
    """Collect ignore patterns for a directory.

    Always ignores the ``.git/`` directory when present, then appends the
    raw lines of the directory's ``.gitignore`` (newlines preserved).
    """
    patterns: list[str] = []
    if (directory / ".git").is_dir():
        patterns.append(".git/")
    gitignore = directory / ".gitignore"
    if gitignore.exists():
        with gitignore.open("r", encoding="utf-8") as handle:
            patterns += handle.readlines()
    return patterns
def get_gitcrypt_files(directory: Path) -> list[str]:
    """Get patterns of git-crypt encrypted files using git command.

    Runs `git ls-files -z` piped into `git check-attr filter --stdin -z`
    and returns the repo-relative paths whose filter attribute is
    "git-crypt", limited to files under ``directory``. Returns an empty
    list when git is unavailable or the directory is not a git repo.
    """
    git_crypt_patterns = []
    git_executable = shutil.which("git")
    if not git_executable:
        logger.warning("git command not found, git-crypt files will not be excluded")
        return git_crypt_patterns
    try:
        # Find git root directory
        git_root_cmd = subprocess.run(
            [git_executable, "-C", str(directory), "rev-parse", "--show-toplevel"],
            capture_output=True,
            text=True,
            check=False,
        )
        if git_root_cmd.returncode != 0:
            logger.warning(
                "Not a git repository or git command failed: %s",
                git_root_cmd.stderr.strip(),
            )
            return git_crypt_patterns
        git_root = Path(git_root_cmd.stdout.strip())
        # Get relative path from git root to our directory
        rel_path = directory.relative_to(git_root) if directory != git_root else Path()
        # Execute git commands separately and pipe the results
        git_ls_files = subprocess.run(
            [git_executable, "-C", str(git_root), "ls-files", "-z"],
            capture_output=True,
            text=False,
            check=False,
        )
        if git_ls_files.returncode != 0:
            return git_crypt_patterns
        # Use Python to process the output instead of xargs, grep, and cut
        git_check_attr = subprocess.run(
            [
                git_executable,
                "-C",
                str(git_root),
                "check-attr",
                "filter",
                "--stdin",
                "-z",
            ],
            input=git_ls_files.stdout,
            capture_output=True,
            text=False,
            check=False,
        )
        if git_check_attr.returncode != 0:
            return git_crypt_patterns
        # Process the output in Python to find git-crypt files
        # -z output is NUL-separated triples: <path> <attribute> <value>.
        output = git_check_attr.stdout.decode("utf-8")
        lines = output.split("\0")
        for i in range(0, len(lines) - 2, 3):
            if i + 2 < len(lines) and lines[i + 2] == "git-crypt":
                file_path = lines[i]
                # Only include files that are in our directory or subdirectories
                file_path_obj = Path(file_path)
                if str(rel_path) == "." or file_path_obj.is_relative_to(rel_path):
                    git_crypt_patterns.append(file_path)
        # Log if git-crypt patterns were found
        if git_crypt_patterns:
            logger.debug("Excluding git-crypt encrypted files: %s", git_crypt_patterns)
    except (subprocess.SubprocessError, OSError) as e:
        logger.warning("Error getting git-crypt files: %s", str(e))
    return git_crypt_patterns
def get_pathspec(directory: Path) -> pathspec.PathSpec | None:
    """Build a gitignore-style PathSpec for a directory.

    Combines .gitignore patterns with git-crypt encrypted file paths so
    both are excluded from scanning.
    """
    combined = get_gitignore_files(directory) + get_gitcrypt_files(directory)
    return pathspec.GitIgnoreSpec.from_lines(combined)
# Extensions treated as binary and skipped during directory scans.
# Deduplicated from the original list (".db" and ".apk" were repeated) and
# fully lower-cased: file suffixes are lower-cased before lookup, so the
# original ".DS_Store" entry could never match — it is ".ds_store" here.
BINARY_EXTENSIONS = frozenset({
    # Images
    ".png", ".jpg", ".jpeg", ".gif", ".bmp", ".ico", ".webp", ".tiff",
    ".exr", ".hdr", ".svg", ".psd", ".ai", ".eps",
    # Audio / video
    ".mp3", ".wav", ".mp4", ".avi", ".mov", ".webm", ".flac", ".ogg",
    ".m4a", ".aac", ".wma", ".flv", ".mkv", ".wmv",
    # Documents
    ".pdf", ".doc", ".docx", ".xls", ".xlsx", ".ppt", ".pptx", ".odt",
    # Archives
    ".zip", ".tar", ".gz", ".7z", ".rar", ".iso", ".dmg", ".pkg",
    ".deb", ".rpm", ".msi", ".apk", ".xz", ".bz2",
    # Compiled artifacts
    ".exe", ".dll", ".so", ".dylib", ".class", ".pyc", ".o", ".obj",
    ".lib", ".a", ".out", ".app", ".jar",
    # Fonts
    ".ttf", ".otf", ".woff", ".woff2", ".eot",
    # Other binary
    ".bin", ".dat", ".db", ".sqlite", ".ds_store",
})
def scan_directory(directory: Path) -> list[str]:
    """Scan a directory tree and return files eligible for indexing.

    Skips files with known binary extensions and files matched by the
    directory's ignore spec (.gitignore plus git-crypt encrypted files).

    Returns:
        A list of absolute path strings for the matched files.
    """
    spec = get_pathspec(directory)
    matched_files: list[str] = []
    for root, _, files in os.walk(directory):
        for name in files:
            file_path = Path(root) / name
            # frozenset gives O(1) lookup vs the original O(n) list scan.
            if file_path.suffix.lower() in BINARY_EXTENSIONS:
                logger.debug("Skipping binary file: %s", file_path)
                continue
            # Ignore spec matches against paths relative to the scan root.
            if spec and spec.match_file(os.path.relpath(file_path, directory)):
                logger.debug("Ignoring file: %s", file_path)
                continue
            matched_files.append(str(file_path))
    return matched_files
def update_index_for_file(directory: Path, abs_file_path: Path) -> None:
    """Update the index for a single file.

    Called from FileSystemHandler worker threads. Skips files that no
    longer exist or are ignored by the directory's pathspec, then reloads
    the file, splits it, and re-embeds it. The owning resource's
    indexing_status is moved to "indexing" and then "indexed"/"failed".
    """
    logger.debug("Starting to index file: %s", abs_file_path)
    if not abs_file_path.is_file():
        logger.debug("File does not exist or is not a file, skipping: %s", abs_file_path)
        return
    rel_file_path = abs_file_path.relative_to(directory)
    spec = get_pathspec(directory)
    if spec and spec.match_file(rel_file_path):
        logger.debug("File is ignored, skipping: %s", abs_file_path)
        return
    # The watched directory itself is the registered resource.
    resource = resource_service.get_resource(path_to_uri(directory))
    if not resource:
        logger.error("Resource not found for directory: %s", directory)
        return
    resource_service.update_resource_indexing_status(resource.uri, "indexing", "")
    documents = SimpleDirectoryReader(
        input_files=[abs_file_path],
        filename_as_id=True,
        required_exts=required_exts,
    ).load_data()
    logger.debug("Updating index: %s", abs_file_path)
    processed_documents = split_documents(documents)
    success = process_document_batch(processed_documents)
    if success:
        resource_service.update_resource_indexing_status(resource.uri, "indexed", "")
        logger.debug("File indexing completed: %s", abs_file_path)
    else:
        resource_service.update_resource_indexing_status(resource.uri, "failed", "unknown error")
        logger.error("File indexing failed: %s", abs_file_path)
def split_documents(documents: list[Document]) -> list[Document]:
    """Split documents into code and non-code documents.

    Local files whose extension appears in code_ext_map are chunked with a
    tree-sitter CodeSplitter; each chunk becomes a new Document with doc_id
    "<orig>__part_<i>" plus chunk metadata. Non-code documents (and files
    the splitter cannot parse) pass through unchanged, tagged with
    "orig_doc_id". Documents without a resolvable URI are dropped.
    """
    # Create file parser configuration
    # Initialize CodeSplitter
    # Split code documents using CodeSplitter
    processed_documents = []
    for doc in documents:
        uri = get_node_uri(doc)
        if not uri:
            continue
        if not is_path_node(doc):
            processed_documents.append(doc)
            continue
        file_path = uri_to_path(uri)
        file_ext = file_path.suffix.lower()
        if file_ext in code_ext_map:
            # Apply CodeSplitter to code files
            language = code_ext_map.get(file_ext, "python")
            parser = get_parser(language)
            code_splitter = CodeSplitter(
                language=language,  # Default is python, will auto-detect based on file extension
                chunk_lines=80,  # Maximum number of lines per code block
                chunk_lines_overlap=15,  # Number of overlapping lines to maintain context
                max_chars=1500,  # Maximum number of characters per block
                parser=parser,
            )
            try:
                t = doc.get_content()
                texts = code_splitter.split_text(t)
            except ValueError as e:
                # Unparseable source: keep the document whole rather than lose it.
                logger.error(
                    "Error splitting document: %s, so skipping split, error: %s",
                    doc.doc_id,
                    str(e),
                )
                processed_documents.append(doc)
                continue
            for i, text in enumerate(texts):
                new_doc = Document(
                    text=text,
                    doc_id=f"{doc.doc_id}__part_{i}",
                    metadata={
                        **doc.metadata,
                        "chunk_number": i,
                        "total_chunks": len(texts),
                        "language": code_splitter.language,
                        "orig_doc_id": doc.doc_id,
                    },
                )
                processed_documents.append(new_doc)
        else:
            doc.metadata["orig_doc_id"] = doc.doc_id
            # Add non-code files directly
            processed_documents.append(doc)
    return processed_documents
async def index_remote_resource_async(resource: Resource) -> None:
    """Asynchronously index a remote resource.

    Fetches the page as markdown, crawls same-domain links one level deep,
    and embeds everything in parallel batches of BATCH_SIZE.

    Raises:
        OSError: re-raised after recording the failure on the resource.
    """
    resource_service.update_resource_indexing_status(resource.uri, "indexing", "")
    url = resource.uri
    try:
        logger.debug("Loading resource content: %s", url)
        # Fetch markdown content
        markdown = fetch_markdown(url)
        link_md_pairs = [(url, markdown)]
        # Extract links from markdown
        links = markdown_to_links(url, markdown)
        logger.debug("Found %d sub links", len(links))
        logger.debug("Link list: %s", links)
        # Use thread pool for parallel batch processing
        # NOTE(review): executor.map is scheduled from inside one of the
        # executor's own workers — with MAX_WORKERS == 1 this would
        # deadlock; confirm the service never runs on a 1-core host.
        loop = asyncio.get_event_loop()
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            mds: list[str] = await loop.run_in_executor(
                executor,
                lambda: list(executor.map(fetch_markdown, links)),
            )
            zipped = zip(links, mds, strict=True)  # pyright: ignore
            link_md_pairs.extend(zipped)
        # Create documents from links
        documents = [Document(text=markdown, doc_id=link) for link, markdown in link_md_pairs]
        logger.debug("Found %d documents", len(documents))
        logger.debug("Document list: %s", [doc.doc_id for doc in documents])
        # Process documents in batches
        total_documents = len(documents)
        batches = [documents[i : i + BATCH_SIZE] for i in range(0, total_documents, BATCH_SIZE)]
        logger.debug("Splitting documents into %d batches for processing", len(batches))
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            results = await loop.run_in_executor(
                executor,
                lambda: list(executor.map(process_document_batch, batches)),
            )
        # Check processing results
        if all(results):
            logger.debug("Resource %s indexing completed", url)
            resource_service.update_resource_indexing_status(resource.uri, "indexed", "")
        else:
            failed_batches = len([r for r in results if not r])
            error_msg = f"Some batches failed processing ({failed_batches}/{len(batches)})"
            logger.error(error_msg)
            # NOTE(review): partial failure still reports "indexed" (with an
            # error message) — presumably intentional best-effort; confirm.
            resource_service.update_resource_indexing_status(resource.uri, "indexed", error_msg)
    except OSError as e:
        error_msg = f"Resource indexing failed: {url}"
        logger.exception(error_msg)
        resource_service.update_resource_indexing_status(resource.uri, "failed", error_msg)
        raise e  # noqa: TRY201
async def index_local_resource_async(resource: Resource) -> None:
    """Asynchronously index a directory.

    Scans the directory (honoring ignore specs), splits code files, and
    embeds everything in parallel batches of BATCH_SIZE.

    Raises:
        OSError: re-raised after recording the failure on the resource.
    """
    resource_service.update_resource_indexing_status(resource.uri, "indexing", "")
    directory_path = uri_to_path(resource.uri)
    try:
        logger.info("Loading directory content: %s", directory_path)
        documents = SimpleDirectoryReader(
            input_files=scan_directory(directory_path),
            filename_as_id=True,
            required_exts=required_exts,
        ).load_data()
        processed_documents = split_documents(documents)
        logger.info("Found %d documents", len(processed_documents))
        logger.debug("Document list: %s", [doc.doc_id for doc in processed_documents])
        # Process documents in batches
        total_documents = len(processed_documents)
        batches = [processed_documents[i : i + BATCH_SIZE] for i in range(0, total_documents, BATCH_SIZE)]
        logger.info("Splitting documents into %d batches for processing", len(batches))
        # Use thread pool for parallel batch processing
        loop = asyncio.get_event_loop()
        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            results = await loop.run_in_executor(
                executor,
                lambda: list(executor.map(process_document_batch, batches)),
            )
        # Check processing results
        if all(results):
            logger.info("Directory %s indexing completed", directory_path)
            resource_service.update_resource_indexing_status(resource.uri, "indexed", "")
        else:
            failed_batches = len([r for r in results if not r])
            error_msg = f"Some batches failed processing ({failed_batches}/{len(batches)})"
            # NOTE(review): partial failure still reports "indexed" (with an
            # error message), matching index_remote_resource_async — confirm.
            resource_service.update_resource_indexing_status(resource.uri, "indexed", error_msg)
            logger.error(error_msg)
    except OSError as e:
        error_msg = f"Directory indexing failed: {directory_path}"
        resource_service.update_resource_indexing_status(resource.uri, "failed", error_msg)
        logger.exception(error_msg)
        raise e  # noqa: TRY201
@app.get("/api/v1/readyz")
async def readiness_probe() -> dict[str, str]:
    """Readiness probe endpoint: the service is ready once it serves requests."""
    payload = {"status": "ok"}
    return payload
@app.post(
    "/api/v1/add_resource",
    response_model="dict[str, str]",
    summary="Add a resource for watching and indexing",
    description="""
    Adds a resource to the watch list and starts indexing all existing documents in it asynchronously.
    """,
    responses={
        200: {"description": "Resource successfully added and indexing started"},
        404: {"description": "Resource not found"},
        400: {"description": "Resource already being watched"},
    },
)
async def add_resource(request: ResourceRequest, background_tasks: BackgroundTasks):  # noqa: D103, ANN201, C901
    """Register a resource (local git directory or remote URL) and start indexing.

    Local resources additionally get a filesystem watcher. Re-adding an
    already-active resource is an idempotent success; reactivating an
    inactive one keeps its original name.

    Raises:
        HTTPException: 404 when the target does not exist; 400 for a
            non-directory, non-git directory, invalid URI, renamed
            resource, or duplicate name.
    """
    # Idempotency: an already-active resource just reports success again.
    resource = resource_service.get_resource(request.uri)
    if resource and resource.status == "active":
        return {
            "status": "success",
            "message": f"Resource {request.uri} added and indexing started in background",
        }
    resource_type = "local"
    # Placeholder; every accepted branch below rebinds this to the real task.
    async def background_task(resource: Resource) -> None:
        pass
    if is_local_uri(request.uri):
        directory = uri_to_path(request.uri)
        if not directory.exists():
            raise HTTPException(status_code=404, detail=f"Directory not found: {directory}")
        if not directory.is_dir():
            raise HTTPException(status_code=400, detail=f"{directory} is not a directory")
        git_directory = directory / ".git"
        if not git_directory.exists() or not git_directory.is_dir():
            # Fixed user-facing typo: "ia not" -> "is not".
            raise HTTPException(status_code=400, detail=f"{git_directory} is not a git repository")
        # Create observer
        event_handler = FileSystemHandler(directory=directory)
        observer = Observer()
        observer.schedule(event_handler, str(directory), recursive=True)
        observer.start()
        watched_resources[request.uri] = observer
        background_task = index_local_resource_async
    elif is_remote_uri(request.uri):
        if not is_remote_resource_exists(request.uri):
            raise HTTPException(status_code=404, detail="web resource not found")
        resource_type = "remote"
        background_task = index_remote_resource_async
    else:
        raise HTTPException(status_code=400, detail=f"Invalid URI: {request.uri}")
    if resource:
        # Reactivating a previously-inactive resource: name is immutable.
        if resource.name != request.name:
            raise HTTPException(
                status_code=400,
                detail=f"Resource name cannot be changed: {resource.name}",
            )
        resource_service.update_resource_status(resource.uri, "active")
    else:
        exists_resource = resource_service.get_resource_by_name(request.name)
        if exists_resource:
            raise HTTPException(status_code=400, detail="Resource with same name already exists")
        # Add to database
        resource = Resource(
            id=None,
            name=request.name,
            uri=request.uri,
            type=resource_type,
            status="active",
            indexing_status="pending",
            indexing_status_message=None,
            indexing_started_at=None,
            last_indexed_at=None,
            last_error=None,
        )
        resource_service.add_resource_to_db(resource)
    # Indexing runs after the response is sent.
    background_tasks.add_task(background_task, resource)
    return {
        "status": "success",
        "message": f"Resource {request.uri} added and indexing started in background",
    }
@app.post(
    "/api/v1/remove_resource",
    # FIX: response_model must be a type, not the string "dict[str, str]".
    response_model=dict[str, str],
    summary="Remove a watched resource",
    description="Stops watching and indexing the specified resource",
    responses={
        200: {"description": "Resource successfully removed from watch list"},
        404: {"description": "Resource not found in watch list"},
    },
)
async def remove_resource(request: ResourceURIRequest):  # noqa: D103, ANN201
    # Mark a resource inactive and, for local resources, shut down its
    # filesystem observer.
    resource = resource_service.get_resource(request.uri)
    if not resource or resource.status != "active":
        raise HTTPException(status_code=404, detail="Resource not being watched")
    if request.uri in watched_resources:
        # Stop watching: shut the observer thread down cleanly before
        # dropping our reference to it.
        observer = watched_resources[request.uri]
        observer.stop()
        observer.join()
        del watched_resources[request.uri]
    # Update database status
    resource_service.update_resource_status(request.uri, "inactive")
    return {"status": "success", "message": f"Resource {request.uri} removed"}
@app.post(
    "/api/v1/retrieve",
    response_model=RetrieveResponse,
    summary="Retrieve information from indexed documents",
    description="""
Performs a semantic search over all indexed documents and returns relevant information.
The response includes both the answer and the source documents used to generate it.
""",
    responses={
        200: {"description": "Successfully retrieved information"},
        500: {"description": "Internal server error during retrieval"},
    },
)
async def retrieve(request: RetrieveRequest):  # noqa: D103, ANN201, C901, PLR0915
    # Answer a query using only documents under request.base_uri, returning
    # the synthesized response plus the (cleaned) source snippets.
    if is_local_uri(request.base_uri):
        directory = uri_to_path(request.base_uri)
        # Validate directory exists
        if not directory.exists():
            raise HTTPException(status_code=404, detail=f"Directory not found: {request.base_uri}")
    logger.info(
        "Received retrieval request: %s for base uri: %s",
        request.query,
        request.base_uri,
    )
    # Per-request cache of file contents keyed by resolved path, so the
    # filter below reads each file from disk at most once.
    cached_file_contents = {}

    # Create a filter function to only include documents from the specified directory
    def filter_documents(node: NodeWithScore) -> bool:
        # Keep only nodes whose source URI lives under request.base_uri and
        # (for local files) whose on-disk content still contains the indexed text.
        uri = get_node_uri(node.node)
        if not uri:
            return False
        if is_path_node(node.node):
            file_path = uri_to_path(uri)
            # Check if the file path starts with the specified directory
            file_path = file_path.resolve()
            # NOTE(review): shadows the outer `directory`; both derive from
            # request.base_uri so the value is consistent.
            directory = uri_to_path(request.base_uri).resolve()
            # Check if directory is a parent of file_path
            try:
                file_path.relative_to(directory)
                if not file_path.exists():
                    logger.warning("File not found: %s", file_path)
                    return False
                content = cached_file_contents.get(file_path)
                if content is None:
                    with file_path.open("r", encoding="utf-8") as f:
                        content = f.read()
                    cached_file_contents[file_path] = content
                # Stale-index guard: drop nodes whose indexed text no longer
                # appears in the current file contents.
                if node.node.get_content() not in content:
                    logger.warning("File content does not match: %s", file_path)
                    return False
                return True
            except ValueError:
                # relative_to() raised: file lies outside the base directory.
                return False
        if uri == request.base_uri:
            return True
        # Non-path nodes: prefix-match with a trailing separator so e.g.
        # /foo does not accidentally match /foobar.
        base_uri = request.base_uri
        if not base_uri.endswith(os.path.sep):
            base_uri += os.path.sep
        return uri.startswith(base_uri)

    # Create a custom post processor
    class ResourceFilterPostProcessor(MetadataReplacementPostProcessor):
        """Post-processor for filtering nodes based on directory."""

        def __init__(self: ResourceFilterPostProcessor) -> None:
            """Initialize the post-processor."""
            super().__init__(target_metadata_key="filtered")

        def postprocess_nodes(
            self: ResourceFilterPostProcessor,
            nodes: list[NodeWithScore],
            query_bundle: QueryBundle | None = None,  # noqa: ARG002, pyright: ignore
            query_str: str | None = None,  # noqa: ARG002, pyright: ignore
        ) -> list[NodeWithScore]:
            """
            Filter nodes based on directory path.

            Args:
            ----
                nodes: The nodes to process
                query_bundle: Optional query bundle for the query
                query_str: Optional query string

            Returns:
            -------
                List of filtered nodes

            """
            return [node for node in nodes if filter_documents(node)]

    # Create query engine with the filter
    query_engine = index.as_query_engine(
        node_postprocessors=[ResourceFilterPostProcessor()],
    )
    logger.info("Executing retrieval query")
    response = query_engine.query(request.query)
    # If no documents were found in the specified directory
    if not response.source_nodes:
        raise HTTPException(
            status_code=404,
            detail=f"No relevant documents found in uri: {request.base_uri}",
        )
    # Process source documents, ensure readable text
    sources = []
    for node in response.source_nodes[: request.top_k]:
        try:
            content = node.node.get_content()
            uri = get_node_uri(node.node)
            # Handle byte-type content
            if isinstance(content, bytes):
                try:
                    content = content.decode("utf-8", errors="replace")
                except UnicodeDecodeError as e:
                    # Defensive guard; errors="replace" does not normally raise.
                    logger.warning(
                        "Unable to decode document content: %s, error: %s",
                        uri,
                        str(e),
                    )
                    continue
            # Validate and clean text
            if is_valid_text(str(content)):
                cleaned_content = clean_text(str(content))
                # Add document source information with file path
                doc_info = {
                    "uri": uri,
                    "content": cleaned_content,
                    "score": float(node.score) if node.score is not None else None,
                }
                sources.append(doc_info)
            else:
                logger.warning("Skipping invalid document content: %s", uri)
        except (OSError, UnicodeDecodeError, json.JSONDecodeError):
            logger.warning("Error processing source document", exc_info=True)
            continue
    logger.info("Retrieval completed, found %d relevant documents", len(sources))
    # Process response text similarly
    response_text = str(response)
    # Strip non-printable characters while keeping common whitespace.
    response_text = "".join(char for char in response_text if char.isprintable() or char in "\n\r\t")
    return {
        "response": response_text,
        "sources": sources,
    }
class IndexingStatusRequest(BaseModel):
    """Request model for indexing status."""

    # Local directory URI or remote resource URI to inspect.
    uri: str = Field(..., description="URI of the resource to get indexing status for")
class IndexingStatusResponse(BaseModel):
    """Model for indexing status response."""

    uri: str = Field(..., description="URI of the resource being monitored")
    # True only while a filesystem observer is registered for this URI.
    is_watched: bool = Field(..., description="Whether the directory is currently being watched")
    files: list[IndexingHistory] = Field(..., description="List of files and their indexing status")
    total_files: int = Field(..., description="Total number of files processed in this directory")
    # Maps a status string (e.g. "completed", "failed") to its file count.
    status_summary: dict[str, int] = Field(
        ...,
        description="Summary of indexing statuses (count by status)",
    )
@app.post(
    "/api/v1/indexing-status",
    response_model=IndexingStatusResponse,
    summary="Get indexing status for a resource",
    description="""
Returns the current indexing status for all files in the specified resource, including:
* Whether the resource is being watched
* Status of each files in the resource
""",
    responses={
        200: {"description": "Successfully retrieved indexing status"},
        404: {"description": "Resource not found"},
    },
)
async def get_indexing_status_for_resource(request: IndexingStatusRequest):  # noqa: D103, ANN201
    # Collect the latest indexing record per file, then tally records by status.
    resource_files = []
    status_counts = {}
    if is_local_uri(request.uri):
        directory = uri_to_path(request.uri).resolve()
        if not directory.exists():
            raise HTTPException(status_code=404, detail=f"Directory not found: {directory}")
        # Latest indexing history entry for every file under this resource.
        resource_files = indexing_history_service.get_indexing_status(base_uri=request.uri)
        logger.info("Found %d files in resource %s", len(resource_files), request.uri)
    # Single pass: trace each entry and tally its status.
    # (Remote resources currently yield an empty file list — presumably by
    # design; verify if remote history should be surfaced here.)
    for entry in resource_files:
        logger.debug("File status: %s - %s", entry.uri, entry.status)
        status_counts[entry.status] = status_counts.get(entry.status, 0) + 1
    return IndexingStatusResponse(
        uri=request.uri,
        is_watched=request.uri in watched_resources,
        files=resource_files,
        total_files=len(resource_files),
        status_summary=status_counts,
    )
class ResourceListResponse(BaseModel):
    """Response model for listing resources."""

    resources: list[Resource] = Field(..., description="List of all resources")
    total_count: int = Field(..., description="Total number of resources")
    # Maps a resource status (e.g. "active", "inactive") to its count.
    status_summary: dict[str, int] = Field(
        ...,
        description="Summary of resource statuses (count by status)",
    )
@app.get(
    "/api/v1/resources",
    response_model=ResourceListResponse,
    summary="List all resources",
    description="""
Returns a list of all resources that have been added to the system, including:
* Resource URI
* Resource type (path/https)
* Current status
* Last indexed timestamp
* Any errors
""",
    responses={
        200: {"description": "Successfully retrieved resource list"},
    },
)
async def list_resources() -> ResourceListResponse:
    """Return every registered resource together with a per-status tally."""
    all_resources = resource_service.get_all_resources()
    # Tally resources by their status string.
    tally: dict[str, int] = {}
    for item in all_resources:
        tally[item.status] = tally.get(item.status, 0) + 1
    return ResourceListResponse(
        resources=all_resources,
        total_count=len(all_resources),
        status_summary=tally,
    )
@app.get("/api/health")
async def health_check() -> dict[str, str]:
"""Health check endpoint."""
return {"status": "ok"}
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/models/indexing_history.py | Python | """Indexing History Model."""
from datetime import datetime
from typing import Any
from pydantic import BaseModel, Field
class IndexingHistory(BaseModel):
    """Model for indexing history record."""

    id: int | None = Field(None, description="Record ID")
    uri: str = Field(..., description="URI of the indexed file")
    content_hash: str = Field(..., description="MD5 hash of the file content")
    status: str = Field(..., description="Indexing status (indexing/completed/failed)")
    # NOTE(review): datetime.now is naive local time — confirm whether UTC
    # (datetime.utcnow or timezone-aware now) is intended.
    timestamp: datetime = Field(default_factory=datetime.now, description="Record timestamp")
    error_message: str | None = Field(None, description="Error message if failed")
    document_id: str | None = Field(None, description="Document ID in the index")
    metadata: dict[str, Any] | None = Field(None, description="Additional metadata")
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/models/resource.py | Python | """Resource Model."""
from datetime import datetime
from typing import Literal
from pydantic import BaseModel, Field
class Resource(BaseModel):
    """Model for resource record."""

    id: int | None = Field(None, description="Resource ID")
    name: str = Field(..., description="Name of the resource")
    uri: str = Field(..., description="URI of the resource")
    # FIX: description now matches the Literal values (was "(path/https)").
    type: Literal["local", "remote"] = Field(..., description="Type of resource (local/remote)")
    status: str = Field("active", description="Status of resource (active/inactive)")
    indexing_status: Literal["pending", "indexing", "indexed", "failed"] = Field(
        "pending",
        description="Indexing status (pending/indexing/indexed/failed)",
    )
    indexing_status_message: str | None = Field(None, description="Indexing status message")
    # NOTE(review): naive local time; confirm whether UTC is intended.
    created_at: datetime = Field(default_factory=datetime.now, description="Creation timestamp")
    indexing_started_at: datetime | None = Field(None, description="Indexing start timestamp")
    last_indexed_at: datetime | None = Field(None, description="Last indexing timestamp")
    last_error: str | None = Field(None, description="Last error message if any")
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/providers/dashscope.py | Python | # src/providers/dashscope.py
from typing import Any
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.llms.llm import LLM
from llama_index.embeddings.dashscope import DashScopeEmbedding
from llama_index.llms.dashscope import DashScope
def initialize_embed_model(
    embed_endpoint: str,  # noqa: ARG001
    embed_api_key: str,
    embed_model: str,
    **embed_extra: Any,  # noqa: ANN401
) -> BaseEmbedding:
    """
    Build a DashScope embedding model.

    Args:
        embed_endpoint: Unused; present only to satisfy the provider factory
            interface (DashScope resolves its endpoint internally/via env).
        embed_api_key: DashScope API key.
        embed_model: Embedding model name.
        embed_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured DashScope embedding model.
    """
    return DashScopeEmbedding(
        api_key=embed_api_key,
        model_name=embed_model,
        **embed_extra,
    )
def initialize_llm_model(
    llm_endpoint: str,  # noqa: ARG001
    llm_api_key: str,
    llm_model: str,
    **llm_extra: Any,  # noqa: ANN401
) -> LLM:
    """
    Build a DashScope chat (LLM) model.

    Args:
        llm_endpoint: Unused; present only to satisfy the provider factory
            interface (DashScope resolves its endpoint internally/via env).
        llm_api_key: DashScope API key.
        llm_model: LLM model name.
        llm_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured DashScope LLM.
    """
    return DashScope(
        api_key=llm_api_key,
        model_name=llm_model,
        **llm_extra,
    )
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/providers/factory.py | Python | import importlib
from typing import TYPE_CHECKING, Any, cast
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.llms.llm import LLM
if TYPE_CHECKING:
from collections.abc import Callable
from libs.logger import logger # Assuming libs.logger exists and provides a logger instance
def initialize_embed_model(
    embed_provider: str,
    embed_model: str,
    embed_endpoint: str | None = None,
    embed_api_key: str | None = None,
    embed_extra: dict[str, Any] | None = None,
) -> BaseEmbedding:
    """
    Initialize embedding model based on specified provider and configuration.

    Dynamically loads the provider module based on the embed_provider parameter.

    Args:
        embed_provider: The name of the embedding provider (e.g., "openai", "ollama").
        embed_model: The name of the embedding model.
        embed_endpoint: The API endpoint for the embedding provider.
        embed_api_key: The API key for the embedding provider.
        embed_extra: Additional provider-specific configuration parameters.

    Returns:
        The initialized embed_model.

    Raises:
        ValueError: If the specified embed_provider is not supported or module/function not found.
        RuntimeError: If model initialization fails for the selected provider.
    """
    # Validate provider name: guards the dynamic import against values that
    # are not plain module names.
    error_msg = f"Invalid EMBED_PROVIDER specified: '{embed_provider}'. Provider name must be alphanumeric or contain underscores."
    if not embed_provider.replace("_", "").isalnum():
        raise ValueError(error_msg)
    try:
        provider_module = importlib.import_module(f".{embed_provider}", package="providers")
        logger.debug(f"Successfully imported provider module: providers.{embed_provider}")
        attribute = getattr(provider_module, "initialize_embed_model", None)
        if attribute is None:
            error_msg = f"Provider module '{embed_provider}' does not have an 'initialize_embed_model' function."
            raise ValueError(error_msg)  # noqa: TRY301
        initializer = cast("Callable[..., BaseEmbedding]", attribute)
    except ImportError as err:
        # FIX: close the quote around the module path (message previously left
        # it unterminated), matching the LLM variant of this function.
        error_msg = f"Unsupported EMBED_PROVIDER specified: '{embed_provider}'. Could not find provider module 'providers.{embed_provider}'."
        raise ValueError(error_msg) from err
    except AttributeError as err:
        error_msg = f"Provider module '{embed_provider}' does not have an 'initialize_embed_model' function."
        raise ValueError(error_msg) from err
    except ValueError:
        # FIX: re-raise our own "function not found" ValueError unchanged; the
        # generic handler below previously converted it into a RuntimeError,
        # breaking the documented contract.
        raise
    except Exception as err:
        logger.error(
            f"An unexpected error occurred while loading provider '{embed_provider}': {err!r}",
            exc_info=True,
        )
        error_msg = f"Failed to load provider '{embed_provider}' due to an unexpected error."
        raise RuntimeError(error_msg) from err
    logger.info(f"Initializing embedding model for provider: {embed_provider}")
    try:
        # Provider initializers accept (endpoint, api_key, model, **extra).
        embedding: BaseEmbedding = initializer(
            embed_endpoint,
            embed_api_key,
            embed_model,
            **(embed_extra or {}),
        )
        logger.info(f"Embedding model initialized successfully for {embed_provider}")
        return embedding
    except TypeError as err:
        error_msg = f"Provider initializer 'initialize_embed_model' was called with incorrect arguments in '{embed_provider}'"
        logger.error(
            f"{error_msg}: {err!r}",
            exc_info=True,
        )
        raise RuntimeError(error_msg) from err
    except Exception as err:
        error_msg = f"Failed to initialize embedding model for provider '{embed_provider}'"
        logger.error(
            f"{error_msg}: {err!r}",
            exc_info=True,
        )
        raise RuntimeError(error_msg) from err
def initialize_llm_model(
    llm_provider: str,
    llm_model: str,
    llm_endpoint: str | None = None,
    llm_api_key: str | None = None,
    llm_extra: dict[str, Any] | None = None,
) -> LLM:
    """
    Create LLM model with the specified configuration.

    Dynamically loads the provider module based on the llm_provider parameter.

    Args:
        llm_provider: The name of the LLM provider (e.g., "openai", "ollama").
        llm_endpoint: The API endpoint for the LLM provider.
        llm_api_key: The API key for the LLM provider.
        llm_model: The name of the LLM model.
        llm_extra: Additional provider-specific configuration parameters.

    Returns:
        The initialized llm_model.

    Raises:
        ValueError: If the specified llm_provider is not supported or module/function not found.
        RuntimeError: If model initialization fails for the selected provider.
    """
    # Validate provider name: guards the dynamic import against values that
    # are not plain module names.
    if not llm_provider.replace("_", "").isalnum():
        error_msg = f"Invalid LLM_PROVIDER specified: '{llm_provider}'. Provider name must be alphanumeric or contain underscores."
        raise ValueError(error_msg)
    try:
        provider_module = importlib.import_module(
            f".{llm_provider}",
            package="providers",
        )
        logger.debug(f"Successfully imported provider module: providers.{llm_provider}")
        attribute = getattr(provider_module, "initialize_llm_model", None)
        if attribute is None:
            error_msg = f"Provider module '{llm_provider}' does not have an 'initialize_llm_model' function."
            raise ValueError(error_msg)  # noqa: TRY301
        initializer = cast("Callable[..., LLM]", attribute)
    except ImportError as err:
        error_msg = f"Unsupported LLM_PROVIDER specified: '{llm_provider}'. Could not find provider module 'providers.{llm_provider}'."
        raise ValueError(error_msg) from err
    except AttributeError as err:
        error_msg = f"Provider module '{llm_provider}' does not have an 'initialize_llm_model' function."
        raise ValueError(error_msg) from err
    except ValueError:
        # FIX: re-raise our own "function not found" ValueError unchanged; the
        # generic handler below previously converted it into a RuntimeError,
        # breaking the documented contract.
        raise
    except Exception as e:
        error_msg = f"An unexpected error occurred while loading provider '{llm_provider}': {e}"
        logger.error(error_msg, exc_info=True)
        raise RuntimeError(error_msg) from e
    logger.info(f"Initializing LLM model for provider: '{llm_provider}'")
    logger.debug(f"Args: llm_model='{llm_model}', llm_endpoint='{llm_endpoint}'")
    try:
        # Provider initializers accept (endpoint, api_key, model, **extra).
        llm: LLM = initializer(
            llm_endpoint,
            llm_api_key,
            llm_model,
            **(llm_extra or {}),
        )
        logger.info(f"LLM model initialized successfully for '{llm_provider}'.")
    except TypeError as e:
        error_msg = f"Provider initializer 'initialize_llm_model' in '{llm_provider}' was called with incorrect arguments: {e}"
        logger.error(error_msg, exc_info=True)
        raise RuntimeError(error_msg) from e
    except Exception as e:
        error_msg = f"Failed to initialize LLM model for provider '{llm_provider}': {e}"
        logger.error(
            error_msg,
            exc_info=True,
        )
        raise RuntimeError(error_msg) from e
    return llm
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/providers/ollama.py | Python | # src/providers/ollama.py
from typing import Any
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.llms.llm import LLM
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama
def initialize_embed_model(
    embed_endpoint: str,
    embed_api_key: str,  # noqa: ARG001
    embed_model: str,
    **embed_extra: Any,  # noqa: ANN401
) -> BaseEmbedding:
    """
    Build an Ollama embedding model.

    Ollama talks directly to a local endpoint and requires no API key; the
    key parameter exists only to satisfy the provider factory interface.

    Args:
        embed_endpoint: Base URL of the Ollama server.
        embed_api_key: Ignored.
        embed_model: Embedding model name.
        embed_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured Ollama embedding model.
    """
    return OllamaEmbedding(
        base_url=embed_endpoint,
        model_name=embed_model,
        **embed_extra,
    )
def initialize_llm_model(
    llm_endpoint: str,
    llm_api_key: str,  # noqa: ARG001
    llm_model: str,
    **llm_extra: Any,  # noqa: ANN401
) -> LLM:
    """
    Build an Ollama LLM.

    Ollama talks directly to a local endpoint and requires no API key; the
    key parameter exists only to satisfy the provider factory interface.

    Args:
        llm_endpoint: Base URL of the Ollama server.
        llm_api_key: Ignored.
        llm_model: LLM model name.
        llm_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured Ollama LLM.
    """
    return Ollama(
        base_url=llm_endpoint,
        model=llm_model,
        **llm_extra,
    )
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/providers/openai.py | Python | # src/providers/openai.py
from typing import Any
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.llms.llm import LLM
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
def initialize_embed_model(
    embed_endpoint: str,
    embed_api_key: str,
    embed_model: str,
    **embed_extra: Any,  # noqa: ANN401
) -> BaseEmbedding:
    """
    Build an OpenAI embedding model.

    Args:
        embed_endpoint: Base URL of the OpenAI-compatible API.
        embed_api_key: API key for the OpenAI API.
        embed_model: Embedding model name.
        embed_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured OpenAI embedding model.
    """
    return OpenAIEmbedding(
        api_key=embed_api_key,
        api_base=embed_endpoint,
        model=embed_model,
        **embed_extra,
    )
def initialize_llm_model(
    llm_endpoint: str,
    llm_api_key: str,
    llm_model: str,
    **llm_extra: Any,  # noqa: ANN401
) -> LLM:
    """
    Build an OpenAI LLM.

    Args:
        llm_endpoint: Base URL of the OpenAI-compatible API.
        llm_api_key: API key for the OpenAI API.
        llm_model: LLM model name.
        llm_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured OpenAI LLM.
    """
    return OpenAI(
        api_key=llm_api_key,
        api_base=llm_endpoint,
        model=llm_model,
        **llm_extra,
    )
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/providers/openrouter.py | Python | # src/providers/openrouter.py
from typing import Any
from llama_index.core.llms.llm import LLM
from llama_index.llms.openrouter import OpenRouter
def initialize_llm_model(
    llm_endpoint: str,
    llm_api_key: str,
    llm_model: str,
    **llm_extra: Any,  # noqa: ANN401
) -> LLM:
    """
    Build an OpenRouter LLM.

    Args:
        llm_endpoint: Base URL of the OpenRouter API.
        llm_api_key: API key for the OpenRouter API.
        llm_model: LLM model name.
        llm_extra: Extra keyword arguments forwarded to the constructor.

    Returns:
        The configured OpenRouter LLM.
    """
    return OpenRouter(
        api_key=llm_api_key,
        api_base=llm_endpoint,
        model=llm_model,
        **llm_extra,
    )
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/services/indexing_history.py | Python | import json
import os
from datetime import datetime
from typing import Any
from libs.db import get_db_connection
from libs.logger import logger
from libs.utils import get_node_uri
from llama_index.core.schema import Document
from models.indexing_history import IndexingHistory
class IndexingHistoryService:
    # SQLite-backed persistence for per-document indexing status records.
    # Connections come from libs.db.get_db_connection.

    def delete_indexing_status(self, uri: str) -> None:
        """Delete indexing status for a specific file."""
        with get_db_connection() as conn:
            conn.execute(
                """
                DELETE FROM indexing_history
                WHERE uri = ?
                """,
                (uri,),
            )
            conn.commit()

    def delete_indexing_status_by_document_id(self, document_id: str) -> None:
        """Delete indexing status for a specific document."""
        with get_db_connection() as conn:
            conn.execute(
                """
                DELETE FROM indexing_history
                WHERE document_id = ?
                """,
                (document_id,),
            )
            conn.commit()

    def update_indexing_status(
        self,
        doc: Document,
        status: str,
        error_message: str | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Update the indexing status in the database."""
        # Upsert keyed on the document: existence is checked by document_id,
        # then the row is updated or a new one inserted.
        content_hash = doc.hash
        # Get URI from metadata if available
        uri = get_node_uri(doc)
        if not uri:
            # Without a URI the record cannot be attributed; skip silently.
            logger.warning("URI not found for document: %s", doc.doc_id)
            return
        record = IndexingHistory(
            id=None,
            uri=uri,
            content_hash=content_hash,
            status=status,
            error_message=error_message,
            document_id=doc.doc_id,
            metadata=metadata,
        )
        with get_db_connection() as conn:
            # Check if record exists
            existing = conn.execute(
                "SELECT id FROM indexing_history WHERE document_id = ?",
                (doc.doc_id,),
            ).fetchone()
            if existing:
                # Update existing record
                # NOTE(review): existence is checked by document_id but the
                # UPDATE filters by uri — if one uri maps to multiple
                # documents this overwrites all of them. Confirm whether the
                # WHERE clause should use document_id instead.
                conn.execute(
                    """
                    UPDATE indexing_history
                    SET content_hash = ?, status = ?, error_message = ?, document_id = ?, metadata = ?
                    WHERE uri = ?
                    """,
                    (
                        record.content_hash,
                        record.status,
                        record.error_message,
                        record.document_id,
                        json.dumps(record.metadata) if record.metadata else None,
                        record.uri,
                    ),
                )
            else:
                # Insert new record
                conn.execute(
                    """
                    INSERT INTO indexing_history
                    (uri, content_hash, status, error_message, document_id, metadata)
                    VALUES (?, ?, ?, ?, ?, ?)
                    """,
                    (
                        record.uri,
                        record.content_hash,
                        record.status,
                        record.error_message,
                        record.document_id,
                        json.dumps(record.metadata) if record.metadata else None,
                    ),
                )
            conn.commit()

    def get_indexing_status(self, doc: Document | None = None, base_uri: str | None = None) -> list[IndexingHistory]:
        """Get indexing status from the database."""
        # Three query modes: a single document (latest row for its uri+hash),
        # all documents under a base_uri prefix, or the latest row per uri
        # across the whole table.
        with get_db_connection() as conn:
            if doc:
                uri = get_node_uri(doc)
                if not uri:
                    logger.warning("URI not found for document: %s", doc.doc_id)
                    return []
                content_hash = doc.hash
                # For a specific file, get its latest status
                query = """
                SELECT *
                FROM indexing_history
                WHERE uri = ? and content_hash = ?
                ORDER BY timestamp DESC LIMIT 1
                """
                params = (uri, content_hash)
            elif base_uri:
                # For files in a specific directory, get their latest status
                query = """
                WITH RankedHistory AS (
                    SELECT *,
                           ROW_NUMBER() OVER (PARTITION BY document_id ORDER BY timestamp DESC) as rn
                    FROM indexing_history
                    WHERE uri LIKE ? || '%'
                )
                SELECT id, uri, content_hash, status, timestamp, error_message, document_id, metadata
                FROM RankedHistory
                WHERE rn = 1
                ORDER BY timestamp DESC
                """
                # Append a path separator so /foo does not match /foobar.
                params = (base_uri,) if base_uri.endswith(os.path.sep) else (base_uri + os.path.sep,)
            else:
                # For all files, get their latest status
                query = """
                WITH RankedHistory AS (
                    SELECT *,
                           ROW_NUMBER() OVER (PARTITION BY uri ORDER BY timestamp DESC) as rn
                    FROM indexing_history
                )
                SELECT id, uri, content_hash, status, timestamp, error_message, document_id, metadata
                FROM RankedHistory
                WHERE rn = 1
                ORDER BY timestamp DESC
                """
                params = ()
            rows = conn.execute(query, params).fetchall()
            result = []
            for row in rows:
                row_dict = dict(row)
                # Parse metadata JSON if it exists
                if row_dict.get("metadata"):
                    try:
                        row_dict["metadata"] = json.loads(row_dict["metadata"])
                    except json.JSONDecodeError:
                        row_dict["metadata"] = None
                # Parse timestamp string to datetime if needed
                if isinstance(row_dict.get("timestamp"), str):
                    row_dict["timestamp"] = datetime.fromisoformat(
                        row_dict["timestamp"].replace("Z", "+00:00"),
                    )
                result.append(IndexingHistory(**row_dict))
            return result


# Module-level singleton used by the API endpoints.
indexing_history_service = IndexingHistoryService()
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
py/rag-service/src/services/resource.py | Python | """Resource Service."""
from libs.db import get_db_connection
from models.resource import Resource
class ResourceService:
    """Resource Service."""

    # Thin data-access layer over the `resources` table; connections are
    # provided by libs.db.get_db_connection.

    def add_resource_to_db(self, resource: Resource) -> None:
        """Add a resource to the database."""
        with get_db_connection() as conn:
            conn.execute(
                """
                INSERT INTO resources (name, uri, type, status, indexing_status, created_at)
                VALUES (?, ?, ?, ?, ?, ?)
                """,
                (
                    resource.name,
                    resource.uri,
                    resource.type,
                    resource.status,
                    resource.indexing_status,
                    resource.created_at,
                ),
            )
            conn.commit()

    def update_resource_indexing_status(self, uri: str, indexing_status: str, indexing_status_message: str) -> None:
        """Update resource indexing status in the database."""
        # "indexing" stamps the start time; every other status stamps the
        # last-indexed time instead.
        with get_db_connection() as conn:
            if indexing_status == "indexing":
                conn.execute(
                    """
                    UPDATE resources
                    SET indexing_status = ?, indexing_status_message = ?, indexing_started_at = CURRENT_TIMESTAMP
                    WHERE uri = ?
                    """,
                    (indexing_status, indexing_status_message, uri),
                )
            else:
                conn.execute(
                    """
                    UPDATE resources
                    SET indexing_status = ?, indexing_status_message = ?, last_indexed_at = CURRENT_TIMESTAMP
                    WHERE uri = ?
                    """,
                    (indexing_status, indexing_status_message, uri),
                )
            conn.commit()

    def update_resource_status(self, uri: str, status: str, error: str | None = None) -> None:
        """Update resource status in the database."""
        # Activation also refreshes last_indexed_at; deactivation only
        # records the status and error.
        with get_db_connection() as conn:
            if status == "active":
                conn.execute(
                    """
                    UPDATE resources
                    SET status = ?, last_indexed_at = CURRENT_TIMESTAMP, last_error = ?
                    WHERE uri = ?
                    """,
                    (status, error, uri),
                )
            else:
                conn.execute(
                    """
                    UPDATE resources
                    SET status = ?, last_error = ?
                    WHERE uri = ?
                    """,
                    (status, error, uri),
                )
            conn.commit()

    def get_resource(self, uri: str) -> Resource | None:
        """Get resource from the database."""
        with get_db_connection() as conn:
            row = conn.execute(
                "SELECT * FROM resources WHERE uri = ?",
                (uri,),
            ).fetchone()
            if row:
                return Resource(**dict(row))
            return None

    def get_resource_by_name(self, name: str) -> Resource | None:
        """Get resource by name from the database."""
        with get_db_connection() as conn:
            row = conn.execute(
                "SELECT * FROM resources WHERE name = ?",
                (name,),
            ).fetchone()
            if row:
                return Resource(**dict(row))
            return None

    def get_all_resources(self) -> list[Resource]:
        """Get all resources from the database."""
        with get_db_connection() as conn:
            rows = conn.execute("SELECT * FROM resources ORDER BY created_at DESC").fetchall()
            return [Resource(**dict(row)) for row in rows]


# Module-level singleton used by the API endpoints.
resource_service = ResourceService()
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
scripts/lua-typecheck.sh | Shell | #!/usr/bin/env bash
set -e
# This script performs a Lua typecheck, with different behaviors for local and CI environments.
#
# It supports two local modes:
# 1. Default (Managed): Downloads all dependencies into a project-local ./target/deps directory.
# 2. --live: Uses the system's installed `nvim` and `lua-language-server`. It does not
# manage plugin dependencies, assuming the user has them configured.
verbose=false
# Print a message to stderr, keeping stdout clean for values callers
# capture via command substitution (e.g. VIMRUNTIME paths).
log() {
  echo "$1" >&2
}
# Print a message to stderr only when --verbose/-v was given.
log_verbose() {
  if [ "$verbose" = "true" ]; then
    echo "$1" >&2
  fi
}
# Print an error to stderr and abort the whole script with status 1.
die() {
  echo "Error: $1" >&2
  exit 1
}
# Configure the environment to use the system's own Neovim and
# lua-language-server instead of project-managed copies in ./target.
# Exports DEPS_PATH and VIMRUNTIME; may prepend Mason's bin dir to PATH.
handle_live_mode() {
  # lazy.nvim's default plugin directory; assumes the user's own setup.
  export DEPS_PATH="$HOME/.local/share/nvim/lazy"
  log_verbose "Setting DEPS_PATH for live mode to: $DEPS_PATH"
  command -v nvim &>/dev/null || die "nvim command not found. Please install Neovim."
  if command -v lua-language-server &>/dev/null; then
    log_verbose "Found lua-language-server in PATH."
  else
    log_verbose "lua-language-server not found in PATH. Checking Mason..."
    # Fall back to a Mason-managed install when one exists.
    local mason_luals_path="$HOME/.local/share/nvim/mason/bin/lua-language-server"
    if [ -x "$mason_luals_path" ]; then
      log_verbose "Found lua-language-server in Mason packages."
      export PATH="$HOME/.local/share/nvim/mason/bin:$PATH"
    else
      die "lua-language-server not found in PATH or in Mason packages. Please install it."
    fi
  fi
  # $VIMRUNTIME is not supposed to be expanded below
  # shellcheck disable=SC2016
  # Ask nvim itself where its runtime lives; 2>&1 because the :echo output
  # of a headless nvim goes to stderr.
  VIMRUNTIME="$(nvim --headless --noplugin -u NONE -c 'echo $VIMRUNTIME' +qa 2>&1)"
  export VIMRUNTIME
}
# Clone/update the plugin dependencies used for typechecking and export
# DEPS_PATH to point at them.
# $1: destination directory for the checkouts
# $2: extra flags forwarded to scripts/setup-deps.sh (e.g. --verbose)
manage_plugin_dependencies() {
  local deps_dir=$1
  local setup_deps_flags=$2
  log "Cloning/updating dependencies to $deps_dir..."
  ./scripts/setup-deps.sh "$setup_deps_flags" clone "$deps_dir"
  export DEPS_PATH="$deps_dir"
  log_verbose "Set DEPS_PATH to $DEPS_PATH"
}
# Run lua-language-server in --check mode over ./lua.
# Requires VIMRUNTIME to be exported; $1 is the generated luarc config path.
run_typechecker() {
  local config_path=$1
  if [ -z "$VIMRUNTIME" ]; then
    die "VIMRUNTIME is not set. Cannot proceed."
  fi
  if [ -z "$config_path" ]; then
    die "Luarc config path is not set. Cannot proceed."
  fi
  command -v lua-language-server &>/dev/null || die "lua-language-server not found in PATH."
  log "Running Lua typechecker..."
  # --checklevel=Information reports hints/information as well as errors.
  lua-language-server --check="$PWD/lua" \
    --loglevel=trace \
    --configpath="$config_path" \
    --checklevel=Information
  log_verbose "Typecheck complete."
}
# Entry point: pick a mode (managed | live | ci), prepare the toolchain,
# generate the luarc config, and run the typechecker.
#   managed: download nvim runtime, luals, and plugin deps into ./target
#   live:    reuse the system's nvim / luals / plugin setup
#   ci:      like managed but nvim is provided by the CI image
main() {
  local dest_dir="$PWD/target/tests"
  local luarc_path="$dest_dir/luarc.json"
  local mode="managed"
  local setup_deps_flags=""
  for arg in "$@"; do
    case $arg in
      --live)
        mode="live"
        shift
        ;;
      --verbose|-v)
        verbose=true
        setup_deps_flags="--verbose"
        shift
        ;;
    esac
  done
  if [ "$GITHUB_ACTIONS" = "true" ]; then
    mode="ci"
    # Always be verbose in CI
    setup_deps_flags="--verbose"
  fi
  log "mode: $mode"
  if [ "$mode" == "live" ]; then
    handle_live_mode
  else
    log "Setting up environment in: $dest_dir"
    mkdir -p "$dest_dir"
    if [ "$mode" == "managed" ]; then
      # CI images ship nvim already; only managed mode installs a runtime.
      log "Installing nvim runtime..."
      VIMRUNTIME="$(./scripts/setup-deps.sh "$setup_deps_flags" install-nvim "$dest_dir")"
      export VIMRUNTIME
      log_verbose "Installed nvim runtime at: $VIMRUNTIME"
    fi
    log "Installing lua-language-server..."
    local luals_bin_path
    luals_bin_path="$(./scripts/setup-deps.sh "$setup_deps_flags" install-luals "$dest_dir")"
    export PATH="$luals_bin_path:$PATH"
    log_verbose "Added $luals_bin_path to PATH"
    # Fix: previously this clone/export logic was duplicated inline while
    # the manage_plugin_dependencies helper sat unused; call the helper.
    manage_plugin_dependencies "$dest_dir/deps" "$setup_deps_flags"
  fi
  # Intentionally unquoted: an empty flag must expand to no argument.
  # shellcheck disable=SC2086
  ./scripts/setup-deps.sh $setup_deps_flags generate-luarc "$luarc_path"
  log "VIMRUNTIME: $VIMRUNTIME"
  log "DEPS_PATH: $DEPS_PATH"
  run_typechecker "$luarc_path"
}
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
scripts/run-luatest.sh | Shell | #!/usr/bin/env bash
set -e
# Working directories for the test run (mirrors the layout used by the
# other scripts in scripts/).
DEST_DIR="$PWD/target/tests"
DEPS_DIR="$DEST_DIR/deps"
# Print to stderr so stdout stays clean.
log() {
  echo "$1" >&2
}
# Verify the external search tools the test suite shells out to are
# installed; abort with a descriptive message otherwise.
check_tools() {
  local spec tool label
  for spec in "rg|ripgrep (rg)" "ag|silversearcher-ag (ag)"; do
    tool="${spec%%|*}"
    label="${spec#*|}"
    if ! command -v "$tool" &>/dev/null; then
      log "Error: $label is not installed. Please install it."
      exit 1
    fi
  done
}
# Ensure plenary.nvim (the test harness) is present under $DEPS_DIR,
# updating it in place when it is already a git checkout.
setup_deps() {
  local plenary_path="$DEPS_DIR/plenary.nvim"
  if [ -d "$plenary_path/.git" ]; then
    log "plenary.nvim already exists. Updating..."
    # Hard-reset to the remote default branch (main first, then master).
    (
      cd "$plenary_path"
      git fetch -q
      if git show-ref --verify --quiet refs/remotes/origin/main; then
        git reset -q --hard origin/main
      elif git show-ref --verify --quiet refs/remotes/origin/master; then
        git reset -q --hard origin/master
      fi
    )
  else
    # A non-git directory at the path is stale; replace it wholesale.
    if [ -d "$plenary_path" ]; then
      log "Removing non-git plenary.nvim directory and re-cloning."
      rm -rf "$plenary_path"
    fi
    log "Cloning plenary.nvim..."
    mkdir -p "$DEPS_DIR"
    git clone --depth 1 "https://github.com/nvim-lua/plenary.nvim.git" "$plenary_path"
  fi
}
# Run the plenary test harness headlessly over tests/ with a clean nvim.
run_tests() {
  log "Running tests..."
  nvim --headless --clean \
    -c "set runtimepath+=$DEPS_DIR/plenary.nvim" \
    -c "lua require('plenary.test_harness').test_directory('tests/', { minimal_init = 'NONE' })"
}
main() {
check_tools
setup_deps
run_tests
}
main "$@"
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
scripts/setup-deps.sh | Shell | #!/bin/bash
# Plugin repositories (owner/name slugs) cloned for the typecheck
# workspace; their lua/ trees are injected into the generated luarc.
DEPS=(
  "folke/neodev.nvim"
  "nvim-lua/plenary.nvim"
  "MunifTanjim/nui.nvim"
  "stevearc/dressing.nvim"
  "folke/snacks.nvim"
  "echasnovski/mini.nvim"
  "nvim-telescope/telescope.nvim"
  "hrsh7th/nvim-cmp"
  "ibhagwan/fzf-lua"
  "nvim-tree/nvim-web-devicons"
  "zbirenbaum/copilot.lua"
  "folke/lazy.nvim"
)
# Pinned lua-language-server release downloaded by install_luals.
LUALS_VERSION="3.13.6"
# Global verbosity flag, flipped by -v/--verbose in main().
verbose=false
# Print a message to stderr (stdout is reserved for values callers capture,
# e.g. the paths echoed by install_luals / install_nvim_runtime).
log() {
  echo "$1" >&2
}
# Print a message to stderr only when -v/--verbose was given.
log_verbose() {
  if [ "$verbose" = "true" ]; then
    echo "$1" >&2
  fi
}
# Print an error and abort with status 1.
# Fix: install_nvim_runtime already calls `die`, but this script never
# defined it — the call produced "die: command not found" and, with no
# `set -e`, execution silently continued. Define it here.
die() {
  echo "Error: $1" >&2
  exit 1
}
# Clone or fast-update one dependency checkout (run in parallel by
# clone_deps, so all output goes through log/log_verbose to stderr).
# $1: "owner/repo" slug   $2: parent directory for checkouts
process_single_dep() {
  local dep="$1"
  local deps_dir="$2"
  local repo_name="$(echo "$dep" | cut -d'/' -f2)"
  local repo_path="$deps_dir/$repo_name"
  if [ -d "$repo_path/.git" ]; then
    log_verbose "Updating existing repository: $repo_path"
    # Subshell keeps the cd local; hard-reset to the remote default branch.
    (
      cd "$repo_path"
      git fetch -q
      if git show-ref --verify --quiet refs/remotes/origin/main; then
        git reset -q --hard origin/main
      elif git show-ref --verify --quiet refs/remotes/origin/master; then
        git reset -q --hard origin/master
      else
        log "Could not find main or master branch for $repo_name"
        return 1
      fi
    )
  else
    # A non-git directory at the path is stale; replace it wholesale.
    if [ -d "$repo_path" ]; then
      log_verbose "Directory '$repo_path' exists but is not a git repository. Removing and re-cloning."
      rm -rf "$repo_path"
    fi
    log_verbose "Cloning new repository: $dep to $repo_path"
    git clone -q --depth 1 "https://github.com/${dep}.git" "$repo_path"
  fi
}
# Clone/update every entry of DEPS in parallel into $1 (default: ./deps).
# Returns non-zero if any dependency failed.
clone_deps() {
  local deps_dir=${1:-"$PWD/deps"}
  log_verbose "Cloning dependencies into: $deps_dir (parallel mode)"
  mkdir -p "$deps_dir"
  # Array to store background process PIDs
  local pids=()
  # Start all dependency processes in parallel
  for dep in "${DEPS[@]}"; do
    process_single_dep "$dep" "$deps_dir" &
    pids+=($!)
  done
  # Wait for all background processes to complete and check their exit status
  local failed_count=0
  for pid in "${pids[@]}"; do
    if ! wait "$pid"; then
      # NOTE: (( x++ )) from 0 itself exits with status 1; harmless here
      # because this script does not use `set -e`.
      ((failed_count++))
    fi
  done
  if [ "$failed_count" -gt 0 ]; then
    log "Warning: $failed_count dependencies failed to process"
    return 1
  fi
  log_verbose "All dependencies processed successfully"
}
# Download and unpack the pinned lua-language-server release for the host
# platform into $1 (default: $PWD/target/tests). Skips the download when
# the versioned directory already exists.
# Prints the release's bin/ directory on stdout so callers can extend PATH.
install_luals() {
  local dest_dir=${1:-"$PWD/target/tests"}
  # Detect operating system and architecture to pick the release asset.
  local os_name=""
  local arch=""
  local file_ext=""
  case "$(uname -s)" in
    Linux*)
      os_name="linux"
      file_ext="tar.gz"
      ;;
    Darwin*)
      os_name="darwin"
      file_ext="tar.gz"
      ;;
    CYGWIN*|MINGW*|MSYS*)
      os_name="win32"
      file_ext="zip"
      ;;
    *)
      log "Unsupported operating system: $(uname -s)"
      return 1
      ;;
  esac
  case "$(uname -m)" in
    x86_64|amd64)
      arch="x64"
      ;;
    arm64|aarch64)
      arch="arm64"
      ;;
    *)
      log "Unsupported architecture: $(uname -m), falling back to x64"
      arch="x64"
      ;;
  esac
  # (Removed the unused `extract_cmd` variable: it was assigned but never
  # referenced — extraction is performed inline below.)
  local platform="${os_name}-${arch}"
  local luals_url_template="https://github.com/LuaLS/lua-language-server/releases/download/__VERSION__/lua-language-server-__VERSION__-__PLATFORM__.__EXT__"
  local luals_download_url="${luals_url_template//__VERSION__/$LUALS_VERSION}"
  luals_download_url="${luals_download_url//__PLATFORM__/$platform}"
  luals_download_url="${luals_download_url//__EXT__/$file_ext}"
  local luals_dir="$dest_dir/lua-language-server-${LUALS_VERSION}-${platform}"
  if [ ! -d "$luals_dir" ]; then
    log "Installing lua-language-server ${LUALS_VERSION} for ${platform}..."
    mkdir -p "$luals_dir"
    if [ "$file_ext" = "tar.gz" ]; then
      curl -sSL "${luals_download_url}" | tar zx --directory "$luals_dir"
    else
      # Zip archives cannot be streamed; download to a temp file first.
      local temp_file="/tmp/luals-${LUALS_VERSION}.zip"
      curl -sSL "${luals_download_url}" -o "$temp_file"
      unzip -q "$temp_file" -d "$luals_dir"
      rm -f "$temp_file"
    fi
  else
    log_verbose "lua-language-server is already installed in $luals_dir"
  fi
  echo "$luals_dir/bin"
}
# Resolve the Neovim version pinned in the lua.yaml CI workflow, download
# the matching linux release, and unpack only its runtime/ tree into
# $1 (default: $PWD/target/tests). Prints the runtime dir on stdout.
install_nvim_runtime() {
  local dest_dir=${1:-"$PWD/target/tests"}
  command -v yq &>/dev/null || die "yq is not installed for parsing GitHub API responses."
  # The workflow's matrix is the single source of truth for the version.
  local nvim_version
  nvim_version="$(yq -r '.jobs.typecheck.strategy.matrix.nvim_version[0]' .github/workflows/lua.yaml)"
  log_verbose "Parsed nvim version from workflow: $nvim_version"
  log_verbose "Resolving ${nvim_version} Neovim release from GitHub API..."
  local api_url="https://api.github.com/repos/neovim/neovim/releases"
  if [ "$nvim_version" == "stable" ]; then
    api_url="$api_url/latest"
  else
    api_url="$api_url/tags/${nvim_version}"
  fi
  local release_data
  release_data="$(curl -s "$api_url")"
  if [ -z "$release_data" ] || echo "$release_data" | yq -e '.message == "Not Found"' > /dev/null; then
    die "Failed to fetch release data from GitHub API for version '${nvim_version}'."
  fi
  # Find the correct asset by regex and extract its name and download URL.
  # (Asset naming changed across releases: nvim-linux64 vs nvim-linux-x86_64.)
  local asset_info
  asset_info="$(echo "$release_data" | \
    yq -r '.assets[] | select(.name | test("nvim-linux(64|-x86_64)\\.tar\\.gz$")) | .name + " " + .browser_download_url')"
  if [ -z "$asset_info" ]; then
    die "Could not find a suitable linux tarball asset for version '${nvim_version}'."
  fi
  local asset_name
  local download_url
  read -r asset_name download_url <<< "$asset_info"
  # Extract the concrete vX.Y.Z tag so "stable" resolves to a real version.
  local actual_version
  actual_version="$(echo "$download_url" | grep -E -o 'v[0-9]+\.[0-9]+\.[0-9]+' | head -n 1)"
  if [ -z "$actual_version" ]; then
    die "Could not resolve a version tag from URL: $download_url"
  fi
  log_verbose "Resolved Neovim version is ${actual_version}"
  local runtime_dir="$dest_dir/nvim-${actual_version}-runtime"
  if [ ! -d "$runtime_dir" ]; then
    log "Installing Neovim runtime (${actual_version})..."
    mkdir -p "$runtime_dir"
    # --strip-components=4 drops <asset>/share/nvim/runtime so the runtime
    # tree lands directly in $runtime_dir.
    curl -sSL "${download_url}" | \
      tar xzf - -C "$runtime_dir" --strip-components=4 \
      "${asset_name%.tar.gz}/share/nvim/runtime"
  else
    log_verbose "Neovim runtime (${actual_version}) is already installed"
  fi
  echo "$runtime_dir"
}
# Render luarc.json.template into $1 (default: $PWD/target/tests/luarc.json),
# substituting {{DEPS}} with one workspace-library entry per DEPS repo.
# The literal "\n" in each entry is turned into a real newline by sed.
generate_luarc() {
  local luarc_path=${1:-"$PWD/target/tests/luarc.json"}
  local luarc_template="luarc.json.template"
  log_verbose "Generating luarc file at: $luarc_path"
  mkdir -p "$(dirname "$luarc_path")"
  local lua_deps=""
  # Fix: dep/repo_name were implicitly global; declare them local, matching
  # the convention used in process_single_dep.
  local dep repo_name
  for dep in "${DEPS[@]}"; do
    repo_name="$(echo "$dep" | cut -d'/' -f2)"
    lua_deps="${lua_deps},\n \"\$DEPS_PATH/${repo_name}/lua\""
  done
  sed "s#{{DEPS}}#${lua_deps}#" "$luarc_template" > "$luarc_path"
}
# Entry point: parse flags, then dispatch the first positional word as a
# subcommand; remaining words are forwarded as that subcommand's arguments.
main() {
  local command=""
  local args=()
  while [[ $# -gt 0 ]]; do
    case $1 in
      -v|--verbose)
        verbose=true
        ;;
      *)
        if [ -z "$command" ]; then
          command=$1
        else
          args+=("$1")
        fi
        ;;
    esac
    shift
  done
  case "$command" in
    clone)
      clone_deps "${args[@]}"
      ;;
    generate-luarc)
      generate_luarc "${args[@]}"
      ;;
    install-luals)
      install_luals "${args[@]}"
      ;;
    install-nvim)
      install_nvim_runtime "${args[@]}"
      ;;
    *)
      echo "Usage: $0 [-v|--verbose] {clone [dir]|generate-luarc [path]|install-luals [dir]|install-nvim [dir]}"
      exit 1
      ;;
  esac
}
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
syntax/jinja.vim | Vim Script | " reference: https://github.com/lepture/vim-jinja/blob/master/syntax/jinja.vim
" Guard: bail out if a syntax has already been activated for this buffer.
if exists("b:current_syntax")
  finish
endif
if !exists("main_syntax")
  let main_syntax = 'html'
endif
" Jinja templates embed HTML, so load the HTML syntax as the base layer,
" then clear the guard it sets so our own definitions can be installed.
runtime! syntax/html.vim
unlet b:current_syntax
syntax case match
" jinja template built-in tags and parameters
" 'comment' doesn't appear here because it gets special treatment
syn keyword jinjaStatement contained if else elif endif is not
syn keyword jinjaStatement contained for in recursive endfor
syn keyword jinjaStatement contained raw endraw
syn keyword jinjaStatement contained block endblock extends super scoped
syn keyword jinjaStatement contained macro endmacro call endcall
syn keyword jinjaStatement contained from import as do continue break
syn keyword jinjaStatement contained filter endfilter set endset
syn keyword jinjaStatement contained include ignore missing
syn keyword jinjaStatement contained with without context endwith
syn keyword jinjaStatement contained trans endtrans pluralize
syn keyword jinjaStatement contained autoescape endautoescape
" jinja templete built-in filters
syn keyword jinjaFilter contained abs attr batch capitalize center default
syn keyword jinjaFilter contained dictsort escape filesizeformat first
syn keyword jinjaFilter contained float forceescape format groupby indent
syn keyword jinjaFilter contained int join last length list lower pprint
syn keyword jinjaFilter contained random replace reverse round safe slice
syn keyword jinjaFilter contained sort string striptags sum
syn keyword jinjaFilter contained title trim truncate upper urlize
syn keyword jinjaFilter contained wordcount wordwrap
" jinja template built-in tests
syn keyword jinjaTest contained callable defined divisibleby escaped
syn keyword jinjaTest contained even iterable lower mapping none number
syn keyword jinjaTest contained odd sameas sequence string undefined upper
syn keyword jinjaFunction contained range lipsum dict cycler joiner
" Keywords to highlight within comments
syn keyword jinjaTodo contained TODO FIXME XXX
" jinja template constants (always surrounded by double quotes)
syn region jinjaArgument contained start=/"/ skip=/\\"/ end=/"/
syn region jinjaArgument contained start=/'/ skip=/\\'/ end=/'/
syn keyword jinjaArgument contained true false
" Mark illegal characters within tag and variables blocks
syn match jinjaTagError contained "#}\|{{\|[^%]}}\|[&#]"
syn match jinjaVarError contained "#}\|{%\|%}\|[<>!&#%]"
syn cluster jinjaBlocks add=jinjaTagBlock,jinjaVarBlock,jinjaComBlock,jinjaComment
" jinja template tag and variable blocks
" containedin=ALLBUT,@jinjaBlocks lets Jinja markup match inside any HTML
" group without recursing into other Jinja blocks.
syn region jinjaTagBlock start="{%" end="%}" contains=jinjaStatement,jinjaFilter,jinjaArgument,jinjaFilter,jinjaTest,jinjaTagError display containedin=ALLBUT,@jinjaBlocks
syn region jinjaVarBlock start="{{" end="}}" contains=jinjaFilter,jinjaArgument,jinjaVarError display containedin=ALLBUT,@jinjaBlocks
syn region jinjaComBlock start="{#" end="#}" contains=jinjaTodo containedin=ALLBUT,@jinjaBlocks
" Link the Jinja groups to standard highlight groups.
hi def link jinjaTagBlock PreProc
hi def link jinjaVarBlock PreProc
hi def link jinjaStatement Statement
hi def link jinjaFunction Function
hi def link jinjaTest Type
hi def link jinjaFilter Identifier
hi def link jinjaArgument Constant
hi def link jinjaTagError Error
hi def link jinjaVarError Error
hi def link jinjaError Error
hi def link jinjaComment Comment
hi def link jinjaComBlock Comment
hi def link jinjaTodo Todo
let b:current_syntax = "jinja"
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/libs/acp_client_spec.lua | Lua | local ACPClient = require("avante.libs.acp_client")
local stub = require("luassert.stub")
describe("ACPClient", function()
local schedule_stub
local setup_transport_stub
-- Make vim.schedule run its callback synchronously and stub out real
-- transport setup, so handlers under test execute deterministically
-- without spawning external processes.
before_each(function()
  schedule_stub = stub(vim, "schedule")
  schedule_stub.invokes(function(fn) fn() end)
  setup_transport_stub = stub(ACPClient, "_setup_transport")
end)
after_each(function()
  schedule_stub:revert()
  setup_transport_stub:revert()
end)
-- Covers the fs/read_text_file request handler: success/error callbacks,
-- parameter validation, and the missing-handler case. _send_error /
-- _send_result are stubbed to capture the JSON-RPC response.
describe("_handle_read_text_file", function()
  it("should call error_callback when file read fails", function()
    local sent_error = nil
    local handler_called = false
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_read_file = function(path, line, limit, success_callback, err_callback)
          handler_called = true
          err_callback("File not found", ACPClient.ERROR_CODES.RESOURCE_NOT_FOUND)
        end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_read_text_file(123, { sessionId = "test-session", path = "/nonexistent/file.txt" })
    assert.is_true(handler_called)
    assert.is_not_nil(sent_error)
    assert.equals(123, sent_error.id)
    assert.equals("File not found", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.RESOURCE_NOT_FOUND, sent_error.code)
  end)
  it("should use default error message when error_callback called with nil", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_read_file = function(path, line, limit, success_callback, err_callback) err_callback(nil, nil) end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_read_text_file(456, { sessionId = "test-session", path = "/bad/file.txt" })
    assert.is_not_nil(sent_error)
    assert.equals(456, sent_error.id)
    -- Fallback message is supplied by the client when the handler gives none.
    assert.equals("Failed to read file", sent_error.message)
    assert.is_nil(sent_error.code)
  end)
  it("should call success_callback when file read succeeds", function()
    local sent_result = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_read_file = function(path, line, limit, success_callback, err_callback) success_callback("file contents") end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_result = stub().invokes(function(self, id, result) sent_result = { id = id, result = result } end)
    client:_handle_read_text_file(789, { sessionId = "test-session", path = "/existing/file.txt" })
    assert.is_not_nil(sent_result)
    assert.equals(789, sent_result.id)
    assert.equals("file contents", sent_result.result.content)
  end)
  it("should send error when params are invalid (missing sessionId)", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_read_file = function() end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_read_text_file(100, { path = "/file.txt" })
    assert.is_not_nil(sent_error)
    assert.equals(100, sent_error.id)
    assert.equals("Invalid fs/read_text_file params", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.INVALID_PARAMS, sent_error.code)
  end)
  it("should send error when params are invalid (missing path)", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_read_file = function() end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_read_text_file(200, { sessionId = "test-session" })
    assert.is_not_nil(sent_error)
    assert.equals(200, sent_error.id)
    assert.equals("Invalid fs/read_text_file params", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.INVALID_PARAMS, sent_error.code)
  end)
  it("should send error when handler is not configured", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {},
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_read_text_file(300, { sessionId = "test-session", path = "/file.txt" })
    assert.is_not_nil(sent_error)
    assert.equals(300, sent_error.id)
    assert.equals("fs/read_text_file handler not configured", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.METHOD_NOT_FOUND, sent_error.code)
  end)
end)
-- Covers the fs/write_text_file request handler: parameter validation
-- (sessionId, path, content are all required) and the missing-handler case.
describe("_handle_write_text_file", function()
  it("should send error when params are invalid (missing sessionId)", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_write_file = function() end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_write_text_file(400, { path = "/file.txt", content = "data" })
    assert.is_not_nil(sent_error)
    assert.equals(400, sent_error.id)
    assert.equals("Invalid fs/write_text_file params", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.INVALID_PARAMS, sent_error.code)
  end)
  it("should send error when params are invalid (missing path)", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_write_file = function() end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_write_text_file(500, { sessionId = "test-session", content = "data" })
    assert.is_not_nil(sent_error)
    assert.equals(500, sent_error.id)
    assert.equals("Invalid fs/write_text_file params", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.INVALID_PARAMS, sent_error.code)
  end)
  it("should send error when params are invalid (missing content)", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_write_file = function() end,
      },
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_write_text_file(600, { sessionId = "test-session", path = "/file.txt" })
    assert.is_not_nil(sent_error)
    assert.equals(600, sent_error.id)
    assert.equals("Invalid fs/write_text_file params", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.INVALID_PARAMS, sent_error.code)
  end)
  it("should send error when handler is not configured", function()
    local sent_error = nil
    local mock_config = {
      transport_type = "stdio",
      handlers = {},
    }
    local client = ACPClient:new(mock_config)
    client._send_error = stub().invokes(
      function(self, id, message, code) sent_error = { id = id, message = message, code = code } end
    )
    client:_handle_write_text_file(700, { sessionId = "test-session", path = "/file.txt", content = "data" })
    assert.is_not_nil(sent_error)
    assert.equals(700, sent_error.id)
    assert.equals("fs/write_text_file handler not configured", sent_error.message)
    assert.equals(ACPClient.ERROR_CODES.METHOD_NOT_FOUND, sent_error.code)
  end)
end)
-- End-to-end flow through a fake transport: session/new carries the MCP
-- server list, and a tool_call session/update from the agent reaches the
-- on_session_update handler. vim.schedule is synchronous (see before_each),
-- so the scheduled responses run inline during :send.
describe("MCP tool flow", function()
  local MCP_TOOL_UUID = "mcp-test-uuid-12345-67890"
  it("receives MCP tool result via session/update when mcp_servers configured", function()
    local sent_request = nil
    local session_updates = {}
    local client
    -- Fake transport: replies to session/new with a session id, and to
    -- session/prompt with one tool_call update followed by end_turn.
    local mock_transport = {
      send = function(self, data)
        local decoded = vim.json.decode(data)
        if decoded.method == "session/new" then
          sent_request = decoded.params
          vim.schedule(
            function()
              client:_handle_message({
                jsonrpc = "2.0",
                id = decoded.id,
                result = { sessionId = "test-session-mcp" },
              })
            end
          )
        elseif decoded.method == "session/prompt" then
          vim.schedule(
            function()
              client:_handle_message({
                jsonrpc = "2.0",
                method = "session/update",
                params = {
                  sessionId = "test-session-mcp",
                  update = {
                    sessionUpdate = "tool_call",
                    toolCallId = "mcp-tool-1",
                    title = "lookup__get_code",
                    kind = "other",
                    status = "completed",
                    content = {
                      {
                        type = "content",
                        content = { type = "text", text = MCP_TOOL_UUID },
                      },
                    },
                  },
                },
              })
            end
          )
          vim.schedule(
            function()
              client:_handle_message({
                jsonrpc = "2.0",
                id = decoded.id,
                result = { stopReason = "end_turn" },
              })
            end
          )
        end
      end,
      start = function(self, on_message) end,
      stop = function(self) end,
    }
    local mock_config = {
      transport_type = "stdio",
      handlers = {
        on_session_update = function(update) table.insert(session_updates, update) end,
      },
    }
    client = ACPClient:new(mock_config)
    client.transport = mock_transport
    client.state = "ready"
    local mcp_servers = {
      { type = "http", name = "lookup", url = "http://localhost:8080/mcp" },
    }
    local session_id = nil
    client:create_session("/tmp/test", mcp_servers, function(sid, err) session_id = sid end)
    -- session/new must forward cwd and the MCP server list verbatim.
    assert.is_not_nil(sent_request)
    assert.equals("/tmp/test", sent_request.cwd)
    assert.same(mcp_servers, sent_request.mcpServers)
    assert.equals("test-session-mcp", session_id)
    client:send_prompt("test-session-mcp", { { type = "text", text = "Use the get_code tool" } }, function() end)
    assert.equals(1, #session_updates)
    assert.equals("tool_call", session_updates[1].sessionUpdate)
    assert.equals("lookup__get_code", session_updates[1].title)
    assert.equals("completed", session_updates[1].status)
    local tool_content = session_updates[1].content[1].content.text
    assert.equals(MCP_TOOL_UUID, tool_content)
  end)
  it("should default mcp_servers to empty array", function()
    local sent_params = nil
    local client
    local mock_transport = {
      send = function(self, data)
        local decoded = vim.json.decode(data)
        if decoded.method == "session/new" then
          sent_params = decoded.params
          vim.schedule(
            function()
              client:_handle_message({
                jsonrpc = "2.0",
                id = decoded.id,
                result = { sessionId = "test-session" },
              })
            end
          )
        end
      end,
      start = function(self, on_message) end,
      stop = function(self) end,
    }
    client = ACPClient:new({ transport_type = "stdio", handlers = {} })
    client.transport = mock_transport
    client.state = "ready"
    -- Passing nil for mcp_servers must serialize as an empty array.
    client:create_session("/tmp/test", nil, function() end)
    assert.is_not_nil(sent_params)
    assert.same({}, sent_params.mcpServers)
  end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/libs/jsonparser_spec.lua | Lua | local JsonParser = require("avante.libs.jsonparser")
describe("JsonParser", function()
-- One-shot parsing API: JsonParser.parse returns (value, err). Covers
-- objects, arrays, nesting, literals, numbers, escapes, empties,
-- whitespace tolerance, and (loosely) malformed input.
describe("parse (one-time parsing)", function()
  it("should parse simple objects", function()
    local result, err = JsonParser.parse('{"name": "test", "value": 42}')
    assert.is_nil(err)
    assert.equals("test", result.name)
    assert.equals(42, result.value)
  end)
  it("should parse simple arrays", function()
    local result, err = JsonParser.parse('[1, 2, 3, "test"]')
    assert.is_nil(err)
    assert.equals(1, result[1])
    assert.equals(2, result[2])
    assert.equals(3, result[3])
    assert.equals("test", result[4])
  end)
  it("should parse nested objects", function()
    local result, err = JsonParser.parse('{"user": {"name": "John", "age": 30}, "active": true}')
    assert.is_nil(err)
    assert.equals("John", result.user.name)
    assert.equals(30, result.user.age)
    assert.is_true(result.active)
  end)
  it("should parse nested arrays", function()
    local result, err = JsonParser.parse("[[1, 2], [3, 4], [5]]")
    assert.is_nil(err)
    assert.equals(1, result[1][1])
    assert.equals(2, result[1][2])
    assert.equals(3, result[2][1])
    assert.equals(4, result[2][2])
    assert.equals(5, result[3][1])
  end)
  it("should parse mixed nested structures", function()
    local result, err = JsonParser.parse('{"items": [{"id": 1, "tags": ["a", "b"]}, {"id": 2, "tags": []}]}')
    assert.is_nil(err)
    assert.equals(1, result.items[1].id)
    assert.equals("a", result.items[1].tags[1])
    assert.equals("b", result.items[1].tags[2])
    assert.equals(2, result.items[2].id)
    assert.equals(0, #result.items[2].tags)
  end)
  it("should parse literals correctly", function()
    local result, err = JsonParser.parse('{"null_val": null, "true_val": true, "false_val": false}')
    assert.is_nil(err)
    -- JSON null maps to Lua nil (the key is simply absent in the table).
    assert.is_nil(result.null_val)
    assert.is_true(result.true_val)
    assert.is_false(result.false_val)
  end)
  it("should parse numbers correctly", function()
    local result, err = JsonParser.parse('{"int": 42, "float": 3.14, "negative": -10, "exp": 1e5}')
    assert.is_nil(err)
    assert.equals(42, result.int)
    assert.equals(3.14, result.float)
    assert.equals(-10, result.negative)
    assert.equals(100000, result.exp)
  end)
  it("should parse escaped strings", function()
    local result, err = JsonParser.parse('{"escaped": "line1\\nline2\\ttab\\"quote"}')
    assert.is_nil(err)
    assert.equals('line1\nline2\ttab"quote', result.escaped)
  end)
  it("should handle empty objects and arrays", function()
    local result1, err1 = JsonParser.parse("{}")
    assert.is_nil(err1)
    assert.equals("table", type(result1))
    local result2, err2 = JsonParser.parse("[]")
    assert.is_nil(err2)
    assert.equals("table", type(result2))
    assert.equals(0, #result2)
  end)
  it("should handle whitespace", function()
    local result, err = JsonParser.parse(' { "key" : "value" } ')
    assert.is_nil(err)
    assert.equals("value", result.key)
  end)
  it("should return error for invalid JSON", function()
    local result, err = JsonParser.parse('{"invalid": }')
    -- The parser returns an empty table for invalid JSON
    assert.is_true(result ~= nil and type(result) == "table")
  end)
  it("should return error for incomplete JSON", function()
    local result, err = JsonParser.parse('{"incomplete"')
    -- The parser may return incomplete object with _incomplete flag
    assert.is_true(result == nil or err ~= nil or (result and result._incomplete))
  end)
end)
describe("StreamParser", function()
local parser
before_each(function() parser = JsonParser.createStreamParser() end)
-- Smoke tests for the streaming parser's public surface and single-chunk
-- parsing (the `parser` fixture is created fresh in before_each above).
describe("basic functionality", function()
  it("should create a new parser instance", function()
    assert.is_not_nil(parser)
    assert.equals("function", type(parser.addData))
    assert.equals("function", type(parser.getAllObjects))
  end)
  it("should parse complete JSON in one chunk", function()
    parser:addData('{"name": "test", "value": 42}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("test", results[1].name)
    assert.equals(42, results[1].value)
  end)
  it("should parse multiple complete JSON objects", function()
    -- Concatenated top-level objects are emitted as separate results.
    parser:addData('{"a": 1}{"b": 2}{"c": 3}')
    local results = parser:getAllObjects()
    assert.equals(3, #results)
    assert.equals(1, results[1].a)
    assert.equals(2, results[2].b)
    assert.equals(3, results[3].c)
  end)
end)
-- Chunk-boundary behavior: JSON split mid-object, mid-string, mid-number,
-- mid-literal, and mid-escape must still reassemble into complete values.
describe("streaming functionality", function()
  it("should handle JSON split across multiple chunks", function()
    parser:addData('{"name": "te')
    parser:addData('st", "value": ')
    parser:addData("42}")
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("test", results[1].name)
    assert.equals(42, results[1].value)
  end)
  it("should handle string split across chunks", function()
    parser:addData('{"message": "Hello ')
    parser:addData('World!"}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("Hello World!", results[1].message)
  end)
  it("should handle number split across chunks", function()
    parser:addData('{"value": 123')
    parser:addData("45}")
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    -- The parser currently parses 123 as complete number and treats 45 as separate
    -- This is expected behavior for streaming JSON where numbers at chunk boundaries
    -- are finalized when a non-number character is encountered or buffer ends
    assert.equals(123, results[1].value)
  end)
  it("should handle literal split across chunks", function()
    parser:addData('{"flag": tr')
    parser:addData("ue}")
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.is_true(results[1].flag)
  end)
  it("should handle escaped strings split across chunks", function()
    -- The backslash-escape ends one chunk; the escaped char starts the next.
    parser:addData('{"text": "line1\\n')
    parser:addData('line2"}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("line1\nline2", results[1].text)
  end)
  it("should handle complex nested structure streaming", function()
    parser:addData('{"users": [{"name": "Jo')
    parser:addData('hn", "age": 30}, {"name": "Ja')
    parser:addData('ne", "age": 25}], "count": 2}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("John", results[1].users[1].name)
    assert.equals(30, results[1].users[1].age)
    assert.equals("Jane", results[1].users[2].name)
    assert.equals(25, results[1].users[2].age)
    assert.equals(2, results[1].count)
  end)
end)
-- Status reporting and graceful handling of malformed input.
describe("status and error handling", function()
  it("should provide status information", function()
    -- A freshly-created parser reports a pristine "ready" status.
    local status = parser:getStatus()
    assert.equals("ready", status.state)
    assert.equals(0, status.completed_objects)
    assert.equals(0, status.stack_depth)
    assert.equals(0, status.current_depth)
    assert.is_false(status.has_incomplete)
  end)
  it("should handle unexpected closing brackets", function()
    -- Extra trailing "}" must put the parser into an error state.
    parser:addData('{"test": "value"}}')
    assert.is_true(parser:hasError())
  end)
  it("should handle unexpected opening brackets", function()
    parser:addData('{"test": {"nested"}}')
    -- This may not always be detected as an error in streaming parsers
    local results = parser:getAllObjects()
    assert.is_true(parser:hasError() or #results >= 0) -- Just ensure no crash
  end)
end)
-- reset() must return the parser to a clean, reusable state.
describe("reset functionality", function()
  it("should reset parser state", function()
    parser:addData('{"test": "value"}')
    local results1 = parser:getAllObjects()
    assert.equals(1, #results1)
    parser:reset()
    -- After reset the status counters start over.
    local status = parser:getStatus()
    assert.equals("ready", status.state)
    assert.equals(0, status.completed_objects)
    -- And new input is parsed as if the parser were brand new.
    parser:addData('{"new": "data"}')
    local results2 = parser:getAllObjects()
    assert.equals(1, #results2)
    assert.equals("data", results2[1].new)
  end)
end)
-- Truncated input must be finalized into best-effort partial objects
-- rather than dropped; getAllObjects() triggers the finalization.
describe("finalize functionality", function()
  it("should finalize incomplete objects", function()
    parser:addData('{"incomplete": "test"')
    -- getAllObjects() automatically triggers finalization
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("test", results[1].incomplete)
  end)
  it("should handle incomplete nested structures", function()
    parser:addData('{"users": [{"name": "John"}')
    local results = parser:getAllObjects()
    -- The parser may create multiple results during incomplete parsing
    assert.is_true(#results >= 1)
    -- Check that we have incomplete structures with user data
    local found_john = false
    for _, result in ipairs(results) do
      if result._incomplete then
        -- Look for John in various possible structures
        if result.users and result.users[1] and result.users[1].name == "John" then
          found_john = true
          break
        elseif result[1] and result[1].name == "John" then
          found_john = true
          break
        end
      end
    end
    assert.is_true(found_john)
  end)
  it("should handle incomplete JSON", function()
    parser:addData('{"incomplete": }')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.is_nil(results[1].incomplete)
  end)
  it("should handle incomplete string", function()
    parser:addData('{"incomplete": "}')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("}", results[1].incomplete)
  end)
  it("should handle incomplete string2", function()
    parser:addData('{"incomplete": "')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("", results[1].incomplete)
  end)
  it("should handle incomplete string3", function()
    parser:addData('{"incomplete": "hello')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("hello", results[1].incomplete)
  end)
  it("should handle incomplete string4", function()
    parser:addData('{"incomplete": "hello\\"')
    -- The parser handles malformed JSON gracefully by producing a result
    -- Even incomplete strings should be properly unescaped for user consumption
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals('hello"', results[1].incomplete)
  end)
  it("should handle incomplete string5", function()
    parser:addData('{"incomplete": {"key": "value')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("value", results[1].incomplete.key)
  end)
  it("should handle incomplete string6", function()
    parser:addData('{"completed": "hello", "incomplete": {"key": "value')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("value", results[1].incomplete.key)
    assert.equals("hello", results[1].completed)
  end)
  it("should handle incomplete string7", function()
    parser:addData('{"completed": "hello", "incomplete": {"key": {"key1": "value')
    -- The parser handles malformed JSON gracefully by producing a result
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("value", results[1].incomplete.key.key1)
    assert.equals("hello", results[1].completed)
  end)
  it("should complete incomplete numbers", function()
    parser:addData('{"value": 123')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals(123, results[1].value)
  end)
  it("should complete incomplete literals", function()
    parser:addData('{"flag": tru')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    -- Incomplete literal "tru" cannot be resolved to "true"
    -- This is expected behavior as "tru" is not a valid JSON literal
    assert.is_nil(results[1].flag)
  end)
end)
-- Degenerate and boundary inputs.
describe("edge cases", function()
  it("should handle empty input", function()
    parser:addData("")
    local results = parser:getAllObjects()
    assert.equals(0, #results)
  end)
  it("should handle nil input", function()
    -- nil must be a no-op, not a crash.
    parser:addData(nil)
    local results = parser:getAllObjects()
    assert.equals(0, #results)
  end)
  it("should handle only whitespace", function()
    parser:addData("  \n\t  ")
    local results = parser:getAllObjects()
    assert.equals(0, #results)
  end)
  it("should handle deeply nested structures", function()
    local deep_json = '{"a": {"b": {"c": {"d": {"e": "deep"}}}}}'
    parser:addData(deep_json)
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("deep", results[1].a.b.c.d.e)
  end)
  it("should handle arrays with mixed types", function()
    parser:addData('[1, "string", true, null, {"key": "value"}, [1, 2]]')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    local arr = results[1]
    assert.equals(1, arr[1])
    assert.equals("string", arr[2])
    assert.is_true(arr[3])
    -- The parser behavior shows that the null and object get merged somehow
    -- This is an implementation detail of this specific parser
    assert.equals("value", arr[4].key)
    assert.equals(1, arr[5][1])
    assert.equals(2, arr[5][2])
  end)
  it("should handle large numbers", function()
    parser:addData('{"big": 123456789012345}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals(123456789012345, results[1].big)
  end)
  it("should handle scientific notation", function()
    parser:addData('{"sci": 1.23e-4}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals(0.000123, results[1].sci)
  end)
  it("should handle Unicode escape sequences", function()
    -- \u escapes spelling out "Hello".
    parser:addData('{"unicode": "\\u0048\\u0065\\u006C\\u006C\\u006F"}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("Hello", results[1].unicode)
  end)
end)
-- End-to-end style scenarios modeled on real streaming traffic.
describe("real-world scenarios", function()
  it("should handle typical API response streaming", function()
    -- Simulate chunked API response
    parser:addData('{"status": "success", "data": {"users": [')
    parser:addData('{"id": 1, "name": "Alice", "email": "alice@example.com"},')
    parser:addData('{"id": 2, "name": "Bob", "email": "bob@example.com"}')
    parser:addData('], "total": 2}, "message": "Users retrieved successfully"}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    local response = results[1]
    assert.equals("success", response.status)
    assert.equals(2, #response.data.users)
    assert.equals("Alice", response.data.users[1].name)
    assert.equals("bob@example.com", response.data.users[2].email)
    assert.equals(2, response.data.total)
  end)
  it("should handle streaming multiple JSON objects", function()
    -- Simulate server-sent events or JSONL
    parser:addData('{"event": "user_joined", "user": "Alice"}')
    parser:addData('{"event": "message", "user": "Alice", "text": "Hello!"}')
    parser:addData('{"event": "user_left", "user": "Alice"}')
    local results = parser:getAllObjects()
    assert.equals(3, #results)
    assert.equals("user_joined", results[1].event)
    assert.equals("Alice", results[1].user)
    assert.equals("message", results[2].event)
    assert.equals("Hello!", results[2].text)
    assert.equals("user_left", results[3].event)
  end)
  it("should handle incomplete streaming data gracefully", function()
    parser:addData('{"partial": "data", "incomplete_array": [1, 2, ')
    -- Mid-stream the parser reports "incomplete" with nothing emitted yet.
    local status = parser:getStatus()
    assert.equals("incomplete", status.state)
    assert.equals(0, status.completed_objects)
    parser:addData('3, 4], "complete": true}')
    local results = parser:getAllObjects()
    assert.equals(1, #results)
    assert.equals("data", results[1].partial)
    assert.equals(4, #results[1].incomplete_array)
    assert.is_true(results[1].complete)
  end)
end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/llm_spec.lua | Lua | local utils = require("avante.utils")
local PPath = require("plenary.path")
local llm = require("avante.llm")
describe("generate_prompts", function()
local project_root = "/tmp/project_root"
-- Build an isolated on-disk project containing an instructions file, and stub
-- out every piece of avante config/state that llm.generate_prompts() touches,
-- so the spec never reaches the network or the real user configuration.
before_each(function()
  local mock_dir = PPath:new("tests", project_root)
  mock_dir:mkdir({ parents = true })
  local mock_file = PPath:new("tests", project_root, "avante.md")
  mock_file:write("# Mock Instructions\nThis is a mock instruction file.", "w")
  -- Mock the project root
  utils.root = {}
  utils.root.get = function() return mock_dir end
  -- Mock Config.providers
  local Config = require("avante.config")
  Config.instructions_file = "avante.md"
  Config.provider = "openai"
  Config.acp_providers = {}
  Config.providers = {
    openai = {
      endpoint = "https://api.mock.com/v1",
      model = "gpt-mock",
      timeout = 10000,
      context_window = 1000,
      extra_request_body = {
        temperature = 0.5,
        max_tokens = 1000,
      },
    },
  }
  -- Mock Config.history to prevent nil access error in Path.setup()
  Config.history = {
    max_tokens = 4096,
    carried_entry_count = nil,
    storage_path = "/tmp/test_avante_history",
    paste = {
      extension = "png",
      filename = "pasted-%Y-%m-%d-%H-%M-%S",
    },
  }
  -- Mock Config.behaviour
  Config.behaviour = {
    auto_focus_sidebar = true,
    auto_suggestions = false, -- Experimental stage
    auto_suggestions_respect_ignore = false,
    auto_set_highlight_group = true,
    auto_set_keymaps = true,
    auto_apply_diff_after_generation = false,
    jump_result_buffer_on_finish = false,
    support_paste_from_clipboard = false,
    minimize_diff = true,
    enable_token_counting = true,
    use_cwd_as_project_root = false,
    auto_focus_on_diff_view = false,
    auto_approve_tool_permissions = false, -- Default: show permission prompts for all tools
    auto_check_diagnostics = true,
    enable_fastapply = false,
  }
  -- Mock Config.rules to prevent nil access error in get_templates_dir()
  Config.rules = {
    project_dir = nil,
    global_dir = nil,
  }
  -- Mock P.available to always return true
  local Path = require("avante.path")
  Path.available = function() return true end
  -- Mock the Prompt functions directly since _templates_lib is a local variable
  -- that we can't easily access from outside the module
  Path.prompts.initialize = function(cache_directory, project_directory)
    -- Mock initialization - no-op for tests
  end
  Path.prompts.render_file = function(path, opts)
    -- Mock render - return empty string for tests
    return ""
  end
  Path.prompts.render_mode = function(mode, opts)
    -- Mock render_mode - return empty string for tests
    return ""
  end
  Path.setup() -- Initialize necessary paths like cache_path
end)
after_each(function()
  -- Clean up created test files and directories.
  -- Plenary's `rmdir` only removes *empty* directories: the instruction
  -- file written in before_each would make it silently fail and leak
  -- state between tests, so delete the file first.
  local mock_file = PPath:new("tests", project_root, "avante.md")
  if mock_file:exists() then mock_file:rm() end
  local mock_dir = PPath:new("tests", project_root)
  if mock_dir:exists() then mock_dir:rmdir() end
end)
-- Instruction-file handling: content is appended when the file exists.
it("should include instruction file content when the file exists", function()
  local opts = {}
  llm.generate_prompts(opts)
  assert.are.same("\n# Mock Instructions\nThis is a mock instruction file.", opts.instructions)
end)
it("should not modify instructions if the file does not exist", function()
  local mock_file = PPath:new("tests", project_root, "avante.md")
  if mock_file:exists() then mock_file:rm() end
  local opts = {}
  llm.generate_prompts(opts)
  assert.are.same(opts.instructions, nil)
end)
-- Tool normalization: absent or empty tool lists collapse to nil.
it("should set tools to nil when no tools are provided", function()
  local opts = {}
  local result = llm.generate_prompts(opts)
  assert.are.same(result.tools, nil)
end)
it("should set tools to nil when empty tools array is provided", function()
  local opts = {
    tools = {},
  }
  local result = llm.generate_prompts(opts)
  assert.are.same(result.tools, nil)
end)
it("should set tools to nil when empty prompt_opts.tools array is provided", function()
  local opts = {
    prompt_opts = {
      tools = {},
    },
  }
  local result = llm.generate_prompts(opts)
  assert.are.same(result.tools, nil)
end)
it("should include tools when non-empty tools are provided", function()
  local mock_tool = {
    name = "test_tool",
    description = "A test tool",
    func = function() end,
  }
  local opts = {
    tools = { mock_tool },
  }
  local result = llm.generate_prompts(opts)
  assert.are.same(#result.tools, 1)
  assert.are.same(result.tools[1].name, "test_tool")
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/llm_tools/helpers_spec.lua | Lua | local LlmToolHelpers = require("avante.llm_tools.helpers")
local Utils = require("avante.utils")
local stub = require("luassert.stub")
-- Verifies that gitignore rules gate file access for LLM tools.
describe("has_permission_to_access", function()
  local test_dir = "/tmp/test_llm_tools_helpers"
  before_each(function()
    os.execute("mkdir -p " .. test_dir)
    -- Write a .gitignore that ignores test.txt and the data/ directory
    os.execute("rm " .. test_dir .. "/.gitignore 2>/dev/null")
    local gitignore_file = io.open(test_dir .. "/.gitignore", "w")
    if gitignore_file then
      gitignore_file:write("test.txt\n")
      gitignore_file:write("data\n")
      gitignore_file:close()
    end
    -- Pretend the temp dir is the project root so the ignore rules apply.
    stub(Utils, "get_project_root", function() return test_dir end)
  end)
  after_each(function() os.execute("rm -rf " .. test_dir) end)
  it("Basic ignored and not ignored", function()
    local abs_path
    abs_path = test_dir .. "/test.txt"
    assert.is_false(LlmToolHelpers.has_permission_to_access(abs_path))
    abs_path = test_dir .. "/test1.txt"
    assert.is_true(LlmToolHelpers.has_permission_to_access(abs_path))
  end)
  it("Ignore files inside directories", function()
    -- Everything under the ignored "data" directory is off limits.
    local abs_path
    abs_path = test_dir .. "/data/test.txt"
    assert.is_false(LlmToolHelpers.has_permission_to_access(abs_path))
    abs_path = test_dir .. "/data/test1.txt"
    assert.is_false(LlmToolHelpers.has_permission_to_access(abs_path))
  end)
  it("Do not ignore files with just similar paths", function()
    -- "data_test" must not match the "data" ignore entry by prefix.
    local abs_path
    abs_path = test_dir .. "/data_test/test.txt"
    assert.is_false(LlmToolHelpers.has_permission_to_access(abs_path))
    abs_path = test_dir .. "/data_test/test1.txt"
    assert.is_true(LlmToolHelpers.has_permission_to_access(abs_path))
  end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/llm_tools_spec.lua | Lua | local stub = require("luassert.stub")
local LlmTools = require("avante.llm_tools")
local LlmToolHelpers = require("avante.llm_tools.helpers")
local Config = require("avante.config")
local Utils = require("avante.utils")
local ls = require("avante.llm_tools.ls")
local grep = require("avante.llm_tools.grep")
local glob = require("avante.llm_tools.glob")
local view = require("avante.llm_tools.view")
local bash = require("avante.llm_tools.bash")
-- Auto-approve every confirmation prompt and pretend nothing is already in
-- context, so the tools under test run non-interactively.
LlmToolHelpers.confirm = function(msg, cb) return cb(true) end
LlmToolHelpers.already_in_context = function(path) return false end
describe("llm_tools", function()
local test_dir = "/tmp/test_llm_tools"
local test_file = test_dir .. "/test.txt"
before_each(function()
  Config.setup()
  -- Create the test directory tree and sample files
  os.execute("mkdir -p " .. test_dir)
  -- A real git repo is needed so gitignore handling can be exercised.
  os.execute(string.format("cd %s; git init -b main", test_dir))
  local file = io.open(test_file, "w")
  if not file then error("Failed to create test file") end
  file:write("test content")
  file:close()
  os.execute("mkdir -p " .. test_dir .. "/test_dir1")
  file = io.open(test_dir .. "/test_dir1/test1.txt", "w")
  if not file then error("Failed to create test file") end
  file:write("test1 content")
  file:close()
  os.execute("mkdir -p " .. test_dir .. "/test_dir2")
  file = io.open(test_dir .. "/test_dir2/test2.txt", "w")
  if not file then error("Failed to create test file") end
  file:write("test2 content")
  file:close()
  -- test_dir2 is gitignored so tools should skip it.
  file = io.open(test_dir .. "/.gitignore", "w")
  if not file then error("Failed to create test file") end
  file:write("test_dir2/")
  file:close()
  -- Mock get_project_root
  stub(Utils, "get_project_root", function() return test_dir end)
end)
after_each(function()
  -- Remove the temporary test directory
  os.execute("rm -rf " .. test_dir)
  -- Restore the mocked get_project_root
  Utils.get_project_root:revert()
end)
-- Directory listing: depth limiting and gitignore awareness.
describe("ls", function()
  it("should list files in directory", function()
    local result, err = ls({ path = ".", max_depth = 1 }, {})
    assert.is_nil(err)
    assert.falsy(result:find("avante.nvim"))
    assert.truthy(result:find("test.txt"))
    -- Depth 1 must not descend into subdirectories.
    assert.falsy(result:find("test1.txt"))
  end)
  it("should list files in directory with depth", function()
    local result, err = ls({ path = ".", max_depth = 2 }, {})
    assert.is_nil(err)
    assert.falsy(result:find("avante.nvim"))
    assert.truthy(result:find("test.txt"))
    assert.truthy(result:find("test1.txt"))
  end)
  it("should list files respecting gitignore", function()
    local result, err = ls({ path = ".", max_depth = 2 }, {})
    assert.is_nil(err)
    assert.falsy(result:find("avante.nvim"))
    assert.truthy(result:find("test.txt"))
    assert.truthy(result:find("test1.txt"))
    -- test2.txt lives in the gitignored test_dir2/.
    assert.falsy(result:find("test2.txt"))
  end)
end)
-- File/directory viewing through the async `view` tool.
-- NOTE(review): assertions run inside on_complete; if the callback were
-- never invoked these tests would pass vacuously — consider a completion flag.
describe("view", function()
  it("should read file content", function()
    view({ path = "test.txt" }, {
      on_complete = function(content, err)
        assert.is_nil(err)
        assert.equals("test content", vim.json.decode(content).content)
      end,
    })
  end)
  it("should return error for non-existent file", function()
    view({ path = "non_existent.txt" }, {
      on_complete = function(content, err)
        assert.truthy(err)
        assert.equals("", content)
      end,
    })
  end)
  it("should read directory content", function()
    view({ path = test_dir }, {
      on_complete = function(content, err)
        assert.is_nil(err)
        assert.truthy(content:find("test.txt"))
        assert.truthy(content:find("test content"))
      end,
    })
  end)
end)
-- Directory creation tool.
describe("create_dir", function()
  it("should create new directory", function()
    LlmTools.create_dir({ path = "new_dir" }, {
      session_ctx = {},
      on_complete = function(success, err)
        assert.is_nil(err)
        assert.is_true(success)
        -- Verify the directory actually exists on disk afterwards.
        local dir_exists = io.open(test_dir .. "/new_dir", "r") ~= nil
        assert.is_true(dir_exists)
      end,
    })
  end)
end)
-- Path deletion tool.
describe("delete_path", function()
  it("should delete existing file", function()
    LlmTools.delete_path({ path = "test.txt" }, {
      session_ctx = {},
      on_complete = function(success, err)
        assert.is_nil(err)
        assert.is_true(success)
        -- The file must be gone from disk after the call.
        local file_exists = io.open(test_file, "r") ~= nil
        assert.is_false(file_exists)
      end,
    })
  end)
end)
-- Text search tool: exercises the rg -> ag -> grep fallback chain by
-- mocking vim.fn.exepath so only one backend appears to be installed.
describe("grep", function()
  local original_exepath = vim.fn.exepath
  -- Always restore the real exepath so other specs see the true environment.
  after_each(function() vim.fn.exepath = original_exepath end)
  it("should search using ripgrep when available", function()
    -- Mock exepath to return rg path
    vim.fn.exepath = function(cmd)
      if cmd == "rg" then return "/usr/bin/rg" end
      return ""
    end
    -- Create a test file with searchable content
    local file = io.open(test_dir .. "/searchable.txt", "w")
    if not file then error("Failed to create test file") end
    file:write("this is searchable content")
    file:close()
    file = io.open(test_dir .. "/nothing.txt", "w")
    if not file then error("Failed to create test file") end
    file:write("this is nothing")
    file:close()
    -- Case-insensitive query matches regardless of capitalization.
    local result, err = grep({ path = ".", query = "Searchable", case_sensitive = false }, {})
    assert.is_nil(err)
    assert.truthy(result:find("searchable.txt"))
    assert.falsy(result:find("nothing.txt"))
    local result2, err2 = grep({ path = ".", query = "searchable", case_sensitive = true }, {})
    assert.is_nil(err2)
    assert.truthy(result2:find("searchable.txt"))
    assert.falsy(result2:find("nothing.txt"))
    -- Case-sensitive query with wrong capitalization matches nothing.
    local result3, err3 = grep({ path = ".", query = "Searchable", case_sensitive = true }, {})
    assert.is_nil(err3)
    assert.falsy(result3:find("searchable.txt"))
    assert.falsy(result3:find("nothing.txt"))
    local result4, err4 = grep({ path = ".", query = "searchable", case_sensitive = false }, {})
    assert.is_nil(err4)
    assert.truthy(result4:find("searchable.txt"))
    assert.falsy(result4:find("nothing.txt"))
    -- exclude_pattern filters matching files out of the results.
    local result5, err5 = grep({
      path = ".",
      query = "searchable",
      case_sensitive = false,
      exclude_pattern = "search*",
    }, {})
    assert.is_nil(err5)
    assert.falsy(result5:find("searchable.txt"))
    assert.falsy(result5:find("nothing.txt"))
  end)
  it("should search using ag when rg is not available", function()
    -- Mock exepath to return ag path
    vim.fn.exepath = function(cmd)
      if cmd == "ag" then return "/usr/bin/ag" end
      return ""
    end
    -- Create a test file specifically for ag
    local file = io.open(test_dir .. "/ag_test.txt", "w")
    if not file then error("Failed to create test file") end
    file:write("content for ag test")
    file:close()
    local result, err = grep({ path = ".", query = "ag test" }, {})
    assert.is_nil(err)
    assert.is_string(result)
    assert.truthy(result:find("ag_test.txt"))
  end)
  it("should search using grep when rg and ag are not available", function()
    -- Mock exepath to return grep path
    vim.fn.exepath = function(cmd)
      if cmd == "grep" then return "/usr/bin/grep" end
      return ""
    end
    -- Create a test file with searchable content
    local file = io.open(test_dir .. "/searchable.txt", "w")
    if not file then error("Failed to create test file") end
    file:write("this is searchable content")
    file:close()
    file = io.open(test_dir .. "/nothing.txt", "w")
    if not file then error("Failed to create test file") end
    file:write("this is nothing")
    file:close()
    local result, err = grep({ path = ".", query = "Searchable", case_sensitive = false }, {})
    assert.is_nil(err)
    assert.truthy(result:find("searchable.txt"))
    assert.falsy(result:find("nothing.txt"))
    local result2, err2 = grep({ path = ".", query = "searchable", case_sensitive = true }, {})
    assert.is_nil(err2)
    assert.truthy(result2:find("searchable.txt"))
    assert.falsy(result2:find("nothing.txt"))
    local result3, err3 = grep({ path = ".", query = "Searchable", case_sensitive = true }, {})
    assert.is_nil(err3)
    assert.falsy(result3:find("searchable.txt"))
    assert.falsy(result3:find("nothing.txt"))
    local result4, err4 = grep({ path = ".", query = "searchable", case_sensitive = false }, {})
    assert.is_nil(err4)
    assert.truthy(result4:find("searchable.txt"))
    assert.falsy(result4:find("nothing.txt"))
    local result5, err5 = grep({
      path = ".",
      query = "searchable",
      case_sensitive = false,
      exclude_pattern = "search*",
    }, {})
    assert.is_nil(err5)
    assert.falsy(result5:find("searchable.txt"))
    assert.falsy(result5:find("nothing.txt"))
  end)
  it("should return error when no search tool is available", function()
    -- Mock exepath to return nothing
    vim.fn.exepath = function() return "" end
    local result, err = grep({ path = ".", query = "test" }, {})
    assert.equals("", result)
    assert.equals("No search command found", err)
  end)
  it("should respect path permissions", function()
    -- The first return value is unused here; guard err before :find so a
    -- nil error yields a clean assertion failure instead of an index error.
    local _, err = grep({ path = "../outside_project", query = "test" }, {})
    assert.truthy(err)
    assert.truthy(err:find("No permission to access path"))
  end)
  it("should handle non-existent paths", function()
    local result, err = grep({ path = "non_existent_dir", query = "test" }, {})
    assert.equals("", result)
    assert.truthy(err)
    assert.truthy(err:find("No such file or directory"))
  end)
end)
-- Shell execution tool: only the permission-denial path is currently enabled.
describe("bash", function()
  -- it("should execute command and return output", function()
  --   bash({ path = ".", command = "echo 'test'" }, nil, function(result, err)
  --     assert.is_nil(err)
  --     assert.equals("test\n", result)
  --   end)
  -- end)
  it("should return error when running outside current directory", function()
    bash({ path = "../outside_project", command = "echo 'test'" }, {
      session_ctx = {},
      on_complete = function(result, err)
        assert.is_false(result)
        assert.truthy(err)
        assert.truthy(err:find("No permission to access path"))
      end,
    })
  end)
end)
-- Sandboxed Python execution tool (runs inside a container).
-- NOTE(review): assertions live inside on_complete callbacks; verify the
-- callbacks actually fire, otherwise failures would go unnoticed.
describe("python", function()
  it("should execute Python code and return output", function()
    LlmTools.python({
      path = ".",
      code = "print('Hello from Python')",
    }, {
      session_ctx = {},
      on_complete = function(result, err)
        assert.is_nil(err)
        assert.equals("Hello from Python\n", result)
      end,
    })
  end)
  it("should handle Python errors", function()
    LlmTools.python({
      path = ".",
      code = "print(undefined_variable)",
    }, {
      session_ctx = {},
      on_complete = function(result, err)
        assert.is_nil(result)
        assert.truthy(err)
        assert.truthy(err:find("Error"))
      end,
    })
  end)
  it("should respect path permissions", function()
    LlmTools.python({
      path = "../outside_project",
      code = "print('test')",
    }, {
      session_ctx = {},
      on_complete = function(result, err)
        assert.is_nil(result)
        assert.truthy(err:find("No permission to access path"))
      end,
    })
  end)
  it("should handle non-existent paths", function()
    LlmTools.python({
      path = "non_existent_dir",
      code = "print('test')",
    }, {
      session_ctx = {},
      on_complete = function(result, err)
        assert.is_nil(result)
        assert.truthy(err:find("Path not found"))
      end,
    })
  end)
  it("should support custom container image", function()
    -- Remove the image first to force a pull, proving the custom image path works.
    os.execute("docker image rm python:3.12-slim")
    LlmTools.python({
      path = ".",
      code = "print('Hello from custom container')",
      container_image = "python:3.12-slim",
    }, {
      session_ctx = {},
      on_complete = function(result, err)
        assert.is_nil(err)
        assert.equals("Hello from custom container\n", result)
      end,
    })
  end)
end)
-- File globbing tool: pattern matching, recursion, and permission checks.
describe("glob", function()
  it("should find files matching the pattern", function()
    -- Create some additional test files with different extensions for glob testing
    os.execute("touch " .. test_dir .. "/file1.lua")
    os.execute("touch " .. test_dir .. "/file2.lua")
    os.execute("touch " .. test_dir .. "/file3.js")
    os.execute("mkdir -p " .. test_dir .. "/nested")
    os.execute("touch " .. test_dir .. "/nested/file4.lua")
    -- Test for lua files in the root
    local result, err = glob({ path = ".", pattern = "*.lua" }, {})
    assert.is_nil(err)
    local files = vim.json.decode(result).matches
    assert.equals(2, #files)
    assert.truthy(vim.tbl_contains(files, test_dir .. "/file1.lua"))
    assert.truthy(vim.tbl_contains(files, test_dir .. "/file2.lua"))
    assert.falsy(vim.tbl_contains(files, test_dir .. "/file3.js"))
    assert.falsy(vim.tbl_contains(files, test_dir .. "/nested/file4.lua"))
    -- Test with recursive pattern
    local result2, err2 = glob({ path = ".", pattern = "**/*.lua" }, {})
    assert.is_nil(err2)
    local files2 = vim.json.decode(result2).matches
    assert.equals(3, #files2)
    assert.truthy(vim.tbl_contains(files2, test_dir .. "/file1.lua"))
    assert.truthy(vim.tbl_contains(files2, test_dir .. "/file2.lua"))
    assert.truthy(vim.tbl_contains(files2, test_dir .. "/nested/file4.lua"))
  end)
  it("should respect path permissions", function()
    local result, err = glob({ path = "../outside_project", pattern = "*.txt" }, {})
    assert.equals("", result)
    assert.truthy(err:find("No permission to access path"))
  end)
  it("should handle patterns without matches", function()
    local result, err = glob({ path = ".", pattern = "*.nonexistent" }, {})
    assert.is_nil(err)
    local files = vim.json.decode(result).matches
    assert.equals(0, #files)
  end)
  it("should handle files in gitignored directories", function()
    -- Create test files in ignored directory
    os.execute("touch " .. test_dir .. "/test_dir2/ignored1.lua")
    os.execute("touch " .. test_dir .. "/test_dir2/ignored2.lua")
    -- Create test files in non-ignored directory
    os.execute("touch " .. test_dir .. "/test_dir1/notignored1.lua")
    os.execute("touch " .. test_dir .. "/test_dir1/notignored2.lua")
    local result, err = glob({ path = ".", pattern = "**/*.lua" }, {})
    assert.is_nil(err)
    local files = vim.json.decode(result).matches
    -- Check that files from non-ignored directory are found
    local found_notignored = false
    for _, file in ipairs(files) do
      if file:find("test_dir1/notignored") then
        found_notignored = true
        break
      end
    end
    assert.is_true(found_notignored)
    -- Note: By default, vim.fn.glob does not respect gitignore files
    -- This test simply verifies the glob function works as expected
    -- If in the future, the function is modified to respect gitignore,
    -- this test can be updated
  end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/providers/bedrock_spec.lua | Lua | local bedrock_provider = require("avante.providers.bedrock")
local test_util = require("avante.utils.test")
local Config = require("avante.config")
Config.setup({})
-- AWS Bedrock provider: stream parsing of recorded binary fixtures and
-- detection of curl's AWS SigV4 support (requires curl >= 8.10.0).
describe("bedrock_provider", function()
  describe("parse_stream_data", function()
    it("should parse response in a stream.", function()
      local data = test_util.read_file("tests/data/bedrock_response_stream.bin")
      local message = ""
      -- Accumulate every chunk the parser emits for the fixture stream.
      bedrock_provider:parse_stream_data({}, data, {
        on_chunk = function(msg) message = message .. msg end,
        on_stop = function() end,
      })
      assert.equals(
        "I'll help you fix errors in the HelloLog4j.java file. Let me first understand what errors might be present by examining the code and related files.",
        message
      )
    end)
    it("should parse exception inside a stream.", function()
      local data = test_util.read_file("tests/data/bedrock_response_stream_with_exception.bin")
      local message = ""
      -- Only the last chunk (the surfaced exception text) is kept.
      bedrock_provider:parse_stream_data({}, data, {
        on_chunk = function(msg) message = msg end,
      })
      assert.equals(
        "- Too many requests, please wait before trying again. You have sent too many requests. Wait before trying again.",
        message
      )
    end)
  end)
  describe("check_curl_version_supports_aws_sig", function()
    it(
      "should return true for curl version 8.10.0",
      function()
        assert.is_true(
          bedrock_provider.check_curl_version_supports_aws_sig(
            "curl 8.10.0 (x86_64-pc-linux-gnu) libcurl/7.68.0 OpenSSL/1.1.1f zlib/1.2.11 brotli/1.0.7 libidn2/2.2.0 libpsl/0.21.0 (+libidn2/2.2.0) libssh2/1.8.0 nghttp2/1.40.0 librtmp/2.3"
          )
        )
      end
    )
    it(
      "should return true for curl version higher than 8.10.0",
      function()
        assert.is_true(
          bedrock_provider.check_curl_version_supports_aws_sig(
            "curl 8.11.0 (aarch64-apple-darwin23.6.0) libcurl/8.11.0 OpenSSL/3.4.0 (SecureTransport) zlib/1.2.12 brotli/1.1.0 zstd/1.5.6 AppleIDN libssh2/1.11.1 nghttp2/1.64.0 librtmp/2.3"
          )
        )
      end
    )
    it(
      "should return false for curl version lower than 8.10.0",
      function()
        assert.is_false(
          bedrock_provider.check_curl_version_supports_aws_sig(
            "curl 7.68.0 (x86_64-pc-linux-gnu) libcurl/7.68.0 OpenSSL/1.1.1f zlib/1.2.11 brotli/1.0.7 libidn2/2.2.0 libpsl/0.21.0 (+libidn2/2.2.0) libssh2/1.8.0 nghttp2/1.40.0 librtmp/2.3"
          )
        )
      end
    )
    it(
      "should return false for invalid version string",
      function() assert.is_false(bedrock_provider.check_curl_version_supports_aws_sig("Invalid version string")) end
    )
  end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/providers/claude_spec.lua | Lua | ---@diagnostic disable: duplicate-set-field, need-check-nil
local busted = require("plenary.busted")
local async = require("plenary.async.tests")
local async_util = require("plenary.async")
local test_util = require("avante.utils.test")
local pkce = require("avante.auth.pkce")
-- Mock data helpers
--- Build a fake OAuth token record for the specs.
-- When `expired` is truthy the expiry lies one hour in the past,
-- otherwise thirty minutes in the future.
local function create_mock_token_data(expired)
  local now = os.time()
  local offset
  if expired then
    offset = -3600
  else
    offset = 1800
  end
  return {
    access_token = "mock_access_token_123",
    refresh_token = "mock_refresh_token_456",
    expires_at = now + offset,
  }
end
--- Build a fake OAuth token-endpoint response body.
-- @treturn table access_token, refresh_token, expires_in (seconds), token_type
local function create_mock_token_response()
  local response = {}
  response.access_token = "mock_access_token_abcdef123456"
  response.refresh_token = "mock_refresh_token_xyz789"
  response.expires_in = 1800
  response.token_type = "Bearer"
  return response
end
busted.describe("claude provider", function()
-- PKCE Implementation Tests
-- Exercises the PKCE (RFC 7636) helpers: random code-verifier generation
-- and the deterministic S256 code-challenge derived from a verifier.
busted.describe("PKCE implementation", function()
busted.describe("generate_verifier", function()
busted.it("should return a non-empty string", function()
local verifier, err = pkce.generate_verifier()
assert.not_nil(verifier)
assert.is_nil(err)
assert.is_string(verifier)
assert.is_true(#verifier > 0)
end)
busted.it("should generate URL-safe base64 string (no +, /, or =)", function()
local verifier, err = pkce.generate_verifier()
assert.is_nil(err)
-- string.match returns nil when none of the reserved base64 chars appear
assert.is_false(verifier:match("[+/=]") ~= nil, "Verifier should not contain +, /, or =")
end)
busted.it("should generate verifier within valid length range (43-128 chars)", function()
local verifier, err = pkce.generate_verifier()
assert.is_nil(err)
-- RFC 7636 section 4.1 mandates a verifier of 43-128 characters
assert.is_true(#verifier >= 43 and #verifier <= 128, "Verifier length should be 43-128 characters")
end)
busted.it("should generate different verifiers on multiple calls", function()
local verifier1, err1 = pkce.generate_verifier()
local verifier2, err2 = pkce.generate_verifier()
assert.is_nil(err1)
assert.is_nil(err2)
assert.not_equal(verifier1, verifier2)
end)
end)
busted.describe("generate_challenge", function()
busted.it("should return a non-empty string", function()
local verifier = "test_verifier_123456"
local challenge, err = pkce.generate_challenge(verifier)
assert.not_nil(challenge)
assert.is_nil(err)
assert.is_string(challenge)
assert.is_true(#challenge > 0)
end)
busted.it("should be deterministic (same verifier produces same challenge)", function()
local verifier = "test_verifier_123456"
local challenge1, err1 = pkce.generate_challenge(verifier)
local challenge2, err2 = pkce.generate_challenge(verifier)
assert.is_nil(err1)
assert.is_nil(err2)
assert.equals(challenge1, challenge2)
end)
busted.it("should generate URL-safe base64 string (no +, /, or =)", function()
local verifier = "test_verifier_123456"
local challenge, err = pkce.generate_challenge(verifier)
assert.is_nil(err)
assert.is_false(challenge:match("[+/=]") ~= nil, "Challenge should not contain +, /, or =")
end)
busted.it("should generate different challenges for different verifiers", function()
local verifier1 = "test_verifier_1"
local verifier2 = "test_verifier_2"
local challenge1, err1 = pkce.generate_challenge(verifier1)
local challenge2, err2 = pkce.generate_challenge(verifier2)
assert.is_nil(err1)
assert.is_nil(err2)
assert.not_equal(challenge1, challenge2)
end)
busted.it("should generate challenge of correct length for SHA256 (43 chars)", function()
local verifier = "test_verifier_123456"
local challenge, err = pkce.generate_challenge(verifier)
assert.is_nil(err)
-- 32 SHA-256 bytes encode to 43 chars of unpadded URL-safe base64
assert.equals(43, #challenge)
end)
end)
end)
-- Token Storage Tests
-- Verifies store_tokens() persists the OAuth token set into provider state.
-- File writes (io.open) and chmod (vim.fn.system) are stubbed so no real
-- I/O happens; plenary async sleep lets the vim.schedule callback run.
-- NOTE(review): the stubs are restored inline after the call under test;
-- if store_tokens raises, they leak into later tests -- consider an
-- after_each restore instead.
busted.describe("Token storage and retrieval", function()
local claude_provider
busted.before_each(function()
-- Reload the provider module to get a fresh state
package.loaded["avante.providers.claude"] = nil
claude_provider = require("avante.providers.claude")
end)
busted.describe("store_tokens", function()
async.it("should store tokens with correct structure in state", function()
-- Initialize state
claude_provider.state = { claude_token = nil }
local mock_tokens = create_mock_token_response()
local original_time = os.time()
-- Mock file operations to avoid actual file I/O
local original_open = io.open
io.open = function(path, mode)
return {
write = function() end,
close = function() end,
}
end
-- Mock vim.fn.system to avoid actual chmod
local original_system = vim.fn.system
vim.fn.system = function() end
claude_provider.store_tokens(mock_tokens)
-- Wait for vim.schedule callback to execute
async_util.util.sleep(100)
-- Restore mocks
io.open = original_open
vim.fn.system = original_system
assert.not_nil(claude_provider.state.claude_token)
assert.equals(mock_tokens.access_token, claude_provider.state.claude_token.access_token)
assert.equals(mock_tokens.refresh_token, claude_provider.state.claude_token.refresh_token)
assert.is_number(claude_provider.state.claude_token.expires_at)
-- expires_at should be approximately now + expires_in
assert.is_true(claude_provider.state.claude_token.expires_at > original_time)
end)
async.it("should include all required fields", function()
claude_provider.state = { claude_token = nil }
local mock_tokens = create_mock_token_response()
-- Mock file operations
local original_open = io.open
io.open = function(path, mode)
return {
write = function() end,
close = function() end,
}
end
local original_system = vim.fn.system
vim.fn.system = function() end
claude_provider.store_tokens(mock_tokens)
-- Wait for vim.schedule callback to execute
async_util.util.sleep(100)
io.open = original_open
vim.fn.system = original_system
local token = claude_provider.state.claude_token
assert.not_nil(token.access_token)
assert.not_nil(token.refresh_token)
assert.not_nil(token.expires_at)
end)
end)
end)
-- Authentication Flow Start Tests
-- Drives claude_provider.authenticate() with the browser opener
-- (vim.ui.open), notifier (vim.notify) and the avante input UI all stubbed,
-- then inspects the OAuth authorization URL it builds: endpoint, standard
-- OAuth query parameters, PKCE challenge, and the clipboard fallback taken
-- when the browser cannot be opened.
-- NOTE(review): the package.loaded["avante.ui.input"] stub is never
-- reverted, so later tests requiring that module see it -- confirm intended.
busted.describe("Authentication flow initiation", function()
local claude_provider
local Config
busted.before_each(function()
package.loaded["avante.providers.claude"] = nil
package.loaded["avante.config"] = nil
Config = require("avante.config")
-- Set up minimal config
Config.input = {
provider = "native",
provider_opts = {},
}
claude_provider = require("avante.providers.claude")
end)
busted.describe("authenticate", function()
async.it("should generate PKCE parameters", function()
-- Mock vim.ui.open to prevent browser opening
local captured_url = nil
local original_open = vim.ui.open
vim.ui.open = function(url)
captured_url = url
return true
end
-- Mock vim.notify to prevent notifications
local original_notify = vim.notify
vim.notify = function() end
-- Mock the Input module to prevent UI from actually opening
package.loaded["avante.ui.input"] = {
new = function()
return {
open = function() end,
}
end,
}
claude_provider.authenticate()
-- Wait for vim.schedule callback to execute
async_util.util.sleep(100)
vim.ui.open = original_open
vim.notify = original_notify
-- Verify URL was generated with PKCE parameters
assert.not_nil(captured_url)
assert.is_true(captured_url:match("code_challenge=") ~= nil)
assert.is_true(captured_url:match("code_challenge_method=S256") ~= nil)
end)
async.it("should construct authorization URL with correct parameters", function()
local captured_url = nil
local original_open = vim.ui.open
vim.ui.open = function(url)
captured_url = url
return true
end
local original_notify = vim.notify
vim.notify = function() end
package.loaded["avante.ui.input"] = {
new = function()
return {
open = function() end,
}
end,
}
claude_provider.authenticate()
-- Wait for vim.schedule callback to execute
async_util.util.sleep(100)
vim.ui.open = original_open
vim.notify = original_notify
-- Check for required OAuth parameters
assert.is_true(captured_url:match("client_id=") ~= nil)
assert.is_true(captured_url:match("response_type=code") ~= nil)
assert.is_true(captured_url:match("redirect_uri=") ~= nil)
assert.is_true(captured_url:match("scope=") ~= nil)
assert.is_true(captured_url:match("state=") ~= nil)
assert.is_true(captured_url:match("code_challenge=") ~= nil)
assert.is_true(captured_url:match("code_challenge_method=S256") ~= nil)
end)
async.it("should use correct OAuth endpoint", function()
local captured_url = nil
local original_open = vim.ui.open
vim.ui.open = function(url)
captured_url = url
return true
end
local original_notify = vim.notify
vim.notify = function() end
package.loaded["avante.ui.input"] = {
new = function()
return {
open = function() end,
}
end,
}
claude_provider.authenticate()
-- Wait for vim.schedule callback to execute
async_util.util.sleep(100)
vim.ui.open = original_open
vim.notify = original_notify
-- Lua pattern: "." is unanchored wildcard here, so this pins the prefix only
assert.is_true(captured_url:match("^https://claude.ai/oauth/authorize") ~= nil)
end)
async.it("should fallback to clipboard when vim.ui.open fails", function()
-- Mock vim.ui.open to fail
local original_open = vim.ui.open
vim.ui.open = function(url) error("Browser open failed") end
-- Mock clipboard operations
local clipboard_content = nil
local original_setreg = vim.fn.setreg
vim.fn.setreg = function(reg, content) clipboard_content = content end
local original_notify = vim.notify
local notify_called = false
vim.notify = function(msg, level) notify_called = true end
package.loaded["avante.ui.input"] = {
new = function()
return {
open = function() end,
}
end,
}
claude_provider.authenticate()
-- Wait for vim.schedule callback to execute
async_util.util.sleep(100)
vim.ui.open = original_open
vim.fn.setreg = original_setreg
vim.notify = original_notify
-- Should have copied URL to clipboard
assert.not_nil(clipboard_content)
assert.is_true(clipboard_content:match("^https://claude.ai/oauth/authorize") ~= nil)
assert.is_true(notify_called)
end)
end)
end)
-- Token Refresh Logic Tests
-- Covers refresh_token(): early exits without state/token, skip while the
-- token is still valid, forced refresh, the POST request shape, the
-- success path, and the HTTP-error path. plenary.curl.post, io.open and
-- vim.fn.system are stubbed so no network or file I/O occurs.
busted.describe("Token refresh logic", function()
local claude_provider
local curl
busted.before_each(function()
package.loaded["avante.providers.claude"] = nil
package.loaded["plenary.curl"] = nil
claude_provider = require("avante.providers.claude")
curl = require("plenary.curl")
end)
busted.describe("refresh_token", function()
busted.it("should exit early when no state exists", function()
claude_provider.state = nil
local result = claude_provider.refresh_token(false, false)
assert.is_false(result)
end)
busted.it("should exit early when no token exists in state", function()
claude_provider.state = { claude_token = nil }
local result = claude_provider.refresh_token(false, false)
assert.is_false(result)
end)
busted.it("should skip refresh when token is not expired and not forced", function()
local non_expired_token = create_mock_token_data(false)
claude_provider.state = { claude_token = non_expired_token }
local result = claude_provider.refresh_token(false, false)
assert.is_false(result)
end)
async.it("should proceed when forced even if token not expired", function()
local non_expired_token = create_mock_token_data(false)
claude_provider.state = { claude_token = non_expired_token }
-- Mock curl.post
local original_post = curl.post
local post_called = false
curl.post = function(url, opts)
post_called = true
return {
status = 200,
body = vim.json.encode(create_mock_token_response()),
}
end
-- Mock file operations
local original_open = io.open
io.open = function(path, mode)
return {
write = function() end,
close = function() end,
}
end
local original_system = vim.fn.system
vim.fn.system = function() end
-- second argument is the "force" flag
claude_provider.refresh_token(false, true)
-- Wait for any vim.schedule callbacks to complete
async_util.util.sleep(100)
curl.post = original_post
io.open = original_open
vim.fn.system = original_system
assert.is_true(post_called)
end)
async.it("should make POST request with correct structure", function()
local expired_token = create_mock_token_data(true)
claude_provider.state = { claude_token = expired_token }
local captured_url = nil
local captured_body = nil
local captured_headers = nil
-- Mock curl.post
local original_post = curl.post
curl.post = function(url, opts)
captured_url = url
if opts.body then captured_body = vim.json.decode(opts.body) end
captured_headers = opts.headers
return {
status = 200,
body = vim.json.encode(create_mock_token_response()),
}
end
-- Mock file operations
local original_open = io.open
io.open = function(path, mode)
return {
write = function() end,
close = function() end,
}
end
local original_system = vim.fn.system
vim.fn.system = function() end
claude_provider.refresh_token(false, false)
-- Wait for any vim.schedule callbacks to complete
async_util.util.sleep(100)
curl.post = original_post
io.open = original_open
vim.fn.system = original_system
-- Verify request structure
assert.is_true(captured_url:match("oauth/token") ~= nil)
assert.not_nil(captured_body)
assert.equals("refresh_token", captured_body.grant_type)
assert.not_nil(captured_body.client_id)
assert.equals(expired_token.refresh_token, captured_body.refresh_token)
assert.not_nil(captured_headers)
assert.equals("application/json", captured_headers["Content-Type"])
end)
async.it("should handle successful refresh response", function()
local expired_token = create_mock_token_data(true)
claude_provider.state = { claude_token = expired_token }
local mock_response = create_mock_token_response()
-- Mock curl.post
local original_post = curl.post
curl.post = function(url, opts)
return {
status = 200,
body = vim.json.encode(mock_response),
}
end
-- Mock file operations
local original_open = io.open
io.open = function(path, mode)
return {
write = function() end,
close = function() end,
}
end
local original_system = vim.fn.system
vim.fn.system = function() end
claude_provider.refresh_token(false, false)
-- Wait for any vim.schedule callbacks to complete
async_util.util.sleep(100)
curl.post = original_post
io.open = original_open
vim.fn.system = original_system
-- Verify token was updated in state
assert.equals(mock_response.access_token, claude_provider.state.claude_token.access_token)
assert.equals(mock_response.refresh_token, claude_provider.state.claude_token.refresh_token)
end)
async.it("should handle error response (status >= 400)", function()
local expired_token = create_mock_token_data(true)
claude_provider.state = { claude_token = expired_token }
-- Mock curl.post to return error
local original_post = curl.post
local original_notify = vim.notify
curl.post = function(url, opts)
return {
status = 401,
body = vim.json.encode({ error = "invalid_grant" }),
}
end
-- Mock vim.notify
-- NOTE(review): this assertion only runs if an ERROR-level notify fires;
-- with no notification the test still passes silently -- confirm intended.
vim.notify = function(msg, level)
if level == vim.log.levels.ERROR then
assert.matches('[401]Failed to refresh access token: {"error":"invalid_grant"}', msg, nil, true)
end
end
local result = claude_provider.refresh_token(false, false)
-- Wait for any vim.schedule callbacks to complete
async_util.util.sleep(100)
-- Should not crash and return gracefully
-- State should remain unchanged
assert.equals(expired_token.access_token, claude_provider.state.claude_token.access_token)
curl.post = original_post
vim.notify = original_notify
end)
end)
end)
-- Lockfile Management Tests
-- Smoke coverage for the lockfile code path. The lockfile helpers are
-- local to the provider module, so all we can assert here is that loading
-- the module (which wires them up) succeeds and exposes a setup()
-- entry point; detailed lockfile behavior needs integration tests.
busted.describe("Lockfile management", function()
  busted.it("should handle lockfile scenarios through setup", function()
    local provider = require("avante.providers.claude")
    assert.not_nil(provider)
    assert.is_function(provider.setup)
  end)
end)
-- Provider Setup Tests
-- Exercises setup() in both auth modes: "api" (API-key based) and "max"
-- (OAuth token file). Provider config parsing, tokenizers, plenary Path,
-- the browser opener and the input UI are stubbed so setup() never touches
-- the real system.
busted.describe("Provider setup", function()
local claude_provider
local Config
busted.before_each(function()
package.loaded["avante.providers.claude"] = nil
package.loaded["avante.config"] = nil
Config = require("avante.config")
claude_provider = require("avante.providers.claude")
end)
busted.describe("API mode setup", function()
busted.it("should set correct api_key_name for API auth", function()
-- Mock the provider config
local P = require("avante.providers")
local original_parse = P.parse_config
P.parse_config = function() return { auth_type = "api" }, {} end
-- Mock tokenizer setup
-- NOTE(review): this package.loaded stub is never reverted after the test.
package.loaded["avante.tokenizers"] = {
setup = function() end,
}
Config.provider = "claude"
P["claude"] = { auth_type = "api" }
claude_provider.setup()
P.parse_config = original_parse
-- In API mode, should have set the api_key_name
assert.not_nil(claude_provider.api_key_name)
assert.is_true(claude_provider._is_setup)
end)
end)
busted.describe("Max mode setup", function()
async.it("should initialize state when nil", function()
-- Start with no state
claude_provider.state = nil
-- Mock everything to prevent actual setup
local P = require("avante.providers")
-- NOTE(review): unlike the API-mode test, parse_config is not restored
-- here, so the stub leaks into later tests -- confirm intended.
P.parse_config = function() return { auth_type = "max" }, {} end
package.loaded["avante.tokenizers"] = {
setup = function() end,
}
-- Mock Path to simulate no existing token file
local Path = require("plenary.path")
local original_new = Path.new
Path.new = function(path)
local mock_path = {
exists = function() return false end,
}
return mock_path
end
-- Mock vim.ui.open to prevent browser
local original_open = vim.ui.open
vim.ui.open = function() return true end
-- Mock Input
package.loaded["avante.ui.input"] = {
new = function()
return {
open = function() end,
}
end,
}
-- Mock vim.notify
local original_notify = vim.notify
vim.notify = function() end
Config.provider = "claude"
P["claude"] = { auth_type = "max" }
-- This will trigger authenticate since no token file exists
-- We're just checking it doesn't crash
pcall(function() claude_provider.setup() end)
-- Wait for any vim.schedule callbacks to complete
async_util.util.sleep(100)
Path.new = original_new
vim.ui.open = original_open
vim.notify = original_notify
-- State should have been initialized
assert.not_nil(claude_provider.state)
end)
end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/providers/watsonx_code_assistant_spec.lua | Lua | local busted = require("plenary.busted")
-- Tests for the Watsonx Code Assistant provider: static configuration
-- (API key name, role map, streaming disabled, function surface) and the
-- message parsing, including the special WCA_COMMAND system prompt.
busted.describe("watsonx_code_assistant provider", function()
  local watsonx_provider

  busted.before_each(function()
    -- Minimal setup without extensive mocking
    watsonx_provider = require("avante.providers.watsonx_code_assistant")
  end)

  busted.describe("basic configuration", function()
    busted.it("should have required properties", function()
      local provider = watsonx_provider
      assert.is_not_nil(provider.api_key_name)
      assert.equals("WCA_API_KEY", provider.api_key_name)
      assert.is_not_nil(provider.role_map)
      assert.equals("USER", provider.role_map.user)
      assert.equals("ASSISTANT", provider.role_map.assistant)
    end)

    busted.it("should disable streaming", function()
      assert.is_true(watsonx_provider:is_disable_stream())
    end)

    busted.it("should have required functions", function()
      assert.is_function(watsonx_provider.parse_messages)
      assert.is_function(watsonx_provider.parse_response_without_stream)
      assert.is_function(watsonx_provider.parse_curl_args)
    end)
  end)

  busted.describe("parse_messages", function()
    -- Small helper so each role/content expectation reads as one line.
    local function assert_message(msg, role, content)
      assert.equals(role, msg.role)
      assert.equals(content, msg.content)
    end

    busted.it("should parse messages with correct role mapping", function()
      ---@type AvantePromptOptions
      local prompt_opts = {
        system_prompt = "You are a helpful assistant",
        messages = {
          { content = "Hello", role = "user" },
          { content = "Hi there", role = "assistant" },
        },
      }
      local parsed = watsonx_provider:parse_messages(prompt_opts)
      assert.is_table(parsed)
      assert.equals(3, #parsed) -- system prompt + the two chat messages
      assert_message(parsed[1], "SYSTEM", "You are a helpful assistant")
      assert_message(parsed[2], "USER", "Hello")
      assert_message(parsed[3], "ASSISTANT", "Hi there")
    end)

    busted.it("should handle WCA_COMMAND system prompt", function()
      ---@type AvantePromptOptions
      local prompt_opts = {
        system_prompt = "WCA_COMMAND",
        messages = {
          { content = "/document main.py", role = "user" },
        },
      }
      local parsed = watsonx_provider:parse_messages(prompt_opts)
      assert.is_table(parsed)
      assert.equals(1, #parsed) -- only the user message, no system prompt
      assert_message(parsed[1], "USER", "/document main.py")
    end)
  end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/rag_service_spec.lua | Lua | local mock = require("luassert.mock")
local match = require("luassert.match")
-- Tests for avante.rag_service URI translation between the host filesystem
-- and the container mount. The avante.config module is replaced by a
-- luassert mock whose rag_service.host_mount points at /home/user.
describe("RagService", function()
  local rag_service
  local config_mock

  before_each(function()
    -- Fresh module plus a mocked config for every test
    rag_service = require("avante.rag_service")
    config_mock = mock(require("avante.config"), true)
    config_mock.rag_service = { host_mount = "/home/user" }
  end)

  after_each(function()
    -- Drop the cached module and undo the config mock
    package.loaded["avante.rag_service"] = nil
    mock.revert(config_mock)
  end)

  describe("URI conversion functions", function()
    it("should convert URIs between host and container formats", function()
      local on_host = "file:///home/user/project/file.txt"
      local in_container = "file:///host/project/file.txt"
      -- host -> container
      assert.equals(in_container, rag_service.to_container_uri(on_host))
      -- container -> host
      assert.equals(on_host, rag_service.to_local_uri(in_container))
    end)
  end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/ui/acp_confirm_adapter_spec.lua | Lua | local ACPConfirmAdapter = require("avante.ui.acp_confirm_adapter")
-- Tests for the ACP confirm adapter: mapping ACP permission options onto
-- the yes/all/no confirm slots, and generating sorted confirm buttons.
describe("ACPConfirmAdapter", function()
  describe("map_acp_options", function()
    it("should ignore reject_always", function()
      local mapped = ACPConfirmAdapter.map_acp_options({ { kind = "reject_always", optionId = "opt4" } })
      assert.is_nil(mapped.yes)
      assert.is_nil(mapped.all)
      assert.is_nil(mapped.no)
    end)

    it("should map multiple options correctly", function()
      local mapped = ACPConfirmAdapter.map_acp_options({
        { kind = "allow_once", optionId = "yes_id" },
        { kind = "allow_always", optionId = "all_id" },
        { kind = "reject_once", optionId = "no_id" },
        { kind = "reject_always", optionId = "ignored_id" },
      })
      assert.equals("yes_id", mapped.yes)
      assert.equals("all_id", mapped.all)
      assert.equals("no_id", mapped.no)
    end)

    it("should handle empty options", function()
      local mapped = ACPConfirmAdapter.map_acp_options({})
      assert.is_nil(mapped.yes)
      assert.is_nil(mapped.all)
      assert.is_nil(mapped.no)
    end)
  end)

  describe("generate_buttons_for_acp_options", function()
    it("should generate buttons with correct properties for each option kind", function()
      local buttons = ACPConfirmAdapter.generate_buttons_for_acp_options({
        { kind = "allow_once", optionId = "opt1", name = "Allow" },
        { kind = "allow_always", optionId = "opt2", name = "Allow always" },
        { kind = "reject_once", optionId = "opt3", name = "Reject" },
        { kind = "reject_always", optionId = "opt4", name = "Reject always" },
      })
      assert.equals(4, #buttons)
      for _, button in ipairs(buttons) do
        assert.is_not_nil(button.id)
        assert.is_not_nil(button.name)
        assert.is_not_nil(button.icon)
        assert.is_string(button.icon)
        -- Only the rejecting buttons carry a highlight group.
        local is_reject = button.name == "Reject" or button.name == "Reject always"
        if is_reject then
          assert.is_not_nil(button.hl)
        else
          assert.is_nil(button.hl)
        end
      end
    end)

    it("should handle multiple options and sort by name", function()
      local buttons = ACPConfirmAdapter.generate_buttons_for_acp_options({
        { kind = "reject_once", optionId = "opt3", name = "Reject" },
        { kind = "allow_once", optionId = "opt1", name = "Allow" },
        { kind = "allow_always", optionId = "opt2", name = "Allow always" },
      })
      assert.equals(3, #buttons)
      local expected_order = { "Allow", "Allow always", "Reject" }
      for i, name in ipairs(expected_order) do
        assert.equals(name, buttons[i].name)
      end
    end)

    it("should handle empty options", function()
      assert.equals(0, #ACPConfirmAdapter.generate_buttons_for_acp_options({}))
    end)

    it("should preserve all button properties", function()
      local buttons = ACPConfirmAdapter.generate_buttons_for_acp_options({
        { kind = "allow_once", optionId = "id1", name = "Button 1" },
        { kind = "reject_once", optionId = "id2", name = "Button 2" },
      })
      assert.equals(2, #buttons)
      for _, button in ipairs(buttons) do
        assert.is_not_nil(button.id)
        assert.is_not_nil(button.name)
        assert.is_not_nil(button.icon)
      end
    end)
  end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/file_spec.lua | Lua | local File = require("avante.utils.file")
local mock = require("luassert.mock")
local stub = require("luassert.stub")
-- Unit tests for avante.utils.file: content reading (with an internal
-- cache), existence checks via vim.uv.fs_stat, and icon lookup through
-- nvim-web-devicons or mini.icons. vim.api / vim.uv are replaced with
-- luassert mocks around every test.
describe("File", function()
local test_file = "test.txt"
local test_content = "test content\nline 2"
-- Mock vim API
local api_mock
local uv_mock
before_each(function()
-- Setup mocks
api_mock = mock(vim.api, true)
uv_mock = mock(vim.uv, true)
end)
after_each(function()
-- Clean up mocks
mock.revert(api_mock)
mock.revert(uv_mock)
end)
describe("read_content", function()
it("should read file content", function()
vim.fn.readfile = stub().returns({ "test content", "line 2" })
local content = File.read_content(test_file)
-- read_content is expected to join the readfile lines with "\n"
assert.equals(test_content, content)
assert.stub(vim.fn.readfile).was_called_with(test_file)
end)
it("should return nil for non-existent file", function()
vim.fn.readfile = stub().returns(nil)
local content = File.read_content("nonexistent.txt")
assert.is_nil(content)
end)
it("should use cache for subsequent reads", function()
vim.fn.readfile = stub().returns({ "test content", "line 2" })
-- Use a path not touched by earlier tests so the cache starts cold
local new_test_file = "test1.txt"
-- First read
local content1 = File.read_content(new_test_file)
assert.equals(test_content, content1)
-- Second read (should use cache)
local content2 = File.read_content(new_test_file)
assert.equals(test_content, content2)
-- readfile should only be called once
assert.stub(vim.fn.readfile).was_called(1)
end)
end)
describe("exists", function()
it("should return true for existing file", function()
uv_mock.fs_stat.returns({ type = "file" })
assert.is_true(File.exists(test_file))
assert.stub(uv_mock.fs_stat).was_called_with(test_file)
end)
it("should return false for non-existent file", function()
uv_mock.fs_stat.returns(nil)
assert.is_false(File.exists("nonexistent.txt"))
end)
end)
describe("get_file_icon", function()
local Filetype
local devicons_mock
before_each(function()
-- Mock plenary.filetype
Filetype = mock(require("plenary.filetype"), true)
-- Prepare devicons mock
devicons_mock = {
get_icon = stub().returns(""),
}
-- Reset _G.MiniIcons
_G.MiniIcons = nil
end)
after_each(function() mock.revert(Filetype) end)
it("should get icon using nvim-web-devicons", function()
Filetype.detect.returns("lua")
devicons_mock.get_icon.returns("")
-- Mock require for nvim-web-devicons
-- NOTE(review): if an assertion below throws, _G.require stays patched for
-- the rest of the run -- consider restoring it via finally() instead.
local old_require = _G.require
_G.require = function(module)
if module == "nvim-web-devicons" then return devicons_mock end
return old_require(module)
end
local icon = File.get_file_icon("test.lua")
assert.equals("", icon)
assert.stub(Filetype.detect).was_called_with("test.lua", {})
assert.stub(devicons_mock.get_icon).was_called()
_G.require = old_require
end)
it("should get icon using MiniIcons if available", function()
-- MiniIcons takes priority over nvim-web-devicons when present
_G.MiniIcons = {
get = stub().returns("", "color", "name"),
}
Filetype.detect.returns("lua")
local icon = File.get_file_icon("test.lua")
assert.equals("", icon)
assert.stub(Filetype.detect).was_called_with("test.lua", {})
assert.stub(_G.MiniIcons.get).was_called_with("filetype", "lua")
_G.MiniIcons = nil
end)
it("should handle unknown filetypes", function()
Filetype.detect.returns(nil)
devicons_mock.get_icon.returns("")
-- Mock require for nvim-web-devicons
local old_require = _G.require
_G.require = function(module)
if module == "nvim-web-devicons" then return devicons_mock end
return old_require(module)
end
local icon = File.get_file_icon("unknown.xyz")
assert.equals("", icon)
_G.require = old_require
end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/fix_diff_spec.lua | Lua | local Utils = require("avante.utils")
describe("Utils.fix_diff", function()
it("should not break normal diff", function()
local diff = [[------- SEARCH
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="p-6">
<div className="py-8 overflow-auto text-sm">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
<div className="text-center">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
=======
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="flex flex-col" style={{ maxHeight: '80vh' }}>
<div className="flex-1 overflow-y-auto p-6">
<div className="text-sm font-mono whitespace-pre-wrap">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
</div>
<div className="text-center mt-4">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
<div ref={(el) => {
if (el) {
el.scrollIntoView({ behavior: 'smooth', block: 'end' });
}
}} />
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end p-4 border-t">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
+++++++ REPLACE
]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(diff, fixed_diff)
end)
it("should not break normal multiple diff", function()
local diff = [[------- SEARCH
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="p-6">
<div className="py-8 overflow-auto text-sm">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
<div className="text-center">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
=======
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="flex flex-col" style={{ maxHeight: '80vh' }}>
<div className="flex-1 overflow-y-auto p-6">
<div className="text-sm font-mono whitespace-pre-wrap">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
</div>
<div className="text-center mt-4">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
<div ref={(el) => {
if (el) {
el.scrollIntoView({ behavior: 'smooth', block: 'end' });
}
}} />
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end p-4 border-t">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
+++++++ REPLACE
------- SEARCH
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="p-6">
=======
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl aaa">
<div className="p-12">
+++++++ REPLACE
]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(diff, fixed_diff)
end)
it("should fix duplicated REPLACE delimiters", function()
local diff = [[------- SEARCH
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="p-6">
<div className="py-8 overflow-auto text-sm">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
<div className="text-center">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
------- REPLACE
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="flex flex-col" style={{ maxHeight: '80vh' }}>
<div className="flex-1 overflow-y-auto p-6">
<div className="text-sm font-mono whitespace-pre-wrap">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
</div>
<div className="text-center mt-4">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
<div ref={(el) => {
if (el) {
el.scrollIntoView({ behavior: 'smooth', block: 'end' });
}
}} />
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end p-4 border-t">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
------- REPLACE
]]
local expected_diff = [[------- SEARCH
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="p-6">
<div className="py-8 overflow-auto text-sm">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
<div className="text-center">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
=======
<Modal isOpen={showLogs} onClose={() => setShowLogs(false)} title="Project PRD Logs" size="xl">
<div className="flex flex-col" style={{ maxHeight: '80vh' }}>
<div className="flex-1 overflow-y-auto p-6">
<div className="text-sm font-mono whitespace-pre-wrap">
<ReactMarkdown remarkPlugins={[remarkGfm]}>{logs.split('\n').join('\n\n')}</ReactMarkdown>
</div>
<div className="text-center mt-4">{logsLoading && <ScaleLoader color="#555" width={3} height={10} speedMultiplier={2.3} />}</div>
<div ref={(el) => {
if (el) {
el.scrollIntoView({ behavior: 'smooth', block: 'end' });
}
}} />
</div>
</div>
{logs.length > 0 && (
<div className="flex justify-end p-4 border-t">
<button
onClick={() => setShowLogs(false)}
className="bg-japanese-chigusa-600 text-white px-4 py-2 hover:bg-japanese-chigusa-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
)}
</Modal>
+++++++ REPLACE
]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(expected_diff, fixed_diff)
end)
-- fix_diff must split SEARCH/REPLACE markers that were emitted on the same
-- line as the surrounding content onto their own dedicated marker lines.
it("should fix the delimiter is on the same line as the content", function()
local diff = [[------- // Fetch initial stages when project changes
useEffect(() => {
if (!subscribedProject) return;
const fetchStages = async () => {
try {
const response = await fetch(`/api/projects/${subscribedProject}/stages`);
if (response.ok) {
const stagesData = await response.json();
setStages(stagesData);
}
} catch (error) {
console.error('Failed to fetch stages:', error);
}
};
fetchStages();
}, [subscribedProject, forceUpdateCounter]);
======= // Fetch initial stages when project changes
useEffect(() => {
if (!subscribedProject) return;
const fetchStages = async () => {
try {
// Use the correct API endpoint for stages by project UUID
const response = await fetch(`/api/stages?project_uuid=${subscribedProject}`);
if (response.ok) {
const stagesData = await response.json();
setStages(stagesData);
}
} catch (error) {
console.error('Failed to fetch stages:', error);
}
};
fetchStages();
}, [subscribedProject, forceUpdateCounter]);
+++++++ REPLACE
]]
-- Expected output: the content that followed each marker is moved onto the
-- line after a bare "------- SEARCH" / "=======" marker.
local expected_diff = [[------- SEARCH
// Fetch initial stages when project changes
useEffect(() => {
if (!subscribedProject) return;
const fetchStages = async () => {
try {
const response = await fetch(`/api/projects/${subscribedProject}/stages`);
if (response.ok) {
const stagesData = await response.json();
setStages(stagesData);
}
} catch (error) {
console.error('Failed to fetch stages:', error);
}
};
fetchStages();
}, [subscribedProject, forceUpdateCounter]);
=======
// Fetch initial stages when project changes
useEffect(() => {
if (!subscribedProject) return;
const fetchStages = async () => {
try {
// Use the correct API endpoint for stages by project UUID
const response = await fetch(`/api/stages?project_uuid=${subscribedProject}`);
if (response.ok) {
const stagesData = await response.json();
setStages(stagesData);
}
} catch (error) {
console.error('Failed to fetch stages:', error);
}
};
fetchStages();
}, [subscribedProject, forceUpdateCounter]);
+++++++ REPLACE
]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(expected_diff, fixed_diff)
end)
-- fix_diff must also accept a unified diff (---/+++ headers plus @@ hunks)
-- and convert it into SEARCH/REPLACE block format, one block per hunk.
it("should fix unified diff", function()
local diff = [[--- lua/avante/sidebar.lua
+++ lua/avante/sidebar.lua
@@ -3099,7 +3099,7 @@
function Sidebar:create_todos_container()
local history = Path.history.load(self.code.bufnr)
if not history or not history.todos or #history.todos == 0 then
- if self.containers.todos then self.containers.todos:unmount() end
+ if self.containers.todos and Utils.is_valid_container(self.containers.todos) then self.containers.todos:unmount() end
self.containers.todos = nil
self:adjust_layout()
return
@@ -3121,7 +3121,7 @@
}),
position = "bottom",
size = {
- height = 3,
+ height = math.min(3, math.max(1, vim.o.lines - 5)),
},
})
self.containers.todos:mount()
@@ -3151,11 +3151,15 @@
self:render_header(
self.containers.todos.winid,
todos_buf,
- Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
+ Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
Highlights.SUBTITLE,
Highlights.REVERSED_SUBTITLE
)
- self:adjust_layout()
+
+ local ok, err = pcall(function()
+ self:adjust_layout()
+ end)
+ if not ok then Utils.debug("Failed to adjust layout after todos creation:", err) end
end
function Sidebar:adjust_layout()
]]
-- Three hunks above become three SEARCH/REPLACE blocks below; context lines
-- appear in both halves, "-" lines only in SEARCH, "+" lines only in REPLACE.
local expected_diff = [[------- SEARCH
function Sidebar:create_todos_container()
local history = Path.history.load(self.code.bufnr)
if not history or not history.todos or #history.todos == 0 then
if self.containers.todos then self.containers.todos:unmount() end
self.containers.todos = nil
self:adjust_layout()
return
=======
function Sidebar:create_todos_container()
local history = Path.history.load(self.code.bufnr)
if not history or not history.todos or #history.todos == 0 then
if self.containers.todos and Utils.is_valid_container(self.containers.todos) then self.containers.todos:unmount() end
self.containers.todos = nil
self:adjust_layout()
return
+++++++ REPLACE
------- SEARCH
}),
position = "bottom",
size = {
height = 3,
},
})
self.containers.todos:mount()
=======
}),
position = "bottom",
size = {
height = math.min(3, math.max(1, vim.o.lines - 5)),
},
})
self.containers.todos:mount()
+++++++ REPLACE
------- SEARCH
self:render_header(
self.containers.todos.winid,
todos_buf,
Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
Highlights.SUBTITLE,
Highlights.REVERSED_SUBTITLE
)
self:adjust_layout()
end
function Sidebar:adjust_layout()
=======
self:render_header(
self.containers.todos.winid,
todos_buf,
Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
Highlights.SUBTITLE,
Highlights.REVERSED_SUBTITLE
)
local ok, err = pcall(function()
self:adjust_layout()
end)
if not ok then Utils.debug("Failed to adjust layout after todos creation:", err) end
end
function Sidebar:adjust_layout()
+++++++ REPLACE]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(expected_diff, fixed_diff)
end)
-- Same conversion as above, but without the ---/+++ file header: bare @@
-- hunks alone must still be recognized and converted identically.
it("should fix unified diff 2", function()
local diff = [[
@@ -3099,7 +3099,7 @@
function Sidebar:create_todos_container()
local history = Path.history.load(self.code.bufnr)
if not history or not history.todos or #history.todos == 0 then
- if self.containers.todos then self.containers.todos:unmount() end
+ if self.containers.todos and Utils.is_valid_container(self.containers.todos) then self.containers.todos:unmount() end
self.containers.todos = nil
self:adjust_layout()
return
@@ -3121,7 +3121,7 @@
}),
position = "bottom",
size = {
- height = 3,
+ height = math.min(3, math.max(1, vim.o.lines - 5)),
},
})
self.containers.todos:mount()
@@ -3151,11 +3151,15 @@
self:render_header(
self.containers.todos.winid,
todos_buf,
- Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
+ Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
Highlights.SUBTITLE,
Highlights.REVERSED_SUBTITLE
)
- self:adjust_layout()
+
+ local ok, err = pcall(function()
+ self:adjust_layout()
+ end)
+ if not ok then Utils.debug("Failed to adjust layout after todos creation:", err) end
end
function Sidebar:adjust_layout()
]]
local expected_diff = [[------- SEARCH
function Sidebar:create_todos_container()
local history = Path.history.load(self.code.bufnr)
if not history or not history.todos or #history.todos == 0 then
if self.containers.todos then self.containers.todos:unmount() end
self.containers.todos = nil
self:adjust_layout()
return
=======
function Sidebar:create_todos_container()
local history = Path.history.load(self.code.bufnr)
if not history or not history.todos or #history.todos == 0 then
if self.containers.todos and Utils.is_valid_container(self.containers.todos) then self.containers.todos:unmount() end
self.containers.todos = nil
self:adjust_layout()
return
+++++++ REPLACE
------- SEARCH
}),
position = "bottom",
size = {
height = 3,
},
})
self.containers.todos:mount()
=======
}),
position = "bottom",
size = {
height = math.min(3, math.max(1, vim.o.lines - 5)),
},
})
self.containers.todos:mount()
+++++++ REPLACE
------- SEARCH
self:render_header(
self.containers.todos.winid,
todos_buf,
Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
Highlights.SUBTITLE,
Highlights.REVERSED_SUBTITLE
)
self:adjust_layout()
end
function Sidebar:adjust_layout()
=======
self:render_header(
self.containers.todos.winid,
todos_buf,
Utils.icon(" ") .. "Todos" .. " (" .. done_count .. "/" .. total_count .. ")",
Highlights.SUBTITLE,
Highlights.REVERSED_SUBTITLE
)
local ok, err = pcall(function()
self:adjust_layout()
end)
if not ok then Utils.debug("Failed to adjust layout after todos creation:", err) end
end
function Sidebar:adjust_layout()
+++++++ REPLACE]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(expected_diff, fixed_diff)
end)
-- When the model emits two identical REPLACE sections separated by an extra
-- ======= divider, fix_diff must collapse them into a single block.
it("should fix duplicated replace blocks", function()
local diff = [[------- SEARCH
useEffect(() => {
if (!isExpanded || !textContentRef.current) {
setShowFixedCollapseButton(false);
return;
}
const observer = new IntersectionObserver(
([entry]) => {
setShowFixedCollapseButton(!entry.isIntersecting);
},
{
root: null,
rootMargin: '0px',
threshold: 1.0,
}
);
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
observer.observe(collapseButton);
}
return () => {
if (collapseButton) {
observer.unobserve(collapseButton);
}
};
}, [isExpanded, textContentRef.current]);
=======
useEffect(() => {
if (!isExpanded || !textContentRef.current) {
setShowFixedCollapseButton(false);
return;
}
// Check initial visibility of the collapse button
const checkInitialVisibility = () => {
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
const rect = collapseButton.getBoundingClientRect();
const isVisible = rect.top >= 0 && rect.bottom <= window.innerHeight;
setShowFixedCollapseButton(!isVisible);
}
};
// Small delay to ensure DOM is updated after expansion
const timeoutId = setTimeout(checkInitialVisibility, 100);
const observer = new IntersectionObserver(
([entry]) => {
setShowFixedCollapseButton(!entry.isIntersecting);
},
{
root: null,
rootMargin: '0px',
threshold: [0, 1.0], // Check both when it starts to leave and when fully visible
}
);
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
observer.observe(collapseButton);
}
return () => {
clearTimeout(timeoutId);
if (collapseButton) {
observer.unobserve(collapseButton);
}
};
}, [isExpanded, textContentRef.current]);
=======
useEffect(() => {
if (!isExpanded || !textContentRef.current) {
setShowFixedCollapseButton(false);
return;
}
// Check initial visibility of the collapse button
const checkInitialVisibility = () => {
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
const rect = collapseButton.getBoundingClientRect();
const isVisible = rect.top >= 0 && rect.bottom <= window.innerHeight;
setShowFixedCollapseButton(!isVisible);
}
};
// Small delay to ensure DOM is updated after expansion
const timeoutId = setTimeout(checkInitialVisibility, 100);
const observer = new IntersectionObserver(
([entry]) => {
setShowFixedCollapseButton(!entry.isIntersecting);
},
{
root: null,
rootMargin: '0px',
threshold: [0, 1.0], // Check both when it starts to leave and when fully visible
}
);
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
observer.observe(collapseButton);
}
return () => {
clearTimeout(timeoutId);
if (collapseButton) {
observer.unobserve(collapseButton);
}
};
}, [isExpanded, textContentRef.current]);
+++++++ REPLACE
]]
-- Expected: only one REPLACE section survives; the duplicate is dropped.
local expected_diff = [[------- SEARCH
useEffect(() => {
if (!isExpanded || !textContentRef.current) {
setShowFixedCollapseButton(false);
return;
}
const observer = new IntersectionObserver(
([entry]) => {
setShowFixedCollapseButton(!entry.isIntersecting);
},
{
root: null,
rootMargin: '0px',
threshold: 1.0,
}
);
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
observer.observe(collapseButton);
}
return () => {
if (collapseButton) {
observer.unobserve(collapseButton);
}
};
}, [isExpanded, textContentRef.current]);
=======
useEffect(() => {
if (!isExpanded || !textContentRef.current) {
setShowFixedCollapseButton(false);
return;
}
// Check initial visibility of the collapse button
const checkInitialVisibility = () => {
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
const rect = collapseButton.getBoundingClientRect();
const isVisible = rect.top >= 0 && rect.bottom <= window.innerHeight;
setShowFixedCollapseButton(!isVisible);
}
};
// Small delay to ensure DOM is updated after expansion
const timeoutId = setTimeout(checkInitialVisibility, 100);
const observer = new IntersectionObserver(
([entry]) => {
setShowFixedCollapseButton(!entry.isIntersecting);
},
{
root: null,
rootMargin: '0px',
threshold: [0, 1.0], // Check both when it starts to leave and when fully visible
}
);
const collapseButton = collapseButtonRef.current;
if (collapseButton) {
observer.observe(collapseButton);
}
return () => {
clearTimeout(timeoutId);
if (collapseButton) {
observer.unobserve(collapseButton);
}
};
}, [isExpanded, textContentRef.current]);
+++++++ REPLACE]]
local fixed_diff = Utils.fix_diff(diff)
assert.equals(expected_diff, fixed_diff)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/get_parent_path_spec.lua | Lua | local utils = require("avante.utils")
-- Specs for utils.get_parent_path: derives the parent directory of a file or
-- directory path using the platform-appropriate separator.
describe("get_parent_path", function()
-- Define path separator for our tests, using the same logic as in the utils module
local path_sep = jit.os:find("Windows") ~= nil and "\\" or "/"
it("should return the parent directory of a file path", function()
local filepath = "foo" .. path_sep .. "bar" .. path_sep .. "baz.txt"
local expected = "foo" .. path_sep .. "bar"
assert.are.equal(expected, utils.get_parent_path(filepath))
end)
it("should return the parent directory of a directory path", function()
local dirpath = "foo" .. path_sep .. "bar" .. path_sep .. "baz"
local expected = "foo" .. path_sep .. "bar"
assert.are.equal(expected, utils.get_parent_path(dirpath))
end)
it("should handle trailing separators", function()
local dirpath = "foo" .. path_sep .. "bar" .. path_sep .. "baz" .. path_sep
local expected = "foo" .. path_sep .. "bar"
assert.are.equal(expected, utils.get_parent_path(dirpath))
end)
it("should return '.' for a single file or directory", function()
assert.are.equal(".", utils.get_parent_path("foo.txt"))
assert.are.equal(".", utils.get_parent_path("dir"))
end)
it("should handle paths with multiple levels", function()
local filepath = "a" .. path_sep .. "b" .. path_sep .. "c" .. path_sep .. "d" .. path_sep .. "file.txt"
local expected = "a" .. path_sep .. "b" .. path_sep .. "c" .. path_sep .. "d"
assert.are.equal(expected, utils.get_parent_path(filepath))
end)
-- Renamed from the misleading "should return empty string for root directory":
-- the assertions below expect the root ("/") or drive ("C:") prefix, not "".
it("should return the root or drive prefix for top-level paths", function()
-- Root directory on Unix-like systems
if path_sep == "/" then
assert.are.equal("/", utils.get_parent_path("/foo"))
else
-- Windows uses drive letters, so parent of "C:\foo" is "C:"
local winpath = "C:" .. path_sep .. "foo"
assert.are.equal("C:", utils.get_parent_path(winpath))
end
end)
it("should return empty string for an empty string", function() assert.are.equal("", utils.get_parent_path("")) end)
it("should throw an error for nil input", function()
assert.has_error(function() utils.get_parent_path(nil) end, "filepath cannot be nil")
end)
it("should handle paths with spaces", function()
local filepath = "path with spaces" .. path_sep .. "file name.txt"
local expected = "path with spaces"
assert.are.equal(expected, utils.get_parent_path(filepath))
end)
it("should handle special characters in paths", function()
local filepath = "folder-name!" .. path_sep .. "file_#$%&.txt"
local expected = "folder-name!"
assert.are.equal(expected, utils.get_parent_path(filepath))
end)
it("should handle absolute paths", function()
if path_sep == "/" then
-- Unix-like paths
local filepath = path_sep .. "home" .. path_sep .. "user" .. path_sep .. "file.txt"
local expected = path_sep .. "home" .. path_sep .. "user"
assert.are.equal(expected, utils.get_parent_path(filepath))
-- Root directory edge case: the parent of "/" itself is ""
assert.are.equal("", utils.get_parent_path(path_sep))
else
-- Windows paths
local filepath = "C:" .. path_sep .. "Users" .. path_sep .. "user" .. path_sep .. "file.txt"
local expected = "C:" .. path_sep .. "Users" .. path_sep .. "user"
assert.are.equal(expected, utils.get_parent_path(filepath))
end
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/init_spec.lua | Lua | local Utils = require("avante.utils")
-- Specs for assorted helpers in avante.utils: string trimming, URL joining,
-- type predicates, mention extraction, <think>-block stripping, debouncing
-- and fuzzy line matching.
describe("Utils", function()
describe("trim", function()
it("should trim prefix", function() assert.equals("test", Utils.trim("prefix_test", { prefix = "prefix_" })) end)
it("should trim suffix", function() assert.equals("test", Utils.trim("test_suffix", { suffix = "_suffix" })) end)
it(
"should trim both prefix and suffix",
function() assert.equals("test", Utils.trim("prefix_test_suffix", { prefix = "prefix_", suffix = "_suffix" })) end
)
it(
"should return original string if no match",
function() assert.equals("test", Utils.trim("test", { prefix = "xxx", suffix = "yyy" })) end
)
end)
describe("url_join", function()
it("should join url parts correctly", function()
assert.equals("http://example.com/path", Utils.url_join("http://example.com", "path"))
assert.equals("http://example.com/path", Utils.url_join("http://example.com/", "/path"))
assert.equals("http://example.com/path/to", Utils.url_join("http://example.com", "path", "to"))
assert.equals("http://example.com/path", Utils.url_join("http://example.com/", "/path/"))
end)
it("should handle empty parts", function()
assert.equals("http://example.com", Utils.url_join("http://example.com", ""))
assert.equals("http://example.com", Utils.url_join("http://example.com", nil))
end)
end)
describe("is_type", function()
it("should check basic types correctly", function()
assert.is_true(Utils.is_type("string", "test"))
assert.is_true(Utils.is_type("number", 123))
assert.is_true(Utils.is_type("boolean", true))
assert.is_true(Utils.is_type("table", {}))
assert.is_true(Utils.is_type("function", function() end))
assert.is_true(Utils.is_type("nil", nil))
end)
it("should check list type correctly", function()
assert.is_true(Utils.is_type("list", { 1, 2, 3 }))
assert.is_false(Utils.is_type("list", { a = 1, b = 2 }))
end)
it("should check map type correctly", function()
assert.is_true(Utils.is_type("map", { a = 1, b = 2 }))
assert.is_false(Utils.is_type("map", { 1, 2, 3 }))
end)
end)
describe("get_indentation", function()
it("should get correct indentation", function()
assert.equals(" ", Utils.get_indentation(" test"))
assert.equals("\t", Utils.get_indentation("\ttest"))
assert.equals("", Utils.get_indentation("test"))
end)
it("should handle empty or nil input", function()
assert.equals("", Utils.get_indentation(""))
assert.equals("", Utils.get_indentation(nil))
end)
end)
-- Suite label fixed from the misspelled "trime_space"; it exercises Utils.trim_space.
describe("trim_space", function()
it("should remove indentation correctly", function()
assert.equals("test", Utils.trim_space(" test"))
assert.equals("test", Utils.trim_space("\ttest"))
assert.equals("test", Utils.trim_space("test"))
end)
it("should handle empty or nil input", function()
assert.equals("", Utils.trim_space(""))
assert.equals(nil, Utils.trim_space(nil))
end)
end)
describe("is_first_letter_uppercase", function()
it("should detect uppercase first letter", function()
assert.is_true(Utils.is_first_letter_uppercase("Test"))
assert.is_true(Utils.is_first_letter_uppercase("ABC"))
end)
it("should detect lowercase first letter", function()
assert.is_false(Utils.is_first_letter_uppercase("test"))
assert.is_false(Utils.is_first_letter_uppercase("abc"))
end)
end)
describe("extract_mentions", function()
-- Per these expectations, @codebase is removed from the text while
-- @diagnostics is kept; both toggle their corresponding flags.
it("should extract @codebase mention", function()
local result = Utils.extract_mentions("test @codebase")
assert.equals("test ", result.new_content)
assert.is_true(result.enable_project_context)
assert.is_false(result.enable_diagnostics)
end)
it("should extract @diagnostics mention", function()
local result = Utils.extract_mentions("test @diagnostics")
assert.equals("test @diagnostics", result.new_content)
assert.is_false(result.enable_project_context)
assert.is_true(result.enable_diagnostics)
end)
it("should handle multiple mentions", function()
local result = Utils.extract_mentions("test @codebase @diagnostics")
assert.equals("test @diagnostics", result.new_content)
assert.is_true(result.enable_project_context)
assert.is_true(result.enable_diagnostics)
end)
end)
describe("get_mentions", function()
it("should return valid mentions", function()
local mentions = Utils.get_mentions()
assert.equals("codebase", mentions[1].command)
assert.equals("diagnostics", mentions[2].command)
end)
end)
describe("trim_think_content", function()
-- Only a <think> block at the very start of the string is stripped.
it("should remove think content", function()
local input = "<think>this should be removed</think> Hello World"
assert.equals(" Hello World", Utils.trim_think_content(input))
end)
it("The think tag that is not in the prefix should not be deleted.", function()
local input = "Hello <think>this should not be removed</think> World"
assert.equals("Hello <think>this should not be removed</think> World", Utils.trim_think_content(input))
end)
it("should handle multiple think blocks", function()
local input = "<think>first</think>middle<think>second</think>"
assert.equals("middle<think>second</think>", Utils.trim_think_content(input))
end)
it("should handle empty think blocks", function()
local input = "<think></think>testtest"
assert.equals("testtest", Utils.trim_think_content(input))
end)
-- Renamed from a duplicated "should handle empty think blocks" title: this
-- case checks that an empty block NOT at the start of the string is kept.
it("should keep empty think blocks that are not at the start", function()
local input = "test<think></think>test"
assert.equals("test<think></think>test", Utils.trim_think_content(input))
end)
it("should handle input without think blocks", function()
local input = "just normal text"
assert.equals("just normal text", Utils.trim_think_content(input))
end)
end)
describe("debounce", function()
it("should debounce function calls", function()
local count = 0
local debounced = Utils.debounce(function() count = count + 1 end, 100)
-- Call multiple times in quick succession
debounced()
debounced()
debounced()
-- Should not have executed yet
assert.equals(0, count)
-- Wait for debounce timeout
vim.wait(200, function() return false end)
-- Should have executed once
assert.equals(1, count)
end)
it("should cancel previous timer on new calls", function()
local count = 0
local debounced = Utils.debounce(function(c) count = c end, 100)
-- First call
debounced(1)
-- Wait partial time
vim.wait(50, function() return false end)
-- Second call should cancel first
debounced(233)
-- Count should still be 0
assert.equals(0, count)
-- Wait for timeout
vim.wait(200, function() return false end)
-- Should only execute the latest once
assert.equals(233, count)
end)
it("should pass arguments correctly", function()
local result
local debounced = Utils.debounce(function(x, y) result = x + y end, 100)
debounced(2, 3)
-- Wait for timeout
vim.wait(200, function() return false end)
assert.equals(5, result)
end)
end)
describe("fuzzy_match", function()
-- fuzzy_match returns the 1-based start/end line range of the needle
-- within the haystack, tolerating surrounding whitespace.
it("should match exact lines", function()
local lines = { "test", "test2", "test3", "test4" }
local start_line, end_line = Utils.fuzzy_match(lines, { "test2", "test3" })
assert.equals(2, start_line)
assert.equals(3, end_line)
end)
it("should match lines with suffix", function()
local lines = { "test", "test2", "test3", "test4" }
local start_line, end_line = Utils.fuzzy_match(lines, { "test2 \t", "test3" })
assert.equals(2, start_line)
assert.equals(3, end_line)
end)
it("should match lines with space", function()
local lines = { "test", "test2", "test3", "test4" }
local start_line, end_line = Utils.fuzzy_match(lines, { "test2 ", " test3" })
assert.equals(2, start_line)
assert.equals(3, end_line)
end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/join_paths_spec.lua | Lua | local assert = require("luassert")
local utils = require("avante.utils")
-- Specs for utils.join_paths: joins path segments with utils.path_sep,
-- skipping nil/empty segments and restarting at an absolute segment.
describe("join_paths", function()
it("should join multiple path segments with proper separator", function()
local result = utils.join_paths("path", "to", "file.lua")
assert.equals("path" .. utils.path_sep .. "to" .. utils.path_sep .. "file.lua", result)
end)
it("should handle empty path segments", function()
local result = utils.join_paths("", "to", "file.lua")
assert.equals("to" .. utils.path_sep .. "file.lua", result)
end)
it("should handle nil path segments", function()
local result = utils.join_paths(nil, "to", "file.lua")
assert.equals("to" .. utils.path_sep .. "file.lua", result)
end)
-- Renamed from the duplicated "should handle empty path segments" title:
-- this case covers an empty segment in the middle rather than at the start.
it("should skip empty path segments in the middle", function()
local result = utils.join_paths("path", "", "file.lua")
assert.equals("path" .. utils.path_sep .. "file.lua", result)
end)
it("should use absolute path when encountered", function()
local absolute_path = utils.is_win() and "C:\\absolute\\path" or "/absolute/path"
local result = utils.join_paths("relative", "path", absolute_path)
assert.equals(absolute_path, result)
end)
it("should handle paths with trailing separators", function()
local path_with_sep = "path" .. utils.path_sep
local result = utils.join_paths(path_with_sep, "file.lua")
assert.equals("path" .. utils.path_sep .. "file.lua", result)
end)
it("should handle no paths provided", function()
local result = utils.join_paths()
assert.equals(".", result)
end)
it("should return first path when only one path provided", function()
local result = utils.join_paths("path")
assert.equals("path", result)
end)
it("should handle path with mixed separators", function()
-- This test is more relevant on Windows where both / and \ are valid separators
local mixed_path = utils.is_win() and "path\\to/file" or "path/to/file"
local result = utils.join_paths("base", mixed_path)
-- The function should use utils.path_sep for joining
assert.equals("base" .. utils.path_sep .. mixed_path, result)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/make_relative_path_spec.lua | Lua | local assert = require("luassert")
local utils = require("avante.utils")
-- Specs for utils.make_relative_path: strips a base directory prefix from a
-- path, normalizing trailing "/." components on either argument.
describe("make_relative_path", function()
-- Shorthand so each case fits on a single assertion line.
local function rel(filepath, base_dir) return utils.make_relative_path(filepath, base_dir) end
it("should remove base directory from filepath", function()
assert.equals("src/file.lua", rel("/path/to/project/src/file.lua", "/path/to/project"))
end)
it("should handle trailing dot-slash in base_dir", function()
assert.equals("src/file.lua", rel("/path/to/project/src/file.lua", "/path/to/project/."))
end)
it("should handle trailing dot-slash in filepath", function()
assert.equals("src", rel("/path/to/project/src/.", "/path/to/project"))
end)
it("should handle both having trailing dot-slash", function()
assert.equals("src", rel("/path/to/project/src/.", "/path/to/project/."))
end)
it("should return the filepath when base_dir is not a prefix", function()
assert.equals("/path/to/project/src/file.lua", rel("/path/to/project/src/file.lua", "/different/path"))
end)
it("should handle identical paths", function()
assert.equals(".", rel("/path/to/project", "/path/to/project"))
end)
it("should handle empty strings", function()
assert.equals(".", rel("", ""))
end)
it("should preserve trailing slash in filepath", function()
assert.equals("src/", rel("/path/to/project/src/", "/path/to/project"))
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
tests/utils/streaming_json_parser_spec.lua | Lua | local StreamingJSONParser = require("avante.utils.streaming_json_parser")
-- Specs for StreamingJSONParser: an incremental JSON parser that accepts
-- input in arbitrary chunks and exposes partial results mid-stream.
describe("StreamingJSONParser", function()
local parser
-- Fresh parser per test so no streaming state leaks between cases.
before_each(function() parser = StreamingJSONParser:new() end)
describe("initialization", function()
it("should create a new parser with empty state", function()
assert.is_not_nil(parser)
assert.equals("", parser.buffer)
assert.is_not_nil(parser.state)
assert.is_false(parser.state.inString)
assert.is_false(parser.state.escaping)
assert.is_table(parser.state.stack)
assert.equals(0, #parser.state.stack)
assert.is_nil(parser.state.result)
assert.is_nil(parser.state.currentKey)
assert.is_nil(parser.state.current)
assert.is_table(parser.state.parentKeys)
end)
end)
describe("parse", function()
-- parse() returns (result, complete): the value so far plus a boolean
-- that is true only once a full top-level JSON value has been consumed.
it("should parse a complete simple JSON object", function()
local result, complete = parser:parse('{"key": "value"}')
assert.is_true(complete)
assert.is_table(result)
assert.equals("value", result.key)
end)
it("should parse breaklines", function()
local result, complete = parser:parse('{"key": "value\nv"}')
assert.is_true(complete)
assert.is_table(result)
assert.equals("value\nv", result.key)
end)
it("should parse a complete simple JSON array", function()
local result, complete = parser:parse("[1, 2, 3]")
assert.is_true(complete)
assert.is_table(result)
assert.equals(1, result[1])
assert.equals(2, result[2])
assert.equals(3, result[3])
end)
-- Values parsed so far are visible even while complete is still false.
it("should handle streaming JSON in multiple chunks", function()
local result1, complete1 = parser:parse('{"name": "John')
assert.is_false(complete1)
assert.is_table(result1)
assert.equals("John", result1.name)
local result2, complete2 = parser:parse('", "age": 30}')
assert.is_true(complete2)
assert.is_table(result2)
assert.equals("John", result2.name)
assert.equals(30, result2.age)
end)
it("should handle streaming string field", function()
local result1, complete1 = parser:parse('{"name": {"first": "John')
assert.is_false(complete1)
assert.is_table(result1)
assert.equals("John", result1.name.first)
end)
it("should parse nested objects", function()
local json = [[{
"person": {
"name": "John",
"age": 30,
"address": {
"city": "New York",
"zip": "10001"
}
}
}]]
local result, complete = parser:parse(json)
assert.is_true(complete)
assert.is_table(result)
assert.is_table(result.person)
assert.equals("John", result.person.name)
assert.equals(30, result.person.age)
assert.is_table(result.person.address)
assert.equals("New York", result.person.address.city)
assert.equals("10001", result.person.address.zip)
end)
it("should parse nested arrays", function()
local json = [[{
"matrix": [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
}]]
local result, complete = parser:parse(json)
assert.is_true(complete)
assert.is_table(result)
assert.is_table(result.matrix)
assert.equals(3, #result.matrix)
assert.equals(1, result.matrix[1][1])
assert.equals(5, result.matrix[2][2])
assert.equals(9, result.matrix[3][3])
end)
it("should handle boolean values", function()
local result, complete = parser:parse('{"success": true, "failed": false}')
assert.is_true(complete)
assert.is_table(result)
assert.is_true(result.success)
assert.is_false(result.failed)
end)
-- JSON null maps to Lua nil (the key is simply absent from the table).
it("should handle null values", function()
local result, complete = parser:parse('{"value": null}')
assert.is_true(complete)
assert.is_table(result)
assert.is_nil(result.value)
end)
it("should handle escaped characters in strings", function()
local result, complete = parser:parse('{"text": "line1\\nline2\\t\\"quoted\\""}')
assert.is_true(complete)
assert.is_table(result)
assert.equals('line1\nline2\t"quoted"', result.text)
end)
it("should handle numbers correctly", function()
local result, complete = parser:parse('{"integer": 42, "float": 3.14, "negative": -10, "exponent": 1.2e3}')
assert.is_true(complete)
assert.is_table(result)
assert.equals(42, result.integer)
assert.equals(3.14, result.float)
assert.equals(-10, result.negative)
assert.equals(1200, result.exponent)
end)
it("should handle streaming complex JSON", function()
-- Chunk boundaries deliberately fall inside objects, keys and values.
local chunks = {
'{"data": [{"id": 1, "info": {"name":',
' "Product A", "active": true}}, {"id": 2, ',
'"info": {"name": "Product B", "active": false',
'}}], "total": 2}',
}
local complete = false
local result
for _, chunk in ipairs(chunks) do
result, complete = parser:parse(chunk)
end
assert.is_true(complete)
assert.is_table(result)
assert.equals(2, #result.data)
assert.equals(1, result.data[1].id)
assert.equals("Product A", result.data[1].info.name)
assert.is_true(result.data[1].info.active)
assert.equals(2, result.data[2].id)
assert.equals("Product B", result.data[2].info.name)
assert.is_false(result.data[2].info.active)
assert.equals(2, result.total)
end)
it("should reset the parser state correctly", function()
parser:parse('{"key": "value"}')
parser:reset()
assert.equals("", parser.buffer)
assert.is_false(parser.state.inString)
assert.is_false(parser.state.escaping)
assert.is_table(parser.state.stack)
assert.equals(0, #parser.state.stack)
assert.is_nil(parser.state.result)
assert.is_nil(parser.state.currentKey)
assert.is_nil(parser.state.current)
assert.is_table(parser.state.parentKeys)
end)
it("should return partial results for incomplete JSON", function()
parser:reset()
local result, complete = parser:parse('{"stream": [1, 2,')
assert.is_false(complete)
assert.is_table(result)
assert.is_table(result.stream)
assert.equals(1, result.stream[1])
assert.equals(2, result.stream[2])
-- We need exactly one item in the stack (the array)
assert.equals(2, #parser.state.stack)
end)
it("should handle whitespace correctly", function()
parser:reset()
local result, complete = parser:parse('{"key1": "value1", "key2": 42}')
assert.is_true(complete)
assert.is_table(result)
assert.equals("value1", result.key1)
assert.equals(42, result.key2)
end)
it("should provide access to partial results during streaming", function()
parser:parse('{"name": "John", "items": [')
-- getCurrentPartial exposes the in-progress value without consuming input.
local partial = parser:getCurrentPartial()
assert.is_table(partial)
assert.equals("John", partial.name)
assert.is_table(partial.items)
parser:parse("1, 2]")
local result, complete = parser:parse("}")
assert.is_true(complete)
assert.equals("John", result.name)
assert.equals(1, result.items[1])
assert.equals(2, result.items[2])
end)
end)
end)
| yetone/avante.nvim | 17,366 | Use your Neovim like using Cursor AI IDE! | Lua | yetone | yetone | Isoform |
docs/css/custom.css | CSS | .termynal-comment {
color: #4a968f;
font-style: italic;
display: block;
}
/* Container around an embedded termynal terminal. */
.termy {
/* For right to left languages */
direction: ltr;
}
/* Preserve newlines in terminal output while still wrapping long lines. */
.termy [data-termynal] {
white-space: pre-wrap;
}
/* Right to left languages */
code {
direction: ltr;
display: inline-block;
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
docs/css/termynal.css | CSS | /**
 * termynal.js
 *
 * @author Ines Montani <ines@ines.io>
 * @version 0.0.1
 * @license MIT
 */
/* Theme colors shared by all termynal rules below. */
:root {
--color-bg: #252a33;
--color-text: #eee;
--color-text-subtle: #a2a2a2;
}
/* The terminal window itself. */
[data-termynal] {
width: 750px;
max-width: 100%;
background: var(--color-bg);
color: var(--color-text);
/* font-size: 18px; */
font-size: 15px;
/* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */
font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace;
border-radius: 4px;
padding: 75px 45px 35px;
position: relative;
-webkit-box-sizing: border-box;
box-sizing: border-box;
}
/* Fake macOS-style window buttons in the top-left corner. */
[data-termynal]:before {
content: '';
position: absolute;
top: 15px;
left: 15px;
display: inline-block;
width: 15px;
height: 15px;
border-radius: 50%;
/* A little hack to display the window buttons in one pseudo element. */
background: #d9515d;
-webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930;
box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930;
}
/* Window title bar label. */
[data-termynal]:after {
content: 'bash';
position: absolute;
color: var(--color-text-subtle);
top: 5px;
left: 0;
width: 100%;
text-align: center;
}
/* "restart"/"fast-forward" control link rendered by termynal.js. */
a[data-terminal-control] {
text-align: right;
display: block;
color: #aebbff;
}
/* One terminal line. */
[data-ty] {
display: block;
line-height: 2;
}
[data-ty]:before {
/* Set up defaults and ensure empty lines are displayed. */
content: '';
display: inline-block;
vertical-align: middle;
}
/* Prompt prefix shared by input lines and custom-prompt lines. */
[data-ty="input"]:before,
[data-ty-prompt]:before {
margin-right: 0.75em;
color: var(--color-text-subtle);
}
[data-ty="input"]:before {
content: '$';
}
[data-ty][data-ty-prompt]:before {
content: attr(data-ty-prompt);
}
/* Blinking typing cursor appended after the active line. */
[data-ty-cursor]:after {
content: attr(data-ty-cursor);
font-family: monospace;
margin-left: 0.5em;
-webkit-animation: blink 1s infinite;
animation: blink 1s infinite;
}
/* Cursor animation */
@-webkit-keyframes blink {
50% {
opacity: 0;
}
}
@keyframes blink {
50% {
opacity: 0;
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
docs/js/custom.js | JavaScript | function setupTermynal() {
document.querySelectorAll(".use-termynal").forEach(node => {
node.style.display = "block";
new Termynal(node, {
lineDelay: 500
});
});
const progressLiteralStart = "---> 100%";
const promptLiteralStart = "$ ";
const customPromptLiteralStart = "# ";
const termynalActivateClass = "termy";
let termynals = [];
function createTermynals() {
document
.querySelectorAll(`.${termynalActivateClass} .highlight`)
.forEach(node => {
const text = node.textContent;
const lines = text.split("\n");
const useLines = [];
let buffer = [];
function saveBuffer() {
if (buffer.length) {
let isBlankSpace = true;
buffer.forEach(line => {
if (line) {
isBlankSpace = false;
}
});
dataValue = {};
if (isBlankSpace) {
dataValue["delay"] = 0;
}
if (buffer[buffer.length - 1] === "") {
// A last single <br> won't have effect
// so put an additional one
buffer.push("");
}
const bufferValue = buffer.join("<br>");
dataValue["value"] = bufferValue;
useLines.push(dataValue);
buffer = [];
}
}
for (let line of lines) {
if (line === progressLiteralStart) {
saveBuffer();
useLines.push({
type: "progress"
});
} else if (line.startsWith(promptLiteralStart)) {
saveBuffer();
const value = line.replace(promptLiteralStart, "").trimEnd();
useLines.push({
type: "input",
value: value
});
} else if (line.startsWith("// ")) {
saveBuffer();
const value = "💬 " + line.replace("// ", "").trimEnd();
useLines.push({
value: value,
class: "termynal-comment",
delay: 0
});
} else if (line.startsWith(customPromptLiteralStart)) {
saveBuffer();
const promptStart = line.indexOf(promptLiteralStart);
if (promptStart === -1) {
console.error("Custom prompt found but no end delimiter", line)
}
const prompt = line.slice(0, promptStart).replace(customPromptLiteralStart, "")
let value = line.slice(promptStart + promptLiteralStart.length);
useLines.push({
type: "input",
value: value,
prompt: prompt
});
} else {
buffer.push(line);
}
}
saveBuffer();
const div = document.createElement("div");
node.replaceWith(div);
const termynal = new Termynal(div, {
lineData: useLines,
noInit: true,
lineDelay: 500
});
termynals.push(termynal);
});
}
function loadVisibleTermynals() {
termynals = termynals.filter(termynal => {
if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) {
termynal.init();
return false;
}
return true;
});
}
window.addEventListener("scroll", loadVisibleTermynals);
createTermynals();
loadVisibleTermynals();
}
/** Page entry point: wire up every Termynal animation on the page. */
async function main() {
    setupTermynal()
}

main()
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
docs/js/termynal.js | JavaScript | /**
* termynal.js
* A lightweight, modern and extensible animated terminal window, using
* async/await.
*
* @author Ines Montani <ines@ines.io>
* @version 0.0.1
* @license MIT
*/
'use strict';
/** Generate a terminal widget. */
class Termynal {
/**
* Construct the widget's settings.
* @param {(string|Node)=} container - Query selector or container element.
* @param {Object=} options - Custom settings.
* @param {string} options.prefix - Prefix to use for data attributes.
* @param {number} options.startDelay - Delay before animation, in ms.
* @param {number} options.typeDelay - Delay between each typed character, in ms.
* @param {number} options.lineDelay - Delay between each line, in ms.
* @param {number} options.progressLength - Number of characters displayed as progress bar.
* @param {string} options.progressChar – Character to use for progress bar, defaults to █.
* @param {number} options.progressPercent - Max percent of progress.
* @param {string} options.cursor – Character to use for cursor, defaults to ▋.
* @param {Object[]} lineData - Dynamically loaded line data objects.
* @param {boolean} options.noInit - Don't initialise the animation.
*/
constructor(container = '#termynal', options = {}) {
this.container = (typeof container === 'string') ? document.querySelector(container) : container;
this.pfx = `data-${options.prefix || 'ty'}`;
this.originalStartDelay = this.startDelay = options.startDelay
|| parseFloat(this.container.getAttribute(`${this.pfx}-startDelay`)) || 600;
this.originalTypeDelay = this.typeDelay = options.typeDelay
|| parseFloat(this.container.getAttribute(`${this.pfx}-typeDelay`)) || 90;
this.originalLineDelay = this.lineDelay = options.lineDelay
|| parseFloat(this.container.getAttribute(`${this.pfx}-lineDelay`)) || 1500;
this.progressLength = options.progressLength
|| parseFloat(this.container.getAttribute(`${this.pfx}-progressLength`)) || 40;
this.progressChar = options.progressChar
|| this.container.getAttribute(`${this.pfx}-progressChar`) || '█';
this.progressPercent = options.progressPercent
|| parseFloat(this.container.getAttribute(`${this.pfx}-progressPercent`)) || 100;
this.cursor = options.cursor
|| this.container.getAttribute(`${this.pfx}-cursor`) || '▋';
this.lineData = this.lineDataToElements(options.lineData || []);
this.loadLines()
if (!options.noInit) this.init()
}
loadLines() {
// Load all the lines and create the container so that the size is fixed
// Otherwise it would be changing and the user viewport would be constantly
// moving as she/he scrolls
const finish = this.generateFinish()
finish.style.visibility = 'hidden'
this.container.appendChild(finish)
// Appends dynamically loaded lines to existing line elements.
this.lines = [...this.container.querySelectorAll(`[${this.pfx}]`)].concat(this.lineData);
for (let line of this.lines) {
line.style.visibility = 'hidden'
this.container.appendChild(line)
}
const restart = this.generateRestart()
restart.style.visibility = 'hidden'
this.container.appendChild(restart)
this.container.setAttribute('data-termynal', '');
}
/**
* Initialise the widget, get lines, clear container and start animation.
*/
init() {
/**
* Calculates width and height of Termynal container.
* If container is empty and lines are dynamically loaded, defaults to browser `auto` or CSS.
*/
const containerStyle = getComputedStyle(this.container);
this.container.style.width = containerStyle.width !== '0px' ?
containerStyle.width : undefined;
this.container.style.minHeight = containerStyle.height !== '0px' ?
containerStyle.height : undefined;
this.container.setAttribute('data-termynal', '');
this.container.innerHTML = '';
for (let line of this.lines) {
line.style.visibility = 'visible'
}
this.start();
}
/**
* Start the animation and rener the lines depending on their data attributes.
*/
async start() {
this.addFinish()
await this._wait(this.startDelay);
for (let line of this.lines) {
const type = line.getAttribute(this.pfx);
const delay = line.getAttribute(`${this.pfx}-delay`) || this.lineDelay;
if (type == 'input') {
line.setAttribute(`${this.pfx}-cursor`, this.cursor);
await this.type(line);
await this._wait(delay);
}
else if (type == 'progress') {
await this.progress(line);
await this._wait(delay);
}
else {
this.container.appendChild(line);
await this._wait(delay);
}
line.removeAttribute(`${this.pfx}-cursor`);
}
this.addRestart()
this.finishElement.style.visibility = 'hidden'
this.lineDelay = this.originalLineDelay
this.typeDelay = this.originalTypeDelay
this.startDelay = this.originalStartDelay
}
generateRestart() {
const restart = document.createElement('a')
restart.onclick = (e) => {
e.preventDefault()
this.container.innerHTML = ''
this.init()
}
restart.href = '#'
restart.setAttribute('data-terminal-control', '')
restart.innerHTML = "restart ↻"
return restart
}
generateFinish() {
const finish = document.createElement('a')
finish.onclick = (e) => {
e.preventDefault()
this.lineDelay = 0
this.typeDelay = 0
this.startDelay = 0
}
finish.href = '#'
finish.setAttribute('data-terminal-control', '')
finish.innerHTML = "fast →"
this.finishElement = finish
return finish
}
addRestart() {
const restart = this.generateRestart()
this.container.appendChild(restart)
}
addFinish() {
const finish = this.generateFinish()
this.container.appendChild(finish)
}
/**
* Animate a typed line.
* @param {Node} line - The line element to render.
*/
async type(line) {
const chars = [...line.textContent];
line.textContent = '';
this.container.appendChild(line);
for (let char of chars) {
const delay = line.getAttribute(`${this.pfx}-typeDelay`) || this.typeDelay;
await this._wait(delay);
line.textContent += char;
}
}
/**
* Animate a progress bar.
* @param {Node} line - The line element to render.
*/
async progress(line) {
const progressLength = line.getAttribute(`${this.pfx}-progressLength`)
|| this.progressLength;
const progressChar = line.getAttribute(`${this.pfx}-progressChar`)
|| this.progressChar;
const chars = progressChar.repeat(progressLength);
const progressPercent = line.getAttribute(`${this.pfx}-progressPercent`)
|| this.progressPercent;
line.textContent = '';
this.container.appendChild(line);
for (let i = 1; i < chars.length + 1; i++) {
await this._wait(this.typeDelay);
const percent = Math.round(i / chars.length * 100);
line.textContent = `${chars.slice(0, i)} ${percent}%`;
if (percent>progressPercent) {
break;
}
}
}
/**
* Helper function for animation delays, called with `await`.
* @param {number} time - Timeout, in ms.
*/
_wait(time) {
return new Promise(resolve => setTimeout(resolve, time));
}
/**
* Converts line data objects into line elements.
*
* @param {Object[]} lineData - Dynamically loaded lines.
* @param {Object} line - Line data object.
* @returns {Element[]} - Array of line elements.
*/
lineDataToElements(lineData) {
return lineData.map(line => {
let div = document.createElement('div');
div.innerHTML = `<span ${this._attributes(line)}>${line.value || ''}</span>`;
return div.firstElementChild;
});
}
/**
* Helper function for generating attributes string.
*
* @param {Object} line - Line data object.
* @returns {string} - String of attributes.
*/
_attributes(line) {
let attrs = '';
for (let prop in line) {
// Custom add class
if (prop === 'class') {
attrs += ` class=${line[prop]} `
continue
}
if (prop === 'type') {
attrs += `${this.pfx}="${line[prop]}" `
} else if (prop !== 'value') {
attrs += `${this.pfx}-${prop}="${line[prop]}" `
}
}
return attrs;
}
}
/**
 * HTML API: If current script has container(s) specified, initialise Termynal.
 */
if (document.currentScript.hasAttribute('data-termynal-container')) {
    // The attribute holds one or more selectors separated by "|".
    const selectors = document.currentScript.getAttribute('data-termynal-container');
    for (const container of selectors.split('|')) {
        new Termynal(container);
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/config/environment.rs | Rust | use crate::{
models::{Grouping, Method, OpenapiPath},
utils::Error,
};
use float_eq::float_eq;
use lazy_static::lazy_static;
use std::{
collections::{HashMap, HashSet},
env,
path::Path,
str::FromStr,
sync::{Arc, RwLock},
};
use url::Url;
use super::{CoveAPIConfig, OpenapiSource, Runtime};
// Environment-variable names understood by CoveAPI.
const ENV_VAR_APP_BASE_URL: &str = "COVEAPI_APP_BASE_URL";
const ENV_VAR_DEBUG: &str = "COVEAPI_DEBUG";
const ENV_VAR_OPENAPI_SOURCE: &str = "COVEAPI_OPENAPI_SOURCE";
const ENV_VAR_ACCOUNT_FOR_FORBIDDEN: &str = "COVEAPI_ACCOUNT_FOR_FORBIDDEN";
const ENV_VAR_ACCOUNT_FOR_UNAUTORIZED: &str = "COVEAPI_ACCOUNT_FOR_UNAUTORIZED";
const ENV_VAR_TEST_COVERAGE: &str = "COVEAPI_TEST_COVERAGE";
const ENV_VAR_PORT: &str = "COVEAPI_PORT";
const ENV_VAR_MAPPING: &str = "COVEAPI_MAPPING";
const ENV_VAR_IS_MERGE: &str = "COVEAPI_IS_MERGE";
const ENV_VAR_ONLY_ACCOUNT_MERGE: &str = "COVEAPI_ONLY_ACCOUNT_MERGE";
const ENV_VAR_GROUPINGS: &str = "COVEAPI_GROUPINGS";

// Defaults used when the corresponding variable is absent or empty.
const DEFAULT_TEST_COVERAGE: f32 = 0.7;
const DEFAULT_PORT: u16 = 13750;

// Token separating the entries of list-valued variables (mappings, groupings).
const LIST_SEPERATOR: &str = "COVEAPI_LINE_SEPERATOR";
// Separator between the fields of a single mapping/grouping entry.
const MAPPING_SUBDELIMITER: &str = ";";

lazy_static! {
    // Process-wide debug flag; written once by `CoveAPIConfig::from_raw`.
    static ref IS_DEBUG: RwLock<bool> = RwLock::new(true);
}
impl CoveAPIConfig {
pub fn from_raw(env_vars: &HashMap<String, String>) -> Result<CoveAPIConfig, Error> {
// Check if all enviroment variables exist
if !key_exists_and_is_not_empty(ENV_VAR_MAPPING, env_vars)
&& (!key_exists_and_is_not_empty(ENV_VAR_OPENAPI_SOURCE, env_vars)
|| !key_exists_and_is_not_empty(ENV_VAR_APP_BASE_URL, env_vars))
{
return Err(Error::MissingConfiguration);
}
if key_exists_and_is_not_empty(ENV_VAR_MAPPING, env_vars)
&& (key_exists_and_is_not_empty(ENV_VAR_PORT, env_vars)
|| key_exists_and_is_not_empty(ENV_VAR_OPENAPI_SOURCE, env_vars)
|| key_exists_and_is_not_empty(ENV_VAR_APP_BASE_URL, env_vars))
{
return Err(Error::ConflictingConfiguration);
}
// fetch values from enviroment variables
let debug = get_bool_env_var(ENV_VAR_DEBUG, env_vars);
let security_accounts_for_forbidden = get_bool_env_var(ENV_VAR_ACCOUNT_FOR_FORBIDDEN, env_vars);
let security_accounts_for_unautorized = get_bool_env_var(ENV_VAR_ACCOUNT_FOR_UNAUTORIZED, env_vars);
let test_coverage = match env_vars.get(ENV_VAR_TEST_COVERAGE) {
Some(coverage_str) => translate_test_coverage(coverage_str)?,
None => 0.7,
};
let is_merge = get_bool_env_var(ENV_VAR_IS_MERGE, env_vars);
let only_account_for_merge = get_bool_env_var(ENV_VAR_ONLY_ACCOUNT_MERGE, env_vars);
let groupings = match env_vars.get(ENV_VAR_GROUPINGS) {
Some(grouping_str) => parse_grouping(grouping_str)?,
None => HashSet::new(),
};
let runtimes = if !key_exists_and_is_not_empty(ENV_VAR_MAPPING, env_vars) {
let openapi_source_str = match env_vars.get(ENV_VAR_OPENAPI_SOURCE) {
Some(openapi_source) => openapi_source,
None => return Err(Error::MissingConfiguration),
};
let app_base_url_str = match env_vars.get(ENV_VAR_APP_BASE_URL) {
Some(openapi_source) => openapi_source,
None => return Err(Error::MissingConfiguration),
};
let port_str = match env_vars.get(ENV_VAR_PORT) {
Some(port_str) => {
if port_str.is_empty() {
None
} else {
Some(port_str.as_str())
}
}
None => None,
};
vec![parse_runtime(openapi_source_str, app_base_url_str, port_str)?]
} else {
let mapping_str = match env_vars.get(ENV_VAR_MAPPING) {
Some(mapping_str) => mapping_str,
None => return Err(Error::MissingMapping),
};
parse_complex_mapping(mapping_str)?
};
// adjust global debug setting
if let Ok(mut is_debug) = IS_DEBUG.write() {
*is_debug = debug
} else {
return Err(Error::UnknownInternalError("debug double write".to_string()));
}
Ok(CoveAPIConfig {
debug,
security_accounts_for_forbidden,
security_accounts_for_unautorized,
test_coverage,
runtimes,
is_merge,
only_account_for_merge,
groupings,
})
}
pub fn from_env() -> Result<CoveAPIConfig, Error> {
let mut env_vars = HashMap::new();
for var in env::vars() {
env_vars.insert(var.0, var.1);
}
CoveAPIConfig::from_raw(&env_vars)
}
pub fn global_is_debug() -> bool {
// unwrap is okay, since there should only be one write operaion
*IS_DEBUG.read().unwrap()
}
}
/// Returns `true` when `key` is present in `env_vars` with a non-empty value.
fn key_exists_and_is_not_empty(key: &str, env_vars: &HashMap<String, String>) -> bool {
    env_vars.get(key).map_or(false, |content| !content.is_empty())
}
/// Parses the `COVEAPI_MAPPING` value into one `Runtime` per mapping line.
///
/// Each non-empty line has the form `<app_base_url>; <openapi_source>; <port>;`
/// and lines are separated by the `LIST_SEPERATOR` token. A literal `;` inside
/// a field can be escaped as `\;`.
///
/// Returns `Error::MissingMapping` when no usable line is found, and
/// `Error::MappingDuplicatePorts` when two runtimes share a port.
fn parse_complex_mapping(mapping_str: &str) -> Result<Vec<Arc<Runtime>>, Error> {
    let mut runtimes = vec![];
    for line in mapping_str.split(LIST_SEPERATOR) {
        // ignore empty lines that might consist out of tabs or spaces
        if line.trim().is_empty() {
            continue;
        }
        let index = 0;
        let (app_base_url_str, index) = parse_untill_mapping_subdelimiter(index, line)?;
        let (openapi_source_str, index) = parse_untill_mapping_subdelimiter(index, line)?;
        let (port_str, _) = parse_untill_mapping_subdelimiter(index, line)?;

        // Resolve `\;` escapes only after the fields have been split.
        let app_base_url_str = replace_escaped_sequences(app_base_url_str);
        let openapi_source_str = replace_escaped_sequences(openapi_source_str);
        let port_str = replace_escaped_sequences(port_str);

        runtimes.push(parse_runtime(&openapi_source_str, &app_base_url_str, Some(&port_str))?);
    }
    if runtimes.is_empty() {
        return Err(Error::MissingMapping);
    }
    // Fix: removed a leftover debug `println!("{:?}", runtimes)` that leaked
    // internal state to stdout on this error path.
    if !check_runtime_compatability(&runtimes) {
        return Err(Error::MappingDuplicatePorts);
    }
    Ok(runtimes)
}
/// Parses the `COVEAPI_GROUPINGS` value into a set of endpoint groupings.
///
/// Each non-empty line has the form
/// `<path>; <methods>; <status codes>; <is_ignore_group>;`, with lines
/// separated by the `LIST_SEPERATOR` token.
fn parse_grouping(grouping_str: &str) -> Result<HashSet<Grouping>, Error> {
    let mut groupings = HashSet::new();
    for line in grouping_str.split(LIST_SEPERATOR) {
        // Skip blank (whitespace-only) lines.
        if line.trim() == "" {
            continue;
        }
        let index = 0;
        let (path_str, index) = parse_untill_mapping_subdelimiter(index, line)?;
        let (methods_str, index) = parse_untill_mapping_subdelimiter(index, line)?;
        let (status_str, index) = parse_untill_mapping_subdelimiter(index, line)?;
        let (is_ignore_group_str, _) = parse_untill_mapping_subdelimiter(index, line)?;
        groupings.insert(parse_grouping_strings(
            path_str,
            methods_str,
            status_str,
            is_ignore_group_str,
        )?);
    }
    Ok(groupings)
}
/// Scans `base` from `index` up to the next unescaped `MAPPING_SUBDELIMITER`
/// (`;`).
///
/// Returns the slice between `index` and the delimiter (escape sequences are
/// NOT yet resolved — see `replace_escaped_sequences`) together with the index
/// just past the delimiter, or `Error::MappingMissingSemicolon` when no
/// delimiter is found before the end of `base`.
fn parse_untill_mapping_subdelimiter(index: usize, base: &str) -> Result<(&str, usize), Error> {
    let mut final_index = index;
    let mut is_escaped = false;
    // Advance while the current byte is not an unescaped delimiter.
    // NOTE: single-byte slicing means this assumes ASCII delimiters/escapes;
    // multi-byte UTF-8 chars are simply stepped over byte-by-byte.
    while is_escaped
        || match base.get(final_index..final_index + 1) {
            Some(MAPPING_SUBDELIMITER) => false,
            Some(_) => true,
            None => false,
        }
    {
        // A backslash escapes whatever character follows it.
        is_escaped = base.get(final_index..final_index + 1) == Some("\\");
        final_index += 1;
    }
    // Step past the delimiter itself.
    final_index += 1;
    if base.len() < final_index {
        return Err(Error::MappingMissingSemicolon(base.to_string()));
    }
    match base.get(index..final_index - 1) {
        Some(subpart) => Ok((subpart, final_index)),
        None => Err(Error::MappingMissingSemicolon(base.to_string())),
    }
}
/// Returns `true` when every runtime listens on a distinct port.
fn check_runtime_compatability(runtimes: &Vec<Arc<Runtime>>) -> bool {
    let mut seen_ports = HashSet::new();
    // `insert` returns false on the first duplicate, which short-circuits `all`.
    runtimes.iter().all(|runtime| seen_ports.insert(runtime.port))
}
/// Resolves the escaped subdelimiter sequence `\;` to a literal `;`.
fn replace_escaped_sequences(base: &str) -> String {
    base.replace(r"\;", ";")
}
/// Builds a single `Runtime` from its raw string fields.
///
/// `openapi_source_str` is treated as a URL when it parses as one and as a
/// local file path otherwise; `port_str` falls back to `DEFAULT_PORT` when
/// `None`. All fields are trimmed before parsing.
fn parse_runtime(
    openapi_source_str: &str,
    app_base_url_str: &str,
    port_str: Option<&str>,
) -> Result<Arc<Runtime>, Error> {
    // Anything that fails URL parsing is assumed to be a file path.
    let openapi_source = match Url::from_str(openapi_source_str.trim()) {
        Ok(openapi_url) => OpenapiSource::Url(openapi_url),
        Err(_) => OpenapiSource::Path(Box::from(Path::new(openapi_source_str.trim()))),
    };
    let app_base_url = match Url::from_str(app_base_url_str.trim()) {
        Ok(app_base_url) => app_base_url,
        Err(parse_error) => return Err(Error::InvalidApplicationURL(parse_error.to_string())),
    };
    // `parse::<u16>` also rejects out-of-range values such as 65537.
    let port = match port_str {
        Some(port_str) => match port_str.trim().parse() {
            Ok(port) => port,
            Err(_) => return Err(Error::InvalidPortNumber(String::from(port_str))),
        },
        _ => DEFAULT_PORT,
    };
    Ok(Arc::from(Runtime {
        openapi_source,
        app_base_url,
        port,
    }))
}
/// Reads `key` from the environment map and interprets it as a boolean flag;
/// a missing key counts as `false`.
fn get_bool_env_var(key: &str, env_vars: &HashMap<String, String>) -> bool {
    env_vars.get(key).map_or(false, |value| parse_bool(value))
}
/// Interprets a raw environment-variable string as a boolean: everything is
/// truthy except the empty string, "0", "false" and "nope".
/// (the "nope" is just a fun easter egg)
fn parse_bool(bool_str: &str) -> bool {
    !matches!(bool_str, "" | "0" | "false" | "nope")
}
/// Converts a raw coverage string into a ratio in `0.0..=1.0`.
///
/// Accepts "0.86", "86" and "86%" styles; values above 1.0 are interpreted as
/// percentages. A blank string yields `DEFAULT_TEST_COVERAGE`; anything
/// unparseable or out of range yields `Error::InvalidTestCoverage`.
fn translate_test_coverage(coverage_str: &str) -> Result<f32, Error> {
    let trimmed = coverage_str.trim();
    if trimmed.is_empty() {
        return Ok(DEFAULT_TEST_COVERAGE);
    }
    // BUG FIX: the original sliced/parsed the UNtrimmed string, so inputs with
    // surrounding whitespace (e.g. " 86% ") failed even though the emptiness
    // check above trims. Work on the trimmed slice throughout.
    let numeric_part = if trimmed.ends_with('%') {
        &trimmed[..trimmed.len() - 1]
    } else {
        trimmed
    };
    let mut coverage: f32 = match numeric_part.parse() {
        Ok(coverage) => coverage,
        Err(_) => return Err(Error::InvalidTestCoverage),
    };
    // Values above 1.0 are percentages, e.g. 86 -> 0.86.
    if coverage > 1.0 {
        coverage /= 100.0;
    }
    if float_eq!(coverage, 0.0, abs <= 0.0001) {
        println!("Warning: test coverage is set to 0%");
    }
    if !(0.0..=1.0).contains(&coverage) {
        Err(Error::InvalidTestCoverage)
    } else {
        Ok(coverage)
    }
}
/// Converts the four raw fields of one grouping line into a `Grouping`.
///
/// `methods_str` and `status_str` are comma-separated lists;
/// `is_ignore_group_str` uses the same truthiness rules as boolean env vars
/// (see `parse_bool`).
fn parse_grouping_strings(
    path_str: &str,
    methods_str: &str,
    status_str: &str,
    is_ignore_group_str: &str,
) -> Result<Grouping, Error> {
    let path = OpenapiPath::from_str(path_str.trim())?;
    let mut methods = vec![];
    for method_str in methods_str.split(',') {
        let method = match Method::from_str(method_str.trim()) {
            Some(method) => method,
            None => return Err(Error::InvalidMethodString(method_str.to_string())),
        };
        methods.push(method);
    }
    let mut status = vec![];
    for single_status_str in status_str.split(',') {
        let single_status = match single_status_str.trim().parse() {
            Ok(single_status) => single_status,
            Err(_) => return Err(Error::InvalidStatusCode(single_status_str.to_string())),
        };
        status.push(single_status);
    }
    let is_ignore_group = parse_bool(is_ignore_group_str.trim());
    Ok(Grouping::new(methods, status, path, is_ignore_group))
}
#[cfg(test)]
mod test {
use float_eq::assert_float_eq;
use std::{collections::HashMap, path::Path};
use crate::config::{
environment::{
get_bool_env_var, key_exists_and_is_not_empty, parse_complex_mapping, replace_escaped_sequences,
translate_test_coverage, DEFAULT_TEST_COVERAGE, ENV_VAR_ACCOUNT_FOR_UNAUTORIZED, ENV_VAR_MAPPING,
ENV_VAR_ONLY_ACCOUNT_MERGE, ENV_VAR_PORT,
},
OpenapiSource,
};
use super::{
parse_untill_mapping_subdelimiter, CoveAPIConfig, ENV_VAR_ACCOUNT_FOR_FORBIDDEN, ENV_VAR_APP_BASE_URL,
ENV_VAR_DEBUG, ENV_VAR_IS_MERGE, ENV_VAR_OPENAPI_SOURCE,
};
fn generate_config_map() -> HashMap<String, String> {
let mut config_map = HashMap::new();
config_map.insert(String::from(ENV_VAR_DEBUG), String::from("1"));
config_map.insert(
String::from(ENV_VAR_OPENAPI_SOURCE),
String::from("./dump/swagger.json"),
);
config_map.insert(
String::from(ENV_VAR_APP_BASE_URL),
String::from("http://localhost:8080"),
);
config_map
}
#[test]
fn can_fetch_valid_openapi_path() {
let config_map = generate_config_map();
assert_eq!(
CoveAPIConfig::from_raw(&config_map).unwrap().runtimes[0].openapi_source,
OpenapiSource::Path(Box::from(Path::new("./dump/swagger.json")))
);
}
#[test]
fn can_fetch_valid_url() {
assert_eq!(
CoveAPIConfig::from_raw(&generate_config_map()).unwrap().runtimes[0]
.app_base_url
.as_str(),
"http://localhost:8080/"
);
}
#[test]
fn can_catch_invalid_url() {
let mut config_map = generate_config_map();
config_map.insert(ENV_VAR_APP_BASE_URL.to_string(), String::from("jjjjjj"));
if CoveAPIConfig::from_raw(&config_map).is_ok() {
panic!("Should throw error here")
}
}
#[test]
fn missing_keys_lead_to_err() {
let mut config_map = generate_config_map();
config_map.remove(ENV_VAR_APP_BASE_URL);
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
}
#[test]
fn nonzero_bool_is_true() {
let mut config_map = generate_config_map();
assert!(get_bool_env_var(ENV_VAR_DEBUG, &config_map));
config_map.insert(ENV_VAR_DEBUG.to_string(), String::from("2"));
assert!(get_bool_env_var(ENV_VAR_DEBUG, &config_map));
}
#[test]
fn zero_or_empty_bool_is_false() {
let mut config_map = generate_config_map();
config_map.insert(ENV_VAR_DEBUG.to_string(), String::from("0"));
assert!(!get_bool_env_var(ENV_VAR_DEBUG, &config_map));
config_map.insert(ENV_VAR_DEBUG.to_string(), String::from(""));
assert!(!get_bool_env_var(ENV_VAR_DEBUG, &config_map));
}
#[test]
fn non_existant_bool_is_false_no_error() {
let mut config_map = generate_config_map();
config_map.remove(ENV_VAR_DEBUG);
assert!(!get_bool_env_var(ENV_VAR_DEBUG, &config_map));
}
#[test]
fn debug_val_is_used() {
let config_map = generate_config_map();
assert!(CoveAPIConfig::from_raw(&config_map).unwrap().debug);
}
#[test]
fn account_for_security_val_is_used() {
let config_map = generate_config_map();
assert!(
!CoveAPIConfig::from_raw(&config_map)
.unwrap()
.security_accounts_for_forbidden
);
}
#[test]
fn test_coverage_translator_can_recognise_float() {
assert_float_eq!(translate_test_coverage("0.86").unwrap(), 0.86, abs <= 0.0001);
}
#[test]
fn test_coverage_recognises_percentage_with_sign() {
assert_float_eq!(translate_test_coverage("86%").unwrap(), 0.86, abs <= 0.0001);
assert_float_eq!(translate_test_coverage("85.5%").unwrap(), 0.855, abs <= 0.0001);
}
#[test]
fn test_coverage_recognises_percentage_without_sign() {
assert_float_eq!(translate_test_coverage("86").unwrap(), 0.86, abs <= 0.0001);
}
#[test]
fn test_coverage_throws_error_if_over_100_percent() {
assert!(translate_test_coverage("866").is_err());
}
#[test]
fn test_coverage_throws_error_if_invalid_number() {
assert!(translate_test_coverage("foo%").is_err());
}
#[test]
fn test_coverage_empty_sting_leads_to_default() {
assert_eq!(translate_test_coverage("").unwrap(), DEFAULT_TEST_COVERAGE);
}
#[test]
fn defaults_to_70_percent_test_coverage() {
let config_map = generate_config_map();
assert_float_eq!(
CoveAPIConfig::from_raw(&config_map).unwrap().test_coverage,
0.7,
abs <= 0.0001
);
}
#[test]
fn configuration_defaults_to_port_13750() {
let config_map = generate_config_map();
assert_eq!(CoveAPIConfig::from_raw(&config_map).unwrap().runtimes[0].port, 13750);
}
#[test]
fn configuration_recognises_port_number() {
let mut config_map = generate_config_map();
config_map.insert(ENV_VAR_PORT.to_string(), "9999".to_string());
assert_eq!(CoveAPIConfig::from_raw(&config_map).unwrap().runtimes[0].port, 9999);
}
#[test]
fn configuration_throws_error_for_invalid_port() {
let mut config_map = generate_config_map();
config_map.insert(ENV_VAR_PORT.to_string(), "albert".to_string());
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
config_map.insert(ENV_VAR_PORT.to_string(), "65537".to_string()); // 2^ 16 + 1 (tcp only
// allows 16 bits)
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
}
#[test]
fn throws_error_if_any_parallel_configuration_with_mapping_is_donw() {
let mut config_map = generate_config_map();
config_map.insert(
ENV_VAR_MAPPING.to_string(),
"https://localhost:8090; docs/swagger1.yaml; 13751;".to_string(),
);
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
config_map.remove(ENV_VAR_APP_BASE_URL);
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
config_map.remove(ENV_VAR_OPENAPI_SOURCE);
config_map.insert(
String::from(ENV_VAR_APP_BASE_URL),
String::from("http://localhost:8080"),
);
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
config_map.remove(ENV_VAR_APP_BASE_URL);
config_map.insert(ENV_VAR_PORT.to_string(), "8080".to_string()); // 2^ 16 + 1 (tcp only
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
}
#[test]
fn throws_error_if_no_configuration_or_mapping_is_provided() {
let config_map = HashMap::new();
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
}
#[test]
fn can_recognise_if_env_var_is_empty() {
let mut config_map = HashMap::new();
const KEY: &str = "KEY";
config_map.insert(KEY.to_string(), "".to_string());
assert!(!key_exists_and_is_not_empty(KEY, &config_map));
}
#[test]
fn can_recognise_if_env_var_is_not_empty() {
let mut config_map = HashMap::new();
const KEY: &str = "KEY";
config_map.insert(KEY.to_string(), "test".to_string());
assert!(key_exists_and_is_not_empty(KEY, &config_map));
}
#[test]
fn can_recognise_if_env_var_doesnt_exist() {
let config_map = HashMap::new();
const KEY: &str = "KEY";
assert!(!key_exists_and_is_not_empty(KEY, &config_map));
}
#[test]
fn passing_in_basic_parameters_leads_to_default_runtime_being_initialized() {
let mut config_map = generate_config_map();
config_map.insert(ENV_VAR_PORT.to_string(), "8080".to_string());
let config = CoveAPIConfig::from_raw(&config_map).unwrap();
assert_eq!(config.runtimes.len(), 1);
assert_eq!(config.runtimes[0].port, 8080);
}
#[test]
fn parses_basic_mapping() {
let runtimes = parse_complex_mapping("https://localhost:8090; docs/swagger1.yaml; 13751;COVEAPI_LINE_SEPERATORhttps://example:8091; docs/swagger2.yaml; 13752;").unwrap();
assert_eq!(runtimes.len(), 2);
assert!(runtimes.iter().any(|x| x.port == 13751));
assert!(runtimes.iter().any(|x| x.port == 13752));
assert!(runtimes
.iter()
.any(|x| x.openapi_source == OpenapiSource::Path(Box::from(Path::new("docs/swagger1.yaml")))));
assert!(runtimes
.iter()
.any(|x| x.openapi_source == OpenapiSource::Path(Box::from(Path::new("docs/swagger2.yaml")))));
assert!(runtimes
.iter()
.any(|x| x.app_base_url.as_str() == "https://localhost:8090/"));
assert!(runtimes
.iter()
.any(|x| x.app_base_url.as_str() == "https://example:8091/"));
}
#[test]
fn allows_different_whitespace_ammounts() {
let runtimes = parse_complex_mapping("\n https://localhost:8090; docs/swagger1.yaml ; 13751 ;\n\n");
assert!(runtimes.is_ok())
}
#[test]
fn allows_escaping_of_semicolon() {
let runtimes = parse_complex_mapping(r"https://localhost:8090; docs/swagger\;1.yaml; 13751;").unwrap();
assert_eq!(
runtimes[0].openapi_source,
OpenapiSource::Path(Box::from(Path::new("docs/swagger;1.yaml")))
);
}
#[test]
fn mapping_gets_recognised_in_happy_case() {
let mut config_map = HashMap::new();
config_map.insert(
ENV_VAR_MAPPING.to_string(),
"https://localhost:8090; docs/swagger1.yaml; 13751;COVEAPI_LINE_SEPERATORhttps://example:8091; docs/swagger2.yaml; 13752;".to_string()
);
let config = CoveAPIConfig::from_raw(&config_map).unwrap();
assert_eq!(config.runtimes.len(), 2)
}
#[test]
fn duplicate_ports_lead_to_error_in_mapping() {
let mut config_map = HashMap::new();
config_map.insert(
ENV_VAR_MAPPING.to_string(),
"https://localhost:8090; docs/swagger1.yaml; 13751;COVEAPI_LINE_SEPERATORhttps://example:8091; docs/swagger2.yaml; 13751;".to_string()
);
assert!(CoveAPIConfig::from_raw(&config_map).is_err());
}
#[test]
fn parses_till_limit() {
let test_str = "test test; 123";
let index = 0;
let result = parse_untill_mapping_subdelimiter(index, test_str).unwrap();
assert_eq!(result.0, "test test");
}
#[test]
fn skips_over_escaped_chars() {
let test_str = "test\\; \\test; 123";
let index = 0;
let result = parse_untill_mapping_subdelimiter(index, test_str).unwrap();
assert_eq!(result.0, "test\\; \\test");
}
#[test]
fn returns_error_if_missing_delimiter() {
let test_str = "test test";
let index = 0;
let result = parse_untill_mapping_subdelimiter(index, test_str);
assert!(result.is_err());
}
#[test]
fn remove_escaped_sequences_replaces_escape_sequences() {
let test_str = "test\\; \\test; 123";
assert_eq!(replace_escaped_sequences(test_str), "test; \\test; 123");
}
#[test]
fn remove_escaped_sequences_ignored_unescaped() {
let test_str = "test; \\test; 123";
assert_eq!(replace_escaped_sequences(test_str), test_str);
}
#[test]
fn regocnises_forbidden_security_flag_in_config() {
let mut env_vars = generate_config_map();
env_vars.insert(ENV_VAR_ACCOUNT_FOR_FORBIDDEN.to_string(), "1".to_string());
let config = CoveAPIConfig::from_raw(&env_vars).unwrap();
assert!(config.security_accounts_for_forbidden);
}
#[test]
fn regocnises_unautorized_security_flag_in_config() {
let mut env_vars = generate_config_map();
env_vars.insert(ENV_VAR_ACCOUNT_FOR_UNAUTORIZED.to_string(), "1".to_string());
let config = CoveAPIConfig::from_raw(&env_vars).unwrap();
assert!(config.security_accounts_for_unautorized);
}
#[test]
fn security_is_off_by_default() {
let env_vars = generate_config_map();
let config = CoveAPIConfig::from_raw(&env_vars).unwrap();
assert!(!config.security_accounts_for_unautorized);
assert!(!config.security_accounts_for_forbidden);
}
#[test]
fn regonises_merge() {
let mut env_vars = generate_config_map();
env_vars.insert(ENV_VAR_IS_MERGE.to_string(), "1".to_string());
env_vars.insert(ENV_VAR_OPENAPI_SOURCE.to_string(), "http://example.com".to_string());
let config = CoveAPIConfig::from_raw(&env_vars).unwrap();
assert!(config.is_merge);
}
#[test]
fn recognises_if_account_merge() {
let mut env_vars = generate_config_map();
env_vars.insert(ENV_VAR_ONLY_ACCOUNT_MERGE.to_string(), "1".to_string());
env_vars.insert(ENV_VAR_OPENAPI_SOURCE.to_string(), "http://example.com".to_string());
let config = CoveAPIConfig::from_raw(&env_vars).unwrap();
assert!(config.only_account_for_merge);
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/config/mod.rs | Rust | use std::{collections::HashSet, path::Path, sync::Arc};
use url::Url;
mod environment;
mod nginx;
pub use nginx::configure_nginx;
use crate::models::Grouping;
/// Fully parsed CoveAPI configuration, assembled from environment variables
/// (see `environment.rs` for the variable names and parsing rules).
#[derive(Debug)]
pub struct CoveAPIConfig {
    /// Parsed from `COVEAPI_DEBUG`.
    pub debug: bool,
    /// Parsed from `COVEAPI_ACCOUNT_FOR_FORBIDDEN`.
    pub security_accounts_for_forbidden: bool,
    /// Parsed from `COVEAPI_ACCOUNT_FOR_UNAUTORIZED`.
    pub security_accounts_for_unautorized: bool,
    /// Required coverage ratio in `0.0..=1.0` (default 0.7).
    pub test_coverage: f32,
    /// One entry per proxied application instance.
    pub runtimes: Vec<Arc<Runtime>>,
    /// Parsed from `COVEAPI_IS_MERGE`.
    pub is_merge: bool,
    /// Parsed from `COVEAPI_ONLY_ACCOUNT_MERGE`.
    pub only_account_for_merge: bool,
    /// Endpoint groupings parsed from `COVEAPI_GROUPINGS`.
    pub groupings: HashSet<Grouping>,
}

/// One proxied application instance: where its OpenAPI spec comes from, its
/// base URL, and the local port associated with it.
#[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct Runtime {
    pub openapi_source: OpenapiSource,
    pub app_base_url: Url,
    pub port: u16,
}

/// Source of a runtime's OpenAPI specification: a local file path or a URL.
#[derive(Hash, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum OpenapiSource {
    Path(Box<Path>),
    Url(Url),
}
impl CoveAPIConfig {
    /// Print the active configuration to stdout (used when debug mode is on).
    pub fn print(&self) {
        println!("Configuration for CoveAPI:");
        println!(" - debug: {}", self.debug);
        if self.runtimes.len() > 1 {
            println!("- Runtimes:")
        }
        for runtime in &self.runtimes {
            match &runtime.openapi_source {
                OpenapiSource::Path(path) => println!(" - openapi path: {:?}", path),
                // Fixed: this branch used `print!`, so the following
                // "app_base_url" line was glued onto the url line.
                OpenapiSource::Url(url) => println!(" - openapi url: {}", url),
            };
            println!(" - app_base_url: {}", runtime.app_base_url);
            println!(" - port: {}", runtime.port);
        }
        println!(" - account_for_security: {}", self.security_accounts_for_forbidden);
        println!(" - test_coverage: {}", self.test_coverage);
        println!(" - is_merge: {}", self.is_merge);
        println!(" - only_account_for_merge: {}", self.only_account_for_merge);
    }

    /// True when every configured runtime loads its OpenAPI spec from a local
    /// file (required for pre/post-merge diffing, which re-reads the file from git).
    pub fn all_openapi_sources_are_paths(&self) -> bool {
        self.runtimes
            .iter()
            .all(|runtime| matches!(runtime.openapi_source, OpenapiSource::Path(_)))
    }
}
#[cfg(test)]
mod tests {
    use std::{path::Path, str::FromStr, sync::Arc};
    use reqwest::Url;
    use crate::utils::test::create_mock_config;
    use super::{OpenapiSource, Runtime};
    #[test]
    // A URL-sourced spec disqualifies merge-diff mode.
    fn should_only_print_merge_if_openapi_source_is_file() {
        let mut config = create_mock_config();
        config.is_merge = true;
        config.runtimes = vec![Arc::new(Runtime {
            openapi_source: OpenapiSource::Url(Url::from_str("https://example.com").unwrap()),
            app_base_url: Url::from_str("https://example.com").unwrap(),
            port: 8080,
        })];
        assert!(!config.all_openapi_sources_are_paths())
    }
    #[test]
    // NOTE(review): "shoul" is a typo for "should" in this test's name.
    fn shoul_print_merge_if_is_merge_and_openapi_is_file() {
        let mut config = create_mock_config();
        config.is_merge = true;
        config.runtimes = vec![Arc::new(Runtime {
            openapi_source: OpenapiSource::Path(Box::from(Path::new("./dump"))),
            app_base_url: Url::from_str("https://example.com").unwrap(),
            port: 8080,
        })];
        assert!(config.all_openapi_sources_are_paths())
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/config/nginx.rs | Rust | use std::{
fs::{File, OpenOptions},
io::{Read, Write},
path::Path,
sync::Arc,
};
use super::{CoveAPIConfig, Runtime};
use crate::utils::Error;
/// Rewrite the system nginx config in place (`/etc/nginx/nginx.conf`) with one
/// proxy server block per configured runtime. Thin wrapper over
/// `configure_nginx_file`, which is the testable entry point.
pub fn configure_nginx(config: &CoveAPIConfig) -> Result<(), Error> {
    configure_nginx_file(config, Path::new("/etc/nginx/nginx.conf"))
}
/// Substitute the upstream URL into a config template by replacing the
/// `INSERT_URL_HERE` marker.
fn replace_url(base: &str, url: &str) -> String {
    const URL_MARKER: &str = "INSERT_URL_HERE";
    base.replace(URL_MARKER, url)
}
/// Turn nginx error logging back on (used in debug mode): swaps the disabled
/// `error_log off;` directive for a file-backed one at `notice` level.
fn replace_error_log(base: &str) -> String {
    const DISABLED: &str = "error_log off;";
    const ENABLED: &str = "error_log /var/log/nginx/error.log notice;";
    base.replace(DISABLED, ENABLED)
}
/// Substitute the listen port into a config template by replacing the
/// `INSERT_PORT_HERE` marker.
fn replace_port_number(base: &str, port: u16) -> String {
    let rendered_port = port.to_string();
    base.replace("INSERT_PORT_HERE", &rendered_port)
}
/// Render one nginx `server` block per runtime and splice the concatenation
/// into the template at the `INSERT_CONFIGURATIONS_HERE` marker.
fn replace_runtime_configurations(base: &str, runtimes: &Vec<Arc<Runtime>>) -> String {
    let rendered: String = runtimes
        .iter()
        .map(|runtime| build_runtime_config(runtime))
        .collect();
    base.replace("INSERT_CONFIGURATIONS_HERE", &rendered)
}
/// Render the nginx `server` block for one runtime: listens on the runtime's
/// port, serves a diagnostic message on /502, and proxies everything else to
/// the runtime's upstream base URL.
fn build_runtime_config(runtime: &Runtime) -> String {
    const BASE_CONFIGURATION_STRUCTURE: &str = "
server {
listen INSERT_PORT_HERE;
location /502 {
return 502 'CoveAPI could not connect to your service, please double check that you specified the correct uri.';
}
location / {
proxy_pass INSERT_URL_HERE;
}
}
";
    // Fill in the port first, then the upstream URL.
    let config = &String::from(BASE_CONFIGURATION_STRUCTURE);
    let config = replace_port_number(config, runtime.port);
    let config = replace_url(&config, runtime.app_base_url.as_str());
    config
}
/// Open the nginx config for reading, or — when `for_writing` is set — for a
/// truncating read/write (the file is emptied on open so it can be rewritten).
/// I/O failures are wrapped in `Error::UnexpectedIOIssue` with the path.
fn open_config_file(path: &Path, for_writing: bool) -> Result<File, Error> {
    match OpenOptions::new()
        .write(for_writing)
        .read(true)
        .truncate(for_writing)
        .open(path)
    {
        Ok(file) => Ok(file),
        Err(why) => Err(Error::UnexpectedIOIssue(format!(
            "issue opening file {:?} due to: {}",
            path, why
        ))),
    }
}
/// Read the nginx config template at `path`, enable error logging in debug
/// mode, expand the per-runtime server blocks, then truncate the file and
/// write the result back. Read and write use two separate opens because the
/// write open truncates the file.
fn configure_nginx_file(config: &CoveAPIConfig, path: &Path) -> Result<(), Error> {
    // First open: read-only, non-truncating.
    let mut file = open_config_file(path, false)?;
    let mut config_string = String::new();
    match file.read_to_string(&mut config_string) {
        Ok(_) => (),
        Err(why) => {
            return Err(Error::UnexpectedIOIssue(format!(
                "issue reading file {:?} due to: {}",
                path, why
            )))
        }
    }
    if config.debug {
        config_string = replace_error_log(&config_string);
    }
    config_string = replace_runtime_configurations(&config_string, &config.runtimes);
    // Second open: truncates the file before the rewrite below.
    let mut file = open_config_file(path, true)?;
    match file.write_all(config_string.as_bytes()) {
        Ok(_) => (),
        Err(why) => {
            return Err(Error::UnexpectedIOIssue(format!(
                "issue writing file {:?} due to: {}",
                path, why
            )))
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::{
        fs::File,
        io::{Read, Write},
        path::Path,
        str::FromStr,
        sync::Arc,
    };
    use url::Url;
    use crate::{
        config::{
            nginx::{
                configure_nginx_file, replace_error_log, replace_port_number, replace_runtime_configurations,
                replace_url,
            },
            OpenapiSource, Runtime,
        },
        utils::test::create_mock_config,
    };
    use super::open_config_file;
    #[test]
    fn changes_marker_from_string() {
        let test_string = String::from("proxy_pass INSERT_URL_HERE");
        assert_eq!(
            replace_url(&test_string, "https://example.com"),
            "proxy_pass https://example.com"
        );
    }
    #[test]
    // End-to-end rewrite of a real file in ./dump; the template is restored at
    // the end so the test is repeatable.
    fn replaces_file_correctly() {
        write_default_config();
        let nginx_path = Path::new("./dump/nginx.conf");
        let config = create_mock_config();
        configure_nginx_file(&config, nginx_path).unwrap();
        let mut conf_string = String::from("");
        File::open(nginx_path)
            .unwrap()
            .read_to_string(&mut conf_string)
            .unwrap();
        assert!(conf_string.contains("http://example.com"));
        assert!(conf_string.contains("13750"));
        write_default_config();
    }
    #[test]
    fn generates_multiple_configurations() {
        let mut config = create_mock_config();
        config.runtimes.push(Arc::from(Runtime {
            openapi_source: OpenapiSource::Url(Url::from_str("http://example.com").unwrap()),
            app_base_url: Url::from_str("http://example.com").unwrap(),
            port: 123,
        }));
        config.runtimes.push(Arc::from(Runtime {
            openapi_source: OpenapiSource::Url(Url::from_str("http://example.com").unwrap()),
            app_base_url: Url::from_str("http://example.com").unwrap(),
            port: 456,
        }));
        let config_string = replace_runtime_configurations("INSERT_CONFIGURATIONS_HERE", &config.runtimes);
        assert!(config_string.contains("123"));
        assert!(config_string.contains("456"));
    }
    // Reset ./dump/nginx.conf to the bare template used by replaces_file_correctly.
    fn write_default_config() {
        let mut file = open_config_file(Path::new("./dump/nginx.conf"), true).unwrap();
        file.write_all("...some other conf \nINSERT_CONFIGURATIONS_HERE\n...some more conf\n".as_bytes())
            .unwrap();
        file.flush().unwrap();
    }
    #[test]
    fn replaces_log_when_debug_on() {
        let test_string = String::from("... stuff ... error_log off; ... stuff ...");
        assert_eq!(
            replace_error_log(&test_string),
            "... stuff ... error_log /var/log/nginx/error.log notice; ... stuff ..."
        );
    }
    #[test]
    // NOTE(review): "repaces" is a typo for "replaces" in this test's name.
    fn repaces_port_number() {
        let test_string = String::from("... stuff ... INSERT_PORT_HERE ... stuff ...");
        assert_eq!(
            replace_port_number(&test_string, 13567),
            "... stuff ... 13567 ... stuff ..."
        );
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/evaluator/compare.rs | Rust | use std::{
cell::RefCell,
collections::{HashMap, HashSet},
};
use crate::models::{EndpointConfiguration, Grouping};
/// Compare the endpoints declared in the OpenAPI spec(s) against the endpoints
/// actually exercised (observed in the nginx access log) and compute coverage.
///
/// - `openapi_endpoints`: endpoints the spec says should be covered.
/// - `pre_merge_endpoints`: when `Some`, endpoints of the pre-merge spec; only
///   endpoints *added* by the merge are evaluated (see `get_endpoints_for_diff`).
/// - `nginx_endpoints`: requests observed at the proxy.
/// - `groupings`: endpoint groups — covering any member covers the group;
///   ignore-groups count every member as covered.
///
/// Each tracked endpoint is paired with a `bool` covered-flag inside a RefCell
/// so that flags of already-seen group members can be flipped retroactively.
pub fn evaluate<'a>(
    openapi_endpoints: &'a Vec<EndpointConfiguration>,
    pre_merge_endpoints: &Option<Vec<EndpointConfiguration>>,
    nginx_endpoints: &Vec<EndpointConfiguration>,
    groupings: &HashSet<Grouping>,
) -> Evaluation<'a> {
    let mut grouping_endpoints: HashMap<&Grouping, Vec<RefCell<(&EndpointConfiguration, bool)>>> = HashMap::new();
    for grouping in groupings {
        grouping_endpoints.insert(grouping, vec![]);
    }
    let mut unmatched_endpoints: Vec<RefCell<(&EndpointConfiguration, bool)>> = vec![];
    let relevant_endpoints = get_endpoints_for_diff(pre_merge_endpoints, openapi_endpoints);
    for openapi_endpoint in &relevant_endpoints {
        let mut has_group = false;
        for grouping in grouping_endpoints.iter_mut() {
            if grouping.0.incompases_endpoint_config(openapi_endpoint) {
                has_group = true;
                // Group already marked covered (first member's flag is true),
                // or the group is an ignore-group: count as covered outright.
                if (!grouping.1.is_empty() && grouping.1[0].borrow().1) || grouping.0.is_ignore_group {
                    grouping.1.push(RefCell::new((openapi_endpoint, true)));
                } else if endpoint_incompases_any(openapi_endpoint, nginx_endpoints) {
                    // This hit covers the whole group: retroactively flip the
                    // flags of the members recorded so far.
                    for endpoint in grouping.1.iter_mut() {
                        let mut endpoint = endpoint.borrow_mut();
                        endpoint.1 = true;
                    }
                    grouping.1.push(RefCell::new((openapi_endpoint, true)));
                } else {
                    // NOTE(review): add_endpoint_as_missed pushes *cloned*
                    // RefCells (RefCell::clone copies the contents), so the
                    // entries in `unmatched_endpoints` are independent of the
                    // group entries flipped above. A group member covered
                    // *after* a missed one may therefore still be counted as
                    // uncovered, depending on HashSet iteration order —
                    // verify; a shared Rc<RefCell<..>> would fix this.
                    add_endpoint_as_missed(openapi_endpoint, grouping.1, &mut unmatched_endpoints);
                }
            }
        }
        // Ungrouped endpoints are covered iff directly observed at the proxy.
        if !has_group && !endpoint_incompases_any(openapi_endpoint, nginx_endpoints) {
            unmatched_endpoints.push(RefCell::new((openapi_endpoint, false)));
        }
    }
    // filter for met endpoints
    unmatched_endpoints.retain(|x| !x.borrow().1);
    // No endpoints to cover counts as full coverage.
    let test_coverage = if relevant_endpoints.is_empty() {
        1.0
    } else {
        (relevant_endpoints.len() as f32 - unmatched_endpoints.len() as f32) / relevant_endpoints.len() as f32
    };
    let has_gateway_issues = has_gateway_issues(nginx_endpoints);
    let endpoints_not_covered = unmatched_endpoints.iter().map(|x| x.borrow().0).collect();
    Evaluation {
        has_gateway_issues,
        test_coverage,
        endpoints_not_covered,
    }
}
/// True when `endpoint` encompasses at least one of the given candidates
/// (same method, status, runtime, and a matching path template).
fn endpoint_incompases_any(
    endpoint: &EndpointConfiguration,
    possibly_incompased_endpoints: &Vec<EndpointConfiguration>,
) -> bool {
    // possible optimisation: remove incompased endpoint configuration from list after finding it
    possibly_incompased_endpoints
        .iter()
        .any(|candidate| endpoint.incompases_endpoint(candidate))
}
/// Endpoints that must be evaluated: all post-merge endpoints, minus any that
/// already existed pre-merge (when a pre-merge spec is available).
fn get_endpoints_for_diff<'a>(
    pre_merge_endpoints: &Option<Vec<EndpointConfiguration>>,
    post_merge_endpoints: &'a Vec<EndpointConfiguration>,
) -> HashSet<&'a EndpointConfiguration> {
    // Start from the full post-merge set ...
    let mut relevant_endpoints: HashSet<&EndpointConfiguration> =
        post_merge_endpoints.iter().collect();
    // ... then subtract everything that was present before the merge.
    if let Some(pre_merge) = pre_merge_endpoints {
        for existing in pre_merge {
            relevant_endpoints.take(existing);
        }
    }
    relevant_endpoints
}
/// Record `endpoint` as (currently) uncovered in both its group's member list
/// and the global unmatched list.
///
/// NOTE(review): `RefCell::clone` copies the cell's contents — the two pushed
/// cells are independent, not shared. Flipping the covered-flag on the group
/// copy later (in `evaluate`) does NOT update the unmatched copy; a shared
/// `Rc<RefCell<..>>` would make the retroactive marking propagate. Verify
/// whether this is intended.
fn add_endpoint_as_missed<'a>(
    endpoint: &'a EndpointConfiguration,
    grouping_endpoints: &mut Vec<RefCell<(&'a EndpointConfiguration, bool)>>,
    unmatched_endpoints: &mut Vec<RefCell<(&'a EndpointConfiguration, bool)>>,
) {
    let endpoint = RefCell::new((endpoint, false));
    grouping_endpoints.push(endpoint.clone());
    unmatched_endpoints.push(endpoint.clone());
}
/// Heuristic: flag gateway trouble when 502s are numerous in absolute terms
/// (more than 40) or relative terms (more than a quarter of all requests).
fn has_gateway_issues(nginx_endpoints: &Vec<EndpointConfiguration>) -> bool {
    let bad_gateway_count = nginx_endpoints
        .iter()
        .filter(|endpoint| endpoint.status_code == 502)
        .count();
    let absolute_limit_hit = bad_gateway_count > 40;
    let relative_limit_hit = bad_gateway_count > nginx_endpoints.len() / 4;
    absolute_limit_hit || relative_limit_hit
}
/// Result of a coverage evaluation; borrows the uncovered endpoints from the
/// OpenAPI endpoint list passed to `evaluate`.
pub struct Evaluation<'a> {
    /// True when the 502 heuristic (`has_gateway_issues`) fired.
    pub has_gateway_issues: bool,
    /// Achieved coverage ratio in 0.0..=1.0.
    pub test_coverage: f32,
    /// Spec endpoints never observed (directly or via a grouping).
    pub endpoints_not_covered: Vec<&'a EndpointConfiguration>,
}
#[cfg(test)]
mod tests {
    use std::{collections::HashSet, str::FromStr, sync::Arc};
    use float_eq::assert_float_eq;
    use crate::{
        models::{EndpointConfiguration, Grouping, Method, OpenapiPath},
        utils::test::create_mock_runtime,
    };
    use super::{endpoint_incompases_any, evaluate, has_gateway_issues};
    // Fixture endpoints sharing the same mock runtime.
    fn create_endpoint_a() -> EndpointConfiguration {
        EndpointConfiguration::new(Method::GET, "/a", 200, Arc::new(create_mock_runtime()), false).unwrap()
    }
    fn create_endpoint_b() -> EndpointConfiguration {
        EndpointConfiguration::new(Method::GET, "/b", 200, Arc::new(create_mock_runtime()), false).unwrap()
    }
    fn create_endpoint_c() -> EndpointConfiguration {
        EndpointConfiguration::new(Method::POST, "/c", 200, Arc::new(create_mock_runtime()), false).unwrap()
    }
    #[test]
    // 2 of 3 spec endpoints observed -> coverage 2/3.
    fn evaluate_covers_simplest_test_coverage_case() {
        let openapi_endpoints = vec![create_endpoint_a(), create_endpoint_b(), create_endpoint_c()];
        let nginx_endpoints = vec![create_endpoint_a(), create_endpoint_b()];
        let evaluation = evaluate(&openapi_endpoints, &None, &nginx_endpoints, &HashSet::new());
        assert_float_eq!(evaluation.test_coverage, 2.0 / 3.0, abs <= 0.001);
    }
    #[test]
    fn evaluate_gives_full_coverage_when_no_wanted_and_no_provided() {
        let openapi_endpoints = vec![];
        let nginx_endpoints = vec![];
        let evaluation = evaluate(&openapi_endpoints, &None, &nginx_endpoints, &HashSet::new());
        assert_float_eq!(evaluation.test_coverage, 1.0, abs <= 0.001);
    }
    #[test]
    fn evaluate_gives_zero_if_no_nginx_endpoint() {
        let openapi_endpoints = vec![create_endpoint_a()];
        let nginx_endpoints = vec![];
        let evaluation = evaluate(&openapi_endpoints, &None, &nginx_endpoints, &HashSet::new());
        assert_float_eq!(evaluation.test_coverage, 0.0, abs <= 0.001);
    }
    #[test]
    // A grouping over /{foo} makes the unobserved POST /c count as covered.
    fn evaluate_groups_two_endpoints() {
        let openapi_endpoints = vec![create_endpoint_a(), create_endpoint_b(), create_endpoint_c()];
        let nginx_endpoints = vec![create_endpoint_a(), create_endpoint_b()];
        let grouping = Grouping::new(
            vec![Method::GET, Method::POST],
            vec![200],
            OpenapiPath::from_str("/{foo}").unwrap(),
            false,
        );
        let mut groupings = HashSet::new();
        groupings.insert(grouping);
        let evaluation = evaluate(&openapi_endpoints, &None, &nginx_endpoints, &groupings);
        assert_float_eq!(evaluation.test_coverage, 1.0, abs <= 0.001);
    }
    #[test]
    fn internal_incompases_all_check_matches_base_case() {
        let endpoint = create_endpoint_a();
        let possibly_incompased_endpoints = vec![create_endpoint_a()];
        assert!(endpoint_incompases_any(&endpoint, &possibly_incompased_endpoints))
    }
    #[test]
    fn internal_incompases_all_check_functions_for_sized_arrays() {
        let endpoint = create_endpoint_c();
        let possibly_incompased_endpoints = vec![create_endpoint_a(), create_endpoint_b()];
        assert!(!endpoint_incompases_any(&endpoint, &possibly_incompased_endpoints))
    }
    #[test]
    fn internal_incompases_all_check_returns_false_for_empty_possibilities() {
        let endpoint = create_endpoint_a();
        let possibly_incompased_endpoints = vec![];
        assert!(!endpoint_incompases_any(&endpoint, &possibly_incompased_endpoints))
    }
    #[test]
    // A single request that is a 502 exceeds the 25% relative threshold.
    fn correctly_asserts_gateway_issues() {
        let nginx_endpoints =
            vec![EndpointConfiguration::new(Method::GET, "/", 502, Arc::new(create_mock_runtime()), false).unwrap()];
        assert!(has_gateway_issues(&nginx_endpoints));
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/evaluator/mod.rs | Rust | mod compare;
pub use compare::evaluate;
pub use compare::Evaluation;
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/lib.rs | Rust | use std::process::{Command, Stdio};
use config::{configure_nginx, CoveAPIConfig};
use evaluator::evaluate;
use models::EndpointConfiguration;
use parser::{get_openapi_endpoint_configs, get_pre_merge_openapi_endpoints};
use utils::print_debug_message;
use crate::{parser::parse_nginx_access_log, utils::print_error_and_exit};
pub mod config;
pub mod evaluator;
pub mod models;
pub mod parser;
pub mod utils;
/// Configure and run nginx in the foreground (`daemon off`), blocking until it
/// exits. Exits the process on configuration failure, spawn failure, or a
/// non-zero nginx exit code.
pub fn run_nginx(config: &CoveAPIConfig) {
    // insert application URL to nginx file
    match configure_nginx(config) {
        Ok(_) => (),
        Err(error) => error.display_error_and_exit(),
    }
    // spawn nginx as a subprocess
    print_debug_message("Starting nginx");
    let mut nginx_cmd = Command::new("nginx");
    nginx_cmd.arg("-g").arg("daemon off;");
    // Only silence nginx stdout outside of debug mode. (Bug fix: stdout was
    // previously redirected to /dev/null unconditionally in the `status()`
    // call below, which made this debug check a no-op.)
    if !config.debug {
        nginx_cmd.stdout(Stdio::null());
    }
    match nginx_cmd.status() {
        Ok(status) => {
            if !status.success() {
                print_error_and_exit("Error: Unexpected non-zero exit code from nginx");
            }
        }
        Err(err) => {
            print_error_and_exit(format!("Error: Running Nginx failed with: {}", err));
        }
    }
}
/// Build the configuration from the environment and load the OpenAPI endpoint
/// sets. Returns `(config, post-merge endpoints, optional pre-merge endpoints)`;
/// the pre-merge set is only loaded when merge-diff mode applies. Exits the
/// process on any configuration or parse error.
pub fn initialize_coveapi() -> (
    CoveAPIConfig,
    Vec<EndpointConfiguration>,
    Option<Vec<EndpointConfiguration>>,
) {
    let config = match CoveAPIConfig::from_env() {
        Ok(config) => config,
        Err(error) => error.display_error_and_exit(),
    };
    let openapi_endpoints = match get_openapi_endpoint_configs(&config) {
        Ok(openapi_endpoints) => openapi_endpoints,
        Err(error) => error.display_error_and_exit(),
    };
    let mut pre_merge_endpoints = None;
    // filter out impossible szenarios, where they require only_account_for_merge but nothing can
    // be compared
    if config.only_account_for_merge && !config.all_openapi_sources_are_paths() {
        if config.is_merge {
            print_error_and_exit("Your configuration contains a dynamically loaded openapi spec. CoveAPI needs it to be a local file when only accounting for the difference between commits.");
        } else {
            print_error_and_exit("You need to have two commits to compare (ex. pull/merge request) when only accounting for the difference between commits.");
        }
    }
    // add pre_merge_endpoints is a merge is taking place
    if config.is_merge && config.only_account_for_merge {
        // Collect the pre-merge endpoints of every runtime into one flat list.
        let mut endpoints = vec![];
        for runtime in &config.runtimes {
            let mut pre_merge_endpoints_of_runtime = match get_pre_merge_openapi_endpoints(runtime.clone()) {
                Ok(endpoints) => endpoints,
                Err(err) => err.display_error_and_exit(),
            };
            endpoints.append(&mut pre_merge_endpoints_of_runtime);
        }
        pre_merge_endpoints = Some(endpoints);
    }
    (config, openapi_endpoints, pre_merge_endpoints)
}
/// Parse the nginx access log, evaluate coverage against the spec endpoints,
/// and print the results (coverage percentage, missed endpoints, and a
/// warning when the 502 heuristic fires). Exits on log-parse failure.
pub fn run_eval(
    config: &CoveAPIConfig,
    openapi_endpoints: Vec<EndpointConfiguration>,
    pre_merge_endpoints: Option<Vec<EndpointConfiguration>>,
) {
    print_debug_message("Evaluating endpoint coverage");
    let nginx_endpoints = match parse_nginx_access_log(&config.runtimes) {
        Ok(nginx_endpoints) => nginx_endpoints,
        Err(_) => print_error_and_exit("An unexpected error occured while parsing the nginx logs"),
    };
    let evaluation = evaluate(
        &openapi_endpoints,
        &pre_merge_endpoints,
        &nginx_endpoints,
        &config.groupings,
    );
    if evaluation.has_gateway_issues {
        println!("WARNING: an unusual amount of 502 status codes were found, your setup might have gateway issues.");
    }
    println!("Test Coverage: {}%", evaluation.test_coverage * 100.0);
    if !evaluation.endpoints_not_covered.is_empty() {
        println!("The following endpoints were missed:");
        for endpoint in evaluation.endpoints_not_covered {
            println!("- {} {} {}", endpoint.path, endpoint.method, endpoint.status_code);
        }
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/main.rs | Rust | use coveapi::{initialize_coveapi, run_eval, run_nginx};
/// Entry point: load configuration and specs, optionally dump the config in
/// debug mode, run nginx to completion (blocking), then evaluate coverage
/// from the access log it produced.
fn main() {
    let (config, openapi_endpoints, pre_merge_endpoints) = initialize_coveapi();
    if config.debug {
        config.print();
    }
    run_nginx(&config);
    run_eval(&config, openapi_endpoints, pre_merge_endpoints);
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/models/endpoint.rs | Rust | use std::{fmt::Display, str::FromStr, sync::Arc};
use crate::{config::Runtime, utils::Error};
use super::misc::Method;
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
/// One concrete endpoint outcome: method + path template + status code, tied
/// to the runtime it belongs to. Used both for spec endpoints and for
/// observed (access-log) requests.
pub struct EndpointConfiguration {
    pub method: Method,
    pub path: OpenapiPath,
    pub status_code: u16,
    pub runtime: Arc<Runtime>,
    /// Marks endpoints synthesized by CoveAPI rather than read from the spec
    /// — presumably; confirm against the callers that set it.
    pub is_generated: bool,
}
impl EndpointConfiguration {
    /// Build an endpoint configuration, parsing `openapi_path` into an
    /// `OpenapiPath` template. Currently always returns `Ok` because the path
    /// parser is infallible, but keeps the `Result` for future validation.
    pub fn new(
        method: Method,
        openapi_path: &str,
        status_code: u16,
        runtime: Arc<Runtime>,
        is_generated: bool,
    ) -> Result<EndpointConfiguration, Error> {
        Ok(EndpointConfiguration {
            method,
            path: OpenapiPath::from_str(openapi_path)?,
            status_code,
            runtime,
            is_generated,
        })
    }
    /// True when this (template) endpoint covers `other`: identical method,
    /// status and runtime, and this path template matches the other's path.
    pub fn incompases_endpoint(&self, other: &EndpointConfiguration) -> bool {
        self.method == other.method
            && self.status_code == other.status_code
            && self.runtime == other.runtime
            && self.path.incompases_openapi_path(&other.path)
    }
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// An OpenAPI path template, parsed into alternating fixed-text and variable
/// (`{name}`) components. The raw input string is kept for matching and Display.
pub struct OpenapiPath {
    components: Vec<OpenapiPathComponent>,
    original_source: String,
}
impl OpenapiPath {
    /// True when this template matches `other`'s raw path text: fixed
    /// components must appear verbatim, and each `{var}` component consumes
    /// any run of characters up to the next `/` or the start of the next
    /// fixed component. Matching only succeeds if the whole of `other` is
    /// consumed.
    pub fn incompases_openapi_path(&self, other: &OpenapiPath) -> bool {
        // Byte cursor into `other`'s raw text.
        // NOTE(review): slicing by byte index will panic on multi-byte UTF-8
        // in `other` — presumably paths are ASCII; confirm.
        let mut parse_index = 0;
        let other_as_str = &other.original_source;
        for component_index in 0..self.components.len() {
            let component = &self.components[component_index];
            match component {
                OpenapiPathComponent::Fixed(fixed) => {
                    // Fixed text must match `other` exactly at the cursor.
                    if fixed.len() + parse_index > other_as_str.len()
                        || fixed != &other_as_str[parse_index..parse_index + fixed.len()]
                    {
                        return false;
                    }
                    parse_index += fixed.len();
                }
                OpenapiPathComponent::Variable => {
                    const EMPTY_NEXT_STRING: &str = "";
                    // Peek at the following fixed component (if any) so the
                    // variable stops consuming where that text begins.
                    let next_string = match self.components.get(component_index + 1) {
                        Some(next_component) => match next_component {
                            OpenapiPathComponent::Fixed(original_source) => original_source,
                            OpenapiPathComponent::Variable => EMPTY_NEXT_STRING,
                        },
                        None => EMPTY_NEXT_STRING,
                    };
                    // Greedily consume until a path separator or the next
                    // fixed component's text.
                    while parse_index < other_as_str.len() {
                        if &other_as_str[parse_index..parse_index + 1] == "/"
                            || (next_string != EMPTY_NEXT_STRING
                                && other_as_str.len() > next_string.len() + parse_index
                                && &other_as_str[parse_index..parse_index + next_string.len()] == next_string)
                        {
                            break;
                        }
                        parse_index += 1;
                    }
                }
            }
        }
        // Match only when nothing of `other` is left over.
        parse_index == other_as_str.len()
    }
}
/// Displays the path exactly as it was written in the spec.
impl Display for OpenapiPath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.original_source)
    }
}
impl FromStr for OpenapiPath {
    type Err = Error;

    /// Parse an OpenAPI path template (e.g. `/foo/{bar}/moo`) into fixed and
    /// variable components. Unbalanced braces are treated as literal text.
    /// The parse is infallible; the `Result` is kept for signature stability.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut path = vec![];
        // Text accumulated since the last component boundary.
        let mut current_component = String::new();
        // Fixed text seen before an opening `{`, held back until we know
        // whether the brace actually closes.
        let mut cached_component = String::new();
        let mut is_in_variable = false;
        for character in s.chars() {
            // Compare chars directly instead of allocating via `to_string()`.
            if is_in_variable && character == '}' {
                is_in_variable = false;
                if !cached_component.is_empty() {
                    path.push(OpenapiPathComponent::Fixed(cached_component.clone()));
                    cached_component = String::new();
                }
                path.push(OpenapiPathComponent::Variable);
                current_component = String::new();
            } else if !is_in_variable && character == '{' {
                is_in_variable = true;
                cached_component = current_component.clone();
                current_component = String::new();
            } else {
                current_component.push(character);
            }
        }
        if !current_component.is_empty() {
            // Deal with an opened-but-unclosed bracket: keep the `{` literal.
            let infix = if is_in_variable { "{" } else { "" };
            path.push(OpenapiPathComponent::Fixed(format!(
                "{}{}{}",
                cached_component, infix, current_component
            )));
        } else if !cached_component.is_empty() {
            // Input ended right after `{` (e.g. "/foo/{"). Bug fix: this
            // branch previously pushed the *empty* `current_component`,
            // dropping the prefix entirely; keep the cached prefix plus the
            // literal `{` so the components mirror `original_source`.
            path.push(OpenapiPathComponent::Fixed(format!("{}{{", cached_component)));
        }
        Ok(OpenapiPath {
            components: path,
            original_source: s.to_string(),
        })
    }
}
#[derive(PartialEq, Eq, Debug, Clone, Hash)]
/// One segment of a parsed path template: literal text or a `{variable}`
/// placeholder (the variable's name is not retained — only its position).
pub enum OpenapiPathComponent {
    Fixed(String),
    Variable,
}
#[cfg(test)]
mod tests {
use std::{str::FromStr, sync::Arc};
use crate::{models::Method, utils::test::create_mock_runtime};
use super::{EndpointConfiguration, OpenapiPath, OpenapiPathComponent};
#[test]
// A path with no braces becomes a single Fixed component.
fn parses_fixed_path() {
    let expected = OpenapiPath {
        components: vec![OpenapiPathComponent::Fixed("/foo/bar".to_string())],
        original_source: "/foo/bar".to_string(),
    };
    let got = OpenapiPath::from_str("/foo/bar").unwrap();
    assert_eq!(expected, got);
}
#[test]
// `{bar}` in the middle splits the path into Fixed/Variable/Fixed.
fn parses_variable_path() {
    let expected = OpenapiPath {
        components: vec![
            OpenapiPathComponent::Fixed("/foo/".to_string()),
            OpenapiPathComponent::Variable,
            OpenapiPathComponent::Fixed("/moo".to_string()),
        ],
        original_source: "/foo/{bar}/moo".to_string(),
    };
    let got = OpenapiPath::from_str("/foo/{bar}/moo").unwrap();
    assert_eq!(expected, got);
}
#[test]
// An unclosed `{` is kept as literal text.
fn ignores_single_opening_bracket() {
    let expected = OpenapiPath {
        components: vec![OpenapiPathComponent::Fixed("/foo/{bar".to_string())],
        original_source: "/foo/{bar".to_string(),
    };
    let got = OpenapiPath::from_str("/foo/{bar").unwrap();
    assert_eq!(expected, got);
}
#[test]
// A stray `}` without a matching `{` is also kept as literal text.
fn ignores_single_closing_bracket() {
    let expected = OpenapiPath {
        components: vec![OpenapiPathComponent::Fixed("/foo/}bar".to_string())],
        original_source: "/foo/}bar".to_string(),
    };
    let got = OpenapiPath::from_str("/foo/}bar").unwrap();
    assert_eq!(expected, got);
}
#[test]
// A trailing `{bar}` ends the component list with a Variable.
fn correctly_identifies_variable_end() {
    let expected = OpenapiPath {
        components: vec![
            OpenapiPathComponent::Fixed("/foo/".to_string()),
            OpenapiPathComponent::Variable,
        ],
        original_source: "/foo/{bar}".to_string(),
    };
    let got = OpenapiPath::from_str("/foo/{bar}").unwrap();
    assert_eq!(expected, got);
}
// Assert that template `a` does (or does not) encompass raw path `b`.
fn test_incompas_path_with_string(a: &str, b: &str, expected: bool) {
    assert!(get_path_string_incompasing_bool(a, b) == expected);
}
// Parse both strings as OpenapiPath and check template `a` against `b`.
fn get_path_string_incompasing_bool(a: &str, b: &str) -> bool {
    let path_a = OpenapiPath::from_str(a).unwrap();
    let path_b = OpenapiPath::from_str(b).unwrap();
    path_a.incompases_openapi_path(&path_b)
}
#[test]
// Identical fixed paths match each other.
fn fixed_endpoints_encompas_eachother() {
    test_incompas_path_with_string("/foo/bar", "/foo/bar", true);
}
#[test]
// Same path/status but PUT vs GET must not match.
fn expoints_with_different_methods_dont_encompas_eachother() {
    let endpoint_cfg_a =
        EndpointConfiguration::new(Method::PUT, "/foo/bar", 200, Arc::from(create_mock_runtime()), false).unwrap();
    let endpoint_cfg_b =
        EndpointConfiguration::new(Method::GET, "/foo/bar", 200, Arc::from(create_mock_runtime()), false).unwrap();
    assert!(!endpoint_cfg_a.incompases_endpoint(&endpoint_cfg_b));
}
#[test]
// Same path/method but 400 vs 200 must not match.
fn expoints_with_different_status_codes_dont_encompas_eachother() {
    let endpoint_cfg_a =
        EndpointConfiguration::new(Method::GET, "/foo/bar", 400, Arc::from(create_mock_runtime()), false).unwrap();
    let endpoint_cfg_b =
        EndpointConfiguration::new(Method::GET, "/foo/bar", 200, Arc::from(create_mock_runtime()), false).unwrap();
    assert!(!endpoint_cfg_a.incompases_endpoint(&endpoint_cfg_b));
}
#[test]
// Fixed: this test previously used two *identical* mock runtimes and differing
// status codes (400 vs 200), so it duplicated the status-code test and never
// exercised the runtime comparison. Build two runtimes that differ (by port)
// while keeping method, path and status equal.
fn expoints_with_different_runtimes_dont_encompas_eachother() {
    let mut other_runtime = create_mock_runtime();
    other_runtime.port = other_runtime.port.wrapping_add(1);
    let endpoint_cfg_a =
        EndpointConfiguration::new(Method::GET, "/foo/bar", 200, Arc::from(create_mock_runtime()), false).unwrap();
    let endpoint_cfg_b =
        EndpointConfiguration::new(Method::GET, "/foo/bar", 200, Arc::from(other_runtime), false).unwrap();
    assert!(!endpoint_cfg_a.incompases_endpoint(&endpoint_cfg_b));
}
#[test]
// A `{var}` segment matches a concrete segment in the same position.
fn dynamic_endpoints_encompas_eachother() {
    test_incompas_path_with_string("/foo/{bar}/moo", "/foo/bar/moo", true);
}
#[test]
// Extra or missing trailing segments break the match in both directions.
fn different_endpoints_dont_encompas_eachother() {
    test_incompas_path_with_string("/foo/{bar}", "/foo/bar/moo", false);
    test_incompas_path_with_string("/foo/{bar}/moo", "/foo/bar", false);
}
#[test]
// Two templates with a variable in the same position match each other.
fn same_variable_endpoints_encompas_eachother() {
    test_incompas_path_with_string("/foo/{bar}/moo", "/foo/{moo}/moo", true);
}
#[test]
// Numeric path segments are consumed by a variable like any other text.
fn matches_numerics_as_vairable_in_path() {
    test_incompas_path_with_string("/foo/{bar}", "/foo/69", true);
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/models/grouping.rs | Rust | use super::{EndpointConfiguration, Method, OpenapiPath};
#[derive(Debug, Hash, PartialEq, Eq)]
/// A set of endpoints treated as one unit for coverage: any endpoint whose
/// method, status and path fall within the grouping belongs to it. Covering
/// one member covers the group; ignore-groups count as covered outright.
pub struct Grouping {
    methods: Vec<Method>,
    status: Vec<u16>,
    path: OpenapiPath,
    pub is_ignore_group: bool,
}
impl Grouping {
    /// True when `endpoint` belongs to this grouping: its method and status
    /// code are listed and its path falls under the grouping's path template.
    pub fn incompases_endpoint_config(&self, endpoint: &EndpointConfiguration) -> bool {
        if !self.methods.contains(&endpoint.method) {
            return false;
        }
        if !self.status.contains(&endpoint.status_code) {
            return false;
        }
        self.path.incompases_openapi_path(&endpoint.path)
    }

    /// Construct a grouping from its member criteria.
    pub fn new(methods: Vec<Method>, status: Vec<u16>, path: OpenapiPath, is_ignore_group: bool) -> Grouping {
        Grouping {
            methods,
            status,
            path,
            is_ignore_group,
        }
    }
}
#[cfg(test)]
mod tests {
    use std::{str::FromStr, sync::Arc};
    use crate::{
        models::{EndpointConfiguration, Method, OpenapiPath},
        utils::test::create_mock_runtime,
    };
    use super::Grouping;
    #[test]
    // Matching method, status and templated path -> endpoint is in the group.
    fn grouping_detects_incompased_endpoint() {
        let grouping = Grouping {
            methods: vec![Method::GET],
            status: vec![200],
            path: OpenapiPath::from_str("/foo/{bar}").unwrap(),
            is_ignore_group: false,
        };
        let endpoint =
            EndpointConfiguration::new(Method::GET, "/foo/69", 200, Arc::from(create_mock_runtime()), false).unwrap();
        assert!(grouping.incompases_endpoint_config(&endpoint));
    }
    #[test]
    // NOTE(review): this grouping also differs in method, so it exercises
    // both criteria rather than status alone.
    fn different_status_leads_to_not_incompased() {
        let grouping = Grouping {
            methods: vec![Method::POST],
            status: vec![418],
            path: OpenapiPath::from_str("/foo/{bar}").unwrap(),
            is_ignore_group: false,
        };
        let endpoint =
            EndpointConfiguration::new(Method::GET, "/foo/69", 200, Arc::from(create_mock_runtime()), false).unwrap();
        assert!(!grouping.incompases_endpoint_config(&endpoint));
    }
    #[test]
    fn different_method_leads_to_not_incompased() {
        let grouping = Grouping {
            methods: vec![Method::POST],
            status: vec![200],
            path: OpenapiPath::from_str("/foo/{bar}").unwrap(),
            is_ignore_group: false,
        };
        let endpoint =
            EndpointConfiguration::new(Method::GET, "/foo/69", 200, Arc::from(create_mock_runtime()), false).unwrap();
        assert!(!grouping.incompases_endpoint_config(&endpoint));
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/models/misc.rs | Rust | use std::fmt::Display;
// Canonical upper-case spellings of the HTTP methods.
const METHOD_GET_STR: &str = "GET";
const METHOD_PUT_STR: &str = "PUT";
const METHOD_POST_STR: &str = "POST";
const METHOD_DELETE_STR: &str = "DELETE";
const METHOD_OPTIONS_STR: &str = "OPTIONS";
const METHOD_HEAD_STR: &str = "HEAD";
const METHOD_PATCH_STR: &str = "PATCH";
const METHOD_TRACE_STR: &str = "TRACE";

#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// The HTTP request methods understood by CoveAPI.
pub enum Method {
    GET,
    PUT,
    POST,
    DELETE,
    OPTIONS,
    HEAD,
    PATCH,
    TRACE,
}

/// Renders the canonical upper-case method name.
impl Display for Method {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl Method {
    /// The canonical upper-case spelling of this method.
    pub fn as_str(&self) -> &str {
        match *self {
            Method::GET => METHOD_GET_STR,
            Method::PUT => METHOD_PUT_STR,
            Method::POST => METHOD_POST_STR,
            Method::DELETE => METHOD_DELETE_STR,
            Method::OPTIONS => METHOD_OPTIONS_STR,
            Method::HEAD => METHOD_HEAD_STR,
            Method::PATCH => METHOD_PATCH_STR,
            Method::TRACE => METHOD_TRACE_STR,
        }
    }
}

#[allow(clippy::should_implement_trait)]
impl Method {
    /// Case-insensitive parse of a method name; `None` for unknown methods.
    pub fn from_str(method_str: &str) -> Option<Method> {
        let normalized = method_str.to_uppercase();
        let all_methods = [
            Method::GET,
            Method::PUT,
            Method::POST,
            Method::DELETE,
            Method::OPTIONS,
            Method::HEAD,
            Method::PATCH,
            Method::TRACE,
        ];
        all_methods
            .iter()
            .find(|method| method.as_str() == normalized)
            .cloned()
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/models/mod.rs | Rust | mod endpoint;
mod grouping;
mod misc;
pub use endpoint::EndpointConfiguration;
pub use endpoint::OpenapiPath;
pub use grouping::Grouping;
pub use misc::Method;
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/parser/common.rs | Rust | pub fn format_basepath(basepath: &str) -> &str {
if basepath.ends_with('/') {
&basepath[0..basepath.len() - 1]
} else {
basepath
}
}
#[cfg(test)]
mod tests {
    use super::format_basepath;
    #[test]
    // NOTE(review): "coverts" is a typo for "converts" in this test's name.
    fn coverts_slash_to_empty_string() {
        assert_eq!(format_basepath("/"), "");
    }
    #[test]
    fn removes_trailing_slash() {
        assert_eq!(format_basepath("/hello/"), "/hello");
    }
    #[test]
    // NOTE(review): "emty" is a typo for "empty" in this test's name.
    fn ignores_emty_string() {
        assert_eq!(format_basepath(""), "");
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/parser/http.rs | Rust | use std::sync::Arc;
use crate::{
config::{OpenapiSource, Runtime},
models::EndpointConfiguration,
utils::{print_debug_message, Error},
};
use super::{json_parser::parse_json_doc, yaml_parser::parse_yaml_doc};
/// Download a runtime's OpenAPI spec over HTTP and parse it into endpoint
/// configurations. JSON is tried first; on a syntax error the same document is
/// re-parsed as YAML. Returns an error if the runtime's source is a file path
/// (this function handles URL sources only).
pub fn fetch_openapi_endpoints_for_runtime(runtime: Arc<Runtime>) -> Result<Vec<EndpointConfiguration>, Error> {
    let mut openapi_url = match &runtime.openapi_source {
        OpenapiSource::Url(openapi_url) => openapi_url.clone(),
        OpenapiSource::Path(_) => return Err(Error::UnknownInternalError("ota fetch with path".to_string())),
    };
    // Inside the container, "localhost" means the container itself; rewrite it
    // to the Docker default bridge gateway so we reach the host.
    if openapi_url.host_str() == Some("localhost") {
        // unwrap here is fine, since the IP address provided is allways valid
        openapi_url.set_host(Some("172.17.0.1")).unwrap();
    }
    // note: using blocking client here because all following steps require it
    let openapi_spec = match reqwest::blocking::get(openapi_url.as_str()) {
        Ok(openapi_response) => match openapi_response.text() {
            Ok(openapi_spec) => openapi_spec,
            Err(why) => {
                print_debug_message(format!("{}", why));
                return Err(Error::OpenapiMalformedOnlineComponents);
            }
        },
        Err(why) => {
            print_debug_message(format!("{}", why));
            return Err(Error::OpenapiFetchConnectionFailure);
        }
    };
    // attempt to parse as json -> on syntax err attempt yaml
    match parse_json_doc(&openapi_spec, runtime.clone()) {
        Ok(endpoints) => Ok(endpoints),
        Err(Error::InvalidParseSyntax) => parse_yaml_doc(&openapi_spec, runtime.clone()),
        Err(error) => Err(error),
    }
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/parser/json_parser.rs | Rust | use std::sync::Arc;
use json::JsonValue;
use crate::{
config::Runtime,
models::{EndpointConfiguration, Method},
utils::Error,
};
use super::common::format_basepath;
pub fn parse_json_doc(json_string: &str, runtime: Arc<Runtime>) -> Result<Vec<EndpointConfiguration>, Error> {
let mut endpoints = vec![];
let json_obj = match json::parse(json_string) {
Ok(json_obj) => json_obj,
Err(_) => return Err(Error::InvalidParseSyntax),
};
let base_path = match &json_obj["basePath"] {
JsonValue::Null => "",
base_path => match base_path.as_str() {
Some(base_path) => base_path,
None => return Err(Error::InvalidBasePath),
},
};
let base_path = format_basepath(base_path);
let paths = match &json_obj["paths"] {
json::Null => return Err(Error::InvalidParseSyntax),
responses => responses,
};
for path_json in paths.entries() {
let mut path = String::from(base_path);
match path_json.0 {
"/" => (),
_ => path.push_str(path_json.0),
}
if path.is_empty() {
path.push('/');
}
for (method, method_json) in get_methods_from_path(path_json.1)?.into_iter() {
let responses = match &method_json["responses"] {
json::Null => return Err(Error::InvalidParseSyntax),
responses => responses,
};
if !&method_json["security"].is_null() {
endpoints.push(EndpointConfiguration::new(
method.clone(),
&path,
401,
runtime.clone(),
false,
)?);
endpoints.push(EndpointConfiguration::new(
method.clone(),
&path,
403,
runtime.clone(),
false,
)?);
}
for response in responses.entries() {
let status_code = match response.0.parse() {
Ok(status_code) => status_code,
Err(_) => return Err(Error::InvalidParseStatusCode(response.0.to_string())),
};
endpoints.push(EndpointConfiguration::new(
method.clone(),
&path,
status_code,
runtime.clone(),
false,
)?)
}
}
}
Ok(endpoints)
}
fn get_methods_from_path(path_json: &JsonValue) -> Result<Vec<(Method, &JsonValue)>, Error> {
let mut methods = vec![];
for method_entry in path_json.entries() {
let method = match Method::from_str(method_entry.0) {
Some(method) => method,
None => return Err(Error::InvalidParseMethod(method_entry.0.to_string())),
};
methods.push((method, method_entry.1));
}
Ok(methods)
}
#[cfg(test)]
mod test {
use std::{str::FromStr, sync::Arc};
use crate::{
models::{Method, OpenapiPath},
parser::json_parser::parse_json_doc,
utils::test::create_mock_runtime,
};
const JSON_STRING: &str = r#"
{
"basePath": "/",
"paths" : {
"/": {
"get": {
"security": [],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": " #/definitions/controller.IsValid"
}
},
"400": {
"description": "Bad Request",
"schema": {
"$ref": " #/definitions/util.ErrorMessage"
}
}
}
},
"put": {
"responses": {
"418": {
"description": "I'm a teapot",
"schema": {
"$ref": " #/definitions/controller.IsValid"
}
}
}
}
},
"/test": {
"post": {
"responses": {
"418": {
"description": "I'm a teapot",
"schema": {
"$ref": " #/definitions/controller.IsValid"
}
}
}
}
}
}
}
"#;
#[test]
fn parses_correct_number_of_responses() {
assert_eq!(
parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.len(),
6
);
}
#[test]
fn parses_correct_status_codes() {
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.status_code == 200));
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.status_code == 400));
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.status_code == 418));
}
#[test]
fn parses_correct_path() {
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.path == OpenapiPath::from_str("/").unwrap()));
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.path == OpenapiPath::from_str("/test").unwrap()));
}
#[test]
fn parses_correct_method() {
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.method == Method::GET));
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.method == Method::POST));
assert!(parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.method == Method::PUT));
}
#[test]
fn adds_401_403_for_security_headers() {
assert_eq!(
parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.filter(|x| x.method == Method::GET
&& x.status_code == 401
&& x.path == OpenapiPath::from_str("/").unwrap())
.count(),
1
);
assert_eq!(
parse_json_doc(JSON_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.filter(|x| x.method == Method::GET
&& x.status_code == 403
&& x.path == OpenapiPath::from_str("/").unwrap())
.count(),
1
);
}
const JSON_STRING_DIFF_BASEPATH: &str = r#"
{
"basePath": "/foo",
"paths" : {
"/": {
"get": {
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": " #/definitions/controller.IsValid"
}
}
}
}
},
"/bar": {
"get": {
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": " #/definitions/controller.IsValid"
}
}
}
}
}
}
}
"#;
#[test]
fn parses_correct_basepath() {
assert!(
parse_json_doc(JSON_STRING_DIFF_BASEPATH, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.path == OpenapiPath::from_str("/foo").unwrap())
);
assert!(
parse_json_doc(JSON_STRING_DIFF_BASEPATH, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.path == OpenapiPath::from_str("/foo/bar").unwrap())
);
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/parser/mod.rs | Rust | mod common;
mod http;
mod json_parser;
mod nginx_parser;
mod yaml_parser;
use std::{path::Path, sync::Arc};
pub use nginx_parser::parse_nginx_access_log;
use crate::{
config::{CoveAPIConfig, OpenapiSource, Runtime},
models::EndpointConfiguration,
utils::{read_file_to_string_or_err, Error},
};
use self::{http::fetch_openapi_endpoints_for_runtime, json_parser::parse_json_doc, yaml_parser::parse_yaml_doc};
const OPENAPI_MOUNT_POINT: &str = "/repo";
const PRE_MERGE_PATH_EXTENSION: &str = ".coveapi.old";
pub fn get_openapi_endpoint_configs(config: &CoveAPIConfig) -> Result<Vec<EndpointConfiguration>, Error> {
let mut openapi_endpoints = vec![];
for runtime in &config.runtimes {
let mut endpoints = match get_runtime_openapi_endpoint_configs(runtime.clone()) {
Ok(endpoints) => endpoints,
Err(err) => return Err(err),
};
openapi_endpoints.append(&mut endpoints);
}
Ok(openapi_endpoints)
}
pub fn get_runtime_openapi_endpoint_configs(runtime: Arc<Runtime>) -> Result<Vec<EndpointConfiguration>, Error> {
match runtime.openapi_source {
OpenapiSource::Url(_) => fetch_openapi_endpoints_for_runtime(runtime),
OpenapiSource::Path(_) => parse_openapi_file(runtime, OPENAPI_MOUNT_POINT, ""),
}
}
pub fn get_pre_merge_openapi_endpoints(runtime: Arc<Runtime>) -> Result<Vec<EndpointConfiguration>, Error> {
match runtime.openapi_source {
OpenapiSource::Url(_) => fetch_openapi_endpoints_for_runtime(runtime),
OpenapiSource::Path(_) => parse_openapi_file(runtime, OPENAPI_MOUNT_POINT, PRE_MERGE_PATH_EXTENSION),
}
}
pub fn parse_openapi_file(
runtime: Arc<Runtime>,
mount_point: &str,
path_extension: &str,
) -> Result<Vec<EndpointConfiguration>, Error> {
let openapi_path = match &runtime.openapi_source {
OpenapiSource::Path(path) => path,
OpenapiSource::Url(_) => return Err(Error::UnknownInternalError("open api path read on url".to_string())),
};
let mut buf = openapi_path.clone().into_path_buf();
let extension = match buf.extension() {
Some(extension) => match extension.to_str() {
Some(extension) => extension.to_string(),
None => return Err(Error::UnknownOpenApiFormat),
},
None => return Err(Error::UnknownOpenApiFormat),
};
let full_extension = format!("{}{}", extension, path_extension);
buf.set_extension(full_extension);
let openapi_path = Path::new(mount_point).join(buf);
if extension == "json" {
Ok(parse_json_doc(
&read_file_to_string_or_err(
&openapi_path,
Error::ProblemOpeningFile(Box::from(openapi_path.as_path())),
)?,
runtime,
)?)
} else if extension == "yaml" || extension == "yml" {
Ok(parse_yaml_doc(
&read_file_to_string_or_err(
&openapi_path,
Error::ProblemOpeningFile(Box::from(openapi_path.as_path())),
)?,
runtime,
)?)
} else {
Err(Error::UnknownOpenApiFormat)
}
}
#[cfg(test)]
mod tests {
use std::{path::Path, sync::Arc};
use crate::{
config::OpenapiSource,
parser::{parse_openapi_file, PRE_MERGE_PATH_EXTENSION},
utils::test::create_mock_runtime,
};
#[test]
fn parses_json_file_correctly() {
let path = Path::new("./dump/swagger.json");
let mut runtime = create_mock_runtime();
runtime.openapi_source = OpenapiSource::Path(Box::from(path));
assert_eq!(parse_openapi_file(Arc::from(runtime), "./", "").unwrap().len(), 6);
}
#[test]
fn parses_yaml_file_correctly() {
let path = Path::new("./dump/swagger.yaml");
let mut runtime = create_mock_runtime();
runtime.openapi_source = OpenapiSource::Path(Box::from(path));
assert_eq!(parse_openapi_file(Arc::from(runtime), "./", "").unwrap().len(), 6);
}
#[test]
fn throws_error_when_providing_absolute_path() {
let path = Path::new("/test");
let mut runtime = create_mock_runtime();
runtime.openapi_source = OpenapiSource::Path(Box::from(path));
assert!(parse_openapi_file(Arc::from(runtime), "./", "").is_err())
}
#[test]
fn parses_old_file_correctly() {
let path = Path::new("./dump/swagger.yaml");
let mut runtime = create_mock_runtime();
runtime.openapi_source = OpenapiSource::Path(Box::from(path));
assert_eq!(
parse_openapi_file(Arc::from(runtime), "./", PRE_MERGE_PATH_EXTENSION)
.unwrap()
.len(),
6
);
let path = Path::new("./dump/swagger.json");
let mut runtime = create_mock_runtime();
runtime.openapi_source = OpenapiSource::Path(Box::from(path));
assert_eq!(
parse_openapi_file(Arc::from(runtime), "./", PRE_MERGE_PATH_EXTENSION)
.unwrap()
.len(),
6
);
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/parser/nginx_parser.rs | Rust | use std::{
fs::File,
io::{BufRead, BufReader},
path::Path,
sync::Arc,
};
use crate::{
config::Runtime,
models::{EndpointConfiguration, Method},
utils::{print_debug_message, Error},
};
use lazy_static::lazy_static;
use regex::Regex;
pub fn parse_nginx_access_log(runtimes: &Vec<Arc<Runtime>>) -> Result<Vec<EndpointConfiguration>, Error> {
parse_access_log(runtimes, Path::new("/var/log/nginx/access.log"))
}
fn parse_access_log(runtimes: &Vec<Arc<Runtime>>, path: &Path) -> Result<Vec<EndpointConfiguration>, Error> {
let mut endpoints = Vec::new();
let reader = match File::open(path) {
Ok(file) => BufReader::new(file),
Err(why) => {
print_debug_message(why.to_string());
return Err(Error::ProblemOpeningFile(Box::from(path)));
}
};
for line in reader.lines() {
let line_str = match line {
Ok(line_str) => line_str,
Err(why) => {
print_debug_message(why.to_string());
return Err(Error::ProblemOpeningFile(Box::from(path)));
}
};
endpoints.push(parse_nginx_line(runtimes, &line_str)?);
}
Ok(endpoints)
}
fn parse_nginx_line(runtimes: &Vec<Arc<Runtime>>, line: &str) -> Result<EndpointConfiguration, Error> {
lazy_static! {
static ref NGINX_LINE_REGEX: Regex =
Regex::new("^(\\[.+\\]) \"(\\w{3, 4}) (/\\S*) HTTP/\\d\\.\\d\" (\\d{3}) (\\d{1, 5})").unwrap();
}
let captures = match NGINX_LINE_REGEX.captures(line) {
Some(captures) => captures,
None => return Err(Error::InvalidParseSyntax),
};
let status = {
let status_string = match captures.get(4) {
Some(status_string) => status_string,
None => return Err(Error::InvalidParseSyntax),
};
match status_string.as_str().parse() {
Ok(status) => status,
Err(..) => return Err(Error::InvalidParseStatusCode(status_string.as_str().to_string())),
}
};
let method = {
let method_string = match captures.get(2) {
Some(method_string) => method_string.as_str(),
None => return Err(Error::UnknownInternalError("no method nginx logs".to_string())),
};
match Method::from_str(method_string) {
Some(method) => method,
None => {
return Err(Error::UnknownInternalError(format!(
"invalid method nginx {}",
method_string
)))
}
}
};
let path = match captures.get(3) {
Some(path) => String::from(path.as_str()),
None => return Err(Error::UnknownInternalError("invalid path nginx logs".to_string())),
};
let port = match captures.get(5) {
Some(port_string) => match port_string.as_str().parse() {
Ok(port) => port,
Err(_) => return Err(Error::UnknownInternalError("invalid port nginx logs".to_string())),
},
None => return Err(Error::UnknownInternalError("no port number nginx logs".to_string())),
};
EndpointConfiguration::new(method, &path, status, find_runtime_by_port(runtimes, port)?, false)
}
fn find_runtime_by_port(runtimes: &Vec<Arc<Runtime>>, port: u16) -> Result<Arc<Runtime>, Error> {
for runtime in runtimes {
if runtime.port == port {
return Ok(runtime.clone());
}
}
Err(Error::UnknownInternalError("unknown port in nginx logs".to_string()))
}
#[cfg(test)]
mod test {
use std::{path::Path, str::FromStr, sync::Arc};
use reqwest::Url;
use crate::{
config::{OpenapiSource, Runtime},
models::{Method, OpenapiPath},
parser::nginx_parser::{parse_access_log, parse_nginx_line},
};
use super::find_runtime_by_port;
fn generate_runtimes() -> Vec<Arc<Runtime>> {
vec![
Arc::from(Runtime {
port: 13750,
openapi_source: OpenapiSource::Path(Box::from(Path::new("./dump"))),
app_base_url: Url::from_str("http://example.con").unwrap(),
}),
Arc::from(Runtime {
port: 8080,
openapi_source: OpenapiSource::Path(Box::from(Path::new("./dump"))),
app_base_url: Url::from_str("http://example.con").unwrap(),
}),
]
}
#[test]
fn parses_correct_status() {
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:50:03 +0000] \"GET /weather HTTP/1.1\" 200 8080"
)
.unwrap()
.status_code,
200
);
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:52:45 +0000] \"GET /user HTTP/1.1\" 404 8080"
)
.unwrap()
.status_code,
404
);
}
#[test]
fn parses_correct_method() {
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:50:03 +0000] \"GET /weather HTTP/1.1\" 200 8080"
)
.unwrap()
.method,
Method::GET
);
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:50:03 +0000] \"POST /weather HTTP/1.1\" 200 8080"
)
.unwrap()
.method,
Method::POST
);
}
#[test]
fn parses_correct_path() {
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:50:03 +0000] \"GET /weather HTTP/1.1\" 200 8080"
)
.unwrap()
.path,
OpenapiPath::from_str("/weather").unwrap(),
);
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:52:45 +0000] \"GET /user HTTP/1.1\" 404 8080"
)
.unwrap()
.path,
OpenapiPath::from_str("/user").unwrap(),
);
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:52:45 +0000] \"GET / HTTP/1.1\" 404 8080"
)
.unwrap()
.path,
OpenapiPath::from_str("/").unwrap(),
);
}
#[test]
fn parses_correct_port() {
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:50:03 +0000] \"GET /weather HTTP/1.1\" 200 8080"
)
.unwrap()
.runtime
.port,
8080
);
assert_eq!(
parse_nginx_line(
&generate_runtimes(),
"[11/Jul/2023:08:50:03 +0000] \"POST /weather HTTP/1.1\" 200 13750"
)
.unwrap()
.runtime
.port,
13750
);
}
#[test]
fn parses_full_access_log() {
let path = Path::new("./dump/access.log");
assert_eq!(parse_access_log(&generate_runtimes(), path).unwrap().len(), 9);
}
#[test]
fn finds_runtime_by_port() {
let runtimes = vec![
Arc::from(Runtime {
port: 8080,
openapi_source: OpenapiSource::Path(Box::from(Path::new("./dump"))),
app_base_url: Url::from_str("http://example.con").unwrap(),
}),
Arc::from(Runtime {
port: 7890,
openapi_source: OpenapiSource::Path(Box::from(Path::new("./dump"))),
app_base_url: Url::from_str("http://example.con").unwrap(),
}),
Arc::from(Runtime {
port: 443,
openapi_source: OpenapiSource::Path(Box::from(Path::new("./dump"))),
app_base_url: Url::from_str("http://example.con").unwrap(),
}),
];
assert_eq!(find_runtime_by_port(&runtimes, 7890).unwrap(), runtimes[1]);
assert_eq!(find_runtime_by_port(&runtimes, 443).unwrap(), runtimes[2]);
}
#[test]
fn throws_error_if_port_is_not_from_runtime() {
let runtimes = vec![];
assert!(find_runtime_by_port(&runtimes, 7890).is_err());
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/parser/yaml_parser.rs | Rust | use std::sync::Arc;
use linked_hash_map::LinkedHashMap;
use yaml_rust::{Yaml, YamlLoader};
use crate::{
config::Runtime,
models::{EndpointConfiguration, Method},
parser::common::format_basepath,
utils::Error,
};
pub fn parse_yaml_doc(yaml_string: &str, runtime: Arc<Runtime>) -> Result<Vec<EndpointConfiguration>, Error> {
let spec = match YamlLoader::load_from_str(yaml_string) {
Ok(spec) => spec,
Err(_) => return Err(Error::InvalidParseSyntax),
};
let spec = &spec[0];
let spec = match spec.as_hash() {
Some(spec) => spec,
None => return Err(Error::UnknownInternalError("yaml spec can't be serialized".to_string())),
};
let basepath = match spec.get(&Yaml::from_str("basePath")) {
Some(basepath) => match basepath.as_str() {
Some(basepath) => basepath,
None => return Err(Error::InvalidBasePath),
},
None => "",
};
let basepath = format_basepath(basepath);
let paths = match spec.get(&Yaml::from_str("paths")) {
Some(paths) => match paths.as_hash() {
Some(paths) => paths,
None => return Err(Error::InvalidParseSyntax),
},
None => return Err(Error::InvalidParseSyntax),
};
let mut endpoints = vec![];
for path_key in paths.keys() {
// unwrap is fine here, as we can expect keys to be strings
let path = format!("{}{}", basepath, path_key.as_str().unwrap());
let methods = retrive_value_as_hash_map(paths, path_key)?;
for method_key in methods.keys() {
// unwrap is fine here, as we can expect keys to be strings
let method = match Method::from_str(method_key.as_str().unwrap()) {
Some(method) => method,
None => return Err(Error::InvalidParseMethod(String::from(path_key.as_str().unwrap()))),
};
let method_infos = retrive_value_as_hash_map(methods, method_key)?;
let statuses = retrive_value_as_hash_map(method_infos, &Yaml::from_str("responses"))?;
if method_infos.get(&Yaml::from_str("security")).is_some() {
endpoints.push(EndpointConfiguration::new(
method.clone(),
&path,
401,
runtime.clone(),
true,
)?);
endpoints.push(EndpointConfiguration::new(
method.clone(),
&path,
403,
runtime.clone(),
true,
)?);
}
for status_key in statuses.keys() {
let status_code = match status_key.as_str().unwrap().parse() {
Ok(status_code) => status_code,
Err(_) => return Err(Error::InvalidParseStatusCode(status_key.as_str().unwrap().to_string())),
};
endpoints.push(EndpointConfiguration::new(
method.clone(),
&path,
status_code,
runtime.clone(),
false,
)?);
}
}
}
Ok(endpoints)
}
fn retrive_value_as_hash_map<'a>(
parent: &'a LinkedHashMap<Yaml, Yaml>,
key: &Yaml,
) -> Result<&'a LinkedHashMap<Yaml, Yaml>, Error> {
match parent.get(key) {
Some(child) => match child.as_hash() {
Some(child) => Ok(child),
None => Err(Error::InvalidParseSyntax),
},
None => Err(Error::UnknownInternalError("yaml spec parent should exist".to_string())),
}
}
#[cfg(test)]
mod tests {
use std::{str::FromStr, sync::Arc};
use crate::{
models::{Method, OpenapiPath},
parser::yaml_parser::parse_yaml_doc,
utils::test::create_mock_runtime,
};
const YAML_STRING: &str = "
basePath: /
paths:
/:
get:
security:
- BasicAuth: []
responses:
\"400\":
description: Bad Request
schema:
$ref: '#/definitions/util.ErrorMessage'
\"200\":
description: OK
schema:
$ref: '#/definitions/controller.BaseResponse'
put:
responses:
\"418\":
description: Im a teapot
schema:
$ref: '#/definitions/controller.IsValid'
/test:
get:
responses:
\"418\":
description: Im a teapot
schema:
$ref: '#/definitions/controller.IsValid'
";
#[test]
fn finds_all_paths() {
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.path == OpenapiPath::from_str("/").unwrap()));
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.path == OpenapiPath::from_str("/test").unwrap()));
}
#[test]
fn finds_all_methods() {
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.method == Method::GET));
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.method == Method::PUT));
}
#[test]
fn finds_all_statuses() {
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.status_code == 200));
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.status_code == 400));
assert!(parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.any(|x| x.status_code == 418));
}
#[test]
fn adds_401_403_for_security_headers() {
assert_eq!(
parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.filter(|x| x.method == Method::GET
&& x.status_code == 401
&& x.path == OpenapiPath::from_str("/").unwrap())
.count(),
1
);
assert_eq!(
parse_yaml_doc(YAML_STRING, Arc::from(create_mock_runtime()))
.unwrap()
.iter()
.filter(|x| x.method == Method::GET
&& x.status_code == 403
&& x.path == OpenapiPath::from_str("/").unwrap())
.count(),
1
);
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/utils/debug.rs | Rust | use std::{fmt::Display, process};
use crate::config::CoveAPIConfig;
pub fn print_debug_message<T: Display>(debug_message: T) {
if CoveAPIConfig::global_is_debug() {
println!("{}", debug_message);
}
}
pub fn print_error_and_exit<T: Display>(debug_message: T) -> ! {
eprintln!("{}", debug_message);
process::exit(1);
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/utils/error.rs | Rust | use std::path::Path;
use super::print_error_and_exit;
#[derive(Debug)]
pub enum Error {
InvalidApplicationURL(String),
MissingConfiguration,
ConflictingConfiguration,
UnexpectedIOIssue(String),
InvalidParseSyntax,
InvalidBasePath,
InvalidStatusCode(String),
InvalidMethodString(String),
InvalidParseStatusCode(String),
InvalidParseMethod(String),
ProblemOpeningFile(Box<Path>),
UnknownInternalError(String),
UnknownOpenApiFormat,
InvalidTestCoverage,
OpenapiFetchConnectionFailure,
OpenapiFetchInvalidUrl,
OpenapiMalformedOnlineComponents,
InvalidPortNumber(String),
InvalidMappingSyntax(String),
MissingMapping,
MappingMissingSemicolon(String),
OpenapiPathIsAbsolute(Box<Path>),
MappingDuplicatePorts,
InvalidPath(String),
}
impl Error {
fn get_error_msg(&self) -> String {
match self {
Error::InvalidApplicationURL(err_msg) => format!("Invalid application URL provided: {}", err_msg),
Error::MissingConfiguration => "Your configuration is missing wither a mapping or an openapi source with it's respective application URL.".to_string(),
Error::ConflictingConfiguration => "You can either provide a mapping or openapi location, port and application URL. Providing both is not possible at this time.".to_string(),
Error::UnexpectedIOIssue(err_msg) => format!("An issue with IO occured: {}", err_msg),
Error::ProblemOpeningFile(path) => format!("An issue opening the openapi ({:?}) file occured.", path),
Error::InvalidParseSyntax => "The syntax of the openapi file is incorrect.".to_string(),
Error::InvalidParseMethod(method) => format!("The openapi file contains an invalid method: {}", method),
Error::InvalidParseStatusCode(code) => format!("The openapi file contains an invalid status code: {}", code),
Error::UnknownInternalError(err) => format!("An unknown internal error occured, please open an issue on github for this [{}].", err),
Error::InvalidBasePath => "Basepath provided in openapi spec isn't valid.".to_string(),
Error::InvalidMethodString(method) => format!("The following method you provided is invalid: \"{}\"", method),
Error::InvalidStatusCode(code) => format!("The following status code you provided is invalid: \"{}\"", code),
Error::UnknownOpenApiFormat => "CoveAPI can only parse json and yaml formats,".to_string(),
Error::InvalidTestCoverage => "Your test coverage has to be a value between 0 and 1 or a percentage between 0% and 100%.".to_string(),
Error::OpenapiFetchConnectionFailure => "No connection to the specified openapi url could be made.".to_string(),
Error::OpenapiFetchInvalidUrl => "The specified openapi url is invalid.".to_string(),
Error::OpenapiMalformedOnlineComponents => "Some contents of the specified openapi resource are malformed.".to_string(),
Error::InvalidPortNumber(port_str) => format!("The specified port number is invalid: \"{}\"", port_str),
Error::InvalidMappingSyntax(mapping_string) => format!("The syntax of your mapping is invalid: {}", mapping_string),
Error::MissingMapping => "Please provide a mapping to your configuration, the current mapping is either empty or wasn't provided.".to_string(),
Error::MappingMissingSemicolon(mapping) => format!("The follwing mapping is missing a semicolon or is incomplete, please follow the 'service url; openapi source; port;' syntax: {}", mapping),
Error::OpenapiPathIsAbsolute(path) => format!("The following path is absolute, please only specify relative paths: {}", path.to_str().unwrap_or("<empty>")),
Error::MappingDuplicatePorts => "The mapping contains duplicate ports, every port can only be used once.".to_string(),
Error::InvalidPath(path) => format!("The following path failed to parse: {}", path),
}
}
pub fn display_error_and_exit(&self) -> ! {
print!("Error: ");
print_error_and_exit(self.get_error_msg())
}
pub fn display_error(&self) {
eprintln!("{}", self.get_error_msg());
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/utils/io.rs | Rust | use std::{fs::File, io::Read, path::Path};
use crate::models::EndpointConfiguration;
use super::print_debug_message;
pub fn read_file_to_string_or_err<E>(path: &Path, err: E) -> Result<String, E> {
let mut file = match File::open(path) {
Ok(file) => file,
Err(why) => {
print_debug_message(why.to_string());
return Err(err);
}
};
let mut file_str = String::new();
match file.read_to_string(&mut file_str) {
Ok(_) => Ok(file_str),
Err(_) => Err(err),
}
}
pub fn print_endpoints<'a, T: Iterator<Item = &'a EndpointConfiguration>>(endpoints: T) {
for endpoint in endpoints {
println!(
"- \"{}\", {:?}, {}",
endpoint.path, endpoint.method, endpoint.status_code
);
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/utils/mod.rs | Rust | mod debug;
mod error;
mod io;
mod runtime;
#[cfg(test)]
pub mod test;
pub use debug::print_debug_message;
pub use debug::print_error_and_exit;
pub use error::Error;
pub use io::print_endpoints;
pub use io::read_file_to_string_or_err;
pub use runtime::sort_by_runtime;
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/utils/runtime.rs | Rust | use std::{collections::HashMap, sync::Arc};
use crate::{config::Runtime, models::EndpointConfiguration};
pub fn sort_by_runtime(
endpoint_configs: &Vec<EndpointConfiguration>,
) -> HashMap<Arc<Runtime>, Vec<&EndpointConfiguration>> {
let mut runtime_sorted_endpoint_configs = HashMap::new();
for endpoit_config in endpoint_configs {
runtime_sorted_endpoint_configs
.entry(endpoit_config.runtime.clone())
.or_insert(vec![])
.push(endpoit_config);
}
runtime_sorted_endpoint_configs
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use crate::{
models::{EndpointConfiguration, Method},
utils::test::create_mock_runtime,
};
use super::sort_by_runtime;
#[test]
fn sorts_into_runtime_correctly() {
let mut runtime_a = create_mock_runtime();
runtime_a.port = 400;
let mut runtime_b = create_mock_runtime();
runtime_b.port = 200;
let runtime_a = Arc::from(runtime_a);
let runtime_b = Arc::from(runtime_b);
let endpoint_configs = vec![
EndpointConfiguration::new(Method::GET, "/", 200, runtime_a.clone(), false).unwrap(),
EndpointConfiguration::new(Method::GET, "/", 502, runtime_a.clone(), false).unwrap(),
EndpointConfiguration::new(Method::GET, "/", 404, runtime_b.clone(), false).unwrap(),
];
let sorted = sort_by_runtime(&endpoint_configs);
assert_eq!(sorted.get(&runtime_a.clone()).unwrap().len(), 2);
assert!(sorted
.get(&runtime_a.clone())
.unwrap()
.iter()
.any(|x| x.status_code == 200));
assert!(sorted
.get(&runtime_a.clone())
.unwrap()
.iter()
.any(|x| x.status_code == 502));
assert_eq!(sorted.get(&runtime_b.clone()).unwrap().len(), 1);
assert!(sorted
.get(&runtime_b.clone())
.unwrap()
.iter()
.any(|x| x.status_code == 404));
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
src/utils/test.rs | Rust | use std::{collections::HashMap, str::FromStr};
use reqwest::Url;
use crate::config::{CoveAPIConfig, OpenapiSource, Runtime};
pub fn create_mock_config() -> CoveAPIConfig {
let mut env_vars = HashMap::new();
env_vars.insert("COVEAPI_DEBUG".to_string(), "1".to_string());
env_vars.insert("COVEAPI_APP_BASE_URL".to_string(), "http://example.com".to_string());
env_vars.insert("COVEAPI_OPENAPI_SOURCE".to_string(), "./example".to_string());
CoveAPIConfig::from_raw(&env_vars).unwrap()
}
pub fn create_mock_runtime() -> Runtime {
Runtime {
openapi_source: OpenapiSource::Url(Url::from_str("https://example.com").unwrap()),
app_base_url: Url::from_str("https://example.com").unwrap(),
port: 8080,
}
}
| yezz123/CoveAPI | 25 | OpenAPI-based test coverage analysis tool that helps teams improve integration test coverage in CI/CD pipelines | Rust | yezz123 | Yasser Tahiri | Yezz LLC. |
asgi_aws/__init__.py | Python | """Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨"""
__version__ = "2.0.0"
import asyncio
from typing import Any, Callable, Type, Union
from asgi_aws.service import find_service
from asgi_aws.services import Service
from asgi_aws.services.aws import AWS
from asgi_aws.types import ASGIApp, ASGICycle
class Asgi:
"""
This is the main entry point for the ASGI server, which is called by the AWS Lambda runtime, or by the AWS API Gateway, or by the AWS API Gateway REST API, or by the AWS API Gateway HTTP API
"""
def __init__(self, app: ASGIApp, http_cycle: Type[ASGICycle]) -> None:
self.app = app
self._http_cycle = http_cycle
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
def __call__(self, request: Any) -> Any:
cycle = self._http_cycle(request)
cycle(app=self.app)
return cycle.response
@classmethod
def entry_point(
cls, app: ASGIApp, service: Union[str, Service, None] = None
) -> Callable[..., Any]:
"""
:param app: The ASGI Application
:param service: The service type, which is either a string or an enum of type `Service`
:return: The entry point for the ASGI server
"""
if service is None:
service = find_service()
def entrypoint(event: Any, context: Any) -> Any:
return cls(app, AWS)(request={"event": event, "context": context})
return entrypoint
else:
service = ", ".join(x.value for x in Service)
raise ValueError(f"Unknown service: {service}")
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
asgi_aws/service.py | Python | import os
from typing import Union
from asgi_aws.services import Service
def find_service() -> Union[Service, None]:
# ref: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html
return Service.aws_lambda if "AWS_LAMBDA_FUNCTION_NAME" in os.environ else None
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
asgi_aws/services/__init__.py | Python | from enum import Enum
class Service(str, Enum):
aws_lambda = "AWS Lambda"
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
asgi_aws/services/aws.py | Python | from base64 import b64decode, b64encode
from typing import Any, Dict, Iterator
from urllib.parse import urlencode
from asgi_aws.services.http import HttpCycleBase
from asgi_aws.types import Message, Scope
Request = Dict[str, Dict[str, Any]]
Response = Dict[str, Any]
class AWS(HttpCycleBase[Request, Response]):
"""AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨"""
@property
def scope(self) -> Scope:
event = self.request["event"]
def gene_query_string() -> Iterator[str]:
if "multiValueQueryStringParameters" in event:
params = event["multiValueQueryStringParameters"] or {}
for key, values in params.items():
for vale in values:
yield urlencode({key: vale})
elif "queryStringParameters" in event:
params = event["queryStringParameters"] or {}
for key, values in params.items():
for vale in values.split(","):
yield urlencode({key: vale})
return
query_string = "&".join(gene_query_string()).encode()
if "httpMethod" in event:
method = event["httpMethod"]
else:
method = event["requestContext"]["http"]["method"]
if "path" in event:
path = event["path"]
else:
path = event["requestContext"]["http"]["path"]
if "multiValueHeaders" in event:
headers = tuple(
(k.lower().encode("latin-1"), (",".join(vs)).encode("latin-1"))
for k, vs in event["multiValueHeaders"].items()
)
else:
headers = tuple(
(k.lower().encode("latin-1"), v.encode("latin-1"))
for k, v in event["headers"].items()
)
if "cookies" in event:
cookies = ";".join(event["cookies"])
headers = headers + ((b"cookie", cookies.encode("latin-1")),)
return {
"type": "http",
"asgi": {"version": "3.0", "spec_version": "2.2"},
"http_version": "1.1",
"method": method,
"scheme": "http",
"path": path,
"query_string": query_string,
"headers": headers,
"server": None,
"client": None,
"state": self.state,
}
async def receive(self) -> Message:
event = self.request["event"]
body = event.get("body", "")
if body is None:
body = b""
elif event.get("isBase64Encoded", False):
body = b64decode(body)
else:
body = body.encode()
return {
"type": "http.request",
"body": body,
"more_body": False,
}
@property
def response(self) -> Response:
event = self.request["event"]
if "version" in event:
is_base64_encoded = True
body = b64encode(self.body).decode()
else:
is_base64_encoded = False
try:
body = self.body.decode()
except UnicodeDecodeError:
is_base64_encoded = True
body = b64encode(self.body).decode()
return {
"statusCode": self.status_code,
"headers": dict(self.headers),
"body": body,
"isBase64Encoded": is_base64_encoded,
}
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
asgi_aws/services/http.py | Python | import asyncio
from typing import Any, Generic, Iterable, MutableMapping, Tuple, TypeVar
from asgi_aws.types import ASGIApp, Message, Scope
Req = TypeVar("Req")
Res = TypeVar("Res")
class Lifespan:
def __init__(self, startup_event: asyncio.Event, shutdown_event: asyncio.Event):
self.state: MutableMapping[str, Any] = {}
self.should_exit = False
self.started = False
self.startup_event = startup_event
self.shutdown_event = shutdown_event
async def receive(self) -> Message:
if self.started:
await self.shutdown_event.wait()
return {"type": "lifespan.shutdown"}
else:
self.started = True
return {"type": "lifespan.startup"}
async def send(self, message: Message) -> None:
if message["type"] == "lifespan.startup.complete":
self.startup_event.set()
elif message["type"] == "lifespan.shutdown.complete":
pass
elif message["type"] in [
"lifespan.startup.failed",
"lifespan.shutdown.failed",
]:
self.shutdown_event.set()
self.should_exit = True
return None
async def run(self, app: ASGIApp) -> None:
try:
await app(
{"type": "lifespan", "asgi": {"version": "3.0"}, "state": self.state},
self.receive,
self.send,
)
except BaseException as e:
self.should_exit = True
self.startup_event.set()
raise e
class HttpCycleBase(Generic[Req, Res]):
def __init__(self, request: Req):
self.request = request
self.status_code = 200
self.headers: Iterable[Tuple[str, str]] = ()
self.body = b""
self.startup_event = asyncio.Event()
self.shutdown_event = asyncio.Event()
self.lifespan = Lifespan(self.startup_event, self.shutdown_event)
@property
def state(self) -> MutableMapping[str, Any]:
return self.lifespan.state
def __call__(self, app: ASGIApp) -> None:
loop = asyncio.get_event_loop()
lifespan_task = loop.create_task(self.lifespan.run(app))
main_task = loop.create_task(self.run(app))
loop.run_until_complete(main_task)
loop.run_until_complete(lifespan_task)
if self.lifespan.should_exit:
if err := lifespan_task.exception():
raise err
@property
def response(self) -> Res:
raise NotImplementedError
async def send(self, message: Message) -> None:
if message["type"] == "http.response.start":
self.status_code = message["status"]
self.headers = tuple(
(key.decode("latin-1"), value.decode("latin-1"))
for (key, value) in message["headers"]
)
elif message["type"] == "http.response.body":
self.body = message["body"]
return
async def run(self, app: ASGIApp) -> None:
try:
await self.startup_event.wait()
if self.lifespan.should_exit:
return
await app(self.scope, self.receive, self.send)
finally:
self.shutdown_event.set()
async def receive(self) -> Message:
raise NotImplementedError
@property
def scope(self) -> Scope:
raise NotImplementedError
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
asgi_aws/types.py | Python | from typing import Any, Awaitable, Callable, MutableMapping
from typing_extensions import Protocol
Message = MutableMapping[str, Any]
Scope = MutableMapping[str, Any]
Receive = Callable[[], Awaitable[Message]]
Send = Callable[[Message], Awaitable[None]]
Request = Any
Response = Any
class ASGIApp(Protocol):
"""ASGI Application, which is a callable that accepts a call function."""
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
...
class ASGICycle(Protocol):
"""ASGI Cycle, which is a callable that accepts a send, a receive and a run function."""
def __init__(self, request: Request) -> None:
... # pragma: nocover
def __call__(self, app: ASGIApp) -> None:
... # pragma: nocover
async def run(self, app: ASGIApp) -> None:
... # pragma: nocover
async def receive(self) -> Message:
... # pragma: nocover
async def send(self, message: Message) -> None:
... # pragma: nocover
@property
def scope(self) -> Scope:
... # pragma: nocover
@property
def response(self) -> Response:
... # pragma: nocover
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
scripts/clean.sh | Shell | #!/bin/sh -e
rm -f `find . -type f -name '*.py[co]' `
rm -f `find . -type f -name '*~' `
rm -f `find . -type f -name '.*~' `
rm -f `find . -type f -name .coverage`
rm -f `find . -type f -name ".coverage.*"`
rm -rf `find . -name __pycache__`
rm -rf `find . -type d -name '*.egg-info' `
rm -rf `find . -type d -name 'pip-wheel-metadata' `
rm -rf `find . -type d -name .pytest_cache`
rm -rf `find . -type d -name .ruff_cache`
rm -rf `find . -type d -name .cache`
rm -rf `find . -type d -name .mypy_cache`
rm -rf `find . -type d -name htmlcov`
rm -rf `find . -type d -name "*.egg-info"`
rm -rf `find . -type d -name build`
rm -rf `find . -type d -name dist`
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
scripts/format.sh | Shell | #!/usr/bin/env bash
set -e
set -x
pre-commit run --all-files --verbose --show-diff-on-failure
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
scripts/lint.sh | Shell | #!/usr/bin/env bash
set -e
set -x
mypy --show-error-codes asgi_aws
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
scripts/test.sh | Shell | #!/usr/bin/env bash
set -e
set -x
echo "ENV=${ENV}"
export PYTHONPATH=.
pytest --cov=asgi_aws --cov=tests
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
tests/template/app.py | Python | import contextlib
from typing import Optional
from fastapi import (
Cookie,
FastAPI,
File,
Form,
Header,
HTTPException,
Request,
UploadFile,
)
from fastapi.responses import PlainTextResponse, Response
from pydantic import BaseModel
class Item(BaseModel):
name: str
@contextlib.asynccontextmanager
async def lifespan(app):
yield {"message": "hello"}
app = FastAPI(lifespan=lifespan)
@app.get("/")
def root():
return {"Hello": "World"}
@app.get("/empty")
def empty():
return {}
@app.get("/empty/text")
def empty_text():
return ""
@app.get("/none")
def none():
return None
@app.get("/items/{item_id}")
def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
@app.post("/items")
def post_item(item: Item, authorization: Optional[str] = Header(None)):
if authorization is None or authorization != "Bearer foobar":
raise HTTPException(status_code=401)
return item
@app.get("/cookies")
def cookies(c1: Optional[str] = Cookie(None), c2: Optional[str] = Cookie(None)):
return {
"c1": c1,
"c2": c2,
}
@app.get("/text")
def text():
return PlainTextResponse("test message!")
@app.get("/image")
def image():
return Response(
content=b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x0c\x00\x00\x00\x0c\x08\x02\x00\x00\x00\xd9\x17\xcb\xb0\x00\x00\x00\x16IDATx\x9ccLIIa \x04\x98\x08\xaa\x18U4\x00\x8a\x00\x1c\xa2\x01D2\xdd\xa6B\x00\x00\x00\x00IEND\xaeB`\x82",
media_type="image/png",
)
@app.post("/form")
def form(token: str = Form(...)):
return {"token": token}
@app.post("/file")
async def file(file: bytes = File(...)):
return {"file_size": len(file)}
@app.post("/uploadfile")
async def uploadfile(file: UploadFile = File(...)):
return {"filename": file.filename}
@app.post("/file_and_form")
async def file_and_form(
file: bytes = File(...), fileb: UploadFile = File(...), token: str = Form(...)
):
return {
"file_size": len(file),
"token": token,
"fileb_content_type": fileb.content_type,
"filename": fileb.filename,
}
@app.get("/lifespan")
async def lifespan_(request: Request):
return {"message": request.state.message}
@contextlib.asynccontextmanager
async def fail_lifespan(app):
raise Exception("Expected failed")
failed_app = FastAPI(lifespan=fail_lifespan)
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
tests/test_app.py | Python | import base64
import pytest
@pytest.fixture()
def app():
from asgi_aws import Asgi
from tests.template.app import app
return Asgi.entry_point(app)
@pytest.fixture()
def failed_app():
from asgi_aws import Asgi
from tests.template.app import failed_app
return Asgi.entry_point(failed_app)
def test_simple(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/",
"stage": "Prod",
},
"resource": "/",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
assert res["isBase64Encoded"]
body = base64.b64decode(res["body"])
assert body == b'{"Hello":"World"}', body
def test_empty(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/empty",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/empty",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/empty",
"stage": "Prod",
},
"resource": "/empty",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b"{}", body
def test_empty_text(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/empty/text",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/empty/text",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/empty/text",
"stage": "Prod",
},
"resource": "/empty/text",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'""', body
def test_none(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/none",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/none",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/none",
"stage": "Prod",
},
"resource": "/none",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b"null", body
def test_items_get(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/items/1",
"pathParameters": {"item_id": "1"},
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/items/{item_id}",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/items/{item_id}",
"stage": "Prod",
},
"resource": "/items/{item_id}",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"item_id":1,"q":null}', body
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": {"q": ["aaa"]},
"path": "/items/1",
"pathParameters": {"item_id": "1"},
"queryStringParameters": {"q": "aaa"},
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/items/{item_id}",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/items/{item_id}",
"stage": "Prod",
},
"resource": "/items/{item_id}",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"item_id":1,"q":"aaa"}', body
def test_items_post(app):
event = {
"body": '{"name": "abc"}',
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Content-Length": "15",
"Content-Type": "application/json",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "POST",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Content-Length": ["15"],
"Content-Type": ["application/json"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/items",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "POST",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/items",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/items",
"stage": "Prod",
},
"resource": "/items",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 401, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
event = {
"body": '{"name": "abc"}',
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Authorization": "Bearer foobar",
"Connection": "keep-alive",
"Content-Length": "15",
"Content-Type": "application/json",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "POST",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Authorization": ["Bearer foobar"],
"Connection": ["keep-alive"],
"Content-Length": ["15"],
"Content-Type": ["application/json"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/items",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "POST",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/items",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/items",
"stage": "Prod",
},
"resource": "/items",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"name":"abc"}', body
def test_cookies(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/cookies",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/cookies",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/cookies",
"stage": "Prod",
},
"resource": "/cookies",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"c1":null,"c2":null}', body
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Cookie": "c1=123",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Cookie": ["c1=123"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/cookies",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/cookies",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/cookies",
"stage": "Prod",
},
"resource": "/cookies",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"c1":"123","c2":null}', body
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Cookie": "c1=123; c2=abc",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Cookie": ["c1=123; c2=abc"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/cookies",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/cookies",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/cookies",
"stage": "Prod",
},
"resource": "/cookies",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"c1":"123","c2":"abc"}', body
def test_text(app):
event = {
"body": None,
"headers": {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"X-Forwarded-Port": "3000",
"X-Forwarded-Proto": "http",
},
"httpMethod": "GET",
"isBase64Encoded": False,
"multiValueHeaders": {
"Accept": ["*/*"],
"Accept-Encoding": ["gzip, deflate, br"],
"Connection": ["keep-alive"],
"Host": ["127.0.0.1:3000"],
"User-Agent": ["python-requests/2.27.0"],
"X-Forwarded-Port": ["3000"],
"X-Forwarded-Proto": ["http"],
},
"multiValueQueryStringParameters": None,
"path": "/text",
"pathParameters": None,
"queryStringParameters": None,
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"domainName": "127.0.0.1:3000",
"extendedRequestId": None,
"httpMethod": "GET",
"identity": {
"accountId": None,
"apiKey": None,
"caller": None,
"cognitoAuthenticationProvider": None,
"cognitoAuthenticationType": None,
"cognitoIdentityPoolId": None,
"sourceIp": "127.0.0.1",
"user": None,
"userAgent": "Custom User Agent String",
"userArn": None,
},
"path": "/text",
"protocol": "HTTP/1.1",
"requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
"requestTime": "19/Feb/2022:07:07:23 +0000",
"requestTimeEpoch": 1645254443,
"resourceId": "123456",
"resourcePath": "/text",
"stage": "Prod",
},
"resource": "/text",
"stageVariables": None,
"version": "1.0",
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"].startswith("text/plain"), res["headers"]
body = base64.b64decode(res["body"])
assert body == b"test message!", body
def test_image(app):
    """GET /image via an API Gateway REST API (v1.0) proxy event.

    Expects a 200 image/png response whose base64-decoded body is an
    exact small PNG byte string.
    """
    event = {
        "body": None,
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "GET",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/image",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "GET",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/image",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/image",
            "stage": "Prod",
        },
        "resource": "/image",
        "stageVariables": None,
        "version": "1.0",
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "image/png", res["headers"]
    # Binary payloads must round-trip through base64 unchanged.
    body = base64.b64decode(res["body"])
    assert (
        body
        == b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x0c\x00\x00\x00\x0c\x08\x02\x00\x00\x00\xd9\x17\xcb\xb0\x00\x00\x00\x16IDATx\x9ccLIIa \x04\x98\x08\xaa\x18U4\x00\x8a\x00\x1c\xa2\x01D2\xdd\xa6B\x00\x00\x00\x00IEND\xaeB`\x82"
    ), body
def test_form(app):
    """POST /form with a multipart/form-data body (v1.0 event).

    The multipart payload carries a single "token" field; the endpoint
    must echo it back as JSON: {"token":"abc"}.
    """
    event = {
        # Raw multipart body; the boundary below must match Content-Type.
        "body": '--5a3f74f74809037868662ef4311302e3\r\nContent-Disposition: form-data; name="token"\r\n\r\nabc\r\n--5a3f74f74809037868662ef4311302e3--\r\n',
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Content-Length": "127",
            "Content-Type": "multipart/form-data; boundary=5a3f74f74809037868662ef4311302e3",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "POST",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Content-Length": ["127"],
            "Content-Type": [
                "multipart/form-data; boundary=5a3f74f74809037868662ef4311302e3"
            ],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/form",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "POST",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/form",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/form",
            "stage": "Prod",
        },
        "resource": "/form",
        "stageVariables": None,
        "version": "1.0",
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"token":"abc"}', body
def test_file(app):
    """POST /file with a multipart file upload (v1.0 event).

    Uploads a 3-byte text file ("abc"); the endpoint must report its
    size as JSON: {"file_size":3}.
    """
    event = {
        # Raw multipart body with one file part named "file".
        "body": '--0f8c93977acb69401587fd5aaf2ee9c1\r\nContent-Disposition: form-data; name="file"; filename="test.file"\r\nContent-Type: text/plain\r\n\r\nabc\r\n--0f8c93977acb69401587fd5aaf2ee9c1--\r\n',
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Content-Length": "174",
            "Content-Type": "multipart/form-data; boundary=0f8c93977acb69401587fd5aaf2ee9c1",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "POST",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Content-Length": ["174"],
            "Content-Type": [
                "multipart/form-data; boundary=0f8c93977acb69401587fd5aaf2ee9c1"
            ],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/file",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "POST",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/file",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/file",
            "stage": "Prod",
        },
        "resource": "/file",
        "stageVariables": None,
        "version": "1.0",
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"file_size":3}', body
def test_uploadfile(app):
    """POST /uploadfile with a multipart file upload (v1.0 event).

    The endpoint must echo the uploaded filename as JSON:
    {"filename":"test.file"}.
    """
    event = {
        "body": '--d37f40a4a2ea35b98a812aed99a32d66\r\nContent-Disposition: form-data; name="file"; filename="test.file"\r\nContent-Type: text/plain\r\n\r\nabc\r\n--d37f40a4a2ea35b98a812aed99a32d66--\r\n',
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Content-Length": "174",
            "Content-Type": "multipart/form-data; boundary=d37f40a4a2ea35b98a812aed99a32d66",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "POST",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Content-Length": ["174"],
            "Content-Type": [
                "multipart/form-data; boundary=d37f40a4a2ea35b98a812aed99a32d66"
            ],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/uploadfile",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "POST",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/uploadfile",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/uploadfile",
            "stage": "Prod",
        },
        "resource": "/uploadfile",
        "stageVariables": None,
        "version": "1.0",
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"filename":"test.file"}', body
def test_file_and_form(app):
    """POST /file_and_form mixing two file parts and a form field (v1.0).

    The multipart body carries "file" (3 bytes), "fileb" (text/csv) and
    a "token" field; the endpoint must summarize all three in one JSON
    object.
    """
    event = {
        # Three multipart sections separated by the same boundary.
        "body": '--20de67a24309dc60edf59fe113e9edb5\r\nContent-Disposition: form-data; name="file"; filename="test.file"\r\nContent-Type: text/plain\r\n\r\nabc\r\n--20de67a24309dc60edf59fe113e9edb5\r\nContent-Disposition: form-data; name="fileb"; filename="test.fileb"\r\nContent-Type: text/csv\r\n\r\nabcb\r\n--20de67a24309dc60edf59fe113e9edb5\r\nContent-Disposition: form-data; name="token"\r\n\r\nfoo\r\n--20de67a24309dc60edf59fe113e9edb5--\r\n',
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Content-Length": "400",
            "Content-Type": "multipart/form-data; boundary=20de67a24309dc60edf59fe113e9edb5",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "POST",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Content-Length": ["400"],
            "Content-Type": [
                "multipart/form-data; boundary=20de67a24309dc60edf59fe113e9edb5"
            ],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/file_and_form",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "POST",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/file_and_form",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/file_and_form",
            "stage": "Prod",
        },
        "resource": "/file_and_form",
        "stageVariables": None,
        "version": "1.0",
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    # Expected JSON spelled as adjacent byte literals for readability.
    assert body == (
        b"{"
        b'"file_size":3,'
        b'"token":"foo",'
        b'"fileb_content_type":"text/csv",'
        b'"filename":"test.fileb"'
        b"}"
    ), body
def test_lifespan(app):
    """GET /lifespan (v1.0 event) after a successful ASGI lifespan startup.

    Expects a 200 JSON response {"message":"hello"} — presumably a value
    set during the app's lifespan startup (TODO confirm against
    tests/template/app).
    """
    event = {
        "body": None,
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "GET",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/lifespan",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "GET",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/lifespan",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/lifespan",
            "stage": "Prod",
        },
        "resource": "/",
        "stageVariables": None,
        "version": "1.0",
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    # The adapter flags the base64-encoded body explicitly.
    assert res["isBase64Encoded"]
    body = base64.b64decode(res["body"])
    assert body == b'{"message":"hello"}', body
def test_failed_lifespan(failed_app):
    """Invoking an app whose lifespan startup fails must raise (v1.0 event)."""
    event = {
        "body": None,
        "headers": {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "X-Forwarded-Port": "3000",
            "X-Forwarded-Proto": "http",
        },
        "httpMethod": "GET",
        "isBase64Encoded": False,
        "multiValueHeaders": {
            "Accept": ["*/*"],
            "Accept-Encoding": ["gzip, deflate, br"],
            "Connection": ["keep-alive"],
            "Host": ["127.0.0.1:3000"],
            "User-Agent": ["python-requests/2.27.0"],
            "X-Forwarded-Port": ["3000"],
            "X-Forwarded-Proto": ["http"],
        },
        "multiValueQueryStringParameters": None,
        "path": "/lifespan",
        "pathParameters": None,
        "queryStringParameters": None,
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "domainName": "127.0.0.1:3000",
            "extendedRequestId": None,
            "httpMethod": "GET",
            "identity": {
                "accountId": None,
                "apiKey": None,
                "caller": None,
                "cognitoAuthenticationProvider": None,
                "cognitoAuthenticationType": None,
                "cognitoIdentityPoolId": None,
                "sourceIp": "127.0.0.1",
                "user": None,
                "userAgent": "Custom User Agent String",
                "userArn": None,
            },
            "path": "/lifespan",
            "protocol": "HTTP/1.1",
            "requestId": "884737a5-13cc-4308-b61c-54e068c8649b",
            "requestTime": "19/Feb/2022:07:07:23 +0000",
            "requestTimeEpoch": 1645254443,
            "resourceId": "123456",
            "resourcePath": "/lifespan",
            "stage": "Prod",
        },
        "resource": "/",
        "stageVariables": None,
        "version": "1.0",
    }
    # NOTE(review): bare Exception is broad; narrow it once the exact
    # exception type raised by the failed lifespan is confirmed.
    with pytest.raises(Exception):
        failed_app(event, {})
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
tests/test_http.py | Python | import base64
import pytest
@pytest.fixture()
def app():
    # Lambda-style handler wrapping the template ASGI app via the adapter.
    from asgi_aws import Asgi
    from tests.template.app import app

    return Asgi.entry_point(app)
@pytest.fixture()
def failed_app():
    # Handler wrapping an app whose lifespan startup is expected to fail.
    from asgi_aws import Asgi
    from tests.template.app import failed_app

    return Asgi.entry_point(failed_app)
def test_simple(app):
    """GET / via an API Gateway HTTP API (payload v2.0) event.

    Expects a 200 JSON response whose base64-decoded body is
    {"Hello":"World"}.
    """
    # Synthetic API Gateway v2.0 event (requestContext.http carries
    # method/path instead of the v1.0 top-level fields).
    event = {
        "version": "2.0",
        "routeKey": "GET /",
        "rawPath": "/",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept": "*/*",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "d13a6872-ceb0-4fdf-85d0-f1e84f8e2b19",
            "routeKey": "GET /",
            "stage": "$default",
            "time": "19/Feb/2022:06:36:04 +0000",
            "timeEpoch": 1645252564,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    assert res["isBase64Encoded"]
    body = base64.b64decode(res["body"])
    assert body == b'{"Hello":"World"}', body
def test_empty(app):
    """GET /empty (v2.0 event): a 200 JSON response with an empty object body."""
    event = {
        "version": "2.0",
        "routeKey": "GET /empty",
        "rawPath": "/empty",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/empty",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "3f0b3323-c570-4ede-9044-fd0bf3128ba8",
            "routeKey": "GET /empty",
            "stage": "$default",
            "time": "19/Feb/2022:06:55:52 +0000",
            "timeEpoch": 1645253752,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    # An empty dict serializes to the two-byte JSON object.
    assert body == b"{}", body
def test_empty_text(app):
    """GET /empty/text (v2.0 event): an empty string serialized as JSON '""'."""
    event = {
        "version": "2.0",
        "routeKey": "GET /empty/text",
        "rawPath": "/empty/text",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/empty/text",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "3f0b3323-c570-4ede-9044-fd0bf3128ba8",
            "routeKey": "GET /empty/text",
            "stage": "$default",
            "time": "19/Feb/2022:06:55:52 +0000",
            "timeEpoch": 1645253752,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    # JSON-encoded empty string: two double-quote characters.
    assert body == b'""', body
def test_none(app):
    """GET /none (v2.0 event): a Python None serialized as JSON null."""
    event = {
        "version": "2.0",
        "routeKey": "GET /none",
        "rawPath": "/none",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/none",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "3f0b3323-c570-4ede-9044-fd0bf3128ba8",
            "routeKey": "GET /none",
            "stage": "$default",
            "time": "19/Feb/2022:06:55:52 +0000",
            "timeEpoch": 1645253752,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b"null", body
def test_items_get(app):
    """GET /items/<item_id> (v2.0 events), with and without a query string.

    First request has no query parameters (q is null); the second passes
    q=aaa and expects it echoed back alongside the coerced int item_id.
    """
    # Request 1: path parameter only, no query string.
    event = {
        "version": "2.0",
        "routeKey": "GET /items/<item_id>",
        "rawPath": "/items/1",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/items/1",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "3f0b3323-c570-4ede-9044-fd0bf3128ba8",
            "routeKey": "GET /items/<item_id>",
            "stage": "$default",
            "time": "19/Feb/2022:06:55:52 +0000",
            "timeEpoch": 1645253752,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {"item_id": "1"},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"item_id":1,"q":null}', body
    # Request 2: same route with query string q=aaa.
    event = {
        "version": "2.0",
        "routeKey": "GET /items/<item_id>",
        "rawPath": "/items/1",
        "rawQueryString": "q=aaa",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {"q": "aaa"},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/items/1",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "GET /items/<item_id>",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {"item_id": "1"},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"item_id":1,"q":"aaa"}', body
def test_items_post(app):
    """POST /items (v2.0 events): 401 without auth, 200 with a bearer token.

    The first request omits the Authorization header and must be
    rejected; the second adds "Bearer foobar" and must echo the posted
    JSON body.
    """
    # Request 1: no Authorization header -> 401.
    event = {
        "version": "2.0",
        "routeKey": "POST /items",
        "rawPath": "/items",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Content-Length": "15",
            "Content-Type": "application/json",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "POST",
                "path": "/items",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "POST /items",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": '{"name": "abc"}',
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 401, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    # Request 2: same payload with Authorization -> 200 echo.
    event = {
        "version": "2.0",
        "routeKey": "POST /items",
        "rawPath": "/items",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Authorization": "Bearer foobar",
            "Content-Length": "15",
            "Content-Type": "application/json",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "POST",
                "path": "/items",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "POST /items",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": '{"name": "abc"}',
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"name":"abc"}', body
def test_cookies(app):
    """GET /cookies (v2.0 events) with zero, one, and two cookies.

    Checks that cookies delivered via the v2.0 "cookies" list (and the
    Cookie header) are surfaced to the app: absent cookies come back as
    null in the JSON response.
    """
    # Request 1: no cookies at all.
    event = {
        "version": "2.0",
        "routeKey": "GET /cookies",
        "rawPath": "/cookies",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/cookies",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "GET /cookies",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"c1":null,"c2":null}', body
    # Request 2: a single cookie c1.
    event = {
        "version": "2.0",
        "routeKey": "GET /cookies",
        "rawPath": "/cookies",
        "rawQueryString": "",
        "cookies": ["c1=123"],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Cookie": "c1=123",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/cookies",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "GET /cookies",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"c1":"123","c2":null}', body
    # Request 3: two cookies c1 and c2.
    event = {
        "version": "2.0",
        "routeKey": "GET /cookies",
        "rawPath": "/cookies",
        "rawQueryString": "",
        "cookies": ["c1=123", "c2=abc"],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Cookie": "c1=123; c2=abc",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/cookies",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "GET /cookies",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"c1":"123","c2":"abc"}', body
def test_text(app):
    """GET /text via an API Gateway HTTP API (v2.0) event.

    Expects a 200 text/plain response decoding to b"test message!".
    """
    event = {
        "version": "2.0",
        "routeKey": "GET /text",
        "rawPath": "/text",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/text",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "GET /text",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    # Prefix check only: the content-type may carry a charset parameter.
    assert res["headers"]["content-type"].startswith("text/plain"), res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b"test message!", body
def test_image(app):
    """GET /image via an API Gateway HTTP API (v2.0) event.

    Expects a 200 image/png response whose base64-decoded body is an
    exact small PNG byte string.
    """
    event = {
        "version": "2.0",
        "routeKey": "GET /image",
        "rawPath": "/image",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "GET",
                "path": "/image",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "GET /image",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": "",
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "image/png", res["headers"]
    # Binary payloads must round-trip through base64 unchanged.
    body = base64.b64decode(res["body"])
    assert (
        body
        == b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x0c\x00\x00\x00\x0c\x08\x02\x00\x00\x00\xd9\x17\xcb\xb0\x00\x00\x00\x16IDATx\x9ccLIIa \x04\x98\x08\xaa\x18U4\x00\x8a\x00\x1c\xa2\x01D2\xdd\xa6B\x00\x00\x00\x00IEND\xaeB`\x82"
    ), body
def test_form(app):
    """POST /form with a multipart/form-data body (v2.0 event).

    The multipart payload carries a single "token" field; the endpoint
    must echo it back as JSON: {"token":"abc"}.
    """
    event = {
        "version": "2.0",
        "routeKey": "POST /form",
        "rawPath": "/form",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Content-Length": "127",
            "Content-Type": "multipart/form-data; boundary=25372e895be785cd05ddf0a169c03ed4",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "POST",
                "path": "/form",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "POST /form",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        # Raw multipart body; the boundary must match Content-Type above.
        "body": '--25372e895be785cd05ddf0a169c03ed4\r\nContent-Disposition: form-data; name="token"\r\n\r\nabc\r\n--25372e895be785cd05ddf0a169c03ed4--\r\n',
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"token":"abc"}', body
def test_file(app):
    """POST /file with a multipart file upload (v2.0 event).

    Uploads a 3-byte text file ("abc"); the endpoint must report its
    size as JSON: {"file_size":3}.
    """
    event = {
        "version": "2.0",
        "routeKey": "POST /file",
        "rawPath": "/file",
        "rawQueryString": "",
        "cookies": [],
        "headers": {
            "Host": "127.0.0.1:3000",
            "User-Agent": "python-requests/2.27.0",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept": "*/*",
            "Connection": "keep-alive",
            "Content-Length": "174",
            "Content-Type": "multipart/form-data; boundary=870164ee14ae53fa9b1c67404812ce2b",
            "X-Forwarded-Proto": "http",
            "X-Forwarded-Port": "3000",
        },
        "queryStringParameters": {},
        "requestContext": {
            "accountId": "123456789012",
            "apiId": "1234567890",
            "http": {
                "method": "POST",
                "path": "/file",
                "protocol": "HTTP/1.1",
                "sourceIp": "127.0.0.1",
                "userAgent": "Custom User Agent String",
            },
            "requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
            "routeKey": "POST /file",
            "stage": "$default",
            "time": "19/Feb/2022:06:59:34 +0000",
            "timeEpoch": 1645253974,
            "domainName": "localhost",
            "domainPrefix": "localhost",
        },
        "body": '--870164ee14ae53fa9b1c67404812ce2b\r\nContent-Disposition: form-data; name="file"; filename="test.file"\r\nContent-Type: text/plain\r\n\r\nabc\r\n--870164ee14ae53fa9b1c67404812ce2b--\r\n',
        "pathParameters": {},
        "stageVariables": None,
        "isBase64Encoded": False,
    }
    res = app(event, {})
    assert res["statusCode"] == 200, res["statusCode"]
    assert res["headers"]["content-type"] == "application/json", res["headers"]
    body = base64.b64decode(res["body"])
    assert body == b'{"file_size":3}', body
def test_uploadfile(app):
event = {
"version": "2.0",
"routeKey": "POST /uploadfile",
"rawPath": "/uploadfile",
"rawQueryString": "",
"cookies": [],
"headers": {
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"Accept-Encoding": "gzip, deflate, br",
"Accept": "*/*",
"Connection": "keep-alive",
"Content-Length": "174",
"Content-Type": "multipart/form-data; boundary=32505a09b173e369a0fbe7e9618e4f05",
"X-Forwarded-Proto": "http",
"X-Forwarded-Port": "3000",
},
"queryStringParameters": {},
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"http": {
"method": "POST",
"path": "/uploadfile",
"protocol": "HTTP/1.1",
"sourceIp": "127.0.0.1",
"userAgent": "Custom User Agent String",
},
"requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
"routeKey": "POST /uploadfile",
"stage": "$default",
"time": "19/Feb/2022:06:59:34 +0000",
"timeEpoch": 1645253974,
"domainName": "localhost",
"domainPrefix": "localhost",
},
"body": '--32505a09b173e369a0fbe7e9618e4f05\r\nContent-Disposition: form-data; name="file"; filename="test.file"\r\nContent-Type: text/plain\r\n\r\nabc\r\n--32505a09b173e369a0fbe7e9618e4f05--\r\n',
"pathParameters": {},
"stageVariables": None,
"isBase64Encoded": False,
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"filename":"test.file"}', body
def test_file_and_form(app):
event = {
"version": "2.0",
"routeKey": "POST /file_and_form",
"rawPath": "/file_and_form",
"rawQueryString": "",
"cookies": [],
"headers": {
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"Accept-Encoding": "gzip, deflate, br",
"Accept": "*/*",
"Connection": "keep-alive",
"Content-Length": "400",
"Content-Type": "multipart/form-data; boundary=2d86210d57cf12c027cf46dfe1321668",
"X-Forwarded-Proto": "http",
"X-Forwarded-Port": "3000",
},
"queryStringParameters": {},
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"http": {
"method": "POST",
"path": "/file_and_form",
"protocol": "HTTP/1.1",
"sourceIp": "127.0.0.1",
"userAgent": "Custom User Agent String",
},
"requestId": "32029027-5f43-496f-9dfa-0c04c0d7fbe0",
"routeKey": "POST /file_and_form",
"stage": "$default",
"time": "19/Feb/2022:06:59:34 +0000",
"timeEpoch": 1645253974,
"domainName": "localhost",
"domainPrefix": "localhost",
},
"body": '--2d86210d57cf12c027cf46dfe1321668\r\nContent-Disposition: form-data; name="file"; filename="test.file"\r\nContent-Type: text/plain\r\n\r\nabc\r\n--2d86210d57cf12c027cf46dfe1321668\r\nContent-Disposition: form-data; name="fileb"; filename="test.fileb"\r\nContent-Type: text/csv\r\n\r\nabcb\r\n--2d86210d57cf12c027cf46dfe1321668\r\nContent-Disposition: form-data; name="token"\r\n\r\nfoo\r\n--2d86210d57cf12c027cf46dfe1321668--\r\n',
"pathParameters": {},
"stageVariables": None,
"isBase64Encoded": False,
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == (
b"{"
b'"file_size":3,'
b'"token":"foo",'
b'"fileb_content_type":"text/csv",'
b'"filename":"test.fileb"'
b"}"
), body
def test_lifespan(app):
event = {
"version": "2.0",
"routeKey": "GET /lifespan",
"rawPath": "/lifespan",
"rawQueryString": "",
"cookies": [],
"headers": {
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"Accept-Encoding": "gzip, deflate, br",
"Accept": "*/*",
"Connection": "keep-alive",
"X-Forwarded-Proto": "http",
"X-Forwarded-Port": "3000",
},
"queryStringParameters": {},
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"http": {
"method": "GET",
"path": "/lifespan",
"protocol": "HTTP/1.1",
"sourceIp": "127.0.0.1",
"userAgent": "Custom User Agent String",
},
"requestId": "3f0b3323-c570-4ede-9044-fd0bf3128ba8",
"routeKey": "GET /lifespan",
"stage": "$default",
"time": "19/Feb/2022:06:55:52 +0000",
"timeEpoch": 1645253752,
"domainName": "localhost",
"domainPrefix": "localhost",
},
"body": "",
"pathParameters": {},
"stageVariables": None,
"isBase64Encoded": False,
}
res = app(event, {})
assert res["statusCode"] == 200, res["statusCode"]
assert res["headers"]["content-type"] == "application/json", res["headers"]
body = base64.b64decode(res["body"])
assert body == b'{"message":"hello"}', body
def test_failed_lifespan(failed_app):
event = {
"version": "2.0",
"routeKey": "GET /lifespan",
"rawPath": "/lifespan",
"rawQueryString": "",
"cookies": [],
"headers": {
"Host": "127.0.0.1:3000",
"User-Agent": "python-requests/2.27.0",
"Accept-Encoding": "gzip, deflate, br",
"Accept": "*/*",
"Connection": "keep-alive",
"X-Forwarded-Proto": "http",
"X-Forwarded-Port": "3000",
},
"queryStringParameters": {},
"requestContext": {
"accountId": "123456789012",
"apiId": "1234567890",
"http": {
"method": "GET",
"path": "/lifespan",
"protocol": "HTTP/1.1",
"sourceIp": "127.0.0.1",
"userAgent": "Custom User Agent String",
},
"requestId": "3f0b3323-c570-4ede-9044-fd0bf3128ba8",
"routeKey": "GET /lifespan",
"stage": "$default",
"time": "19/Feb/2022:06:55:52 +0000",
"timeEpoch": 1645253752,
"domainName": "localhost",
"domainPrefix": "localhost",
},
"body": "",
"pathParameters": {},
"stageVariables": None,
"isBase64Encoded": False,
}
with pytest.raises(Exception):
failed_app(event, {})
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
tests/test_version.py | Python | import asgi_aws
def test_version() -> None:
assert asgi_aws.__version__ == "2.0.0"
| yezz123/asgi-aws | 29 | Build API with ASGI in AWS Lambda with API Gateway HTTP API or REST API, or with Function URL ✨ | Python | yezz123 | Yasser Tahiri | Yezz LLC. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.