**Note:** For Codeium code completion, you need to add `Exafunction/codeium.nvim` to the plugin's dependencies:

```lua
dependencies = { "nvim-lua/plenary.nvim", "MunifTanjim/nui.nvim", "Exafunction/codeium.nvim" }
```
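For reference, a complete lazy.nvim spec with the Codeium dependency might look like the sketch below (the `config` body is a placeholder for your own setup):

```lua
-- a minimal sketch of a lazy.nvim spec for Codeium-backed completion;
-- the config body is a placeholder for your own llm.nvim options
{
  "Kurama622/llm.nvim",
  dependencies = {
    "nvim-lua/plenary.nvim",
    "MunifTanjim/nui.nvim",
    "Exafunction/codeium.nvim", -- needed only for the Codeium backend
  },
  config = function()
    require("llm").setup({
      -- your llm.nvim options here
    })
  end,
}
```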
**Note:** Your model needs to support FIM (Fill-in-the-Middle). FIM is worth taking advantage of for completion: it can fill in code based on both the preceding and following context, rather than only continuing from the text before the cursor. Also note that the URL used for code completion is usually different from the URL used for chat tasks.
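As a hedged sketch of what that separation can look like in practice (the `url`, `model`, and `api_type` field names below are assumptions based on the shape of llm.nvim tool configs, and both endpoints are hypothetical; check the plugin docs for the exact option names):

```lua
-- a sketch only: giving the Completion tool its own FIM endpoint,
-- separate from the chat endpoint. Field names `url`, `model`, and
-- `api_type` are assumptions -- verify them against the llm.nvim docs.
require("llm").setup({
  url = "https://api.example.com/v1/chat/completions", -- chat endpoint (hypothetical)
  app_handler = {
    Completion = {
      opts = {
        api_type = "ollama",                           -- assumption
        url = "http://localhost:11434/v1/completions", -- completion endpoint (hypothetical)
        model = "qwen2.5-coder",                       -- any FIM-capable model
      },
    },
  },
})
```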
- You can use an `autocmd` to enable the completion feature of llm.nvim:
```lua
vim.api.nvim_create_autocmd({ "VimEnter" }, {
  callback = function()
    vim.api.nvim_command("LLMAppHandler Completion")
  end,
})
```
- You can make llm.nvim's completion take effect immediately by disabling lazy loading. (The Completion AI tool also requires setting `auto_trigger = true`; see the sketch after the spec below.)
```lua
{
  "Kurama622/llm.nvim",
  dependencies = { "nvim-lua/plenary.nvim", "MunifTanjim/nui.nvim" },
  lazy = false,
  config = function()
    ...
  end,
}
```
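As a minimal sketch of where `auto_trigger` goes (the `app_handler` placement is an assumption drawn from llm.nvim's tool-config shape; verify it against the plugin docs):

```lua
-- a minimal sketch: enable auto-triggered completion for the Completion tool
require("llm").setup({
  app_handler = {
    Completion = {
      opts = {
        auto_trigger = true, -- required for completion to fire as you type
      },
    },
  },
})
```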
The Completion AI tool requires setting `style = "virtual_text"`.

Set the key mappings for `virtual_text`:
```lua
Completion = {
  opts = {
    keymap = {
      virtual_text = {
        accept = {
          mode = "i",
          keys = "<A-a>",
        },
        next = {
          mode = "i",
          keys = "<A-n>",
        },
        prev = {
          mode = "i",
          keys = "<A-p>",
        },
        toggle = {
          mode = "n",
          keys = "<leader>cp",
        },
      },
    },
  },
}
```
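With these mappings, `<A-a>` accepts the current suggestion while `<A-n>` and `<A-p>` cycle to the next and previous suggestions (all in insert mode), and `<leader>cp` toggles completion on and off from normal mode.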
The Completion AI tool requires setting `style = "blink.cmp"`.

- blink.cmp config:
```lua
{
  "saghen/blink.cmp",
  opts = {
    completion = {
      trigger = {
        prefetch_on_insert = false,
        -- allow triggering by whitespace
        show_on_blocked_trigger_characters = {},
      },
    },
    keymap = {
      ["<C-y>"] = {
        function(cmp)
          cmp.show({ providers = { "llm" } })
        end,
      },
    },
    sources = {
      -- if you want to use auto-complete
      default = { "llm" },
      providers = {
        llm = {
          name = "llm",
          module = "llm.common.completion.frontends.blink",
          timeout_ms = 10000,
          score_offset = 100,
          async = true,
        },
      },
    },
  },
}
```
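With this config, `default = { "llm" }` enables automatic completion from llm.nvim; if you prefer manual completion only, drop `"llm"` from `default` and press `<C-y>` in insert mode to request suggestions from the llm provider alone.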
The Completion AI tool requires setting `style = "nvim-cmp"`.
```lua
{
  "hrsh7th/nvim-cmp",
  optional = true,
  opts = function(_, opts)
    -- if you wish to use autocomplete
    table.insert(opts.sources, 1, {
      name = "llm",
      group_index = 1,
      priority = 100,
    })
    opts.performance = {
      -- It is recommended to increase the timeout duration, since LLMs
      -- typically respond more slowly than other completion sources.
      -- This is not needed if you only use manual completion.
      fetching_timeout = 5000,
    }
  end,
},
```
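If you prefer manual completion with nvim-cmp, you can bind a key that queries only the llm source using nvim-cmp's `cmp.complete()` config override. A short sketch (the `<C-y>` binding mirrors the blink.cmp example above and is just a suggestion):

```lua
-- manual completion from the llm source only; nvim-cmp's complete()
-- accepts a config override that restricts the sources for this call
local cmp = require("cmp")
vim.keymap.set("i", "<C-y>", function()
  cmp.complete({
    config = {
      sources = {
        { name = "llm" },
      },
    },
  })
end, { desc = "Trigger llm completion" })
```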