diff --git a/lazy-lock.json b/lazy-lock.json
index be190ee..1b46110 100644
--- a/lazy-lock.json
+++ b/lazy-lock.json
@@ -9,7 +9,7 @@
   "cmp-nvim-lsp-document-symbol": { "branch": "main", "commit": "f94f7ba948e32cd302caba1c2ca3f7c697fb4fcf" },
   "cmp-nvim-lsp-signature-help": { "branch": "main", "commit": "031e6ba70b0ad5eee49fd2120ff7a2e325b17fa7" },
   "cmp_luasnip": { "branch": "master", "commit": "98d9cb5c2c38532bd9bdb481067b20fea8f32e90" },
-  "codecompanion.nvim": { "branch": "main", "commit": "67a747eeab28456971979c8ee03c648135e8687c" },
+  "codecompanion.nvim": { "branch": "main", "commit": "9c3e01a5ff933566775119121e68c318fcfe219c" },
   "copilot-cmp": { "branch": "master", "commit": "15fc12af3d0109fa76b60b5cffa1373697e261d1" },
   "copilot-lualine": { "branch": "main", "commit": "dc4b8ed0f75bc2557b3158c526624bf04ad233ea" },
   "copilot.lua": { "branch": "master", "commit": "30321e33b03cb924fdcd6a806a0dc6fa0b0eafb9" },
@@ -27,7 +27,7 @@
   "lspkind.nvim": { "branch": "master", "commit": "d79a1c3299ad0ef94e255d045bed9fa26025dab6" },
   "lualine.nvim": { "branch": "master", "commit": "f4f791f67e70d378a754d02da068231d2352e5bc" },
   "mini.diff": { "branch": "main", "commit": "bc3a7be30fd45ed4961ea90de1d9d04637cdeae6" },
-  "mini.nvim": { "branch": "main", "commit": "dc1775613f672e6a804577945813353c5c4e6fe5" },
+  "mini.nvim": { "branch": "main", "commit": "f89f4e20dc998d8f0cb8b21734877a64a9adee92" },
   "noice.nvim": { "branch": "main", "commit": "0427460c2d7f673ad60eb02b35f5e9926cf67c59" },
   "none-ls.nvim": { "branch": "main", "commit": "f41624ea1a73f020ddbd33438f74abb95ea17d55" },
   "nui.nvim": { "branch": "main", "commit": "53e907ffe5eedebdca1cd503b00aa8692068ca46" },
diff --git a/lua/plugins/codecompanion.lua b/lua/plugins/codecompanion.lua
index 1b17161..e06e28f 100644
--- a/lua/plugins/codecompanion.lua
+++ b/lua/plugins/codecompanion.lua
@@ -32,10 +32,24 @@ return {
       llama_cpp = function()
         return require("codecompanion.adapters").extend("openai_compatible", {
           name = "llama-cpp",
+          opts = {
+            stream = true,
+          },
           schema = {
             model = {
               default = "qwen2.5-coder-14b-instruct",
             },
+            temperature = {
+              order = 2,
+              mapping = "parameters",
+              type = "number",
+              optional = true,
+              default = 0.2,
+              desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
+              validate = function(n)
+                return n >= 0 and n <= 2, "Must be between 0 and 2"
+              end,
+            },
           },
           env = {
             url = "http://localhost:8888",
@@ -46,9 +60,12 @@ return {
     },
     strategies = {
       chat = {
-        adapter = "copilot",
+        adapter = "llama_cpp",
         roles = {
-          llm = " Assistant",
+          -- llm = " Assistant",
+          llm = function(adapter)
+            return "CodeCompanion (" .. adapter.formatted_name .. ")"
+          end,
           user = " User",
         },
         slash_commands = {
@@ -95,14 +112,41 @@ return {
         show_token_count = true, -- Show the token count for each response?
         start_in_insert_mode = false, -- Open the chat buffer in insert mode?
       },
+      window = {
+        layout = "vertical",
+        position = nil,
+        border = "rounded",
+        height = 0.45,
+        width = 0.45,
+        relative = "editor",
+        opts = {
+          breakindent = true,
+          cursorcolumn = false,
+          cursorline = false,
+          foldcolumn = "0",
+          linebreak = true,
+          list = false,
+          numberwidth = 1,
+          signcolumn = "no",
+          spell = false,
+          wrap = true,
+        },
+      },
       diff = {
         enabled = true,
         provider = "mini_diff",
       },
+      ---Customize how tokens are displayed
+      ---@param tokens number
+      ---@param adapter CodeCompanion.Adapter
+      ---@return string
+      token_count = function(tokens, adapter)
+        return " (" .. tokens .. " tokens)"
+      end,
     },
     opts = {
-      -- log_level = "DEBUG",
-      log_level = "TRACE",
+      log_level = "DEBUG",
+      -- log_level = "TRACE",
     },
   },
   init = function()