Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 42 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -153,6 +153,7 @@ What is 1 + 11
### Models

You can list available models with `:CopilotChatModels` command. Model determines the AI model used for the chat.
You can set the model in the prompt by using `$` followed by the model name.
Default models are:

- `gpt-4o` - This is the default Copilot Chat model. It is a versatile, multimodal model that excels in both text and image processing and is designed to provide fast, reliable responses. It also has superior performance in non-English languages. Gpt-4o is hosted on Azure.
Expand All @@ -176,11 +177,14 @@ You can install more agents from [here](https://github.com/marketplace?type=apps

Contexts determine what additional information (such as buffers, files, or git diffs) is embedded into the chat request.
You can set the context in the prompt by using `#` followed by the context name.
Supported contexts are:
If a context supports input, you can provide the input in the prompt by typing `:` followed by the input value (or by pressing the `complete` key after `:` to select it).
Default contexts are:

- `buffer` - Includes only the current buffer in chat context. Supports input.
- `buffers` - Includes all open buffers in chat context
- `buffer` - Includes only the current buffer in chat context
- `files` - Includes all non-hidden filenames in the current workspace in chat context
- `file` - Includes content of provided file in chat context. Supports input.
- `files` - Includes all non-hidden filenames in the current workspace in chat context. Supports input.
- `git` - Includes current git diff in chat context. Supports input.

### API

Expand Down Expand Up @@ -261,17 +265,16 @@ Also see [here](/lua/CopilotChat/config.lua):
proxy = nil, -- [protocol://]host[:port] Use this proxy
allow_insecure = false, -- Allow insecure server connections

system_prompt = prompts.COPILOT_INSTRUCTIONS, -- System prompt to use
model = 'gpt-4o', -- Default model to use, see ':CopilotChatModels' for available models
system_prompt = prompts.COPILOT_INSTRUCTIONS, -- System prompt to use (can be specified manually in prompt via /).
model = 'gpt-4o', -- Default model to use, see ':CopilotChatModels' for available models (can be specified manually in prompt via $).
agent = 'copilot', -- Default agent to use, see ':CopilotChatAgents' for available agents (can be specified manually in prompt via @).
context = nil, -- Default context to use, 'buffers', 'buffer', 'files' or none (can be specified manually in prompt via #).
context = nil, -- Default context to use (can be specified manually in prompt via #).
temperature = 0.1, -- GPT result temperature

question_header = '## User ', -- Header to use for user questions
answer_header = '## Copilot ', -- Header to use for AI answers
error_header = '## Error ', -- Header to use for errors
separator = '───', -- Separator to use in chat
highlight_headers = true, -- Highlight headers in chat, disable if using markdown renderers (like render-markdown.nvim)

show_folds = true, -- Shows folds for sections in chat
show_help = true, -- Shows help message as virtual lines when waiting for user input
Expand All @@ -280,6 +283,7 @@ Also see [here](/lua/CopilotChat/config.lua):
insert_at_end = false, -- Move cursor to end of buffer when inserting text
clear_chat_on_new_prompt = false, -- Clears chat on every new prompt
highlight_selection = true, -- Highlight selection in the source buffer when in the chat window
highlight_headers = true, -- Highlight headers in chat, disable if using markdown renderers (like render-markdown.nvim)

history_path = vim.fn.stdpath('data') .. '/copilotchat_history', -- Default path to stored history
callback = nil, -- Callback to use when ask response is received
Expand All @@ -289,16 +293,43 @@ Also see [here](/lua/CopilotChat/config.lua):
return select.visual(source) or select.buffer(source)
end,

-- default contexts
contexts = {
buffer = {
-- see config.lua for implementation
input = function(callback) end,
resolve = function(input, source) end,
},
buffers = {
-- see config.lua for implementation
resolve = function(input, source) end,
},
file = {
-- see config.lua for implementation
input = function(callback) end,
resolve = function(input, source) end,
},
files = {
-- see config.lua for implementation
input = function(callback) end,
resolve = function(input, source) end,
},
git = {
-- see config.lua for implementation
input = function(callback) end,
resolve = function(input, source) end,
},
},

-- default prompts
prompts = {
Explain = {
prompt = '> /COPILOT_EXPLAIN\n\nWrite an explanation for the selected code and diagnostics as paragraphs of text.',
},
Review = {
-- see config.lua for implementation
prompt = '> /COPILOT_REVIEW\n\nReview the selected code.',
callback = function(response, source)
-- see config.lua for implementation
end,
callback = function(response, source) end,
},
Fix = {
prompt = '> /COPILOT_GENERATE\n\nThere is a problem in this code. Rewrite the code to show it with the bug fixed.',
Expand All @@ -313,8 +344,7 @@ Also see [here](/lua/CopilotChat/config.lua):
prompt = '> /COPILOT_GENERATE\n\nPlease generate tests for my code.',
},
Commit = {
prompt = 'Write commit message for the change with commitizen convention. Make sure the title has maximum 50 characters and message is wrapped at 72 characters. Wrap the whole message in code block with language gitcommit.',
selection = select.gitdiff,
prompt = '> #git:staged\n\nWrite commit message for the change with commitizen convention. Make sure the title has maximum 50 characters and message is wrapped at 72 characters. Wrap the whole message in code block with language gitcommit.',
},
},

Expand Down
89 changes: 83 additions & 6 deletions lua/CopilotChat/config.lua
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
local prompts = require('CopilotChat.prompts')
local context = require('CopilotChat.context')
local select = require('CopilotChat.select')

--- @class CopilotChat.config.source
Expand All @@ -23,6 +24,11 @@ local select = require('CopilotChat.select')
---@field end_row number?
---@field end_col number?

---@class CopilotChat.config.context
---@field description string?
---@field input fun(callback: fun(input: string?))?
---@field resolve fun(input: string?, source: CopilotChat.config.source):table<CopilotChat.copilot.embed>

---@class CopilotChat.config.prompt
---@field prompt string?
---@field description string?
Expand Down Expand Up @@ -83,9 +89,11 @@ local select = require('CopilotChat.select')
---@field auto_insert_mode boolean?
---@field clear_chat_on_new_prompt boolean?
---@field highlight_selection boolean?
---@field highlight_headers boolean?
---@field history_path string?
---@field callback fun(response: string, source: CopilotChat.config.source)?
---@field selection nil|fun(source: CopilotChat.config.source):CopilotChat.config.selection?
---@field contexts table<string, CopilotChat.config.context>?
---@field prompts table<string, CopilotChat.config.prompt|string>?
---@field window CopilotChat.config.window?
---@field mappings CopilotChat.config.mappings?
Expand All @@ -95,17 +103,16 @@ return {
proxy = nil, -- [protocol://]host[:port] Use this proxy
allow_insecure = false, -- Allow insecure server connections

system_prompt = prompts.COPILOT_INSTRUCTIONS, -- System prompt to use
model = 'gpt-4o', -- Default model to use, see ':CopilotChatModels' for available models
system_prompt = prompts.COPILOT_INSTRUCTIONS, -- System prompt to use (can be specified manually in prompt via /).
model = 'gpt-4o', -- Default model to use, see ':CopilotChatModels' for available models (can be specified manually in prompt via $).
agent = 'copilot', -- Default agent to use, see ':CopilotChatAgents' for available agents (can be specified manually in prompt via @).
context = nil, -- Default context to use, 'buffers', 'buffer', 'files' or none (can be specified manually in prompt via #).
context = nil, -- Default context to use (can be specified manually in prompt via #).
temperature = 0.1, -- GPT result temperature

question_header = '## User ', -- Header to use for user questions
answer_header = '## Copilot ', -- Header to use for AI answers
error_header = '## Error ', -- Header to use for errors
separator = '───', -- Separator to use in chat
highlight_headers = true, -- Highlight headers in chat, disable if using markdown renderers (like render-markdown.nvim)

show_folds = true, -- Shows folds for sections in chat
show_help = true, -- Shows help message as virtual lines when waiting for user input
Expand All @@ -114,6 +121,7 @@ return {
insert_at_end = false, -- Move cursor to end of buffer when inserting text
clear_chat_on_new_prompt = false, -- Clears chat on every new prompt
highlight_selection = true, -- Highlight selection
highlight_headers = true, -- Highlight headers in chat, disable if using markdown renderers (like render-markdown.nvim)

history_path = vim.fn.stdpath('data') .. '/copilotchat_history', -- Default path to stored history
callback = nil, -- Callback to use when ask response is received
Expand All @@ -123,6 +131,76 @@ return {
return select.visual(source) or select.buffer(source)
end,

-- default contexts
contexts = {
buffer = {
description = 'Includes only the current buffer in chat context. Supports input.',
input = function(callback)
vim.ui.select(vim.api.nvim_list_bufs(), {
prompt = 'Select a buffer> ',
}, callback)
end,
resolve = function(input, source)
return {
context.outline(input and tonumber(input) or source.bufnr),
}
end,
},
buffers = {
description = 'Includes all open buffers in chat context.',
resolve = function()
return vim.tbl_map(
context.outline,
vim.tbl_filter(function(b)
return vim.api.nvim_buf_is_loaded(b) and vim.fn.buflisted(b) == 1
end, vim.api.nvim_list_bufs())
)
end,
},
file = {
description = 'Includes content of provided file in chat context. Supports input.',
input = function(callback)
local files = vim.tbl_filter(function(file)
return vim.fn.isdirectory(file) == 0
end, vim.fn.glob('**/*', false, true))

vim.ui.select(files, {
prompt = 'Select a file> ',
}, callback)
end,
resolve = function(input)
return {
context.file(input),
}
end,
},
files = {
description = 'Includes all non-hidden filenames in the current workspace in chat context. Supports input.',
input = function(callback)
vim.ui.input({
prompt = 'Enter a file pattern> ',
default = '**/*',
}, callback)
end,
resolve = function(input)
return context.files(input)
end,
},
git = {
description = 'Includes current git diff in chat context. Supports input.',
input = function(callback)
vim.ui.select({ 'unstaged', 'staged' }, {
prompt = 'Select diff type> ',
}, callback)
end,
resolve = function(input, source)
return {
context.gitdiff(input, source.bufnr),
}
end,
},
},

-- default prompts
prompts = {
Explain = {
Expand Down Expand Up @@ -183,8 +261,7 @@ return {
prompt = '> /COPILOT_GENERATE\n\nPlease generate tests for my code.',
},
Commit = {
prompt = 'Write commit message for the change with commitizen convention. Make sure the title has maximum 50 characters and message is wrapped at 72 characters. Wrap the whole message in code block with language gitcommit.',
selection = select.gitdiff,
prompt = '> #git:staged\n\nWrite commit message for the change with commitizen convention. Make sure the title has maximum 50 characters and message is wrapped at 72 characters. Wrap the whole message in code block with language gitcommit.',
},
},

Expand Down
Loading