1 parent e9b7a5c commit 87f4102
llama.cpp
@@ -5433,7 +5433,7 @@ static int llama_decode_internal(
 
     GGML_ASSERT(n_tokens <= n_batch);
 
-    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
 
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
 
     const int64_t t_start_us = ggml_time_us();
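
The practical effect of the change: before, any decode call with fewer than 32 tokens used the single-token thread count (cparams.n_threads); after, only a true single-token decode does, and every multi-token batch uses cparams.n_threads_batch. Below is a minimal, self-contained sketch of that selection logic; the struct and the helper function are stand-ins for illustration only, not the actual llama.cpp definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two cparams fields referenced in the diff; the real
 * llama.cpp context-params struct contains many more members. */
struct cparams_sketch {
    uint32_t n_threads;       /* thread count for single-token (generation) decodes */
    uint32_t n_threads_batch; /* thread count for multi-token (prompt/batch) decodes */
};

/* Thread selection as it reads after this commit: only a single-token
 * decode uses n_threads; any larger batch uses n_threads_batch. */
static int pick_n_threads(uint32_t n_tokens, const struct cparams_sketch *cparams) {
    return n_tokens == 1 ? (int) cparams->n_threads : (int) cparams->n_threads_batch;
}

int main(void) {
    struct cparams_sketch cp = { 4, 8 };
    printf("%d\n", pick_n_threads(1,  &cp)); /* 4: generation path */
    printf("%d\n", pick_n_threads(16, &cp)); /* 8: batch path; before this commit, 16 < 32 would still have used 4 */
    return 0;
}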