@@ -280,22 +280,22 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
         fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
         for (int i = 0; i < (int) embd_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
+            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
         }
 
         if (ctx_guidance) {
             fprintf(stderr, "\n");
             fprintf(stderr, "%s: negative prompt: '%s'\n", __func__, params.cfg_negative_prompt.c_str());
             fprintf(stderr, "%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
             for (int i = 0; i < (int) guidance_inp.size(); i++) {
-                fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i]).c_str());
+                fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
             }
         }
 
         if (params.n_keep > 0) {
             fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
             for (int i = 0; i < params.n_keep; i++) {
-                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]).c_str());
+                fprintf(stderr, "%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
             }
             fprintf(stderr, "'\n");
         }
@@ -451,7 +451,7 @@ int main(int argc, char ** argv) {
             // printf("\n---\n");
             // printf("resetting: '");
             // for (int i = 0; i < (int) embd.size(); i++) {
-            //     printf("%s", llama_token_to_str(ctx, embd[i]));
+            //     printf("%s", llama_token_to_piece(ctx, embd[i]));
             // }
             // printf("'\n");
             // printf("\n---\n");
@@ -504,7 +504,7 @@ int main(int argc, char ** argv) {
                 input_size = embd_guidance.size();
                 // fprintf(stderr, "\n---------------------\n");
                 // for (int i = 0; i < (int) embd_guidance.size(); i++) {
-                //     fprintf(stderr, "%s", llama_token_to_str(ctx, embd_guidance[i]));
+                //     fprintf(stderr, "%s", llama_token_to_piece(ctx, embd_guidance[i]));
                 // }
                 // fprintf(stderr, "\n---------------------\n");
             } else {
@@ -663,7 +663,7 @@ int main(int argc, char ** argv) {
         // display text
         if (input_echo) {
             for (auto id : embd) {
-                printf("%s", llama_token_to_str(ctx, id).c_str());
+                printf("%s", llama_token_to_piece(ctx, id).c_str());
             }
             fflush(stdout);
         }
@@ -679,7 +679,7 @@ int main(int argc, char ** argv) {
         if (params.antiprompt.size()) {
             std::string last_output;
             for (auto id : last_n_tokens) {
-                last_output += llama_token_to_str(ctx, id);
+                last_output += llama_token_to_piece(ctx, id);
             }
 
             is_antiprompt = false;
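
Taken together, the hunks above are a mechanical rename of the detokenization helper: every call site that used llama_token_to_str now calls llama_token_to_piece with the same arguments and the same std::string result. A minimal sketch of the pattern the diff keeps repeating, assuming the std::string-returning wrapper from the examples' common.h (the helper name and include are taken from the call sites above; the function below is illustrative only):

// Minimal sketch (not part of the diff): turning a token sequence back into text
// with the renamed helper, mirroring the accumulation loop in the last hunk.
#include "common.h"
#include "llama.h"

#include <string>
#include <vector>

static std::string tokens_to_text(llama_context * ctx, const std::vector<llama_token> & tokens) {
    std::string text;
    for (auto id : tokens) {
        text += llama_token_to_piece(ctx, id); // previously llama_token_to_str
    }
    return text;
}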