Skip to content

talk-llama no longer builds #1256

@przemoc

Description

@przemoc

The talk-llama build broke in commit 59a3d0c (the ggml API change that removed `ggml_cgraph::n_threads` and changed the signatures of `ggml_rms_norm` and `ggml_graph_compute`); `examples/talk-llama/llama.cpp` was not updated to match.

cc  -I.              -O3 -DNDEBUG -std=c11   -fPIC -pthread -msse3 -mssse3   -c ggml.c -o ggml.o
g++ -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC -pthread -msse3 -mssse3 -c whisper.cpp -o whisper.o
g++ -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC -pthread -msse3 -mssse3 examples/talk-llama/talk-llama.cpp examples/talk-llama/llama.cpp examples/common.cpp examples/common-ggml.cpp examples/common-sdl.cpp ggml.o whisper.o -o talk-llama `sdl2-config --cflags --libs`
examples/talk-llama/llama.cpp: In function 'bool llama_eval_internal(llama_context&, const llama_token*, int, int, int)':
examples/talk-llama/llama.cpp:1207:8: error: 'struct ggml_cgraph' has no member named 'n_threads'
 1207 |     gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
      |        ^~~~~~~~~
examples/talk-llama/llama.cpp:1224:32: error: too few arguments to function 'ggml_tensor* ggml_rms_norm(ggml_context*, ggml_tensor*, float)'
 1224 |             cur = ggml_rms_norm(ctx0, inpL);
      |                   ~~~~~~~~~~~~~^~~~~~~~~~~~
In file included from examples/talk-llama/llama.cpp:12:
./ggml.h:933:35: note: declared here
  933 |     GGML_API struct ggml_tensor * ggml_rms_norm(
      |                                   ^~~~~~~~~~~~~
examples/talk-llama/llama.cpp:1332:36: error: too few arguments to function 'ggml_tensor* ggml_rms_norm(ggml_context*, ggml_tensor*, float)'
 1332 |                 cur = ggml_rms_norm(ctx0, inpFF);
      |                       ~~~~~~~~~~~~~^~~~~~~~~~~~~
./ggml.h:933:35: note: declared here
  933 |     GGML_API struct ggml_tensor * ggml_rms_norm(
      |                                   ^~~~~~~~~~~~~
examples/talk-llama/llama.cpp:1370:29: error: too few arguments to function 'ggml_tensor* ggml_rms_norm(ggml_context*, ggml_tensor*, float)'
 1370 |         inpL = ggml_rms_norm(ctx0, inpL);
      |                ~~~~~~~~~~~~~^~~~~~~~~~~~
./ggml.h:933:35: note: declared here
  933 |     GGML_API struct ggml_tensor * ggml_rms_norm(
      |                                   ^~~~~~~~~~~~~
examples/talk-llama/llama.cpp:1388:31: error: cannot convert 'ggml_context*' to 'ggml_cgraph*'
 1388 |     ggml_graph_compute       (ctx0, &gf);
      |                               ^~~~
      |                               |
      |                               ggml_context*
./ggml.h:1632:72: note:   initializing argument 1 of 'int ggml_graph_compute(ggml_cgraph*, ggml_cplan*)'
 1632 |     GGML_API               int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
      |                                                   ~~~~~~~~~~~~~~~~~~~~~^~~~~~
./ggml.h:287:12: note: class type 'ggml_context' is incomplete
  287 |     struct ggml_context;
      |            ^~~~~~~~~~~~
examples/talk-llama/llama.cpp: In function 'int llama_apply_lora_from_file_internal(llama_context*, const char*, const char*, int)':
examples/talk-llama/llama.cpp:2491:16: error: 'struct ggml_cgraph' has no member named 'n_threads'
 2491 |             gf.n_threads = n_threads;
      |                ^~~~~~~~~
examples/talk-llama/llama.cpp:2492:32: error: cannot convert 'ggml_context*' to 'ggml_cgraph*'
 2492 |             ggml_graph_compute(lora_ctx, &gf);
      |                                ^~~~~~~~
      |                                |
      |                                ggml_context*
./ggml.h:1632:72: note:   initializing argument 1 of 'int ggml_graph_compute(ggml_cgraph*, ggml_cplan*)'
 1632 |     GGML_API               int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
      |                                                   ~~~~~~~~~~~~~~~~~~~~~^~~~~~
./ggml.h:287:12: note: class type 'ggml_context' is incomplete
  287 |     struct ggml_context;
      |            ^~~~~~~~~~~~
examples/talk-llama/llama.cpp: In function 'size_t llama_copy_state_data(llama_context*, uint8_t*)':
examples/talk-llama/llama.cpp:2638:16: error: 'struct ggml_cgraph' has no member named 'n_threads'
 2638 |             gf.n_threads = 1;
      |                ^~~~~~~~~
examples/talk-llama/llama.cpp:2658:32: error: cannot convert 'ggml_context*' to 'ggml_cgraph*'
 2658 |             ggml_graph_compute(cpy_ctx, &gf);
      |                                ^~~~~~~
      |                                |
      |                                ggml_context*
./ggml.h:1632:72: note:   initializing argument 1 of 'int ggml_graph_compute(ggml_cgraph*, ggml_cplan*)'
 1632 |     GGML_API               int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
      |                                                   ~~~~~~~~~~~~~~~~~~~~~^~~~~~
./ggml.h:287:12: note: class type 'ggml_context' is incomplete
  287 |     struct ggml_context;
      |            ^~~~~~~~~~~~
examples/talk-llama/llama.cpp: In function 'size_t llama_set_state_data(llama_context*, uint8_t*)':
examples/talk-llama/llama.cpp:2746:16: error: 'struct ggml_cgraph' has no member named 'n_threads'
 2746 |             gf.n_threads = 1;
      |                ^~~~~~~~~
examples/talk-llama/llama.cpp:2766:32: error: cannot convert 'ggml_context*' to 'ggml_cgraph*'
 2766 |             ggml_graph_compute(cpy_ctx, &gf);
      |                                ^~~~~~~
      |                                |
      |                                ggml_context*
./ggml.h:1632:72: note:   initializing argument 1 of 'int ggml_graph_compute(ggml_cgraph*, ggml_cplan*)'
 1632 |     GGML_API               int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
      |                                                   ~~~~~~~~~~~~~~~~~~~~~^~~~~~
./ggml.h:287:12: note: class type 'ggml_context' is incomplete
  287 |     struct ggml_context;
      |            ^~~~~~~~~~~~

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions