- 
                Notifications
    You must be signed in to change notification settings 
- Fork 13.5k
Closed
Labels
Description
Hello, I have tried adding minimal Emscripten support to the Makefile:
# WASM
# Emscripten toolchain drivers; override on the command line if needed
# (e.g. `make EMCXX=em++-3.1`).
EMCXX := em++
EMCC := emcc
# Embind + MODULARIZE build flags shared by every wasm artifact. Values are
# static, so use `:=` (simple expansion) instead of `=` — avoids re-expanding
# the long string on every reference.
EMCXXFLAGS := --bind --std=c++11 -s WASM=1 -s ALLOW_MEMORY_GROWTH=1 -s "EXPORTED_RUNTIME_METHODS=['addOnPostRun','FS']" -s "DISABLE_EXCEPTION_CATCHING=0" -s "EXCEPTION_DEBUG=1" -s "FORCE_FILESYSTEM=1" -s "MODULARIZE=1" -s "EXPORT_ES6=0" -s 'EXPORT_NAME="LLAMAModule"' -s "USE_ES6_IMPORT_META=0" -I./
EMCCFLAGS := --bind -s WASM=1 -s ALLOW_MEMORY_GROWTH=1 -s "EXPORTED_RUNTIME_METHODS=['addOnPostRun','FS']" -s "DISABLE_EXCEPTION_CATCHING=0" -s "EXCEPTION_DEBUG=1" -s "FORCE_FILESYSTEM=1" -s "MODULARIZE=1" -s "EXPORT_ES6=0" -s 'EXPORT_NAME="LLAMAModule"' -s "USE_ES6_IMPORT_META=0" -I./
# Emscripten bitcode objects linked into both executables.
EMOBJS := utils.bc ggml.bc

# wasm/wasmdebug are command names, not files — without .PHONY a stray file
# named `wasm` would silently stop the build.
.PHONY: wasm wasmdebug

wasm: llama_wasm.js quantize_wasm.js
# Target-specific exported variable: every recipe run for the wasmdebug goal
# (and its prerequisites) sees EMCC_DEBUG=1 in its environment.
wasmdebug: export EMCC_DEBUG=1
wasmdebug: llama_wasm.js quantize_wasm.js
#
# WASM lib
#
# Compile each translation unit to Emscripten bitcode. Automatic variables
# keep the recipe in sync with the rule header: $< is the first prerequisite
# (the source file), $@ is the target.
ggml.bc: ggml.c ggml.h
	$(EMCC) -c $(EMCCFLAGS) $< -o $@
utils.bc: utils.cpp utils.h
	$(EMCXX) -c $(EMCXXFLAGS) $< -o $@
$(info I EMOBJS:      $(EMOBJS))
#
# WASM executable
#
# Link the bitcode objects plus the program's own source into a MODULARIZE'd
# JS loader (+ .wasm). Makefile is a prerequisite so flag changes retrigger
# the link.
llama_wasm.js: $(EMOBJS) main.cpp Makefile
	$(EMCXX) $(EMCXXFLAGS) $(EMOBJS) main.cpp -o $@
quantize_wasm.js: $(EMOBJS) quantize.cpp Makefile
	$(EMCXX) $(EMCXXFLAGS) $(EMOBJS) quantize.cpp -o $@
It compiles fine with both em++ and emcc. At this stage the problem is that main.cpp and quantize.cpp do not expose proper header files, so I cannot call main as a module, or export a function to main (using Emscripten's EMSCRIPTEN_KEEPALIVE, for example).
In fact, with a simple C++ header the code could be compiled as a Node module and then called like:
/** file:llama.js */
const llamaModularized = require('./llama_wasm.js');
var llamaModule = null
// Instantiate the MODULARIZE'd Emscripten build, cache the ready instance
// in the module-level `llamaModule`, and settle with `true` when done.
const _initLLAMAModule = function () {
    return llamaModularized().then((instance) => {
        llamaModule = instance;
        return true;
    });
};
// Single callback fired once the WASM runtime has finished initializing.
let postRunFunc = null;
// Register the post-initialization callback (last registration wins).
const addOnPostRun = (func) => {
    postRunFunc = func;
};
// Start loading the module immediately at require-time; once it is ready,
// invoke whatever callback was registered via addOnPostRun (if any).
_initLLAMAModule().then(function (res) {
    if (postRunFunc !== null) {
        postRunFunc();
    }
});
// Thin JS wrapper over the embind-exported LLaMa class.
// NOTE(review): `llamaModule` is null until initialization completes —
// construct instances only from the addOnPostRun callback.
class LLaMa {
    constructor() {
        const NativeLLaMa = llamaModule.LLaMa;
        this.f = new NativeLLaMa();
    }
    // here modules fun impl
}
module.exports = { LLaMa, addOnPostRun };

and then executed in Node scripts like:
/** file:run.js */
(async () => {
    const LLaMa = require('./llama.js');
    const loadWASM = function () {
        var self = this;
        return new Promise(function (resolve, reject) {
            LLaMa.addOnPostRun(() => {
                let model = new LLaMa.LLaMa();
                /** use model functions */
            });
        });
    }//loadWASM
    await loadWASM();
}).call(this);

elloza and rtkclouds