From 0f730e400544d7547192ff6ad9f3dcec51320c89 Mon Sep 17 00:00:00 2001 From: ch1y0q Date: Fri, 9 Aug 2024 17:33:28 +0800 Subject: [PATCH 1/4] add yaml and modify `concat_csv.py` for `transformers` 4.43.1 --- .github/workflows/llm_performance_tests.yml | 189 ++++++++++++++++++ .../benchmark/arc-perf-transformers-443.yaml | 17 ++ python/llm/test/benchmark/concat_csv.py | 2 +- .../benchmark/igpu-perf/1024-128_443.yaml | 14 ++ .../igpu-perf/1024-128_int4_fp16_443.yaml | 14 ++ .../1024-128_int4_fp16_loadlowbit_443.yaml | 14 ++ .../igpu-perf/2048-256_int4_fp16_443.yaml | 14 ++ .../igpu-perf/3072-384_int4_fp16_443.yaml | 14 ++ .../igpu-perf/32-32_int4_fp16_443.yaml | 14 ++ .../igpu-perf/4096-512_int4_fp16_443.yaml | 14 ++ 10 files changed, 305 insertions(+), 1 deletion(-) create mode 100644 python/llm/test/benchmark/arc-perf-transformers-443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/1024-128_443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml create mode 100644 python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml index 9185e4282d5..4fc5e70b650 100644 --- a/.github/workflows/llm_performance_tests.yml +++ b/.github/workflows/llm_performance_tests.yml @@ -714,6 +714,33 @@ jobs: call conda deactivate + - name: Prepare igpu perf test for transformers 4.43 (32-32 int4+fp16) + shell: bash + run: | + sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml + + - name: Test on igpu for transformers 4.43 (32-32 int4+fp16) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.43.1 + pip install trl + + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\32-32_int4_fp16_443.yaml config.yaml + set PYTHONIOENCODING=utf-8 + python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1) + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 + if %ERRORLEVEL% neq 0 (exit /b 1) + + pip uninstall trl + call conda deactivate + - name: Concat csv and generate html (32-32 int4+fp16) shell: cmd run: | @@ -811,6 +838,33 @@ jobs: call conda deactivate + - name: Prepare igpu perf test for transformers 4.43 (1024-128 int4+fp16) + shell: bash + run: | + sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml + + - name: Test on igpu for transformers 4.43 (1024-128 int4+fp16) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.43.1 + pip install trl + + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_443.yaml config.yaml + set PYTHONIOENCODING=utf-8 
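+          REM PYTHONIOENCODING=utf-8 (set above) forces UTF-8 on Python's stdio, so the
+          REM redirected log below does not fail on non-ASCII characters in model output.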
+ python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+
+ pip uninstall trl
+ call conda deactivate
+
- name: Concat csv and generate html (1024-128 int4+fp16)
shell: cmd
run: |
@@ -907,6 +961,33 @@ jobs:
call conda deactivate
+ - name: Prepare igpu perf test for transformers 4.43 (2048-256 int4+fp16)
+ shell: bash
+ run: |
+ sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+ sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml
+
+ - name: Test on igpu for transformers 4.43 (2048-256 int4+fp16)
+ shell: cmd
+ run: |
+ call conda activate igpu-perf
+ pip install transformers==4.43.1
+ pip install trl
+
+ set SYCL_CACHE_PERSISTENT=1
+ set BIGDL_LLM_XMX_DISABLED=1
+
+ cd python\llm\dev\benchmark\all-in-one
+ move ..\..\..\test\benchmark\igpu-perf\2048-256_int4_fp16_443.yaml config.yaml
+ set PYTHONIOENCODING=utf-8
+ python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+
+ pip uninstall trl
+ call conda deactivate
+
- name: Concat csv and generate html (2048-256 int4+fp16)
shell: cmd
run: |
@@ -1003,6 +1084,33 @@ jobs:
call conda deactivate
+ - name: Prepare igpu perf test for transformers 4.43 (3072-384 int4+fp16)
+ shell: bash
+ run: |
+ sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+ sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml
+
+ - name: Test on igpu for transformers 4.43 (3072-384 int4+fp16)
+ shell: cmd
+ run: |
+ call conda activate igpu-perf
+ pip install transformers==4.43.1
+ pip install trl
+
+ set SYCL_CACHE_PERSISTENT=1
+ set BIGDL_LLM_XMX_DISABLED=1
+
+ cd python\llm\dev\benchmark\all-in-one
+ move ..\..\..\test\benchmark\igpu-perf\3072-384_int4_fp16_443.yaml config.yaml
+ set PYTHONIOENCODING=utf-8
+ python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+
+ pip uninstall trl
+ call conda deactivate
+
- name: Concat csv and generate html (3072-384 int4+fp16)
shell: cmd
run: |
@@ -1099,6 +1207,33 @@ jobs:
call conda deactivate
+ - name: Prepare igpu perf test for transformers 4.43 (4096-512 int4+fp16)
+ shell: bash
+ run: |
+ sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
+ sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
+
+ - name: Test on igpu for transformers 4.43 (4096-512 int4+fp16)
+ shell: cmd
+ run: |
+ call conda activate igpu-perf
+ pip install transformers==4.43.1
+ pip install trl
+
+ set SYCL_CACHE_PERSISTENT=1
+ set BIGDL_LLM_XMX_DISABLED=1
+
+ cd python\llm\dev\benchmark\all-in-one
+ move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_443.yaml config.yaml
+ set PYTHONIOENCODING=utf-8
+ python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
+ if %ERRORLEVEL% neq 0 (exit /b 1)
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file
config.yaml --suffix test3 + if %ERRORLEVEL% neq 0 (exit /b 1) + + pip uninstall trl + call conda deactivate + - name: Concat csv and generate html (4096-512 int4+fp16) shell: cmd run: | @@ -1195,6 +1330,33 @@ jobs: call conda deactivate + - name: Prepare igpu perf test for transformers 4.43 (load_low_bit 1024-128 int4+fp16) + shell: bash + run: | + sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml + + - name: Test on igpu for transformers 4.43 (load_low_bit 1024-128 int4+fp16) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.43.1 + pip install trl + + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\1024-128_int4_fp16_loadlowbit_443.yaml config.yaml + set PYTHONIOENCODING=utf-8 + python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 (exit /b 1) + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 + if %ERRORLEVEL% neq 0 (exit /b 1) + + pip uninstall trl + call conda deactivate + - name: Concat csv and generate html (load_low_bit 1024-128 int4+fp16) shell: cmd run: | @@ -1290,6 +1452,33 @@ jobs: call conda deactivate + - name: Prepare igpu perf test for transformers 4.43 (1024-128) + shell: bash + run: | + sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_443.yaml + + - name: Test on igpu for transformers 4.43 (1024-128) + shell: cmd + run: | + call conda activate igpu-perf + pip install transformers==4.43.1 + pip install trl + + set SYCL_CACHE_PERSISTENT=1 + set BIGDL_LLM_XMX_DISABLED=1 + + cd python\llm\dev\benchmark\all-in-one + move ..\..\..\test\benchmark\igpu-perf\1024-128_443.yaml config.yaml + set PYTHONIOENCODING=utf-8 + python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 + if %ERRORLEVEL% neq 0 (exit /b 1) + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 + if %ERRORLEVEL% neq 0 (exit /b 1) + + pip uninstall trl + call conda deactivate + - name: Concat csv and generate html (1024-128) shell: cmd run: | diff --git a/python/llm/test/benchmark/arc-perf-transformers-443.yaml b/python/llm/test/benchmark/arc-perf-transformers-443.yaml new file mode 100644 index 00000000000..903bd2ea09e --- /dev/null +++ b/python/llm/test/benchmark/arc-perf-transformers-443.yaml @@ -0,0 +1,17 @@ +repo_id: + - 'google/gemma-2-2b-it' + - 'google/gemma-2-9b-it' +local_model_hub: '/mnt/disk1/models' +warm_up: 1 +num_trials: 3 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '32-32'
+ - '1024-128'
+ - '2048-256'
+test_api:
+ - "transformer_int4_fp16_gpu" # on Intel GPU
+cpu_embedding: False # whether to put embedding on CPU (only available now for gpu win related test_api)
+task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
diff --git a/python/llm/test/benchmark/concat_csv.py b/python/llm/test/benchmark/concat_csv.py
index 908f71f1aed..442eab7cc9a 100644
--- a/python/llm/test/benchmark/concat_csv.py
+++ b/python/llm/test/benchmark/concat_csv.py
@@ -36,7 +36,7 @@ def main():
 merged_df = pd.concat([pd.read_csv(file, index_col=0) for file in csv_files], ignore_index=True)
 merged_df.reset_index(drop=True, inplace=True)
-    merged_csv = csv_files[0].replace("_test1", "").replace("_test2", "").replace("_test3", "")
+    merged_csv = csv_files[0].replace("_test1", "").replace("_test2", "").replace("_test3", "").replace("_test4", "")
 merged_df.to_csv(merged_csv)
 if __name__ == "__main__":
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_443.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_443.yaml
new file mode 100644
index 00000000000..4667ff34c3a
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '1024-128'
+test_api:
+ - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml
new file mode 100644
index 00000000000..2f4bbd2270d
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '1024-128'
+test_api:
+ - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, use fp16 for non-linear layer
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml
new file mode 100644
index 00000000000..8d8e16c5c42
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e.
symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '1024-128'
+test_api:
+ - "transformer_int4_fp16_loadlowbit_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml
new file mode 100644
index 00000000000..3f8e554d19d
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '2048-256'
+test_api:
+ - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml
new file mode 100644
index 00000000000..ac1a4d6511a
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '3072-384'
+test_api:
+ - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml
new file mode 100644
index 00000000000..a02b19b1cf2
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 3
+num_trials: 5
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '32-32'
+test_api:
+ - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
new file mode 100644
index 00000000000..53467812ac7
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
@@ -0,0 +1,14 @@
+repo_id:
+ - 'google/gemma-2-2b-it'
+ - 'google/gemma-2-9b-it'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e.
symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+ - '4096-512'
+test_api:
+ - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether to put embedding on CPU (only available now for gpu win related test_api)

From 7bda1acea034ecbc5a697d6efcaf546bf8b5f200 Mon Sep 17 00:00:00 2001
From: ch1y0q
Date: Tue, 13 Aug 2024 11:11:27 +0800
Subject: [PATCH 2/4] remove 4.43 for arc; fix;

---
 .github/workflows/llm_performance_tests.yml  | 26 +++++++++----------
 .../benchmark/arc-perf-transformers-443.yaml | 17 ------------
 2 files changed, 13 insertions(+), 30 deletions(-)
 delete mode 100644 python/llm/test/benchmark/arc-perf-transformers-443.yaml

diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 4fc5e70b650..afaf0680576 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -735,7 +735,7 @@ jobs:
 set PYTHONIOENCODING=utf-8
 python run.py >> %CSV_SAVE_PATH%\32-32_int4_fp16\log\%LOG_FILE% 2>&1
 if %ERRORLEVEL% neq 0 if %ERRORLEVEL% neq -1073740791 (exit /b 1)
- python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
 if %ERRORLEVEL% neq 0 (exit /b 1)

 pip uninstall trl
@@ -764,7 +764,7 @@ jobs:
 shell: bash
 run: |
 sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
- sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+ sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
 sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml

 - name: Test on igpu (1024-128 int4+fp16)
@@ -859,7 +859,7 @@ jobs:
 set PYTHONIOENCODING=utf-8
 python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16\log\%LOG_FILE% 2>&1
 if %ERRORLEVEL% neq 0 (exit /b 1)
- python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
 if %ERRORLEVEL% neq 0 (exit /b 1)

 pip uninstall trl
@@ -887,7 +887,7 @@ jobs:
 shell: bash
 run: |
 sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py
- sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+ sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
 sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml

 - name: Test on igpu (2048-256 int4+fp16)
@@ -982,7 +982,7 @@ jobs:
 set PYTHONIOENCODING=utf-8
 python run.py >> %CSV_SAVE_PATH%\2048-256_int4_fp16\log\%LOG_FILE% 2>&1
 if %ERRORLEVEL% neq 0 (exit /b 1)
- python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3
+ python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
 if %ERRORLEVEL% neq 0 (exit /b 1)

 pip uninstall trl
@@ -1010,7 +1010,7 @@ jobs:
 shell: bash
 run: |
 sed -i 's/2048-256/3072-384/g' python/llm/dev/benchmark/all-in-one/run.py
- sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+ sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
 sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml

 - name: Test on
igpu (3072-384 int4+fp16) @@ -1105,7 +1105,7 @@ jobs: set PYTHONIOENCODING=utf-8 python run.py >> %CSV_SAVE_PATH%\3072-384_int4_fp16\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 if %ERRORLEVEL% neq 0 (exit /b 1) pip uninstall trl @@ -1133,7 +1133,7 @@ jobs: shell: bash run: | sed -i 's/3072-384/4096-512/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml - name: Test on igpu (4096-512 int4+fp16) @@ -1228,7 +1228,7 @@ jobs: set PYTHONIOENCODING=utf-8 python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 if %ERRORLEVEL% neq 0 (exit /b 1) pip uninstall trl @@ -1256,7 +1256,7 @@ jobs: shell: bash run: | sed -i 's/4096-512/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml - name: Test on igpu (load_low_bit 1024-128 int4+fp16) @@ -1351,7 +1351,7 @@ jobs: set PYTHONIOENCODING=utf-8 python run.py >> %CSV_SAVE_PATH%\1024-128_int4_fp16_loadlowbit\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test3 + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 if %ERRORLEVEL% neq 0 (exit /b 1) pip uninstall trl @@ -1378,7 +1378,7 @@ jobs: - name: Prepare igpu perf test (1024-128) shell: bash run: | - sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml - name: Test on igpu (1024-128) @@ -1473,7 +1473,7 @@ jobs: set PYTHONIOENCODING=utf-8 python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1 if %ERRORLEVEL% neq 0 (exit /b 1) - python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test2 + python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 if %ERRORLEVEL% neq 0 (exit /b 1) pip uninstall trl diff --git a/python/llm/test/benchmark/arc-perf-transformers-443.yaml b/python/llm/test/benchmark/arc-perf-transformers-443.yaml deleted file mode 100644 index 903bd2ea09e..00000000000 --- a/python/llm/test/benchmark/arc-perf-transformers-443.yaml +++ /dev/null @@ -1,17 +0,0 @@ -repo_id: - - 'google/gemma-2-2b-it' - - 'google/gemma-2-9b-it' -local_model_hub: '/mnt/disk1/models' -warm_up: 1 -num_trials: 3 -num_beams: 1 # default to greedy search -low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4)
-batch_size: 1 # default to 1
-in_out_pairs:
- - '32-32'
- - '1024-128'
- - '2048-256'
-test_api:
- - "transformer_int4_fp16_gpu" # on Intel GPU
-cpu_embedding: False # whether to put embedding on CPU (only available now for gpu win related test_api)
-task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'

From ad39b84c15f9a86768906410774144026b572433 Mon Sep 17 00:00:00 2001
From: ch1y0q
Date: Tue, 13 Aug 2024 11:16:02 +0800
Subject: [PATCH 3/4] remove 4096-512 for 4.43

---
 .github/workflows/llm_performance_tests.yml | 57 +++++++++++----------
 1 file changed, 30 insertions(+), 27 deletions(-)

diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index afaf0680576..2f6de2d5afe 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -1207,32 +1207,35 @@ jobs:
 call conda deactivate

- - name: Prepare igpu perf test for transformers 4.43 (4096-512 int4+fp16)
- shell: bash
- run: |
- sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py
- sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml
-
- - name: Test on igpu for transformers 4.43 (4096-512 int4+fp16)
- shell: cmd
- run: |
- call conda activate igpu-perf
- pip install transformers==4.43.1
- pip install trl
-
- set SYCL_CACHE_PERSISTENT=1
- set BIGDL_LLM_XMX_DISABLED=1
-
- cd python\llm\dev\benchmark\all-in-one
- move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_443.yaml config.yaml
- set PYTHONIOENCODING=utf-8
- python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1
- if %ERRORLEVEL% neq 0 (exit /b 1)
- python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4
- if %ERRORLEVEL% neq 0 (exit /b 1)
-
- pip uninstall trl
- call conda deactivate
+ # NOTE: Gemma2 is not working for 4096-512 yet.
+ # When it works, uncomment this section and remember to change 's/{today}_test3/{today}_test1/g' in the next section (see the example below).
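+ # For example, once this section is re-enabled, the next section's sed
+ # should read (this mirrors the line this patch reverts below):
+ #   sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py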
+ + #- name: Prepare igpu perf test for transformers 4.43 (4096-512 int4+fp16) + # shell: bash + # run: | + # sed -i 's/{today}_test3/{today}_test4/g' python/llm/dev/benchmark/all-in-one/run.py + # sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_443.yaml + + #- name: Test on igpu for transformers 4.43 (4096-512 int4+fp16) + # shell: cmd + # run: | + # call conda activate igpu-perf + # pip install transformers==4.43.1 + # pip install trl + # + # set SYCL_CACHE_PERSISTENT=1 + # set BIGDL_LLM_XMX_DISABLED=1 + # + # cd python\llm\dev\benchmark\all-in-one + # move ..\..\..\test\benchmark\igpu-perf\4096-512_int4_fp16_443.yaml config.yaml + # set PYTHONIOENCODING=utf-8 + # python run.py >> %CSV_SAVE_PATH%\4096-512_int4_fp16\log\%LOG_FILE% 2>&1 + # if %ERRORLEVEL% neq 0 (exit /b 1) + # python ..\..\..\test\benchmark\igpu-perf\check_csv_results.py --yaml-file config.yaml --suffix test4 + # if %ERRORLEVEL% neq 0 (exit /b 1) + # + # pip uninstall trl + # call conda deactivate - name: Concat csv and generate html (4096-512 int4+fp16) shell: cmd @@ -1256,7 +1259,7 @@ jobs: shell: bash run: | sed -i 's/4096-512/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py - sed -i 's/{today}_test4/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py + sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml - name: Test on igpu (load_low_bit 1024-128 int4+fp16) From ef8e4459235a625c1c359b97e691b925ac03cd43 Mon Sep 17 00:00:00 2001 From: ch1y0q Date: Tue, 13 Aug 2024 11:18:55 +0800 Subject: [PATCH 4/4] comment some models --- .../test/benchmark/igpu-perf/1024-128.yaml | 26 +++++++++---------- .../benchmark/igpu-perf/1024-128_437.yaml | 8 +++--- .../igpu-perf/1024-128_int4_fp16.yaml | 24 ++++++++--------- .../igpu-perf/1024-128_int4_fp16_437.yaml | 8 +++--- .../1024-128_int4_fp16_loadlowbit.yaml | 24 ++++++++--------- .../1024-128_int4_fp16_loadlowbit_437.yaml | 8 +++--- .../igpu-perf/2048-256_int4_fp16.yaml | 24 ++++++++--------- .../igpu-perf/2048-256_int4_fp16_437.yaml | 8 +++--- .../igpu-perf/3072-384_int4_fp16.yaml | 22 ++++++++-------- .../igpu-perf/3072-384_int4_fp16_437.yaml | 8 +++--- .../benchmark/igpu-perf/32-32_int4_fp16.yaml | 24 ++++++++--------- .../igpu-perf/32-32_int4_fp16_437.yaml | 8 +++--- .../igpu-perf/4096-512_int4_fp16.yaml | 20 +++++++------- .../igpu-perf/4096-512_int4_fp16_437.yaml | 8 +++--- 14 files changed, 110 insertions(+), 110 deletions(-) diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml index b0bd5f30c20..98fab56cbc4 100644 --- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml +++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml @@ -1,17 +1,17 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'baichuan-inc/Baichuan2-13B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - 'RWKV/v5-Eagle-7B-HF' - - '01-ai/Yi-6B-Chat' - - 'Qwen/Qwen-VL-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'baichuan-inc/Baichuan2-13B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + 
#- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- 'RWKV/v5-Eagle-7B-HF' + #- '01-ai/Yi-6B-Chat' + #- 'Qwen/Qwen-VL-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml index f191801c7dc..5658115b79b 100644 --- a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml index 39d575680ab..60b3bffc61c 100644 --- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml +++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml @@ -1,16 +1,16 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'baichuan-inc/Baichuan2-13B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - '01-ai/Yi-6B-Chat' - - 'Qwen/Qwen-VL-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'baichuan-inc/Baichuan2-13B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + #- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- '01-ai/Yi-6B-Chat' + #- 'Qwen/Qwen-VL-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml index f9db9131ca3..dc10439d78f 100644 --- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml index 2730e465d47..dd5304c6695 100644 --- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml +++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit.yaml @@ -1,16 +1,16 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'baichuan-inc/Baichuan2-13B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 
'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - '01-ai/Yi-6B-Chat' - - 'Qwen/Qwen-VL-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'baichuan-inc/Baichuan2-13B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + #- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- '01-ai/Yi-6B-Chat' + #- 'Qwen/Qwen-VL-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml index abd17aaa1e2..111a2be8f94 100644 --- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_loadlowbit_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml index c53e6283919..850912f7a5f 100644 --- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml +++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16.yaml @@ -1,16 +1,16 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'baichuan-inc/Baichuan2-13B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - '01-ai/Yi-6B-Chat' - - 'Qwen/Qwen-VL-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'baichuan-inc/Baichuan2-13B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + #- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- '01-ai/Yi-6B-Chat' + #- 'Qwen/Qwen-VL-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml index fd4fbbfaec1..74c36281e79 100644 --- a/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/2048-256_int4_fp16_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git 
a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml index 47b9839a789..14adf06075b 100644 --- a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml +++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16.yaml @@ -1,15 +1,15 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - '01-ai/Yi-6B-Chat' - - 'Qwen/Qwen-VL-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + #- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- '01-ai/Yi-6B-Chat' + #- 'Qwen/Qwen-VL-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml index cfd7cc31afa..813cf2f32a6 100644 --- a/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/3072-384_int4_fp16_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml index 39115e0231b..23d46b79201 100644 --- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml +++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16.yaml @@ -1,16 +1,16 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'baichuan-inc/Baichuan2-13B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - '01-ai/Yi-6B-Chat' - - 'Qwen/Qwen-VL-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'baichuan-inc/Baichuan2-13B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + #- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- '01-ai/Yi-6B-Chat' + #- 'Qwen/Qwen-VL-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 3 diff --git a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml index 93fdc926e5f..cb6de9024b2 100644 --- a/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/32-32_int4_fp16_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 
'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 3 diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml index 26e128a564c..04e22f983c0 100644 --- a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml +++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16.yaml @@ -1,14 +1,14 @@ repo_id: - - 'THUDM/chatglm3-6b' - - 'THUDM/glm-4-9b-chat' - - 'baichuan-inc/Baichuan2-7B-Chat' - - 'meta-llama/Llama-2-7b-chat-hf' - - 'meta-llama/Llama-2-13b-chat-hf' - - 'meta-llama/Meta-Llama-3-8B-Instruct' - - 'mistralai/Mistral-7B-Instruct-v0.2' - - 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' - - '01-ai/Yi-6B-Chat' - - 'openbmb/MiniCPM-1B-sft-bf16' + #- 'THUDM/chatglm3-6b' + #- 'THUDM/glm-4-9b-chat' + #- 'baichuan-inc/Baichuan2-7B-Chat' + #- 'meta-llama/Llama-2-7b-chat-hf' + #- 'meta-llama/Llama-2-13b-chat-hf' + #- 'meta-llama/Meta-Llama-3-8B-Instruct' + #- 'mistralai/Mistral-7B-Instruct-v0.2' + #- 'deepseek-ai/deepseek-coder-7b-instruct-v1.5' + #- '01-ai/Yi-6B-Chat' + #- 'openbmb/MiniCPM-1B-sft-bf16' - 'openbmb/MiniCPM-2B-sft-bf16' local_model_hub: 'path to your local model hub' warm_up: 1 diff --git a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml index 7c2632d3d96..8941efeccd3 100644 --- a/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml +++ b/python/llm/test/benchmark/igpu-perf/4096-512_int4_fp16_437.yaml @@ -1,8 +1,8 @@ repo_id: - - 'Qwen/Qwen1.5-7B-Chat' - - 'Qwen/Qwen2-7B-Instruct' - - 'microsoft/Phi-3-mini-4k-instruct' - - 'microsoft/Phi-3-mini-128k-instruct' + #- 'Qwen/Qwen1.5-7B-Chat' + #- 'Qwen/Qwen2-7B-Instruct' + #- 'microsoft/Phi-3-mini-4k-instruct' + #- 'microsoft/Phi-3-mini-128k-instruct' - 'microsoft/phi-3-vision-128k-instruct' local_model_hub: 'path to your local model hub' warm_up: 1
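
A side note on the concat_csv.py change in PATCH 1/4: the chained str.replace
calls grow by one call for every new sub-test suffix. A minimal sketch of a
regex alternative (illustrative only, not code from these patches, and
merged_csv_name is a hypothetical helper name):

    import re

    def merged_csv_name(first_csv: str) -> str:
        # Strip any "_testN" stage suffix in one pass, e.g.
        # "2024-08-09_test4.csv" -> "2024-08-09.csv"
        return re.sub(r"_test\d+", "", first_csv)

With a pattern like this, adding a fifth stage ("_test5") would not require
touching concat_csv.py again.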