diff --git a/docs/mddocs/Overview/install_gpu.md b/docs/mddocs/Overview/install_gpu.md index 639c49fa9a8..8a9931694e8 100644 --- a/docs/mddocs/Overview/install_gpu.md +++ b/docs/mddocs/Overview/install_gpu.md @@ -46,7 +46,7 @@ We recommend using [Miniforge](https://conda-forge.org/download/) to create a py The easiest ways to install `ipex-llm` is the following commands. -- For **Intel Core™ Ultra Series 2 (a.k.a. Lunar Lake) with Intel Arc™ Graphics**: +- For **Intel Core™ Ultra Series 2 with Intel Arc™ Graphics (a.k.a. Lunar Lake)**: Choose either US or CN website for `extra-index-url`: @@ -94,7 +94,7 @@ The easiest ways to install `ipex-llm` is the following commands. If you encounter network issues when installing IPEX, you can also install IPEX-LLM dependencies for Intel XPU from source archives. First you need to download and install torch/torchvision/ipex from wheels listed below before installing `ipex-llm`. -- For **Intel Core™ Ultra Series 2 (a.k.a. Lunar Lake) with Intel Arc™ Graphics**: +- For **Intel Core™ Ultra Series 2 with Intel Arc™ Graphics (a.k.a. Lunar Lake)**: Download the wheels on Windows system: diff --git a/docs/mddocs/Quickstart/install_windows_gpu.md b/docs/mddocs/Quickstart/install_windows_gpu.md index 0d90d99958c..f18d3578953 100644 --- a/docs/mddocs/Quickstart/install_windows_gpu.md +++ b/docs/mddocs/Quickstart/install_windows_gpu.md @@ -47,7 +47,7 @@ conda activate llm With the `llm` environment active, use `pip` to install `ipex-llm` for GPU: -- For **Intel Core™ Ultra Series 2 (a.k.a. Lunar Lake) with Intel Arc™ Graphics**: +- For **Intel Core™ Ultra Series 2 with Intel Arc™ Graphics (a.k.a. Lunar Lake)**: Choose either US or CN website for `extra-index-url`: diff --git a/python/llm/setup.py b/python/llm/setup.py index 5556f3d0974..878d070b21f 100644 --- a/python/llm/setup.py +++ b/python/llm/setup.py @@ -275,11 +275,9 @@ def setup_package(): oneapi_2024_0_requires = ["dpcpp-cpp-rt==2024.0.2;platform_system=='Windows'", - "mkl-dpcpp==2024.0.0;platform_system=='Windows'", - "onednn==2024.0.0;platform_system=='Windows'"] + "mkl-dpcpp==2024.0.0;platform_system=='Windows'"] oneapi_2024_2_requires = ["dpcpp-cpp-rt==2024.2.1;platform_system=='Windows'", - "mkl-dpcpp==2024.2.1;platform_system=='Windows'", - "onednn==2024.2.1;platform_system=='Windows'"] + "mkl-dpcpp==2024.2.1;platform_system=='Windows'"] # Linux install with --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ xpu_21_requires = copy.deepcopy(all_requires) for exclude_require in cpu_torch_version: @@ -303,7 +301,8 @@ def setup_package(): "intel-extension-for-pytorch==2.3.110+xpu", "bigdl-core-xe-23==" + CORE_XE_VERSION, "bigdl-core-xe-batch-23==" + CORE_XE_VERSION, - "bigdl-core-xe-addons-23==" + CORE_XE_VERSION] + "bigdl-core-xe-addons-23==" + CORE_XE_VERSION, + "onednn-devel==2024.1.1;platform_system=='Windows'"] cpp_requires = ["bigdl-core-cpp==" + CORE_XE_VERSION, "onednn-devel==2024.2.1;platform_system=='Windows'"] @@ -344,6 +343,7 @@ def setup_package(): "npu": npu_requires, "xpu-2-1": xpu_21_requires, "xpu-lnl": xpu_lnl_requires, + "xpu-arl": xpu_lnl_requires, "serving": serving_requires, "cpp": cpp_requires, "llama-index": llama_index_requires}, # for internal usage when upstreaming for llama-index