From 52feb4120bd54197a715ed302e970d772f1f78c5 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 10:26:06 +0800
Subject: [PATCH 01/13] add codegemma example in GPU/HF-Transformers-AutoModels/

---
 .../Model/codagemma/generate.py               | 83 +++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/generate.py

diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/generate.py
new file mode 100644
index 00000000000..2689e70ccfb
--- /dev/null
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/generate.py
@@ -0,0 +1,83 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import time
+import argparse
+
+from ipex_llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+# The instruction-tuned models use a chat template that must be adhered to for conversational use.
+# see https://huggingface.co/google/codegemma-7b-it#chat-template.
+chat = [
+    { "role": "user", "content": "Write a hello world program" },
+]
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for CodeGemma model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="google/codegemma-7b-it",
+                        help='The huggingface repo id for the CodeGemma model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="Write a hello world program",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load model in 4 bit,
+    # which converts the relevant layers in the model into INT4 format
+    # When running LLMs on Intel iGPUs for Windows users, we recommend setting `cpu_embedding=True` in the from_pretrained function.
+    # This will allow the memory-intensive embedding layer to utilize the CPU instead of iGPU.
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True,
+                                                 optimize_model=True,
+                                                 trust_remote_code=True,
+                                                 use_cache=True,
+                                                 modules_to_not_convert=["lm_head"])
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        chat[0]['content'] = args.prompt
+        prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        # ipex_llm model needs a warmup, then inference time can be accurate
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+
+        # start inference
+        st = time.time()
+        # if your selected model is capable of utilizing previous key/value attentions
+        # to enhance decoding speed, but has `"use_cache": false` in its model config,
+        # it is important to set `use_cache=True` explicitly in the `generate` function
+        # to obtain optimal performance with IPEX-LLM INT4 optimizations
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Prompt', '-'*20)
+        print(prompt)
+        print('-'*20, 'Output', '-'*20)
+        print(output_str)
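A note on the `cpu_embedding` recommendation in the comment above: it is an extra keyword argument to the same `from_pretrained` call, not a separate API. A minimal sketch of the Windows-iGPU variant, identical to the example except for the added flag (`model_path` refers to the same variable as in the script):

```python
from ipex_llm.transformers import AutoModelForCausalLM

# Same call as in generate.py, plus cpu_embedding=True so the
# memory-intensive embedding layer stays on the CPU while the rest
# of the INT4 model runs on the iGPU.
model = AutoModelForCausalLM.from_pretrained(model_path,
                                             load_in_4bit=True,
                                             optimize_model=True,
                                             trust_remote_code=True,
                                             use_cache=True,
                                             cpu_embedding=True,
                                             modules_to_not_convert=["lm_head"])
model = model.to('xpu')
```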
From 72c0ae7192c08f8cb4d301098bdd946473af2e8c Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 11:05:43 +0800
Subject: [PATCH 02/13] add README of codegemma example in GPU/HF-Transformers-AutoModels/

---
 .../Model/codagemma/README.md                 | 142 ++++++++++++++++++
 1 file changed, 142 insertions(+)
 create mode 100644 python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md

diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md
new file mode 100644
index 00000000000..bc72717def6
--- /dev/null
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md
@@ -0,0 +1,142 @@
+# CodeGemma
+In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on Google CodeGemma models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [google/codegemma-7b-it ](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
+
+## Requirements
+To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information.
+
+**Important: According to CodeGemma's requirement, please make sure you have installed `transformers==4.38.1` to run the example.**
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs.
+### 1. Install
+#### 1.1 Installation on Linux
+We suggest using conda to manage environment:
+```bash
+conda create -n llm python=3.11
+conda activate llm
+# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+# According to CodeGemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
+pip install transformers==4.38.1
+```
+
+#### 1.2 Installation on Windows
+We suggest using conda to manage environment:
+```bash
+conda create -n llm python=3.11 libuv
+conda activate llm
+# below command will use pip to install the Intel oneAPI Base Toolkit 2024.0
+pip install dpcpp-cpp-rt==2024.0.2 mkl-dpcpp==2024.0.0 onednn==2024.0.0
+
+# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+# According to CodeGemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
+pip install transformers==4.38.1
+```
+
+### 2. Configure OneAPI environment variables for Linux
+
+> [!NOTE]
+> Skip this step if you are running on Windows.
+
+This is a required step on Linux for APT or offline installed oneAPI. Skip this step for PIP-installed oneAPI.
+
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Runtime Configurations
+For optimal performance, it is recommended to set several environment variables. Please check out the suggestions based on your device.
+#### 3.1 Configurations for Linux
+<details>
+
+<summary>For Intel Arc™ A-Series Graphics and Intel Data Center GPU Flex Series</summary>
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+export SYCL_CACHE_PERSISTENT=1
+```
+
+</details>
+
+<details>
+
+<summary>For Intel Data Center GPU Max Series</summary>
+
+```bash
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+export SYCL_CACHE_PERSISTENT=1
+export ENABLE_SDP_FUSION=1
+```
+> Note: `libtcmalloc.so` can be installed by `conda install -c conda-forge -y gperftools=2.10`.
+</details>
+
+<details>
+
+<summary>For Intel iGPU</summary>
+
+```bash
+export SYCL_CACHE_PERSISTENT=1
+export BIGDL_LLM_XMX_DISABLED=1
+```
+
+</details>
+
+#### 3.2 Configurations for Windows
+<details>
+
+<summary>For Intel iGPU</summary>
+
+```cmd
+set SYCL_CACHE_PERSISTENT=1
+set BIGDL_LLM_XMX_DISABLED=1
+```
+
+</details>
+
+<details>
+
+<summary>For Intel Arc™ A-Series Graphics</summary>
+
+```cmd
+set SYCL_CACHE_PERSISTENT=1
+```
+
+</details>
+
+> [!NOTE]
+> For the first time that each model runs on Intel iGPU/Intel Arc™ A300-Series or Pro A60, it may take several minutes to compile.
+### 4. Running examples
+
+```
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the CodeGemma model to be downloaded, or the path to the huggingface checkpoint folder. The default is `'google/codegemma-7b-it'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). The default is `'Write a hello world program'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. The default is `32`.
+
+##### Sample Output
+##### [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+user
+Write a hello world program
+model
+
+-------------------- Output --------------------
+user
+Write a hello world program
+model
+```python
+print("Hello, world!")
+```
+
+This program will print the message "Hello, world!" to the console.
+```
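Before running the example above, it can be worth confirming that the XPU device is actually visible to PyTorch once the environment is set up; if the check below fails, revisit the oneAPI setup in step 2. A small sanity-check sketch (assuming `intel_extension_for_pytorch` was installed by the `ipex-llm[xpu]` package as described above):

```python
import torch
import intel_extension_for_pytorch as ipex  # registers the 'xpu' device with PyTorch

# Both calls should succeed on a correctly configured machine;
# a device_count() of 0 usually points to a driver or oneAPI env issue.
print(torch.xpu.is_available())
print(torch.xpu.device_count())
```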
From a31ef9b3bb35cf0c71ffc74c4de46173a4b63a42 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 13:54:25 +0800
Subject: [PATCH 03/13] add codegemma example in GPU/PyTorch-Models/

---
 .../Model/codagemma/generate.py               | 81 +++++++++++++++++++
 1 file changed, 81 insertions(+)
 create mode 100644 python/llm/example/GPU/PyTorch-Models/Model/codagemma/generate.py

diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codagemma/generate.py b/python/llm/example/GPU/PyTorch-Models/Model/codagemma/generate.py
new file mode 100644
index 00000000000..7c78ab8d4d3
--- /dev/null
+++ b/python/llm/example/GPU/PyTorch-Models/Model/codagemma/generate.py
@@ -0,0 +1,81 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import time
+import argparse
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from ipex_llm import optimize_model
+
+# The instruction-tuned models use a chat template that must be adhered to for conversational use.
+# see https://huggingface.co/google/codegemma-7b-it#chat-template.
+chat = [
+    { "role": "user", "content": "Write a hello world program" },
+]
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for CodeGemma model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="google/codegemma-7b-it",
+                        help='The huggingface repo id for the CodeGemma model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="Write a hello world program",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load model
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 trust_remote_code=True,
+                                                 torch_dtype='auto',
+                                                 low_cpu_mem_usage=True)
+
+    # With only one line to enable IPEX-LLM optimization on model
+    # When running LLMs on Intel iGPUs for Windows users, we recommend setting `cpu_embedding=True` in the optimize_model function.
+    # This will allow the memory-intensive embedding layer to utilize the CPU instead of iGPU.
+    model = optimize_model(model, modules_to_not_convert=["lm_head"])
+
+    model = model.to('xpu')
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        chat[0]['content'] = args.prompt
+        prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
+        # ipex_llm model needs a warmup, then inference time can be accurate
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+
+        # start inference
+        st = time.time()
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+        torch.xpu.synchronize()
+        end = time.time()
+        output = output.cpu()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Prompt', '-'*20)
+        print(prompt)
+        print('-'*20, 'Output', '-'*20)
+        print(output_str)
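The `optimize_model` call in the patch above applies ipex-llm's default symmetric INT4 quantization to every supported layer except `lm_head`. The function also accepts a `low_bit` argument for other precisions; the sketch below simply makes the default explicit (treat the exact set of supported `low_bit` values as dependent on the installed ipex-llm version):

```python
from ipex_llm import optimize_model

# Equivalent to the call in generate.py: 'sym_int4' is the default low-bit
# format; lm_head is skipped because converting it hurts output quality
# for codegemma-7b-it (see patch 12 below).
model = optimize_model(model, low_bit='sym_int4', modules_to_not_convert=["lm_head"])
```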
From 0e25c1ff3d852c04eb1c4fc617bd0a20e1a4017e Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 14:05:47 +0800
Subject: [PATCH 04/13] add readme of codegemma example in GPU/PyTorch-Models/

---
 .../PyTorch-Models/Model/codagemma/README.md  | 140 ++++++++++++++++++
 1 file changed, 140 insertions(+)
 create mode 100644 python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md

diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md b/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md
new file mode 100644
index 00000000000..b0130c91562
--- /dev/null
+++ b/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md
@@ -0,0 +1,140 @@
+# CodeGemma
+In this directory, you will find examples on how you could use IPEX-LLM `optimize_model` API to accelerate CodeGemma models. For illustration purposes, we utilize the [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
+
+## Requirements
+To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information.
+
+**Important: According to CodeGemma's requirement, please make sure you have installed `transformers==4.38.1` to run the example.**
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs.
+### 1. Install
+#### 1.1 Installation on Linux
+We suggest using conda to manage environment:
+```bash
+conda create -n llm python=3.11
+conda activate llm
+# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+# According to CodeGemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
+pip install transformers==4.38.1
+```
+
+#### 1.2 Installation on Windows
+We suggest using conda to manage environment:
+```bash
+conda create -n llm python=3.11 libuv
+conda activate llm
+# below command will use pip to install the Intel oneAPI Base Toolkit 2024.0
+pip install dpcpp-cpp-rt==2024.0.2 mkl-dpcpp==2024.0.0 onednn==2024.0.0
+
+# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+
+# According to CodeGemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
+pip install transformers==4.38.1
+```
+
+### 2. Configure OneAPI environment variables for Linux
+
+> [!NOTE]
+> Skip this step if you are running on Windows.
+
+This is a required step on Linux for APT or offline installed oneAPI. Skip this step for PIP-installed oneAPI.
+
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+### 3. Runtime Configurations
+For optimal performance, it is recommended to set several environment variables. Please check out the suggestions based on your device.
+#### 3.1 Configurations for Linux
+<details>
+
+<summary>For Intel Arc™ A-Series Graphics and Intel Data Center GPU Flex Series</summary>
+
+```bash
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+export SYCL_CACHE_PERSISTENT=1
+```
+
+</details>
+
+<details>
+
+<summary>For Intel Data Center GPU Max Series</summary>
+
+```bash
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+export SYCL_CACHE_PERSISTENT=1
+export ENABLE_SDP_FUSION=1
+```
+> Note: `libtcmalloc.so` can be installed by `conda install -c conda-forge -y gperftools=2.10`.
+</details>

+<details>
+
+<summary>For Intel iGPU</summary>
+
+```bash
+export SYCL_CACHE_PERSISTENT=1
+export BIGDL_LLM_XMX_DISABLED=1
+```
+
+</details>
+
+#### 3.2 Configurations for Windows
+<details>
+
+<summary>For Intel iGPU</summary>
+
+```cmd
+set SYCL_CACHE_PERSISTENT=1
+set BIGDL_LLM_XMX_DISABLED=1
+```
+
+</details>
+
+<details>
+
+<summary>For Intel Arc™ A-Series Graphics</summary>
+
+```cmd
+set SYCL_CACHE_PERSISTENT=1
+```
+
+</details>
+
+> [!NOTE]
+> For the first time that each model runs on Intel iGPU/Intel Arc™ A300-Series or Pro A60, it may take several minutes to compile.
+### 4. Running examples
+
+```bash
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+In the example, several arguments can be passed to satisfy your requirements:
+
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the CodeGemma model to be downloaded, or the path to the huggingface checkpoint folder. The default is `'google/codegemma-7b-it'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). The default is `'Write a hello world program'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. The default is `32`.
+
+#### 4.1 Sample Output
+#### [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+user
+Write a hello world program
+model
+
+-------------------- Output --------------------
+user
+Write a hello world program
+model
+```python
+print("Hello, world!")
+```
\ No newline at end of file
From 88bd85a6b286d18a3b617340b09b131f52672240 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 14:13:33 +0800
Subject: [PATCH 05/13] add codegemma example in CPU/HF-Transformers-AutoModels/

---
 .../Model/codegemma/generate.py               | 74 +++++++++++++++++++
 1 file changed, 74 insertions(+)
 create mode 100644 python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
new file mode 100644
index 00000000000..bf40b6a494a
--- /dev/null
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
@@ -0,0 +1,74 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import time
+import argparse
+
+from ipex_llm.transformers import AutoModelForCausalLM
+from transformers import AutoTokenizer
+
+# The instruction-tuned models use a chat template that must be adhered to for conversational use.
+# see https://huggingface.co/google/codegemma-7b-it#chat-template.
+chat = [
+    { "role": "user", "content": "Write a hello world program" },
+]
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for CodeGemma model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="google/codegemma-7b-it",
+                        help='The huggingface repo id for the CodeGemma model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="Write a hello world program",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load model in 4 bit,
+    # which converts the relevant layers in the model into INT4 format
+    model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                 load_in_4bit=True,
+                                                 trust_remote_code=True,
+                                                 use_cache=True,
+                                                 modules_to_not_convert=["lm_head"])
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        chat[0]['content'] = args.prompt
+        prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt")
+
+        # start inference
+        st = time.time()
+        # if your selected model is capable of utilizing previous key/value attentions
+        # to enhance decoding speed, but has `"use_cache": false` in its model config,
+        # it is important to set `use_cache=True` explicitly in the `generate` function
+        # to obtain optimal performance with IPEX-LLM INT4 optimizations
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+        end = time.time()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Prompt', '-'*20)
+        print(prompt)
+        print('-'*20, 'Output', '-'*20)
+        print(output_str)
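For context on the chat-template handling shared by all of these examples: `apply_chat_template` renders the message list into the exact prompt string the instruction-tuned checkpoint was trained on, and `add_generation_prompt=True` appends the opening of the model turn so generation continues in the right place. A standalone sketch (the rendered string in the comment reflects the Gemma-family template; the authoritative form is whatever your checkpoint's tokenizer produces):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/codegemma-7b-it")
chat = [{"role": "user", "content": "Write a hello world program"}]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
# For Gemma-family chat models this renders roughly as:
#   <bos><start_of_turn>user
#   Write a hello world program<end_of_turn>
#   <start_of_turn>model
print(prompt)
```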
From eb5070afe6a17860f823f0ab132a76fe522271c1 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 14:53:27 +0800
Subject: [PATCH 06/13] add readme of codegemma example in CPU/HF-Transformers-AutoModels/

---
 .../Model/codegemma/README.md                 | 70 +++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md
new file mode 100644
index 00000000000..0d7e0559739
--- /dev/null
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md
@@ -0,0 +1,70 @@
+# CodeGemma
+In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on CodeGemma models. For illustration purposes, we utilize the [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
+
+## 0. Requirements
+To run these examples with IPEX-LLM, we have some recommended requirements for your machine, please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations.
+### 1. Install
+We suggest using conda to manage environment:
+```bash
+conda create -n llm python=3.11
+conda activate llm
+
+# install ipex-llm with 'all' option
+pip install ipex-llm[all]
+
+# According to CodeGemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
+pip install transformers==4.38.1
+```
+
+### 2. Run
+```
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the CodeGemma model to be downloaded, or the path to the huggingface checkpoint folder. The default is `'google/codegemma-7b-it'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). The default is `'Write a hello world program'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. The default is `32`.
+
+> **Note**: When loading the model in 4-bit, IPEX-LLM converts linear layers in the model into INT4 format. In theory, a *X*B model saved in 16-bit will require approximately 2*X* GB of memory for loading, and ~0.5*X* GB memory for further inference.
+>
+> Please select the appropriate size of the CodeGemma model based on the capabilities of your machine.
+
+#### 2.1 Client
+On client Windows machines, it is recommended to run directly with full utilization of all cores:
+```powershell
+python ./generate.py
+```
+
+#### 2.2 Server
+For optimal performance on server, it is recommended to set several environment variables (refer to [here](../README.md#best-known-configuration-on-linux) for more information), and run the example with all the physical cores of a single socket.
+
+E.g. on Linux,
+```bash
+# set IPEX-LLM env variables
+source ipex-llm-init
+
+# e.g. for a server with 48 cores per socket
+export OMP_NUM_THREADS=48
+numactl -C 0-47 -m 0 python ./generate.py
+```
+
+#### 2.3 Sample Output
+#### [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+user
+Write a hello world program
+model
+
+-------------------- Output --------------------
+user
+Write a hello world program
+model
+```python
+print("Hello, world!")
+```
\ No newline at end of file
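Applying the rule of thumb from the note in the README above to the 7B checkpoint used here gives a quick feel for the savings; these are rough weight-only estimates (activations, KV cache, and the unconverted `lm_head` add to the real footprint):

```python
# README rule of thumb: an X-billion-parameter model needs ~2X GB in 16-bit
# and ~0.5X GB once its linear layers are converted to INT4.
x = 7  # codegemma-7b-it
print(f"16-bit load: ~{2 * x} GB, INT4 inference: ~{0.5 * x} GB")  # ~14 GB vs ~3.5 GB
```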
From 4932899ea50289cc800818ec35750940bde73a17 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 14:55:35 +0800
Subject: [PATCH 07/13] add codegemma example in CPU/PyTorch-Models/

---
 .../Model/codegemma/generate.py               | 67 +++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py

diff --git a/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py
new file mode 100644
index 00000000000..0252edd9b88
--- /dev/null
+++ b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py
@@ -0,0 +1,67 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import torch
+import time
+import argparse
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from ipex_llm import optimize_model
+
+# The instruction-tuned models use a chat template that must be adhered to for conversational use.
+# see https://huggingface.co/google/codegemma-7b-it#chat-template.
+chat = [
+    { "role": "user", "content": "Write a hello world program" },
+]
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for CodeGemma model')
+    parser.add_argument('--repo-id-or-model-path', type=str, default="google/codegemma-7b-it",
+                        help='The huggingface repo id for the CodeGemma model to be downloaded'
+                             ', or the path to the huggingface checkpoint folder')
+    parser.add_argument('--prompt', type=str, default="Write a hello world program",
+                        help='Prompt to infer')
+    parser.add_argument('--n-predict', type=int, default=32,
+                        help='Max tokens to predict')
+
+    args = parser.parse_args()
+    model_path = args.repo_id_or_model_path
+
+    # Load model
+    model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
+
+    # With only one line to enable IPEX-LLM optimization on model
+    model = optimize_model(model, modules_to_not_convert=["lm_head"])
+
+    # Load tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+    # Generate predicted tokens
+    with torch.inference_mode():
+        chat[0]['content'] = args.prompt
+        prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt")
+        st = time.time()
+        output = model.generate(input_ids,
+                                max_new_tokens=args.n_predict)
+        end = time.time()
+        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
+        print(f'Inference time: {end-st} s')
+        print('-'*20, 'Prompt', '-'*20)
+        print(prompt)
+        print('-'*20, 'Output', '-'*20)
+        print(output_str)
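Unlike the GPU examples in patches 01 and 03, this CPU example times the very first `generate()` call, which also pays one-time allocation and setup costs. For a steadier measurement you could mirror the GPU scripts and add an untimed warmup run before the clock starts; a sketch of that (hypothetical) tweak to the script above:

```python
# Hypothetical warmup before the timed run, mirroring the GPU examples:
# the first call absorbs one-time allocation and dispatch overhead.
_ = model.generate(input_ids, max_new_tokens=args.n_predict)

st = time.time()
output = model.generate(input_ids, max_new_tokens=args.n_predict)
end = time.time()
```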
From 3834726e4b69c0d02f41d3d335d2ba73fd9b2a3d Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 15:09:48 +0800
Subject: [PATCH 08/13] add readme of codegemma example in CPU/PyTorch-Models/

---
 .../PyTorch-Models/Model/codegemma/README.md  | 73 +++++++++++++++++++
 1 file changed, 73 insertions(+)
 create mode 100644 python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md

diff --git a/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md
new file mode 100644
index 00000000000..68a5146efa6
--- /dev/null
+++ b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md
@@ -0,0 +1,73 @@
+# CodeGemma
+In this directory, you will find examples on how you could use IPEX-LLM `optimize_model` API to accelerate CodeGemma models. For illustration purposes, we utilize the [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
+
+## Requirements
+To run these examples with IPEX-LLM, we have some recommended requirements for your machine, please refer to [here](../README.md#recommended-requirements) for more information.
+
+## Example: Predict Tokens using `generate()` API
+In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations.
+### 1. Install
+We suggest using conda to manage the Python environment. For more information about conda installation, please refer to [here](https://docs.conda.io/en/latest/miniconda.html#).
+
+After installing conda, create a Python environment for IPEX-LLM:
+```bash
+conda create -n llm python=3.11 # recommend to use Python 3.11
+conda activate llm
+
+# install ipex-llm with 'all' option
+pip install --pre --upgrade ipex-llm[all]
+
+# According to CodeGemma's requirement, please make sure you are using a stable version of Transformers, 4.38.1 or newer.
+pip install transformers==4.38.1
+```
+
+### 2. Run
+After setting up the Python environment, you could run the example by following the steps below.
+
+#### 2.1 Client
+On client Windows machines, it is recommended to run directly with full utilization of all cores:
+```powershell
+python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+More information about arguments can be found in [Arguments Info](#23-arguments-info) section. The expected output can be found in [Sample Output](#24-sample-output) section.
+
+#### 2.2 Server
+For optimal performance on server, it is recommended to set several environment variables (refer to [here](../README.md#best-known-configuration-on-linux) for more information), and run the example with all the physical cores of a single socket.
+
+E.g. on Linux,
+```bash
+# set IPEX-LLM env variables
+source ipex-llm-init
+
+# e.g. for a server with 48 cores per socket
+export OMP_NUM_THREADS=48
+numactl -C 0-47 -m 0 python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT
+```
+More information about arguments can be found in [Arguments Info](#23-arguments-info) section. The expected output can be found in [Sample Output](#24-sample-output) section.
+
+#### 2.3 Arguments Info
+In the example, several arguments can be passed to satisfy your requirements:
+
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the CodeGemma model to be downloaded, or the path to the huggingface checkpoint folder. The default is `'google/codegemma-7b-it'`.
+- `--prompt PROMPT`: argument defining the prompt to be inferred (with integrated prompt format for chat). The default is `'Write a hello world program'`.
+- `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. The default is `32`.
+
+#### 2.4 Sample Output
+#### [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it)
+```log
+Inference time: xxxx s
+-------------------- Prompt --------------------
+user
+Write a hello world program
+model
+
+-------------------- Output --------------------
+user
+Write a hello world program
+model
+```python
+print("Hello, world!")
+```
+
+This program will print the message "Hello, world!" to the console.
+```
From 0fea0b176dde2e477172c520ba1ed91b568cc9ed Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 15:31:09 +0800
Subject: [PATCH 09/13] fix typos

---
 .../Model/codegemma/README.md                    | 11 ++++++++---
 .../CPU/PyTorch-Models/Model/codegemma/README.md |  2 +-
 .../Model/codagemma/README.md                    | 10 ++++++----
 .../GPU/PyTorch-Models/Model/codagemma/README.md | 13 +++++++++----
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md
index 0d7e0559739..a959d278db6 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/README.md
@@ -7,9 +7,11 @@ To run these examples with IPEX-LLM, we have some recommended requirements for y
 ## Example: Predict Tokens using `generate()` API
 In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations.
 ### 1. Install
-We suggest using conda to manage environment:
+We suggest using conda to manage the Python environment. For more information about conda installation, please refer to [here](https://docs.conda.io/en/latest/miniconda.html#).
+
+After installing conda, create a Python environment for IPEX-LLM:
 ```bash
-conda create -n llm python=3.11
+conda create -n llm python=3.11 # recommend to use Python 3.11
 conda activate llm
 
 # install ipex-llm with 'all' option
@@ -67,4 +69,7 @@ Write a hello world program
 model
 ```python
 print("Hello, world!")
-```
\ No newline at end of file
+```
+
+This program will print the message "Hello, world!" to the console.
+```
diff --git a/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md
index 68a5146efa6..22bdabc5763 100644
--- a/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md
+++ b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/README.md
@@ -1,7 +1,7 @@
 # CodeGemma
 In this directory, you will find examples on how you could use IPEX-LLM `optimize_model` API to accelerate CodeGemma models. For illustration purposes, we utilize the [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
 
-## Requirements
+## 0. Requirements
 To run these examples with IPEX-LLM, we have some recommended requirements for your machine, please refer to [here](../README.md#recommended-requirements) for more information.
 
 ## Example: Predict Tokens using `generate()` API
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md
index bc72717def6..606284da360 100644
--- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md
@@ -1,7 +1,7 @@
 # CodeGemma
-In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on Google CodeGemma models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [google/codegemma-7b-it ](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
+In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on CodeGemma models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
 
-## Requirements
+## 0. Requirements
 To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information.
 
 **Important: According to CodeGemma's requirement, please make sure you have installed `transformers==4.38.1` to run the example.**
@@ -10,9 +10,11 @@
 In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs.
 ### 1. Install
 #### 1.1 Installation on Linux
-We suggest using conda to manage environment:
+We suggest using conda to manage the Python environment. For more information about conda installation, please refer to [here](https://docs.conda.io/en/latest/miniconda.html#).
+
+After installing conda, create a Python environment for IPEX-LLM:
 ```bash
-conda create -n llm python=3.11
+conda create -n llm python=3.11 # recommend to use Python 3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md b/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md
index b0130c91562..fa7363b3bda 100644
--- a/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md
+++ b/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md
@@ -1,7 +1,7 @@
 # CodeGemma
 In this directory, you will find examples on how you could use IPEX-LLM `optimize_model` API to accelerate CodeGemma models. For illustration purposes, we utilize the [google/codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) as reference CodeGemma models.
 
-## Requirements
+## 0. Requirements
 To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information.
 
 **Important: According to CodeGemma's requirement, please make sure you have installed `transformers==4.38.1` to run the example.**
@@ -10,9 +10,11 @@
 In the example [generate.py](./generate.py), we show a basic use case for a CodeGemma model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs.
 ### 1. Install
 #### 1.1 Installation on Linux
-We suggest using conda to manage environment:
+We suggest using conda to manage the Python environment. For more information about conda installation, please refer to [here](https://docs.conda.io/en/latest/miniconda.html#).
+
+After installing conda, create a Python environment for IPEX-LLM:
 ```bash
-conda create -n llm python=3.11
+conda create -n llm python=3.11 # recommend to use Python 3.11
 conda activate llm
 # below command will install intel_extension_for_pytorch==2.1.10+xpu as default
 pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
@@ -137,4 +139,7 @@ Write a hello world program
 model
 ```python
 print("Hello, world!")
-```
\ No newline at end of file
+```
+
+This program will print the message "Hello, world!" to the console.
+```
From facb127474ce6fc07d9da2d103711c7aed6ee67b Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Thu, 25 Apr 2024 16:52:22 +0800
Subject: [PATCH 10/13] fix filename typo

---
 .../Model/{codagemma => codegemma}/README.md                      | 0
 .../Model/{codagemma => codegemma}/generate.py                    | 0
 .../GPU/PyTorch-Models/Model/{codagemma => codegemma}/README.md   | 0
 .../GPU/PyTorch-Models/Model/{codagemma => codegemma}/generate.py | 0
 4 files changed, 0 insertions(+), 0 deletions(-)
 rename python/llm/example/GPU/HF-Transformers-AutoModels/Model/{codagemma => codegemma}/README.md (100%)
 rename python/llm/example/GPU/HF-Transformers-AutoModels/Model/{codagemma => codegemma}/generate.py (100%)
 rename python/llm/example/GPU/PyTorch-Models/Model/{codagemma => codegemma}/README.md (100%)
 rename python/llm/example/GPU/PyTorch-Models/Model/{codagemma => codegemma}/generate.py (100%)

diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/README.md
similarity index 100%
rename from python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/README.md
rename to python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/README.md
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
similarity index 100%
rename from python/llm/example/GPU/HF-Transformers-AutoModels/Model/codagemma/generate.py
rename to python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md b/python/llm/example/GPU/PyTorch-Models/Model/codegemma/README.md
similarity index 100%
rename from python/llm/example/GPU/PyTorch-Models/Model/codagemma/README.md
rename to python/llm/example/GPU/PyTorch-Models/Model/codegemma/README.md
diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codagemma/generate.py b/python/llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py
similarity index 100%
rename from python/llm/example/GPU/PyTorch-Models/Model/codagemma/generate.py
rename to python/llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py

From 7fd96450f7011e081a461de6d9c7111d29e931a6 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Tue, 7 May 2024 10:37:53 +0800
Subject: [PATCH 11/13] add codegemma in tables

---
 README.md                         | 1 +
 docs/readthedocs/source/index.rst | 7 +++++++
 2 files changed, 8 insertions(+)

diff --git a/README.md b/README.md
index 61eef9117e0..07bc914fddd 100644
--- a/README.md
+++ b/README.md
@@ -183,6 +183,7 @@ Over 50 models have been optimized/verified on `ipex-llm`, including *LLaMA/LLaM
 | DeciLM-7B | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/deciLM-7b) | [link](python/llm/example/GPU/HF-Transformers-AutoModels/Model/deciLM-7b) |
 | Deepseek | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/deepseek) | [link](python/llm/example/GPU/HF-Transformers-AutoModels/Model/deepseek) |
 | StableLM | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/stablelm) | [link](python/llm/example/GPU/HF-Transformers-AutoModels/Model/stablelm) |
+| CodeGemma | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma) | [link](python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma) |
 
 ## Get Support
 - Please report a bug or raise a feature request by opening a [Github Issue](https://github.com/intel-analytics/ipex-llm/issues)
diff --git a/docs/readthedocs/source/index.rst b/docs/readthedocs/source/index.rst
index 5a307f3f8b1..2a261ce4d03 100644
--- a/docs/readthedocs/source/index.rst
+++ b/docs/readthedocs/source/index.rst
@@ -580,6 +580,13 @@ Verified Models
             <a href="https://github.com/intel-analytics/ipex-llm/tree/main/python/llm/example/GPU/HF-Transformers-AutoModels/Model/stablelm">link</a></td>
         </tr>
 
+        <tr>
+          <td>CodeGemma</td>
+          <td>
+            <a href="https://github.com/intel-analytics/ipex-llm/tree/main/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma">link</a></td>
+          <td>
+            <a href="https://github.com/intel-analytics/ipex-llm/tree/main/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma">link</a></td>
+        </tr>
       </tbody>
     </table>
From aecd7945dc04e576b198315c43782dd942c33ad4 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Tue, 7 May 2024 10:57:11 +0800
Subject: [PATCH 12/13] add comments of lm_head

---
 .../CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py | 1 +
 .../llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py | 1 +
 .../GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py | 1 +
 .../llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py | 1 +
 4 files changed, 4 insertions(+)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
index bf40b6a494a..1593f83f842 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
@@ -42,6 +42,7 @@
 
     # Load model in 4 bit,
     # which converts the relevant layers in the model into INT4 format
+    # To fix the issue that the output of codegemma-7b-it is abnormal, skip the 'lm_head' module during optimization
     model = AutoModelForCausalLM.from_pretrained(model_path,
                                                  load_in_4bit=True,
                                                  trust_remote_code=True,
diff --git a/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py
index 0252edd9b88..e64d842b87a 100644
--- a/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py
+++ b/python/llm/example/CPU/PyTorch-Models/Model/codegemma/generate.py
@@ -45,6 +45,7 @@
     model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 
     # With only one line to enable IPEX-LLM optimization on model
+    # To fix the issue that the output of codegemma-7b-it is abnormal, skip the 'lm_head' module during optimization
     model = optimize_model(model, modules_to_not_convert=["lm_head"])
 
     # Load tokenizer
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
index 2689e70ccfb..76187912dc8 100644
--- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
@@ -42,6 +42,7 @@
 
     # Load model in 4 bit,
    # which converts the relevant layers in the model into INT4 format
+    # To fix the issue that the output of codegemma-7b-it is abnormal, skip the 'lm_head' module during optimization
     # When running LLMs on Intel iGPUs for Windows users, we recommend setting `cpu_embedding=True` in the from_pretrained function.
     # This will allow the memory-intensive embedding layer to utilize the CPU instead of iGPU.
     model = AutoModelForCausalLM.from_pretrained(model_path,
diff --git a/python/llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py b/python/llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py
index 7c78ab8d4d3..ce3ec819c52 100644
--- a/python/llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py
+++ b/python/llm/example/GPU/PyTorch-Models/Model/codegemma/generate.py
@@ -48,6 +48,7 @@
                                                  low_cpu_mem_usage=True)
 
     # With only one line to enable IPEX-LLM optimization on model
+    # To fix the issue that the output of codegemma-7b-it is abnormal, skip the 'lm_head' module during optimization
     # When running LLMs on Intel iGPUs for Windows users, we recommend setting `cpu_embedding=True` in the optimize_model function.
     # This will allow the memory-intensive embedding layer to utilize the CPU instead of iGPU.
     model = optimize_model(model, modules_to_not_convert=["lm_head"])

From e3c1b8101c9a22e670d6d46c34535764af257045 Mon Sep 17 00:00:00 2001
From: "Huang, Xinshengzi"
Date: Tue, 7 May 2024 11:01:45 +0800
Subject: [PATCH 13/13] remove comments of use_cache

---
 .../HF-Transformers-AutoModels/Model/codegemma/generate.py | 4 ----
 .../HF-Transformers-AutoModels/Model/codegemma/generate.py | 4 ----
 2 files changed, 8 deletions(-)

diff --git a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
index 1593f83f842..8e370f75214 100644
--- a/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
+++ b/python/llm/example/CPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
@@ -60,10 +60,6 @@
 
     # start inference
     st = time.time()
-    # if your selected model is capable of utilizing previous key/value attentions
-    # to enhance decoding speed, but has `"use_cache": false` in its model config,
-    # it is important to set `use_cache=True` explicitly in the `generate` function
-    # to obtain optimal performance with IPEX-LLM INT4 optimizations
     output = model.generate(input_ids,
                             max_new_tokens=args.n_predict)
     end = time.time()
diff --git a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
index 76187912dc8..9add373b080 100644
--- a/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
+++ b/python/llm/example/GPU/HF-Transformers-AutoModels/Model/codegemma/generate.py
@@ -67,10 +67,6 @@
 
     # start inference
    st = time.time()
-    # if your selected model is capable of utilizing previous key/value attentions
-    # to enhance decoding speed, but has `"use_cache": false` in its model config,
-    # it is important to set `use_cache=True` explicitly in the `generate` function
-    # to obtain optimal performance with IPEX-LLM INT4 optimizations
     output = model.generate(input_ids,
                             max_new_tokens=args.n_predict)
     torch.xpu.synchronize()