From 58441519e6c0a93839a011ed61ead28c42e0c637 Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Thu, 25 Apr 2024 18:24:15 +0800 Subject: [PATCH 1/7] initial commit --- python/llm/src/ipex_llm/ggml/quantize.py | 4 +++ .../llm/src/ipex_llm/transformers/convert.py | 21 +++++------ python/llm/src/ipex_llm/transformers/model.py | 36 +++++++++++-------- python/llm/src/ipex_llm/transformers/utils.py | 21 +++++++++-- 4 files changed, 54 insertions(+), 28 deletions(-) diff --git a/python/llm/src/ipex_llm/ggml/quantize.py b/python/llm/src/ipex_llm/ggml/quantize.py index bdaeccaf673..64f7eba597d 100644 --- a/python/llm/src/ipex_llm/ggml/quantize.py +++ b/python/llm/src/ipex_llm/ggml/quantize.py @@ -49,6 +49,10 @@ "q4_k": 27, "fp6": 29} +# mixed precison from llama.cpp +gguf_mixed_qtype = {"gguf_q4k_s": 101, + "gguf_q4k_m": 102} + _llama_quantize_type = {"q4_0": 2, "q4_1": 3, "q5_0": 8, diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py index d1ce9f43931..921e82dc5a8 100644 --- a/python/llm/src/ipex_llm/transformers/convert.py +++ b/python/llm/src/ipex_llm/transformers/convert.py @@ -42,7 +42,7 @@ import warnings import transformers import importlib.util -from ipex_llm.ggml.quantize import ggml_tensor_qtype +from ipex_llm.ggml.quantize import ggml_tensor_qtype, gguf_mixed_qtype from .utils import logger, get_cur_qtype_and_imatrix from typing import Union import numpy as np @@ -337,15 +337,6 @@ def _replace_with_low_bit_linear(model, qtype, modules_to_not_convert=None, if in_features % 64 != 0: # now our kernel requires in_features is a multiple of 64 continue - new_linear = LowBitLinear( - in_features, - out_features, - qtype, - module.bias is not None, - mp_group=mp_group, - enable_xetla=enable_xetla, - optimize_lm_head=optimize_lm_head - ) cur_qtype, cur_imatrix = get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, @@ -355,6 +346,16 @@ def _replace_with_low_bit_linear(model, qtype, modules_to_not_convert=None, if cur_qtype in [ggml_tensor_qtype["sym_int4"], ggml_tensor_qtype["asym_int4"]]: cur_qtype = ggml_tensor_qtype["sym_int8"] + + new_linear = LowBitLinear( + in_features, + out_features, + cur_qtype, + module.bias is not None, + mp_group=mp_group, + enable_xetla=enable_xetla, + optimize_lm_head=optimize_lm_head + ) device = module.weight.data.device # Copy the weights paramsLowBit = FP4Params(data=module.weight.data, diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py index e3c5448cd7f..0bcbb3be6a6 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -42,7 +42,7 @@ from .utils import extract_local_archive_file, \ load_state_dict, \ get_local_shard_files, load_imatrix_data -from ipex_llm.ggml.quantize import ggml_tensor_qtype +from ipex_llm.ggml.quantize import ggml_tensor_qtype, gguf_mixed_qtype from ipex_llm.utils.common import invalidInputError from ipex_llm.transformers.gguf.api import load_gguf_model import torch @@ -117,12 +117,11 @@ def from_pretrained(cls, Default to be ``False``. 
:param load_in_low_bit: str value, options are ``'sym_int4'``, ``'asym_int4'``, ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``, - ``'nf4'``, ``'fp4'``, ``'fp6'`` ``'fp8'``, ``'fp8_e4m3'``, - ``'fp8_e5m2'``, ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, - ``'gguf_iq1_s'``, ``'fp16'``, ``'bf16'``, ``'q4_k'`` or - ``'q6_k'``, ``'sym_int4'`` means symmetric int 4, - ``'asym_int4'`` means asymmetric int 4, - ``'nf4'`` means 4-bit NormalFloat, etc. + ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``, + ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, gguf_iq1_s'``, + ``'fp16'``, ``'bf16'``, ``'gguf_q4k_m'``, + ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means + asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc. Relevant low bit optimizations will be applied to the model. :param optimize_model: boolean value, Whether to further optimize the low_bit llm model. Default to be ``True``. @@ -139,8 +138,9 @@ def from_pretrained(cls, added to llama.cpp. :param model_hub: str value, options are ``'huggingface'`` and ``'modelscope'``, specify the model hub. Default to be ``'huggingface'``. - :param embedding_qtype: str value, options are ``'q2_k'`` now. Default to be None. - Relevant low bit optimizations will be applied to nn.Embedding layer. + :param embedding_qtype: str value, options are ``'q2_k'``, ``'q4_k'`` now. + Default to be None. Relevant low bit optimizations will be applied to + ``nn.Embedding`` layer. :param mixed_precision: boolean value, Whether to use mixed precision quantization. Default to be False. If set to True, we will use sym_int8 for lm_head when load_in_low_bit is sym_int4 or asym_int4. @@ -322,9 +322,11 @@ def from_pretrained(cls, "imatrix is needed.") cpu_embedding = kwargs.get("cpu_embedding", False) # for 2bit, default use embedding_quantization - if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \ - not cpu_embedding and embedding_qtype is None: - embedding_qtype = "q2_k" + if not cpu_embedding and embedding_qtype is None: + if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"]: + embedding_qtype = "q2_k" + elif q_k in ["gguf_q4k_s", "gguf_q4k_m"]: + embedding_qtype = "q4_k" if imatrix_file is not None: imatrix_data = load_imatrix_data(imatrix_file) kwargs["imatrix_data"] = imatrix_data @@ -376,12 +378,16 @@ def from_gguf(fpath: str, optimize_model: bool = True, @classmethod def load_convert(cls, q_k, optimize_model, *args, **kwargs): from .convert import ggml_convert_low_bit - invalidInputError(q_k in ggml_tensor_qtype, + invalidInputError(q_k in ggml_tensor_qtype or q_k in gguf_mixed_qtype, f"Unknown load_in_low_bit value: {q_k}, expected:" f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, " f"fp4, fp6, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, " - f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q6_k, mixed_fp4 or mixed_fp8.") - qtype = ggml_tensor_qtype[q_k] + f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q6_k, gguf_q4k_m, " + f"mixed_fp4 or mixed_fp8.") + if q_k in ggml_tensor_qtype: + qtype = ggml_tensor_qtype[q_k] + else: + qtype = gguf_mixed_qtype[q_k] # In case it needs a second try, # `from_pretrained`` may pop items out in dict diff --git a/python/llm/src/ipex_llm/transformers/utils.py b/python/llm/src/ipex_llm/transformers/utils.py index 49894a2bad4..5e891ed3089 100644 --- a/python/llm/src/ipex_llm/transformers/utils.py +++ b/python/llm/src/ipex_llm/transformers/utils.py @@ -41,7 +41,7 @@ # SOFTWARE. 
import os from transformers.modeling_utils import _add_variant -from ipex_llm.ggml.quantize import ggml_tensor_qtype +from ipex_llm.ggml.quantize import ggml_tensor_qtype, gguf_mixed_qtype from ..utils.common import invalidInputError from typing import Union, Optional import torch @@ -269,12 +269,15 @@ def module_name_process(full_module_name): return new_module_name, layer, cur_module + def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_config=None): cur_qtype = qtype + cur_imatrix = None if model_config is not None: model_type = getattr(model_config, "model_type", None) else: model_dtype = None + if qtype in [ggml_tensor_qtype["gguf_iq2_xxs"], ggml_tensor_qtype["gguf_iq2_xs"], ggml_tensor_qtype["gguf_iq1_s"]]: # For quantization which needs importance matrix @@ -306,7 +309,6 @@ def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_confi cur_imatrix = None if new_module_name == 'lm_head': cur_qtype = ggml_tensor_qtype['sym_int8'] - return cur_qtype, cur_imatrix elif qtype == ggml_tensor_qtype["q2_k"]: new_module_name, layer, cur_module = module_name_process(full_module_name) if cur_module == 'v' or (cur_module == 'down' and int(layer) in [0, 1, 10, 11]): @@ -319,8 +321,21 @@ def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_confi cur_imatrix = None if new_module_name == 'lm_head': cur_qtype = ggml_tensor_qtype['sym_int8'] + elif qtype > 100: + # gguf mixed precision + new_module_name, layer, cur_module = module_name_process(full_module_name) + num_hidden_layers = getattr(model_config, "num_hidden_layers", None) + if qtype in [gguf_mixed_qtype["gguf_q4k_s"], gguf_mixed_qtype["gguf_q4k_m"]] and \ + new_module_name == 'lm_head': + cur_qtype = ggml_tensor_qtype['q6_k'] + elif qtype == gguf_mixed_qtype["gguf_q4k_m"]: + if int(layer) < int(num_hidden_layers/2) and cur_module in ['v', 'down']: + cur_qtype = ggml_tensor_qtype['q6_k'] + else: + cur_qtype = ggml_tensor_qtype['q4_k'] else: - return qtype, None + pass + return cur_qtype, cur_imatrix def get_modelscope_hf_config(model_id_or_path: str, From 0f2b02419429699bd4afaf2650a7ed887d08c9a3 Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Thu, 25 Apr 2024 18:51:16 +0800 Subject: [PATCH 2/7] UPDATE --- python/llm/src/ipex_llm/transformers/convert.py | 11 ++++++++--- python/llm/src/ipex_llm/transformers/model.py | 11 +++++++++-- python/llm/src/ipex_llm/transformers/utils.py | 1 - 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py index 921e82dc5a8..b8b18e3f567 100644 --- a/python/llm/src/ipex_llm/transformers/convert.py +++ b/python/llm/src/ipex_llm/transformers/convert.py @@ -767,9 +767,14 @@ def ggml_convert_low_bit(model, qtype, optimize_model=True, embedding_qtype=None, enable_xetla=False, mixed_precision=False): - logger.info(f"Converting the current model to " - f"{list(ggml_tensor_qtype.keys())[list(ggml_tensor_qtype.values()).index(qtype)]} " - f"format......") + if qtype in ggml_tensor_qtype.values(): + logger.info(f"Converting the current model to " + f"{list(ggml_tensor_qtype.keys())[list(ggml_tensor_qtype.values()).index(qtype)]} " + f"format......") + else: + logger.info(f"Converting the current model to " + f"{list(gguf_mixed_qtype.keys())[list(gguf_mixed_qtype.values()).index(qtype)]} " + f"format......") modules_to_not_convert = [] if modules_to_not_convert is None else modules_to_not_convert # using ipex_llm optimizer before changing to bigdl linear diff 
--git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py index 0bcbb3be6a6..787f5fe54a3 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -556,17 +556,24 @@ def load_low_bit(cls, " with load_in_4bit or load_in_low_bit to get a low-bit model , and " " serialize the model using save_low_bit first.") - invalidInputError(bigdl_transformers_low_bit in ggml_tensor_qtype, + invalidInputError(bigdl_transformers_low_bit in ggml_tensor_qtype or \ + bigdl_transformers_low_bit in gguf_mixed_qtype, f"Unknown bigdl_transformers_low_bit value: {bigdl_transformers_low_bit}," f" expected: sym_int4, asym_int4, sym_int5, asym_int5 or sym_int8.") # set default optimize_model=True optimize_model = kwargs.pop("optimize_model", True) - qtype = ggml_tensor_qtype[bigdl_transformers_low_bit] + if bigdl_transformers_low_bit in ggml_tensor_qtype: + qtype = ggml_tensor_qtype[bigdl_transformers_low_bit] + else: + qtype = gguf_mixed_qtype[bigdl_transformers_low_bit] if bigdl_transformers_low_bit in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"] and \ not cpu_embedding: embedding_qtype = "q2_k" + elif bigdl_transformers_low_bit in ["gguf_q4k_s", "gguf_q4k_m"] and \ + not cpu_embedding: + embedding_qtype = "q4_k" if embedding_qtype is not None: embedding_qtype = ggml_tensor_qtype[embedding_qtype] diff --git a/python/llm/src/ipex_llm/transformers/utils.py b/python/llm/src/ipex_llm/transformers/utils.py index 5e891ed3089..22a8c8d30c9 100644 --- a/python/llm/src/ipex_llm/transformers/utils.py +++ b/python/llm/src/ipex_llm/transformers/utils.py @@ -269,7 +269,6 @@ def module_name_process(full_module_name): return new_module_name, layer, cur_module - def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_config=None): cur_qtype = qtype cur_imatrix = None From e8ba5c095d1dea78bfdae15bf55ef307a12050e3 Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Thu, 25 Apr 2024 18:55:08 +0800 Subject: [PATCH 3/7] fix style --- python/llm/src/ipex_llm/transformers/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py index 787f5fe54a3..1490cb33879 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -321,7 +321,7 @@ def from_pretrained(cls, "For gguf_iq2 and gguf_iq1 quantization," "imatrix is needed.") cpu_embedding = kwargs.get("cpu_embedding", False) - # for 2bit, default use embedding_quantization + # for iq2/k-quants, default use embedding_quantization if not cpu_embedding and embedding_qtype is None: if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"]: embedding_qtype = "q2_k" @@ -556,7 +556,7 @@ def load_low_bit(cls, " with load_in_4bit or load_in_low_bit to get a low-bit model , and " " serialize the model using save_low_bit first.") - invalidInputError(bigdl_transformers_low_bit in ggml_tensor_qtype or \ + invalidInputError(bigdl_transformers_low_bit in ggml_tensor_qtype or bigdl_transformers_low_bit in gguf_mixed_qtype, f"Unknown bigdl_transformers_low_bit value: {bigdl_transformers_low_bit}," f" expected: sym_int4, asym_int4, sym_int5, asym_int5 or sym_int8.") From 97152624d98626927aa2c3fc69ca4c04235f333f Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Thu, 25 Apr 2024 19:21:44 +0800 Subject: [PATCH 4/7] fix style --- python/llm/src/ipex_llm/transformers/convert.py | 6 ++++-- 
python/llm/src/ipex_llm/transformers/model.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/python/llm/src/ipex_llm/transformers/convert.py b/python/llm/src/ipex_llm/transformers/convert.py index b8b18e3f567..639a2154d75 100644 --- a/python/llm/src/ipex_llm/transformers/convert.py +++ b/python/llm/src/ipex_llm/transformers/convert.py @@ -768,12 +768,14 @@ def ggml_convert_low_bit(model, qtype, optimize_model=True, enable_xetla=False, mixed_precision=False): if qtype in ggml_tensor_qtype.values(): + index = list(ggml_tensor_qtype.values()).index(qtype) logger.info(f"Converting the current model to " - f"{list(ggml_tensor_qtype.keys())[list(ggml_tensor_qtype.values()).index(qtype)]} " + f"{list(ggml_tensor_qtype.keys())[index]} " f"format......") else: + index = list(gguf_mixed_qtype.values()).index(qtype) logger.info(f"Converting the current model to " - f"{list(gguf_mixed_qtype.keys())[list(gguf_mixed_qtype.values()).index(qtype)]} " + f"{list(gguf_mixed_qtype.keys())[index]} " f"format......") modules_to_not_convert = [] if modules_to_not_convert is None else modules_to_not_convert diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py index 1490cb33879..8b624d2a09a 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -323,7 +323,7 @@ def from_pretrained(cls, cpu_embedding = kwargs.get("cpu_embedding", False) # for iq2/k-quants, default use embedding_quantization if not cpu_embedding and embedding_qtype is None: - if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"]: + if q_k in ["gguf_iq2_xxs", "gguf_iq2_xs", "gguf_iq1_s", "q2_k"]: embedding_qtype = "q2_k" elif q_k in ["gguf_q4k_s", "gguf_q4k_m"]: embedding_qtype = "q4_k" From 9d47916b3f42e1ab31ca6891f45015945a5cbd1b Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Fri, 26 Apr 2024 11:31:18 +0800 Subject: [PATCH 5/7] add gguf_q4k_s --- python/llm/src/ipex_llm/ggml/quantize.py | 1 + python/llm/src/ipex_llm/transformers/low_bit_linear.py | 3 ++- python/llm/src/ipex_llm/transformers/model.py | 4 ++-- python/llm/src/ipex_llm/transformers/utils.py | 5 +++++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/python/llm/src/ipex_llm/ggml/quantize.py b/python/llm/src/ipex_llm/ggml/quantize.py index 64f7eba597d..3eaf668e58e 100644 --- a/python/llm/src/ipex_llm/ggml/quantize.py +++ b/python/llm/src/ipex_llm/ggml/quantize.py @@ -47,6 +47,7 @@ "gguf_iq1_m": 25, "q6_k": 26, "q4_k": 27, + "q5_k": 28, "fp6": 29} # mixed precison from llama.cpp diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py index af8eb04dcdf..f129093fd60 100644 --- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py +++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py @@ -79,6 +79,7 @@ IQ1_S = ggml_tensor_qtype["gguf_iq1_s"] Q4_K = ggml_tensor_qtype["q4_k"] Q6_K = ggml_tensor_qtype["q6_k"] +Q5_K = ggml_tensor_qtype["q5_k"] # For sym_int4 @@ -219,7 +220,7 @@ def ggml_convert_qtype(tensor: torch.Tensor, qtype: int, if not convert_shape_only and device != 'meta': dst = ctypes.c_void_p(dst_tensor.data.data_ptr()) hist = (ctypes.c_int64 * 16)() - if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q4_K, Q6_K]: + if qtype not in [IQ2_XXS, IQ2_XS, Q2_K, IQ1_S, Q4_K, Q6_K, Q5_K]: ggml.ggml_quantize_tensor(src, dst, qtype, n, k, hist) else: if imatrix is not None: diff --git a/python/llm/src/ipex_llm/transformers/model.py 
b/python/llm/src/ipex_llm/transformers/model.py index 8b624d2a09a..03577641cda 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -118,8 +118,8 @@ def from_pretrained(cls, :param load_in_low_bit: str value, options are ``'sym_int4'``, ``'asym_int4'``, ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``, ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``, - ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, gguf_iq1_s'``, - ``'fp16'``, ``'bf16'``, ``'gguf_q4k_m'``, + ``'fp6'``, ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, + ``'gguf_iq1_s'``, ``'gguf_q4k_m'``, ``'fp16'``, ``'bf16'``, ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc. Relevant low bit optimizations will be applied to the model. diff --git a/python/llm/src/ipex_llm/transformers/utils.py b/python/llm/src/ipex_llm/transformers/utils.py index 22a8c8d30c9..74e10244042 100644 --- a/python/llm/src/ipex_llm/transformers/utils.py +++ b/python/llm/src/ipex_llm/transformers/utils.py @@ -332,6 +332,11 @@ def get_cur_qtype_and_imatrix(qtype, full_module_name, imatrix_data, model_confi cur_qtype = ggml_tensor_qtype['q6_k'] else: cur_qtype = ggml_tensor_qtype['q4_k'] + elif qtype == gguf_mixed_qtype["gguf_q4k_s"]: + if int(layer) < int(num_hidden_layers/8) and cur_module in ['v', 'down']: + cur_qtype = ggml_tensor_qtype['q5_k'] + else: + cur_qtype = ggml_tensor_qtype['q4_k'] else: pass return cur_qtype, cur_imatrix From 40af3d7f50a1276ce19b66a764e778a456568f85 Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Fri, 26 Apr 2024 11:37:16 +0800 Subject: [PATCH 6/7] update comment --- python/llm/src/ipex_llm/transformers/model.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py index 03577641cda..6847ee8cddc 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -118,8 +118,9 @@ def from_pretrained(cls, :param load_in_low_bit: str value, options are ``'sym_int4'``, ``'asym_int4'``, ``'sym_int5'``, ``'asym_int5'``, ``'sym_int8'``, ``'nf3'``, ``'nf4'``, ``'fp4'``, ``'fp8'``, ``'fp8_e4m3'``, ``'fp8_e5m2'``, - ``'fp6'``, ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, - ``'gguf_iq1_s'``, ``'gguf_q4k_m'``, ``'fp16'``, ``'bf16'``, + ``'fp6'``, ``'gguf_iq2_xxs'``, ``'gguf_iq2_xs'``, + ``'gguf_iq1_s'``, ``'gguf_q4k_m'``, ``'gguf_q4k_s'``, + ``'fp16'``, ``'bf16'``, ``'sym_int4'`` means symmetric int 4, ``'asym_int4'`` means asymmetric int 4, ``'nf4'`` means 4-bit NormalFloat, etc. Relevant low bit optimizations will be applied to the model. 
@@ -381,9 +382,15 @@ def load_convert(cls, q_k, optimize_model, *args, **kwargs): invalidInputError(q_k in ggml_tensor_qtype or q_k in gguf_mixed_qtype, f"Unknown load_in_low_bit value: {q_k}, expected:" f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, " +<<<<<<< HEAD f"fp4, fp6, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, " f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q6_k, gguf_q4k_m, " f"mixed_fp4 or mixed_fp8.") +======= + f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, " + f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q5_k, q6_k, " + f"gguf_q4k_s, gguf_q4k_m, mixed_fp4 or mixed_fp8.") +>>>>>>> dfdd719a (update comment) if q_k in ggml_tensor_qtype: qtype = ggml_tensor_qtype[q_k] else: From 5f6b47f08091de9e6f6d10221b2102c7c06e8212 Mon Sep 17 00:00:00 2001 From: rnwang04 Date: Fri, 17 May 2024 13:54:27 +0800 Subject: [PATCH 7/7] fix --- python/llm/src/ipex_llm/transformers/model.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/python/llm/src/ipex_llm/transformers/model.py b/python/llm/src/ipex_llm/transformers/model.py index 6847ee8cddc..00e7a2f3086 100644 --- a/python/llm/src/ipex_llm/transformers/model.py +++ b/python/llm/src/ipex_llm/transformers/model.py @@ -382,15 +382,9 @@ def load_convert(cls, q_k, optimize_model, *args, **kwargs): invalidInputError(q_k in ggml_tensor_qtype or q_k in gguf_mixed_qtype, f"Unknown load_in_low_bit value: {q_k}, expected:" f" sym_int4, asym_int4, sym_int5, asym_int5, sym_int8, nf3, nf4, " -<<<<<<< HEAD f"fp4, fp6, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, " - f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q6_k, gguf_q4k_m, " - f"mixed_fp4 or mixed_fp8.") -======= - f"fp4, fp8, fp8_e4m3, fp8_e5m2, fp16, bf16, gguf_iq2_xxs, " f"gguf_iq2_xs, gguf_iq1_s, q2_k, q4_k, q5_k, q6_k, " f"gguf_q4k_s, gguf_q4k_m, mixed_fp4 or mixed_fp8.") ->>>>>>> dfdd719a (update comment) if q_k in ggml_tensor_qtype: qtype = ggml_tensor_qtype[q_k] else:
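Taken together, the seven patches add two llama.cpp-style mixed-precision recipes, gguf_q4k_s and gguf_q4k_m (ids 101 and 102 in the new gguf_mixed_qtype map). Below is a minimal usage sketch of how the new options would be exercised through the entry points this series touches (from_pretrained with load_in_low_bit, save_low_bit, load_low_bit); it is an illustration only, and the checkpoint name is a placeholder rather than anything referenced by the patches.

import torch
from transformers import AutoTokenizer
from ipex_llm.transformers import AutoModelForCausalLM

model_path = "meta-llama/Llama-2-7b-chat-hf"  # hypothetical example checkpoint

# gguf_q4k_m: lm_head is quantized to q6_k, the v/down projections of the
# first half of the decoder layers to q6_k, and everything else to q4_k.
# gguf_q4k_s instead uses q5_k for v/down in the first 1/8 of the layers.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    load_in_low_bit="gguf_q4k_m",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# The mixed-precision model serializes and reloads like any other low-bit
# checkpoint; PATCH 2/7 extends load_low_bit to accept the new identifiers.
model.save_low_bit("./llama-2-7b-gguf-q4k-m")
model = AutoModelForCausalLM.load_low_bit("./llama-2-7b-gguf-q4k-m",
                                          trust_remote_code=True)

with torch.inference_mode():
    inputs = tokenizer("What is mixed-precision quantization?", return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))

Unless cpu_embedding is set, both recipes also quantize the nn.Embedding layer to q4_k by default, mirroring the q2_k embedding default already applied to the 2-bit GGUF types.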