Commit e462843

Add Qwen2 GGUF loading support (#31175)
* add qwen2 gguf support
* Update docs
* fix qwen2 tokenizer
* add qwen2 gguf test
* fix typo in qwen2 gguf test
* format code
* Remove mistral, clarify the error message
* format code
* add typing and update docstring
1 parent df848ac commit e462843

File tree

6 files changed: +82 -15 lines changed


docs/source/en/gguf.md

Lines changed: 1 addition & 0 deletions
@@ -63,6 +63,7 @@ For now the supported model architectures are the architectures that have been v
 
 - LLaMa
 - Mistral
+- Qwen2
 
 ## Example usage
 
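With this entry added to the docs, a Qwen2 GGUF checkpoint loads through the usual Auto classes. A minimal usage sketch, mirroring the integration test added at the bottom of this commit (repository name and `.gguf` filename are taken from that test):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen1.5-0.5B-Chat-GGUF"
gguf_file = "qwen1_5-0_5b-chat-q4_0.gguf"

# Both the tokenizer and the model are rebuilt from the single .gguf file.
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file, torch_dtype=torch.float16)

inputs = tokenizer("Hello", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```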

src/transformers/convert_slow_tokenizer.py

Lines changed: 5 additions & 3 deletions
@@ -401,9 +401,11 @@ def converted(self) -> Tokenizer:
 
 
 class Qwen2Converter(Converter):
-    def converted(self) -> Tokenizer:
-        vocab = self.original_tokenizer.encoder
-        merges = list(self.original_tokenizer.bpe_ranks.keys())
+    def converted(self, vocab: Dict[str, int] = None, merges: List[Tuple[str, str]] = None) -> Tokenizer:
+        if not vocab:
+            vocab = self.original_tokenizer.encoder
+        if not merges:
+            merges = list(self.original_tokenizer.bpe_ranks.keys())
 
         tokenizer = Tokenizer(
             BPE(
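The two new keyword arguments let the converter be reused when the vocab and merges do not live on a slow tokenizer object. A minimal sketch of both call paths, assuming the `Qwen/Qwen1.5-0.5B-Chat` checkpoint purely as an example (the second path is what `GGUFQwen2Converter` below relies on after reading vocab and merges out of a `.gguf` file):

```python
from transformers import Qwen2Tokenizer
from transformers.convert_slow_tokenizer import Qwen2Converter

slow = Qwen2Tokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")

# 1) Original path: vocab and merges are read off the slow tokenizer itself.
backend = Qwen2Converter(slow).converted()

# 2) New path: the caller supplies vocab and merges directly, so the slow
#    tokenizer's encoder/bpe_ranks attributes are never touched.
backend = Qwen2Converter(slow).converted(
    vocab=slow.encoder, merges=list(slow.bpe_ranks.keys())
)
```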

src/transformers/integrations/ggml.py

Lines changed: 56 additions & 8 deletions
@@ -25,7 +25,7 @@
 from tokenizers.models import BPE
 
 from .. import AddedToken
-from ..convert_slow_tokenizer import LlamaConverter
+from ..convert_slow_tokenizer import LlamaConverter, Qwen2Converter
 from ..utils import logging
 from ..utils.logging import tqdm
 
@@ -101,6 +101,21 @@
         "output.weight": "lm_head.weight",
         "output_norm": "model.norm",
     },
+    "qwen2": {
+        "token_embd": "model.embed_tokens",
+        "blk": "model.layers",
+        "ffn_up": "mlp.up_proj",
+        "ffn_down": "mlp.down_proj",
+        "ffn_gate": "mlp.gate_proj",
+        "ffn_norm": "post_attention_layernorm",
+        "attn_norm": "input_layernorm",
+        "attn_q": "self_attn.q_proj",
+        "attn_v": "self_attn.v_proj",
+        "attn_k": "self_attn.k_proj",
+        "attn_output": "self_attn.o_proj",
+        "output.weight": "lm_head.weight",
+        "output_norm": "model.norm",
+    },
 }
 
 
@@ -133,8 +148,19 @@
         "attention.layer_norm_rms_epsilon": "rms_norm_eps",
         "vocab_size": "vocab_size",
     },
+    "qwen2": {
+        "context_length": "max_position_embeddings",
+        "block_count": "num_hidden_layers",
+        "feed_forward_length": "intermediate_size",
+        "embedding_length": "hidden_size",
+        "rope.dimension_count": None,
+        "rope.freq_base": "rope_theta",
+        "attention.head_count": "num_attention_heads",
+        "attention.head_count_kv": "num_key_value_heads",
+        "attention.layer_norm_rms_epsilon": "rms_norm_eps",
+        "vocab_size": "vocab_size",
+    },
     "tokenizer": {
-        "ggml.model": "model_type",
         "ggml.bos_token_id": "bos_token_id",
         "ggml.eos_token_id": "eos_token_id",
         "ggml.unknown_token_id": "unk_token_id",
@@ -490,14 +516,15 @@ def __init__(self, dict_):
         for k, v in dict_.items():
             setattr(self, k, v)
 
-        if not hasattr(self, "tokens") or not hasattr(self, "scores"):
-            raise ValueError("tokens and scores need to be passed for a LLaMa tokenizer to be instantiated.")
-        else:
+        if not hasattr(self, "merges"):
+            if not hasattr(self, "tokens") or not hasattr(self, "scores"):
+                raise ValueError(
+                    "tokens and scores need to be passed for a LLaMa tokenizer without merges to be instantiated."
+                )
             tokens = self.tokens
             scores = self.scores
             vocab = {t: scores[i] for i, t in enumerate(tokens)}
 
-        if not hasattr(self, "merges"):
             logger.warning("Merges were not in checkpoint, building merges on the fly.")
             merges = []
             for merge, piece_score in tqdm(vocab.items()):
@@ -562,16 +589,37 @@ def decoder(self, replacement, add_prefix_space):
         return decoders.Sequence(sequence)
 
 
+class GGUFQwen2Converter(Qwen2Converter):
+    def __init__(self, tokenizer_dict):
+        self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict)
+
+    def converted(self) -> Tokenizer:
+        vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)}
+        merges = self.original_tokenizer.merges
+        tokenizer = super().converted(vocab, merges)
+
+        tokenizer.add_special_tokens(
+            [
+                AddedToken("<|endoftext|>", normalized=False, special=True),
+                AddedToken("<|im_start|>", normalized=False, special=True),
+                AddedToken("<|im_end|>", normalized=False, special=True),
+            ]
+        )
+        return tokenizer
+
+
 GGUF_TO_FAST_CONVERTERS = {
     "llama": GGUFLlamaConverter,
+    "qwen2": GGUFQwen2Converter,
 }
 
 
-def convert_gguf_tokenizer(tokenizer_dict) -> Tokenizer:
+def convert_gguf_tokenizer(architecture, tokenizer_dict) -> Tokenizer:
     """
     Utilities to convert a slow tokenizer instance in a fast tokenizer instance.
 
     Args:
+        architecture (`str`): The model architecture derived from gguf file.
         transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]):
             Instance of a slow tokenizer to convert in the backend tokenizer for
             [`~tokenization_utils_base.PreTrainedTokenizerFast`].
@@ -580,6 +628,6 @@ def convert_gguf_tokenizer(tokenizer_dict) -> Tokenizer:
         A instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a
         [`~tokenization_utils_base.PreTrainedTokenizerFast`]
     """
-    tokenizer_class_name = tokenizer_dict["tokenizer_type"]
+    tokenizer_class_name = architecture
     converter_class = GGUF_TO_FAST_CONVERTERS[tokenizer_class_name]
     return converter_class(tokenizer_dict).converted()
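For orientation, the new "qwen2" tensor table above is a prefix-to-prefix rename map between GGUF tensor names and transformers state-dict keys. The helper below is only an illustration of that idea; the function name and the plain substring substitution are assumptions for this sketch, not the code transformers uses internally:

```python
# A toy subset of the qwen2 tensor-name mapping added above.
QWEN2_TENSOR_MAP = {
    "token_embd": "model.embed_tokens",
    "blk": "model.layers",
    "attn_q": "self_attn.q_proj",
    "ffn_up": "mlp.up_proj",
    "output_norm": "model.norm",
}


def rename_gguf_key(name: str) -> str:
    """Rename one GGUF tensor key into its transformers equivalent (illustrative only)."""
    for gguf_part, hf_part in QWEN2_TENSOR_MAP.items():
        if gguf_part in name:
            name = name.replace(gguf_part, hf_part)
    return name


# "blk.0.attn_q.weight" becomes "model.layers.0.self_attn.q_proj.weight"
print(rename_gguf_key("blk.0.attn_q.weight"))
```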

src/transformers/models/qwen2/tokenization_qwen2_fast.py

Lines changed: 2 additions & 2 deletions
@@ -118,8 +118,8 @@ def __init__(
         )
 
         super().__init__(
-            vocab_file,
-            merges_file,
+            vocab_file=vocab_file,
+            merges_file=merges_file,
             tokenizer_file=tokenizer_file,
             unk_token=unk_token,
             bos_token=bos_token,

src/transformers/tokenization_utils_fast.py

Lines changed: 4 additions & 2 deletions
@@ -118,8 +118,10 @@ def __init__(self, *args, **kwargs):
             fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
         elif gguf_file is not None:
             # We need to convert a slow tokenizer to build the backend
-            tokenizer_dict = load_gguf_checkpoint(kwargs.get("vocab_file"))["tokenizer"]
-            fast_tokenizer = convert_gguf_tokenizer(tokenizer_dict)
+            gguf_param = load_gguf_checkpoint(kwargs.get("vocab_file"))
+            architecture = gguf_param["config"]["model_type"]
+            tokenizer_dict = gguf_param["tokenizer"]
+            fast_tokenizer = convert_gguf_tokenizer(architecture, tokenizer_dict)
         elif self.slow_tokenizer_class is not None:
             # We need to create and convert a slow tokenizer to build the backend
             slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs)
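Read together with the ggml.py change, the fast-tokenizer path now selects the converter by architecture instead of a `tokenizer_type` field. A rough standalone sketch of that flow; `convert_gguf_tokenizer` lives in `transformers.integrations.ggml` per this diff, while the import location of `load_gguf_checkpoint` and the local filename are assumptions:

```python
from transformers.integrations.ggml import convert_gguf_tokenizer
from transformers.modeling_gguf_pytorch_utils import load_gguf_checkpoint  # assumed location

# Path to an already-downloaded GGUF file (example name only).
gguf_param = load_gguf_checkpoint("qwen1_5-0_5b-chat-q4_0.gguf")
architecture = gguf_param["config"]["model_type"]  # e.g. "qwen2"
tokenizer_dict = gguf_param["tokenizer"]

# The architecture string picks GGUFQwen2Converter out of GGUF_TO_FAST_CONVERTERS.
fast_backend = convert_gguf_tokenizer(architecture, tokenizer_dict)
```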

tests/quantization/ggml/test_ggml.py

Lines changed: 14 additions & 0 deletions
@@ -31,6 +31,7 @@ class GgufIntegrationTests(unittest.TestCase):
     original_model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
     model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
     mistral_model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
+    qwen2_model_id = "Qwen/Qwen1.5-0.5B-Chat-GGUF"
 
     q4_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_0.gguf"
     q4_k_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
@@ -41,6 +42,7 @@ class GgufIntegrationTests(unittest.TestCase):
     q8_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q8_0.gguf"
 
     q4_0_mistral_model_id = "mistral-7b-instruct-v0.2.Q4_0.gguf"
+    q4_0_qwen2_model_id = "qwen1_5-0_5b-chat-q4_0.gguf"
 
     example_text = "Hello"
 
@@ -157,6 +159,18 @@ def test_mistral_q4_0(self):
         EXPECTED_TEXT = "Hello,\n\nI'm trying to create a"
         self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
 
+    def test_qwen2_q4_0(self):
+        tokenizer = AutoTokenizer.from_pretrained(self.qwen2_model_id, gguf_file=self.q4_0_qwen2_model_id)
+        model = AutoModelForCausalLM.from_pretrained(
+            self.qwen2_model_id, gguf_file=self.q4_0_qwen2_model_id, device_map="auto", torch_dtype=torch.float16
+        )
+
+        text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
+        out = model.generate(**text, max_new_tokens=10)
+
+        EXPECTED_TEXT = "Hello.jsoup\n\nI am a beginner"
+        self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
+
     def test_tokenization_xnli(self):
         import tqdm
         from datasets import load_dataset
