commit 6a75470 (1 parent: 060a66b)
tests/quantization/ggml/test_ggml.py
@@ -691,7 +691,7 @@ def test_starcoder2_q6_k(self):
         text = tokenizer(example_function_text, return_tensors="pt").to(torch_device)
         out = model.generate(**text, max_new_tokens=10)

-        EXPECTED_TEXT = 'def print_hello_world():\n\tprint("Hello World")\n\ndef print'
+        EXPECTED_TEXT = 'def print_hello_world():\n print("Hello World")\n\ndef print'
         self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)

     def test_tokenization_xnli(self):
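Note: the hunk only updates the hard-coded expected completion (a tab-indented "print" becomes space-indented), because the test compares the decoded generation against EXPECTED_TEXT character for character. For context, below is a minimal sketch of the GGUF integration-test pattern used in tests/quantization/ggml/test_ggml.py; the repo id and GGUF filename are hypothetical placeholders, not the ones pinned by test_starcoder2_q6_k.

# Sketch of how the GGUF tests load a quantized checkpoint and compare a short
# greedy generation against a hard-coded string. The repo id and filename are
# placeholders; the real test pins a specific Q6_K GGUF artifact.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "example-org/starcoder2-3b-GGUF"   # placeholder, not from the diff
gguf_file = "starcoder2-3b.Q6_K.gguf"        # placeholder, not from the diff

tokenizer = AutoTokenizer.from_pretrained(repo_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(repo_id, gguf_file=gguf_file)

example_function_text = "def print_hello_world():"
inputs = tokenizer(example_function_text, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=10)

# The assertion is exact string equality, so a tab-vs-space difference in the
# generated indentation is enough to make the test fail.
print(repr(tokenizer.decode(out[0], skip_special_tokens=True)))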