import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_accelerator,
    require_torch_gpu,
    require_torch_or_tf,
    torch_device,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        # Tokenizer kwargs: truncating the prompt must lead to a different generation
        test_str = "testing tokenizer kwargs. using truncation must result in a different generation."
        input_len = len(text_generator.tokenizer(test_str)["input_ids"])
        output_str, output_str_with_truncation = (
            text_generator(test_str, do_sample=False, return_full_text=False, min_new_tokens=1)[0]["generated_text"],
            text_generator(
                test_str,
                do_sample=False,
                return_full_text=False,
                min_new_tokens=1,
                truncation=True,
                max_length=input_len + 1,
            )[0]["generated_text"],
        )
        assert output_str != output_str_with_truncation  # results must differ because one prompt was truncated

        # Set a pad token so the two prompts can be batched together
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_torch
    def test_small_chat_model_pt(self):
        text_generator = pipeline(
            task="text-generation", model="rocketknight1/tiny-gpt2-with-chatml-template", framework="pt"
        )
        # Using `do_sample=False` to force deterministic output
        chat1 = [
            {"role": "system", "content": "This is a system message."},
            {"role": "user", "content": "This is a test"},
            {"role": "assistant", "content": "This is a reply"},
        ]
        chat2 = [
            {"role": "system", "content": "This is a system message."},
            {"role": "user", "content": "This is a second test"},
            {"role": "assistant", "content": "This is a reply"},
        ]
        outputs = text_generator(chat1, do_sample=False, max_new_tokens=10)
        expected_chat1 = chat1 + [
            {
                "role": "assistant",
                "content": " factors factors factors factors factors factors factors factors factors factors",
            }
        ]
        self.assertEqual(
            outputs,
            [
                {"generated_text": expected_chat1},
            ],
        )

        outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10)
        expected_chat2 = chat2 + [
            {
                "role": "assistant",
                "content": " factors factors factors factors factors factors factors factors factors factors",
            }
        ]

        self.assertEqual(
            outputs,
            [
                [{"generated_text": expected_chat1}],
                [{"generated_text": expected_chat2}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    @require_tf
    def test_small_chat_model_tf(self):
        text_generator = pipeline(
            task="text-generation", model="rocketknight1/tiny-gpt2-with-chatml-template", framework="tf"
        )
        # Using `do_sample=False` to force deterministic output
        chat1 = [
            {"role": "system", "content": "This is a system message."},
            {"role": "user", "content": "This is a test"},
            {"role": "assistant", "content": "This is a reply"},
        ]
        chat2 = [
            {"role": "system", "content": "This is a system message."},
            {"role": "user", "content": "This is a second test"},
            {"role": "assistant", "content": "This is a reply"},
        ]
        outputs = text_generator(chat1, do_sample=False, max_new_tokens=10)
        expected_chat1 = chat1 + [
            {
                "role": "assistant",
                "content": " factors factors factors factors factors factors factors factors factors factors",
            }
        ]
        self.assertEqual(
            outputs,
            [
                {"generated_text": expected_chat1},
            ],
        )

        outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10)
        expected_chat2 = chat2 + [
            {
                "role": "assistant",
                "content": " factors factors factors factors factors factors factors factors factors factors",
            }
        ]

        self.assertEqual(
            outputs,
            [
                [{"generated_text": expected_chat1}],
                [{"generated_text": expected_chat2}],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # An empty prompt only works when the tokenizer has a BOS token, or for a few
        # architectures (Pegasus, Git) that can handle it anyway; otherwise it should raise.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # The long-input checks below are only exercised on the PyTorch backend.
            return

        # Models that can handle arbitrarily long inputs are exempt from the overflow check below.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [
            "RwkvForCausalLM",
            "XGLMForCausalLM",
            "GPTNeoXForCausalLM",
            "FuyuForCausalLM",
        ]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # A prompt far longer than the model's maximum length should fail without special handling
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # The "hole" strategy cannot help when max_new_tokens alone exceeds the model's maximum length
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # device_map and torch_dtype passed through `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # device_map and torch_dtype passed directly as pipeline arguments
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Without an explicit torch_dtype, the model should load in float32
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_accelerator
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            device=torch_device,
            torch_dtype=torch.float16,
        )
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_accelerator
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # the beginning of the warning message checked in this test

        # Both `max_new_tokens` and `max_length` are set by the user -> a warning is logged
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # Only one of the two is set by the user -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)