# Requires: pip install gradio transformers torch
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load DialoGPT-small, a GPT-2 model fine-tuned for multi-turn dialogue.
model_name = "microsoft/DialoGPT-small"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# Conversation state. DialoGPT has no built-in system-prompt mechanism;
# this string simply seeds the generation context.
system_prompt = "You are a helpful assistant."
chat_history = system_prompt

def generate_response(prompt, max_new_tokens=50, temperature=0.8):
    global chat_history
    # Cue the model with a trailing "Assistant:" so it completes that turn.
    input_text = chat_history + " User: " + prompt + " Assistant:"
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,  # cap generated tokens, not total length
        temperature=temperature,
        do_sample=True,  # temperature has no effect without sampling
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,  # silence the missing-pad warning
    )
    # Decode only the newly generated tokens; generate() echoes the prompt.
    response = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)

    # Append the exchange so later turns see the full conversation.
    chat_history += f" User: {prompt} Assistant: {response}"

    return response
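
# Hedged sketch (not part of the original script): DialoGPT's GPT-2 backbone
# has a 1024-token context window, and chat_history grows without bound, so a
# long session will eventually overflow it. trim_history is a hypothetical
# helper showing one way to keep only the most recent tokens; calling
# chat_history = trim_history(chat_history) before encoding would guard this.
def trim_history(history, max_tokens=900):
    ids = tokenizer.encode(history)
    if len(ids) > max_tokens:
        # Keep the tail of the conversation, dropping the oldest tokens.
        ids = ids[-max_tokens:]
        history = tokenizer.decode(ids, skip_special_tokens=True)
    return history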

iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(),
    outputs="text",
)

iface.launch()
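
# Alternative worth noting (assumes a Gradio release that ships
# gr.ChatInterface): Gradio's purpose-built chat UI passes the running
# history to the callback, which would remove the global chat_history:
#
#     def respond(message, history):
#         return generate_response(message)
#
#     gr.ChatInterface(fn=respond).launch()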