import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
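
# Load the fine-tuned model and tokenizer from the Hugging Face Hub.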
model_name = 'redael/model_udc'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Some causal LM tokenizers ship without a pad token; fall back to EOS so
# that the padding requested in tokenizer(...) below does not raise.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Run on GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
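
# Chat handler for gr.ChatInterface: history arrives as (user, assistant)
# message pairs.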
def generate_response(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the full conversation: system prompt, prior turns, new message.
    messages = [{"role": "system", "content": system_message}]

    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})

    messages.append({"role": "user", "content": message})

    # Flatten the chat into "Role: content" lines and tokenize.
    prompt = "\n".join([f"{msg['role'].capitalize()}: {msg['content']}" for msg in messages])
    inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True, max_length=512).to(device)

    # Sample a continuation. max_new_tokens counts only generated tokens
    # (matching the "Max new tokens" slider), unlike max_length, which
    # would also count the prompt. early_stopping was dropped: it only
    # applies to beam search, not sampling.
    outputs = model.generate(
        inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_new_tokens=int(max_tokens),
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # The decoded text includes the prompt; keep only what follows the last
    # "Assistant:" marker and drop any hallucinated extra turns.
    response = response.split("Assistant:")[-1].strip()
    clean_lines = [
        line for line in response.split('\n')
        if "User:" not in line and "Assistant:" not in line
    ]
    response = ' '.join(clean_lines)

    # gr.ChatInterface expects the handler to return just the reply string;
    # it appends to the history itself.
    return response
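
# Wire the handler into a chat UI; the additional inputs are passed to
# generate_response after the message and history, in order.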
demo = gr.ChatInterface(
    fn=generate_response,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    title="Chatbot",
    description="Ask anything to the chatbot.",
)
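
# Start the Gradio server; demo.launch(share=True) would also create a
# temporary public link.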
if __name__ == "__main__":
    demo.launch()