import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Determine device
device = "cuda" if torch.cuda.is_available() else "cpu"

model_id = "thrishala/mental_health_chatbot"
try:
    # Load the model in float16 with automatic device placement and CPU/disk
    # offload so it fits within limited GPU memory.
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        max_memory={0: "15GiB"} if torch.cuda.is_available() else None,
        offload_folder="offload",
    ).eval()

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.pad_token = tokenizer.eos_token  # model has no dedicated pad token
    tokenizer.model_max_length = 4096  # set to the model's actual context length
except Exception as e:
    print(f"Error loading model: {e}")
    raise SystemExit(1)
def generate_text_streaming(prompt, max_new_tokens=128):
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=4096,  # match the model's context length
    ).to(model.device)

    generated_tokens = []
    with torch.no_grad():
        for _ in range(max_new_tokens):
            # Generate a single token per step so partial text can be streamed.
            outputs = model.generate(
                **inputs,
                max_new_tokens=1,
                do_sample=False,
                eos_token_id=tokenizer.eos_token_id,
                return_dict_in_generate=True,
            )

            new_token = outputs.sequences[0, -1]
            new_token_id = new_token.item()
            generated_tokens.append(new_token_id)

            # Append the new token to the inputs for the next iteration.
            inputs = {
                "input_ids": torch.cat(
                    [inputs["input_ids"], new_token.unsqueeze(0).unsqueeze(0)], dim=-1
                ),
                "attention_mask": torch.cat(
                    [
                        inputs["attention_mask"],
                        torch.ones(1, 1, dtype=inputs["attention_mask"].dtype, device=model.device),
                    ],
                    dim=-1,
                ),
            }

            # Decode the accumulated tokens and yield the full text so far.
            current_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
            yield current_text

            if new_token_id == tokenizer.eos_token_id:
                break
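
# A minimal local sanity check (not part of the Gradio app): iterating over the
# generator yields progressively longer text. The prompt string below is purely
# illustrative.
#
#   for partial in generate_text_streaming("User: Hello\nAssistant:", max_new_tokens=16):
#       print(partial)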
def respond(message, history, system_message, max_tokens):
    # Build the prompt from the system message and the full chat history.
    prompt = f"{system_message}\n"
    for user_msg, bot_msg in history:
        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"

    # Keep track of the full response and yield it incrementally.
    full_response = ""
    try:
        for token_chunk in generate_text_streaming(prompt, max_tokens):
            full_response = token_chunk
            yield full_response
    except Exception as e:
        print(f"Error during generation: {e}")
        yield "An error occurred."
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly and helpful mental health chatbot.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=512, value=128, step=1, label="Max new tokens"),
    ],
)

if __name__ == "__main__":
    demo.launch()
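
# To run this Space locally, install the dependencies it relies on (gradio,
# transformers, torch, plus accelerate for device_map="auto") and execute the
# script directly, e.g. `python app.py` if that is the file name used.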