import os

from transformers import pipeline
from engine.utils import safe_log
from huggingface_hub import login

# Authenticate with the Hugging Face Hub using the token from the environment
login(token=os.getenv("HF_TOKEN"))

generator = pipeline(
    "text-generation",
    model="google/gemma-7b-it",
)

def generate_response(prompt, persona):
    name = persona.get("name", "The HCP")
    full_prompt = f"{name} is a healthcare professional. Respond to this prompt:\n{prompt}"
    try:
        result = generator(full_prompt, max_new_tokens=300, do_sample=True, temperature=0.7)
        # Log the raw pipeline output for debugging
        safe_log("Gemma raw result", str(result))
        safe_log("Gemma result type", str(type(result)))
        safe_log("Gemma result[0] type", str(type(result[0])))
        safe_log("Gemma result[0] keys", str(result[0].keys()) if isinstance(result[0], dict) else "Not a dict")
        # The text-generation pipeline returns a list of dicts; the output text
        # (prompt included, by default) lives under the "generated_text" key.
        generated = result[0]["generated_text"]
        # Strip the prompt so only the model's continuation is returned
        reply = generated[len(full_prompt):].strip()
        return f"{name} says: '{reply}'"
    except Exception as e:
        safe_log("Gemma model error", str(e))
        return f"{name} says: 'I’m having trouble formulating a response right now.'"