# Spaces: Runtime error (status banner captured from the hosted HuggingFace Spaces page)
import os

import gradio as gr
import requests
from transformers import AutoModelForCausalLM, AutoTokenizer
def generate_answer(question, model_name, temperature):
    """Answer *question* with the selected backend model.

    Parameters
    ----------
    question : str
        The user's free-text question.
    model_name : str
        Either ``"phi1.5"`` (local Hugging Face model) or
        ``"Google Gemini"`` (REST API call).
    temperature : float
        Sampling temperature forwarded to the chosen backend.

    Returns
    -------
    str
        The generated answer, or a human-readable error message.
    """
    prompt = "Answer the following question: " + question
    if model_name == "phi1.5":
        # BUG FIX: "phi1.5" is not a valid Hugging Face repo id and raises at
        # load time; the correct id is "microsoft/phi-1_5".
        # NOTE(review): the tokenizer is reloaded on every call — cache it at
        # module level if latency matters. `llm_model` is a module-level
        # global created under the __main__ guard.
        tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        output = llm_model.generate(
            input_ids,
            max_length=512,
            do_sample=True,
            top_k=50,
            top_p=0.9,
            temperature=temperature,
        )[0]
        answer = tokenizer.decode(output, skip_special_tokens=True)
        # Trim anything after the model's end-of-text sentinel, if present.
        end_of_text_index = answer.find("(end of text)")
        if end_of_text_index > -1:
            answer = answer[:end_of_text_index]
        return answer
    elif model_name == "Google Gemini":
        url = (
            "https://generativelanguage.googleapis.com/v1beta/models/"
            "gemini-2.0-flash:generateContent"
        )
        # SECURITY FIX: the Gemini API key was hard-coded in source (a leaked
        # secret). Read it from the environment instead — and revoke the old
        # key, since it is already public.
        headers = {
            "Content-Type": "application/json",
            "X-goog-api-key": os.environ.get("GEMINI_API_KEY", ""),
        }
        data = {
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {"temperature": temperature},
        }
        # Timeout so a hung API call cannot freeze the UI callback forever.
        response = requests.post(url, headers=headers, json=data, timeout=60)
        if response.status_code == 200:
            try:
                return response.json()["candidates"][0]["content"]["parts"][0]["text"]
            except (KeyError, IndexError, TypeError, ValueError):
                # Narrowed from `except Exception`: only the shape/JSON errors
                # this lookup can actually produce.
                return "Error: Unexpected Gemini API response format."
        else:
            return f"Error: Gemini API call failed ({response.status_code})"
    else:
        return "Invalid model selection."
def chatbot(question, model_name, temperature):
    """UI-facing entry point; delegates straight to generate_answer."""
    answer = generate_answer(question, model_name, temperature)
    return answer
if __name__ == "__main__":
    # Load the local phi model once at startup. BUG FIX: "phi1.5" alone is
    # not a valid Hugging Face repo id (it raises at load time — the likely
    # cause of the Space's "Runtime error"); the correct id is
    # "microsoft/phi-1_5".
    llm_model = AutoModelForCausalLM.from_pretrained(
        "microsoft/phi-1_5", trust_remote_code=True
    )
    with gr.Blocks(theme="default") as demo:
        gr.Markdown(
            "# I am your AI Health Assistance 🏥\n"
            "Ask general health related questions to the AI Bot."
        )
        model_name = gr.Dropdown(
            ["phi1.5", "Google Gemini"], value="phi1.5", label="Model Selection"
        )
        temperature = gr.Slider(0.0, 1.0, value=0.7, label="Temperature")
        question = gr.Textbox(lines=2, label="Your Question")
        output = gr.Textbox(lines=10, label="AI Response", interactive=False)
        submit_btn = gr.Button("Submit")
        # Wire the button directly to the backend; the previous run_chatbot
        # pass-through wrapper added nothing.
        submit_btn.click(
            chatbot, inputs=[question, model_name, temperature], outputs=output
        )
    demo.launch()