Spaces:
Sleeping
Sleeping
| from langchain.llms import openai | |
| import openai | |
| from dotenv import load_dotenv | |
| import os | |
| import streamlit as st | |
| from langchain.chat_models import ChatOpenAI | |
| from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate | |
| from langchain.schema import SystemMessage, HumanMessage | |
# Load environment variables (expects OPENAI_API_KEY in a .env file)
# and hand the key to the openai client module.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def ask_question_with_langchain(
    question,
    model="ft:gpt-3.5-turbo-0125:shubham-gupta::AqKlxnFo",
    temperature=0.3,
):
    """Ask the infectious-disease assistant a single question via LangChain.

    Args:
        question: The user's question, passed verbatim as the human message.
        model: OpenAI chat model to use. Defaults to the project's
            fine-tuned gpt-3.5-turbo; any chat model id (e.g. "gpt-4") works.
        temperature: Sampling temperature; lower is more deterministic.

    Returns:
        The assistant's reply text (``str``).
    """
    # Initialize the OpenAI chat model. The API key was loaded from the
    # environment at module import time (see load_dotenv above).
    chat = ChatOpenAI(
        model=model,
        temperature=temperature,
        openai_api_key=openai.api_key,
    )

    # Single-turn conversation: a fixed persona plus the user's question.
    messages = [
        SystemMessage(
            content="You are a medical assistant who specializes in infectious diseases."
        ),
        HumanMessage(content=question),
    ]

    # NOTE(review): calling the model directly (chat(messages)) is the old
    # LangChain API; newer versions prefer chat.invoke(messages). Kept as-is
    # to match the langchain version this file's imports target.
    response = chat(messages)
    return response.content  # extract just the assistant's text
# Streamlit entry point: simple one-question UI around the assistant.
if __name__ == "__main__":
    st.set_page_config(page_title=" PPA assistant")
    st.header("InfektIQ")

    # Avoid shadowing the builtin `input`; widget state is keyed "input".
    user_question = st.text_input("Input: ", key="input")
    submit = st.button("Ask the question")

    # Only call the model after the button is pressed. The original code
    # invoked the API on every Streamlit rerun (before checking `submit`),
    # which fired a paid request even when the user never clicked Ask.
    if submit and user_question:
        st.subheader("The Response is")
        st.write(ask_question_with_langchain(user_question))