File size: 3,279 Bytes
4cc68a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import os
from dotenv import load_dotenv
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain.document_loaders import WikipediaLoader  # Import for loading Wikipedia articles

# Load environment variables from a .env file
load_dotenv()  # Adjust the path if necessary

# Only copy the key into os.environ when it is actually set:
# `os.environ[...] = None` raises TypeError, so a missing key would have
# crashed the app at import time instead of failing with a clear message.
_openai_api_key = os.getenv("OPENAI_API_KEY")
if _openai_api_key:
    os.environ["OPENAI_API_KEY"] = _openai_api_key
else:
    st.warning("OPENAI_API_KEY is not set; add it to your .env file.")

# Prompt template: the system message carries the retrieved context and the
# instruction to stay grounded in it; the user message carries the question.
prompt = ChatPromptTemplate.from_messages([
    (
        "system",
        "You are a helpful assistant. Please respond to the question asked based mostly on the context provided. Context: {context}. If not present in context, say not present in the given context",
    ),
    ("user", "Question: {question}"),
])

st.title("Question and Answering")

# Seed session-state defaults on first run so later reads never KeyError.
_STATE_DEFAULTS = {
    "input_type": "Direct Text Input",
    "input_text": "",
    "context_text": "",
}
for _name, _default in _STATE_DEFAULTS.items():
    if _name not in st.session_state:
        st.session_state[_name] = _default

# Callback invoked when the input-type selector changes.
def reset_inputs():
    """Clear the stored question and context so stale input from the
    previous mode does not leak into the new one."""
    for field in ("input_text", "context_text"):
        st.session_state[field] = ""

# Sidebar selector for the context source; switching modes triggers
# reset_inputs so the question/context from the old mode are discarded.
_INPUT_TYPES = ('Direct Text Input', 'Wikipedia Topic')
input_type = st.sidebar.selectbox(
    'Select Input Type',
    _INPUT_TYPES,
    key="input_type",
    on_change=reset_inputs,
)

# Handle input based on the selected type
if input_type == 'Direct Text Input':
    # Direct Text Input Mode: the user pastes the context themselves.
    context_text = st.text_area("Enter your context here:", key="direct_context")
    question = st.text_input(
        "What question do you have in mind?", 
        value=st.session_state.input_text,
        key="direct_question"
    )
    st.session_state.input_text = question
    st.session_state.context_text = context_text
else:
    # Wikipedia Topic Mode: fetch article text to use as the context.
    topic = st.text_input("Enter Wikipedia Topic:", key="wiki_topic")
    if topic:
        # BUG FIX: WikipediaLoader.load() returns a *list of Document
        # objects*, not a string. The original code sliced that list
        # (`full_content[:1000]` kept the first 1000 *documents*), compared
        # the document count against max_length, and stored a list in
        # session state (so `+= "..."` extended it character by character).
        # Join the documents' page_content into one string first, then
        # truncate by characters as intended.
        docs = WikipediaLoader(query=topic, load_max_docs=2).load()
        full_content = "\n\n".join(doc.page_content for doc in docs)

        # Limit the content size (e.g., first 1000 characters) to keep the
        # prompt within a reasonable token budget.
        max_length = 1000
        st.session_state.context_text = full_content[:max_length]
        if len(full_content) > max_length:
            st.session_state.context_text += "..."  # Indicate that the text has been truncated

    # Ensure the question input starts empty after a mode switch.
    question = st.text_input(
        "What question do you have in mind?", 
        value=st.session_state.input_text,
        key="wiki_question"
    )
    st.session_state.input_text = question

# Initialize the OpenAI chat model. The model name can be overridden via the
# OPENAI_MODEL environment variable; the default preserves prior behavior.
llm = ChatOpenAI(model=os.getenv("OPENAI_MODEL", "gpt-3.5-turbo"))
output_parser = StrOutputParser()  # extract the reply as a plain string
# LCEL pipeline: fill the prompt -> call the model -> parse to str.
chain = prompt | llm | output_parser

# Only call the model once the user has actually typed a question. The
# context is whatever the selected mode stored above (pasted text or the
# Wikipedia extract).
if st.session_state.input_text:
    answer = chain.invoke(
        {
            "question": st.session_state.input_text,
            "context": st.session_state.context_text,
        }
    )
    st.write(answer)