File size: 2,103 Bytes
697eed1
264a864
d9fb170
697eed1
 
 
 
 
 
 
 
 
 
 
 
 
 
264a864
697eed1
 
264a864
 
697eed1
 
 
264a864
 
697eed1
264a864
 
 
 
 
 
 
697eed1
264a864
697eed1
264a864
697eed1
 
5cd6d55
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import streamlit as st
# from models_api import get_answer, get_hugging_face_answer
from models_api import get_answer_from_context
# Define Streamlit app
def main():
    """Streamlit entry point for the document-analysis tool.

    Lets the user upload a plain-text file, type a question about it,
    and request an answer from one of three Groq-hosted models via
    ``get_answer_from_context``.
    """
    st.title("Document Analysis Tool")

    # File upload — only .txt files are accepted.
    uploaded_file = st.file_uploader("Upload a text file", type=["txt"])

    # Nothing else to render until a file has been provided.
    if not uploaded_file:
        return

    file_contents = uploaded_file.read().decode("utf-8")
    st.text_area("File Content", file_contents, height=200)

    # User input question
    question = st.text_input("Enter your question")

    # (button label, response header, Groq model identifier).
    # NOTE(review): the "Mistral 7b" button actually queries the
    # mixtral-8x7b-32768 model — labels kept as-is to preserve the UI.
    model_choices = [
        ("Generate Llama 3 8b Response", "Response from Llama3 8b:", "llama3-8b-8192"),
        ("Generate Mistral 7b Response", "Response from Mistral 7b:", "mixtral-8x7b-32768"),
        ("Generate Gemma 7b Response", "Response from Gemma 7b:", "gemma-7b-it"),
    ]

    # One button per model; answer only when a question was entered.
    for button_label, header, model_name in model_choices:
        if st.button(button_label) and question:
            st.write(header)
            answer = get_answer_from_context(
                model_name=model_name,
                context=file_contents,
                question=question,
            )
            st.write(answer)
# Run Streamlit app
# Launch the app only when executed directly (e.g. `streamlit run app.py`),
# not when this module is imported.
if __name__ == "__main__":
    main()