alikhantoleberdyev committed on
Commit
c0fe54b
·
1 Parent(s): 1d4cee9

build version: 1.9

Browse files
Files changed (2) hide show
  1. app.py +20 -18
  2. requirements.txt +2 -1
app.py CHANGED
@@ -2,40 +2,42 @@ import streamlit as st
2
  import altair as alt
3
  from transformers import pipeline
4
  from transformers import AutoModelForSeq2SeqLM , AutoTokenizer, TranslationPipeline
 
5
 
6
 
 
7
 
8
# --- Page header and context input for the text-based QA app ---
st.title('Question Answering 🕵️‍♂️')

st.write('Ask a question about NLP, and I will answer based on the provided context! 🔄')

# Fix: corrected the user-facing prompt typo "provice" -> "provide".
context_input = st.text_area("please provide some context", "Many NLP tasks are now benchmarked using datasets like GLUE and SuperGLUE. Multilingual NLP models like mBERT support multiple languages in a single framework.")
13
@st.cache_resource
def load_model():
    """Build (once) and cache the extractive question-answering pipeline.

    Cached with st.cache_resource so the model is loaded a single time
    per Streamlit process, not on every script rerun.
    """
    print("Loading model...")
    qa_pipeline = pipeline(
        "question-answering",
        model="deepset/roberta-base-squad2",
    )
    return qa_pipeline
17
 
18
 
19
# Instantiate (or fetch from cache) the QA pipeline.
dunno_answerer = load_model()

if context_input.strip():
    question_input = st.text_area("enter question about NLP", "what model support multilingual nlp?")

    # Fix: the button/answer flow is nested under the context check.
    # Previously the button branch sat outside `if context_input.strip():`
    # while `question_input` was bound only inside it, so pressing
    # "Answer!" with an empty context raised NameError.
    if st.button("Answer!"):
        if question_input.strip():
            # Run extractive QA: the model selects an answer span
            # inside the provided context.
            result = dunno_answerer(question=question_input, context=context_input)

            # Display the answer and additional information.
            st.write(f"**Answer:** {result['answer']}")
            st.write(f"**Confidence Score:** {round(result['score'], 4)}")
            st.write(f"**Answer Start Position:** {result['start']}")
            st.write(f"**Answer End Position:** {result['end']}")
        else:
            st.write("Please enter a valid question!")
else:
    # Robustness: tell the user why the question box is missing.
    st.write("Please provide some context first!")
 
2
  import altair as alt
3
  from transformers import pipeline
4
  from transformers import AutoModelForSeq2SeqLM , AutoTokenizer, TranslationPipeline
5
+ from PIL import Image
6
 
7
 
8
# --- Page header for the image-based (document VQA) app ---
st.title("Image-Based Question Answering 🕵️‍♂️")
st.subheader("Ask questions directly from images!")

st.write("""
Upload an image (e.g., receipts, documents), type your question, and get precise answers in real-time.
Powered by the advanced `naver-clova-ix/donut-base-finetuned-docvqa` model.
""")
20
@st.cache_resource
def load_model():
    """Build (once) and cache the document-question-answering pipeline.

    Uses the Donut model fine-tuned on DocVQA; st.cache_resource keeps
    a single instance alive across Streamlit reruns.
    """
    print("Loading model...")
    docvqa_pipeline = pipeline(
        "document-question-answering",
        model="naver-clova-ix/donut-base-finetuned-docvqa",
    )
    return docvqa_pipeline
24
 
25
 
26
# Instantiate (or fetch from cache) the document-VQA pipeline.
dunno_answerer = load_model()

uploaded_image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

# Fix: the question/answer flow is nested under the upload check.
# In the flat form, pressing "Answer!" before uploading raised
# NameError because `image` and `question_input` were never bound.
if uploaded_image is not None:
    image = Image.open(uploaded_image)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    question_input = st.text_area("Enter your question", "Any questions ?")

    if st.button("Answer!"):
        if question_input.strip():
            # The document-question-answering pipeline returns a list of
            # candidate answers; take the top-ranked one.
            result = dunno_answerer(image=image, question=question_input)

            st.write(f"**Answer:** {result[0]['answer']}")
        else:
            st.write("Please enter a valid question!")
else:
    # Robustness: guide the user instead of showing a dead button.
    st.write("Please upload an image to get started!")
 
 
 
 
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  streamlit==1.41.1
2
  transformers
3
- torch
 
 
1
  streamlit==1.41.1
2
  transformers
3
+ torch
4
+ sentencepiece