DrDavis committed on
Commit
8bb76f3
·
1 Parent(s): e80d1a5
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/question-answer.iml" filepath="$PROJECT_DIR$/.idea/question-answer.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/question-answer.iml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <excludeFolder url="file://$MODULE_DIR$/.venv" />
6
+ </content>
7
+ <orderEntry type="inheritedJdk" />
8
+ <orderEntry type="sourceFolder" forTests="false" />
9
+ </component>
10
+ </module>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="" vcs="Git" />
5
+ </component>
6
+ </project>
app.py CHANGED
@@ -1,28 +1,20 @@
1
  import gradio as gr
2
- import tensorflow as tf
3
- import gradio as gr
4
- from transformers import pipeline, AutoTokenizer, TFAutoModelForQuestionAnswering
5
-
6
- #Option 1: Load the tokenizer and model separately
7
- #tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
8
- #model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad", return_dict=False)
9
-
10
- #Option 2: Use the HuggingFace pipeline function
11
- nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)
12
 
13
- def func(context, question):
14
- result = nlp(question=question, context=context)
15
- return result['answer']
 
16
 
17
- app = gr.Interface(fn=func,
18
- inputs = ['textbox', 'text'],
19
- outputs = gr.Textbox(lines=10),
20
- title = 'Question Answering Bot',
21
- description = 'Input context and question, then get answers!',
22
- examples = [[example_1, qst_1],
23
- [example_2, qst_2]],
24
- theme = "darkhuggingface",
25
- timeout = 120,
26
- allow_flagging="manual",
27
- flagging_options=["incorrect", "ambiguous", "offensive", "other"],
28
- ).queue()
 
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
2
 
3
def answer_doc_question(pdf_file, question):
    """Answer a free-text question about an uploaded PDF.

    Extracts the document's text with the project's PDF helper, runs the
    question-answering pipeline over it, and returns only the answer
    string from the pipeline's result dict.
    """
    document_text = get_text_from_pdf(pdf_file)
    result = question_answerer(question, document_text)
    return result["answer"]
7
 
8
# Provide a default file and question, so it's easy to try out the app.
pdf_input = gr.File(
    # FIX: the original was missing the comma after `value=...`, which is a
    # SyntaxError and prevented the app from launching at all.
    value="https://comptroller.defense.gov/Portals/45/Documents/defbudget/FY2025/FY2025_Budget_Request.pdf",
    file_types=[".pdf"],
    label="Upload a PDF document and ask a question about it.",
)
question = gr.Textbox(
    value="What is the grand total (AC + RC) amount for FY 2025 Request?",
    label="Type a question regarding the uploaded document here.",
)
# Wire the QA function to the two inputs and start the Gradio server.
gr.Interface(
    fn=answer_doc_question, inputs=[pdf_input, question], outputs="text"
).launch()