Spaces:
Runtime error
Runtime error
enstazao
committed on
Commit
·
60adfff
1
Parent(s):
1b0c585
used the wtq dataset fine tuned model
Browse files
app.py
CHANGED
|
@@ -3,9 +3,9 @@ import pandas as pd
|
|
| 3 |
from io import BytesIO
|
| 4 |
from transformers import AutoTokenizer, AutoModelForTableQuestionAnswering, TableQuestionAnsweringPipeline
|
| 5 |
|
| 6 |
-
# Load the tokenizer and model
|
| 7 |
-
tokenizer = AutoTokenizer.from_pretrained("google/tapas-large-finetuned-
|
| 8 |
-
model = AutoModelForTableQuestionAnswering.from_pretrained("google/tapas-large-finetuned-
|
| 9 |
|
| 10 |
# Initialize the TableQuestionAnsweringPipeline manually
|
| 11 |
pipe = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
|
|
@@ -27,7 +27,6 @@ def answer_question(uploaded_file, question):
|
|
| 27 |
answer = result['answer']
|
| 28 |
return answer
|
| 29 |
|
| 30 |
-
|
| 31 |
logo_url = "https://i.ibb.co/Brr7bPP/xflow.png"
|
| 32 |
# Define the Gradio app interface
|
| 33 |
iface = gr.Interface(
|
|
@@ -40,3 +39,4 @@ iface = gr.Interface(
|
|
| 40 |
|
| 41 |
# Run the app
|
| 42 |
iface.launch()
|
|
|
|
|
|
| 3 |
from io import BytesIO
|
| 4 |
from transformers import AutoTokenizer, AutoModelForTableQuestionAnswering, TableQuestionAnsweringPipeline
|
| 5 |
|
| 6 |
+
# Load the tokenizer and model with "google/tapas-large-finetuned-wtq"
|
| 7 |
+
tokenizer = AutoTokenizer.from_pretrained("google/tapas-large-finetuned-wtq")
|
| 8 |
+
model = AutoModelForTableQuestionAnswering.from_pretrained("google/tapas-large-finetuned-wtq")
|
| 9 |
|
| 10 |
# Initialize the TableQuestionAnsweringPipeline manually
|
| 11 |
pipe = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
|
|
|
|
| 27 |
answer = result['answer']
|
| 28 |
return answer
|
| 29 |
|
|
|
|
| 30 |
logo_url = "https://i.ibb.co/Brr7bPP/xflow.png"
|
| 31 |
# Define the Gradio app interface
|
| 32 |
iface = gr.Interface(
|
|
|
|
| 39 |
|
| 40 |
# Run the app
|
| 41 |
iface.launch()
|
| 42 |
+
|