vverma committed on
Commit ·
7ad0578
1
Parent(s): 44c800d
fixed requirements.txt
Browse files
- app.py +3 -4
- 1+7.png → sample.png +0 -0
app.py
CHANGED
|
@@ -7,16 +7,15 @@ app = FastAPI()
|
|
| 7 |
@app.get("/")
|
| 8 |
def greet_json():
|
| 9 |
# Load model and processor from Hugging Face
|
|
|
|
| 10 |
processor = TrOCRProcessor.from_pretrained('tjoab/latex_finetuned')
|
| 11 |
model = VisionEncoderDecoderModel.from_pretrained('tjoab/latex_finetuned')
|
| 12 |
|
| 13 |
-
paths = "./"
|
| 14 |
-
|
| 15 |
# Load all images as a batch
|
| 16 |
-
|
| 17 |
|
| 18 |
# Preprocess the images
|
| 19 |
-
preproc_image = processor.image_processor(images=
|
| 20 |
|
| 21 |
# Generate and decode the tokens
|
| 22 |
# NOTE: max_length default value is very small, which often results in truncated inference if not set
|
|
|
|
| 7 |
@app.get("/")
|
| 8 |
def greet_json():
|
| 9 |
# Load model and processor from Hugging Face
|
| 10 |
+
print("Loading model and processor...")
|
| 11 |
processor = TrOCRProcessor.from_pretrained('tjoab/latex_finetuned')
|
| 12 |
model = VisionEncoderDecoderModel.from_pretrained('tjoab/latex_finetuned')
|
| 13 |
|
|
|
|
|
|
|
| 14 |
# Load all images as a batch
|
| 15 |
+
sample_image = open_PIL_image("sample.png")
|
| 16 |
|
| 17 |
# Preprocess the images
|
| 18 |
+
preproc_image = processor.image_processor(images=[sample_image], return_tensors="pt").pixel_values
|
| 19 |
|
| 20 |
# Generate and decode the tokens
|
| 21 |
# NOTE: max_length default value is very small, which often results in truncated inference if not set
|
1+7.png → sample.png
RENAMED
|
File without changes
|