mkoot007 committed on
Commit
8da0b8c
·
1 Parent(s): c2aec16

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -46
app.py DELETED
@@ -1,46 +0,0 @@
1
- import streamlit as st
2
- from PIL import Image
3
- import easyocr
4
- from transformers import AutoTokenizer, AutoModelForCausalLM
5
-
6
# OCR helper: pull all readable text out of an uploaded image.
def extract_text(image):
    """Extract English text from a PIL image using easyocr.

    Parameters
    ----------
    image : PIL.Image.Image
        The image to run OCR on.

    Returns
    -------
    str
        All recognized text fragments joined by single spaces
        ("" when nothing is recognized).
    """
    import numpy as np  # local import: only needed for the PIL -> array conversion

    # Constructing easyocr.Reader loads detection/recognition models from
    # disk (or downloads them) — far too expensive to repeat per call, so
    # cache one instance on the function object.
    if not hasattr(extract_text, "_reader"):
        extract_text._reader = easyocr.Reader(['en'])

    # readtext accepts a file path, URL, bytes, or numpy array — NOT a PIL
    # Image — so convert explicitly before calling it.
    results = extract_text._reader.readtext(np.array(image))

    # Each result is (bbox, text, confidence); keep only the text.
    return ' '.join(result[1] for result in results)
11
-
12
# LM helper: ask the causal language model to explain a piece of text.
def explain_text(text, tokenizer, model):
    """Generate an explanation of *text* with a causal language model.

    Parameters
    ----------
    text : str
        The (OCR-extracted) text to explain.
    tokenizer : transformers.PreTrainedTokenizer
        Tokenizer matching *model*.
    model : transformers.PreTrainedModel
        A causal LM exposing ``generate``.

    Returns
    -------
    str
        The newly generated tokens decoded to a string (the echoed
        prompt is stripped).
    """
    input_ids = tokenizer.encode(text, return_tensors="pt", max_length=256, truncation=True)
    # Use max_new_tokens rather than max_length: max_length caps the TOTAL
    # sequence, so with a prompt of up to 256 tokens a max_length of 100
    # would leave no room to generate anything at all.
    output_ids = model.generate(input_ids, max_new_tokens=100, num_return_sequences=1)
    # generate() returns prompt + continuation; slice off the prompt so the
    # caller gets only the explanation, not its own input echoed back.
    new_tokens = output_ids[0][input_ids.shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
18
-
19
st.title("Text Extraction and Explanation")

# Allow users to upload an image
uploaded_file = st.file_uploader("Upload an image:")


@st.cache_resource
def _load_model():
    """Load the zephyr-7b tokenizer and model exactly once per process.

    Streamlit re-executes this whole script on every user interaction;
    without caching, the 7B-parameter model would be reloaded each time.
    """
    tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
    model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
    return tokenizer, model


# Keep the same module-level names the rest of the script uses.
tokenizer, model = _load_model()

if uploaded_file is not None:
    # Read the uploaded image
    image = Image.open(uploaded_file)

    # Extract text from the image using easyocr
    extracted_text = extract_text(image)

    # Explain the extracted text using the "HuggingFaceH4/zephyr-7b-alpha" model
    explanation = explain_text(extracted_text, tokenizer, model)

    # Display the extracted text and explanation
    st.markdown("**Extracted text:**")
    st.markdown(extracted_text)

    st.markdown("**Explanation:**")
    st.markdown(explanation)

else:
    st.markdown("Please upload an image to extract text and get an explanation.")