wahab5763 committed on
Commit
ddd6099
·
verified ·
1 Parent(s): b2c8f91

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -7
app.py CHANGED
@@ -10,13 +10,19 @@ def load_ocr_model():
10
  # Load text-generation model
11
  @st.cache_resource
12
  def load_text_model():
13
- return pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B")
14
 
15
  # Function to process text with a language model
16
  def process_with_llm(prompt):
17
- llm_model = load_text_model()
18
- response = llm_model(prompt, max_length=500, do_sample=True, temperature=0.7)
19
- return response[0]["generated_text"]
 
 
 
 
 
 
20
 
21
  # Streamlit App
22
  def main():
@@ -40,7 +46,7 @@ def main():
40
  # Extract text from image
41
  ocr_model = load_ocr_model()
42
  result = ocr_model(image)
43
-
44
  if len(result) > 0:
45
  extracted_text = result[0]["generated_text"]
46
  st.write("### Extracted Text:")
@@ -49,8 +55,7 @@ def main():
49
  # Process extracted text with LLM
50
  st.write("### Explanation/Completion:")
51
  explanation = process_with_llm(extracted_text)
52
- if explanation:
53
- st.write(explanation)
54
  else:
55
  st.error("No text could be extracted. Please try another image.")
56
  except Exception as e:
 
10
  # Load text-generation model
11
  @st.cache_resource
12
  def load_text_model():
13
+ return pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B")
14
 
15
  # Function to process text with a language model
16
  def process_with_llm(prompt):
17
+ try:
18
+ llm_model = load_text_model()
19
+ response = llm_model(prompt, max_length=500, do_sample=True, temperature=0.7)
20
+ if response and len(response) > 0:
21
+ return response[0]["generated_text"]
22
+ else:
23
+ return "No explanation or completion could be generated. Please try again with a different input."
24
+ except Exception as e:
25
+ return f"Error generating explanation: {e}"
26
 
27
  # Streamlit App
28
  def main():
 
46
  # Extract text from image
47
  ocr_model = load_ocr_model()
48
  result = ocr_model(image)
49
+
50
  if len(result) > 0:
51
  extracted_text = result[0]["generated_text"]
52
  st.write("### Extracted Text:")
 
55
  # Process extracted text with LLM
56
  st.write("### Explanation/Completion:")
57
  explanation = process_with_llm(extracted_text)
58
+ st.write(explanation)
 
59
  else:
60
  st.error("No text could be extracted. Please try another image.")
61
  except Exception as e: