wahab5763 committed on
Commit
f08df26
·
verified ·
1 Parent(s): 32d02c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -29
app.py CHANGED
@@ -1,43 +1,30 @@
1
  import streamlit as st
2
  from transformers import pipeline
3
  from PIL import Image
4
- import requests
5
-
6
- # Hardcoded OpenAI API Key
7
- OPENAI_API_KEY = "sk-***REDACTED***"  # SECURITY: a live API key was committed here verbatim; redacted in this record — the key must be revoked, since it remains in git history.
8
 
9
  # Load OCR model for extracting text from images
10
  @st.cache_resource
11
  def load_ocr_model():
12
  return pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
13
 
14
- # Function to interact with ChatGPT API
15
- def chat_with_gpt(prompt):
16
- url = "https://api.openai.com/v1/chat/completions"
17
- headers = {
18
- "Authorization": f"Bearer {OPENAI_API_KEY}",
19
- "Content-Type": "application/json",
20
- }
21
- data = {
22
- "model": "gpt-3.5-turbo", # Adjust the model if needed
23
- "messages": [{"role": "user", "content": prompt}],
24
- "max_tokens": 1000,
25
- "temperature": 0.7,
26
- }
27
- response = requests.post(url, headers=headers, json=data)
28
- if response.status_code == 200:
29
- return response.json()["choices"][0]["message"]["content"].strip()
30
- else:
31
- st.error(f"Error from OpenAI: {response.status_code} - {response.text}")
32
- return None
33
 
34
  # Streamlit App
35
  def main():
36
- st.title("Image-to-Text with ChatGPT")
37
  st.markdown(
38
  """
39
- **Upload an image**, extract text using a state-of-the-art OCR model,
40
- and get explanations or solutions using ChatGPT.
41
  """
42
  )
43
 
@@ -59,9 +46,9 @@ def main():
59
  st.write("### Extracted Text:")
60
  st.write(f"`{extracted_text}`") # Display the extracted text in a readable format
61
 
62
- # Send text to ChatGPT
63
- st.write("### ChatGPT Explanation:")
64
- explanation = chat_with_gpt(extracted_text)
65
  if explanation:
66
  st.write(explanation)
67
  else:
 
1
  import streamlit as st
2
  from transformers import pipeline
3
  from PIL import Image
 
 
 
 
4
 
5
# Load OCR model for extracting text from images
@st.cache_resource
def load_ocr_model():
    """Build and cache the BLIP image-captioning pipeline.

    Cached via st.cache_resource so the model is downloaded and
    instantiated once per Streamlit session, not on every rerun.
    """
    ocr_pipeline = pipeline(
        "image-to-text",
        model="Salesforce/blip-image-captioning-base",
    )
    return ocr_pipeline
9
 
10
# Load text-generation model
@st.cache_resource
def load_text_model():
    """Build and cache the GPT-Neo text-generation pipeline.

    st.cache_resource ensures the (large) model is loaded a single
    time per session and reused across reruns.
    """
    generator = pipeline(
        "text-generation",
        model="EleutherAI/gpt-neo-1.3B",
    )
    return generator
14
+
15
# Function to process text with a language model
def process_with_llm(prompt):
    """Generate an explanation/completion for *prompt* with the cached GPT-Neo model.

    Parameters
    ----------
    prompt : str
        The text (e.g. OCR-extracted from an image) to continue/explain.

    Returns
    -------
    str
        The pipeline's "generated_text" field. NOTE(review): by default this
        echoes the prompt followed by the continuation — the caller displays
        it as-is, matching previous behavior.
    """
    llm_model = load_text_model()
    # FIX: the original used max_length=500, which counts the *prompt* tokens
    # too — a long extracted text could leave no room for generation or raise.
    # max_new_tokens bounds only the newly generated tokens.
    response = llm_model(
        prompt,
        max_new_tokens=500,
        do_sample=True,
        temperature=0.7,
    )
    return response[0]["generated_text"]
 
 
 
 
 
 
 
 
 
20
 
21
  # Streamlit App
22
  def main():
23
+ st.title("Image-to-Text with Open-Source Language Models")
24
  st.markdown(
25
  """
26
+ **Upload an image**, extract text using an open-source OCR model,
27
+ and get explanations or text completions using a GPT-style open-source model.
28
  """
29
  )
30
 
 
46
  st.write("### Extracted Text:")
47
  st.write(f"`{extracted_text}`") # Display the extracted text in a readable format
48
 
49
+ # Process extracted text with LLM
50
+ st.write("### Explanation/Completion:")
51
+ explanation = process_with_llm(extracted_text)
52
  if explanation:
53
  st.write(explanation)
54
  else: