faizee07 committed on
Commit
7cb19ca
·
verified ·
1 Parent(s): acc2691

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -23
app.py CHANGED
@@ -5,7 +5,7 @@ import requests
5
  import random
6
  import re
7
 
8
- # --- CORE FUNCTIONS (No changes needed here, the logic is solid) ---
9
 
10
  def get_client():
11
  """Get Hugging Face client, handle token error."""
@@ -16,12 +16,12 @@ def get_client():
16
  except Exception:
17
  pass
18
  if not token:
19
- return None, "❌ **HuggingFace Token Required**\n\n**Setup:**\n1. Go to Space Settings β†’ Repository Secrets\n2. Add secret: Name=`HF_TOKEN`, Value=(your token)\n3. Get token: https://huggingface.co/settings/tokens\n4. Restart Space"
20
  return InferenceClient(token=token), None
21
 
22
  def generate_meme_text(idea: str):
23
  """
24
- Generate meme text by trying a list of models until one succeeds.
25
  """
26
  client, error = get_client()
27
  if error:
@@ -30,23 +30,35 @@ def generate_meme_text(idea: str):
30
  MODELS_TO_TRY = [
31
  "mistralai/Mistral-7B-Instruct-v0.2",
32
  "HuggingFaceH4/zephyr-7b-beta",
33
- "google/flan-t5-large",
34
- "tiiuae/falcon-7b-instruct",
35
  ]
36
 
37
  failed_models = []
38
  for model_id in MODELS_TO_TRY:
39
  try:
 
40
  prompt = f"""Task: Create a funny, two-line meme caption for the idea: "{idea}".
41
 
42
  Format your response exactly like this, with no extra text:
43
  Top: [The first line of the meme]
44
  Bottom: [The second line, the punchline]"""
45
 
46
- response = client.text_generation(
47
- prompt, model=model_id, max_new_tokens=60, temperature=0.8, do_sample=True
 
 
 
 
 
 
 
 
48
  )
49
 
 
 
 
 
50
  top_text, bottom_text = "",""
51
  top_match = re.search(r"Top:\s*(.*)", response, re.IGNORECASE)
52
  bottom_match = re.search(r"Bottom:\s*(.*)", response, re.IGNORECASE)
@@ -60,8 +72,9 @@ Bottom: [The second line, the punchline]"""
60
  if len(lines) >= 2: bottom_text = lines[1].replace("Bottom:", "").strip()
61
 
62
  if not top_text.strip() or not bottom_text.strip():
63
- continue
64
 
 
65
  return top_text.strip(), bottom_text.strip(), None, model_id
66
 
67
  except Exception as e:
@@ -70,10 +83,11 @@ Bottom: [The second line, the punchline]"""
70
  failed_models.append(model_id.split('/')[-1])
71
  continue
72
  else:
73
- return None, None, f"❌ **AI Error:** {str(e)[:200]}", model_id
74
 
75
  return None, None, f"❌ **All AI Models Are Offline**\n\nI'm sorry, all reliable free models are currently unavailable or busy. This is a Hugging Face issue. Please try again in a few minutes.\n\n**Models Tried:** {', '.join(failed_models)}", None
76
 
 
77
  def create_meme(idea: str, template: str):
78
  """Main function to generate the complete meme."""
79
  if not idea or len(idea.strip()) < 3:
@@ -101,9 +115,10 @@ def create_meme(idea: str, template: str):
101
  img_response = requests.get(meme_url, timeout=20)
102
  img_response.raise_for_status()
103
 
104
- temp_path = f"/tmp/meme_{random.randint(1000, 9999)}.jpg"
105
- with open(temp_path, 'wb') as f:
106
- f.write(img_response.content)
 
107
 
108
  status_message = (f"βœ… **Success!**\n\n"
109
  f"πŸ“ **Top Text:** {top}\n"
@@ -118,7 +133,7 @@ def create_meme(idea: str, template: str):
118
  except Exception as e:
119
  return None, f"❌ **An unexpected error occurred:** {str(e)}"
120
 
121
- # --- CONFIGURATION & UI ---
122
 
123
  MEME_TEMPLATES = {
124
  "Drake": "181913649",
@@ -132,7 +147,6 @@ MEME_TEMPLATES = {
132
  "Surprised Pikachu": "155067746",
133
  }
134
 
135
- # βœ… ADDED more general and relatable examples
136
  examples = [
137
  ["When you fix a bug you don't understand"],
138
  ["My plans for the weekend vs. what I actually do"],
@@ -142,7 +156,6 @@ examples = [
142
  ["Trying to assemble IKEA furniture with the instructions"],
143
  ]
144
 
145
- # --- Gradio UI ---
146
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), title="AI Meme Generator") as demo:
147
 
148
  gr.HTML("""
@@ -152,7 +165,6 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), ti
152
  <h3>Auto-Healing AI that ALWAYS Finds a Working Model πŸ€—</h3>
153
  </div>
154
  """)
155
- #
156
 
157
  with gr.Row():
158
  with gr.Column(scale=2):
@@ -165,16 +177,10 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), ti
165
 
166
  output_status = gr.Textbox(label="πŸ“Š Status & Details", lines=4, show_copy_button=True)
167
 
168
- # βœ… CLEANED UP UI: Moved instructions into a collapsible accordion
169
  with gr.Accordion("How does this work?", open=False):
170
  gr.Markdown("""
171
  This app is **resilient**. It has a list of reliable free models from Hugging Face and automatically tries them in order until it finds one that is online and working.
172
-
173
- 1. **Enter Your Idea** & **Pick a Template.**
174
- 2. **Click Generate.**
175
- 3. The app does the hard work of finding an active AI model for you!
176
-
177
- **No more model selection. No more 404 errors.** If your `HF_TOKEN` is set up correctly in your Space Secrets, this app should always work.
178
  """)
179
 
180
  gr.Examples(examples=examples, inputs=[idea_input], label="πŸ’‘ Meme Ideas to Try")
 
5
  import random
6
  import re
7
 
8
+ # --- CORE FUNCTIONS ---
9
 
10
  def get_client():
11
  """Get Hugging Face client, handle token error."""
 
16
  except Exception:
17
  pass
18
  if not token:
19
+ return None, "❌ **HuggingFace Token Required**\n\n**Setup:**\n1. Go to Space Settings β†’ Repository Secrets\n2. Add secret: Name=`HF_TOKEN`, Value=(your HF token)\n3. Get token: https://huggingface.co/settings/tokens\n4. Restart Space"
20
  return InferenceClient(token=token), None
21
 
22
  def generate_meme_text(idea: str):
23
  """
24
+ Generate meme text by trying a list of models using the correct chat API.
25
  """
26
  client, error = get_client()
27
  if error:
 
30
  MODELS_TO_TRY = [
31
  "mistralai/Mistral-7B-Instruct-v0.2",
32
  "HuggingFaceH4/zephyr-7b-beta",
33
+ "google/flan-t5-large", # This one is older but often works as a fallback
 
34
  ]
35
 
36
  failed_models = []
37
  for model_id in MODELS_TO_TRY:
38
  try:
39
+ # --- FIX: Use the chat_completion API for modern instruct models ---
40
  prompt = f"""Task: Create a funny, two-line meme caption for the idea: "{idea}".
41
 
42
  Format your response exactly like this, with no extra text:
43
  Top: [The first line of the meme]
44
  Bottom: [The second line, the punchline]"""
45
 
46
+ # Format the prompt as a list of messages for the chat API
47
+ messages = [{"role": "user", "content": prompt}]
48
+
49
+ # Use the correct client method: chat_completion
50
+ response_stream = client.chat_completion(
51
+ messages,
52
+ model=model_id,
53
+ max_tokens=60,
54
+ temperature=0.8,
55
+ stream=False, # We want the full response, not a stream
56
  )
57
 
58
+ # --- FIX: Parse the new response structure ---
59
+ response = response_stream.choices[0].message.content
60
+
61
+ # --- Robust Parsing Logic (no changes needed here) ---
62
  top_text, bottom_text = "",""
63
  top_match = re.search(r"Top:\s*(.*)", response, re.IGNORECASE)
64
  bottom_match = re.search(r"Bottom:\s*(.*)", response, re.IGNORECASE)
 
72
  if len(lines) >= 2: bottom_text = lines[1].replace("Bottom:", "").strip()
73
 
74
  if not top_text.strip() or not bottom_text.strip():
75
+ continue
76
 
77
+ # SUCCESS!
78
  return top_text.strip(), bottom_text.strip(), None, model_id
79
 
80
  except Exception as e:
 
83
  failed_models.append(model_id.split('/')[-1])
84
  continue
85
  else:
86
+ return None, None, f"❌ **AI Error:** {str(e)[:250]}", model_id
87
 
88
  return None, None, f"❌ **All AI Models Are Offline**\n\nI'm sorry, all reliable free models are currently unavailable or busy. This is a Hugging Face issue. Please try again in a few minutes.\n\n**Models Tried:** {', '.join(failed_models)}", None
89
 
90
+
91
  def create_meme(idea: str, template: str):
92
  """Main function to generate the complete meme."""
93
  if not idea or len(idea.strip()) < 3:
 
115
  img_response = requests.get(meme_url, timeout=20)
116
  img_response.raise_for_status()
117
 
118
+ # Use a temporary file path compatible with Gradio
119
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmpfile:
120
+ tmpfile.write(img_response.content)
121
+ temp_path = tmpfile.name
122
 
123
  status_message = (f"βœ… **Success!**\n\n"
124
  f"πŸ“ **Top Text:** {top}\n"
 
133
  except Exception as e:
134
  return None, f"❌ **An unexpected error occurred:** {str(e)}"
135
 
136
+ # --- CONFIGURATION & UI (No changes needed) ---
137
 
138
  MEME_TEMPLATES = {
139
  "Drake": "181913649",
 
147
  "Surprised Pikachu": "155067746",
148
  }
149
 
 
150
  examples = [
151
  ["When you fix a bug you don't understand"],
152
  ["My plans for the weekend vs. what I actually do"],
 
156
  ["Trying to assemble IKEA furniture with the instructions"],
157
  ]
158
 
 
159
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky"), title="AI Meme Generator") as demo:
160
 
161
  gr.HTML("""
 
165
  <h3>Auto-Healing AI that ALWAYS Finds a Working Model πŸ€—</h3>
166
  </div>
167
  """)
 
168
 
169
  with gr.Row():
170
  with gr.Column(scale=2):
 
177
 
178
  output_status = gr.Textbox(label="πŸ“Š Status & Details", lines=4, show_copy_button=True)
179
 
 
180
  with gr.Accordion("How does this work?", open=False):
181
  gr.Markdown("""
182
  This app is **resilient**. It has a list of reliable free models from Hugging Face and automatically tries them in order until it finds one that is online and working.
183
+ If your `HF_TOKEN` is set up correctly in your Space Secrets, this app should always work.
 
 
 
 
 
184
  """)
185
 
186
  gr.Examples(examples=examples, inputs=[idea_input], label="πŸ’‘ Meme Ideas to Try")