Spaces:
Running
Running
Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
|
@@ -97,22 +97,33 @@ def generate_image(prompt):
|
|
| 97 |
if not prompt.strip(): return None, 'Please enter a description.'
|
| 98 |
if not HF_TOKEN: return None, 'Error: Add HF_TOKEN to Space Settings Secrets.'
|
| 99 |
try:
|
| 100 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
headers = {'Authorization': 'Bearer ' + HF_TOKEN, 'Content-Type': 'application/json'}
|
| 102 |
-
payload = {'inputs':
|
| 103 |
models = [
|
| 104 |
'https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell',
|
| 105 |
'https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-base-1.0',
|
| 106 |
-
'https://router.huggingface.co/hf-inference/models/runwayml/stable-diffusion-v1-5',
|
| 107 |
]
|
| 108 |
for model_url in models:
|
| 109 |
try:
|
| 110 |
r = requests.post(model_url, headers=headers, json=payload, timeout=60)
|
| 111 |
if r.status_code == 200:
|
| 112 |
img = Image.open(io.BytesIO(r.content))
|
| 113 |
-
return img, '
|
| 114 |
except: continue
|
| 115 |
-
return None, '
|
| 116 |
except Exception as e:
|
| 117 |
return None, 'Error: ' + str(e)
|
| 118 |
|
|
|
|
def generate_image(prompt):
    """Generate an illustration for *prompt* via the HF Inference API.

    Two-step pipeline:
      1. If a Groq API key is configured, ask Groq's LLM to rewrite the user
         request into a detailed, domain-specific image-generation prompt
         (best-effort: any failure falls back to the raw prompt).
      2. POST the (possibly enhanced) prompt to a list of hosted diffusion
         models in priority order, returning the first successful image.

    Parameters
    ----------
    prompt : str
        Free-text description of the desired image.

    Returns
    -------
    tuple[PIL.Image.Image | None, str]
        The decoded image (or None on failure) and a status message.
    """
    if not prompt.strip():
        return None, 'Please enter a description.'
    if not HF_TOKEN:
        return None, 'Error: Add HF_TOKEN to Space Settings Secrets.'
    try:
        # Step 1 - Use Groq to enhance prompt for accuracy
        enhanced = prompt
        if GROQ_KEY:
            try:
                client = Groq(api_key=GROQ_KEY)
                msgs = [
                    {'role': 'system', 'content': 'You are an expert at writing image generation prompts for biomedical and engineering topics. Convert the user request into a highly detailed, specific image generation prompt. Be very specific about colors, materials, labels, dimensions, and visual style. Always say: photorealistic, highly detailed, scientific illustration, white background, labeled diagram, professional medical illustration.'},
                    {'role': 'user', 'content': 'Create an image generation prompt for: ' + prompt + '. Context: SJSU CardioLab uses 27mm SJM Regent bileaflet mechanical heart valves, Sylgard 184 transparent silicone, green laser PIV system, Arduino Uno with stepper motor for TGT, Whatman paper microfluidic uPAD devices for CKD creatinine detection using Jaffe reaction orange-red color.'}
                ]
                resp = client.chat.completions.create(
                    model='llama-3.3-70b-versatile', messages=msgs, max_tokens=200
                )
                # The API may return None content; keep the raw prompt then so
                # the payload and the enhanced[:150] slice below stay valid.
                enhanced = resp.choices[0].message.content or prompt
            except Exception:
                # Enhancement is best-effort only — never let it block
                # generation. (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                pass
        # Step 2 - Send enhanced prompt to FLUX
        headers = {'Authorization': 'Bearer ' + HF_TOKEN, 'Content-Type': 'application/json'}
        payload = {'inputs': enhanced, 'parameters': {'num_inference_steps': 8, 'guidance_scale': 7.5}}
        models = [
            'https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell',
            'https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-base-1.0',
        ]
        for model_url in models:
            try:
                r = requests.post(model_url, headers=headers, json=payload, timeout=60)
                if r.status_code == 200:
                    img = Image.open(io.BytesIO(r.content))
                    return img, 'Generated! Enhanced prompt: ' + enhanced[:150] + '...'
            except Exception:
                # Network error, timeout, or undecodable response body:
                # fall through to the next model. (Was a bare `except:`.)
                continue
        return None, 'Models busy. Please try again in 30 seconds.'
    except Exception as e:
        return None, 'Error: ' + str(e)