Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,8 +1,9 @@
|
|
|
|
|
| 1 |
import gradio as gr
|
|
|
|
| 2 |
import asyncio
|
| 3 |
from PIL import Image
|
| 4 |
from io import BytesIO
|
| 5 |
-
from huggingface_hub import AsyncInferenceClient
|
| 6 |
from dotenv import load_dotenv
|
| 7 |
import os
|
| 8 |
|
|
@@ -10,7 +11,8 @@ import os
|
|
| 10 |
load_dotenv()
|
| 11 |
API_TOKEN = os.getenv("HF_API_TOKEN")
|
| 12 |
|
| 13 |
-
# Конфигурация
|
|
|
|
| 14 |
MODELS = {
|
| 15 |
"Midjourney": "Jovie/Midjourney",
|
| 16 |
"FLUX.1 [dev]": "black-forest-labs/FLUX.1-dev",
|
|
@@ -20,26 +22,30 @@ MODELS = {
|
|
| 20 |
"Leonardo AI": "goofyai/Leonardo_Ai_Style_Illustration",
|
| 21 |
}
|
| 22 |
|
| 23 |
-
# Инициализация клиента
|
| 24 |
-
client = AsyncInferenceClient(token=API_TOKEN)
|
| 25 |
-
|
| 26 |
-
|
| 27 |
# Асинхронная функция для отправки запроса к API
|
| 28 |
async def query_model(prompt, model_name, model_url):
|
| 29 |
try:
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
except Exception as e:
|
| 38 |
-
print(f"Ошибка
|
| 39 |
return model_name, None
|
| 40 |
|
| 41 |
-
|
| 42 |
-
# Асинхронная обработка запросов
|
| 43 |
async def handle(prompt):
|
| 44 |
tasks = [
|
| 45 |
query_model(prompt, model_name, model_url)
|
|
@@ -48,7 +54,6 @@ async def handle(prompt):
|
|
| 48 |
results = await asyncio.gather(*tasks)
|
| 49 |
return {model_name: image for model_name, image in results if image}
|
| 50 |
|
| 51 |
-
|
| 52 |
# Интерфейс Gradio
|
| 53 |
with gr.Blocks() as demo:
|
| 54 |
gr.Markdown("## Генерация изображений с использованием моделей Hugging Face")
|
|
@@ -82,10 +87,9 @@ with gr.Blocks() as demo:
|
|
| 82 |
# Ссылки на соцсети
|
| 83 |
with gr.Row():
|
| 84 |
with gr.Column(scale=1):
|
| 85 |
-
gr.Image(value=
|
| 86 |
with gr.Column(scale=4):
|
| 87 |
-
|
| 88 |
-
"""<div style="text-align: center; font-family: 'Helvetica Neue', sans-serif; padding: 10px; color: #333333;">
|
| 89 |
<p style="font-size: 18px; font-weight: 600; margin-bottom: 8px;">
|
| 90 |
Эта демка была создана телеграм каналом <strong style="color: #007ACC;"><a href='https://t.me/mlphys'> mlphys</a></strong>. Другие мои социальные сети:
|
| 91 |
</p>
|
|
@@ -94,7 +98,6 @@ with gr.Blocks() as demo:
|
|
| 94 |
<a href="https://x.com/quensy23" target="_blank" style="color: #1DA1F2; text-decoration: none; font-weight: 500;">Twitter</a> |
|
| 95 |
<a href="https://github.com/freQuensy23-coder" target="_blank" style="color: #0088cc; text-decoration: none; font-weight: 500;">GitHub</a>
|
| 96 |
</p>
|
| 97 |
-
</div>"""
|
| 98 |
-
)
|
| 99 |
|
| 100 |
demo.launch()
|
|
|
|
| 1 |
+
import gradio
|
| 2 |
import gradio as gr
|
| 3 |
+
import aiohttp
|
| 4 |
import asyncio
|
| 5 |
from PIL import Image
|
| 6 |
from io import BytesIO
|
|
|
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
import os
|
| 9 |
|
|
|
|
| 11 |
load_dotenv()
|
| 12 |
API_TOKEN = os.getenv("HF_API_TOKEN")
|
| 13 |
|
| 14 |
+
# Конфигурация API
|
| 15 |
+
HEADERS = {"Authorization": f"Bearer {API_TOKEN}"}
|
| 16 |
MODELS = {
|
| 17 |
"Midjourney": "Jovie/Midjourney",
|
| 18 |
"FLUX.1 [dev]": "black-forest-labs/FLUX.1-dev",
|
|
|
|
| 22 |
"Leonardo AI": "goofyai/Leonardo_Ai_Style_Illustration",
|
| 23 |
}
|
| 24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
# Асинхронная функция для отправки запроса к API
|
| 26 |
async def query_model(prompt, model_name, model_url):
    """Send one text-to-image request to the Hugging Face Inference API.

    Args:
        prompt: Text prompt, forwarded as the ``"inputs"`` payload.
        model_name: Human-readable model label, echoed back in the result
            so callers can match responses to models after ``gather()``.
        model_url: Model repo id appended to the Inference API base URL.

    Returns:
        ``(model_name, PIL.Image.Image)`` on success, or
        ``(model_name, None)`` when the API reports an error or the
        request fails for any reason.
    """
    try:
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"https://api-inference.huggingface.co/models/{model_url}",
                headers=HEADERS,
                json={"inputs": prompt},
            ) as response:
                if response.status == 200:
                    # Success responses carry the raw image bytes.
                    image_data = await response.read()
                    return model_name, Image.open(BytesIO(image_data))
                # Error responses are usually JSON, but the API can reply
                # with plain text (e.g. while a model is cold-starting), so
                # don't trust the Content-Type header and keep a text
                # fallback instead of letting json() raise here.
                try:
                    error_message = await response.json(content_type=None)
                except Exception:
                    error_message = {"error": await response.text()}
                warnings = error_message.get("warnings", [])
                print(f"Ошибка для модели {model_name}: {error_message.get('error', 'unknown error')}")
                if warnings:
                    print(f"Предупреждения для модели {model_name}: {warnings}")
                return model_name, None
    except Exception as e:
        # Broad catch is deliberate: one failing model must not take down
        # the asyncio.gather() over all models in handle().
        print(f"Ошибка соединения с моделью {model_name}: {e}")
        return model_name, None
|
| 47 |
|
| 48 |
+
# Асинхронная обработка всех запросов
|
|
|
|
| 49 |
async def handle(prompt):
|
| 50 |
tasks = [
|
| 51 |
query_model(prompt, model_name, model_url)
|
|
|
|
| 54 |
results = await asyncio.gather(*tasks)
|
| 55 |
return {model_name: image for model_name, image in results if image}
|
| 56 |
|
|
|
|
| 57 |
# Интерфейс Gradio
|
| 58 |
with gr.Blocks() as demo:
|
| 59 |
gr.Markdown("## Генерация изображений с использованием моделей Hugging Face")
|
|
|
|
| 87 |
# Ссылки на соцсети
|
| 88 |
with gr.Row():
|
| 89 |
with gr.Column(scale=1):
|
| 90 |
+
gr.Image(value='icon.jpg')
|
| 91 |
with gr.Column(scale=4):
|
| 92 |
+
gradio.HTML("""<div style="text-align: center; font-family: 'Helvetica Neue', sans-serif; padding: 10px; color: #333333;">
|
|
|
|
| 93 |
<p style="font-size: 18px; font-weight: 600; margin-bottom: 8px;">
|
| 94 |
Эта демка была создана телеграм каналом <strong style="color: #007ACC;"><a href='https://t.me/mlphys'> mlphys</a></strong>. Другие мои социальные сети:
|
| 95 |
</p>
|
|
|
|
| 98 |
<a href="https://x.com/quensy23" target="_blank" style="color: #1DA1F2; text-decoration: none; font-weight: 500;">Twitter</a> |
|
| 99 |
<a href="https://github.com/freQuensy23-coder" target="_blank" style="color: #0088cc; text-decoration: none; font-weight: 500;">GitHub</a>
|
| 100 |
</p>
|
| 101 |
+
</div>""")
|
|
|
|
| 102 |
|
| 103 |
demo.launch()
|