Upload 12 files
- .gitattributes +35 -35
- .gitignore +2 -0
- .vscode/settings.json +3 -0
- Dockerfile +20 -0
- README.md +10 -10
- app.py +13 -0
- models/chat_completion.py +24 -0
- models/text_to_image.py +14 -0
- requirements.txt +5 -0
- routes/automaticSpeechRecognition.py +23 -0
- routes/chatCompletion.py +52 -0
- routes/textToImage.py +30 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,2 @@
+test.py
+.venv
.vscode/settings.json
ADDED
@@ -0,0 +1,3 @@
+{
+    "editor.wordWrap": "on"
+}
Dockerfile
ADDED
@@ -0,0 +1,20 @@
+FROM python:3.11
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+# ARG HF_TOKEN
+
+# RUN --mount=type=secret,id=HF_TOKEN,mode=0444,required=true \
+#     HF_TOKEN=$(cat /run/secrets/HF_TOKEN)
+
+# ENV HF_TOKEN=HF_TOKEN
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
CHANGED
@@ -1,10 +1,10 @@
----
-title:
-emoji:
-colorFrom:
-colorTo: blue
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: Llm
+emoji: 🏆
+colorFrom: pink
+colorTo: blue
+sdk: docker
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,13 @@
+from huggingface_hub import InferenceClient
+from fastapi import FastAPI
+from routes import chatCompletion, textToImage, automaticSpeechRecognition
+
+app = FastAPI()
+
+@app.get("/")
+async def root():
+    return {"message": "Hello World"}
+
+app.include_router(chatCompletion.router)
+app.include_router(textToImage.router)
+app.include_router(automaticSpeechRecognition.router)
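
A minimal usage sketch for the app above, assuming the container from the Dockerfile is running locally (its CMD binds uvicorn to port 7860; the base URL is an assumption, not part of the upload):

```python
import requests

# Hypothetical local deployment of the Space defined by the Dockerfile
BASE_URL = "http://localhost:7860"

resp = requests.get(f"{BASE_URL}/")
resp.raise_for_status()
print(resp.json())  # expected: {"message": "Hello World"}
```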
models/chat_completion.py
ADDED
@@ -0,0 +1,24 @@
+from pydantic import BaseModel, Field
+from typing import List, Optional
+from huggingface_hub import ChatCompletionInputMessage, ChatCompletionInputGrammarType, ChatCompletionInputStreamOptions, ChatCompletionInputToolChoiceClass, ChatCompletionInputTool
+
+class ChatRequest(BaseModel):
+    model: str = Field(..., description="The model to use for chat completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. See https://huggingface.co/tasks/text-generation for more details.")
+    messages: List[ChatCompletionInputMessage] = Field(..., description="Conversation history consisting of roles and content pairs.")
+    frequency_penalty: Optional[float] = Field(0.0, ge=-2.0, le=2.0, description="Penalizes new tokens based on their existing frequency in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0.")
+    logit_bias: Optional[dict] = Field(None, description="Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens to an associated bias value from -100 to 100.")
+    logprobs: Optional[bool] = Field(None, description="Whether to return log probabilities of the output tokens or not.")
+    max_tokens: Optional[int] = Field(8192, description="Maximum number of tokens allowed in the response. Defaults to 8192.")
+    n: Optional[int] = Field(None, description="UNUSED.")
+    presence_penalty: Optional[float] = Field(None, ge=-2.0, le=2.0, description="Positive values penalize new tokens based on whether they appear in the text so far.")
+    response_format: Optional[ChatCompletionInputGrammarType] = Field(None, description="Grammar constraints. Can be either a JSONSchema or a regex.")
+    seed: Optional[int] = Field(None, description="Seed for reproducible control flow.")
+    stop: Optional[str] = Field(None, description="Up to four strings which trigger the end of the response.")
+    stream: Optional[bool] = Field(False, description="Enable realtime streaming of responses. Defaults to False.")
+    stream_options: Optional[ChatCompletionInputStreamOptions] = Field(None, description="Options for streaming completions.")
+    temperature: Optional[float] = Field(1.0, ge=0.0, le=2.0, description="Controls randomness of the generations. Lower values ensure less random completions.")
+    top_logprobs: Optional[int] = Field(None, ge=0, le=5, description="Number of most likely tokens to return at each token position.")
+    top_p: Optional[float] = Field(0.95, gt=0.0, lt=1.0, description="Fraction of the most likely next words to sample from.")
+    tool_choice: Optional[ChatCompletionInputToolChoiceClass] = Field("auto", description="The tool to use for the completion. Defaults to 'auto'.")
+    tool_prompt: Optional[str] = Field(None, description="A prompt to be appended before the tools.")
+    tools: Optional[List] = Field(None, description="A list of tools the model may call.")
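
A non-streaming request body matching ChatRequest, posted to the route defined in routes/chatCompletion.py. The base URL and model ID are illustrative assumptions; any chat model served by the HF Inference API should work:

```python
import requests

payload = {
    "model": "meta-llama/Llama-3.1-8B-Instruct",  # hypothetical choice
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    "max_tokens": 64,
    "stream": False,
}
resp = requests.post("http://localhost:7860/v1/chat/completions", json=payload)
resp.raise_for_status()
print(resp.json())
```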
models/text_to_image.py
ADDED
@@ -0,0 +1,14 @@
+from pydantic import BaseModel, Field
+from typing import List, Optional
+
+class TextToImageRequest(BaseModel):
+    prompt: str = Field(..., description="The prompt to generate an image from.")
+    negative_prompt: Optional[str] = Field(None, description="One or several prompts to guide what NOT to include in image generation.")
+    height: Optional[int] = Field(None, description="The height in pixels of the image to generate.", ge=64, le=2048)
+    width: Optional[int] = Field(None, description="The width in pixels of the image to generate.", ge=64, le=2048)
+    num_inference_steps: Optional[int] = Field(None, description="The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.", ge=1, le=500)
+    guidance_scale: Optional[float] = Field(None, description="A higher guidance scale value encourages the model to generate images closely linked to the text prompt, but values too high may cause saturation and other artifacts.", ge=1, le=20)
+    model: Optional[str] = Field(None, description="The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. If not provided, the default recommended text-to-image model will be used.")
+    scheduler: Optional[str] = Field(None, description="Override the scheduler with a compatible one.")
+    # target_size: Optional[TextToImageTargetSize] = Field(None, description="The size in pixel of the output image")
+    seed: Optional[int] = Field(None, description="Seed for the random number generator.")
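
A validation sketch for this model, assuming Pydantic v2 (use `.dict()` instead of `.model_dump()` on v1). It shows the ge/le bounds on the fields being enforced:

```python
from models.text_to_image import TextToImageRequest

# Valid request: optional fields left unset are excluded from the dump
req = TextToImageRequest(prompt="a watercolor fox", width=512, height=512)
print(req.model_dump(exclude_none=True))

# Out-of-range values are rejected, e.g. width=32 violates ge=64
try:
    TextToImageRequest(prompt="x", width=32)
except Exception as e:
    print(e)
```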
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+fastapi
+uvicorn[standard]
+huggingface_hub==0.27.1
+Pillow
+python-multipart
routes/automaticSpeechRecognition.py
ADDED
@@ -0,0 +1,23 @@
+from fastapi import APIRouter, File, UploadFile, Form
+from huggingface_hub import InferenceClient
+
+router = APIRouter()
+
+@router.post("/v1/audio/transcriptions", tags=["Automatic Speech Recognition"])
+# Add model and audio parameters to the function signature
+async def automatic_speech_recognition(
+    model: str = Form(..., description="The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint."),
+    audio: UploadFile = File(..., description="The audio content to transcribe, uploaded as a file.")
+):
+    # Use the 'model' parameter from the form data
+    client = InferenceClient(model=model)
+
+    # Read the uploaded file content
+    audio_bytes = await audio.read()
+
+    # Pass the audio bytes to the client method
+    res = client.automatic_speech_recognition(
+        audio=audio_bytes
+    )
+    # Return the result
+    return res
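
A multipart upload sketch against this route (python-multipart in requirements.txt enables the Form/File parsing). The base URL, audio file, and model ID are illustrative assumptions:

```python
import requests

# openai/whisper-large-v3 is one commonly used ASR checkpoint, chosen
# here only as an example
with open("sample.wav", "rb") as f:
    resp = requests.post(
        "http://localhost:7860/v1/audio/transcriptions",
        data={"model": "openai/whisper-large-v3"},
        files={"audio": ("sample.wav", f, "audio/wav")},
    )
resp.raise_for_status()
print(resp.json())
```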
routes/chatCompletion.py
ADDED
@@ -0,0 +1,52 @@
+from fastapi import APIRouter
+from fastapi.responses import StreamingResponse
+from models.chat_completion import ChatRequest
+from huggingface_hub import InferenceClient
+import json
+
+router = APIRouter()
+
+def generate_stream(response):
+    try:
+        for chunk in response:
+            try:
+                # Attempt to process and yield the chunk as a server-sent event
+                yield f"data: {json.dumps(chunk.__dict__, separators=(',', ':'))}\n\n"
+            except Exception as e:
+                # Log the error for debugging
+                print(f"Error during stream processing: {e}")
+                # Stop sending chunks if an error occurs
+                break
+    finally:
+        # Ensure the [DONE] message is always sent, even if an error occurred
+        yield "data: [DONE]\n\n"
+
+@router.post("/v1/chat/completions", tags=["Chat Completion"])
+async def chat_completion(body: ChatRequest):
+    client = InferenceClient(model=body.model)
+
+    res = client.chat_completion(
+        messages=body.messages,
+        frequency_penalty=body.frequency_penalty,
+        logit_bias=body.logit_bias,
+        logprobs=body.logprobs,
+        max_tokens=body.max_tokens,
+        n=body.n,
+        presence_penalty=body.presence_penalty,
+        response_format=body.response_format,
+        seed=body.seed,
+        stop=body.stop,
+        stream=body.stream,
+        stream_options=body.stream_options,
+        temperature=body.temperature,
+        top_logprobs=body.top_logprobs,
+        top_p=body.top_p,
+        tool_choice=body.tool_choice,
+        tool_prompt=body.tool_prompt,
+        tools=body.tools
+    )
+
+    if not body.stream:
+        return res  # return the dataclass directly; json.dumps here would double-encode the JSON
+    else:
+        return StreamingResponse(generate_stream(res), media_type="text/event-stream")
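
A client-side sketch for consuming the streaming branch, which emits server-sent events terminated by a `data: [DONE]` sentinel. The base URL and model ID are assumptions:

```python
import json
import requests

payload = {
    "model": "meta-llama/Llama-3.1-8B-Instruct",  # hypothetical choice
    "messages": [{"role": "user", "content": "Count to five."}],
    "stream": True,
}
with requests.post(
    "http://localhost:7860/v1/chat/completions", json=payload, stream=True
) as resp:
    for line in resp.iter_lines():
        if not line:
            continue  # skip the blank separator between events
        data = line.decode("utf-8").removeprefix("data: ")
        if data == "[DONE]":
            break  # end-of-stream sentinel
        print(json.loads(data))
```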
routes/textToImage.py
ADDED
@@ -0,0 +1,30 @@
+from fastapi import APIRouter, Response, HTTPException
+from models.text_to_image import TextToImageRequest
+from huggingface_hub import InferenceClient
+import io
+
+router = APIRouter()
+
+@router.post("/v1/images/generations", tags=["Text to Image"])
+async def text_to_image(t2i_body: TextToImageRequest):
+    client = InferenceClient(model=t2i_body.model)
+    try:
+        res = client.text_to_image(
+            prompt=t2i_body.prompt,
+            negative_prompt=t2i_body.negative_prompt,
+            height=t2i_body.height,
+            width=t2i_body.width,
+            num_inference_steps=t2i_body.num_inference_steps,
+            guidance_scale=t2i_body.guidance_scale,
+            scheduler=t2i_body.scheduler,
+            # target_size=t2i_body.target_size,
+            seed=t2i_body.seed
+        )
+        # Serialize the PIL image to PNG bytes
+        img_byte_arr = io.BytesIO()
+        res.save(img_byte_arr, format="PNG")
+        img_byte_arr.seek(0)
+        return Response(content=img_byte_arr.getvalue(), media_type="image/png")
+    except Exception as e:
+        print(f"Error generating image: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
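
A sketch of calling this route and saving the PNG bytes it returns; the base URL, prompt, and output filename are assumptions:

```python
import requests

payload = {"prompt": "a lighthouse at dusk", "width": 512, "height": 512}
resp = requests.post("http://localhost:7860/v1/images/generations", json=payload)
resp.raise_for_status()

# The endpoint responds with raw image/png content, not JSON
with open("generation.png", "wb") as f:
    f.write(resp.content)
```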