# from fastapi import FastAPI, Response
# from fastapi.responses import FileResponse
# from kokoro import KPipeline
# import soundfile as sf
# import os
# import numpy as np
# import torch
# from huggingface_hub import InferenceClient
#
# def llm_chat_response(text):
#     HF_TOKEN = os.getenv("HF_TOKEN")
#     client = InferenceClient(api_key=HF_TOKEN)
#     messages = [
#         {
#             "role": "user",
#             "content": [
#                 {
#                     "type": "text",
#                     "text": text + str('describe in one line only')
#                 }  # ,
#                 # {
#                 #     "type": "image_url",
#                 #     "image_url": {
#                 #         "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
#                 #     }
#                 # }
#             ]
#         }
#     ]
#     response_from_llama = client.chat.completions.create(
#         model="meta-llama/Llama-3.2-11B-Vision-Instruct",
#         messages=messages,
#         max_tokens=500)
#     return response_from_llama.choices[0].message['content']
#
# app = FastAPI()
#
# # Initialize pipeline once at startup
# pipeline = KPipeline(lang_code='a')
#
# @app.post("/generate")
# async def generate_audio(text: str, voice: str = "af_heart", speed: float = 1.0):
#     text_reply = llm_chat_response(text)
#
#     # Generate audio
#     generator = pipeline(
#         text_reply,
#         voice=voice,
#         speed=speed,
#         split_pattern=r'\n+'
#     )
#
#     # # Save first segment only for demo
#     # for i, (gs, ps, audio) in enumerate(generator):
#     #     sf.write(f"output_{i}.wav", audio, 24000)
#     #     return FileResponse(
#     #         f"output_{i}.wav",
#     #         media_type="audio/wav",
#     #         filename="output.wav"
#     #     )
#     # return Response("No audio generated", status_code=400)
#
#     # Process only the first segment for demo
#     for i, (gs, ps, audio) in enumerate(generator):
#         # Convert PyTorch tensor to NumPy array
#         audio_numpy = audio.cpu().numpy()
#         # Convert to 16-bit PCM: ensure the audio is in the range [-1, 1]
#         audio_numpy = np.clip(audio_numpy, -1, 1)
#         # Convert to 16-bit signed integers
#         pcm_data = (audio_numpy * 32767).astype(np.int16)
#         # Convert to bytes (automatically uses row-major order)
#         raw_audio = pcm_data.tobytes()
#         # Return PCM data with minimal necessary headers
#         return Response(
#             content=raw_audio,
#             media_type="application/octet-stream",
#             headers={
#                 "Content-Disposition": 'attachment; filename="output.pcm"',
#                 "X-Sample-Rate": "24000",
#                 "X-Bits-Per-Sample": "16",
#                 "X-Endianness": "little"
#             }
#         )
#     return Response("No audio generated", status_code=400)

import os
import uuid
import base64
import logging

from fastapi import FastAPI, HTTPException, Response, Request
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from typing import Optional, ClassVar, List
from huggingface_hub import InferenceClient
import numpy as np
import torch
from kokoro import KPipeline  # Assuming you have this pipeline for audio generation

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create FastAPI app
app = FastAPI(
    title="Text-to-Speech API with Vision Support",
    description="This API uses meta-llama/Llama-3.2-11B-Vision-Instruct, which requires an image input.",
    version="1.0.0"
)

# Mount a static directory for serving saved images
STATIC_DIR = "static_images"
if not os.path.exists(STATIC_DIR):
    os.makedirs(STATIC_DIR)
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
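
# Why the static mount: the vision model receives the image as a URL (see
# llm_chat_response below), not as raw bytes, so each uploaded image is saved
# under STATIC_DIR and exposed via /static. When deployed, BASE_URL must point
# at a publicly reachable address or the inference endpoint cannot fetch it.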

# Pydantic model for requests
class TextImageRequest(BaseModel):
    text: Optional[str] = None
    image_base64: Optional[str] = None
    voice: str = "af_heart"  # Default voice
    speed: float = 1.0

    # Use ClassVar so that Pydantic doesn't treat this as a model field.
    AVAILABLE_VOICES: ClassVar[List[str]] = ["af_heart"]

    def validate_voice(self):
        if self.voice not in self.AVAILABLE_VOICES:
            return "af_heart"
        return self.voice

# (Optional) Pydantic models for responses
class AudioResponse(BaseModel):
    status: str
    message: str

class ErrorResponse(BaseModel):
    error: str
    detail: Optional[str] = None

# Function to call the LLM model, following the reference code exactly
def llm_chat_response(text: str, image_base64: str) -> str:
    HF_TOKEN = os.getenv("HF_TOKEN")
    logger.info("Checking HF_TOKEN...")
    if not HF_TOKEN:
        logger.error("HF_TOKEN not configured")
        raise HTTPException(status_code=500, detail="HF_TOKEN not configured")

    logger.info("Initializing InferenceClient...")
    client = InferenceClient(
        provider="hf-inference",
        api_key=HF_TOKEN
    )

    # Save the base64-encoded image locally so it is accessible via a URL
    filename = f"{uuid.uuid4()}.jpg"
    image_path = os.path.join(STATIC_DIR, filename)
    try:
        image_data = base64.b64decode(image_base64)
    except Exception as e:
        logger.error(f"Error decoding image: {str(e)}")
        raise HTTPException(status_code=400, detail="Invalid base64 image data")
    with open(image_path, "wb") as f:
        f.write(image_data)

    # Construct the public URL for the saved image.
    # BASE_URL should be set to your public URL if not running locally.
    base_url = os.getenv("BASE_URL", "http://localhost:8000")
    image_url = f"{base_url}/static/{filename}"

    # Build the message exactly as in the reference code.
    # This model requires a list with two items: one for the text and one for the image.
    prompt = text if text else "Describe this image in one sentence."
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": image_url}}
            ]
        }
    ]
    logger.info(f"Message structure: {messages}")

    try:
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.2-11B-Vision-Instruct",
            messages=messages,
            max_tokens=500
        )
        response = completion.choices[0].message.content
        logger.info(f"Extracted response: {response}")
        return response
    except Exception as e:
        logger.error(f"Error during model inference: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
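
# A minimal sketch of exercising llm_chat_response directly, assuming HF_TOKEN
# is set, "photo.jpg" is a local file (an illustrative name, not part of this
# app), and BASE_URL is reachable by the inference endpoint:
#
#     with open("photo.jpg", "rb") as img:
#         b64 = base64.b64encode(img.read()).decode("utf-8")
#     print(llm_chat_response("What is in this picture?", b64))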

# Initialize the audio generation pipeline (your audio conversion pipeline)
try:
    logger.info("Initializing KPipeline...")
    pipeline = KPipeline(lang_code='a')
    logger.info("KPipeline initialized successfully")
except Exception as e:
    logger.error(f"Failed to initialize KPipeline: {str(e)}")
    # The API can still run, but audio generation will fail.
    pipeline = None

@app.post("/generate")
async def generate_audio(request: TextImageRequest):
| """ | |
| Generate audio from a multimodal (text+image) input. | |
| This model does not support text-only inputs. | |
| """ | |
| logger.info("Received generation request") | |
| # Ensure an image is provided because the model is multimodal. | |
| if not request.image_base64: | |
| raise HTTPException(status_code=400, detail="This model requires an image input.") | |
| # Get the text prompt. If none is provided, use a default. | |
| user_text = request.text if request.text else "Describe this image in one sentence." | |
| # Get the LLM's response | |
| logger.info("Calling the LLM model") | |
| text_reply = llm_chat_response(user_text, request.image_base64) | |
| logger.info(f"LLM response: {text_reply}") | |
| # Validate voice parameter (if needed for audio generation) | |
| validated_voice = request.validate_voice() | |
| if validated_voice != request.voice: | |
| logger.warning(f"Voice '{request.voice}' not available; using '{validated_voice}' instead") | |
| # Convert the text reply to audio using your audio pipeline | |
| logger.info(f"Generating audio using voice={validated_voice}, speed={request.speed}") | |
| try: | |
| # Generate audio segments (assumes pipeline yields segments) | |
| generator = pipeline( | |
| text_reply, | |
| voice=validated_voice, | |
| speed=request.speed, | |
| split_pattern=r'\n+' | |
| ) | |
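
        # Only the first yielded segment is converted and returned: the return
        # below exits on the loop's first iteration, so later segments are
        # never consumed.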
        for i, (gs, ps, audio) in enumerate(generator):
            logger.info(f"Audio generated, segment {i}")

            # Convert the audio tensor to 16-bit PCM bytes
            audio_numpy = audio.cpu().numpy()
            audio_numpy = np.clip(audio_numpy, -1, 1)
            pcm_data = (audio_numpy * 32767).astype(np.int16)
            raw_audio = pcm_data.tobytes()

            return Response(
                content=raw_audio,
                media_type="application/octet-stream",
                headers={
                    "Content-Disposition": 'attachment; filename="output.pcm"',
                    "X-Sample-Rate": "24000",
                    "X-Bits-Per-Sample": "16",
                    "X-Endianness": "little"
                }
            )
        raise HTTPException(status_code=400, detail="No audio segments generated.")
    except HTTPException:
        # Re-raise as-is so the 400 above is not swallowed and rewrapped as a 500.
        raise
    except Exception as e:
        logger.error(f"Error generating audio: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
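
# A minimal client-side sketch of turning the raw PCM response into a playable
# WAV with the standard library. It assumes the response was saved to
# "output.pcm" (an illustrative filename) and that the audio is mono, matching
# the X-Sample-Rate and X-Bits-Per-Sample headers above:
#
#     import wave
#     with open("output.pcm", "rb") as f:
#         pcm = f.read()
#     with wave.open("output.wav", "wb") as wav:
#         wav.setnchannels(1)      # assumed mono
#         wav.setsampwidth(2)      # 16-bit, per X-Bits-Per-Sample
#         wav.setframerate(24000)  # per X-Sample-Rate
#         wav.writeframes(pcm)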

@app.get("/")
async def root():
    return {"message": "Welcome! Use POST /generate with text and image_base64."}

@app.exception_handler(404)
async def not_found_handler(request: Request, exc):
    return JSONResponse(status_code=404, content={"error": "Endpoint not found."})

@app.exception_handler(405)
async def method_not_allowed_handler(request: Request, exc):
    return JSONResponse(status_code=405, content={"error": "Method not allowed."})