| import os |
| import time |
| from langchain_core.pydantic_v1 import BaseModel, Field |
| from fastapi import FastAPI, HTTPException, Query, Request |
| from fastapi.responses import StreamingResponse,Response |
| from fastapi.middleware.cors import CORSMiddleware |
|
|
| from langchain.chains import LLMChain |
| from langchain.prompts import PromptTemplate |
| from TextGen.suno import custom_generate_audio, get_audio_information,generate_lyrics |
| from TextGen.diffusion import generate_image |
| |
| from langchain_google_genai import ( |
| ChatGoogleGenerativeAI, |
| HarmBlockThreshold, |
| HarmCategory, |
| ) |
| from TextGen import app |
| from gradio_client import Client, handle_file |
| from typing import List |
| from elevenlabs.client import ElevenLabs |
| from elevenlabs import stream |
|
|
|
|
# ElevenLabs TTS client, configured from the ELEVEN_API_KEY environment
# variable. Raises KeyError at import time if the key is missing.
Eleven_client = ElevenLabs(
    api_key=os.environ["ELEVEN_API_KEY"],
)




# Cache of the most recent VoiceMessage; /generate_voice_eleven replays it
# when a request arrives without a body.
Last_message=None
class PlayLastMusic(BaseModel):
    '''plays the lastest created music '''
    # LLM tool schema, bound in generate_text for the bard NPC. The docstring
    # above is the tool description the model sees, so it is left unchanged.
    # NOTE(review): "Desicion" (and "lastest") are typos, but the field name
    # is part of the tool-call interface — renaming would break parsing.
    Desicion: str = Field(
        ..., description="Yes or No"
    )
|
|
class CreateLyrics(BaseModel):
    '''create some Lyrics for a new music'''
    # Bug fix: this was an f-string (f'''...'''); f-strings are expressions,
    # not docstrings, so __doc__ was None and bind_tools had no tool
    # description for this schema. A plain string literal restores it.
    # NOTE(review): "Desicion" typo kept — the field name is part of the
    # tool-call interface.
    Desicion: str = Field(
        ..., description="Yes or No"
    )
|
|
class CreateNewMusic(BaseModel):
    '''create a new music with the Lyrics previously computed'''
    # Bug fix: this was an f-string (f'''...'''); f-strings are not stored in
    # __doc__, so the tool description was lost when bound via bind_tools.
    Name: str = Field(
        ..., description="tags to describe the new music"
    )
|
|
class SongRequest(BaseModel):
    # Prompt and style tags for music generation; not referenced by any
    # route visible in this file.
    prompt: str | None = None
    tags: List[str] | None = None
|
|
class Message(BaseModel):
    # Chat request body for /api/generate.
    npc: str | None = None
    # Alternating chat history; generate_text treats even indices as user
    # turns and odd indices as assistant turns.
    messages: List[str] | None = None
class ImageGen(BaseModel):
    # Request body for /generate_image: the diffusion prompt.
    prompt: str | None = None
class VoiceMessage(BaseModel):
    # Request body for the voice endpoints.
    npc: str | None = None
    # Text to synthesize.
    input: str | None = None
    language: str | None = "en"
    # "Male" or "Female"; selects a fallback voice for unknown NPCs
    # (see determine_vocie_from_npc).
    genre:str | None = "Male"
| |
# Base URL of the Vercel-hosted Suno proxy API (required env var).
song_base_api=os.environ["VERCEL_API"]


# Hugging Face token, presumably for gradio_client calls — TODO confirm:
# neither constant is used in the code visible here.
my_hf_token=os.environ["HF_TOKEN"]


|
# NPC name -> reference voice sample path (used by determine_vocie_from_npc).
main_npcs={
    "Blacksmith":"./voices/Blacksmith.mp3",
    "Herbalist":"./voices/female.mp3",
    "Bard":"./voices/Bard_voice.mp3"
}
# NPC name -> system prompt for generate_text.
# NOTE(review): "Witch" has a prompt but no voice entry in main_npcs above —
# confirm whether that is intentional.
main_npc_system_prompts={
    "Blacksmith":"You are a blacksmith in a video game",
    "Herbalist":"You are an herbalist in a video game",
    "Witch":"You are a witch in a video game. You are disguised as a potion seller in a small city where adventurers come to challenge the portal. You are selling some magic spells in a UI that the player only sees. Don't event too much lore and just follow the standard role of a merchant.",
    "Bard":"You are a bard in a video game"
}
class Generate(BaseModel):
    # Response model for /api/generate: the LLM's text reply.
    text:str
|
|
def generate_text(messages: List[str], npc: str) -> Generate:
    """Run one chat turn through Gemini for the given NPC.

    Parameters:
        messages: alternating chat history; even indices are user turns,
            odd indices are assistant turns. May be None/empty.
        npc: NPC name; selects a system prompt from main_npc_system_prompts,
            falling back to a generic role-play prompt.

    Returns:
        Generate wrapping the model's text reply.
    """
    print(npc)
    if npc in main_npcs:
        system_prompt = main_npc_system_prompts[npc]
    else:
        system_prompt = "you're a character in a video game. Play along."
    print(system_prompt)
    # No dedicated system role is used here; the system prompt is sent as the
    # first user message, then the history alternates user/assistant.
    new_messages = [{"role": "user", "content": system_prompt}]
    # Robustness: Message.messages is optional, so guard against None.
    for index, message in enumerate(messages or []):
        role = "user" if index % 2 == 0 else "assistant"
        new_messages.append({"role": role, "content": message})
    print(new_messages)

    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro-latest",
        max_output_tokens=100,
        temperature=1,
        # Game dialogue: disable all safety blocking so the NPC always answers.
        safety_settings={
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
        },
    )
    # Bug fix: NPC names are capitalized elsewhere in this file ("Bard" in
    # main_npcs), so the old exact check `npc == "bard"` never matched and the
    # music tools were never bound. Compare case-insensitively instead.
    if npc is not None and npc.lower() == "bard":
        llm = llm.bind_tools([PlayLastMusic, CreateNewMusic, CreateLyrics])

    llm_response = llm.invoke(new_messages)
    print(llm_response)
    return Generate(text=llm_response.content)
|
|
# Wide-open CORS so the game client can call from anywhere.
# NOTE(review): allow_credentials=True combined with allow_origins=["*"] is
# disallowed by the CORS spec (browsers reject wildcard + credentials);
# consider listing explicit origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
@app.get("/", tags=["Home"])
def api_home():
    """Root endpoint; serves as a minimal liveness check."""
    return {"detail": "Everchanging Quest backend, nothing to see here"}
|
|
@app.post("/api/generate", summary="Generate text from prompt", tags=["Generate"], response_model=Generate)
def inference(message: Message):
    """HTTP wrapper: hand the chat payload straight to generate_text."""
    npc_name = message.npc
    history = message.messages
    return generate_text(messages=history, npc=npc_name)
|
|
| |
def determine_vocie_from_npc(npc, genre):
    """Map an NPC (or a fallback genre) to a reference voice file path.

    Known NPCs get their dedicated sample from main_npcs; unknown NPCs fall
    back to a generic voice by genre, then to the narrator voice.

    Parameters:
        npc: NPC name looked up in main_npcs.
        genre: "Male" or "Female" fallback selector for unknown NPCs.

    Returns:
        Path string to a voice sample file.
    """
    # NOTE(review): function name keeps the original "vocie" typo — it is the
    # public name callers use.
    if npc in main_npcs:
        return main_npcs[npc]
    # Bug fix: the "Male" branch was missing its `return`, so the path string
    # was evaluated and discarded and male voices fell through to the
    # narrator fallback.
    if genre == "Male":
        return "./voices/default_male.mp3"
    if genre == "Female":
        return "./voices/default_female.mp3"
    return "./voices/narator_out.wav"
| |
|
|
@app.post("/generate_wav")
async def generate_wav(message: VoiceMessage):
    """Stub voice endpoint: accepts a VoiceMessage but does no work yet.

    The original body was empty — presumably a local TTS pipeline was
    stripped out in favour of /generate_voice_eleven. Returns the bare
    integer 200, matching the original behavior.
    """
    return 200
|
|
|
|
@app.get("/generate_voice_eleven", response_class=StreamingResponse)
@app.post("/generate_voice_eleven", response_class=StreamingResponse)
def generate_voice_eleven(message: VoiceMessage = None):
    """Stream ElevenLabs TTS audio for the message, or replay the last one.

    A request without a body replays the most recently cached VoiceMessage;
    a request with a body updates that cache.

    Raises:
        HTTPException(400): no body was sent and nothing has been cached yet.
    """
    global Last_message
    if message is None:
        message = Last_message
    else:
        Last_message = message
    # Robustness fix: on the very first call with no body, Last_message is
    # still None and the original crashed with AttributeError on
    # `message.input`. Fail with an explicit client error instead.
    if message is None:
        raise HTTPException(status_code=400, detail="No message provided and no previous message to replay")

    def audio_stream():
        # Forward ElevenLabs audio chunks to the client as they arrive.
        for chunk in Eleven_client.generate(text=message.input, stream=True):
            yield chunk

    return StreamingResponse(audio_stream(), media_type="audio/mpeg")
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
@app.get("/generate_song")
async def generate_song():
    """Generate a bard song end to end: lyrics, then audio, then metadata.

    Uses a fixed prompt; returns the Suno audio-information payload for the
    two generated clips.
    """
    theme = """You are a bard in a video game singing the tales of a little girl in red hood."""

    lyrics = generate_lyrics({
        "prompt": f"{theme}",
    })
    # wait_audio=True blocks until the clips are rendered.
    clips = custom_generate_audio({
        "prompt": lyrics['text'],
        "tags": "male bard",
        "title": "Everchangin_Quest_song",
        "wait_audio": True,
    })
    # Suno returns two clips; fetch status/URLs for both in a single call.
    return get_audio_information(f"{clips[0]['id']},{clips[1]['id']}")
|
|
@app.post('/generate_image')
def Imagen(prompt: ImageGen = None):
    """Generate a PNG from the request prompt via the diffusion backend.

    Raises:
        HTTPException(400): no request body / prompt was provided.
    """
    # Robustness: the parameter defaults to None; fail clearly instead of
    # crashing on attribute access below.
    if prompt is None:
        raise HTTPException(status_code=400, detail="Missing image prompt")
    # Bug fix: the original called generate_image(ImageGen.prompt), passing
    # the pydantic class attribute (a field object), not the request value.
    image_bytes = generate_image(prompt.prompt)
    return Response(content=image_bytes, media_type="image/png")