; LLM pipeline and application configuration.
; Portable INI subset: `;` full-line comments, `=` delimiter, one pair per line.
; NOTE(review): consuming parser unknown — key names (e.g. maxTokens) kept
; exactly as-is since the reader may be case-sensitive.

; Rewrites incoming user queries before retrieval/generation.
[QUERYREPHRASER]
model = meta-llama/llama-4-maverick-17b-128e-instruct
temperature = 1
; upper bound on generated tokens for the rephrased query
maxTokens = 512

; Produces metadata for generated artifacts.
[METADATAGENERATOR]
model = meta-llama/llama-4-maverick-17b-128e-instruct
temperature = 1

; Primary code-generation model.
[CODEGENERATOR]
model = meta-llama/llama-4-maverick-17b-128e-instruct
temperature = 1

; Fallback code-generation model used when the primary generator fails.
; NOTE(review): currently identical to CODEGENERATOR — confirm whether a
; different (e.g. more conservative) model/temperature was intended here.
[FAILSAFECODEGENERATOR]
model = meta-llama/llama-4-maverick-17b-128e-instruct
temperature = 1

; Audio transcription model.
[SPEECHTOTEXT]
model = whisper-large-v3-turbo

; HTTP server settings.
[APPLICATION]
; 0.0.0.0 binds all network interfaces — intended for containerized deployment
host = 0.0.0.0
port = 7860
; number of worker processes serving requests
workers = 3