import gradio as gr
import gemini_gradio
import openai_gradio
import anthropic_gradio
import sambanova_gradio
import xai_gradio
import hyperbolic_gradio
import perplexity_gradio
import mistral_gradio
import fireworks_gradio
import cerebras_gradio
import groq_gradio
import together_gradio
import nvidia_gradio
import dashscope_gradio
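
# Note on credentials (an assumption about the registry packages, not verified
# for every provider here): each *_gradio package typically reads its API key
# from an environment variable (e.g. OPENAI_API_KEY, GEMINI_API_KEY), while
# tabs that pass accept_token=True to gr.load prompt for a key in the UI.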

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("Meta Llama"):
        with gr.Row():
            llama_model = gr.Dropdown(
                choices=[
                    'Meta-Llama-3.2-1B-Instruct',
                    'Meta-Llama-3.2-3B-Instruct',
                    'Llama-3.2-11B-Vision-Instruct',
                    'Llama-3.2-90B-Vision-Instruct',
                    'Meta-Llama-3.1-8B-Instruct',
                    'Meta-Llama-3.1-70B-Instruct',
                    'Meta-Llama-3.1-405B-Instruct'
                ],
                value='Llama-3.2-90B-Vision-Instruct',
                label="Select Llama Model",
                interactive=True
            )

        llama_interface = gr.load(
            name=llama_model.value,
            src=sambanova_gradio.registry,
            multimodal=True,
            fill_height=True
        )

        def update_llama_model(new_model):
            return gr.load(
                name=new_model,
                src=sambanova_gradio.registry,
                multimodal=True,
                fill_height=True
            )

        llama_model.change(
            fn=update_llama_model,
            inputs=[llama_model],
            outputs=[llama_interface]
        )
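
        # Every tab below follows this same pattern: gr.load builds a chat
        # interface for the dropdown's default model, and the .change handler
        # rebuilds that interface whenever a different model is selected.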

        gr.Markdown("**Note:** You need a SambaNova API key from [SambaNova Cloud](https://cloud.sambanova.ai/).")
    with gr.Tab("Gemini"):
        with gr.Row():
            gemini_model = gr.Dropdown(
                choices=[
                    'gemini-1.5-flash',
                    'gemini-1.5-flash-8b',
                    'gemini-1.5-pro',
                    'gemini-exp-1114'
                ],
                value='gemini-1.5-pro',
                label="Select Gemini Model",
                interactive=True
            )

        gemini_interface = gr.load(
            name=gemini_model.value,
            src=gemini_gradio.registry,
            fill_height=True
        )

        def update_gemini_model(new_model):
            return gr.load(
                name=new_model,
                src=gemini_gradio.registry,
                fill_height=True
            )

        gemini_model.change(
            fn=update_gemini_model,
            inputs=[gemini_model],
            outputs=[gemini_interface]
        )
    with gr.Tab("ChatGPT"):
        with gr.Row():
            model_choice = gr.Dropdown(
                choices=[
                    'gpt-4o',
                    'gpt-4o-2024-08-06',
                    'gpt-4o-2024-05-13',
                    'chatgpt-4o-latest',
                    'gpt-4o-mini',
                    'gpt-4o-mini-2024-07-18',
                    'o1-preview',
                    'o1-preview-2024-09-12',
                    'o1-mini',
                    'o1-mini-2024-09-12',
                    'gpt-4-turbo',
                    'gpt-4-turbo-2024-04-09',
                    'gpt-4-turbo-preview',
                    'gpt-4-0125-preview',
                    'gpt-4-1106-preview',
                    'gpt-4',
                    'gpt-4-0613'
                ],
                value='gpt-4o',
                label="Select Model",
                interactive=True
            )

        chatgpt_interface = gr.load(
            name=model_choice.value,
            src=openai_gradio.registry,
            accept_token=True,
            fill_height=True
        )

        def update_model(new_model):
            return gr.load(
                name=new_model,
                src=openai_gradio.registry,
                accept_token=True,
                fill_height=True
            )

        model_choice.change(
            fn=update_model,
            inputs=[model_choice],
            outputs=[chatgpt_interface]
        )
    with gr.Tab("Claude"):
        with gr.Row():
            claude_model = gr.Dropdown(
                choices=[
                    'claude-3-5-sonnet-20241022',
                    'claude-3-5-haiku-20241022',
                    'claude-3-opus-20240229',
                    'claude-3-sonnet-20240229',
                    'claude-3-haiku-20240307'
                ],
                value='claude-3-5-sonnet-20241022',
                label="Select Model",
                interactive=True
            )

        claude_interface = gr.load(
            name=claude_model.value,
            src=anthropic_gradio.registry,
            accept_token=True,
            fill_height=True
        )

        def update_claude_model(new_model):
            return gr.load(
                name=new_model,
                src=anthropic_gradio.registry,
                accept_token=True,
                fill_height=True
            )

        claude_model.change(
            fn=update_claude_model,
            inputs=[claude_model],
            outputs=[claude_interface]
        )
    with gr.Tab("Grok"):
        with gr.Row():
            grok_model = gr.Dropdown(
                choices=[
                    'grok-beta',
                    'grok-vision-beta'
                ],
                value='grok-vision-beta',
                label="Select Grok Model",
                interactive=True
            )

        grok_interface = gr.load(
            name=grok_model.value,
            src=xai_gradio.registry,
            fill_height=True
        )

        def update_grok_model(new_model):
            return gr.load(
                name=new_model,
                src=xai_gradio.registry,
                fill_height=True
            )

        grok_model.change(
            fn=update_grok_model,
            inputs=[grok_model],
            outputs=[grok_interface]
        )
    with gr.Tab("Groq"):
        with gr.Row():
            groq_model = gr.Dropdown(
                choices=[
                    'llama3-groq-8b-8192-tool-use-preview',
                    'llama3-groq-70b-8192-tool-use-preview',
                    'llama-3.2-1b-preview',
                    'llama-3.2-3b-preview',
                    'llama-3.2-11b-text-preview',
                    'llama-3.2-90b-text-preview',
                    'mixtral-8x7b-32768',
                    'gemma2-9b-it',
                    'gemma-7b-it'
                ],
                value='llama3-groq-70b-8192-tool-use-preview',
                label="Select Groq Model",
                interactive=True
            )

        groq_interface = gr.load(
            name=groq_model.value,
            src=groq_gradio.registry,
            fill_height=True
        )

        def update_groq_model(new_model):
            return gr.load(
                name=new_model,
                src=groq_gradio.registry,
                fill_height=True
            )

        groq_model.change(
            fn=update_groq_model,
            inputs=[groq_model],
            outputs=[groq_interface]
        )

        gr.Markdown("""
        **Note:** You need a Groq API key to use these models. Get one at [Groq Cloud](https://console.groq.com/).
        """)
    with gr.Tab("Hyperbolic"):
        with gr.Row():
            hyperbolic_model = gr.Dropdown(
                choices=[
                    'Qwen/Qwen2.5-Coder-32B-Instruct',
                    'meta-llama/Llama-3.2-3B-Instruct',
                    'meta-llama/Meta-Llama-3.1-8B-Instruct',
                    'meta-llama/Meta-Llama-3.1-70B-Instruct',
                    'meta-llama/Meta-Llama-3-70B-Instruct',
                    'NousResearch/Hermes-3-Llama-3.1-70B',
                    'Qwen/Qwen2.5-72B-Instruct',
                    'deepseek-ai/DeepSeek-V2.5',
                    'meta-llama/Meta-Llama-3.1-405B-Instruct',
                ],
                value='Qwen/Qwen2.5-Coder-32B-Instruct',
                label="Select Hyperbolic Model",
                interactive=True
            )

        hyperbolic_interface = gr.load(
            name=hyperbolic_model.value,
            src=hyperbolic_gradio.registry,
            fill_height=True
        )

        def update_hyperbolic_model(new_model):
            return gr.load(
                name=new_model,
                src=hyperbolic_gradio.registry,
                fill_height=True
            )

        hyperbolic_model.change(
            fn=update_hyperbolic_model,
            inputs=[hyperbolic_model],
            outputs=[hyperbolic_interface]
        )

        gr.Markdown("""
        <div>
        <img src="https://storage.googleapis.com/public-arena-asset/hyperbolic_logo.png" alt="Hyperbolic Logo" style="height: 50px; margin-right: 10px;">
        </div>

        **Note:** These models are served by Hyperbolic. Build your AI apps at [Hyperbolic](https://app.hyperbolic.xyz/).
        """)
    with gr.Tab("Qwen"):
        with gr.Row():
            qwen_model = gr.Dropdown(
                choices=[
                    # Proprietary models
                    'qwen-turbo-latest',
                    'qwen-turbo',
                    'qwen-plus',
                    'qwen-max',
                    # Open-source models (Qwen 1.5)
                    'qwen1.5-110b-chat',
                    'qwen1.5-72b-chat',
                    'qwen1.5-32b-chat',
                    'qwen1.5-14b-chat',
                    'qwen1.5-7b-chat'
                ],
                value='qwen-turbo-latest',
                label="Select Qwen Model",
                interactive=True
            )

        qwen_interface = gr.load(
            name=qwen_model.value,
            src=dashscope_gradio.registry,
            fill_height=True
        )

        def update_qwen_model(new_model):
            return gr.load(
                name=new_model,
                src=dashscope_gradio.registry,
                fill_height=True
            )

        qwen_model.change(
            fn=update_qwen_model,
            inputs=[qwen_model],
            outputs=[qwen_interface]
        )

        gr.Markdown("""
        **Note:** You need a DashScope API key to use these models. Get one at [DashScope](https://dashscope.aliyun.com/).

        Models are available in two categories:
        - **Proprietary Models**:
          - Qwen Turbo: Fast responses for general tasks
          - Qwen Plus: Balanced performance and quality
          - Qwen Max: Highest quality responses
        - **Open Source Models**:
          - Available in various sizes from 7B to 110B parameters
          - Based on the Qwen 1.5 architecture
        """)
    with gr.Tab("Perplexity"):
        with gr.Row():
            perplexity_model = gr.Dropdown(
                choices=[
                    # Sonar online models (search-enabled)
                    'llama-3.1-sonar-small-128k-online',
                    'llama-3.1-sonar-large-128k-online',
                    'llama-3.1-sonar-huge-128k-online',
                    # Sonar chat models
                    'llama-3.1-sonar-small-128k-chat',
                    'llama-3.1-sonar-large-128k-chat',
                    # Open-source models
                    'llama-3.1-8b-instruct',
                    'llama-3.1-70b-instruct'
                ],
                value='llama-3.1-sonar-large-128k-online',
                label="Select Perplexity Model",
                interactive=True
            )

        perplexity_interface = gr.load(
            name=perplexity_model.value,
            src=perplexity_gradio.registry,
            accept_token=True,
            fill_height=True
        )

        def update_perplexity_model(new_model):
            return gr.load(
                name=new_model,
                src=perplexity_gradio.registry,
                accept_token=True,
                fill_height=True
            )

        perplexity_model.change(
            fn=update_perplexity_model,
            inputs=[perplexity_model],
            outputs=[perplexity_interface]
        )

        gr.Markdown("""
        **Note:** Models are grouped into three categories:
        - **Sonar Online Models**: Include search capabilities (beta access required)
        - **Sonar Chat Models**: Standard chat models
        - **Open Source Models**: Based on Hugging Face implementations

        For access to the Sonar Online models' search features, please fill out the [beta access form](https://perplexity.typeform.com/apiaccessform?typeform-source=docs.perplexity.ai).
        """)
    with gr.Tab("DeepSeek-V2.5"):
        gr.load(
            name='deepseek-ai/DeepSeek-V2.5',
            src=hyperbolic_gradio.registry,
            fill_height=True
        )
        gr.Markdown("""
        <div>
        <img src="https://storage.googleapis.com/public-arena-asset/hyperbolic_logo.png" alt="Hyperbolic Logo" style="height: 50px; margin-right: 10px;">
        </div>

        **Note:** This model is served by Hyperbolic. Build your AI apps at [Hyperbolic](https://app.hyperbolic.xyz/).
        """)
    with gr.Tab("Mistral"):
        with gr.Row():
            mistral_model = gr.Dropdown(
                choices=[
                    # Premier models
                    'mistral-large-latest',
                    'pixtral-large-latest',
                    'ministral-3b-latest',
                    'ministral-8b-latest',
                    'mistral-small-latest',
                    'codestral-latest',
                    'mistral-embed',
                    'mistral-moderation-latest',
                    # Free models
                    'pixtral-12b-2409',
                    'open-mistral-nemo',
                    'open-codestral-mamba'
                ],
                value='pixtral-large-latest',
                label="Select Mistral Model",
                interactive=True
            )

        mistral_interface = gr.load(
            name=mistral_model.value,
            src=mistral_gradio.registry,
            fill_height=True
        )

        def update_mistral_model(new_model):
            return gr.load(
                name=new_model,
                src=mistral_gradio.registry,
                fill_height=True
            )

        mistral_model.change(
            fn=update_mistral_model,
            inputs=[mistral_model],
            outputs=[mistral_interface],
        )

        gr.Markdown("""
        **Note:** You need a Mistral API key to use these models. Get one at [Mistral AI Platform](https://console.mistral.ai/).

        Models are grouped into two categories:
        - **Premier Models**: Require a paid API key
        - **Free Models**: Available with free API keys

        Each model has a different context window size (from 8k to 256k tokens) and specialized capabilities.
        """)
    with gr.Tab("Fireworks"):
        with gr.Row():
            fireworks_model = gr.Dropdown(
                choices=[
                    'f1-preview',
                    'f1-mini-preview',
                ],
                value='f1-preview',
                label="Select Fireworks Model",
                interactive=True
            )

        fireworks_interface = gr.load(
            name=fireworks_model.value,
            src=fireworks_gradio.registry,
            fill_height=True
        )

        def update_fireworks_model(new_model):
            return gr.load(
                name=new_model,
                src=fireworks_gradio.registry,
                fill_height=True
            )

        fireworks_model.change(
            fn=update_fireworks_model,
            inputs=[fireworks_model],
            outputs=[fireworks_interface]
        )

        gr.Markdown("""
        **Note:** You need a Fireworks AI API key to use these models. Get one at [Fireworks AI](https://app.fireworks.ai/).
        """)
    with gr.Tab("Cerebras"):
        with gr.Row():
            cerebras_model = gr.Dropdown(
                choices=[
                    'llama3.1-8b',
                    'llama3.1-70b',
                    'llama3.1-405b'
                ],
                value='llama3.1-70b',
                label="Select Cerebras Model",
                interactive=True
            )

        cerebras_interface = gr.load(
            name=cerebras_model.value,
            src=cerebras_gradio.registry,
            accept_token=True,
            fill_height=True
        )

        def update_cerebras_model(new_model):
            return gr.load(
                name=new_model,
                src=cerebras_gradio.registry,
                accept_token=True,
                fill_height=True
            )

        cerebras_model.change(
            fn=update_cerebras_model,
            inputs=[cerebras_model],
            outputs=[cerebras_interface]
        )
    with gr.Tab("Together"):
        with gr.Row():
            together_model = gr.Dropdown(
                choices=[
                    'meta-llama/Llama-Vision-Free',
                    'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
                    'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',

                    'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
                    'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
                    'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
                    'meta-llama/Meta-Llama-3-8B-Instruct-Turbo',
                    'meta-llama/Meta-Llama-3-70B-Instruct-Turbo',
                    'meta-llama/Llama-3.2-3B-Instruct-Turbo',
                    'meta-llama/Meta-Llama-3-8B-Instruct-Lite',
                    'meta-llama/Meta-Llama-3-70B-Instruct-Lite',
                    'meta-llama/Llama-3-8b-chat-hf',
                    'meta-llama/Llama-3-70b-chat-hf',

                    'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
                    'Qwen/Qwen2.5-Coder-32B-Instruct',
                    'microsoft/WizardLM-2-8x22B',
                    'google/gemma-2-27b-it',
                    'google/gemma-2-9b-it',
                    'databricks/dbrx-instruct',

                    'mistralai/Mixtral-8x7B-Instruct-v0.1',
                    'mistralai/Mixtral-8x22B-Instruct-v0.1',

                    'Qwen/Qwen2.5-7B-Instruct-Turbo',
                    'Qwen/Qwen2.5-72B-Instruct-Turbo',
                    'Qwen/Qwen2-72B-Instruct',

                    'deepseek-ai/deepseek-llm-67b-chat',
                    'google/gemma-2b-it',
                    'Gryphe/MythoMax-L2-13b',
                    'meta-llama/Llama-2-13b-chat-hf',
                    'mistralai/Mistral-7B-Instruct-v0.1',
                    'mistralai/Mistral-7B-Instruct-v0.2',
                    'mistralai/Mistral-7B-Instruct-v0.3',
                    'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
                    'togethercomputer/StripedHyena-Nous-7B',
                    'upstage/SOLAR-10.7B-Instruct-v1.0'
                ],
                value='meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
                label="Select Together Model",
                interactive=True
            )

        together_interface = gr.load(
            name=together_model.value,
            src=together_gradio.registry,
            multimodal=True,
            fill_height=True
        )

        def update_together_model(new_model):
            return gr.load(
                name=new_model,
                src=together_gradio.registry,
                multimodal=True,
                fill_height=True
            )

        together_model.change(
            fn=update_together_model,
            inputs=[together_model],
            outputs=[together_interface]
        )

        gr.Markdown("""
        **Note:** You need a Together AI API key to use these models. Get one at [Together AI](https://www.together.ai/).
        """)
    with gr.Tab("NVIDIA"):
        with gr.Row():
            nvidia_model = gr.Dropdown(
                choices=[
                    # NVIDIA models
                    'nvidia/llama3-chatqa-1.5-70b',
                    'nvidia/llama3-chatqa-1.5-8b',
                    'nvidia/nemotron-4-340b-instruct',
                    # Meta models
                    'meta/llama-3.1-70b-instruct',
                    'meta/codellama-70b',
                    'meta/llama2-70b',
                    'meta/llama3-8b',
                    'meta/llama3-70b',
                    # Mistral AI models
                    'mistralai/codestral-22b-instruct-v0.1',
                    'mistralai/mathstral-7b-v0.1',
                    'mistralai/mistral-large-2-instruct',
                    'mistralai/mistral-7b-instruct',
                    'mistralai/mistral-7b-instruct-v0.3',
                    'mistralai/mixtral-8x7b-instruct',
                    'mistralai/mixtral-8x22b-instruct',
                    'mistralai/mistral-large',
                    # Google models
                    'google/gemma-2b',
                    'google/gemma-7b',
                    'google/gemma-2-2b-it',
                    'google/gemma-2-9b-it',
                    'google/gemma-2-27b-it',
                    'google/codegemma-1.1-7b',
                    'google/codegemma-7b',
                    'google/recurrentgemma-2b',
                    'google/shieldgemma-9b',
                    # Microsoft models
                    'microsoft/phi-3-medium-128k-instruct',
                    'microsoft/phi-3-medium-4k-instruct',
                    'microsoft/phi-3-mini-128k-instruct',
                    'microsoft/phi-3-mini-4k-instruct',
                    'microsoft/phi-3-small-128k-instruct',
                    'microsoft/phi-3-small-8k-instruct',
                    # Other providers
                    'qwen/qwen2-7b-instruct',
                    'databricks/dbrx-instruct',
                    'deepseek-ai/deepseek-coder-6.7b-instruct',
                    'upstage/solar-10.7b-instruct',
                    'snowflake/arctic'
                ],
                value='meta/llama-3.1-70b-instruct',
                label="Select NVIDIA Model",
                interactive=True
            )

        nvidia_interface = gr.load(
            name=nvidia_model.value,
            src=nvidia_gradio.registry,
            accept_token=True,
            fill_height=True
        )

        def update_nvidia_model(new_model):
            return gr.load(
                name=new_model,
                src=nvidia_gradio.registry,
                accept_token=True,
                fill_height=True
            )

        nvidia_model.change(
            fn=update_nvidia_model,
            inputs=[nvidia_model],
            outputs=[nvidia_interface]
        )

        gr.Markdown("""
        **Note:** You need an NVIDIA AI Foundation API key to use these models. Get one at [NVIDIA AI Foundation](https://www.nvidia.com/en-us/ai-data-science/foundation-models/).

        Models are organized by provider:
        - **NVIDIA**: Native models including Llama3-ChatQA and Nemotron
        - **Meta**: Llama family models
        - **Mistral**: Various Mistral and Mixtral models
        - **Google**: Gemma family models
        - **Microsoft**: Phi-3 series
        - **Others**: Qwen, Databricks, DeepSeek, Upstage, and Snowflake
        """)
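
# ssr_mode=False turns off Gradio's server-side rendering (introduced in
# Gradio 5), keeping this multi-tab app on the classic rendering path.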
demo.launch(ssr_mode=False)