import gradio as gr
import gemini_gradio
import openai_gradio
import anthropic_gradio
import sambanova_gradio
import xai_gradio
import hyperbolic_gradio
import perplexity_gradio
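
# Provider adapters: each exposes a `registry` that gr.load() below targets.
# Likely install line, assuming each PyPI package name matches its import
# name with an underscore-to-hyphen swap (the usual convention for these):
#   pip install gradio gemini-gradio openai-gradio anthropic-gradio \
#       sambanova-gradio xai-gradio hyperbolic-gradio perplexity-gradio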
with gr.Blocks(fill_height=True) as demo:
| | with gr.Tab("Gemini"): |
| | with gr.Row(): |
| | gemini_model = gr.Dropdown( |
| | choices=[ |
| | 'gemini-1.5-flash', |
| | 'gemini-1.5-flash-8b', |
| | 'gemini-1.5-pro', |
| | 'gemini-exp-1114' |
| | ], |
| | value='gemini-1.5-pro', |
| | label="Select Gemini Model", |
| | interactive=True |
| | ) |
| | |
| | gemini_interface = gr.load( |
| | name=gemini_model.value, |
| | src=gemini_gradio.registry, |
| | accept_token=True |
| | ) |
| | |
| | def update_gemini_model(new_model): |
| | return gr.load( |
| | name=new_model, |
| | src=gemini_gradio.registry, |
| | accept_token=True |
| | ) |
| | |
| | gemini_model.change( |
| | fn=update_gemini_model, |
| | inputs=[gemini_model], |
| | outputs=[gemini_interface] |
| | ) |
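
    # Every tab below repeats the same dropdown -> gr.load() wiring used
    # above. A sketch of a builder that could collapse that boilerplate
    # (a hypothetical helper, deliberately left unused here):
    def make_model_tab(title, choices, default, registry, **load_kwargs):
        with gr.Tab(title):
            with gr.Row():
                dropdown = gr.Dropdown(
                    choices=choices,
                    value=default,
                    label=f"Select {title} Model",
                    interactive=True
                )
            interface = gr.load(
                name=default, src=registry, accept_token=True, **load_kwargs
            )
            dropdown.change(
                fn=lambda new_model: gr.load(
                    name=new_model, src=registry,
                    accept_token=True, **load_kwargs
                ),
                inputs=[dropdown],
                outputs=[interface]
            )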
| | with gr.Tab("ChatGPT"): |
| | with gr.Row(): |
| | model_choice = gr.Dropdown( |
| | choices=[ |
| | 'gpt-4o', |
| | 'gpt-4o-2024-08-06', |
| | 'gpt-4o-2024-05-13', |
| | 'chatgpt-4o-latest', |
| | 'gpt-4o-mini', |
| | 'gpt-4o-mini-2024-07-18', |
| | 'o1-preview', |
| | 'o1-preview-2024-09-12', |
| | 'o1-mini', |
| | 'o1-mini-2024-09-12', |
| | 'gpt-4-turbo', |
| | 'gpt-4-turbo-2024-04-09', |
| | 'gpt-4-turbo-preview', |
| | 'gpt-4-0125-preview', |
| | 'gpt-4-1106-preview', |
| | 'gpt-4', |
| | 'gpt-4-0613' |
| | ], |
| | value='gpt-4o', |
| | label="Select Model", |
| | interactive=True |
| | ) |
| | |
| | chatgpt_interface = gr.load( |
| | name=model_choice.value, |
| | src=openai_gradio.registry, |
| | accept_token=True |
| | ) |
| | |
| | def update_model(new_model): |
| | return gr.load( |
| | name=new_model, |
| | src=openai_gradio.registry, |
| | accept_token=True |
| | ) |
| | |
| | model_choice.change( |
| | fn=update_model, |
| | inputs=[model_choice], |
| | outputs=[chatgpt_interface] |
| | ) |
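
    # accept_token=True renders a password-style textbox so each visitor
    # supplies their own API key, rather than relying on a host-side env var
    # such as OPENAI_API_KEY (an assumption based on these adapters' usual
    # convention when no token box is shown).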

    # Claude tab: same pattern against the anthropic_gradio registry.
    with gr.Tab("Claude"):
        with gr.Row():
            claude_model = gr.Dropdown(
                choices=[
                    'claude-3-5-sonnet-20241022',
                    'claude-3-5-haiku-20241022',
                    'claude-3-opus-20240229',
                    'claude-3-sonnet-20240229',
                    'claude-3-haiku-20240307'
                ],
                value='claude-3-5-sonnet-20241022',
                label="Select Model",
                interactive=True
            )

        claude_interface = gr.load(
            name=claude_model.value,
            src=anthropic_gradio.registry,
            accept_token=True
        )

        def update_claude_model(new_model):
            return gr.load(
                name=new_model,
                src=anthropic_gradio.registry,
                accept_token=True
            )

        claude_model.change(
            fn=update_claude_model,
            inputs=[claude_model],
            outputs=[claude_interface]
        )
| | with gr.Tab("Meta Llama"): |
| | with gr.Row(): |
| | llama_model = gr.Dropdown( |
| | choices=[ |
| | 'Meta-Llama-3.2-1B-Instruct', |
| | 'Meta-Llama-3.2-3B-Instruct', |
| | 'Llama-3.2-11B-Vision-Instruct', |
| | 'Llama-3.2-90B-Vision-Instruct', |
| | 'Meta-Llama-3.1-8B-Instruct', |
| | 'Meta-Llama-3.1-70B-Instruct', |
| | 'Meta-Llama-3.1-405B-Instruct' |
| | ], |
| | value='Llama-3.2-90B-Vision-Instruct', |
| | label="Select Llama Model", |
| | interactive=True |
| | ) |
| | |
| | llama_interface = gr.load( |
| | name=llama_model.value, |
| | src=sambanova_gradio.registry, |
| | accept_token=True, |
| | multimodal=True |
| | ) |
| | |
| | def update_llama_model(new_model): |
| | return gr.load( |
| | name=new_model, |
| | src=sambanova_gradio.registry, |
| | accept_token=True, |
| | multimodal=True |
| | ) |
| | |
| | llama_model.change( |
| | fn=update_llama_model, |
| | inputs=[llama_model], |
| | outputs=[llama_interface] |
| | ) |
| | |
| | gr.Markdown("**Note:** You need to use a SambaNova API key from [SambaNova Cloud](https://cloud.sambanova.ai/).") |
| | with gr.Tab("Grok"): |
| | gr.load( |
| | name='grok-beta', |
| | src=xai_gradio.registry, |
| | accept_token=True |
| | ) |
| | with gr.Tab("Qwen2.5 72B"): |
| | gr.load( |
| | name='Qwen/Qwen2.5-72B-Instruct', |
| | src=hyperbolic_gradio.registry, |
| | accept_token=True |
| | ) |
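
    # The two fixed-model tabs above could also be generated by the
    # make_model_tab sketch earlier, using a single-item choices list.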
| | with gr.Tab("Perplexity"): |
| | with gr.Row(): |
| | perplexity_model = gr.Dropdown( |
| | choices=[ |
| | |
| | 'llama-3.1-sonar-small-128k-online', |
| | 'llama-3.1-sonar-large-128k-online', |
| | 'llama-3.1-sonar-huge-128k-online', |
| | |
| | 'llama-3.1-sonar-small-128k-chat', |
| | 'llama-3.1-sonar-large-128k-chat', |
| | |
| | 'llama-3.1-8b-instruct', |
| | 'llama-3.1-70b-instruct' |
| | ], |
| | value='llama-3.1-sonar-large-128k-online', |
| | label="Select Perplexity Model", |
| | interactive=True |
| | ) |
| | |
| | perplexity_interface = gr.load( |
| | name=perplexity_model.value, |
| | src=perplexity_gradio.registry, |
| | accept_token=True |
| | ) |
| | |
| | def update_perplexity_model(new_model): |
| | return gr.load( |
| | name=new_model, |
| | src=perplexity_gradio.registry, |
| | accept_token=True |
| | ) |
| | |
| | perplexity_model.change( |
| | fn=update_perplexity_model, |
| | inputs=[perplexity_model], |
| | outputs=[perplexity_interface] |
| | ) |
| | |
| | gr.Markdown(""" |
| | **Note:** Models are grouped into three categories: |
| | - **Sonar Online Models**: Include search capabilities (beta access required) |
| | - **Sonar Chat Models**: Standard chat models |
| | - **Open Source Models**: Based on Hugging Face implementations |
| | |
| | For access to Online LLMs features, please fill out the [beta access form](https://forms.perplexity.ai). |
| | """) |
demo.launch()