# NOTE: removed copy/paste artifacts (file-size banner and stray line numbers)
# that preceded the script and were not valid Python.
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
from secrets import HUGGING_FACE_TOKEN  # Importing the token from a separate file

# Define your HuggingFace endpoint details
# Two Mistral instruct checkpoints are queried side by side so their answers
# can be compared in the UI defined below.
repo_id_1 = "mistralai/Mistral-7B-Instruct-v0.2"
repo_id_2 = "mistralai/Mistral-7B-Instruct-v0.3"

# Initialize the HuggingFace endpoints
# NOTE(review): the `from secrets import HUGGING_FACE_TOKEN` import above shadows
# the stdlib `secrets` module — consider renaming that local config file.
# NOTE(review): langchain_huggingface's HuggingFaceEndpoint documents
# `max_new_tokens` (not `max_length`) and `huggingfacehub_api_token` — confirm
# these kwargs are honored and not silently ignored.
llm_1 = HuggingFaceEndpoint(repo_id=repo_id_1, max_length=128, temperature=0.7, token=HUGGING_FACE_TOKEN)
llm_2 = HuggingFaceEndpoint(repo_id=repo_id_2, max_length=128, temperature=0.7, token=HUGGING_FACE_TOKEN)

# Query both endpoints with the same prompt and merge their labeled answers.
def get_combined_response(prompt):
    """Send *prompt* to both LLM endpoints and return their responses combined.

    The two answers are labeled "Model 1" / "Model 2" and separated by a
    blank line, matching the order of llm_1 and llm_2 above.
    """
    answers = [endpoint.invoke(prompt) for endpoint in (llm_1, llm_2)]
    return f"Model 1 Response: {answers[0]}\n\nModel 2 Response: {answers[1]}"

# Create a Gradio interface for the combined function
# Single text box in, single text box out; `title` is shown in the page header.
iface_combined = gr.Interface(fn=get_combined_response, inputs="text", outputs="text", title="Combined Machine Learning Chatbots")

# Launch the Gradio app
# launch() starts the web server and blocks; by default it serves locally —
# see Gradio docs for `share`/`server_port` options.
iface_combined.launch()