import os

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference API client; authenticates via the HF_TOKEN env var.
client = InferenceClient(api_key=os.getenv("HF_TOKEN"))


def generate_response(question: str, prompt: str) -> str:
    """Generate an AI answer to *question*, steered by a system *prompt*.

    Args:
        question: The user's question.
        prompt: Instructions that guide the style/content of the answer.

    Returns:
        The model-generated reply text.
    """
    # System message goes FIRST so the model treats it as an instruction
    # preamble rather than a turn following the user's question.
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": question},
    ]
    completion = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=messages,
        max_tokens=1024,
    )
    # Attribute access is the documented form on the output dataclass;
    # dict-style subscripting (message['content']) is not reliable.
    return completion.choices[0].message.content


def create_ui() -> gr.Blocks:
    """Build and return the PromptMate Gradio interface."""
    with gr.Blocks() as demo:
        gr.HTML("<h1 style='text-align: center;'>Welcome to PromptMate!</h1>")
        gr.HTML(
            "<p style='text-align: center;'>"
            "Ask a question and get a detailed response with the power of AI!"
            "</p>"
        )
        # Horizontal rule for visual separation.
        gr.HTML("<hr>")
        gr.HTML("<h3>Example Usage:</h3>")
        gr.HTML(
            "<p>Question: What is the capital of France?<br>"
            "Prompt: Provide a detailed answer using geographical and "
            "cultural context.</p>"
        )
        # Another horizontal rule for visual separation.
        gr.HTML("<hr>")

        with gr.Row():
            question_input = gr.Textbox(
                label="Your Question",
                placeholder="Type your question here...",
                lines=2,
                max_lines=4,
                elem_id="question_input",
                value="What is the capital of France?",  # example for the user
            )
        with gr.Row():
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="Provide a prompt to guide the response...",
                lines=4,
                max_lines=6,
                elem_id="prompt_input",
                value=(
                    "Provide a detailed answer using geographical "
                    "and cultural context."
                ),  # example for the user
            )
        with gr.Row():
            submit_button = gr.Button("Generate Response", elem_id="submit_button")
            output = gr.Textbox(
                label="Generated Response",
                placeholder="Your AI-generated response will appear here...",
                lines=6,
                max_lines=8,
                interactive=False,
                elem_id="output",
            )

        # Wire the button to the inference call.
        submit_button.click(
            fn=generate_response,
            inputs=[question_input, prompt_input],
            outputs=output,
        )
    return demo


# Run the app.
if __name__ == "__main__":
    app = create_ui()
    app.launch()