# NOTE(review): removed web-scrape residue (a "File size" header, commit-hash
# gutter, and line-number gutter copied from a code-hosting page). It was not
# part of the program and made the file invalid Python.
import gradio as gr
import pdfplumber
from groq import Groq
import os
import json

def file_to_string(file):
    """Extract the textual content of an uploaded file.

    Parameters
    ----------
    file : file-like object
        Gradio upload handle exposing a ``.name`` path attribute; ``.txt``
        uploads must also expose ``.read()`` returning UTF-8 bytes.

    Returns
    -------
    str
        The extracted text, or the message ``"Unsupported file format."``
        for any other extension.
    """
    # Compare extensions case-insensitively so ".PDF" / ".TXT" uploads work.
    suffix = file.name.lower()
    if suffix.endswith(".pdf"):
        # pdfplumber's extract_text() returns None for pages without a text
        # layer (e.g. scanned images); `or ""` guards against the TypeError
        # the original `text += None` would raise. join() also avoids
        # quadratic string concatenation.
        with pdfplumber.open(file.name) as pdf:
            return "".join(page.extract_text() or "" for page in pdf.pages)
    if suffix.endswith(".txt"):
        return file.read().decode('utf-8')
    return "Unsupported file format."

def generate_summary(file, prompt, model, output_format, temperature, top_p):
    """Send the prompt (optionally merged with uploaded-file text) to the
    Groq chat API and return the model's reply.

    Parameters
    ----------
    file : file-like object or None
        Optional upload; its text is extracted via ``file_to_string``.
    prompt : str
        User prompt. May contain the ``{PDF_TEXT}`` placeholder to control
        where the document text is inserted.
    model : str
        Groq model identifier.
    output_format : str
        ``"JSON"`` requests a JSON object response and pretty-prints it;
        anything else returns plain text.
    temperature, top_p : float
        Sampling parameters forwarded to the API.

    Returns
    -------
    str
        The model output, a pretty-printed JSON string, or an error message.
    """
    if not prompt:
        return "Please provide a prompt."

    document_text = file_to_string(file) if file else ""

    # Substitute the document at the {PDF_TEXT} placeholder when both the
    # placeholder and some document text exist; otherwise append the text
    # (if any) after the prompt.
    if document_text and "{PDF_TEXT}" in prompt:
        request_text = prompt.replace("{PDF_TEXT}", document_text)
    elif document_text:
        request_text = f"{prompt}\n{document_text}"
    else:
        request_text = prompt

    try:
        groq_client = Groq(api_key=os.environ["GROQ_API_KEY"])
        response = groq_client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": request_text}],
            temperature=temperature,
            max_tokens=8000,
            top_p=top_p,
            # Only JSON mode sets a response_format; None means "no constraint".
            response_format={"type": "json_object"} if output_format == "JSON" else None,
            stop=None,
        )
        result = response.choices[0].message.content

        if output_format == "JSON":
            # Round-trip through json to validate and pretty-print.
            try:
                result = json.dumps(json.loads(result), indent=2)
            except json.JSONDecodeError:
                result = "Error: The model did not return valid JSON."
    except Exception as e:
        # Best-effort UI: surface any failure (missing key, network, API
        # error) as a readable message rather than crashing the app.
        result = f"An error occurred: {str(e)}"

    return result

def clear_output():
    """Reset every control to its initial state.

    Returns a tuple matching the ``outputs`` wired to the Clear button:
    (file, prompt, output, format, temperature, top_p).
    """
    defaults = (None, "", "", "Plain Text", 0, 0.9)
    return defaults

# --- Gradio UI wiring --------------------------------------------------------
# Declarative layout: upload + prompt row, model/format row, sampling-
# parameter row, button row, then the output textbox. Callbacks are bound
# below the layout.
with gr.Blocks() as iface:
    gr.Markdown("LLAMA 70B Groq API")
    
    with gr.Row():
        # File upload is optional; generate_summary treats a missing file
        # as an empty document.
        file_input = gr.File(label="Upload File (Optional)")
        prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here... Use {PDF_TEXT} to insert PDF content at a specific location.", lines=3)
    
    with gr.Row():
        model_dropdown = gr.Dropdown(label="Choose Model", choices=["llama-3.1-8b-instant", "llama-3.1-70b-versatile"], value="llama-3.1-70b-versatile")
        # "JSON" switches generate_summary into JSON-object response mode.
        output_format = gr.Radio(["Plain Text", "JSON"], label="Output Format", value="Plain Text")
    
    with gr.Row():
        # Defaults (temperature=0, top_p=0.9) must match clear_output()'s
        # reset values.
        temperature_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.1, label="Temperature")
        top_p_slider = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.05, label="Top-p")

    with gr.Row():
        clear_button = gr.Button("Clear", size="small")
        submit_button = gr.Button("Generate Output")

    output = gr.Textbox(label="Output", lines=10, placeholder="Output will appear here...")

    # Generate: feed every control into generate_summary; result goes to
    # the output textbox.
    submit_button.click(
        fn=generate_summary,
        inputs=[file_input, prompt_input, model_dropdown, output_format, temperature_slider, top_p_slider],
        outputs=[output]
    )

    # Clear: clear_output() returns one value per component listed in
    # `outputs`, in the same order.
    clear_button.click(
        fn=clear_output,
        inputs=[],
        outputs=[file_input, prompt_input, output, output_format, temperature_slider, top_p_slider]
    )

# Launch the local web server (blocking call).
iface.launch()