File size: 3,293 Bytes
5520791
 
 
 
857ad2f
 
5520791
 
 
 
 
 
 
 
 
 
857ad2f
 
 
5520791
 
 
 
 
 
 
 
 
 
 
857ad2f
 
5520791
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0e51269
5520791
 
 
 
857ad2f
 
 
 
 
5520791
857ad2f
 
5520791
857ad2f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import gradio as gr
import os
import json
import requests
# from dotenv import load_dotenv
# load_dotenv()

API_URL = "https://api.openai.com/v1/chat/completions"

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
hf_token = os.getenv("hf_token")

def get_system_txt():
    """Fetch the system prompt from a private Hugging Face space.

    Returns the stripped text of ``system.txt``. Raises ``Exception`` if
    the request does not return HTTP 200 (e.g. bad/expired ``hf_token``).
    """
    headers = {
        "Authorization": f"Bearer {hf_token}"
    }
    url = "https://huggingface.co/spaces/clayp/reaction-sys/resolve/main/system.txt"
    # timeout: without one, a stalled HF endpoint would hang this worker
    # (and every chat request) indefinitely.
    response = requests.get(url, headers=headers, timeout=10)

    if response.status_code == 200:
        return response.text.strip()
    else:
        raise Exception("Failed to fetch system.txt from private Gradio space")

def predict(inputs, chat_counter, chatbot=None, history=None):
    """Stream a GPT-4 chat completion for *inputs*, yielding UI updates.

    Parameters:
        inputs: the new user message (str).
        chat_counter: running count of exchanges; incremented here.
        chatbot: prior (user, assistant) pairs as rendered by Gradio.
        history: flat alternating [user, assistant, ...] message list.

    Yields (chat, history, chat_counter, response) tuples as streamed
    tokens arrive, so the Gradio chatbot updates incrementally.
    """
    # Fix: the original used mutable default arguments ([]), which are
    # shared across calls and silently accumulate state. Use None
    # sentinels instead (backward-compatible: explicit callers unchanged).
    chatbot = [] if chatbot is None else chatbot
    history = [] if history is None else history

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}"
    }

    # System prompt is re-fetched from the private space on every call.
    system_msg = get_system_txt()

    # Deterministic decoding settings.
    top_p = 1.0
    temperature = 0.0

    # Rebuild the full conversation: system prompt, prior turns, new input.
    messages = [{"role": "system", "content": system_msg}]
    for user_msg, assistant_msg in chatbot:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": inputs})

    payload = {
        "model": "gpt-4",
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    chat_counter += 1
    history.append(inputs)
    # timeout=(connect, read): the read timeout is generous because this
    # is a long-lived streaming response, but it still bounds a dead
    # connection instead of hanging forever.
    response = requests.post(API_URL, headers=headers, json=payload,
                             stream=True, timeout=(10, 300))
    token_counter = 0
    partial_words = ""

    counter = 0
    for chunk in response.iter_lines():
        # Skip the first line of the SSE stream (empty keep-alive).
        if counter == 0:
            counter += 1
            continue
        if chunk.decode():
            chunk = chunk.decode()
            # SSE frames look like "data: {...}". The len > 12 guard also
            # skips the terminal "data: [DONE]" frame (exactly 12 chars),
            # which is not valid JSON.
            if len(chunk) > 12:
                # Fix: parse each frame once (the original called
                # json.loads twice on every chunk).
                delta = json.loads(chunk[6:])['choices'][0]['delta']
                if "content" in delta:
                    partial_words += delta["content"]
                    if token_counter == 0:
                        # First token of the reply: append a new slot.
                        history.append(" " + partial_words)
                    else:
                        # Subsequent tokens: overwrite the growing reply.
                        history[-1] = partial_words
                    # Re-pair the flat history into (user, assistant) tuples.
                    chat = [(history[i], history[i + 1])
                            for i in range(0, len(history) - 1, 2)]
                    token_counter += 1
                    yield chat, history, chat_counter, response

def reset_textbox():
    """Clear the input textbox after a message has been submitted."""
    cleared = gr.update(value='')
    return cleared

title = """<h1 align="center">Response simulator: No account needed, just enter the text in the input box below and hit submit or enter.</h1>"""

# Gradio UI: a single-column chat interface wired to the streaming
# predict() generator above.
with gr.Blocks() as demo:
    gr.HTML(title)
    with gr.Column(elem_id="col_container"):
        chatbot = gr.Chatbot(label='GPT-4', elem_id="chatbot")
        inputs = gr.Textbox(placeholder="", label="Type an input and press Enter")
        # Flat alternating [user, assistant, ...] message history kept
        # across turns (the `history` argument of predict()).
        state = gr.State([])
        server_status_code = gr.Textbox(label="Status code from OpenAI server",)
        # Hidden exchange counter fed back into predict() on each turn.
        chat_counter = gr.Number(value=0, visible=False, precision=0)

    # Handlers run in registration order: stream the reply first, then
    # clear the textbox.
    inputs.submit(predict, [inputs, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code], queue=True)
    inputs.submit(reset_textbox, [], [inputs])

demo.queue(max_size=5, concurrency_count=5).launch()