GeminiAi committed on
Commit
700487c
·
1 Parent(s): 6e3d6f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +97 -42
app.py CHANGED
@@ -1,48 +1,40 @@
1
  import gradio as gr
2
  import os
3
  import requests
 
 
 
4
 
5
- TITLE = "Andy Ai"
6
- EXAMPLE_INPUT = "Your default example input"
 
 
7
 
 
8
  constants = """
9
  SYSTEM_PROMPT = "{}"
10
  TITLE = "{}"
11
  EXAMPLE_INPUT = "{}"
12
  """
13
 
14
-
15
- zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"
16
-
17
- HF_TOKEN = os.getenv("HF_TOKEN")
18
- HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
19
-
20
def build_input_prompt(message, chatbot, system_prompt):
    """Assemble a Zephyr-format prompt from the system prompt, the chat
    history, and the new user message.

    Each history entry is a (user_text, assistant_text) pair; turns are
    delimited with the model's <|system|>/<|user|>/<|assistant|> role
    tokens and </s> end-of-sequence markers.
    """
    turn_template = "{}</s>\n<|assistant|>\n{}\n</s>\n<|user|>\n"
    prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
    for turn in chatbot:
        prompt += turn_template.format(turn[0], turn[1])
    # Final user message, left open for the assistant to complete.
    prompt += str(message) + "</s>\n<|assistant|>"
    return prompt
30
 
31
-
32
def post_request_beta(payload):
    """POST *payload* to the Zephyr-7b-beta inference endpoint.

    Returns the decoded JSON response; raises requests.HTTPError (via
    raise_for_status) on a non-2xx status.
    """
    resp = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
    resp.raise_for_status()  # surface HTTP-level failures to the caller
    return resp.json()
39
 
40
-
41
  def predict_beta(message, chatbot=[], system_prompt=""):
42
  input_prompt = build_input_prompt(message, chatbot, system_prompt)
43
- data = {
44
- "inputs": input_prompt
45
- }
46
 
47
  try:
48
  response_data = post_request_beta(data)
@@ -63,21 +55,84 @@ def predict_beta(message, chatbot=[], system_prompt=""):
63
  error_msg = f"Failed to decode response as JSON: {str(e)}"
64
  raise gr.Error(error_msg)
65
 
66
def test_preview_chatbot(message, history):
    """Run one preview chat turn and return only the final assistant text.

    NOTE(review): SYSTEM_PROMPT is not defined at module level in this
    file (it only appears inside the `constants` template string) —
    presumably injected when the template is rendered; confirm.
    """
    marker = "<|assistant|>"
    reply = predict_beta(message, history, SYSTEM_PROMPT)
    # Keep everything after the last assistant marker.
    return reply[reply.rfind(marker) + len(marker):]
71
-
72
-
73
- welcome_preview_message = f"""
74
- Welcome to **{TITLE}**! Say something like:
75
- "{EXAMPLE_INPUT}"
76
- """
77
-
78
- chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_message)])
79
- textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)
80
-
81
- demo = gr.ChatInterface(test_preview_chatbot, chatbot=chatbot_preview, textbox=textbox_preview)
82
-
83
- demo.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import os
3
  import requests
4
+ import json
5
+ from huggingface_hub import HfApi
6
+ import huggingface_hub
7
 
8
+ # Constants for Zephyr-7b-Beta model
9
+ zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"
10
+ HF_TOKEN = os.getenv("HF_TOKEN")
11
+ HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
12
 
13
+ # Constants for Gradio UI
14
  constants = """
15
  SYSTEM_PROMPT = "{}"
16
  TITLE = "{}"
17
  EXAMPLE_INPUT = "{}"
18
  """
19
 
20
+ # Function to build input prompt for the model
 
 
 
 
 
21
def build_input_prompt(message, chatbot, system_prompt):
    """Assemble a Zephyr-format prompt from the system prompt, the chat
    history, and the new user message.

    Each history entry is a (user_text, assistant_text) pair; turns are
    delimited with </s> end-of-sequence markers and role tokens.

    NOTE(review): the <|system|>/<|user|>/<|assistant|> role tokens were
    lost in this revision (rendered away as HTML tags by the diff view);
    restored here from the previous revision of this function.
    """
    input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
    for interaction in chatbot:
        input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"
    # Final user message, left open for the assistant to complete.
    input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
    return input_prompt
27
 
28
+ # Function to send a POST request to Zephyr-7b-Beta model
29
def post_request_beta(payload):
    """POST *payload* to the Zephyr-7b-beta inference endpoint.

    Returns the decoded JSON response; raises requests.HTTPError (via
    raise_for_status) on a non-2xx status.
    """
    resp = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
    resp.raise_for_status()  # surface HTTP-level failures to the caller
    return resp.json()
33
 
34
+ # Function to get model predictions
35
  def predict_beta(message, chatbot=[], system_prompt=""):
36
  input_prompt = build_input_prompt(message, chatbot, system_prompt)
37
+ data = {"inputs": input_prompt}
 
 
38
 
39
  try:
40
  response_data = post_request_beta(data)
 
55
  error_msg = f"Failed to decode response as JSON: {str(e)}"
56
  raise gr.Error(error_msg)
57
 
58
+ # Function to extract title, system prompt, and example input from model response
59
def extract_title_prompt_example(text, title, system_prompt, example_input):
    """Parse "Title:", "System prompt:" and "Example input:" fields out of
    a model response.

    Any field that cannot be found in *text* keeps the caller-supplied
    default. Returns (text, title, system_prompt, example_input), where
    *text* has been trimmed to the final assistant turn when the marker
    is present.

    Fixes over the previous revision:
    - the "<|assistant|>" marker had been stripped to "" (lost as an HTML
      tag in the diff view); restored.
    - str.rfind never raises ValueError — it returns -1 — so the old
      try/except ValueError blocks were dead code and a missing marker
      silently produced garbage slices. Each field is now guarded with an
      explicit -1 check.
    """
    marker = "<|assistant|>"
    start = text.rfind(marker)
    if start != -1:
        # Keep only the final assistant turn.
        text = text[start + len(marker):]

    lowered = text.lower()
    title_pos = lowered.rfind("title:")
    prompt_pos = lowered.rfind("system prompt:")
    example_pos = lowered.rfind("example input:")

    if title_pos != -1 and prompt_pos != -1:
        title = text[title_pos + len("title:"):prompt_pos].strip()
    if prompt_pos != -1 and example_pos != -1:
        system_prompt = text[prompt_pos + len("system prompt:"):example_pos].strip()
    if example_pos != -1:
        example_input = text[example_pos + len("example input:"):].strip()
        # Keep only the first line of the example, when more text follows.
        newline = example_input.find("\n")
        if newline != -1:
            example_input = example_input[:newline]
    return text, title, system_prompt, example_input
84
+
85
+ # Function to make an Open GPT
86
def make_open_gpt(message, history, current_title, current_system_prompt, current_example_input):
    """Run one chat turn against the model and refresh the GPT metadata
    (title / system prompt / example input) extracted from the reply.

    Returns the tuple of Gradio component updates: cleared textbox,
    updated history, the three metadata fields, the refreshed preview
    chatbot, the preview textbox value, and two visibility updates.
    """
    reply = predict_beta(message, history, current_system_prompt)
    reply, title, system_prompt, example_input = extract_title_prompt_example(
        reply, current_title, current_system_prompt, current_example_input
    )
    preview = [(None, welcome_preview_message.format(title, example_input))]
    return (
        "",
        history + [(message, reply)],
        title,
        system_prompt,
        example_input,
        preview,
        example_input,
        gr.Column(visible=True),
        gr.Group(visible=True),
    )
90
+
91
+ # Function to set title and example input for preview
92
def set_title_example(title, example):
    """Refresh the preview chatbot greeting with *title*/*example* and
    make the preview column and group visible."""
    greeting = [(None, welcome_preview_message.format(title, example))]
    return greeting, example, gr.Column(visible=True), gr.Group(visible=True)
94
+
95
+ # Function to publish the GPT to Hugging Face Spaces
96
def publish(textbox_system_prompt, textbox_title, textbox_example, textbox_token):
    """Render app.py from the template and publish it as a new Hugging
    Face Gradio Space.

    Writes the formatted constants header plus app_template.py into a
    local app.py, creates (or reuses) the Space repo, uploads app.py and
    the README template, and stores the user's token as the Space's
    HF_TOKEN secret. Returns a visible status Markdown and a re-enabled
    Publish button.
    """
    source_file = 'app_template.py'
    destination_file = 'app.py'

    # Prepend the filled-in constants block to the app template.
    header = constants.format(textbox_system_prompt, textbox_title, textbox_example)
    with open(source_file, 'r') as fh:
        template_body = fh.read()
    with open(destination_file, 'w') as fh:
        fh.write(header + template_body)

    # NOTE(review): strip_invalid_filename_characters is not defined in
    # this file — presumably gradio.utils.strip_invalid_filename_characters;
    # confirm it is in scope at runtime.
    title = strip_invalid_filename_characters(textbox_title, max_bytes=30)

    api = HfApi(token=textbox_token)
    new_space = api.create_repo(
        repo_id=f"open-gpt-{title}",
        repo_type="space",
        exist_ok=True,
        private=False,
        space_sdk="gradio",
        token=textbox_token,
    )
    for local_path, repo_path in (('app.py', 'app.py'),
                                  ('README_template.md', 'README.md')):
        api.upload_file(
            repo_id=new_space.repo_id,
            path_or_fileobj=local_path,
            path_in_repo=repo_path,
            token=textbox_token,
            repo_type="space",
        )
    # Give the published Space access to the inference API.
    huggingface_hub.add_space_secret(
        new_space.repo_id, "HF_TOKEN", textbox_token, token=textbox_token
    )

    return gr.Markdown(f"Published to https://huggingface.co/spaces/{new_space.repo_id} ✅", visible=True), gr.Button("Publish", interactive=True)
133
+
134
+ # Gradio UI setup
135
+ with gr.Blocks(css=css) as demo:
136
+ # ... (The rest of your Gradio UI setup)
137
+
138
+ demo.launch(share=True)