Nithish310 committed
Commit 143f301 · verified · Parent: 0852d69

Update app.py

Files changed (1):
  app.py  +45 -44
app.py CHANGED
@@ -14,6 +14,10 @@ import torch
 import cv2
 from gradio_client import Client, file
 
+# The image for the bot and human
+bot_image = "path_to_bot_image.png"
+human_image = "path_to_human_image.png"
+
 def image_gen(prompt):
     client = Client("KingNish/Image-Gen-Pro")
     return client.predict("Image Generation",None, prompt, api_name="/image_gen_pro")
@@ -25,7 +29,6 @@ processor = LlavaProcessor.from_pretrained(model_id)
 model = LlavaForConditionalGeneration.from_pretrained(model_id)
 model.to("cpu")
 
-
 def llava(message, history):
     if message["files"]:
         image = message["files"][0]
@@ -38,7 +41,7 @@ def llava(message, history):
 
     gr.Info("Analyzing image")
     image = Image.open(image).convert("RGB")
-    prompt = f"<|im_start|>user <image>\n{txt}<|im_end|><|im_start|>assistant"
+    prompt = f"user <image>\n{txt}assistant"
 
     inputs = processor(prompt, image, return_tensors="pt")
     return inputs
@@ -138,15 +141,15 @@ def respond(message, history):
             web_results = search(query)
             gr.Info("Extracting relevant Info")
             web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
-            messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
+            messages = f"system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
             for msg in history:
-                messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
-                messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
-            messages+=f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>web_result\n{web2}<|im_end|>\n<|im_start|>assistant\n"
+                messages += f"\nuser\n{str(msg[0])}"
+                messages += f"\nassistant\n{str(msg[1])}"
+            messages+=f"\nuser\n{message_text}\nweb_result\n{web2}\nassistant\n"
             stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
             output = ""
             for response in stream:
-                if not response.token.text == "<|im_end|>":
+                if not response.token.text == "":
                     output += response.token.text
                     yield output
         elif json_data["name"] == "image_generation":
@@ -168,44 +171,42 @@ def respond(message, history):
 
             thread = Thread(target=model.generate, kwargs=generation_kwargs)
             thread.start()
-
+
             buffer = ""
             for new_text in streamer:
                 buffer += new_text
                 yield buffer
-        else:
-            messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
-            for msg in history:
-                messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
-                messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
-            messages+=f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>assistant\n"
-            stream = client_yi.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
-            output = ""
-            for response in stream:
-                if not response.token.text == "<|endoftext|>":
-                    output += response.token.text
-                    yield output
-    except:
-        messages = f"<|start_header_id|>system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|end_header_id|>"
-        for msg in history:
-            messages += f"\n<|start_header_id|>user\n{str(msg[0])}<|end_header_id|>"
-            messages += f"\n<|start_header_id|>assistant\n{str(msg[1])}<|end_header_id|>"
-        messages+=f"\n<|start_header_id|>user\n{message_text}<|end_header_id|>\n<|start_header_id|>assistant\n"
-        stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
-        output = ""
-        for response in stream:
-            if not response.token.text == "<|eot_id|>":
-                output += response.token.text
-                yield output
-
-# Create the Gradio interface
-demo = gr.ChatInterface(
-    fn=respond,
-    chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
-    description ="# OpenGPT 4o mini\n ### You can engage in chat, generate images, perform web searches, and Q&A with images.",
-    textbox=gr.MultimodalTextbox(),
-    multimodal=True,
-    concurrency_limit=200,
-    cache_examples=False,
-)
-demo.launch()
+    except Exception as ex:
+        gr.Error(str(ex))
+
+# Gradio Blocks interface with a custom layout
+with gr.Blocks(css="#chatbot .chat-message.user { border-bottom: none; margin-bottom: 2px; }") as demo:
+    gr.Markdown("# OpenChat Mini 🚀")
+    chatbot = gr.Chatbot(label="Nithish OpenChat").style(container=False).style(height=700)
+    with gr.Row():
+        with gr.Column(scale=0.95):
+            with gr.Row():
+                txt = gr.Textbox(show_label=False, placeholder="Type your message here...").style(container=False)
+            with gr.Row():
+                file_btn = gr.UploadButton("📁", file_types=["image", "video", "document"])
+            with gr.Row():
+                submit_btn = gr.Button("Send", size="small", variant="primary")
+        with gr.Column(scale=0.05, align="center"):
+            gr.Image.update(human_image, human_image)
+
+    with gr.Row():
+        with gr.Column(scale=0.95):
+            with gr.Row():
+                file_btn2 = gr.UploadButton("📁", file_types=["image", "video", "document"]).style(container=False)
+            with gr.Row():
+                bot_img = gr.Image(bot_image, bot_image).style(container=False)
+            with gr.Row():
+                submit_btn2 = gr.Button("Send", size="small", variant="primary")
+
+    txt.submit(respond, [txt, chatbot], [txt, chatbot], scroll_to_output=True)
+    file_btn.upload(respond, [txt, chatbot], [txt, chatbot], scroll_to_output=True)
+    submit_btn.click(respond, [txt, chatbot], [txt, chatbot], scroll_to_output=True)
+    file_btn2.upload(respond, [txt, chatbot], [txt, chatbot], scroll_to_output=True)
+    submit_btn2.click(respond, [txt, chatbot], [txt, chatbot], scroll_to_output=True)
+
+demo.launch(debug=True)
 
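Note on the new wiring: the added Blocks layout passes the streaming respond generator directly to [txt, chatbot] as both inputs and outputs. A minimal sketch of the two-step pattern Gradio's own Blocks chatbot guide uses for streaming a generator into a Chatbot is below; user_turn and bot_turn are illustrative names, not functions from this repository, and the sketch assumes the tuple-style chat history of Gradio 3.x/4.x rather than the committed code.

import time
import gradio as gr

def user_turn(message, history):
    # Hypothetical helper: move the submitted text into the history
    # with an empty assistant slot, and clear the textbox.
    return "", history + [[message, ""]]

def bot_turn(history):
    # Hypothetical helper: stream a reply character by character
    # into the last history entry; yielding updates the Chatbot live.
    reply = "Streaming demo reply."
    for ch in reply:
        history[-1][1] += ch
        time.sleep(0.02)
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    txt = gr.Textbox(placeholder="Type your message here...")
    # Two-step wiring: first record the user turn, then stream the bot turn.
    txt.submit(user_turn, [txt, chatbot], [txt, chatbot]).then(bot_turn, chatbot, chatbot)

demo.queue()
demo.launch()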