Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -1,222 +1,109 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-import json
-import uuid
-from PIL import Image
-from bs4 import BeautifulSoup
 import requests
-import random
-from transformers import LlavaProcessor, LlavaForConditionalGeneration, TextIteratorStreamer
-from threading import Thread
-import re
-import time
-import torch
 import cv2
-from …
-…

-def llava(message, history):
-    if message["files"]:
-        …
-    else:
-        for hist in history:
-            if type(hist[0]) == tuple:
-                image = hist[0][0]
-
-    txt = message["text"]
-
-    gr.Info("Analyzing image")
-    image = Image.open(image).convert("RGB")
-    prompt = f"<|im_start|>user <image>\n{txt}<|im_end|><|im_start|>assistant"

-…
-def search(term):
-    all_results = []
-    max_chars_per_page = 8000
-    with requests.Session() as session:
-        resp = session.get(
-            url="https://www.google.com/search",
-            headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
-            params={"q": term, "num": 3, "udm": 14},
-            timeout=5,
-            verify=None,
-        )
-        resp.raise_for_status()
-        soup = BeautifulSoup(resp.text, "html.parser")
-        result_block = soup.find_all("div", attrs={"class": "g"})
-        for result in result_block:
-            link = result.find("a", href=True)
-            link = link["href"]
-            try:
-                webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"}, timeout=5, verify=False)
-                webpage.raise_for_status()
-                visible_text = extract_text_from_webpage(webpage.text)
-                if len(visible_text) > max_chars_per_page:
-                    visible_text = visible_text[:max_chars_per_page]
-                all_results.append({"link": link, "text": visible_text})
-            except requests.exceptions.RequestException:
-                all_results.append({"link": link, "text": None})
-    return all_results
-
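Review note on the removed search() helper: result.find("a", href=True) returns None for result blocks that carry no anchor, so link["href"] can raise TypeError before the try block is entered, and verify=False disables TLS certificate checks for every fetched page. A defensive sketch of just the link extraction, reusing the names from the code above:

    for result in result_block:
        anchor = result.find("a", href=True)
        if anchor is None:
            continue  # skip "g" blocks that contain no link
        link = anchor["href"]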
-# Initialize inference clients for different models
-client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
-client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
-client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
-
-func_caller = []
-
-# Define the main chat function
-def respond(message, history):
-    func_caller = []
-
-    user_prompt = message
-    # Handle image processing
-    if message["files"]:
-        inputs = llava(message, history)
-        streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-        generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)

-…
     else:
-        functions_metadata = [
-            {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
-            {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER", "parameters": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed prompt"}}, "required": ["prompt"]}}},
-            {"type": "function", "function": {"name": "image_generation", "description": "Generate image for user", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "image generation prompt"}}, "required": ["query"]}}},
-            {"type": "function", "function": {"name": "image_qna", "description": "Answer question asked by user related to image", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "Question by user"}}, "required": ["query"]}}},
-        ]
-
-        for msg in history:
-            func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
-            func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
-
-        message_text = message["text"]
-        func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})

-…
-        try:
-            response = response[int(response.find("{")):int(response.rindex("</"))]
-        except:
-            response = response[int(response.find("{")):(int(response.rfind("}"))+1)]
-        response = response.replace("\\n", "")
-        response = response.replace("\\'", "'")
-        response = response.replace('\\"', '"')
-        response = response.replace('\\', '')
-        print(f"\n{response}")

-        json_data = json.loads(str(response))
-        if json_data["name"] == "web_search":
-            query = json_data["arguments"]["query"]
-            gr.Info("Searching Web")
-            web_results = search(query)
-            gr.Info("Extracting relevant Info")
-            web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
-            messages = f"<|im_start|>system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions.<|im_end|>"
-            for msg in history:
-                messages += f"\n<|im_start|>user\n{str(msg[0])}<|im_end|>"
-                messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
-            messages += f"\n<|im_start|>user\n{message_text}<|im_end|>\n<|im_start|>web_result\n{web2}<|im_end|>\n<|im_start|>assistant\n"
-            stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
-            output = ""
-            for response in stream:
-                if not response.token.text == "<|im_end|>":
-                    output += response.token.text
-                    yield output
-        elif json_data["name"] == "image_generation":
-            query = json_data["arguments"]["query"]
-            gr.Info("Generating Image, Please wait 10 sec...")
-            yield "Generating Image, Please wait 10 sec..."
-            try:
-                image = image_gen(f"{str(query)}")
-                yield gr.Image(image[1])
-            except:
-                client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
-                seed = random.randint(0, 999999)
-                image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
-                yield gr.Image(image)
-        elif json_data["name"] == "image_qna":
-            inputs = llava(message, history)
-            streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-            generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)

-…
-            output = ""
-            for response in stream:
-                if not response.token.text == "<|eot_id|>":
-                    output += response.token.text
-                    yield output
-
-# Create the Gradio interface
-demo = gr.ChatInterface(
-    fn=respond,
-    chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
-    description="# OpenGPT 4o mini\n ### You can engage in chat, generate images, perform web searches, and Q&A with images.",
-    textbox=gr.MultimodalTextbox(),
-    multimodal=True,
-    concurrency_limit=200,
-    examples=[
-        {"text": "Hy, who are you?"},
-        {"text": "What's the current price of Bitcoin"},
-        {"text": "Search and Tell me what's the release date of llama 3 400b"},
-        {"text": "Create A Beautiful image of Effiel Tower at Night"},
-        {"text": "Write me a Python function to calculate the first 10 digits of the fibonacci sequence."},
-        {"text": "What's the colour of car in given image", "files": ["./car1.png"]},
-        {"text": "Read what's written on paper", "files": ["./paper_with_text.png"]},
-    ],
-    cache_examples=False,
 )
-
 import gradio as gr
 import requests
+from bs4 import BeautifulSoup
 import cv2
+from PIL import Image
+from transformers import pipeline
+from huggingface_hub import InferenceApi
+import sqlite3
+from sqlalchemy import create_engine, Column, Integer, String, MetaData, Table
+from sqlalchemy.orm import sessionmaker
+import json
+import random
+import io

+# Setting up the SQLAlchemy engine and session
+DATABASE_URL = "sqlite:///chatbot.db"
+engine = create_engine(DATABASE_URL)
+Session = sessionmaker(bind=engine)
+session = Session()
+metadata = MetaData()

+# Load the image generation model (for example, using a Hugging Face model)
+image_generator = pipeline("image-generation", model="CompVis/stable-diffusion-v1-4")

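The image_generator line above is the most likely source of this Space's runtime error: transformers' pipeline() has no "image-generation" task, so the call raises as soon as app.py is imported (several of the new imports, such as sqlite3, io, random, and InferenceApi, are also never used). A minimal working sketch, assuming the diffusers package and a CUDA device are available; this plainly swaps in diffusers' StableDiffusionPipeline, which is how CompVis/stable-diffusion-v1-4 is actually served:

    import torch
    from diffusers import StableDiffusionPipeline

    # Stable Diffusion checkpoints load through diffusers, not transformers.pipeline
    image_pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        torch_dtype=torch.float16,  # assumes a GPU; drop this kwarg on CPU
    ).to("cuda")

    def generate_image(description):
        # diffusers pipelines return PIL images on the .images attribute
        return image_pipe(description).images[0]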
+def create_table(table_name, columns):
+    if table_name in engine.table_names():
+        return f"Table '{table_name}' already exists."

+    columns_list = [Column('id', Integer, primary_key=True)]
+    for col_name, col_type in columns.items():
+        if col_type.lower() == 'string':
+            columns_list.append(Column(col_name, String))
+        elif col_type.lower() == 'integer':
+            columns_list.append(Column(col_name, Integer))
+        else:
+            return "Unsupported column type. Use 'String' or 'Integer'."
+
+    new_table = Table(table_name, metadata, *columns_list)
+    metadata.create_all(engine)
+    return f"Table '{table_name}' created successfully."
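A second API problem: Engine.table_names() was deprecated in SQLAlchemy 1.4 and removed in 2.0, so the existence checks here and in edit_table fail on current installs. A sketch of the portable equivalent via the inspection API (table_exists is a hypothetical helper, not part of the diff):

    from sqlalchemy import inspect

    def table_exists(name):
        # inspect() works on both SQLAlchemy 1.4 and 2.0
        return name in inspect(engine).get_table_names()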

+def edit_table(table_name, columns):
+    if table_name not in engine.table_names():
+        return f"Table '{table_name}' does not exist."
+
+    table = Table(table_name, metadata, autoload_with=engine)
+    for col_name, col_type in columns.items():
+        if col_name not in table.c:
+            if col_type.lower() == 'string':
+                new_column = Column(col_name, String)
+            elif col_type.lower() == 'integer':
+                new_column = Column(col_name, Integer)
+            else:
+                return "Unsupported column type. Use 'String' or 'Integer'."
+            new_column.create(table, populate_default=True)

+    return f"Table '{table_name}' updated successfully."
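Column.create() is not core SQLAlchemy — it comes from the long-unmaintained sqlalchemy-migrate package — so this branch raises AttributeError as written. A minimal sketch of adding a column with plain SQLAlchemy instead, assuming the SQLite dialect accepts the generated statement (add_column is a hypothetical helper, not part of the diff):

    from sqlalchemy import text

    def add_column(table_name, col_name, sql_type):
        # sql_type is a raw SQL type name such as "TEXT" or "INTEGER"
        with engine.begin() as conn:
            conn.execute(text(f'ALTER TABLE "{table_name}" ADD COLUMN "{col_name}" {sql_type}'))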
+
+def chatbot_response(task, table_name=None, columns=None):
+    if task == "create_table":
+        if table_name and columns:
+            result = create_table(table_name, columns)
+        else:
+            result = "Please provide a table name and columns."
+    elif task == "edit_table":
+        if table_name and columns:
+            result = edit_table(table_name, columns)
+        else:
+            result = "Please provide a table name and columns."
     else:
+        result = "Unsupported task. Use 'create_table' or 'edit_table'."

+    # Generate a descriptive image based on the response
+    description = f"Task: {task}, Table Name: {table_name}, Columns: {columns}"
+    image = generate_image(description)

+    return result, image
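For reference, a call that exercises the create path (assuming a fresh chatbot.db and a working generate_image):

    result, image = chatbot_response(
        "create_table",
        table_name="users",
        columns={"name": "String", "age": "Integer"},
    )
    print(result)  # -> "Table 'users' created successfully."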

+def handle_chatbot(task, table_name, columns):
+    if task not in ['create_table', 'edit_table']:
+        return "Unsupported task. Use 'create_table' or 'edit_table'.", None

+    try:
+        columns_dict = json.loads(columns)
+    except json.JSONDecodeError:
+        return "Invalid columns format. Please use JSON format.", None
+
+    return chatbot_response(task, table_name, columns_dict)
+
+def generate_image(description):
+    images = image_generator(description, num_return_sequences=1)
+    image = images[0]['image']
+    return image
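One usability catch in handle_chatbot: json.loads only accepts double-quoted JSON, while the columns placeholder below shows single quotes. A valid call therefore looks like:

    handle_chatbot("create_table", "users", '{"name": "String", "age": "Integer"}')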
+
+# Gradio interface setup
+task_input = gr.inputs.Textbox(lines=1, placeholder="Task (create_table or edit_table)")
+table_name_input = gr.inputs.Textbox(lines=1, placeholder="Table Name")
+columns_input = gr.inputs.Textbox(lines=2, placeholder="Columns (JSON format: {'column1': 'type', 'column2': 'type'})")
+
+interface = gr.Interface(
+    fn=handle_chatbot,
+    inputs=[task_input, table_name_input, columns_input],
+    outputs=[gr.outputs.Textbox(), gr.outputs.Image(type="pil")],
+    title="SQL Database Chatbot with Image Generation",
+    description="A chatbot interface to create and edit SQL tables with image generation."
 )
+
+if __name__ == "__main__":
+    interface.launch()
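Finally, the gr.inputs and gr.outputs namespaces were removed in Gradio 4.x, so on a current Space this block raises AttributeError before the app ever serves. A sketch of the same interface against the current API (also switching the placeholder to double-quoted JSON so example input parses):

    interface = gr.Interface(
        fn=handle_chatbot,
        inputs=[
            gr.Textbox(lines=1, placeholder="Task (create_table or edit_table)"),
            gr.Textbox(lines=1, placeholder="Table Name"),
            gr.Textbox(lines=2, placeholder='Columns (JSON format: {"column1": "String", "column2": "Integer"})'),
        ],
        outputs=[gr.Textbox(), gr.Image(type="pil")],
        title="SQL Database Chatbot with Image Generation",
        description="A chatbot interface to create and edit SQL tables with image generation.",
    )

    if __name__ == "__main__":
        interface.launch()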