| import PIL.Image as Image |
| import gradio as gr |
| from ultralytics import YOLO |
| import os |
| import time |
| import uuid |
| from langchain_groq import ChatGroq |
| from langchain_community.embeddings import HuggingFaceEmbeddings |
| from langchain.text_splitter import CharacterTextSplitter |
| from langchain.chains.combine_documents import create_stuff_documents_chain |
| from langchain_core.prompts import ChatPromptTemplate |
| from langchain.chains import create_retrieval_chain |
| from langchain_community.vectorstores import FAISS |
| from langchain_community.document_loaders import PyPDFLoader |
| from dotenv import load_dotenv |
|
|
# Load environment variables from a local .env file so the Groq API key
# never has to be hard-coded.
load_dotenv()
# May be None if GROQ_API_KEY is missing from the environment; ChatGroq
# construction below will then fail at import time.
groq_api_key = os.getenv('GROQ_API_KEY')


# Fine-tuned YOLO detection weights.
# NOTE(review): assumes "version4c.pt" is present in the working directory —
# confirm it ships with the deployment.
model = YOLO("version4c.pt")


# Detection thresholds: minimum box confidence to keep a prediction, and the
# IoU threshold used by non-max suppression.
CONF_THRESHOLD = 0.25
IOU_THRESHOLD = 0.45
|
def predict_image(img):
    """Run YOLO detection on an uploaded image and save an annotated copy.

    Parameters
    ----------
    img : PIL.Image.Image
        Image provided by the Gradio upload widget.

    Returns
    -------
    tuple
        ``(path_to_annotated_jpeg, None)`` on success, or
        ``(None, caution_message)`` when no objects were detected.
    """
    results = model.predict(
        source=img,
        conf=CONF_THRESHOLD,
        iou=IOU_THRESHOLD,
        show_labels=True,
        show_conf=True,
        imgsz=640,
    )

    # No boxes at all usually means an unclear photo or an unsupported breed.
    if len(results[0].boxes) == 0:
        return None, ("Please upload a clearer image, and don't upload "
                      "images of breeds that were not used in training.")

    # A single source image yields a single result. The original looped over
    # `results`, overwriting `filename` each pass so only the last file
    # survived; use the first (only) result directly instead.
    im_array = results[0].plot()               # annotated image as a BGR array
    im = Image.fromarray(im_array[..., ::-1])  # BGR -> RGB for PIL

    # Unique name per request so concurrent users don't clobber each other's
    # results. NOTE(review): these files are never cleaned up — consider
    # tempfile or a periodic sweep in production.
    filename = f"detected_result_{uuid.uuid4()}.jpg"
    im.save(filename, format='JPEG')

    return filename, None
|
|
| |
# ---- Retrieval-augmented chatbot setup (runs once at import time) ----

# Groq-hosted Llama 3 70B chat model used to answer questions.
llm = ChatGroq(groq_api_key=groq_api_key, model_name="Llama3-70b-8192")

# Prompt that restricts answers to the retrieved context only.
prompt = ChatPromptTemplate.from_template(
    """ Answer the questions based on the provided context only. Please provide the most accurate response based on the question <context> {context} </context> Questions:{input} """
)

# Local sentence-transformer embeddings — indexing incurs no API cost.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Load and chunk the knowledge-base PDF.
# NOTE(review): assumes "Document.pdf" exists next to the script — confirm.
loader = PyPDFLoader("Document.pdf")
docs = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
final_documents = text_splitter.split_documents(docs)

# Fail fast on an empty document set instead of embedding everything twice:
# the original called embeddings.embed_documents() on every chunk purely as a
# truthiness check, then FAISS.from_documents embedded them all over again.
if not final_documents:
    raise ValueError("Failed to generate embeddings. Please check your input documents or try a different embedding model.")
vectors = FAISS.from_documents(final_documents, embeddings)


# Chain: retrieve relevant chunks from FAISS, stuff them into the prompt,
# and have the LLM answer.
document_chain = create_stuff_documents_chain(llm, prompt)
retriever = vectors.as_retriever()
retrieval_chain = create_retrieval_chain(retriever, document_chain)
|
|
def print_like_dislike(x: gr.LikeData):
    """Log a chatbot like/dislike event (message index, text, liked flag)."""
    feedback = (x.index, x.value, x.liked)
    print(*feedback)
|
|
def add_message(history, message):
    """Append the user's message to the chat history and lock the input box.

    Returns the updated history plus a cleared, non-interactive Textbox; the
    input is re-enabled by a later .then() once the bot finishes streaming.
    """
    if message is not None:
        # Append a mutable [user, bot] pair: bot() later assigns
        # history[-1][1], which raises TypeError on a tuple unless a frontend
        # serialization round-trip happens to convert it to a list first.
        history.append([message, None])
    return history, gr.Textbox(value=None, interactive=False)
|
|
# Module-level flag polled by bot() on every streamed character so the
# "Stop Generation" button can interrupt the typing loop.
stop_generation = False
|
|
def bot(history):
    """Stream the RAG answer for the latest user message into *history*.

    A generator: yields the history repeatedly with one more character of the
    answer appended each step, giving the Gradio Chatbot a typing effect.
    Honors the global ``stop_generation`` flag set by the Stop button.
    """
    global stop_generation
    stop_generation = False
    message = history[-1][0]

    start_time = time.time()
    response = retrieval_chain.invoke({'input': message})['answer']
    response_time = time.time() - start_time

    # Bug fix: this function is a generator, so the original
    # ``return [(msg, None)]`` silently discarded the apology (a generator's
    # return value is never delivered to Gradio) — the user saw nothing.
    # Set it on the current turn and yield it instead.
    if response_time > 6:
        history[-1][1] = "Sorry, I couldn't generate a response within 6 seconds. Please try again."
        yield history
        return

    history[-1][1] = ""
    for character in response:
        if stop_generation:
            break
        history[-1][1] += character
        time.sleep(0.05)  # ~20 chars/sec typing effect
        yield history
|
|
def stop_response(dummy_placeholder):
    """Signal bot() to abort its streaming loop.

    Wired to the "Stop Generation" button; the argument is an unused
    placeholder required by the Gradio event signature.
    """
    global stop_generation
    stop_generation = True
|
|
# ---- Gradio UI layout and event wiring ----
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: image upload + YOLO detection result.
        with gr.Column(scale=2):
            model_input = gr.Image(type="pil", label="Upload Image")
            model_output = gr.Image(type="filepath", label="Result")
            # Hidden by default; shown (in place of the result) when
            # detection finds nothing.
            caution_message = gr.Textbox(label="Caution", visible=False)

            def process_image(img):
                # Bridge predict_image() to the UI: set the two values and
                # toggle visibility between the result image and the caution.
                result, caution = predict_image(img)
                if caution:
                    return None, caution, gr.Image(visible=False), gr.Textbox(visible=True)
                else:
                    return result, "", gr.Image(visible=True), gr.Textbox(visible=False)

            model_btn = gr.Button("Detect Result")
            # NOTE(review): model_output and caution_message each appear twice
            # in `outputs` (once for the value, once for the visibility
            # update); some Gradio versions warn about duplicate output
            # components — confirm on the deployed version.
            model_btn.click(process_image, inputs=model_input, outputs=[model_output, caution_message, model_output, caution_message])

        # Right column: RAG chatbot.
        with gr.Column(scale=1):
            chatbot = gr.Chatbot(
                [],
                elem_id="chatbot",
                bubble_full_width=False
            )

            chat_input = gr.Textbox(interactive=True, placeholder="Enter message...", show_label=False)
            # On submit: append the message and lock the box, stream the bot
            # reply, then re-enable the input box.
            chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
            bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
            bot_msg.then(lambda: gr.Textbox(interactive=True), None, [chat_input])
            chatbot.like(print_like_dislike, None, None)

            stop_btn = gr.Button("Stop Generation")
            stop_btn.click(stop_response, None, None)

# Queuing is required for the streaming generator (bot) to work and
# serializes concurrent requests.
demo.queue()
|
|
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()