Spaces:
Sleeping
Sleeping
import os
import time

import gradio as gr
from PIL import Image
from langchain.schema import AIMessage, HumanMessage
# NOTE(review): HumanMessage is imported twice; this later import shadows the
# one from langchain.schema. Kept as-is for compatibility — the two classes are
# interchangeable aliases in recent LangChain versions.
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
# --- Model configuration ---------------------------------------------------
# SECURITY(review): an API key was hard-coded here and therefore committed to
# source control — treat it as leaked and rotate it. The environment variable
# GOOGLE_AI_STUDIO now takes precedence; the literal remains only as a
# backward-compatible fallback so existing deployments keep working.
GOOGLE_AI_STUDIO = os.environ.get(
    'GOOGLE_AI_STUDIO',
    'AIzaSyD9f8u_jY3BB2awtocA6E-XEoyi4hMexRE',
)

# Vision model: used for turns that include an uploaded image.
llm_img = ChatGoogleGenerativeAI(
    model="gemini-pro-vision",
    temperature=0.0,  # deterministic output
    max_output_tokens=2048,
    google_api_key=GOOGLE_AI_STUDIO,
)

# Text-only model: used when no image is attached to the message.
llm = ChatGoogleGenerativeAI(
    model="gemini-pro",
    temperature=0.0,
    max_output_tokens=2048,
    google_api_key=GOOGLE_AI_STUDIO,
    stream=True,
)
def _stream_chars(text):
    """Yield growing prefixes of *text*, one character at a time.

    Simulates token streaming for the Gradio chat UI; the short sleep gives
    the front end a chance to repaint between updates.
    """
    shown = ''
    for ch in text:
        shown += ch
        time.sleep(0.001)
        yield shown


def predict(message, history, img):
    """Chat callback for ``gr.ChatInterface``.

    Args:
        message: The user's latest message (str).
        history: Prior ``(human, assistant)`` turns as provided by Gradio.
        img: Optional uploaded image (PIL image or None); when present the
            vision model handles the turn.

    Yields:
        Incrementally longer response strings, producing a typing effect.
    """
    # Rebuild the conversation as LangChain message objects.
    history_langchain_format = []
    for human, assistant in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=assistant))

    if img is None:
        # Text-only turn: send the full history plus the new message.
        history_langchain_format.append(HumanMessage(content=message))
        gpt_response = llm.invoke(history_langchain_format)
    else:
        # Vision turn: only the current multimodal message is sent to the
        # vision model. (The original also appended `msg` to the history
        # list, but that list was never used in this branch — dead code,
        # removed.)
        msg = HumanMessage(
            content=[
                {"type": "text", "text": message},
                {"type": "image_url", "image_url": img},
            ]
        )
        gpt_response = llm_img.invoke([msg])

    # Both branches stream the response the same way; the duplicated
    # char-accumulation loops were merged into one helper.
    yield from _stream_chars(gpt_response.content)
# Wire the chat callback into a ChatInterface with an image-upload input.
interface = gr.ChatInterface(
    predict,
    additional_inputs=gr.Image(type='pil', label='Upload your image...'),
)

# BUG FIX: the original called `interface.launch(interface.queue())`, passing
# the Blocks object returned by queue() as launch()'s first positional
# argument, which is not a valid launch parameter. Queueing (required for a
# generator/streaming callback like `predict`) must simply be enabled before
# launching.
interface.queue()
interface.launch()