"""
This app.py consitutes an application where users can pose queries to
an agent about plants.
"""
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import yaml

from tools.final_answer import FinalAnswerTool
from huggingface_hub import InferenceClient
from Gradio_UI import GradioUI

@tool
def describe_plant_image(user_query: str, image_url: str) -> str:
    """
    Describe a plant image and answer the user's query.
    Use this tool only when the user provides an image_url or when
    prior chat messages have retrieved image_url(s).

    Args:
        user_query: The user's question, or any relevant text from the chat pertaining to the image_url
        image_url: The image URL provided by the user
    """
    system_prompt = (
        "You are an LLM assistant that analyzes plant images and:\n"
        "1. Identifies the plant if possible\n"
        "2. Describes key visible characteristics\n"
        "3. Answers the user's question clearly and concisely"
    )
    vl_model = InferenceClient(model="CohereLabs/aya-vision-32b:cohere")
    response = vl_model.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": system_prompt,
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": user_query},
                    {
                        "type": "image_url",
                        "image_url": {"url": image_url},
                    },
                ],
            },
        ]
    )
    return response.choices[0].message.content
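
# A hypothetical example of how the agent's generated code might call this
# tool (the query and URL below are illustrative, not from the app):
#
#   description = describe_plant_image(
#       user_query="What species is this, and are the brown leaf tips a problem?",
#       image_url="https://example.com/plant.jpg",
#   )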
query_internet = DuckDuckGoSearchTool()
final_answer = FinalAnswerTool()
# If the agent does not answer, the model is likely overloaded; use another
# model, or the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
    custom_role_conversions=None,
)
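
# A sketch of the fallback configuration using the endpoint noted above;
# this assumes the endpoint is live and serving a compatible model:
#
#   model = HfApiModel(
#       max_tokens=2096,
#       temperature=0.5,
#       model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
#   )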
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
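# Note: image_generation_tool is loaded here but is not registered in the
# agent's tools list below; add it there to enable text-to-image generation.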
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[query_internet, describe_plant_image, final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
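
# The Gradio UI below drives the agent interactively; for a one-off
# programmatic run, something like the following would work (illustrative
# query):
#
#   result = agent.run("Which houseplants tolerate low light?")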
GradioUI(agent).launch()