# Hugging Face Space app (page header removed; the hosted Space was showing "Runtime error").
# Third-party dependencies: Gradio for the UI, the transformers agents
# framework for the ReAct agent, and gradio_tools to wrap Gradio apps as tools.
import gradio as gr
from dataclasses import asdict
from transformers import Tool, ReactCodeAgent  # type: ignore
from transformers.agents import stream_to_gradio, HfApiEngine  # type: ignore
from gradio_tools import GradioTool  # assuming you have gradio_tool installed
class GenerateQuestionsTool(GradioTool):
    """
    A tool to generate general questions for deployment/integration gaps.

    The tool calls the execute_prompt method using the
    "generate_general_questions" prompt.

    Input: (optional) project detail as a string.
    Output: generated questions as a string.
    """

    def __init__(
        self,
        name="GenerateQuestions",
        description=(
            "A tool that generates general deployment/integration questions "
            "by executing a prompt with project details. "
            "Input: a string with project detail (optional). Output: a string with generated questions."
        ),
    ):
        super().__init__(name, description)
        # Optionally, initialize any state or dependencies here.

    def create_job(self, query: str):
        """
        Interpret the input query and run the question-generation prompt.

        If a non-empty query is provided it is used as the project detail;
        otherwise we fall back to self.get_project_detail().
        Returns the prompt result, or an error string if execution fails.
        """
        # Guard against None in addition to empty/whitespace-only input:
        # the previous `query.strip()` raised AttributeError when query was None.
        project_detail = query if query and query.strip() else self.get_project_detail()
        try:
            # Execute the prompt with the provided project detail.
            result = self.execute_prompt(
                "generate_general_questions", {"project_detail": project_detail}
            )
        except Exception as e:
            # Surface the failure to the caller as a readable string rather
            # than crashing the agent loop.
            result = f"Error during prompt execution: {str(e)}"
        return result

    def postprocess(self, output) -> str:
        """Convert the job output to a string that can be returned to the LLM."""
        return str(output)

    def _block_input(self, gr):
        """Gradio input component: a textbox for optional project details."""
        return gr.Textbox(
            label="Project Detail (optional)",
            placeholder="Enter project detail or leave empty to use default",
        )

    def _block_output(self, gr):
        """Gradio output component: a textbox holding the generated questions."""
        return gr.Textbox(label="Generated Questions")
# Import tool from Hub: wraps the FLUX.1-schnell Space endpoint as an agent tool.
image_generation_tool = Tool.from_space(  # type: ignore
    space_id="black-forest-labs/FLUX.1-schnell",
    name="image_generator",
    description="Generates an image following your prompt. Returns a PIL Image.",
    api_name="/infer",
)

# testing_tool = GenerateQuestionsTool()
# question_generator = Tool.from_gradio(testing_tool)

# LLM backend served through the Hugging Face Inference API.
llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")

# Initialize the agent with both tools and engine
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
def interact_with_agent(prompt, history):
    """Stream the agent's run into the Gradio chat as it progresses.

    Yields the growing message list: first an empty transcript, then one
    snapshot per agent step, and a final snapshot when the run completes.
    """
    chat_log = []
    yield chat_log
    for step_msg in stream_to_gradio(agent, prompt):
        chat_log.append(asdict(step_msg))  # type: ignore
        yield chat_log
    # Emit the completed transcript once more so the UI settles on the final state.
    yield chat_log
# Chat window: message-typed history, robot avatar for the assistant side.
_agent_chatbot = gr.Chatbot(
    label="Agent",
    height=650,
    type="messages",
    avatar_images=(
        None,
        "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
    ),
)

# ChatInterface drives the streaming agent callback defined above.
demo = gr.ChatInterface(
    interact_with_agent,
    type="messages",
    chatbot=_agent_chatbot,
    examples=[
        ["Generate an image of an astronaut riding an alligator"],
        ["I am writing a children's book for my daughter. Can you help me with some illustrations?"],
    ],
)

if __name__ == "__main__":
    demo.launch()