Spaces:
Runtime error
Runtime error
File size: 5,913 Bytes
38afa58 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 |
import os
from dotenv import load_dotenv
from e2b import Sandbox
from huggingface_hub import InferenceClient # Import the InferenceClient
import gradio as gr
# Pull secrets/config out of a local .env file (no-op when the vars are
# already set in the environment, as they are on Spaces).
load_dotenv()

# --- Sandbox (E2B) configuration ---
e2b_api_key = os.environ.get("E2B_API_KEY")
if not e2b_api_key:
    print("WARNING: E2B_API_KEY not found. Cannot run locally without it.")

# --- LLM (Hugging Face Inference API) configuration ---
hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    print("WARNING: HF_TOKEN not found. Cannot run locally without it. Inference might be limited.")

# Any text-generation / chat model served by the HF Inference API works here,
# e.g. "meta-llama/Llama-2-7b-chat-hf" or "Qwen/Qwen1.5-7B-Chat".
HF_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.2"

# Build the client eagerly; a failure (bad token, unknown model, network)
# is recorded in the availability flag instead of crashing the whole app.
try:
    llm_client = InferenceClient(model=HF_MODEL_ID, token=hf_token)
except Exception as e:
    print(f"Could not initialize Hugging Face Inference Client: {e}")
    llm_client = None  # Ensure client is None if initialization fails
    llm_client_available = False
else:
    llm_client_available = True
def _strip_code_fences(text: str) -> str:
    """Remove surrounding markdown code fences / backticks from an LLM reply."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        # Drop the opening fence (and an optional language tag on that line)
        # and the closing fence if present.
        lines = cleaned.splitlines()
        lines = lines[1:]
        if lines and lines[-1].strip().startswith("```"):
            lines = lines[:-1]
        cleaned = "\n".join(lines).strip()
    return cleaned.strip("`").strip()


def run_agent_task(user_input: str) -> str:
    """Run one agent turn: ask the LLM for a bash command and execute it in E2B.

    Args:
        user_input: The task the user wants performed inside the sandbox.

    Returns:
        A human-readable transcript (LLM decision, command stdout/stderr,
        exit code, and any errors) for display in the Gradio output box.
    """
    output = ""
    if not e2b_api_key:
        return "Error: E2B API key not configured."
    if not llm_client_available or llm_client is None:
        return "Error: Hugging Face Inference Client not initialized. Check HF_TOKEN or model ID."
    # Start an E2B sandbox session
    try:
        with Sandbox(api_key=e2b_api_key, template="base") as sandbox:
            output += "E2B Sandbox started successfully.\n"
            # 1. Formulate a prompt for the LLM (few-shot examples keep the
            # model's output restricted to a bare command or NO_COMMAND).
            prompt_content = f"""
You are a computer agent connected to a sandboxed environment.
The user wants you to perform the following task: {user_input}
Based on the task, decide what command(s) to run in the bash terminal within the sandbox.
Output only the bash command(s), nothing else. If no command is needed, output 'NO_COMMAND'.
For example:
User: List files in the current directory
Agent: ls -l
User: Calculate 2+2
Agent: echo $((2+2))
User: Greet me
Agent: NO_COMMAND
Now, based on the user task: {user_input}
Output the bash command(s) or 'NO_COMMAND':
"""
            # 2. Call the Hugging Face Inference API.
            try:
                # BUG FIX: InferenceClient's chat entry point is the
                # chat_completion() method; `.chat` is not callable (in recent
                # huggingface_hub it is the OpenAI-compat namespace), so the
                # previous `llm_client.chat(...)` raised TypeError at runtime.
                response = llm_client.chat_completion(
                    messages=[
                        {"role": "system", "content": "You are a helpful assistant that outputs bash commands or NO_COMMAND."},
                        {"role": "user", "content": prompt_content}
                    ],
                    max_tokens=100,  # Adjust as needed
                    temperature=0.1,  # Lower temperature helps with predictable command output
                )
                # chat_completion returns an OpenAI-style ChatCompletion object.
                command_to_run = response.choices[0].message.content.strip()
                # Models frequently wrap commands in markdown fences; strip
                # them so the sandbox receives a plain bash command line.
                command_to_run = _strip_code_fences(command_to_run)
                output += f"LLM ({HF_MODEL_ID}) decided to run: `{command_to_run}`\n"
            except Exception as e:
                output += f"An exception occurred calling Hugging Face Inference API: {e}\n"
                command_to_run = "NO_COMMAND"  # Prevent execution on LLM error
            # 3. Execute the command in the E2B sandbox (if not NO_COMMAND).
            # NOTE(security): this intentionally executes arbitrary
            # LLM-generated shell commands — acceptable only because they run
            # inside the disposable E2B sandbox, never on the host.
            if command_to_run and command_to_run != "NO_COMMAND":
                try:
                    proc = sandbox.process.start(command_to_run)
                    process_output = proc.wait()
                    if process_output.stdout:
                        output += "--- Command Output (stdout) ---\n"
                        output += process_output.stdout + "\n"
                    if process_output.stderr:
                        output += "--- Command Output (stderr) ---\n"
                        output += process_output.stderr + "\n"
                    output += f"Command exited with code: {process_output.exit_code}\n"
                except Exception as e:
                    output += f"Error executing command in sandbox: {e}\n"
            elif command_to_run == "NO_COMMAND":
                output += "LLM decided no command was necessary.\n"
            else:
                output += "LLM returned an empty command.\n"
            output += "E2B Sandbox session ended.\n"
    except Exception as e:
        output += f"An error occurred with the E2B sandbox: {e}\n"
        output += "Please check your E2B API key and try again.\n"
    return output
# Wire the agent function into a minimal single-textbox Gradio UI.
_task_input = gr.Textbox(lines=2, placeholder="Enter your task for the agent here...")
_agent_output = gr.Textbox(lines=10, label="Agent Output", interactive=False)

interface = gr.Interface(
    run_agent_task,
    _task_input,
    _agent_output,
    title=f"E2B Computer Agent Demo (using {HF_MODEL_ID})",
    description="Enter a task for the agent to perform in a sandboxed environment using E2B and a Hugging Face model via Inference API.",
)
# This is the line Hugging Face Spaces will look for: launching the Gradio
# app is gated behind the main guard so importing this module has no effect.
if __name__ == "__main__":
    interface.launch()