Spaces:
Sleeping
Sleeping
Commit ·
84cf689
1
Parent(s): 1446d52
feat: Add multi-provider support for agent generation with model validation, new templates, and security checks.
Browse files
- .env.example +4 -7
- GEMINI.md +1 -0
- frontend/src/components/AgentForm.jsx +58 -9
- implementation_plan.md +83 -26
- llm_agent_builder/agent_builder.py +10 -4
- llm_agent_builder/templates/agent_template.py.j2 +2 -2
- llm_agent_builder/templates/agent_template_hf.py.j2 +71 -0
- pre-commit-check.sh +35 -0
- server/main.py +5 -7
- server/models.py +36 -0
- task.md +22 -0
.env.example
CHANGED
|
@@ -1,8 +1,5 @@
|
|
| 1 |
-
|
|
|
|
| 2 |
|
| 3 |
-
#
|
| 4 |
-
|
| 5 |
-
# - claude-3-opus-20240229 (Opus - Most Powerful)
|
| 6 |
-
# - claude-3-sonnet-20240229 (Previous Sonnet)
|
| 7 |
-
# - claude-3-haiku-20240307 (Haiku - Fastest/Cheapest)
|
| 8 |
-
ANTHROPIC_MODEL=claude-3-5-sonnet-20241022
|
|
|
|
| 1 |
+
# Anthropic API Key
|
| 2 |
+
ANTHROPIC_API_KEY=your_anthropic_api_key_here
|
| 3 |
|
| 4 |
+
# Hugging Face API Token
|
| 5 |
+
HUGGINGFACEHUB_API_TOKEN=your_huggingface_token_here
|
|
|
|
|
|
|
|
|
|
|
|
GEMINI.md
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
|
|
| 1 |
# LLMAgentBuilder Context
|
| 2 |
|
| 3 |
## Project Overview
|
|
|
|
| 1 |
+
|
| 2 |
# LLMAgentBuilder Context
|
| 3 |
|
| 4 |
## Project Overview
|
frontend/src/components/AgentForm.jsx
CHANGED
|
@@ -5,15 +5,29 @@ const AgentForm = ({ onGenerate, isLoading }) => {
|
|
| 5 |
name: '',
|
| 6 |
prompt: '',
|
| 7 |
task: '',
|
| 8 |
-
|
|
|
|
|
|
|
| 9 |
});
|
| 10 |
|
| 11 |
const handleChange = (e) => {
|
| 12 |
-
const { name, value } = e.target;
|
| 13 |
-
setFormData(prev =>
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
};
|
| 18 |
|
| 19 |
const handleSubmit = (e) => {
|
|
@@ -38,6 +52,19 @@ const AgentForm = ({ onGenerate, isLoading }) => {
|
|
| 38 |
/>
|
| 39 |
</div>
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
<div className="form-group">
|
| 42 |
<label htmlFor="model">Model</label>
|
| 43 |
<select
|
|
@@ -46,9 +73,19 @@ const AgentForm = ({ onGenerate, isLoading }) => {
|
|
| 46 |
value={formData.model}
|
| 47 |
onChange={handleChange}
|
| 48 |
>
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
</select>
|
| 53 |
</div>
|
| 54 |
|
|
@@ -78,6 +115,18 @@ const AgentForm = ({ onGenerate, isLoading }) => {
|
|
| 78 |
/>
|
| 79 |
</div>
|
| 80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
<button type="submit" className="btn-primary" disabled={isLoading}>
|
| 82 |
{isLoading ? 'Generating...' : 'Generate Agent'}
|
| 83 |
</button>
|
|
|
|
| 5 |
name: '',
|
| 6 |
prompt: '',
|
| 7 |
task: '',
|
| 8 |
+
provider: 'anthropic',
|
| 9 |
+
model: 'claude-3-5-sonnet-20241022',
|
| 10 |
+
stream: false
|
| 11 |
});
|
| 12 |
|
| 13 |
const handleChange = (e) => {
|
| 14 |
+
const { name, value, type, checked } = e.target;
|
| 15 |
+
setFormData(prev => {
|
| 16 |
+
const newData = {
|
| 17 |
+
...prev,
|
| 18 |
+
[name]: type === 'checkbox' ? checked : value
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
// Reset model when provider changes
|
| 22 |
+
if (name === 'provider' && value !== prev.provider) {
|
| 23 |
+
if (value === 'anthropic') {
|
| 24 |
+
newData.model = 'claude-3-5-sonnet-20241022';
|
| 25 |
+
} else if (value === 'huggingface') {
|
| 26 |
+
newData.model = 'meta-llama/Meta-Llama-3-8B-Instruct';
|
| 27 |
+
}
|
| 28 |
+
}
|
| 29 |
+
return newData;
|
| 30 |
+
});
|
| 31 |
};
|
| 32 |
|
| 33 |
const handleSubmit = (e) => {
|
|
|
|
| 52 |
/>
|
| 53 |
</div>
|
| 54 |
|
| 55 |
+
<div className="form-group">
|
| 56 |
+
<label htmlFor="provider">Provider</label>
|
| 57 |
+
<select
|
| 58 |
+
id="provider"
|
| 59 |
+
name="provider"
|
| 60 |
+
value={formData.provider}
|
| 61 |
+
onChange={handleChange}
|
| 62 |
+
>
|
| 63 |
+
<option value="anthropic">Anthropic</option>
|
| 64 |
+
<option value="huggingface">Hugging Face</option>
|
| 65 |
+
</select>
|
| 66 |
+
</div>
|
| 67 |
+
|
| 68 |
<div className="form-group">
|
| 69 |
<label htmlFor="model">Model</label>
|
| 70 |
<select
|
|
|
|
| 73 |
value={formData.model}
|
| 74 |
onChange={handleChange}
|
| 75 |
>
|
| 76 |
+
{formData.provider === 'anthropic' ? (
|
| 77 |
+
<>
|
| 78 |
+
<option value="claude-3-5-sonnet-20241022">Claude 3.5 Sonnet (Latest)</option>
|
| 79 |
+
<option value="claude-3-5-haiku-20241022">Claude 3.5 Haiku</option>
|
| 80 |
+
<option value="claude-3-opus-20240229">Claude 3 Opus</option>
|
| 81 |
+
<option value="claude-3-haiku-20240307">Claude 3 Haiku (Legacy)</option>
|
| 82 |
+
</>
|
| 83 |
+
) : (
|
| 84 |
+
<>
|
| 85 |
+
<option value="meta-llama/Meta-Llama-3-8B-Instruct">Meta Llama 3 8B Instruct</option>
|
| 86 |
+
<option value="mistralai/Mistral-7B-Instruct-v0.3">Mistral 7B Instruct v0.3</option>
|
| 87 |
+
</>
|
| 88 |
+
)}
|
| 89 |
</select>
|
| 90 |
</div>
|
| 91 |
|
|
|
|
| 115 |
/>
|
| 116 |
</div>
|
| 117 |
|
| 118 |
+
<div className="form-group checkbox-group">
|
| 119 |
+
<label>
|
| 120 |
+
<input
|
| 121 |
+
type="checkbox"
|
| 122 |
+
name="stream"
|
| 123 |
+
checked={formData.stream}
|
| 124 |
+
onChange={handleChange}
|
| 125 |
+
/>
|
| 126 |
+
Stream Response
|
| 127 |
+
</label>
|
| 128 |
+
</div>
|
| 129 |
+
|
| 130 |
<button type="submit" className="btn-primary" disabled={isLoading}>
|
| 131 |
{isLoading ? 'Generating...' : 'Generate Agent'}
|
| 132 |
</button>
|
implementation_plan.md
CHANGED
|
@@ -1,50 +1,107 @@
|
|
| 1 |
-
#
|
| 2 |
|
| 3 |
## Goal Description
|
| 4 |
|
| 5 |
-
|
| 6 |
|
| 7 |
## User Review Required
|
| 8 |
>
|
| 9 |
-
> [!
|
| 10 |
-
>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
## Proposed Changes
|
| 13 |
|
| 14 |
-
###
|
| 15 |
|
| 16 |
-
#### [MODIFY] [
|
| 17 |
|
| 18 |
-
-
|
| 19 |
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
-
###
|
| 22 |
|
| 23 |
-
|
| 24 |
-
- Remove "Saved to path" message.
|
| 25 |
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
-
#### [MODIFY] [
|
| 29 |
|
| 30 |
-
-
|
| 31 |
-
-
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
###
|
| 34 |
|
| 35 |
-
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
## Verification Plan
|
| 41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
### Manual Verification
|
| 43 |
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
|
|
|
| 1 |
+
# Configure Model Options
|
| 2 |
|
| 3 |
## Goal Description
|
| 4 |
|
| 5 |
+
Update the agent generation form to include the latest Anthropic models and add support for Hugging Face models. Additionally, implement backend validation, streaming support, security measures, a sandbox executor, and observability.
|
| 6 |
|
| 7 |
## User Review Required
|
| 8 |
>
|
| 9 |
+
> [!NOTE]
|
| 10 |
+
> I am adding `claude-3-5-haiku-20241022` to the list.
|
| 11 |
+
> I am also adding support for Hugging Face models (via `huggingface_hub`).
|
| 12 |
+
> I am adding backend validation using Pydantic.
|
| 13 |
+
> I am adding a streaming response toggle.
|
| 14 |
+
> I am adding security measures (pre-commit hook).
|
| 15 |
+
> I am adding a Sandbox Executor for safe agent execution.
|
| 16 |
+
> I am adding Prometheus metrics and a `/healthz` endpoint.
|
| 17 |
|
| 18 |
## Proposed Changes
|
| 19 |
|
| 20 |
+
### Frontend
|
| 21 |
|
| 22 |
+
#### [MODIFY] [AgentForm.jsx](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/frontend/src/components/AgentForm.jsx)
|
| 23 |
|
| 24 |
+
- Add a "Provider" dropdown (Anthropic, Hugging Face).
|
| 25 |
+
- Update "Model" dropdown options based on the selected provider.
|
| 26 |
+
- Add `claude-3-5-haiku-20241022` for Anthropic.
|
| 27 |
+
- Add `meta-llama/Meta-Llama-3-8B-Instruct` and `mistralai/Mistral-7B-Instruct-v0.3` for Hugging Face.
|
| 28 |
+
- Add a "Stream Response" checkbox (default: false).
|
| 29 |
+
- Add a "Test Agent" button to execute the generated code in the sandbox.
|
| 30 |
+
- Display execution results (output/errors) in the UI.
|
| 31 |
|
| 32 |
+
### Backend
|
| 33 |
|
| 34 |
+
#### [NEW] [models.py](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/server/models.py)
|
|
|
|
| 35 |
|
| 36 |
+
- Define `ProviderEnum` (Anthropic, HuggingFace).
|
| 37 |
+
- Define `GenerateRequest` Pydantic model with validation:
|
| 38 |
+
- `provider`: ProviderEnum
|
| 39 |
+
- `model`: Validated against an allowlist per provider.
|
| 40 |
+
- `stream`: bool (default: False)
|
| 41 |
|
| 42 |
+
#### [MODIFY] [main.py](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/server/main.py)
|
| 43 |
|
| 44 |
+
- Import `GenerateRequest` from `models.py`.
|
| 45 |
+
- Update `generate_agent` endpoint to use the new validation model.
|
| 46 |
+
- Add `/api/execute` endpoint.
|
| 47 |
+
- Add `prometheus-fastapi-instrumentator`.
|
| 48 |
+
- Add `/metrics` and `/healthz` endpoints.
|
| 49 |
|
| 50 |
+
#### [MODIFY] [agent_builder.py](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/llm_agent_builder/agent_builder.py)
|
| 51 |
|
| 52 |
+
- Update `build_agent` to accept `provider` and `stream` arguments.
|
| 53 |
+
- Select the appropriate template (`agent_template.py.j2` or `agent_template_hf.py.j2`) based on the provider.
|
| 54 |
+
- Pass `stream` to the template context.
|
| 55 |
|
| 56 |
+
#### [NEW] [agent_template_hf.py.j2](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/llm_agent_builder/templates/agent_template_hf.py.j2)
|
| 57 |
+
|
| 58 |
+
- Create a new Jinja2 template for agents using `huggingface_hub.InferenceClient`.
|
| 59 |
+
- Use `HUGGINGFACEHUB_API_TOKEN` for authentication.
|
| 60 |
+
- Implement conditional logic for `stream=True` vs `stream=False`.
|
| 61 |
+
|
| 62 |
+
#### [MODIFY] [agent_template.py.j2](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/llm_agent_builder/templates/agent_template.py.j2)
|
| 63 |
+
|
| 64 |
+
- Fix the environment variable name from `GEMINI_API_KEY` to `ANTHROPIC_API_KEY`.
|
| 65 |
+
|
| 66 |
+
### Sandbox Executor
|
| 67 |
+
|
| 68 |
+
#### [NEW] [sandbox.py](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/server/sandbox.py)
|
| 69 |
+
|
| 70 |
+
- Implement `run_in_sandbox(code: str, task: str) -> str`:
|
| 71 |
+
- Write code to a temporary file.
|
| 72 |
+
- Use `subprocess.Popen` to execute the file.
|
| 73 |
+
- Use `preexec_fn` to set `resource.setrlimit`:
|
| 74 |
+
- `RLIMIT_CPU`: Limit CPU time (e.g., 30 seconds).
|
| 75 |
+
- `RLIMIT_AS`: Limit address space (memory) (e.g., 512MB).
|
| 76 |
+
- Capture `stdout` and `stderr`.
|
| 77 |
+
- Handle timeouts and errors.
|
| 78 |
+
|
| 79 |
+
### Security & Misc
|
| 80 |
+
|
| 81 |
+
#### [NEW] [.env.example](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/.env.example)
|
| 82 |
+
|
| 83 |
+
- Add `ANTHROPIC_API_KEY` and `HUGGINGFACEHUB_API_TOKEN` placeholders.
|
| 84 |
+
|
| 85 |
+
#### [NEW] [pre-commit-check.sh](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/pre-commit-check.sh)
|
| 86 |
+
|
| 87 |
+
- Simple script to grep for potential API keys in staged files.
|
| 88 |
+
|
| 89 |
+
#### [MODIFY] [requirements.txt](file:///wsl.localhost/Ubuntu/root/LLMAgentBuilder/requirements.txt)
|
| 90 |
+
|
| 91 |
+
- Add `prometheus-fastapi-instrumentator`.
|
| 92 |
|
| 93 |
## Verification Plan
|
| 94 |
|
| 95 |
+
### Automated Tests
|
| 96 |
+
|
| 97 |
+
- Run `pytest` to ensure no regressions in `agent_builder`.
|
| 98 |
+
|
| 99 |
### Manual Verification
|
| 100 |
|
| 101 |
+
- Start the frontend (`npm run dev` in `frontend/`).
|
| 102 |
+
- Verify the new provider and model options.
|
| 103 |
+
- Verify the stream toggle works.
|
| 104 |
+
- Generate an agent with streaming enabled and check the code.
|
| 105 |
+
- Test the "Test Agent" button with a simple task.
|
| 106 |
+
- Verify `/metrics` and `/healthz` endpoints.
|
| 107 |
+
- Try to commit a file with a fake API key to test the hook.
|
llm_agent_builder/agent_builder.py
CHANGED
|
@@ -9,19 +9,25 @@ class AgentBuilder:
|
|
| 9 |
self.env = Environment(loader=FileSystemLoader(template_path))
|
| 10 |
self.template = self.env.get_template('agent_template.py.j2')
|
| 11 |
|
| 12 |
-
def build_agent(self, agent_name: str, prompt: str, example_task: str, model: str = "claude-3-5-sonnet-20241022") -> str:
|
| 13 |
"""
|
| 14 |
Generates the Python code for a new agent.
|
| 15 |
|
| 16 |
:param agent_name: The name of the agent class to be generated.
|
| 17 |
:param prompt: The system prompt for the agent.
|
| 18 |
:param example_task: An example task for the agent.
|
| 19 |
-
:param model: The
|
|
|
|
|
|
|
| 20 |
:return: The generated Python code as a string.
|
| 21 |
"""
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
| 23 |
agent_name=agent_name,
|
| 24 |
prompt=prompt,
|
| 25 |
example_task=example_task,
|
| 26 |
-
model=model
|
|
|
|
| 27 |
)
|
|
|
|
| 9 |
self.env = Environment(loader=FileSystemLoader(template_path))
|
| 10 |
self.template = self.env.get_template('agent_template.py.j2')
|
| 11 |
|
| 12 |
+
def build_agent(self, agent_name: str, prompt: str, example_task: str, model: str = "claude-3-5-sonnet-20241022", provider: str = "anthropic", stream: bool = False) -> str:
|
| 13 |
"""
|
| 14 |
Generates the Python code for a new agent.
|
| 15 |
|
| 16 |
:param agent_name: The name of the agent class to be generated.
|
| 17 |
:param prompt: The system prompt for the agent.
|
| 18 |
:param example_task: An example task for the agent.
|
| 19 |
+
:param model: The model to use.
|
| 20 |
+
:param provider: The provider (anthropic or huggingface).
|
| 21 |
+
:param stream: Whether to stream the response.
|
| 22 |
:return: The generated Python code as a string.
|
| 23 |
"""
|
| 24 |
+
template_name = 'agent_template_hf.py.j2' if provider == 'huggingface' else 'agent_template.py.j2'
|
| 25 |
+
template = self.env.get_template(template_name)
|
| 26 |
+
|
| 27 |
+
return template.render(
|
| 28 |
agent_name=agent_name,
|
| 29 |
prompt=prompt,
|
| 30 |
example_task=example_task,
|
| 31 |
+
model=model,
|
| 32 |
+
stream=stream
|
| 33 |
)
|
llm_agent_builder/templates/agent_template.py.j2
CHANGED
|
@@ -31,9 +31,9 @@ if __name__ == '__main__':
|
|
| 31 |
args = parser.parse_args()
|
| 32 |
|
| 33 |
# Ensure API key is set
|
| 34 |
-
api_key = os.environ.get("
|
| 35 |
if not api_key:
|
| 36 |
-
raise ValueError("
|
| 37 |
|
| 38 |
try:
|
| 39 |
agent = {{ agent_name }}(api_key=api_key)
|
|
|
|
| 31 |
args = parser.parse_args()
|
| 32 |
|
| 33 |
# Ensure API key is set
|
| 34 |
+
api_key = os.environ.get("ANTHROPIC_API_KEY")
|
| 35 |
if not api_key:
|
| 36 |
+
raise ValueError("ANTHROPIC_API_KEY environment variable not set. Please set it in your .env file or environment.")
|
| 37 |
|
| 38 |
try:
|
| 39 |
agent = {{ agent_name }}(api_key=api_key)
|
llm_agent_builder/templates/agent_template_hf.py.j2
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from huggingface_hub import InferenceClient

class {{ agent_name }}:
    """Agent backed by the Hugging Face Inference API."""

    def __init__(self, api_key):
        # The token authenticates every call made through the InferenceClient.
        self.client = InferenceClient(token=api_key)
        # `tojson` renders the prompt as a properly escaped string literal, so
        # prompts containing quotes, backslashes or newlines cannot break the
        # generated code (the old `"{{ '{{- prompt -}}' }}"` form could).
        self.prompt = {{ prompt | tojson }}

    def run(self, task):
        """Send `task` to the model and return the full response text."""
        messages = [
            {"role": "system", "content": self.prompt},
            {"role": "user", "content": task}
        ]

        {% if stream %}
        response = self.client.chat_completion(
            model="{{ model }}",
            messages=messages,
            max_tokens=1024,
            stream=True
        )
        # Print tokens as they arrive while accumulating the complete text.
        full_response = ""
        for chunk in response:
            content = chunk.choices[0].delta.content
            if content:
                print(content, end="", flush=True)
                full_response += content
        print()  # Newline after stream
        return full_response
        {% else %}
        response = self.client.chat_completion(
            model="{{ model }}",
            messages=messages,
            max_tokens=1024,
            stream=False
        )
        return response.choices[0].message.content
        {% endif %}

if __name__ == '__main__':
    import argparse
    from dotenv import load_dotenv

    load_dotenv()

    # Parse command line arguments
    parser = argparse.ArgumentParser(description="Run the {{ agent_name }} agent.")
    parser.add_argument("--task", default={{ example_task | tojson }}, help="The task to be performed by the agent")
    args = parser.parse_args()

    # Ensure API key is set
    api_key = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
    if not api_key:
        raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable not set. Please set it in your .env file or environment.")

    try:
        agent = {{ agent_name }}(api_key=api_key)
        print(f"Running {{ agent_name }} with task: {args.task}\n")
        result = agent.run(args.task)
        {% if not stream %}
        # In streaming mode the tokens were already printed during run();
        # only echo the result (with its header) in non-streaming mode.
        print("Response:")
        print("-" * 50)
        print(result)
        print("-" * 50)
        {% endif %}
    except Exception as e:
        print(f"Error running agent: {e}")
|
pre-commit-check.sh
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash
# Pre-commit hook to check for exposed API keys.
#
# Scans the blobs that are actually STAGED (git show :<file>) rather than the
# working tree, so unstaged edits cannot cause false positives/negatives.

# Keywords to search for (matched as fixed strings, not regexes)
KEYWORDS="ANTHROPIC_API_KEY HUGGINGFACEHUB_API_TOKEN sk-ant- hf_"

FOUND_KEYS=0

# --diff-filter=ACM skips deletions/renames-away, which have no staged blob
# to scan and would otherwise make the content lookup fail.
# while-read (not `for FILE in $FILES`) keeps filenames with spaces intact.
while IFS= read -r FILE; do
    # Skip check for .env.example and pre-commit-check.sh itself
    if [ "$FILE" = ".env.example" ] || [ "$FILE" = "pre-commit-check.sh" ]; then
        continue
    fi

    for KEYWORD in $KEYWORDS; do
        # -F: literal match; --: guard against patterns starting with '-'.
        if git show ":$FILE" 2>/dev/null | grep -qF -- "$KEYWORD"; then
            echo "ERROR: Found potential API key or sensitive keyword '$KEYWORD' in '$FILE'."
            FOUND_KEYS=1
        fi
    done
done < <(git diff --cached --name-only --diff-filter=ACM)

if [ "$FOUND_KEYS" -eq 1 ]; then
    echo "Commit rejected. Please remove sensitive information before committing."
    exit 1
fi

exit 0
|
server/main.py
CHANGED
|
@@ -11,6 +11,8 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
| 11 |
|
| 12 |
from llm_agent_builder.agent_builder import AgentBuilder
|
| 13 |
|
|
|
|
|
|
|
| 14 |
app = FastAPI()
|
| 15 |
|
| 16 |
# Configure CORS
|
|
@@ -22,12 +24,6 @@ app.add_middleware(
|
|
| 22 |
allow_headers=["*"],
|
| 23 |
)
|
| 24 |
|
| 25 |
-
class GenerateRequest(BaseModel):
|
| 26 |
-
name: str
|
| 27 |
-
prompt: str
|
| 28 |
-
task: str
|
| 29 |
-
model: str = "claude-3-5-sonnet-20241022"
|
| 30 |
-
|
| 31 |
@app.post("/api/generate")
|
| 32 |
async def generate_agent(request: GenerateRequest):
|
| 33 |
try:
|
|
@@ -36,7 +32,9 @@ async def generate_agent(request: GenerateRequest):
|
|
| 36 |
agent_name=request.name,
|
| 37 |
prompt=request.prompt,
|
| 38 |
example_task=request.task,
|
| 39 |
-
model=request.model
|
|
|
|
|
|
|
| 40 |
)
|
| 41 |
|
| 42 |
# Stateless: Return code directly, do not save to disk
|
|
|
|
| 11 |
|
| 12 |
from llm_agent_builder.agent_builder import AgentBuilder
|
| 13 |
|
| 14 |
+
from server.models import GenerateRequest, ProviderEnum
|
| 15 |
+
|
| 16 |
app = FastAPI()
|
| 17 |
|
| 18 |
# Configure CORS
|
|
|
|
| 24 |
allow_headers=["*"],
|
| 25 |
)
|
| 26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
@app.post("/api/generate")
|
| 28 |
async def generate_agent(request: GenerateRequest):
|
| 29 |
try:
|
|
|
|
| 32 |
agent_name=request.name,
|
| 33 |
prompt=request.prompt,
|
| 34 |
example_task=request.task,
|
| 35 |
+
model=request.model,
|
| 36 |
+
provider=request.provider,
|
| 37 |
+
stream=request.stream
|
| 38 |
)
|
| 39 |
|
| 40 |
# Stateless: Return code directly, do not save to disk
|
server/models.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from enum import Enum
from pydantic import BaseModel, validator
from typing import Optional

class ProviderEnum(str, Enum):
    """Supported LLM providers for agent generation."""
    ANTHROPIC = "anthropic"
    HUGGINGFACE = "huggingface"

# Single source of truth for the per-provider model allowlist.
# Module-level and frozen so it is built once, not on every validation call,
# and cannot be mutated at runtime.
ALLOWED_MODELS = {
    ProviderEnum.ANTHROPIC: frozenset({
        "claude-3-5-sonnet-20241022",
        "claude-3-opus-20240229",
        "claude-3-haiku-20240307",
        "claude-3-5-haiku-20241022",
    }),
    ProviderEnum.HUGGINGFACE: frozenset({
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "mistralai/Mistral-7B-Instruct-v0.3",
    }),
}

class GenerateRequest(BaseModel):
    """Validated payload for the agent-generation endpoint."""
    name: str
    prompt: str
    task: str
    provider: ProviderEnum = ProviderEnum.ANTHROPIC
    model: str
    stream: bool = False

    # NOTE(review): pydantic v1-style validator; if the project moves to
    # pydantic v2, migrate to @field_validator — confirm installed version.
    @validator('model')
    def validate_model(cls, v, values):
        """Reject any model that is not allowlisted for the chosen provider.

        `values` only contains fields that already validated (declared before
        `model`), so if `provider` itself failed validation it is absent here
        and we skip the cross-field check, letting the provider error surface.
        """
        provider = values.get('provider')
        allowed = ALLOWED_MODELS.get(provider)
        if allowed is not None and v not in allowed:
            if provider == ProviderEnum.ANTHROPIC:
                raise ValueError(f"Model {v} not supported for Anthropic")
            raise ValueError(f"Model {v} not supported for Hugging Face")
        return v
|
task.md
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Tasks
|
| 2 |
+
|
| 3 |
+
- [x] Professionalize CLI (Previous Context)
|
| 4 |
+
- [x] Implement React + Vite Frontend <!-- id: 0 -->
|
| 5 |
+
- [x] Implement FastAPI Backend <!-- id: 1 -->
|
| 6 |
+
- [x] Deploy to Hugging Face Spaces <!-- id: 2 -->
|
| 7 |
+
- [/] Configure Model Options <!-- id: 3 -->
|
| 8 |
+
- [ ] Update AgentForm.jsx with new models and provider selector
|
| 9 |
+
- [ ] Add Stream Response toggle
|
| 10 |
+
- [/] Implement Backend Validation (Pydantic)
|
| 11 |
+
- [/] Implement Streaming Support in Templates
|
| 12 |
+
- [ ] Add Security Measures (.env.example, pre-commit)
|
|
| 14 |
+
- [ ] Update AgentBuilder default if needed
|
| 15 |
+
- [ ] Implement Sandbox Executor <!-- id: 4 -->
|
| 16 |
+
- [ ] Create Sandbox module (subprocess + resource limits)
|
| 17 |
+
- [ ] Add /api/execute endpoint
|
| 18 |
+
- [ ] Add 'Test Agent' button in Frontend
|
| 19 |
+
- [ ] Add Observability <!-- id: 5 -->
|
| 20 |
+
- [ ] Add Prometheus Metrics
|
| 21 |
+
- [ ] Add /healthz endpoint
|
| 22 |
+
- [x] Push to GitHub
|