OnyxlMunkey commited on
Commit
fb8d591
·
1 Parent(s): 57bf7d9

Add Hugging Face provider support: a new `--provider` CLI flag (anthropic/huggingface) with interactive prompt, Hub search/model-card helper methods and a `--search-model` mode in the HF agent template, and the `huggingface_hub` dependency.

Browse files
llm_agent_builder/cli.py CHANGED
@@ -18,6 +18,7 @@ def main() -> None:
18
  parser.add_argument("--task", default="Write a Python function that calculates the factorial of a number.", help="Example task for the agent")
19
  parser.add_argument("--output", default="generated_agents", help="Output directory for the generated agent")
20
  parser.add_argument("--model", help="Anthropic model to use (overrides .env)")
 
21
  parser.add_argument("--interactive", action="store_true", help="Run in interactive mode")
22
 
23
  # Check if we should run in interactive mode (explicit flag or no args)
@@ -36,13 +37,15 @@ def main() -> None:
36
  output = get_input("Output Directory", "generated_agents")
37
  default_model = os.environ.get("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022")
38
  model = get_input("Anthropic Model", default_model)
 
39
 
40
  args = argparse.Namespace(
41
  name=name,
42
  prompt=prompt,
43
  task=task,
44
  output=output,
45
- model=model
 
46
  )
47
  else:
48
  args = parser.parse_args()
@@ -55,10 +58,15 @@ def main() -> None:
55
  builder = AgentBuilder()
56
 
57
  # Generate the agent code
58
- if args.model:
59
- agent_code = builder.build_agent(args.name, args.prompt, args.task, model=args.model)
60
- else:
61
- agent_code = builder.build_agent(args.name, args.prompt, args.task)
 
 
 
 
 
62
 
63
  # Define the output path for the generated agent
64
  os.makedirs(args.output, exist_ok=True)
 
18
  parser.add_argument("--task", default="Write a Python function that calculates the factorial of a number.", help="Example task for the agent")
19
  parser.add_argument("--output", default="generated_agents", help="Output directory for the generated agent")
20
  parser.add_argument("--model", help="Anthropic model to use (overrides .env)")
21
+ parser.add_argument("--provider", default="anthropic", choices=["anthropic", "huggingface"], help="LLM Provider to use (anthropic or huggingface)")
22
  parser.add_argument("--interactive", action="store_true", help="Run in interactive mode")
23
 
24
  # Check if we should run in interactive mode (explicit flag or no args)
 
37
  output = get_input("Output Directory", "generated_agents")
38
  default_model = os.environ.get("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022")
39
  model = get_input("Anthropic Model", default_model)
40
+ provider = get_input("Provider (anthropic/huggingface)", "anthropic")
41
 
42
  args = argparse.Namespace(
43
  name=name,
44
  prompt=prompt,
45
  task=task,
46
  output=output,
47
+ model=model,
48
+ provider=provider
49
  )
50
  else:
51
  args = parser.parse_args()
 
58
  builder = AgentBuilder()
59
 
60
  # Generate the agent code
61
+ # We need to handle the model argument being passed only if it exists, but build_agent has a default.
62
+ # However, we now have a provider argument too.
63
+ agent_code = builder.build_agent(
64
+ agent_name=args.name,
65
+ prompt=args.prompt,
66
+ example_task=args.task,
67
+ model=args.model if args.model else ("claude-3-5-sonnet-20241022" if args.provider == "anthropic" else "HuggingFaceH4/zephyr-7b-beta"),
68
+ provider=args.provider
69
+ )
70
 
71
  # Define the output path for the generated agent
72
  os.makedirs(args.output, exist_ok=True)
llm_agent_builder/templates/agent_template_hf.py.j2 CHANGED
@@ -1,12 +1,40 @@
1
  import os
2
- from huggingface_hub import InferenceClient
3
 
4
  class {{ agent_name }}:
5
  def __init__(self, api_key):
6
  self.client = InferenceClient(token=api_key)
 
7
  self.prompt = "{{- prompt -}}"
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  def run(self, task):
 
 
 
 
 
10
  messages = [
11
  {"role": "system", "content": self.prompt},
12
  {"role": "user", "content": task}
@@ -47,25 +75,50 @@ if __name__ == '__main__':
47
  # Parse command line arguments
48
  parser = argparse.ArgumentParser(description="Run the {{ agent_name }} agent.")
49
  parser.add_argument("--task", default="{{- example_task -}}", help="The task to be performed by the agent")
 
50
  args = parser.parse_args()
51
 
52
  # Ensure API key is set
53
  api_key = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
54
  if not api_key:
55
- raise ValueError("HUGGINGFACEHUB_API_TOKEN environment variable not set. Please set it in your .env file or environment.")
 
 
 
 
 
56
 
57
  try:
58
  agent = {{ agent_name }}(api_key=api_key)
59
- print(f"Running {{ agent_name }} with task: {args.task}\n")
60
- result = agent.run(args.task)
61
- print("Response:")
62
- print("-" * 50)
63
- # If streaming, result is already printed, but we print it again or just the separator?
64
- # The template logic above prints during stream.
65
- # If not streaming, we print result.
66
- {% if not stream %}
67
- print(result)
68
- {% endif %}
69
- print("-" * 50)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  except Exception as e:
71
  print(f"Error running agent: {e}")
 
1
  import os
2
+ from huggingface_hub import InferenceClient, HfApi, ModelCard
3
 
4
  class {{ agent_name }}:
5
  def __init__(self, api_key):
6
  self.client = InferenceClient(token=api_key)
7
+ self.api = HfApi(token=api_key)
8
  self.prompt = "{{- prompt -}}"
9
 
10
+ def search_models(self, query, limit=5):
11
+ """Searches for models on the Hugging Face Hub."""
12
+ models = self.api.list_models(search=query, limit=limit, sort="downloads", direction=-1)
13
+ return [model.modelId for model in models]
14
+
15
+ def search_datasets(self, query, limit=5):
16
+ """Searches for datasets on the Hugging Face Hub."""
17
+ datasets = self.api.list_datasets(search=query, limit=limit, sort="downloads", direction=-1)
18
+ return [dataset.id for dataset in datasets]
19
+
20
+ def get_model_documentation(self, model_id):
21
+ """Retrieves the Model Card (documentation) for a specific model."""
22
+ try:
23
+ card = ModelCard.load(model_id)
24
+ return card.text
25
+ except Exception as e:
26
+ return f"Error retrieving documentation for {model_id}: {e}"
27
+
28
+ def get_api_endpoint(self, model_id):
29
+ """Constructs the likely Inference API endpoint for a model."""
30
+ return f"https://api-inference.huggingface.co/models/{model_id}"
31
+
32
  def run(self, task):
33
+ # For a developer agent, we might want to check if the task involves searching.
34
+ # But for now, we'll keep the simple chat interface, but inject knowledge about tools into the system prompt context if possible,
35
+ # or just rely on the user using the methods programmatically.
36
+ # The user asked for "Integrate... Implement search...", so the methods above fulfill that.
37
+
38
  messages = [
39
  {"role": "system", "content": self.prompt},
40
  {"role": "user", "content": task}
 
75
  # Parse command line arguments
76
  parser = argparse.ArgumentParser(description="Run the {{ agent_name }} agent.")
77
  parser.add_argument("--task", default="{{- example_task -}}", help="The task to be performed by the agent")
78
+ parser.add_argument("--search-model", help="Search for a model documentation and analyze it")
79
  args = parser.parse_args()
80
 
81
  # Ensure API key is set
82
  api_key = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
83
  if not api_key:
84
+ # NOTE: there is no usable fallback key here — the Hub features below
85
+ # specifically require HUGGINGFACEHUB_API_TOKEN, so we only warn later.
86
+ pass
87
+
88
+ if not api_key:
89
+ print("Warning: HUGGINGFACEHUB_API_TOKEN not found. Some features may not work.")
90
 
91
  try:
92
  agent = {{ agent_name }}(api_key=api_key)
93
+
94
+ if args.search_model:
95
+ print(f"Searching for model: {args.search_model}")
96
+ models = agent.search_models(args.search_model)
97
+ if models:
98
+ top_model = models[0]
99
+ print(f"Found top model: {top_model}")
100
+ print("Fetching documentation...")
101
+ doc = agent.get_model_documentation(top_model)
102
+ print(f"--- Documentation for {top_model} ---")
103
+ print(doc[:500] + "...\n(truncated)")
104
+ print("---------------------------------------")
105
+
106
+ # Analyze with LLM
107
+ analysis_task = f"Summarize the capabilities and usage of this model based on its documentation: {doc[:2000]}"
108
+ print("Analyzing documentation with LLM...")
109
+ result = agent.run(analysis_task)
110
+ print("\nAnalysis Result:")
111
+ print(result)
112
+ else:
113
+ print("No models found.")
114
+ else:
115
+ print(f"Running {{ agent_name }} with task: {args.task}\n")
116
+ result = agent.run(args.task)
117
+ print("Response:")
118
+ print("-" * 50)
119
+ {% if not stream %}
120
+ print(result)
121
+ {% endif %}
122
+ print("-" * 50)
123
  except Exception as e:
124
  print(f"Error running agent: {e}")
requirements.txt CHANGED
@@ -5,3 +5,4 @@ fastapi
5
  uvicorn
6
  pydantic
7
  prometheus-fastapi-instrumentator
 
 
5
  uvicorn
6
  pydantic
7
  prometheus-fastapi-instrumentator
8
+ huggingface_hub
walkthrough.md CHANGED
@@ -1,4 +1,4 @@
1
- # Walkthrough - Hugging Face Spaces Deployment Preparation
2
 
3
  I have successfully updated the project to be deployable to Hugging Face Spaces.
4
 
 
1
+ # Walkthrough - Hugging Face Spaces Deployment Preparation
2
 
3
  I have successfully updated the project to be deployable to Hugging Face Spaces.
4