GenAIDevTOProd committed on
Commit
6d81bb7
·
verified ·
1 Parent(s): a37bac4

Upload agent_py.py

Browse files
Files changed (1) hide show
  1. agent_py.py +104 -0
agent_py.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Agent.py

Automatically generated by Colab.

Original file is located at
https://colab.research.google.com/drive/1aYmc6hcd6JKLK6sdFwPS-yC89WliBwhh
"""

# FIX(review): the notebook export left bare `pip install ...` lines here.
# Those are IPython shell commands, valid only inside a notebook cell; in a
# plain .py module they are a SyntaxError and the file cannot even be
# imported. Install the dependencies from a shell instead:
#
#   pip install langchain langchain-openai pydantic openai
#   pip install langchain-community

"""# Library and Framework imports
- ChatOpenAI: This is the LLM backend (OpenAI’s GPT model).

- ConversationBufferMemory: This stores past messages, enabling Memory (MCP).

- initialize_agent: Initializes the agent, which decides how to use the LLM and tools.

- Tool: Represents a single tool the agent can call
"""

# NOTE(review): `langchain.chat_models.ChatOpenAI` is a deprecated import path
# in recent LangChain releases; `from langchain_openai import ChatOpenAI` is
# the maintained one (the `langchain-openai` package is installed above).
# Confirm against the pinned LangChain version before switching.
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
import os
29
+
30
"""# Build the Memory Layer (MCP)

- The memory ensures the agent doesn’t “forget” previous context, which is crucial for MCP (Memory + Chain-of-Thought).

- return_messages=True means the memory will store entire message objects (input and output).
"""

def build_memory() -> ConversationBufferMemory:
    """Build the conversation memory that backs the agent.

    Returns a ConversationBufferMemory keyed as "chat_history" that keeps
    full message objects (return_messages=True) rather than flattened text.
    """
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )
    return memory
40
+
41
"""# Prompt engineering:
- SystemPrompt: Sets up the agent's persona as an experienced platform architect. This is the agent's guide.

- HumanPrompt: Defines what the agent needs (modernization plan, resilience strategy).

- CoT Behavior: The agent will reason step-by-step and expose only the reasoning summary at the end.
"""

from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate

def build_prompt() -> ChatPromptTemplate:
    """Assemble the system+human chat prompt used by the agent.

    The system message fixes the persona and output rules; the human
    message carries the {legacy_stack} / {outage_scenario} inputs and the
    requested three-part answer format.
    """
    # Persona + behavioral contract for the model.
    system_text = (
        "You are InfraResilience, a senior platform architect.\n"
        "Task: given a legacy stack, propose modernization options AND a resilience test plan.\n"
        "Be concrete, AWS-aware (Airflow/Step Functions/ECS/Lambda/S3/SQS/SNS), and security-conscious.\n"
        "Output concise, actionable bullet points.\n"
        "When planning, think step-by-step internally; expose only a brief reasoning summary."
    )
    # Per-request inputs and the expected answer structure.
    human_text = (
        "Legacy stack description:\n{legacy_stack}\n\n"
        "Outage scenario (optional): {outage_scenario}\n\n"
        "Provide:\n"
        "1) Modernization plan\n"
        "2) Resilience test strategy\n"
        "3) Short reasoning summary"
    )
    messages = [
        SystemMessagePromptTemplate.from_template(system_text),
        HumanMessagePromptTemplate.from_template(human_text),
    ]
    return ChatPromptTemplate.from_messages(messages)
71
+
72
"""# GPT-4o-Mini LLM build
- temperature=0.2: This controls the randomness of the LLM. A lower value makes the LLM more deterministic (less random).

- LLM (OpenAI's GPT) that the agent will use for reasoning.
"""

from typing import Optional
def build_llm(model_name: Optional[str] = None, temperature: float = 0.2) -> ChatOpenAI:
    """Construct the chat model the agent reasons with.

    When no model_name is given (or it is falsy), falls back to the
    LLM_MODEL_NAME environment variable and finally to "gpt-4o-mini".
    temperature defaults low (0.2) for mostly-deterministic output.
    """
    env_default = os.getenv("LLM_MODEL_NAME", "gpt-4o-mini")
    selected = model_name if model_name else env_default
    return ChatOpenAI(model=selected, temperature=temperature)
83
+
84
"""So far, we have Memory, Prompts, LLM initialized. Now, let's build the agent with tools.

The agent is now connected to tools (we'll define in tools.py), memory, and the prompt template. It's ready to think through problems.
"""

from typing import List
def create_agent(tools: List[Tool], llm: Optional[ChatOpenAI] = None, memory: Optional[ConversationBufferMemory] = None):
    """Wire tools, LLM, memory and the custom prompt into a ReAct agent.

    llm and memory may be supplied by the caller; missing pieces are built
    with the module defaults. Returns the initialized agent, ready to be
    invoked.
    """
    active_llm = llm if llm else build_llm()              # LLM backend (overridable)
    active_memory = memory if memory else build_memory()  # conversation memory (MCP)
    custom_prompt = build_prompt()                        # system+human prompt template

    # ZERO_SHOT_REACT_DESCRIPTION drives the ReAct think/act loop; the custom
    # prompt is injected through agent_kwargs' extra_prompt_messages.
    return initialize_agent(
        tools=tools,
        llm=active_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        memory=active_memory,
        verbose=False,
        agent_kwargs={"extra_prompt_messages": [custom_prompt]},
    )