samagra44 commited on
Commit
f154798
·
1 Parent(s): 60f193f

initial commit

Browse files
.dockerignore ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ env/
6
+ venv/
7
+ *.log
8
+ .git
9
+ .gitignore
10
+ tests/
11
+ .DS_Store
.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
Dockerfile CHANGED
@@ -1,21 +1,27 @@
1
- FROM python:3.9-slim
 
 
 
 
2
 
3
  WORKDIR /app
4
 
5
- RUN apt-get update && apt-get install -y \
6
  build-essential \
7
- curl \
8
- software-properties-common \
9
- git \
10
  && rm -rf /var/lib/apt/lists/*
11
 
12
- COPY requirements.txt ./
13
- COPY src/ ./src/
 
14
 
15
- RUN pip3 install -r requirements.txt
16
 
17
- EXPOSE 8501
 
 
 
 
18
 
19
- HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
20
 
21
- ENTRYPOINT ["streamlit", "run", "src/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
 
1
+ FROM python:3.10-slim
2
+
3
+ ENV PYTHONDONTWRITEBYTECODE=1 \
4
+ PYTHONUNBUFFERED=1 \
5
+ PIP_NO_CACHE_DIR=1
6
 
7
  WORKDIR /app
8
 
9
+ RUN apt-get update && apt-get install -y --no-install-recommends \
10
  build-essential \
 
 
 
11
  && rm -rf /var/lib/apt/lists/*
12
 
13
+ COPY requirements.txt .
14
+
15
+ RUN pip install --upgrade pip && pip install -r requirements.txt
16
 
17
+ COPY . .
18
 
19
+ ENV PORT=7860
20
+ ENV STREAMLIT_SERVER_PORT=$PORT
21
+ ENV STREAMLIT_SERVER_HEADLESS=true
22
+ ENV STREAMLIT_SERVER_ENABLECORS=false
23
+ ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0
24
 
25
+ EXPOSE 7860
26
 
27
+ CMD ["streamlit", "run", "app.py", "--server.port", "7860", "--server.headless", "true", "--server.address", "0.0.0.0"]
README.md CHANGED
@@ -1,19 +1,419 @@
1
- ---
2
- title: LinkedIn Post Generator
3
- emoji: 🚀
4
- colorFrom: red
5
- colorTo: red
6
- sdk: docker
7
- app_port: 8501
8
- tags:
9
- - streamlit
10
- pinned: false
11
- short_description: Streamlit template space
12
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
- # Welcome to Streamlit!
15
 
16
- Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
19
- forums](https://discuss.streamlit.io).
 
1
+ # 🚀 LangGraph Post Generator
2
+
3
+ An intelligent LinkedIn post generator powered by **LangGraph** and **Azure OpenAI** that creates engaging, professional content by intelligently routing queries through web search and content generation workflows.
4
+
5
+ ## 📋 Table of Contents
6
+
7
+ - [Overview](#overview)
8
+ - [Features](#features)
9
+ - [Architecture](#architecture)
10
+ - [Tech Stack](#tech-stack)
11
+ - [Installation](#installation)
12
+ - [Configuration](#configuration)
13
+ - [Usage](#usage)
14
+ - [Project Structure](#project-structure)
15
+ - [API Documentation](#api-documentation)
16
+ - [Technical Details](#technical-details)
17
+ - [Testing](#testing)
18
+ - [Contributing](#contributing)
19
+ - [License](#license)
20
+
21
+ ## 🌟 Overview
22
+
23
+ The **LangGraph Post Generator** is an AI-powered application that transforms simple topic inputs into compelling LinkedIn posts. It leverages the power of **LangGraph** to create an intelligent workflow that decides whether to search for additional context or directly generate content based on the input query.
24
+
25
+ ### Key Capabilities:
26
+ - **Intelligent Routing**: Automatically determines if web search is needed for better context
27
+ - **Web Search Integration**: Uses DuckDuckGo to gather recent, relevant information
28
+ - **Professional Content Generation**: Creates LinkedIn-optimized posts with proper tone and structure
29
+ - **Real-time Processing**: Streamlit-based web interface for immediate results
30
+ - **Comprehensive Logging**: Detailed logging for debugging and monitoring
31
+
32
+ ## ✨ Features
33
+
34
+ - 🧠 **Smart Query Routing**: AI-powered decision making for content enhancement
35
+ - 🔍 **Real-time Web Search**: Integration with DuckDuckGo for up-to-date information
36
+ - 📝 **Professional Content Creation**: LinkedIn-optimized posts with engaging tone
37
+ - 🎯 **Context-Aware Generation**: Uses web search results to create informed content
38
+ - 📊 **Enhanced Streamlit Interface**: Beautiful, modern UI with sidebar configurations
39
+ - ⚙️ **Configurable Settings**: Customize post length, tone, hashtags, and call-to-action
40
+ - 🎨 **Professional Styling**: LinkedIn-inspired design with gradients and modern components
41
+ - 📈 **Post Analytics**: Track generated posts and usage statistics
42
+ - 💡 **Smart Suggestions**: Hashtag recommendations and example topics
43
+ - 🔧 **Modular Architecture**: Clean, maintainable codebase with separation of concerns
44
+ - 📋 **Comprehensive Logging**: Detailed logs for monitoring and debugging
45
+ - 🔄 **Flexible Workflow**: LangGraph-powered state management
46
+
47
+ ## 🏗️ Architecture
48
+
49
+ The application follows a **LangGraph-based workflow** with the following components:
50
+
51
+ ```mermaid
52
+ graph TD
53
+ A[User Input] --> B[Question Router]
54
+ B --> C{Route Decision}
55
+ C -->|Need Context| D[Query Transform]
56
+ C -->|Direct Generation| E[Content Generator]
57
+ D --> F[Web Search]
58
+ F --> E
59
+ E --> G[LinkedIn Post Output]
60
+ ```
61
+
62
+ ### Workflow Steps:
63
+ 1. **Input Processing**: User provides a topic through Streamlit interface
64
+ 2. **Question Routing**: AI determines if web search is needed
65
+ 3. **Query Transformation** (if needed): Optimizes the query for web search
66
+ 4. **Web Search** (if needed): Gathers relevant context using DuckDuckGo
67
+ 5. **Content Generation**: Creates professional LinkedIn post using Azure OpenAI
68
+ 6. **Output**: Returns formatted LinkedIn post to user
69
+
70
+ ## 🛠️ Tech Stack
71
+
72
+ ### Core Technologies:
73
+ - **Python 3.8+** - Primary programming language
74
+ - **LangGraph** - Workflow orchestration and state management
75
+ - **LangChain** - LLM framework and prompt management
76
+ - **Azure OpenAI** - Language model for content generation
77
+ - **Streamlit** - Web application framework
78
+ - **DuckDuckGo Search** - Web search functionality
79
+
80
+ ### Dependencies:
81
+ - `streamlit` - Web interface
82
+ - `langchain` - LLM framework
83
+ - `langchain-core` - Core LangChain functionality
84
+ - `langchain-community` - Community tools and utilities
85
+ - `langchain-openai` - Azure OpenAI integration
86
+ - `langgraph` - Graph-based workflow management
87
+ - `langsmith` - LangChain monitoring and debugging
88
+ - `duckduckgo-search` - Web search capabilities
89
+ - `python-dotenv` - Environment variable management
90
+
91
+ ## 📦 Installation
92
+
93
+ ### Prerequisites
94
+ - Python 3.8 or higher
95
+ - Azure OpenAI API access
96
+ - Git (for cloning the repository)
97
+
98
+ ### Step 1: Clone the Repository
99
+ ```bash
100
+ git clone https://github.com/your-username/langgraph-post-generator.git
101
+ cd langgraph-post-generator/post-generator-agent
102
+ ```
103
+
104
+ ### Step 2: Create Virtual Environment
105
+ ```bash
106
+ # Create virtual environment
107
+ python -m venv venv
108
+
109
+ # Activate virtual environment
110
+ # On Windows:
111
+ venv\Scripts\activate
112
+ # On macOS/Linux:
113
+ source venv/bin/activate
114
+ ```
115
+
116
+ ### Step 3: Install Dependencies
117
+ ```bash
118
+ pip install -r requirements.txt
119
+ ```
120
+
121
+ ### Step 4: Run the Application
122
+ ```bash
123
+ streamlit run app.py
124
+ ```
125
+
126
+ ### Step 5: Configure Credentials
127
+ 1. Open the app in your browser
128
+ 2. Go to the sidebar configuration
129
+ 3. Enter your Azure OpenAI credentials:
130
+ - **API Key**: Your Azure OpenAI API key
131
+ - **Endpoint**: Your Azure OpenAI endpoint (e.g., `https://your-resource.openai.azure.com/`)
132
+ - **Deployment Name**: Your model deployment name (e.g., `gpt-35-turbo`)
133
+ - **API Version**: API version (default: `2024-02-01`)
134
+ 4. Click "Save Credentials"
135
+ 5. Test the connection using the "Test Connection" button
136
+
137
+ ## ⚙️ Configuration
138
+
139
+ ### Azure OpenAI Setup
140
+
141
+ 1. **Create Azure OpenAI Resource**:
142
+ - Go to [Azure Portal](https://portal.azure.com)
143
+ - Create a new OpenAI resource
144
+ - Note your endpoint and API key
145
+
146
+ 2. **Deploy a Model**:
147
+ - Deploy a GPT model (GPT-3.5-turbo or GPT-4)
148
+ - Note your deployment name
149
+
150
+ 3. **Update Configuration**:
151
+ - Add your credentials to the `.env` file
152
+ - Ensure the API version matches your Azure OpenAI service
153
+
154
+ ### Environment Variables
155
+
156
+ | Variable | Description | Example |
157
+ |----------|-------------|---------|
158
+ | `AZURE_OPENAI_API_KEY` | Your Azure OpenAI API key | `abc123...` |
159
+ | `AZURE_OPENAI_ENDPOINT` | Your Azure OpenAI endpoint URL | `https://example.openai.azure.com/` |
160
+ | `OPENAI_API_VERSION` | API version for Azure OpenAI | `2024-02-01` |
161
+ | `AZURE_DEPLOYMENT` | Name of your model deployment | `gpt-35-turbo` |
162
+
163
+ ## 🚀 Usage
164
+
165
+ ### Running the Application
166
+
167
+ 1. **Start the Streamlit App**:
168
+ ```bash
169
+ streamlit run app.py
170
+ ```
171
+
172
+ 2. **Access the Interface**:
173
+ - Open your browser to `http://localhost:8501`
174
+ - Configure settings in the sidebar (post length, tone, hashtags)
175
+ - Enter a topic in the main input area
176
+ - Add optional context for better results
177
+ - Click "Generate LinkedIn Post" to create content
178
+ - View the generated post in a styled container
179
+ - Use example topics for quick testing
180
+
181
+ ### UI Features
182
+
183
+ - **📊 Sidebar Configuration**:
184
+ - **🔑 Azure Credentials Management**:
185
+ - Manual credential input with password masking
186
+ - Real-time credential validation for Azure OpenAI format
187
+ - Connection testing functionality
188
+ - Credential persistence across sessions
189
+ - API status indicators with masked API keys
190
+ - Post length options (Short, Medium, Long)
191
+ - Tone selection (Professional, Casual, Enthusiastic, etc.)
192
+ - Hashtag and call-to-action toggles
193
+ - Hashtag suggestions based on topic
194
+
195
+ - **🎯 Main Interface**:
196
+ - Large text area for topic input
197
+ - Optional context expander
198
+ - Generate and Clear buttons
199
+ - Tips for better posts
200
+ - Post analytics tracking
201
+ - Example topics for quick testing
202
+
203
+ - **📄 Output Display**:
204
+ - Styled output container
205
+ - Success/error messages
206
+ - Copy to clipboard functionality
207
+
208
+ ### Example Usage
209
+
210
+ **Input**: "Artificial Intelligence trends in 2024"
211
+
212
+ **Output**: A professionally formatted LinkedIn post including:
213
+ - Current AI trends and insights
214
+ - Professional tone suitable for LinkedIn
215
+ - Actionable insights for professionals
216
+ - Engaging call-to-action or discussion points
217
+
218
+ ### Command Line Testing
219
+
220
+ You can also test individual components:
221
+
222
+ ```bash
223
+ # Test LLM configuration
224
+ python tests/llm_test.py
225
+
226
+ # Test web search functionality
227
+ python tests/search_test.py
228
+
229
+ # Test the complete agent workflow
230
+ python utils/run_agent.py
231
+ ```
232
+
233
+ ## 📁 Project Structure
234
+
235
+ ```
236
+ post-generator-agent/
237
+ ├── 📄 app.py # Main Streamlit application
238
+ ├── 📁 config/
239
+ │ ├── configs.py # Environment configuration
240
+ │ └── generation_config.py # Generation settings and prompts
241
+ ├── 📁 helper/
242
+ │ ├── configure_llm.py # Azure OpenAI LLM setup
243
+ │ ├── graphs.py # LangGraph workflow definition
244
+ │ ├── model_load.py # Graph state model
245
+ │ └── web_search_agent.py # DuckDuckGo search configuration
246
+ ├── 📁 loggers/
247
+ │ └── logger.py # Logging configuration
248
+ ├── 📁 template/
249
+ │ ├── response_prompt.py # Content generation prompts
250
+ │ ├── router_prompt.py # Query routing prompts
251
+ │ └── transform_prompt.py # Query transformation prompts
252
+ ├── 📁 tests/
253
+ │ ├── llm_test.py # LLM functionality tests
254
+ │ └── search_test.py # Search functionality tests
255
+ ├── 📁 utils/
256
+ │ ├── credential_manager.py # Azure credentials management
257
+ │ ├── generate_content.py # Content generation logic
258
+ │ ├── query_transform.py # Query transformation logic
259
+ │ ├── question_route.py # Question routing logic
260
+ │ ├── run_agent.py # Main agent execution
261
+ │ └── search_web_content.py # Web search implementation
262
+ ├── 📄 requirements.txt # Python dependencies
263
+ └── 📄 README.md # This documentation
264
+ ```
265
 
266
+ ## 📖 API Documentation
267
 
268
+ ### Core Functions
269
+
270
+ #### `execute_agent(query: str) -> str`
271
+ Main function to execute the LangGraph workflow.
272
+
273
+ **Parameters:**
274
+ - `query` (str): The topic or question for LinkedIn post generation
275
+
276
+ **Returns:**
277
+ - `str`: Generated LinkedIn post content
278
+
279
+ **Example:**
280
+ ```python
281
+ from utils.run_agent import execute_agent
282
+
283
+ result = execute_agent("Latest trends in machine learning")
284
+ print(result)
285
+ ```
286
+
287
+ #### `route_question(state: dict) -> str`
288
+ Routes questions to appropriate workflow paths.
289
+
290
+ **Parameters:**
291
+ - `state` (dict): Current graph state containing the question
292
+
293
+ **Returns:**
294
+ - `str`: Routing decision ("websearch" or "generate")
295
+
296
+ #### `transform_query(state: dict) -> dict`
297
+ Transforms user queries for optimal web search.
298
+
299
+ **Parameters:**
300
+ - `state` (dict): Current graph state
301
+
302
+ **Returns:**
303
+ - `dict`: Updated state with optimized search query
304
+
305
+ #### `web_search(state: dict) -> dict`
306
+ Performs web search using DuckDuckGo.
307
+
308
+ **Parameters:**
309
+ - `state` (dict): Current graph state with search query
310
+
311
+ **Returns:**
312
+ - `dict`: Updated state with search results context
313
+
314
+ #### `generate(state: dict) -> dict`
315
+ Generates final LinkedIn post content.
316
+
317
+ **Parameters:**
318
+ - `state` (dict): Current graph state with question and optional context
319
+
320
+ **Returns:**
321
+ - `dict`: Updated state with generated content
322
+
323
+ ## 🔧 Technical Details
324
+
325
+ ### LangGraph Workflow
326
+
327
+ The application uses **LangGraph** to create a stateful workflow:
328
+
329
+ ```python
330
+ # Graph state definition
331
+ class GraphState(TypedDict):
332
+ question: str # Original user question
333
+ generation: str # Generated LinkedIn post
334
+ search_query: str # Optimized search query
335
+ context: str # Web search results
336
+ ```
337
+
338
+ ### Prompt Engineering
339
+
340
+ The system uses three specialized prompt templates:
341
+
342
+ 1. **Router Prompt**: Determines routing strategy
343
+ 2. **Transform Prompt**: Optimizes queries for web search
344
+ 3. **Response Prompt**: Generates LinkedIn-optimized content
345
+
346
+ ### Logging System
347
+
348
+ Comprehensive logging with:
349
+ - **File Logging**: Timestamped log files in `logs/` directory
350
+ - **Console Logging**: Real-time feedback during execution
351
+ - **Multiple Log Levels**: DEBUG, INFO, WARNING, ERROR
352
+
353
+ ### Error Handling
354
+
355
+ Robust error handling including:
356
+ - Graceful fallbacks when web search fails
357
+ - Retry mechanisms for API calls
358
+ - Comprehensive error logging
359
+
360
+ ## 🧪 Testing
361
+
362
+ ### Running Tests
363
+
364
+ ```bash
365
+ # Test Azure OpenAI connection
366
+ python tests/llm_test.py
367
+
368
+ # Test web search functionality
369
+ python tests/search_test.py
370
+
371
+ # Test complete workflow
372
+ python utils/run_agent.py
373
+ ```
374
+
375
+ ### Test Coverage
376
+
377
+ - **LLM Integration**: Validates Azure OpenAI connectivity
378
+ - **Web Search**: Tests DuckDuckGo search functionality
379
+ - **End-to-End**: Complete workflow validation
380
+
381
+ ## 🤝 Contributing
382
+
383
+ We welcome contributions! Please follow these steps:
384
+
385
+ 1. **Fork the Repository**
386
+ 2. **Create Feature Branch**: `git checkout -b feature/amazing-feature`
387
+ 3. **Commit Changes**: `git commit -m 'Add amazing feature'`
388
+ 4. **Push to Branch**: `git push origin feature/amazing-feature`
389
+ 5. **Open Pull Request**
390
+
391
+ ### Development Guidelines
392
+
393
+ - Follow PEP 8 style guidelines
394
+ - Add comprehensive docstrings
395
+ - Include tests for new features
396
+ - Update documentation as needed
397
+
398
+ ## 📄 License
399
+
400
+ This project is licensed under the MIT License. See the `LICENSE` file for details.
401
+
402
+ ## 🆘 Support
403
+
404
+ For support and questions:
405
+
406
+ - **Issues**: Open an issue on GitHub
407
+ - **Documentation**: Check this README and code comments
408
+ - **Community**: Join our discussions
409
+
410
+ ## 🙏 Acknowledgments
411
+
412
+ - **LangChain Team** for the excellent LLM framework
413
+ - **Azure OpenAI** for powerful language models
414
+ - **Streamlit** for the intuitive web framework
415
+ - **DuckDuckGo** for search capabilities
416
+
417
+ ---
418
 
419
+ **Built with ❤️ using LangGraph and Azure OpenAI**
 
app.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import time
3
+ import os
4
+ from utils.run_agent import execute_agent
5
+ from config.configs import AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_DEPLOYMENT
6
+ from config.generation_config import get_generation_prompt, get_hashtag_suggestions, DEFAULT_SETTINGS
7
+ from utils.credential_manager import CredentialManager, format_endpoint_url, mask_api_key
8
+
9
+ st.set_page_config(
10
+ page_title="LinkedIn Post Generator",
11
+ page_icon="📝",
12
+ layout="wide",
13
+ initial_sidebar_state="expanded"
14
+ )
15
+
16
+ st.markdown("""
17
+ <style>
18
+ .main-header {
19
+ background: linear-gradient(90deg, #0077B5, #00A0DC);
20
+ padding: 2rem;
21
+ border-radius: 10px;
22
+ color: white;
23
+ text-align: center;
24
+ margin-bottom: 2rem;
25
+ }
26
+
27
+ .sidebar-header {
28
+ background: linear-gradient(135deg, #f8f9fa, #e9ecef);
29
+ padding: 1rem;
30
+ border-radius: 8px;
31
+ margin-bottom: 1rem;
32
+ }
33
+
34
+ .config-section {
35
+ background: #f8f9fa;
36
+ padding: 1rem;
37
+ border-radius: 8px;
38
+ margin-bottom: 1rem;
39
+ border-left: 4px solid #0077B5;
40
+ }
41
+
42
+ .output-container {
43
+ background: #f8f9fa;
44
+ padding: 1.5rem;
45
+ border-radius: 10px;
46
+ border: 1px solid #dee2e6;
47
+ margin-top: 1rem;
48
+ }
49
+
50
+ .loading-spinner {
51
+ text-align: center;
52
+ padding: 2rem;
53
+ }
54
+
55
+ .success-message {
56
+ background: #d4edda;
57
+ color: #155724;
58
+ padding: 1rem;
59
+ border-radius: 8px;
60
+ border: 1px solid #c3e6cb;
61
+ margin: 1rem 0;
62
+ }
63
+
64
+ .error-message {
65
+ background: #f8d7da;
66
+ color: #721c24;
67
+ padding: 1rem;
68
+ border-radius: 8px;
69
+ border: 1px solid #f5c6cb;
70
+ margin: 1rem 0;
71
+ }
72
+
73
+ .credentials-section {
74
+ background: #fff3cd;
75
+ padding: 1rem;
76
+ border-radius: 8px;
77
+ border: 1px solid #ffeaa7;
78
+ margin-bottom: 1rem;
79
+ }
80
+ </style>
81
+ """, unsafe_allow_html=True)
82
+
83
+ # Initialize session state for credentials
84
+ if 'azure_api_key' not in st.session_state:
85
+ st.session_state.azure_api_key = ""
86
+ if 'azure_endpoint' not in st.session_state:
87
+ st.session_state.azure_endpoint = ""
88
+ if 'azure_deployment' not in st.session_state:
89
+ st.session_state.azure_deployment = ""
90
+ if 'azure_api_version' not in st.session_state:
91
+ st.session_state.azure_api_version = "2024-02-01"
92
+
93
+ with st.sidebar:
94
+ st.title("⚙️ Configuration")
95
+
96
+ st.subheader("🔑 Azure OpenAI Credentials")
97
+
98
+ st.info("💡 Enter your Azure OpenAI credentials below:")
99
+
100
+ api_key = st.text_input(
101
+ "Azure OpenAI API Key",
102
+ value=st.session_state.azure_api_key,
103
+ type="password",
104
+ help="Your Azure OpenAI API key"
105
+ )
106
+
107
+ endpoint = st.text_input(
108
+ "Azure OpenAI Endpoint",
109
+ value=st.session_state.azure_endpoint,
110
+ placeholder="https://your-resource.openai.azure.com/",
111
+ help="Your Azure OpenAI endpoint URL"
112
+ )
113
+
114
+ deployment = st.text_input(
115
+ "Deployment Name",
116
+ value=st.session_state.azure_deployment,
117
+ placeholder="gpt-35-turbo",
118
+ help="Name of your deployed model"
119
+ )
120
+
121
+ api_version = st.text_input(
122
+ "API Version",
123
+ value=st.session_state.azure_api_version,
124
+ placeholder="2024-02-01",
125
+ help="Azure OpenAI API version"
126
+ )
127
+
128
+ if st.button("💾 Save Credentials", use_container_width=True):
129
+ credentials = {
130
+ 'api_key': api_key,
131
+ 'endpoint': format_endpoint_url(endpoint) if endpoint else '',
132
+ 'deployment': deployment,
133
+ 'api_version': api_version
134
+ }
135
+
136
+ is_valid, message = CredentialManager.validate_credentials(credentials)
137
+
138
+ if is_valid:
139
+ CredentialManager.save_credentials_to_session(
140
+ credentials['api_key'],
141
+ credentials['endpoint'],
142
+ credentials['deployment'],
143
+ credentials['api_version']
144
+ )
145
+ st.success("✅ Credentials saved successfully!")
146
+ st.rerun()
147
+ else:
148
+ st.error(f"❌ {message}")
149
+
150
+ if st.button("🗑️ Clear Credentials", use_container_width=True):
151
+ CredentialManager.clear_session_credentials()
152
+ st.success("✅ Credentials cleared!")
153
+ st.rerun()
154
+
155
+ st.markdown('</div>', unsafe_allow_html=True)
156
+
157
+ st.subheader("🔍 API Status")
158
+
159
+ current_credentials = CredentialManager.get_credentials_from_session()
160
+
161
+ if current_credentials['api_key']:
162
+ masked_key = mask_api_key(current_credentials['api_key'])
163
+ st.success(f"✅ API Key: {masked_key}")
164
+ else:
165
+ st.error("❌ API Key not found")
166
+
167
+ if current_credentials['endpoint']:
168
+ st.success(f"✅ Endpoint: {current_credentials['endpoint']}")
169
+ else:
170
+ st.warning("⚠️ Endpoint not configured")
171
+
172
+ if current_credentials['deployment']:
173
+ st.success(f"✅ Deployment: {current_credentials['deployment']}")
174
+ else:
175
+ st.warning("⚠️ Deployment not configured")
176
+
177
+ if all(current_credentials.values()):
178
+ if st.button("🧪 Test Connection", use_container_width=True):
179
+ with st.spinner("Testing Azure OpenAI connection..."):
180
+ try:
181
+ CredentialManager.set_environment_variables(current_credentials)
182
+
183
+ test_output = execute_agent("Test connection")
184
+ st.success("✅ Connection successful! Azure OpenAI is working.")
185
+ except Exception as e:
186
+ st.error(f"❌ Connection failed: {str(e)}")
187
+
188
+ st.subheader("🎯 Generation Settings")
189
+
190
+ post_length = st.selectbox(
191
+ "Post Length",
192
+ ["Short (100-200 words)", "Medium (200-400 words)", "Long (400-600 words)"],
193
+ index=1
194
+ )
195
+
196
+ tone = st.selectbox(
197
+ "Tone",
198
+ ["Professional", "Casual", "Enthusiastic", "Educational", "Inspirational"],
199
+ index=0
200
+ )
201
+
202
+ include_hashtags = st.checkbox("Include Hashtags", value=True)
203
+ include_call_to_action = st.checkbox("Include Call to Action", value=True)
204
+
205
+ st.subheader("ℹ️ About")
206
+ st.write("**LinkedIn Post Generator**")
207
+ st.write("Powered by LangGraph and Azure OpenAI")
208
+ st.write("Version: 1.0.0")
209
+
210
+ st.title("📝 LinkedIn Post Generator")
211
+ st.markdown("Transform your ideas into engaging LinkedIn posts using AI")
212
+
213
+ col1, col2 = st.columns([2, 1])
214
+
215
+ with col1:
216
+ st.subheader("🎯 What would you like to write about?")
217
+
218
+ user_input = st.text_area(
219
+ "Enter your topic, idea, or subject matter:",
220
+ value=st.session_state.get("user_input", ""),
221
+ placeholder="e.g., 'The future of artificial intelligence in healthcare' or 'Tips for remote team collaboration'",
222
+ height=100,
223
+ help="Be specific about your topic to get better results"
224
+ )
225
+
226
+ if user_input and include_hashtags:
227
+ suggested_hashtags = get_hashtag_suggestions(user_input)
228
+ st.info(f"💡 **Suggested hashtags for '{user_input}':**")
229
+ hashtag_cols = st.columns(3)
230
+ for i, hashtag in enumerate(suggested_hashtags[:3]):
231
+ with hashtag_cols[i]:
232
+ st.write(f"• {hashtag}")
233
+
234
+ with st.expander("🔍 Additional Context (Optional)"):
235
+ context = st.text_area(
236
+ "Add any specific details, target audience, or context:",
237
+ placeholder="e.g., 'Target audience: Tech professionals, Focus on practical applications'",
238
+ height=80
239
+ )
240
+
241
+ col1_btn, col2_btn = st.columns([3, 1])
242
+
243
+ with col1_btn:
244
+ generate_clicked = st.button("🚀 Generate LinkedIn Post", type="primary", use_container_width=True)
245
+
246
+ with col2_btn:
247
+ clear_clicked = st.button("🗑️ Clear", use_container_width=True)
248
+
249
+ if clear_clicked:
250
+ st.session_state.user_input = ""
251
+ st.session_state.example_topic = ""
252
+ st.rerun()
253
+
254
+ if generate_clicked:
255
+ if user_input:
256
+ current_credentials = CredentialManager.get_credentials_from_session()
257
+
258
+ if not all(current_credentials.values()):
259
+ st.error("❌ Please configure your Azure OpenAI credentials in the sidebar first.")
260
+ else:
261
+ CredentialManager.set_environment_variables(current_credentials)
262
+
263
+ with st.spinner("🤖 AI is crafting your perfect LinkedIn post..."):
264
+ try:
265
+ settings = {
266
+ "post_length": post_length,
267
+ "tone": tone,
268
+ "include_hashtags": include_hashtags,
269
+ "include_call_to_action": include_call_to_action
270
+ }
271
+
272
+ # Generate formatted prompt using the configuration
273
+ full_query = get_generation_prompt(user_input, context, settings)
274
+
275
+ # Execute the agent
276
+ output = execute_agent(full_query)
277
+
278
+ # Update session state
279
+ if 'generated_posts' not in st.session_state:
280
+ st.session_state.generated_posts = 0
281
+ st.session_state.generated_posts += 1
282
+
283
+ st.success("✅ Your LinkedIn post has been generated successfully!")
284
+
285
+ st.subheader("📄 Generated LinkedIn Post")
286
+ st.markdown("---")
287
+ st.write(output)
288
+ st.markdown("---")
289
+
290
+ if st.button("📋 Copy to Clipboard", use_container_width=True):
291
+ st.write("📋 Post copied to clipboard!")
292
+
293
+ except Exception as e:
294
+ st.error(f"❌ An error occurred: {str(e)}")
295
+ else:
296
+ st.warning("⚠️ Please enter a topic to generate a post.")
297
+
298
+ with col2:
299
+ st.subheader("💡 Tips for Better Posts")
300
+
301
+ tips = [
302
+ "🎯 Be specific about your topic",
303
+ "👥 Consider your target audience",
304
+ "📊 Include relevant statistics if applicable",
305
+ "💬 Ask engaging questions",
306
+ "🔗 Reference current trends or news",
307
+ "📝 Keep paragraphs short and readable",
308
+ "🎨 Use emojis sparingly but effectively"
309
+ ]
310
+
311
+ for tip in tips:
312
+ st.write(tip)
313
+
314
+ st.markdown("---")
315
+
316
+ st.subheader("📈 Post Analytics")
317
+ if 'generated_posts' not in st.session_state:
318
+ st.session_state.generated_posts = 0
319
+
320
+ st.metric("Posts Generated", st.session_state.generated_posts)
321
+
322
+ st.subheader("🎯 Example Topics")
323
+ example_topics = [
324
+ "AI in healthcare",
325
+ "Remote work productivity",
326
+ "Digital transformation",
327
+ "Leadership skills",
328
+ "Innovation in business"
329
+ ]
330
+
331
+ for topic in example_topics:
332
+ if st.button(topic, key=f"example_{topic}"):
333
+ st.session_state.example_topic = topic
334
+ st.session_state.user_input = topic
335
+ st.rerun()
336
+
337
+ st.markdown("---")
338
+ st.markdown(
339
+ """
340
+ <div style='text-align: center; color: #666; padding: 1rem;'>
341
+ <p>Made with ❤️ using Streamlit, LangGraph, and Azure OpenAI</p>
342
+ <p>For questions or support, please check the documentation</p>
343
+ </div>
344
+ """,
345
+ unsafe_allow_html=True
346
+ )
config/configs.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ sys.dont_write_bytecode = True
3
+
4
+ import os
5
+ from dotenv import load_dotenv
6
+
7
+ load_dotenv()
8
+
9
+ AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
10
+
11
+ OPENAI_API_VERSION = os.getenv("OPENAI_API_VERSION")
12
+
13
+ AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
14
+
15
+ AZURE_DEPLOYMENT = os.getenv("AZURE_DEPLOYMENT")
config/generation_config.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Generation configuration settings for the LinkedIn Post Generator
3
+ """
4
+
5
+ # Post length configurations
6
+ POST_LENGTHS = {
7
+ "Short (100-200 words)": {"min_words": 100, "max_words": 200},
8
+ "Medium (200-400 words)": {"min_words": 200, "max_words": 400},
9
+ "Long (400-600 words)": {"min_words": 400, "max_words": 600}
10
+ }
11
+
12
+ # Tone configurations
13
+ TONES = {
14
+ "Professional": "formal, business-like, authoritative",
15
+ "Casual": "friendly, conversational, approachable",
16
+ "Enthusiastic": "energetic, passionate, motivating",
17
+ "Educational": "informative, instructional, helpful",
18
+ "Inspirational": "motivational, uplifting, encouraging"
19
+ }
20
+
21
+ # Default settings
22
+ DEFAULT_SETTINGS = {
23
+ "post_length": "Medium (200-400 words)",
24
+ "tone": "Professional",
25
+ "include_hashtags": True,
26
+ "include_call_to_action": True,
27
+ "max_hashtags": 5,
28
+ "include_emoji": True
29
+ }
30
+
31
+ # Hashtag suggestions for common topics
32
+ HASHTAG_SUGGESTIONS = {
33
+ "ai": ["#AI", "#ArtificialIntelligence", "#MachineLearning", "#Tech", "#Innovation"],
34
+ "leadership": ["#Leadership", "#Management", "#Business", "#ProfessionalDevelopment", "#Career"],
35
+ "remote_work": ["#RemoteWork", "#WorkFromHome", "#Productivity", "#DigitalTransformation", "#FutureOfWork"],
36
+ "healthcare": ["#Healthcare", "#HealthTech", "#DigitalHealth", "#Innovation", "#PatientCare"],
37
+ "business": ["#Business", "#Entrepreneurship", "#Strategy", "#Growth", "#Success"]
38
+ }
39
+
40
+ def get_generation_prompt(user_input, context="", settings=None):
41
+ """
42
+ Generate a formatted prompt for the AI agent based on user input and settings
43
+ """
44
+ if settings is None:
45
+ settings = DEFAULT_SETTINGS
46
+
47
+ prompt = f"Create a LinkedIn post about: {user_input}"
48
+
49
+ if context:
50
+ prompt += f"\n\nContext: {context}"
51
+
52
+ # Add length requirements
53
+ length_config = POST_LENGTHS.get(settings.get("post_length", DEFAULT_SETTINGS["post_length"]))
54
+ prompt += f"\n\nLength: {length_config['min_words']}-{length_config['max_words']} words"
55
+
56
+ # Add tone requirements
57
+ tone = settings.get("tone", DEFAULT_SETTINGS["tone"])
58
+ tone_description = TONES.get(tone, TONES["Professional"])
59
+ prompt += f"\n\nTone: {tone_description}"
60
+
61
+ # Add hashtag requirements
62
+ if settings.get("include_hashtags", True):
63
+ max_hashtags = settings.get("max_hashtags", 5)
64
+ prompt += f"\n\nInclude {max_hashtags} relevant hashtags at the end"
65
+
66
+ # Add call to action requirements
67
+ if settings.get("include_call_to_action", True):
68
+ prompt += "\n\nInclude a call to action at the end"
69
+
70
+ # Add emoji requirements
71
+ if settings.get("include_emoji", True):
72
+ prompt += "\n\nUse emojis sparingly but effectively to enhance readability"
73
+
74
+ # Add LinkedIn-specific formatting
75
+ prompt += "\n\nFormat the post for LinkedIn with proper paragraph breaks and engaging opening"
76
+
77
+ return prompt
78
+
79
+ def get_hashtag_suggestions(topic):
80
+ """
81
+ Get hashtag suggestions based on the topic
82
+ """
83
+ topic_lower = topic.lower()
84
+
85
+ for key, hashtags in HASHTAG_SUGGESTIONS.items():
86
+ if key in topic_lower:
87
+ return hashtags
88
+
89
+ # Return general hashtags if no specific match
90
+ return ["#LinkedIn", "#Professional", "#Networking", "#Career", "#Business"]
helper/configure_llm.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
sys.dont_write_bytecode = True

from langchain_openai import AzureChatOpenAI
from config.configs import AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION, AZURE_DEPLOYMENT
import os


def _export(name, value):
    """Export a credential into the environment, failing with a clear message
    when it was never configured (bug fix: os.environ rejects a None value
    with an opaque TypeError when the .env variable is missing)."""
    if value is None:
        raise EnvironmentError(f"Missing required configuration value: {name}")
    os.environ[name] = value


_export('AZURE_OPENAI_API_KEY', AZURE_OPENAI_API_KEY)
_export('OPENAI_API_VERSION', OPENAI_API_VERSION)
_export('AZURE_OPENAI_ENDPOINT', AZURE_OPENAI_ENDPOINT)

# Shared chat-model instance used by the template/* prompt chains.
LLM = AzureChatOpenAI(
    azure_deployment=AZURE_DEPLOYMENT,
    temperature=0,       # deterministic output for reproducible posts
    max_tokens=None,     # no explicit completion cap
    timeout=None,
    max_retries=2,
)
helper/graphs.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langgraph.graph import END, StateGraph
from helper.model_load import GraphState
from utils.search_web_content import web_search
from utils.query_transform import transform_query
from utils.generate_content import generate
from utils.question_route import route_question

import sys
sys.dont_write_bytecode = True  # keep the repo free of .pyc files

# Build the LangGraph workflow over the shared GraphState schema.
builder = StateGraph(GraphState)

builder.add_node("web_search", web_search)  # fetch web context via DuckDuckGo

builder.add_node("transform_query", transform_query)  # rewrite question into a search query

builder.add_node("generate", generate)  # produce the final LinkedIn post

# Conditional entry: route_question returns "websearch" or "generate",
# which maps to the first node of the chosen branch.
builder.set_conditional_entry_point(
    route_question,
    {
        "websearch": "transform_query",
        "generate": "generate"
    },
)

# Web-search branch: transform the query, run the search, then generate.
builder.add_edge("transform_query", "web_search")

builder.add_edge("web_search", "generate")

builder.add_edge("generate", END)

# Compiled agent consumed by utils/run_agent.py.
local_agent = builder.compile()
helper/model_load.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypedDict
2
+ import sys
3
+ sys.dont_write_bytecode = True
4
+
5
class GraphState(TypedDict, total=False):
    """
    Represents the state of our graph.

    total=False because keys are filled in incrementally as nodes run:
    'search_query' and 'context' only exist after the web-search branch
    (utils/generate_content.py already treats 'context' as optional).

    Attributes:
        question: the user's question
        generation: LLM generation (final post text)
        search_query: revised question for web search
        context: web_search result
    """
    question: str
    generation: str
    search_query: str
    context: str
helper/web_search_agent.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper

# Bug fix: the sys import / dont_write_bytecode pair was duplicated.
import sys
sys.dont_write_bytecode = True

# DuckDuckGo wrapper: return up to 25 results per query for richer context.
WRAPPER = DuckDuckGoSearchAPIWrapper(
    max_results=25
)

# Shared search tool consumed by utils/search_web_content.py.
WEB_SEARCH_TOOL = DuckDuckGoSearchRun(
    api_wrapper=WRAPPER
)
loggers/logger.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import os
from datetime import datetime

import sys
sys.dont_write_bytecode = True  # keep the repo free of .pyc files

# All log files live under ./logs (created on first import).
log_dir = "logs"
os.makedirs(log_dir, exist_ok=True)

# One timestamped file per process start, e.g. logs/log_2024-01-01_12-00-00.log
log_filename = datetime.now().strftime("log_%Y-%m-%d_%H-%M-%S.log")
log_filepath = os.path.join(log_dir, log_filename)

# Single named logger shared by the whole project (imported by utils/*).
logger = logging.getLogger("project_logger")
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# File handler records everything (DEBUG and up).
file_handler = logging.FileHandler(log_filepath)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)

# Console handler is quieter (INFO and up).
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)

# Guard against duplicate handlers when this module is imported repeatedly.
if not logger.handlers:
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
requirements.txt CHANGED
@@ -1,3 +1,9 @@
1
- altair
2
- pandas
3
- streamlit
 
 
 
 
 
 
 
1
+ streamlit
2
+ langchain
3
+ langchain-core
4
+ langchain-community
5
+ langchain-openai
6
+ langgraph
7
+ langsmith
8
+ duckduckgo-search
9
+ python-dotenv
src/streamlit_app.py DELETED
@@ -1,40 +0,0 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
- import streamlit as st
5
-
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
template/response_prompt.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from helper.configure_llm import LLM

import sys
sys.dont_write_bytecode = True

llm = LLM

# Prompt for the final post-generation step.
# NOTE(review): the <|...|> markers are Llama chat-template tokens; Azure
# OpenAI chat models treat them as plain text — confirm they are intended.
GENERATE_TEMPLATE = PromptTemplate(
    # Bug fix: "engaging and information" -> "engaging and informative".
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an AI assistant that synthesizes web search results to create engaging and informative LinkedIn posts that are clear, concise and appealing to professionals on LinkedIn.
    Make sure the tone is professional yet approachable, and include actionable insights, tips or thought-provoking points that would resonate with the LinkedIn audience.
    If relevant, include a call-to-action or a question to encourage engagement. Strictly use the following pieces of web search context to answer the question.
    If you don't know, just say that you don't know. Only make direct references to material if provided in the context.

    <|eot_id|>

    <|start_header_id|>user<|end_header_id|>

    Question: {question}
    Web Search Context: {context}
    Answer:

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>""",
    input_variables=["question", "context"],
)

# question + context -> prompt -> LLM -> plain string (the post text).
GENERATE_CHAIN = GENERATE_TEMPLATE | llm | StrOutputParser()
template/router_prompt.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from helper.configure_llm import LLM

import sys
sys.dont_write_bytecode = True

llm = LLM

# Routing prompt: the model must answer {"choice": "web_search" | "generate"}.
# NOTE(review): the <|...|> markers are Llama chat-template tokens; Azure
# OpenAI chat models treat them as plain text — confirm they are intended.
router_prompt = PromptTemplate(
    # Bug fix: "premable" -> "preamble".
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an expert at routing a user question to either the generation stage or web search.
    Use the web search for questions that require more context for a better answer or recent events.
    Otherwise, you can skip and go straight to the generation phase to respond.
    You do not need to be stringent with the keywords in the question related to these topics.
    Give a binary choice 'web_search' or 'generate' based on the question.
    Return the JSON with a single key 'choice' with no preamble or explanation.

    Question to route: {question}

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>

    """,
    input_variables=["question"],
)

# question -> router prompt -> LLM -> {'choice': ...} dict.
QUESTION_ROUTER = router_prompt | llm | JsonOutputParser()
template/transform_prompt.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from helper.configure_llm import LLM

import sys
sys.dont_write_bytecode = True

llm = LLM

# Query-rewriting prompt: the model must answer {"query": "<search string>"}.
# NOTE(review): the template ends with a user header instead of an assistant
# header (unlike router_prompt) — confirm this is intended.
QUERY_PROMPT = PromptTemplate(
    # Bug fixes: "a expert" -> "an expert", "often that not" -> "often than
    # not", "premable" -> "preamble".
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an expert at crafting web search queries for research questions.
    More often than not, a user will ask a basic question that they wish to learn more about; however, it might not be in the best format.
    Reword their query to be the most effective web search string possible.
    Return the JSON with a single key 'query' with no preamble or explanation.

    Question to transform: {question}

    <|eot_id|>

    <|start_header_id|>user<|end_header_id|>

    """,
    input_variables=["question"],
)

# question -> rewrite prompt -> LLM -> {'query': ...} dict.
QUERY_CHAIN = QUERY_PROMPT | llm | JsonOutputParser()
tests/llm_test.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Manual smoke test for the Azure OpenAI chat model (run directly)."""
from langchain_openai import AzureChatOpenAI
from config.configs import AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, OPENAI_API_VERSION, AZURE_DEPLOYMENT
import os

# Bug fix: assigning None into os.environ raises an opaque TypeError when a
# variable is missing from the .env file — fail with a clear message instead
# (mirrors helper/configure_llm.py).
for _name, _value in {
    'AZURE_OPENAI_API_KEY': AZURE_OPENAI_API_KEY,
    'OPENAI_API_VERSION': OPENAI_API_VERSION,
    'AZURE_OPENAI_ENDPOINT': AZURE_OPENAI_ENDPOINT,
}.items():
    if _value is None:
        raise EnvironmentError(f"Missing required configuration value: {_name}")
    os.environ[_name] = _value

LLM = AzureChatOpenAI(
    azure_deployment=AZURE_DEPLOYMENT,
    temperature=0,       # deterministic output
    max_tokens=None,     # no completion cap
    timeout=None,
    max_retries=2,
)

if __name__ == "__main__":
    response = LLM.invoke("hello, how are you")
    print(response)
tests/search_test.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Manual smoke test for the DuckDuckGo search tool (hits the network).
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper

# Mirrors helper/web_search_agent.py: wrapper capped at 25 results.
wrapper = DuckDuckGoSearchAPIWrapper(max_results=25)
web_search_tool = DuckDuckGoSearchRun(api_wrapper=wrapper)

# NOTE(review): this runs at import time; the file is intended to be
# executed directly as a script.
rep = web_search_tool.invoke("Who won 2025 Champions Trophy")
print(rep)
utils/credential_manager.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Credential management utilities for Azure OpenAI
3
+ """
4
+
5
+ import os
6
+ import streamlit as st
7
+ from typing import Dict, Optional, Tuple
8
+
9
class CredentialManager:
    """Manages Azure OpenAI credentials for the application.

    Credentials may come from two sources: process environment variables or
    Streamlit session state. All methods are stateless staticmethods.
    """

    @staticmethod
    def get_credentials_from_env() -> Dict[str, str]:
        """Get credentials from environment variables (empty strings when unset)."""
        return {
            'api_key': os.getenv('AZURE_OPENAI_API_KEY', ''),
            'endpoint': os.getenv('AZURE_OPENAI_ENDPOINT', ''),
            'deployment': os.getenv('AZURE_DEPLOYMENT', ''),
            'api_version': os.getenv('OPENAI_API_VERSION', '2024-02-01')
        }

    @staticmethod
    def get_credentials_from_session() -> Dict[str, str]:
        """Get credentials from Streamlit session state (empty strings when unset)."""
        return {
            'api_key': st.session_state.get('azure_api_key', ''),
            'endpoint': st.session_state.get('azure_endpoint', ''),
            'deployment': st.session_state.get('azure_deployment', ''),
            'api_version': st.session_state.get('azure_api_version', '2024-02-01')
        }

    @staticmethod
    def save_credentials_to_session(api_key: str, endpoint: str, deployment: str, api_version: str = '2024-02-01'):
        """Save credentials to Streamlit session state."""
        st.session_state.azure_api_key = api_key
        st.session_state.azure_endpoint = endpoint
        st.session_state.azure_deployment = deployment
        st.session_state.azure_api_version = api_version

    @staticmethod
    def set_environment_variables(credentials: Dict[str, str]):
        """Export the given credentials as Azure OpenAI environment variables."""
        os.environ['AZURE_OPENAI_API_KEY'] = credentials['api_key']
        os.environ['AZURE_OPENAI_ENDPOINT'] = credentials['endpoint']
        os.environ['AZURE_DEPLOYMENT'] = credentials['deployment']
        os.environ['OPENAI_API_VERSION'] = credentials['api_version']

    @staticmethod
    def validate_credentials(credentials: Dict[str, str]) -> Tuple[bool, str]:
        """Validate Azure OpenAI credentials.

        Returns:
            (is_valid, message). Missing dict keys read as empty strings
            instead of raising KeyError (bug fix), and endpoints are accepted
            with or without the trailing slash (bug fix: the strict endswith
            check rejected endpoints that format_endpoint_url later repairs).
        """
        api_key = credentials.get('api_key', '')
        endpoint = credentials.get('endpoint', '')
        deployment = credentials.get('deployment', '')

        if not api_key:
            return False, "API Key is required"

        if not endpoint:
            return False, "Endpoint is required"

        if not deployment:
            return False, "Deployment name is required"

        # Basic format validation for Azure OpenAI
        if len(api_key) < 10:
            return False, "API Key seems too short"

        if not endpoint.startswith('https://'):
            return False, "Endpoint should be a valid HTTPS URL"

        if not endpoint.rstrip('/').endswith('.openai.azure.com'):
            return False, "Endpoint should end with '.openai.azure.com/'"

        return True, "Credentials are valid"

    @staticmethod
    def get_current_credentials(use_env: bool = True) -> Dict[str, str]:
        """Get current credentials from the environment (default) or the session."""
        if use_env:
            return CredentialManager.get_credentials_from_env()
        return CredentialManager.get_credentials_from_session()

    @staticmethod
    def clear_session_credentials():
        """Remove any stored credentials from Streamlit session state."""
        for _key in ('azure_api_key', 'azure_endpoint', 'azure_deployment', 'azure_api_version'):
            if _key in st.session_state:
                del st.session_state[_key]
91
+
92
def format_endpoint_url(endpoint: str) -> str:
    """Return *endpoint* with a trailing slash, appending one only when absent."""
    return endpoint if endpoint.endswith('/') else endpoint + '/'
97
+
98
def mask_api_key(api_key: str) -> str:
    """Mask an API key for display: keep the first/last 4 chars of keys longer
    than 8 characters, star out the middle; star everything for short keys."""
    hidden = len(api_key) - 8
    if hidden > 0:
        return f"{api_key[:4]}{'*' * hidden}{api_key[-4:]}"
    return '*' * len(api_key)
utils/generate_content.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from loggers.logger import logger
2
+ from template.response_prompt import GENERATE_CHAIN
3
+
4
+ import sys
5
+ sys.dont_write_bytecode = True
6
+
7
def generate(state):
    """
    Generate the final LinkedIn-post answer from the question plus any
    web-search context gathered so far.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: New 'generation' key to merge into the state.
    """

    logger.info("STEP: GENERATING FINAL RESPONSE")

    question = state['question']

    # 'context' only exists when the web-search branch ran. Bug fix: the
    # original used a bare except, which silently swallowed every error
    # type; use an explicit default lookup instead.
    context = state.get('context', " ")

    generation = GENERATE_CHAIN.invoke(
        {
            "context": context,
            "question": question
        }
    )
    return {
        "generation": generation
    }
utils/query_transform.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from loggers.logger import logger
2
+ from template.transform_prompt import QUERY_CHAIN
3
+
4
+ import sys
5
+ sys.dont_write_bytecode = True
6
+
7
+ query_chain = QUERY_CHAIN
8
+
9
def transform_query(state):
    """
    Rewrite the user's question into an effective web-search query string.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: New 'search_query' key to merge into the state.
    """

    logger.info("INSIDE QUERY TRANSFORMATION FOR WEB SEARCH")

    # The chain returns a JSON-parsed dict with a single 'query' key.
    rewritten = query_chain.invoke({"question": state['question']})

    return {"search_query": rewritten['query']}
utils/question_route.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from loggers.logger import logger
2
+ from template.router_prompt import QUESTION_ROUTER
3
+
4
+ import sys
5
+ sys.dont_write_bytecode = True
6
+
7
+ query_router = QUESTION_ROUTER
8
+
9
def route_question(state):
    """
    Route question to web search or generation.

    Args:
        state (dict): The current graph state.

    Returns:
        str: Next node to call ("websearch" or "generate").
    """

    logger.info("INSIDE ROUTE QUESTION FUNCTION")

    question = state['question']

    output = query_router.invoke(
        {
            "question": question
        }
    )

    # .get avoids a KeyError if the model returned malformed JSON.
    choice = output.get('choice')

    if choice == "web_search":
        logger.info("ROUTING QUERY TO WEB SEARCH CONTENT")
        return "websearch"

    if choice == "generate":
        logger.info("ROUTING QUERY TO GENERATION CONTENT")
        return "generate"

    # Bug fix: the original fell through and returned None for any
    # unexpected choice, which crashes the LangGraph conditional router.
    # Fall back to direct generation.
    logger.warning(f"UNEXPECTED ROUTER CHOICE {choice!r}; DEFAULTING TO GENERATION")
    return "generate"
35
+
36
+
37
+
utils/run_agent.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from helper.graphs import local_agent

import sys
sys.dont_write_bytecode = True


def execute_agent(query):
    """Run the compiled LangGraph agent on *query* and return the post text.

    Args:
        query (str): The user's topic/question.

    Returns:
        str: The 'generation' field produced by the graph.
    """
    output = local_agent.invoke(
        {
            "question": query
        }
    )

    return output['generation']


# Bug fix: the demo call previously executed at import time, invoking the
# LLM whenever this module was imported. Guard it so it only runs as a script.
if __name__ == "__main__":
    print(execute_agent("What is AI"))
utils/search_web_content.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from loggers.logger import logger
2
+ from helper.web_search_agent import WEB_SEARCH_TOOL
3
+
4
+ import sys
5
+ sys.dont_write_bytecode = True
6
+
7
+ tool = WEB_SEARCH_TOOL
8
+
9
def web_search(state):
    """
    Run the web search for the previously transformed query.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: New 'context' key (raw search results) to merge into the state.
    """

    logger.info("INSIDE THE WEB SEARCH FILE")

    query = state['search_query']
    logger.info(f"SEARCH QUERY ENTERED: {query}")

    # 'tool' is the module-level alias of WEB_SEARCH_TOOL; use it consistently.
    search_results = tool.invoke(query)
    logger.info("WEB SEARCH TOOL INVOKED")

    return {"context": search_results}
31
+
32
+
33
+
34
+
35
+