File size: 7,813 Bytes
6ab1d9c
9a9c92d
 
 
 
 
 
 
0c998db
aac7a58
9a9c92d
0c998db
9a9c92d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0d77621
9a9c92d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fadb1e0
9a9c92d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
aac7a58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9a9c92d
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
# from langchain_google_genai import ChatGoogleGenerativeAI
import os
import re
from typing import List, Literal, TypedDict

import requests
from dotenv import load_dotenv
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, START, END
from langgraph.types import Command

load_dotenv()

def get_platform_tips(state) -> Command[Literal['web_search']]:
    """Gather current best-practice tips for writing posts on the target platform.

    Runs a Tavily web search for platform-specific posting advice, has the
    model condense the hits into a plain-text summary, stores that summary in
    ``state["tips"]``, and routes to the "web_search" node.
    """
    search_tool = TavilySearchResults(max_results=5)
    search_hits = search_tool.invoke(f"tips on how to write an effective post on {state['platform']}")
    summary_prompt = f"""
    Summarize the tips provided in {search_hits}. These tips will be used to generate a {state['platform']} post
    Output as plain text.
    """
    tips_summary = model.invoke(summary_prompt).content
    return Command(update={"tips": tips_summary}, goto="web_search")

def web_search(state) -> Command[Literal['generate_post']]:
    """Search the web for up-to-date material on the post topic.

    Stores the raw Tavily results in ``state["tavily_results"]`` and routes to
    the "generate_post" node.
    """
    results = TavilySearchResults(max_results=5).invoke(state["topic"])
    return Command(update={"tavily_results": results}, goto="generate_post")
    
def generate_social_media_post(state) -> Command[Literal["evaluate_engagement"]]:
    """Generate a social media post for a B2B bank.

    Builds a platform-specific prompt from the topic, the web-search results,
    and the platform writing tips produced by ``get_platform_tips``. (The tips
    were previously collected into state but never fed to the prompt — fixed
    here; ``state.get`` keeps the node safe if tips are absent.) Stores the
    draft in ``state["post"]`` and routes to "evaluate_engagement".
    """
    prompt = f"""
    You are a social media strategist for a B2B bank. Generate a {state["platform"]} post.
    The post should:
    - Be engaging but professional.
    - Provide value to corporate clients.
    - Focus on {state["topic"]}.
    - Incorporate information from {state["tavily_results"]}
    - Follow these platform-specific writing tips: {state.get("tips", "")}

    Output as plain text.
    """
    response = model.invoke(prompt)
    return Command(update={"post": response.content}, goto="evaluate_engagement")

def evaluate_engagement(state) -> Command[Literal["evaluate_tone"]]:
    """Assess how engaging the post is for the target platform.

    Asks the model for a 1-10 engagement score (clarity, readability,
    call-to-action), stores the raw reply text in
    ``state["engagement_score"]``, and routes to the "evaluate_tone" node.
    """
    # Fixed typo in the prompt: "basd" -> "based".
    prompt = f"""
    Score the following post on engagement (1-10) based on the provided social media platform. 
    Consider clarity, readability, and compelling call-to-action.
    
    Platform: {state["platform"]}
    Post: {state["post"]}
    
    Respond with just a number.
    """
    score = model.invoke(prompt)
    return Command(update={"engagement_score": score.content}, goto="evaluate_tone")

def evaluate_tone(state) -> Command[Literal["evaluate_clarity"]]:
    """Score the post's tone (1-10) for the given platform.

    Stores the raw model reply in ``state["tone_score"]`` and routes to the
    "evaluate_clarity" node.
    """
    tone_prompt = f"""
    Score the post’s tone (1-10). Ensure it's:
    - Professional but not too rigid.
    - Trustworthy and aligned with B2B financial services.
    - Aligns with the specified platform.

    Platform: {state["platform"]}
    Post: {state["post"]}
    
    Respond with just a number.
    """
    verdict = model.invoke(tone_prompt)
    return Command(update={"tone_score": verdict.content}, goto="evaluate_clarity")

def evaluate_clarity(state) -> Command[Literal["revise_if_needed"]]:
    """Score the post's clarity (1-10) — readable, jargon-free, platform-apt.

    Stores the raw model reply in ``state["clarity_score"]`` and routes to the
    "revise_if_needed" node.
    """
    clarity_prompt = f"""
    Score the post on clarity (1-10).
    - Avoids jargon.
    - Easy to read for busy corporate professionals.
    - Appropriate for the social media platform.

    Platform: {state["platform"]}
    Post: {state["post"]}

    Respond with just a number.
    """
    verdict = model.invoke(clarity_prompt)
    return Command(update={"clarity_score": verdict.content}, goto="revise_if_needed")

def _parse_score(raw) -> int:
    """Extract the first integer from an evaluator's raw LLM reply.

    The evaluator prompts ask for "just a number", but the model may still
    pad the answer with whitespace or extra words; a bare int() would then
    raise. Raises ValueError when no digits are present at all.
    """
    match = re.search(r"\d+", str(raw))
    if match is None:
        raise ValueError(f"no numeric score found in {raw!r}")
    return int(match.group())

def revise_if_needed(state) -> Command[Literal["get_image"]]:
    """Revise the post when the average evaluation score is below a threshold.

    Averages the engagement, tone, and clarity scores (parsed defensively from
    the raw LLM replies); below 8 the model rewrites the post using the scores
    as feedback. Routes to the "get_image" node in both cases.
    """
    scores = [_parse_score(state[key])
              for key in ("engagement_score", "tone_score", "clarity_score")]
    avg_score = sum(scores) / len(scores)
    
    if avg_score < 8:  # Arbitrary threshold for revision
        prompt = f"""
        Revise this post to improve clarity, engagement, and tone:
        
        {state["post"]}
        
        Improve based on the following scores:
        Engagement: {state["engagement_score"]}
        Tone: {state["tone_score"]}
        Clarity: {state["clarity_score"]}
        """
        revised_post = model.invoke(prompt)
        return Command(update={"post": revised_post.content}, goto="get_image")
    
    return Command(goto="get_image")

def fetch_image(state) -> Command[Literal[END]]:
    """Fetch candidate image URLs from the Pexels search API for the topic.

    (Docstring previously said "Unsplash"; the request below targets Pexels.)
    The model first rewrites the raw topic into an optimized image-search
    query, which is sent to Pexels with the PEXELS_API_KEY env var as the
    Authorization header. On HTTP 200 the photo page URLs are stored in
    ``state["image_url"]``; any other status degrades gracefully and ends the
    graph with no update.
    """
    prompt = f"""
    You are a search optimization assistant. Your task is to take a topic and improve it to ensure the best image results from an image search API like Unsplash. Follow these steps:

    1. **Normalize the input**: Convert all text to lowercase and remove special characters (except for spaces).
    2. **Add more descriptive terms**: If the query is broad (e.g., "nature"), add more specific keywords like "landscape" or "outdoor" to help refine the search.
    3. **Use synonyms and related terms**: For terms that could have multiple meanings or common synonyms, expand the query to include variations. For example, if the user queries "car", you can add "vehicle" or "automobile".
    4. **Specify style and tone**: If the user provides a vague description, suggest adding words to define the style or mood of the image, such as "peaceful", "dramatic", or "colorful".
    5. **Categorize the query**: If applicable, categorize the query into domains like "nature", "architecture", or "people" and add related terms (e.g., "urban", "portrait", "scenic").

    **Example Inputs and Outputs:**

    1. Input: "sunset over a beach"
    Output: "sunset beach ocean horizon landscape"

    2. Input: "car"
    Output: "car vehicle automobile road transport"

    3. Input: "nature"
    Output: "nature landscape outdoor scenic green"    

    Topic: {state['topic']}
    """    

    url = "https://api.pexels.com/v1/search"
    
    params = {
        "query": model.invoke(prompt).content,
        "per_page": 5,
        "page": 1               
    }

    headers = {
        "Authorization": os.getenv("PEXELS_API_KEY")
    }
    
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        data = response.json()
        # Collect the page URL of each returned photo; tolerate a missing
        # "photos" key rather than raising.
        urls = [photo['url'] for photo in data.get('photos', [])]
        return Command(update={"image_url": urls}, goto=END)
    # Best-effort: on API failure, finish the graph without an image.
    return Command(goto=END)

class State(TypedDict):
    """Shared graph state read and written by every workflow node."""
    topic: str              # Subject of the post (set by the caller)
    platform: str           # Target platform, e.g. LinkedIn or Instagram
    tips: str               # LLM summary of platform writing tips (get_platform_tips)
    tavily_results: List[dict]  # Raw Tavily search results for the topic (web_search)
    post: str               # Draft post text; overwritten by revise_if_needed
    engagement_score: str   # Raw LLM reply (stored via .content); parsed with int() later
    tone_score: str         # Raw LLM reply (stored via .content); parsed with int() later
    clarity_score: str      # Raw LLM reply (stored via .content); parsed with int() later
    image_url: List[str]    # Pexels photo page URLs (fetch_image stores a list)

# model =  ChatGoogleGenerativeAI(
#             model="gemini-2.5-flash",#"gemini-1.5-flash",
#             temperature=0,
#             max_tokens=None,
#             timeout=None,
#             max_retries=2,            
#             )

# Shared chat model used by every node in the graph (module-level so the
# node functions above can reference it at call time).
model = ChatGroq(
      model="llama-3.3-70b-versatile",
      temperature=0.0,            # Deterministic sampling for reproducible posts
      max_retries=2,              # Configure retry attempts
      timeout=None,
      api_key=os.environ.get("GROQ_API_KEY")
  )

# Assemble the graph. Only the START edge is declared explicitly: each node
# returns Command(goto=...), so routing between nodes happens at runtime and
# no further add_edge calls are required.
workflow = StateGraph(State)
workflow.add_node("get_tips", get_platform_tips)
workflow.add_node("web_search", web_search)
workflow.add_node("generate_post", generate_social_media_post)
workflow.add_node("evaluate_engagement", evaluate_engagement)
workflow.add_node("evaluate_tone", evaluate_tone)
workflow.add_node("evaluate_clarity", evaluate_clarity)
workflow.add_node("revise_if_needed", revise_if_needed)
workflow.add_node("get_image", fetch_image)

workflow.add_edge(START, "get_tips")
graph = workflow.compile()