Spaces:
Build error
Build error
# from langchain_google_genai import ChatGoogleGenerativeAI
import os
import re
from typing import List, Literal, TypedDict

import requests
from dotenv import load_dotenv
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, START, END
from langgraph.types import Command

load_dotenv()
def get_platform_tips(state) -> Command[Literal['web_search']]:
    """Conduct a web search to find up-to-date information on how to write an effective post for the provided platform."""
    search_tool = TavilySearchResults(max_results=5)
    query = f"tips on how to write an effective post on {state['platform']}"
    search_results = search_tool.invoke(query)
    # Condense the raw search hits into a plain-text tip summary for later prompts.
    summary_prompt = f"""
    Summarize the tips provided in {search_results}. These tips will be used to generate a {state['platform']} post
    Output as plain text.
    """
    summary = model.invoke(summary_prompt).content
    return Command(update={"tips": summary}, goto="web_search")
def web_search(state) -> Command[Literal['generate_post']]:
    """Conduct a web search to find up-to-date information about a provided topic to be used for a social media post."""
    # Raw Tavily results are stored as-is; the generation node interpolates them into its prompt.
    results = TavilySearchResults(max_results=5).invoke(state["topic"])
    return Command(update={"tavily_results": results}, goto="generate_post")
def generate_social_media_post(state) -> Command[Literal["evaluate_engagement"]]:
    """Generate a social media post for a B2B bank.

    Fix: the pipeline gathers platform writing tips in `get_platform_tips` and
    stores them in state["tips"], but the original prompt never used them.
    The tips are now folded into the generation prompt (via `state.get` so the
    node still works if the tips step is skipped).
    Routes to "evaluate_engagement" with the draft stored in state["post"].
    """
    prompt = f"""
    You are a social media strategist for a B2B bank. Generate a {state["platform"]} post.
    The post should:
    - Be engaging but professional.
    - Provide value to corporate clients.
    - Focus on {state["topic"]}.
    - Incorporate information from {state["tavily_results"]}
    - Follow these platform-specific writing tips: {state.get("tips", "")}
    Output as plain text.
    """
    response = model.invoke(prompt)
    return Command(update={"post": response.content}, goto="evaluate_engagement")
def evaluate_engagement(state) -> Command[Literal["evaluate_tone"]]:
    """Assess how engaging the post is for LinkedIn/Instagram.

    Fix: corrected the typo "basd" -> "based" in the LLM prompt.
    The raw model reply (expected to be a number as text) is stored in
    state["engagement_score"]; routes to "evaluate_tone".
    """
    prompt = f"""
    Score the following post on engagement (1-10) based on the provided social media platform.
    Consider clarity, readability, and compelling call-to-action.
    Platform: {state["platform"]}
    Post: {state["post"]}
    Respond with just a number.
    """
    score = model.invoke(prompt)
    return Command(update={"engagement_score": score.content}, goto="evaluate_tone")
def evaluate_tone(state) -> Command[Literal["evaluate_clarity"]]:
    """Check if the post maintains a professional yet engaging tone."""
    # Ask the model for a bare 1-10 score; the text reply is stored verbatim.
    evaluation_prompt = f"""
    Score the post’s tone (1-10). Ensure it's:
    - Professional but not too rigid.
    - Trustworthy and aligned with B2B financial services.
    - Aligns with the specified platform.
    Platform: {state["platform"]}
    Post: {state["post"]}
    Respond with just a number.
    """
    result = model.invoke(evaluation_prompt)
    return Command(update={"tone_score": result.content}, goto="evaluate_clarity")
def evaluate_clarity(state) -> Command[Literal["revise_if_needed"]]:
    """Ensure the post is clear and not overly technical."""
    # Same pattern as the other evaluators: single numeric reply, stored as text.
    clarity_prompt = f"""
    Score the post on clarity (1-10).
    - Avoids jargon.
    - Easy to read for busy corporate professionals.
    - Appropriate for the social media platform.
    Platform: {state["platform"]}
    Post: {state["post"]}
    Respond with just a number.
    """
    result = model.invoke(clarity_prompt)
    return Command(update={"clarity_score": result.content}, goto="revise_if_needed")
def revise_if_needed(state) -> Command[Literal["get_image"]]:
    """Revise post if average evaluation score is below a threshold.

    Fix: the original `int(state[...])` calls crash whenever the model's
    reply is not a bare integer (e.g. "Score: 8" or "8/10"), even though
    each evaluator only *asks* for a number. Scores are now parsed by
    extracting the first integer from the reply text.
    Routes to "get_image" either way; only updates state["post"] when the
    average score is below the threshold.
    """
    def _parse_score(raw) -> int:
        # Model output is free text; pull the first integer instead of trusting int().
        match = re.search(r"-?\d+", str(raw))
        if match is None:
            raise ValueError(f"No numeric score found in model output: {raw!r}")
        return int(match.group())

    scores = [
        _parse_score(state["engagement_score"]),
        _parse_score(state["tone_score"]),
        _parse_score(state["clarity_score"]),
    ]
    avg_score = sum(scores) / len(scores)
    if avg_score < 8:  # Arbitrary threshold for revision
        prompt = f"""
    Revise this post to improve clarity, engagement, and tone:
    {state["post"]}
    Improve based on the following scores:
    Engagement: {state["engagement_score"]}
    Tone: {state["tone_score"]}
    Clarity: {state["clarity_score"]}
    """
        revised_post = model.invoke(prompt)
        return Command(update={"post": revised_post.content}, goto="get_image")
    return Command(goto="get_image")
def fetch_image(state) -> Command[Literal[END]]:
    """Fetch candidate image URLs from the Pexels search API based on the post topic.

    Fixes: the original docstring claimed Unsplash, but the request goes to
    Pexels; a pointless `enumerate` discarded its index; and a 200 response
    lacking a 'photos' key would have raised KeyError — now handled with
    `.get`. On any non-200 response the node falls through to END without an
    image (best-effort, graph keeps running).
    Stores the list of photo page URLs in state["image_url"].
    """
    # First, have the LLM rewrite the raw topic into a richer search query.
    prompt = f"""
    You are a search optimization assistant. Your task is to take a topic and improve it to ensure the best image results from an image search API like Unsplash. Follow these steps:
    1. **Normalize the input**: Convert all text to lowercase and remove special characters (except for spaces).
    2. **Add more descriptive terms**: If the query is broad (e.g., "nature"), add more specific keywords like "landscape" or "outdoor" to help refine the search.
    3. **Use synonyms and related terms**: For terms that could have multiple meanings or common synonyms, expand the query to include variations. For example, if the user queries "car", you can add "vehicle" or "automobile".
    4. **Specify style and tone**: If the user provides a vague description, suggest adding words to define the style or mood of the image, such as "peaceful", "dramatic", or "colorful".
    5. **Categorize the query**: If applicable, categorize the query into domains like "nature", "architecture", or "people" and add related terms (e.g., "urban", "portrait", "scenic").
    **Example Inputs and Outputs:**
    1. Input: "sunset over a beach"
    Output: "sunset beach ocean horizon landscape"
    2. Input: "car"
    Output: "car vehicle automobile road transport"
    3. Input: "nature"
    Output: "nature landscape outdoor scenic green"
    Topic: {state['topic']}
    """
    url = "https://api.pexels.com/v1/search"
    params = {
        "query": model.invoke(prompt).content,
        "per_page": 5,
        "page": 1
    }
    headers = {
        # Pexels expects the API key directly in the Authorization header.
        "Authorization": os.getenv("PEXELS_API_KEY")
    }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        data = response.json()
        urls = [photo['url'] for photo in data.get('photos', [])]
        return Command(update={"image_url": urls}, goto=END)
    # Best-effort: on API failure, finish the graph without an image.
    return Command(goto=END)
class State(TypedDict):
    """Shared state threaded through every node of the LangGraph workflow."""
    topic: str  # subject of the post, supplied by the caller
    platform: str  # target platform (e.g. LinkedIn/Instagram), supplied by the caller
    tips: str  # LLM summary of platform writing tips, set by get_platform_tips
    tavily_results: List[dict]  # raw Tavily search results for the topic, set by web_search
    post: str  # generated (and possibly revised) post text
    engagement_score: str  # numeric score as model text (score.content), set by evaluate_engagement
    tone_score: str  # numeric score as model text, set by evaluate_tone
    clarity_score: str  # numeric score as model text, set by evaluate_clarity
    image_url: List[str]  # image page URLs collected by fetch_image (a list, despite the singular name)
# Alternative Gemini backend, kept commented out for easy switching:
# model = ChatGoogleGenerativeAI(
# model="gemini-2.5-flash",#"gemini-1.5-flash",
# temperature=0,
# max_tokens=None,
# timeout=None,
# max_retries=2,
# )
# Shared chat model used by every node in this module (defined after the node
# functions, but only referenced at graph-execution time).
model = ChatGroq(
    model="llama-3.3-70b-versatile",
    temperature=0.0,  # Adjust sampling temperature as needed
    max_retries=2,  # Configure retry attempts
    timeout=None,
    api_key=os.environ.get("GROQ_API_KEY")
)
# Assemble the LangGraph state machine. Only the entry edge is declared here:
# all other transitions are driven by the Command(goto=...) each node returns.
workflow = StateGraph(State)
workflow.add_node("get_tips", get_platform_tips)
workflow.add_node("web_search", web_search)
workflow.add_node("generate_post", generate_social_media_post)
workflow.add_node("evaluate_engagement", evaluate_engagement)
workflow.add_node("evaluate_tone", evaluate_tone)
workflow.add_node("evaluate_clarity", evaluate_clarity)
workflow.add_node("revise_if_needed", revise_if_needed)
workflow.add_node("get_image", fetch_image)
workflow.add_edge(START, "get_tips")
# Compiled graph entry point; invoke with at least {"topic": ..., "platform": ...}.
graph = workflow.compile()