# Hugging Face Spaces status header (scrape artifact): Spaces: Running
# Standard library
import os
import json
import time
import traceback

# Third-party: .env loading, Langfuse tracing, Gemini chat models, prompts
from dotenv import load_dotenv
from langfuse import Langfuse
from langchain_google_genai import ChatGoogleGenerativeAI
from langfuse.callback import CallbackHandler
from langchain_core.prompts import PromptTemplate

# Populate os.environ from a local .env file (Langfuse keys, Google API keys).
load_dotenv()
# Langfuse LangChain callback for tracing news-analysis LLM calls.
# Traces are grouped under the "news_analysis" session in the EU-hosted
# Langfuse cloud project identified by the env-provided key pair.
langfuse_news_analysis_handler = CallbackHandler(
    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
    host="https://cloud.langfuse.com",  # πͺπΊ EU region
    session_id="news_analysis",
)

# Separate callback so post-generation calls land in their own
# "post_generation" Langfuse session (same project/credentials).
langfuse_post_generation_handler = CallbackHandler(
    secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
    host="https://cloud.langfuse.com",  # πͺπΊ EU region
    session_id="post_generation",
)

# Langfuse client used below to fetch managed prompts ("news_selector",
# "post_generator"); picks up credentials from the environment.
langfuse = Langfuse()
# Gemini client used by basic_analysis(). temperature=0.8 for some
# variety; no client-side timeout; up to 2 automatic retries.
analysis_llm = ChatGoogleGenerativeAI(
    model="gemini-2.0-flash-lite",
    temperature=0.8,
    timeout=None,
    max_retries=2,
    api_key=os.getenv("GOOGLE_API_KEY"),
)

# Gemini client used by get_text_post_content(). Same model/settings but
# a dedicated API key, presumably to isolate post-generation quota —
# TODO confirm.
post_content_llm = ChatGoogleGenerativeAI(
    model="gemini-2.0-flash-lite",
    temperature=0.8,
    timeout=None,
    max_retries=2,
    api_key=os.getenv("GOOGLE_POST_GENERATION_API_KEY"),
)
def basic_analysis(news, max_attempts=5, retry_delay=30):
    """Analyze a news object with the LLM and return its JSON verdict.

    Compiles the Langfuse-managed "news_selector" prompt with *news*,
    invokes the analysis LLM (traced through the news-analysis Langfuse
    handler), extracts the outermost ``{...}`` span from the reply, and
    parses it as JSON. Retries the whole call on failure.

    Args:
        news: Object interpolated into the prompt as ``news_object``.
        max_attempts: Total LLM attempts before giving up (default 5,
            matching the original hard-coded loop).
        retry_delay: Seconds to sleep between failed attempts
            (default 30, matching the original).

    Returns:
        dict: The parsed JSON result on success, or
        ``{"error": "LLM response is not in correct format."}`` after
        all attempts are exhausted.
    """
    prompt = langfuse.get_prompt("news_selector")
    for attempt in range(max_attempts):
        try:
            response = analysis_llm.invoke(
                prompt.compile(news_object=news),
                config={"callbacks": [langfuse_news_analysis_handler]},
            )
            print("################ BASIC ANALYSIS AGENT RESPONSE ################")
            print(response.content)
            print("################ BASIC ANALYSIS END AGENT RESPONSE ################")
            content = response.content
            # Drop any chain-of-thought preamble emitted by "thinking" models.
            if "</think>" in content:
                content = content.split("</think>")[1]
            # The model wraps its JSON answer in prose; grab the outermost {...}.
            start_index = content.find("{")
            end_index = content.rfind("}")
            print("start index:", start_index)
            print("end index:", end_index)
            if start_index != -1 and end_index != -1 and start_index < end_index:
                try:
                    results = json.loads(content[start_index : end_index + 1])
                    print(results)
                    return results
                except json.JSONDecodeError as e:
                    # Malformed JSON: log and fall through to the next attempt.
                    print(e)
                    traceback.print_exc()
        except Exception as e:
            # LLM/network failure: log, back off, retry — but don't sleep
            # after the final attempt (the original slept pointlessly).
            print(e)
            traceback.print_exc()
            if attempt < max_attempts - 1:
                time.sleep(retry_delay)
    return {"error": "LLM response is not in correct format."}
def get_text_post_content(details, reference, max_length=490):
    """Generate social-post text for *details* with *reference* woven in.

    Fetches the Langfuse-managed "post_generator" prompt, reserving
    ``max_length - len(reference)`` characters for the generated body so
    that body + reference fits the platform's character budget, then
    inserts *reference* on its own line just before the hashtag block.

    NOTE(review): the original also loaded
    "prompts/post_generator_without_source.yml" via PromptTemplate and
    immediately discarded it — that dead file read has been removed.

    Args:
        details: News content passed to the prompt as ``NEWS_CONTENT``.
        reference: Source/attribution line to insert before the hashtags.
        max_length: Total character budget (default 490, the original
            hard-coded limit).

    Returns:
        tuple[str, bool]: ``(post_text, True)`` on success,
        ``("", False)`` on any failure.
    """
    try:
        prompt = langfuse.get_prompt("post_generator")
        user_query = prompt.compile(
            NEWS_CONTENT=details, CHAR_LENGTH=max_length - len(reference)
        )
        response = post_content_llm.invoke(
            user_query, config={"callbacks": [langfuse_post_generation_handler]}
        )
        print("POST CONTENT RESPONSE:", response)
        # Strip double quotes the model sometimes wraps the post in.
        content = response.content.replace('"', '')
        # Drop any chain-of-thought preamble emitted by "thinking" models.
        if "</think>" in content:
            content = content.split("</think>")[1]
        hashtag_start = content.find("#")
        if hashtag_start == -1:
            # No hashtags: append the reference instead of mis-slicing at
            # index -1 (the original chopped off the last character here).
            content = f"{content}\n{reference}\n"
        else:
            content = f"""{content[:hashtag_start]}
{reference}
{content[hashtag_start:]}"""
        return content, True
    except Exception as e:
        print(e)
        traceback.print_exc()
        return "", False