| import asyncio | |
| import json | |
| from collections import deque | |
| from application import app_data | |
| from langchain.prompts import ChatPromptTemplate | |
| from chains import llm, retry_decorator | |
| from utility import list_dict_to_dict | |
| from pydantic import BaseModel, Field | |
| from typing import List, Dict, Optional | |
# Manually create chain objects: load the summary-prompt definitions and
# index them by their "name" field for O(1) lookup by execute_chain.
# Fix: JSON is UTF-8 by spec — pass encoding explicitly instead of relying
# on the platform's locale default.
with open(".data/summary_prompts.json", "r", encoding="utf-8") as f:
    overview_summary = json.load(f)
# Presumably turns [{"name": ..., ...}, ...] into {name: {...}} — the helper
# is project-local; TODO confirm against utility.list_dict_to_dict.
overview_summary = list_dict_to_dict(overview_summary, "name")
class Instruction(BaseModel):
    """One summary-prompt definition loaded from summary_prompts.json."""

    # Key used to index this instruction in `overview_summary` and to mark
    # its output as present in an article dict (see execute_chain).
    name: str
    # Names of other instructions whose outputs this one depends on;
    # execute_chain recurses into any that are missing from the articles.
    inputs: List[str]
    # The prompt text sent to the LLM for this summary step.
    instruction: str
def execute_chain(instruction, articles):  # target shall be the object of the summary instruction
    """Recursively walk the dependency chain of a summary instruction.

    For every input of `instruction` that is not yet present in every article
    dict, recurse into that input's definition from the module-level
    `overview_summary` registry. Returns None; the traversal is the point.

    Args:
        instruction: An `Instruction` model, or a raw dict with the same keys
            (entries of `overview_summary` are plain dicts parsed from JSON).
        articles: List of dicts whose keys are instruction names already
            produced for each article.
    """
    # Bug fix: recursive calls pass raw dicts from `overview_summary`, which
    # have no `.name`/`.inputs` attributes — normalize to the model first.
    if isinstance(instruction, dict):
        instruction = Instruction(**instruction)
    # Base case: every article already contains this instruction's output.
    if all(instruction.name in article for article in articles):
        return
    for dep in instruction.inputs:
        # Recurse once per missing dependency. (The original recursed once per
        # missing (dep, article) pair, repeating identical work with no
        # additional effect.)
        if any(dep not in article for article in articles):
            execute_chain(overview_summary[dep], articles)
def gen_node(name):
    """Build a named prompt -> LLM chain for one summary instruction.

    Looks up the instruction text in the app-level registry and composes a
    two-message system prompt with the shared `llm`.

    Args:
        name: Key into `app_data["summary"]`.

    Returns:
        A one-entry dict mapping `name` to the runnable `prompt | llm` chain.
    """
    instruction = app_data["summary"][name]["instruction"]
    # Bug fix: `from_messages` takes a single sequence of messages; the
    # original passed the two tuples as separate positional arguments,
    # which raises TypeError at call time.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "{content}"),
        ("system", f"{instruction}"),
    ])
    node = prompt | llm
    return {name: node}