# NOTE(review): the original file began with paste artifacts ("Spaces:",
# "Runtime error" x2), likely copied from a runner/judge UI. Preserved as a
# comment so the module parses.
import os
import random
from typing import List

from langchain import callbacks, hub
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
class HeadlineInfo(BaseModel):
    """One headline suggestion as returned to the caller (trimmed view of
    the richer HeadlineInfoLLM the model produces)."""

    headline: str  # suggested headline text
    tone: str      # tone of the headline (e.g. as produced by the LLM)
    reason: str    # one-sentence rationale for the headline's score
class Response(BaseModel):
    """Result payload of SuggestHeadlinesV2.kickoff: the LangSmith run id of
    the traced chain invocation plus the selected headline suggestions."""

    run_id: UUID4
    headlines_details: List[HeadlineInfo]
class HeadlineInfoLLM(BaseModel):
    """Per-headline schema the LLM is instructed to emit.

    NOTE: the Field descriptions below are serialized into the prompt via
    JsonOutputParser.get_format_instructions(); changing their wording
    changes what the model is asked to produce, so keep them stable.
    """

    headline: str = Field(
        description="The suggested headline for the given TIL.",
    )
    tone: str = Field(
        description="The tone of the suggested headline.",
    )
    clickability_score: int = Field(
        description="Evaluate the headline's quality on a scale of 1 to 10 w.r.t the tone.",
    )
    reason: str = Field(
        description="Reason for the clickability_score in one sentence",
    )
class HeadlineResults(BaseModel):
    """Top-level schema for the LLM's JSON output: a list of suggestions."""

    headlines_details: List[HeadlineInfoLLM]
class SuggestHeadlinesV2:
    """Suggests headlines for a TIL post using a LangChain hub prompt and an
    OpenAI chat model, returning up to three randomly chosen suggestions
    together with the LangSmith run id of the traced invocation.

    Usage: SuggestHeadlinesV2().kickoff({"content": "..."}) -> Response

    Requires the OPENAI_MODEL and ENV environment variables to be set.
    """

    def kickoff(self, inputs=None) -> Response:
        """Entry point.

        Args:
            inputs: mapping with a "content" key holding the TIL text.
                Fix: the original default was a mutable list (`inputs=[]`),
                which is a shared-default hazard and could never satisfy
                `inputs["content"]` anyway; `None` -> `{}` keeps the
                "missing content fails loudly" behavior (KeyError instead
                of the original TypeError).

        Returns:
            Response with the traced run id and the selected headlines.
        """
        if inputs is None:
            inputs = {}
        self.content = inputs["content"]
        return self._get_til_headline()

    def _get_til_headline(self) -> Response:
        """Build the prompt | llm | parser chain, invoke it with tracing
        enabled, and record the run id for the Response."""
        prompt = hub.pull("til_suggest_headline")
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.8)
        parser = JsonOutputParser(pydantic_object=HeadlineResults)
        chain = (prompt | llm | parser).with_config({
            "tags": ["til"],
            "run_name": "Suggest TIL Headlines",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"],
            },
        })
        with callbacks.collect_runs() as cb:
            self.llm_response = chain.invoke({
                "til_content": self.content,
                "format_instructions": parser.get_format_instructions(),
            })
            # Capture the run id while still inside the collector context,
            # before the collected runs can be cleared.
            self.run_id = cb.traced_runs[0].id
        return self._handle_response()

    def _handle_response(self) -> Response:
        """Pick up to three headlines at random from the parsed LLM output.

        Fix: the original used random.shuffle + slice, which mutates the
        list stored on self.llm_response in place; random.sample selects
        with the same uniform distribution without side effects, and the
        min() guard keeps the original's tolerance of fewer than three
        suggestions.
        """
        headlines_data = self.llm_response["headlines_details"]
        chosen = random.sample(headlines_data, k=min(3, len(headlines_data)))
        return Response(
            run_id=self.run_id,
            headlines_details=[
                HeadlineInfo(
                    headline=datum["headline"],
                    tone=datum["tone"],
                    reason=datum["reason"],
                )
                for datum in chosen
            ],
        )