Spaces:
Runtime error
Runtime error
| from langchain import callbacks, hub | |
| from langchain_core.output_parsers import JsonOutputParser | |
| from langchain_openai import ChatOpenAI | |
| from pydantic import BaseModel, Field, UUID4 | |
| from typing import List | |
| import os | |
class Takeaway(BaseModel):
    """A single TIL takeaway together with the readability verdict assigned to it."""

    # The takeaway text: either the original TIL (kept when scored "High")
    # or the LLM-rewritten version (see RewriteTilV2._handle_response).
    takeaway: str
    # "ok" when the TIL was already readable, "not_ok" when it was rewritten.
    feedback: str
class Response(BaseModel):
    """Result returned by RewriteTilV2.kickoff.

    Carries the LangChain run id (for tracing/feedback) and one Takeaway
    per TIL processed by the chain.
    """

    # PEP 8 fix: no space before the colon in an annotation (`run_id :` -> `run_id:`).
    run_id: UUID4
    new_til: List[Takeaway]
class ReadableTilResult(BaseModel):
    """LLM readability assessment of one TIL.

    NOTE: the Field descriptions below are serialized into the parser's
    format instructions (via JsonOutputParser in RewriteTilV2), so they act
    as prompt text — do not edit them casually.
    """

    original_til: str = Field(
        description="The original TIL without any modifications.",
    )
    readability_score: str = Field(
        description="The readability score as High/Medium/Low based on the readability of the TIL.",
    )
    reason: str = Field(
        description="The reason for the assessment of the TIL readability score in one sentence.",
    )
    readable_til: str = Field(
        description="Rewrite the TIL in a readable manner only if the readability score is not High",
    )
class ReadableTilResults(BaseModel):
    """Top-level shape of the parsed LLM payload: one result per TIL."""

    tils: List[ReadableTilResult]
class RewriteTilV2:
    """Runs the "til_understandability_revision" prompt over TIL content and
    turns the LLM's per-TIL readability assessment into a Response.

    Reads the OPENAI_MODEL and ENV environment variables; raises KeyError if
    either is unset.
    """

    def kickoff(self, inputs=None) -> Response:
        """Entry point.

        Args:
            inputs: mapping with a "content" key holding the TIL text.
                Fix: the previous default was a *mutable* list (`inputs=[]`),
                which is both shared across calls and the wrong type — the
                body indexes it as a dict, so the old default always crashed.

        Returns:
            Response with the traced run id and one Takeaway per TIL.
        """
        if inputs is None:
            inputs = {}
        self.content = inputs["content"]
        return self._get_understandable_til()

    def _get_understandable_til(self) -> Response:
        """Build the prompt | llm | parser chain, invoke it, capture the run id."""
        prompt = hub.pull("til_understandability_revision")
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
        parser = JsonOutputParser(pydantic_object=ReadableTilResults)
        chain = (prompt | llm | parser).with_config({
            "tags": ["til"],
            "run_name": "Rewrite Understandable TIL",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"],
            },
        })
        # collect_runs gives us the traced run id so callers can attach
        # feedback to this specific invocation.
        with callbacks.collect_runs() as cb:
            self.llm_response = chain.invoke({
                "til_content": self.content,
                "format_instructions": parser.get_format_instructions(),
            })
            self.run_id = cb.traced_runs[0].id
        return self._handle_response()

    def _handle_response(self) -> Response:
        """Map the parsed LLM output into a Response.

        TILs scored "High" keep their original text with feedback "ok";
        everything else uses the rewritten text with feedback "not_ok".
        """
        new_til = []
        for til in self.llm_response["tils"]:
            if til["readability_score"] == "High":
                # Already readable: keep the author's wording untouched.
                new_til.append(
                    Takeaway(feedback="ok", takeaway=til["original_til"])
                )
            else:
                new_til.append(
                    Takeaway(feedback="not_ok", takeaway=til["readable_til"])
                )
        return Response(run_id=self.run_id, new_til=new_til)