"""Defines the article-evaluation CrewAI task.

The task asks `article_evaluator` to score previously suggested articles
(1-10) on how much incremental learning they offer relative to the user's
captured insights, emits structured JSON (`EvaluatedArticles`), and writes
the scores back into `settings.articles` via a completion callback.
"""

import json

import streamlit as st
from crewai import Task
from crewai.tasks.task_output import TaskOutput
from pydantic import BaseModel
from typing import List

import utils.settings as settings
from agents.article_evaluator import article_evaluator
from tasks.create_learning_profile import learning_profile_task
from tasks.new_article_suggestion import article_suggestion_task


class EvaluatedArticle(BaseModel):
    """One evaluated article as produced by the evaluator agent."""

    title: str
    url: str
    # 1 = no incremental learning, 10 = strong incremental learning.
    evaluation_score: int
    evaluation_reason: str


class EvaluatedArticles(BaseModel):
    """JSON envelope the task must emit: a list of evaluated articles."""

    articles: List[EvaluatedArticle]


def callback_function(output: TaskOutput) -> None:
    """Persist evaluation results into the shared article store.

    Parses the task's exported JSON, copies each article's score and
    reason into ``settings.articles`` (keyed by URL), and shows a
    success message in the Streamlit UI.

    NOTE(review): relies on ``output.exported_output`` being a JSON
    string — this attribute name is crewai-version dependent; confirm
    against the installed crewai release.
    """
    evaluated = json.loads(output.exported_output)["articles"]
    for article in evaluated:
        url = article["url"]
        # Assumes every evaluated URL already exists in settings.articles
        # (the prompt restricts evaluation to previously suggested articles).
        settings.articles[url]["evaluation_score"] = article["evaluation_score"]
        settings.articles[url]["evaluation_reason"] = article["evaluation_reason"]
    st.markdown("### Evaluate Articles task is executed successfully!")


evaluation_task = Task(
    description=(
        # Fixed typos ("artilces", "incremenrtal") and a missing space
        # between sentences so the LLM receives a clean prompt.
        "Evaluate articles based on the metric does the articles provide incremental "
        "learning w.r.t the insights captured by the user. "
        "Score the articles on the scale of 1 to 10, "
        "1 being doesn't provide incremental learning and "
        "10 being provides incremental learning to the user. "
        "Evaluate only articles that have been suggested to the user and no other articles."
    ),
    expected_output=(
        "List of article titles with their URLs, evaluation scores, "
        "and evaluation reasons w.r.t insights captured by the user."
    ),
    output_json=EvaluatedArticles,
    output_file="evaluated_articles.json",
    agent=article_evaluator,
    async_execution=False,
    callback=callback_function,
    # Evaluation needs both the learning profile and the suggested articles.
    context=[learning_profile_task, article_suggestion_task],
)