AnanthulaShravya commited on
Commit
8c27dd8
·
verified ·
1 Parent(s): d29d35c

Upload 17 files

Browse files
__init__.py ADDED
File without changes
article_recommendation.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv
2
+ import json
3
+ import streamlit as st
4
+ import utils.settings as settings
5
+
6
+ from crew.article_suggestion import article_recommendation_crew
7
+ from utils.write_to_json import write_dict_to_json as write_dict_to_json
8
+ load_dotenv()
9
+ settings.init()
10
+
11
+
12
def icon(emoji: str):
    """Render *emoji* as a large, Notion-style page icon via raw HTML."""
    markup = f'<span style="font-size: 78px; line-height: 1">{emoji}</span>'
    st.write(markup, unsafe_allow_html=True)
18
+
19
+
20
def main():
    """Render the article-recommendation page.

    Collects the user's interests and prior reading insights in a sidebar
    form, runs ``article_recommendation_crew`` on submit, then renders the
    articles the task callbacks stored in ``settings.articles``.
    """
    icon("📖 Articles RecommendAIgent")
    st.subheader("Let AI agents recommend articles based on your interest!")

    with st.sidebar:
        st.header("👇 Provide Your Interests Below!")
        with st.form("user_input_form", border=True):
            interests = st.text_input(
                "Enter your interests (comma-separated):",
                "GenAI, Architecture, Agentic Programming",
            )
            previous_article_insights = st.text_area(
                "Enter previous article insights:",
                "Agentic Design Patterns (https://www.deeplearning.ai/the-batch/how-agents-can-improve-llm-performance/)\n"
                "Reflection: The LLM examines its own work to come up with ways to improve it. "
                "Tool Use: The LLM is given tools such as web search, code execution, or any other function to help it gather information, take action, or process data. "
                "Planning: The LLM comes up with, and executes, a multistep plan to achieve a goal "
                "Multi-agent collaboration: More than one AI agent work together, splitting up tasks and discussing and debating ideas, to come up with better solutions than a single agent would.\n\n"
                "GenAI Multi-Agent Systems (https://thenewstack.io/genai-multi-agent-systems-a-secret-weapon-for-tech-teams/)\n"
                "Multi-agent systems go beyond the task-oriented roles to truly super-charge development and strategy teams. "
                "Successful multi-agent systems act as a “digital twin” for your development team. "
                "Different Approaches: 1. Centralized, with one agent in the center that collects and assimilates all the other outputs. "
                "2. Distributed, where there is no central controller and the agents coordinate directly with one another in an “agent swarm. "
                "3. Hierarchical, where agents are organized in teams or hierarchical layers.\n\n"
                "LLM Model Quantisation\n"
                # Typos fixed: Distillation / precision / Quantization / inference;
                # trailing spaces added so the concatenated sentences don't run together.
                "Different Methods for Compression: Pruning, Knowledge Distillation and Quantization. "
                "Quantization process represents the model weights in lower precision which is also known as downcasting. "
                "Quantization Error is the difference in the weights of the quantized model and the original model. "
                "Advantages of Quantization: Reduced memory footprint, increased compute and speed of inference. "
                "Disadvantages of Quantization: Less precise.\n\n",
                height=400,
            )
            st.markdown("")
            submitted = st.form_submit_button("Submit")

    if submitted:
        with st.status(
            "🤖 **Agents at work...**", state="running", expanded=True
        ) as status:
            with st.container(height=500, border=False):
                result = article_recommendation_crew.kickoff(
                    inputs={
                        "interests": interests,
                        "previous_article_insights": previous_article_insights,
                    }
                )
            status.update(
                label="✅ Articles are Ready for Reading!",
                state="complete",
                expanded=False,
            )

        st.subheader("", anchor=False, divider="rainbow")

        articles_list = settings.articles.values()

        # BUG FIX: dict.values() never returns None, so the previous
        # `if articles_list is None` branch was unreachable — check for
        # an empty store instead.
        if not articles_list:
            st.markdown("No articles found.")
            return

        for article in articles_list:
            st.markdown(f"# {article['title']}")
            st.markdown(f"**URL:** [{article['url']}]({article['url']})")
            st.markdown(f"**Pitch:** {article.get('pitch', '')}")
            st.markdown(f"**Evaluation Score:** {article.get('evaluation_score', '')}")
            st.markdown(f"**Evaluation Reason:** {article.get('evaluation_reason', '')}")
            st.markdown(f"**Reason For Recommendation:** {article.get('reason_for_recommendation', '')}")
            st.markdown("---")

    if st.sidebar.button("← Back to Main Page"):
        st.session_state.page = "main"


if __name__ == "__main__":
    main()
create_article_pitch.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import streamlit as st
3
+
4
+ import utils.settings as settings
5
+
6
+ from crewai import Task
7
+ from crewai.tasks.task_output import TaskOutput
8
+ from pydantic import BaseModel
9
+ from typing import List
10
+
11
+ from agents.curiosity_catalyst import curiosity_catalyst
12
+ from tools.scrape_website import scrape_tool
13
+ from tasks.create_learning_profile import learning_profile_task
14
+ from tasks.evaluate_articles import evaluation_task
15
+
16
+
17
class PitchedArticle(BaseModel):
    """One evaluated article together with its curiosity-driven pitch."""
    title: str
    url: str
    pitch: str
21
+
22
+
23
class PitchedArticles(BaseModel):
    """Structured task output: the list of pitched articles."""
    articles: List[PitchedArticle]
25
+
26
+
27
def callback_function(output: TaskOutput):
    """Store each article's generated pitch in the shared article store.

    Parses the task's JSON output, echoes it to the page, then writes the
    pitch of every article back into ``settings.articles`` keyed by URL.
    """
    pitched = json.loads(output.exported_output)
    st.markdown(pitched)
    for entry in pitched['articles']:
        settings.articles[entry['url']]['pitch'] = entry['pitch']
    st.markdown("### Create Article Pitch is executed successfully!")
33
+
34
+
35
# Task: write a curiosity-driven pitch for every article that survived
# evaluation; results are persisted to pitched_articles.json and merged
# into settings.articles by callback_function.
article_pitch_task = Task(
    description=(
        # Prompt wording fixed: "so to that" -> "so that", stray space removed.
        "Create a pitch only for the articles that have been evaluated and no other links. "
        "Craft the pitch so that it teases the article's most intriguing aspects, "
        "by posing questions that the article might answer or "
        "highlighting surprising facts to pique the user's curiosity "
        "to read the article for incremental learning."
    ),
    expected_output=(
        # Typos fixed: "artilces" -> "articles", "been evaluated phase" -> "been evaluated".
        "List of all the articles that have been evaluated along with their url and pitch statement and no other new urls."
    ),
    output_json=PitchedArticles,
    output_file="pitched_articles.json",
    tools=[scrape_tool],
    agent=curiosity_catalyst,
    async_execution=False,
    callback=callback_function,
    context=[learning_profile_task, evaluation_task]
)
create_learning_profile.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import streamlit as st
3
+
4
+ from crewai import Task
5
+ from crewai.tasks.task_output import TaskOutput
6
+ from pydantic import BaseModel
7
+ from typing import List
8
+
9
+ from agents.learning_profiler import learning_profiler
10
+
11
+
12
class Topic(BaseModel):
    """A topic the user has read about, with the insights they captured."""
    name: str
    insights: List[str]
15
+
16
+
17
class LearningProfile(BaseModel):
    """Structured learning profile: interest areas plus per-topic learnings."""
    topics_of_interests: List[str]
    learnings: List[Topic]
20
+
21
def callback_function(output: TaskOutput):
    """Render the generated learning profile on the page.

    Shows the first listed interest, then one line per learned topic with
    that topic's first insight.
    """
    st.markdown("### Learning profile of the user:")
    profile = json.loads(output.exported_output)
    st.markdown(f"**topics_of_interests:** {profile['topics_of_interests'][0]}")
    st.markdown("**learnings:**")
    for entry in profile['learnings']:
        st.markdown(f"*{entry['name']}*:{entry['insights'][0]}")
28
+
29
# Task: build a structured LearningProfile from the user's past reading
# ({previous_article_insights} is filled in at kickoff); the JSON result is
# written to learning_profile.json and rendered by callback_function.
learning_profile_task = Task(
    description=(
        "Create a Learning profile of the user based on "
        "the following articles and insights he has read in the past: \n"
        "{previous_article_insights}"
    ),
    expected_output=(
        "A structured learning profile of the user with his interests, topics he has read about "
        "and insights he has captured on the topics."
    ),
    agent=learning_profiler,
    output_json=LearningProfile,
    output_file="learning_profile.json",
    async_execution=False,
    callback=callback_function
)
evaluate_articles.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import streamlit as st
3
+
4
+ import utils.settings as settings
5
+
6
+ from crewai import Task
7
+ from crewai.tasks.task_output import TaskOutput
8
+ from pydantic import BaseModel
9
+ from typing import List
10
+
11
+ from agents.article_evaluator import article_evaluator
12
+ from tasks.create_learning_profile import learning_profile_task
13
+ from tasks.new_article_suggestion import article_suggestion_task
14
+
15
+
16
class EvaluatedArticle(BaseModel):
    """One suggested article with its incremental-learning score (1-10) and reason."""
    title: str
    url: str
    evaluation_score: int
    evaluation_reason: str
21
+
22
+
23
class EvaluatedArticles(BaseModel):
    """Structured task output: the list of evaluated articles."""
    articles: List[EvaluatedArticle]
25
+
26
+
27
def callback_function(output: TaskOutput):
    """Merge evaluation scores and reasons into the shared article store.

    Each evaluated article is looked up in ``settings.articles`` by URL and
    annotated in place with its score and reasoning.
    """
    for evaluated in json.loads(output.exported_output)['articles']:
        stored = settings.articles[evaluated['url']]
        stored['evaluation_score'] = evaluated['evaluation_score']
        stored['evaluation_reason'] = evaluated['evaluation_reason']
    st.markdown("### Evaluate Articles task is executed successfully!")
36
+
37
+
38
# Task: score each suggested article (1-10) on how much incremental learning
# it offers relative to the user's captured insights; callback_function merges
# the scores back into settings.articles.
evaluation_task = Task(
    description=(
        # Prompt typos fixed: "artilces" -> "articles", "incremenrtal" ->
        # "incremental"; a separating space added before the final sentence.
        "Evaluate articles based on the metric does the articles provide incremental "
        "learning w.r.t the insights captured by the user. "
        "Score the articles on the scale of 1 to 10, "
        "1 being doesn't provide incremental learning and "
        "10 being provides incremental learning to the user. "
        "Evaluate only articles that have been suggested to the user and no other articles."
    ),
    expected_output=(
        "List of article titles with their URLs, evaluation scores, "
        "and evaluation reasons w.r.t insights captured by the user."
    ),
    output_json=EvaluatedArticles,
    output_file="evaluated_articles.json",
    agent=article_evaluator,
    async_execution=False,
    callback=callback_function,
    context=[learning_profile_task, article_suggestion_task]
)
gemini.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
from langchain_google_genai import ChatGoogleGenerativeAI

# NOTE(review): ensures this thread has an asyncio event loop before the
# Gemini client is constructed — presumably the client requires one at init
# time; confirm against the installed langchain-google-genai version.
try:
    # Check if there's already a running event loop
    asyncio.get_event_loop()
except RuntimeError:
    # If not, create a new event loop and install it for this thread
    asyncio.set_event_loop(asyncio.new_event_loop())

# Module-level shared Gemini chat model handle used by the agents.
llm=ChatGoogleGenerativeAI(model="gemini-1.5-flash",verbose=True)
gpt.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
from langchain_openai import ChatOpenAI

# Module-level shared OpenAI chat model; temperature 0.5 allows some variety.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.5)
helpers.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from crewai.tasks.task_output import TaskOutput
3
+
4
def streamlit_callback(step_output:TaskOutput):
    """Render a crew step's (action, observation) pairs in the Streamlit UI.

    NOTE(review): iterates ``step_output`` directly — this assumes the
    TaskOutput is iterable and yields 2-tuples; confirm against the crewai
    version in use.
    """
    st.markdown("---")
    for step in step_output:
        if isinstance(step, tuple) and len(step) == 2:
            action, observation = step

            # Display action information
            if isinstance(action, dict) and all(key in action for key in ["tool", "tool_input", "log"]):
                st.markdown(f"**Tool:** {action['tool']}")
                st.markdown(f"**Tool Input:** {action['tool_input']}")
                st.markdown(f"**Log:** {action['log']}")
                # Extra detail only when the dict also carries an "Action" key
                if "Action" in action:
                    st.markdown(f"# 📝 Processing Action...")
                    st.markdown(f"**Action:** {action['Action']}")
                    st.markdown(f"**Action Input:** ```json\n{action['tool_input']}\n```")
            elif isinstance(action, str):
                # Only the literal string 'log' is rendered; every other bare
                # string action is skipped entirely.
                if action != 'log':
                    continue
                st.markdown(f"# Action Result...")
                st.markdown(f"**Action:** {action}")
            else:
                # Fallback: render the action's string form
                st.markdown(f"**Action:** {str(action)}")

            # Display observation information
            st.markdown(f"**Thought**")
            if isinstance(observation, str):
                # Pretty-print known "Title/Link/Snippet" lines (search-result
                # format); all other lines pass through unchanged.
                observation_lines = observation.split('\n')
                for line in observation_lines:
                    if line.startswith('Title: '):
                        st.markdown(f"**Title:** {line[7:]}")
                    elif line.startswith('Link: '):
                        st.markdown(f"**Link:** {line[6:]}")
                    elif line.startswith('Snippet: '):
                        st.markdown(f"**Snippet:** {line[9:]}")
                    elif line.startswith('-'):
                        # NOTE(review): identical to the else branch — the
                        # distinction currently has no effect.
                        st.markdown(line)
                    else:
                        st.markdown(line)
            else:
                st.markdown(str(observation))
        else:
            # Not an (action, observation) pair — render the raw step
            st.markdown(step)
main.css ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Base page: white background, dark text, Arial */
body {
    background-color: white;
    color: #333;
    font-family: 'Arial', sans-serif;
    margin: 0;
    padding: 0;
}
/* Large centered blue page title */
.main-title {
    font-size: 3em;
    font-weight: bold;
    margin-top: 20px;
    text-align: center;
    color: #007bff;
}
/* Secondary centered heading */
.sub-header {
    font-size: 1.5em;
    margin: 20px 0;
    text-align: center;
    color: #333;
}
/* Fixed-height flex card with rounded corners and a subtle shadow;
   content is centered both ways */
.card {
    background-color: #f8f9fa;
    border-radius: 10px;
    padding: 20px;
    margin: 20px;
    height:300px;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
    text-align: center;
}
/* Card heading: blue accent */
.card h2 {
    font-size: 1.5em;
    color: #007bff;
}
/* Card body text: muted grey */
.card p {
    color: #666;
}
41
+
new_article_suggestion.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import streamlit as st
3
+
4
+ import utils.settings as settings
5
+
6
+ from crewai import Task
7
+ from crewai.tasks.task_output import TaskOutput
8
+ from pydantic import BaseModel
9
+ from typing import List
10
+
11
+ from agents.learning_curator import learning_curator
12
+
13
+
14
class SuggestedArticle(BaseModel):
    """One newly suggested article with the rationale for recommending it."""
    title: str
    url: str
    reason_for_recommendation: str
18
+
19
+
20
class SuggestedArticles(BaseModel):
    """Structured task output: the list of suggested articles."""
    articles: List[SuggestedArticle]
22
+
23
+
24
def callback_function(output: TaskOutput):
    """Register every newly suggested article in the shared store, keyed by URL."""
    payload = json.loads(output.exported_output)
    for suggestion in payload['articles']:
        settings.articles[suggestion['url']] = suggestion
    st.markdown("### New Article Suggestion task is executed successfully!")
29
+
30
+
31
# Task: have the learning curator find 5 recent articles matching the user's
# interests; callback_function stores each result in settings.articles by URL.
article_suggestion_task = Task(
    description=(
        "Find 5 articles from the past 10 days that align with the user's learning interests. "
        "The articles should provide incremental learning to the user based on their insights."
    ),
    expected_output=(
        "List of article titles along with their links. "
    ),
    output_json=SuggestedArticles,
    output_file="article_suggestions.json",
    agent=learning_curator,
    async_execution=False,
    callback=callback_function,
)
research_paper.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from crew.research_article_suggester import RecentArticleSuggester
3
+ from streamlit_extras.capture import stdout
4
+
5
+
6
+
7
def main():
    """Render the "Recent Article Suggester" page.

    Injects page CSS, takes a topic from the sidebar, runs
    RecentArticleSuggester on demand, and renders each result as an HTML card.
    """
    # Page-wide CSS for the title, description, and result cards
    st.markdown(
        """
        <style>
        .main {
            background-color: #f5f5f5;
            padding: 20px;
            border-radius: 10px;
        }
        .centered {
            display: flex;
            flex-direction: column;
            align-items: center;
            justify-content: center;
            text-align: center;
        }
        .title {
            font-family: 'Helvetica', sans-serif;
            font-weight: bold;
            font-size: 36px;
            color: #1f77b4;
        }
        .description {
            font-family: 'Helvetica', sans-serif;
            font-size: 18px;
            color: #333333;
            margin-top: 10px;
        }
        .subheader {
            font-family: 'Helvetica', sans-serif;
            font-weight: bold;
            font-size: 24px;
            color: #ff7f0e;
            margin-top: 20px;
        }
        .element {
            background-color: #ffffff;
            padding: 1rem;
            border-radius: 8px;
            box-shadow: 0 0 5px rgba(0, 0, 0, 0.1);
            margin-bottom: 1rem;
        }
        .element h3 {
            font-size: 24px;
            color: #1f77b4;
            margin-bottom: 0.5rem;
            text-transform: uppercase;
            padding :10px;

        }
        .element ul {
            list-style-type: none;
            padding: 0;
            margin: 0;
        }
        .element li {
            font-size: 16px;
            color: #333333;
            margin-bottom: 0.5rem;
        }
        .element li b {
            font-size: 22px;
        }

        </style>
        """,
        unsafe_allow_html=True
    )

    st.markdown("<div class='container'>", unsafe_allow_html=True)

    # Page header
    st.markdown(
        """
        <div class="centered">
            <p class="title">Recent Article Suggester</p>
            <p class="description">Discover recent articles based on your topic of interest using advanced AI.</p>
        </div>
        """,
        unsafe_allow_html=True
    )

    st.sidebar.markdown("<p class='sidebar-header'>Search for the papers you are interested in:</p>", unsafe_allow_html=True)
    topic = st.sidebar.text_input('Enter a topic:', 'GenAI', key='topic_input', help='Enter a topic of interest')

    if st.sidebar.button('Get Suggestions'):

        with st.status(
            "🤖 **Agents at work...**", state="running", expanded=True
        ) as status:
            with st.container(height=500, border=False):
                log_container = st.empty()
                # Mirror the crew's stdout logs into the Streamlit container
                with stdout(log_container.code, terminator=""):
                    suggester = RecentArticleSuggester()
                    inputs = {"topic": topic}
                    results = suggester.kickoff(inputs=inputs)
            status.update(
                label="✅ Articles are Ready for Reading!",
                state="complete",
                expanded=False,
            )

        st.subheader("", anchor=False, divider="rainbow")

        if results is None:
            st.markdown('No articles found.', unsafe_allow_html=True)
        else:

            st.markdown('<p class="subheader">Results:</p>', unsafe_allow_html=True)

            # NOTE(review): assumes each element is a dict with at least
            # 'url' and 'title' keys — confirm against RecentArticleSuggester.
            # Every non-title field becomes one list row in the card.
            for element in results:
                st.markdown(
                    f"""
                    <div class="element">
                        <h3><a href="{element['url']}" target="_blank">{element['title']}</a></h3>
                        <ul>
                            {"".join(f"<li style='font-size: 20px;'><b>{key.capitalize()}:</b> {value}</li>" for key, value in element.items() if key != "title")}
                        </ul>
                    </div>
                    """,
                    unsafe_allow_html=True
                )
    if st.sidebar.button("← Back to Main Page"):
        st.session_state.page = "main"


if __name__ == "__main__":
    main()
scrape_website.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from crewai_tools import ScrapeWebsiteTool
2
+ import requests
3
+ from bs4 import BeautifulSoup
4
+
5
+ scrape_tool = ScrapeWebsiteTool()
6
+
7
def CustomScrapeWebsiteTool(url):
    """Fetch *url* and return its visible text.

    Blank lines are dropped, then runs of spaces and space-only tokens are
    collapsed, giving a compact single-spaced text blob.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    raw = soup.get_text()
    kept_lines = [ln for ln in raw.split('\n') if ln.strip() != '']
    raw = '\n'.join(kept_lines)
    kept_words = [w for w in raw.split(' ') if w.strip() != '']
    return ' '.join(kept_words)
search_web.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
from langchain_community.tools.tavily_search import TavilySearchResults

# Tavily web-search tool shared by the agents; capped at 5 results per query.
search_tool = TavilySearchResults(max_results=5)
settings.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
def init():
    """(Re)create the module-level ``articles`` store shared across the app.

    ``articles`` maps article URL -> article metadata dict; call once at app
    startup before any task callback reads or writes it.
    """
    global articles
    articles = dict()
til_feedback.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from dotenv import load_dotenv
3
+ from crew.til import TilCrew
4
+ from streamlit_extras.capture import stdout
5
+ load_dotenv()
6
+
7
+
8
+
9
def main():
    """Render the "Today I Learnt" feedback page.

    Collects TIL bullet points, runs TilCrew over them, and displays
    per-bullet feedback with criteria/reason/suggestion for failed items.
    """
    st.markdown("<div class='container'>", unsafe_allow_html=True)

    # Page header
    st.markdown(
        """
        <div class="centered">
            <p class="title">Today I Learnt Feedback</p>
            <p class="description">Feedback on Today I Learnt</p>
        </div>
        """,
        unsafe_allow_html=True
    )
    til_content = st.text_area('Enter what you learnt today:',
                               "* Quantization is the process of reducing the size of LLM models by reducing the underlying weights.\n"
                               "* The weights are reduced by scaling down the datatypes from a datatype that takes larger space to a data type that takes a smaller space, this is also known as downcasting.\n"
                               "* Quantization offers benefits such as reduced storage space usage and faster computation.\n"
                               "* Disadvantages: Answers are less precise\n"
                               "* I learnt how to use Go Routines to handle concurrency in React.\n",
                               key='til_content', help='Enter what you learnt today')

    if st.button("Get Feedback"):
        with st.status(
            "🤖 **Analysing TIL...**", state="running", expanded=True
        ) as status:
            with st.container(height=500, border=False):
                log_container = st.empty()
                # Mirror the crew's stdout logs into the Streamlit container
                with stdout(log_container.code, terminator=""):
                    feedback = TilCrew()
                    inputs = {"content": til_content}
                    results = feedback.kickoff(inputs=inputs)["feedback"]
            status.update(
                label="✅ Feedback ready!",
                state="complete",
                expanded=False,
            )

        # One feedback entry per TIL bullet; only "not_ok" items carry
        # criteria/reason (and an optional suggestion).
        for result in results:
            st.markdown(f"#### TIL: {result['til']}")
            st.markdown(f"**Feedback:** {result['feedback']}")
            if result['feedback'] == "not_ok":
                st.markdown(f"**Criteria:** {result['feedback_criteria']}")
                st.markdown(f"**Reason:** {result['reason']}")
                if result.get('suggestion') is not None:
                    st.markdown(f"**Suggestion:** {result['suggestion']}")


if __name__ == "__main__":
    main()
til_test.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from growthy_agents.crew.til import TilCrew # type: ignore
3
+
4
+
5
# Parametrized cases: (til_text, expected per-bullet categorizations).
# Each categorization is a categorical label: 'Low' / 'Medium' / 'High'.
examples = [
    ("The sun rises in the east.", [
        {"insightful_categorization": 'Low', "factuality_categorization": 'High', "simplicity_categorization": 'High', "grammatical_categorization": 'High'}]),
    ("* Quantization is the process of reducing the size of LLM models by reducing the underlying weights.\n"
     "* In quantization the weights are reduced by scaling up the datatypes from a datatype that takes smaller space to a data type that takes a larger space, this is also known as downcasting for example downcasting from int8 to float32.\n"
     "* Advantages: takes lesser space and increases compute speed.\n"
     "* Disadvantages: Answers are less precise because of the loss of precision in the LLM model weights.\n", [
        # FIX: 'Meidum' -> 'Medium' so the label can ever match the crew's output.
        {"insightful_categorization": 'Medium', "factuality_categorization": 'High',
         "simplicity_categorization": 'High', "grammatical_categorization": 'High'},
        {"insightful_categorization": 'High', "factuality_categorization": 'Low',
         "simplicity_categorization": 'High', "grammatical_categorization": 'High'},
        {"insightful_categorization": 'High', "factuality_categorization": 'High',
         "simplicity_categorization": 'High', "grammatical_categorization": 'High'},
        {"insightful_categorization": 'High', "factuality_categorization": 'High',
         "simplicity_categorization": 'High', "grammatical_categorization": 'High'},
    ]),
]
22
+
23
+
24
@pytest.mark.parametrize("input_text, expected_categorizations", examples)
def test_llm_evaluation(input_text, expected_categorizations):
    """Run TilCrew feedback on *input_text* and compare each bullet's labels.

    The categorizations are categorical strings ('Low'/'Medium'/'High'), so
    they are compared with plain equality — pytest.approx is only meaningful
    for numeric values and its abs= tolerance cannot apply to strings.
    """
    til_crew = TilCrew()
    til_crew.content = input_text
    til_crew._gather_feedback()
    response = til_crew.feedback_results

    for idx, feedback in enumerate(response):
        expected = expected_categorizations[idx]
        for key in ("insightful_categorization", "factuality_categorization",
                    "simplicity_categorization", "grammatical_categorization"):
            assert feedback[key] == expected[key]
write_to_json.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
def write_dict_to_json(data, filename="data.json", indent=4):
    """Serialize *data* to *filename* as pretty-printed JSON.

    Errors are reported on stdout rather than raised (best-effort write).

    Args:
        data: JSON-serializable object to write.
        filename: Destination path (default "data.json").
        indent: Indentation width passed to json.dump.
    """
    try:
        with open(filename, 'w') as json_file:
            json.dump(data, json_file, indent=indent)
        # FIX: the success message previously did not include the filename.
        print(f"Successfully wrote dictionary to {filename}")
    # FIX: json.dump never raises JSONDecodeError (that's a parse error);
    # unserializable data raises TypeError/ValueError instead.
    except (IOError, TypeError, ValueError) as e:
        print(f"Error writing to JSON file: {e}")