Files changed (1) hide show
  1. app.py +67 -1
app.py CHANGED
@@ -153,4 +153,70 @@ else:
153
 
154
  # Add a footer
155
  st.markdown("---")
156
- st.markdown("By AI Planet")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Footer rendered at the bottom of the page.
st.markdown("---")
st.markdown("By AI Planet")
# --- TruLens evaluation setup -------------------------------------------
# Third-party imports used to score the RAG pipeline.
import numpy as np
from trulens_eval import Feedback, Select, Tru, TruCustomApp
from trulens_eval.feedback.provider.openai import OpenAI
from trulens_eval.tru_custom_app import instrument

# Initialise the TruLens session.
tru = Tru()
# NOTE(review): this runs at module import time, and Streamlit re-executes
# the whole script on every user interaction — so the feedback database is
# wiped on each rerun. Confirm that losing prior scores is intentional.
tru.reset_database()
def append_to_csv(user_query, provider, retriever, csv_path='relevance_scores.csv'):
    """Evaluate one RAG query with TruLens and append its scores to a CSV.

    Args:
        user_query: The question to run through the retriever.
        provider: A TruLens feedback provider (e.g. ``OpenAI(model_engine=...)``)
            used to score groundedness and relevance.
        retriever: The retriever under evaluation; it is wrapped in a
            ``TruCustomApp`` and queried via ``get_relevant_documents``.
        csv_path: Path of the CSV the score row is appended to. The file is
            created with a header row if it does not yet exist.
    """
    # Groundedness: is the output supported by the retrieved context?
    f_groundedness = (
        Feedback(provider.groundedness_measure_with_cot_reasons, name="Groundedness")
        .on(Select.RecordCalls.retrieve.rets.collect())
        .on_output()
    )

    # Answer relevance: does the output address the input question?
    f_answer_relevance = (
        Feedback(provider.relevance_with_cot_reasons, name="Answer Relevance")
        .on_input()
        .on_output()
    )

    # Context relevance: is each retrieved chunk relevant to the question?
    # Per-chunk scores are aggregated with the mean.
    f_context_relevance = (
        Feedback(provider.context_relevance_with_cot_reasons, name="Context Relevance")
        .on_input()
        .on(Select.RecordCalls.retrieve.rets[:])
        .aggregate(np.mean)  # choose a different aggregation method if you wish
    )

    tru_rag = TruCustomApp(
        retriever,
        app_id='RAG v1',
        feedbacks=[f_groundedness, f_answer_relevance, f_context_relevance],
    )

    # Run the query inside the recorder so the feedback functions fire.
    # NOTE(review): the selectors above reference a `retrieve` method, but the
    # call below is `get_relevant_documents` — confirm the instrumented method
    # name on the retriever actually matches the selectors.
    with tru_rag as recording:
        llm_response = retriever.get_relevant_documents(user_query)

    rec = recording.get()
    # Block until every feedback function has produced a result.
    feedback_results = rec.wait_for_feedback_results()
    feedback_scores = {
        feedback.name: feedback_result.result
        for feedback, feedback_result in feedback_results.items()
    }

    # One row per evaluated query. `pd` and `os` are assumed to be imported
    # at the top of this file — TODO confirm.
    df = pd.DataFrame({
        "User Query": [user_query],
        "LLM Response": [llm_response],
        "Answer Relevance": [feedback_scores.get("Answer Relevance")],
        "Context Relevance": [feedback_scores.get("Context Relevance")],
        "Groundedness": [feedback_scores.get("Groundedness")],
    })

    # Append mode creates the file if missing; write the header only then.
    df.to_csv(csv_path, mode='a', header=not os.path.isfile(csv_path), index=False)
# Score the current prompt against the retriever and log the result to
# relevance_scores.csv.
# NOTE(review): a fresh OpenAI provider is constructed on every script rerun —
# consider caching it (e.g. st.cache_resource) if this becomes a bottleneck.
append_to_csv(prompt, OpenAI(model_engine="gpt-4o"), retriever)