gerglitzen commited on
Commit
78247b5
·
1 Parent(s): 0ab9543
Files changed (1) hide show
  1. callback_handler.py +44 -0
callback_handler.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ from typing import Any
3
+
4
+
5
class BaseCallbackHandler:
    """Base class defining the callback hooks for the openai function agent.

    Subclasses override any subset of these hooks; every default
    implementation is an intentional no-op.
    """

    def on_llm_start(self, **kwargs: Any) -> None:
        """Hook invoked when the LLM starts running; default does nothing."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Hook invoked for each streamed token (streaming mode only);
        default does nothing."""

    def on_llm_end(self, response: str, **kwargs: Any) -> None:
        """Hook invoked when the LLM finishes running; default does nothing."""
16
+
17
+
18
+ class PrintingCallBackHandler(BaseCallbackHandler):
19
+ """Callback handler to call print function"""
20
+
21
+ def on_llm_start(self, **kwargs: Any) -> None:
22
+ """Run when LLM starts running."""
23
+
24
+ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
25
+ """Run on new LLM token. Only available when streaming is enabled."""
26
+ print(token)
27
+
28
+ def on_llm_end(self, response: str, **kwargs: Any) -> None:
29
+ """Run when LLM ends running."""
30
+
31
+
32
+ class StreamingStdOutCallBackHandler(BaseCallbackHandler):
33
+ """Callback handler to call print function"""
34
+
35
+ def on_llm_start(self, **kwargs: Any) -> None:
36
+ """Run when LLM starts running."""
37
+
38
+ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
39
+ """Run on new LLM token. Only available when streaming is enabled."""
40
+ sys.stdout.write(token)
41
+ sys.stdout.flush()
42
+
43
+ def on_llm_end(self, response: str, **kwargs: Any) -> None:
44
+ """Run when LLM ends running."""