simonguest committed on
Commit
fbd6b13
·
1 Parent(s): 5415e57

Completed conversation from LangChain

Browse files
Files changed (6) hide show
  1. app.py +6 -7
  2. gradio_callback.py +0 -36
  3. requirements.txt +0 -2
  4. test.py +0 -9
  5. tutor.py +115 -24
  6. tutor2.py +0 -114
app.py CHANGED
@@ -2,22 +2,21 @@ import gradio as gr
2
  import sys
3
  import json
4
  from io import StringIO
5
- from threading import Thread
6
  from urllib import parse
7
 
8
- from tutor2 import Tutor
9
 
10
 
11
  def submit_chat(tutor_ctx, message):
12
  tutor = Tutor(context=tutor_ctx)
13
- tutor.chat(message)
14
- return [tutor.serialize(), tutor._memory_as_history(), None]
15
-
16
 
17
  def submit_code(tutor_ctx, editor, output):
18
  tutor = Tutor(context=tutor_ctx)
19
- tutor.code(editor, output)
20
- return [tutor.serialize(), tutor._memory_as_history()]
21
 
22
 
23
  def init_tutor(instructions, starter_code):
 
2
  import sys
3
  import json
4
  from io import StringIO
 
5
  from urllib import parse
6
 
7
+ from tutor import Tutor
8
 
9
 
10
  def submit_chat(tutor_ctx, message):
11
  tutor = Tutor(context=tutor_ctx)
12
+ for history in tutor.chat(message):
13
+ yield [tutor.serialize(), history, None]
14
+
15
 
16
  def submit_code(tutor_ctx, editor, output):
17
  tutor = Tutor(context=tutor_ctx)
18
+ for history in tutor.code(editor, output):
19
+ yield [tutor.serialize(), history, None]
20
 
21
 
22
  def init_tutor(instructions, starter_code):
gradio_callback.py DELETED
@@ -1,36 +0,0 @@
1
- from langchain.callbacks.base import BaseCallbackHandler
2
- from langchain.schema import LLMResult
3
- from typing import Any, Union, Dict, List
4
-
5
- from queue import Empty, SimpleQueue
6
- q = SimpleQueue()
7
-
8
- job_done = object()
9
-
10
- class StreamingGradioCallbackHandler(BaseCallbackHandler):
11
- def __init__(self, q: SimpleQueue):
12
- self.q = q
13
-
14
- def on_llm_start(
15
- self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
16
- ) -> None:
17
- """Run when LLM starts running. Clean the queue."""
18
- while not self.q.empty():
19
- try:
20
- self.q.get(block=False)
21
- except Empty:
22
- continue
23
-
24
- def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
25
- """Run on new LLM token. Only available when streaming is enabled."""
26
- self.q.put(token)
27
-
28
- def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
29
- """Run when LLM ends running."""
30
- self.q.put(job_done)
31
-
32
- def on_llm_error(
33
- self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
34
- ) -> None:
35
- """Run when LLM errors."""
36
- self.q.put(job_done)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,3 +1 @@
1
  openai==0.27.8
2
- langchain==0.0.225
3
- langchainplus-sdk==0.0.20
 
1
  openai==0.27.8
 
 
test.py DELETED
@@ -1,9 +0,0 @@
1
- from tutor2 import Tutor, STUDENT, TEACHER
2
-
3
- tutor = Tutor("instructions", "print('hello world')")
4
- #tutor.chat("hello")
5
- #tutor.chat("How does a for loop work in Python?")
6
- # tutor.chat("hi", role=TEACHER)
7
- tutor.code("print('hello world')", "hello world")
8
- # tutor.chat("not working", role=TEACHER)
9
- print(tutor._memory_as_history())
 
 
 
 
 
 
 
 
 
 
tutor.py CHANGED
@@ -1,32 +1,123 @@
 
1
  import os
 
2
 
3
- from langchain.memory import ConversationBufferMemory
4
- from langchain import LLMChain
5
- from langchain.chat_models import ChatOpenAI
6
 
7
- from gradio_callback import StreamingGradioCallbackHandler, q
8
- from prompts import system_prompt
9
 
10
- llm = ChatOpenAI(
11
- streaming=True,
12
- callbacks=[StreamingGradioCallbackHandler(q)],
13
- temperature=0.0,
14
- model="gpt-4",
15
- openai_api_key=os.getenv("OPENAI_API_KEY"),
16
- )
17
 
18
- memory = ConversationBufferMemory(memory_key="chat_history")
 
 
 
 
 
 
 
 
 
 
19
 
20
- llm_chain = None
 
 
 
 
 
 
21
 
22
- def chat(user_message):
23
- return llm_chain.predict(student_input=user_message)
 
 
 
 
 
 
24
 
25
- def create_llm_chain(instructions, starter_code):
26
- global llm_chain
27
- llm_chain = LLMChain(
28
- llm=llm,
29
- prompt=system_prompt(instructions, starter_code),
30
- verbose=True,
31
- memory=memory,
32
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai
2
  import os
3
+ from prompts import system_prompt, welcome_prompt, run_code_prompt, is_code_prompt
4
 
5
+ SYSTEM = "system"
6
+ TEACHER = "assistant"
7
+ STUDENT = "user"
8
 
 
 
9
 
10
+ class Tutor:
11
+ def __init__(self, instructions="", starter_code="", context=None, debug=False):
12
+ self.model = "gpt-4"
13
+ self.temperature = 0.0
14
+ self.api_key = os.getenv("OPENAI_API_KEY")
15
+ self.debug = debug
 
16
 
17
+ if context is not None:
18
+ self.deserialize(context)
19
+ else:
20
+ self.memory = []
21
+ # self.system_prompt = system_prompt(instructions, starter_code)
22
+ self.instructions = instructions
23
+ self.starter_code = starter_code
24
+ self.memory.append(
25
+ {SYSTEM: system_prompt(self.instructions, self.starter_code)}
26
+ )
27
+ self.memory.append({TEACHER: welcome_prompt()})
28
 
29
+ def serialize(self):
30
+ # Return memory as a dictionary
31
+ return {
32
+ "instructions": self.instructions,
33
+ "starter_code": self.starter_code,
34
+ "memory": self.memory,
35
+ }
36
 
37
+ def deserialize(self, data):
38
+ # Make sure incoming data is a dictionary containing "memory"
39
+ if isinstance(data, dict) and "memory" in data:
40
+ self.memory = data["memory"]
41
+ self.instructions = data["instructions"]
42
+ self.starter_code = data["starter_code"]
43
+ else:
44
+ raise ValueError("Input must be a dictionary containing 'memory'")
45
 
46
+ def _gpt(self):
47
+ return openai.ChatCompletion.create(
48
+ model=self.model,
49
+ api_key=self.api_key,
50
+ temperature=self.temperature,
51
+ messages=self._memory_as_openai_messages(),
52
+ stream=True,
53
+ )
54
+
55
+ def chat(self, message, role=STUDENT, request=True):
56
+ self.memory.append({role: message})
57
+ yield self._memory_as_history()
58
+ # Pre-append an empty teacher messages so that we can stream the result
59
+ self.memory.append({TEACHER: ""})
60
+ if request:
61
+ for token in self._gpt():
62
+ if token.choices[0].finish_reason != "stop":
63
+ self.memory[-1][TEACHER] = (
64
+ self.memory[-1][TEACHER] + token.choices[0].delta.content
65
+ )
66
+ yield self._memory_as_history()
67
+
68
+ def code(self, editor, output, request=True):
69
+ self.memory.append({STUDENT: run_code_prompt(editor, output)})
70
+ yield self._memory_as_history()
71
+ # Pre-append an empty teacher messages so that we can stream the result
72
+ self.memory.append({TEACHER: ""})
73
+ if request:
74
+ for token in self._gpt():
75
+ if token.choices[0].finish_reason != "stop":
76
+ self.memory[-1][TEACHER] = (
77
+ self.memory[-1][TEACHER] + token.choices[0].delta.content
78
+ )
79
+ yield self._memory_as_history()
80
+
81
+ def _memory_as_string(self):
82
+ # Convert memory to a formatted string
83
+ memory_string = ""
84
+ for entry in self.memory:
85
+ for role, message in entry.items():
86
+ memory_string += f"{role}: {message}\n"
87
+ return memory_string
88
+
89
+ def _memory_as_history(self):
90
+ # Convert memory to a list of message pairs
91
+ history = []
92
+ for i in range(0, len(self.memory), 2): # Step by 2, as we need pairs
93
+ # Get messages, ignoring role
94
+ if i == 0:
95
+ message1 = None # Skip the system prompt
96
+ else:
97
+ message1 = list(self.memory[i].values())[0]
98
+ if message1 is not None and is_code_prompt(message1):
99
+ message1 = "Running your code..."
100
+ # If there's a next message, get it, else use an empty string
101
+ message2 = (
102
+ list(self.memory[i + 1].values())[0]
103
+ if i + 1 < len(self.memory)
104
+ else None
105
+ )
106
+ if message2 is not None and is_code_prompt(message2):
107
+ message2 = "Running your code..."
108
+ history.append([message1, message2])
109
+ return history
110
+
111
+ def _memory_as_openai_messages(self):
112
+ # Convert memory to a list of OpenAI style messages
113
+ messages = []
114
+ messages.append(
115
+ {
116
+ "role": SYSTEM,
117
+ "content": system_prompt(self.instructions, self.starter_code),
118
+ }
119
+ )
120
+ for entry in self.memory:
121
+ for role, message in entry.items():
122
+ messages.append({"role": role, "content": message})
123
+ return messages
tutor2.py DELETED
@@ -1,114 +0,0 @@
1
- import openai
2
- import os
3
- from prompts import system_prompt, welcome_prompt, run_code_prompt, is_code_prompt
4
-
5
- SYSTEM = "system"
6
- TEACHER = "assistant"
7
- STUDENT = "user"
8
-
9
-
10
- class Tutor:
11
- def __init__(self, instructions="", starter_code="", context=None, debug=False):
12
- self.model = "gpt-4"
13
- self.temperature = 0.0
14
- self.api_key = os.getenv("OPENAI_API_KEY")
15
- self.debug = debug
16
-
17
- if context is not None:
18
- self.deserialize(context)
19
- else:
20
- self.memory = []
21
- # self.system_prompt = system_prompt(instructions, starter_code)
22
- self.instructions = instructions
23
- self.starter_code = starter_code
24
- self.memory.append(
25
- {SYSTEM: system_prompt(self.instructions, self.starter_code)}
26
- )
27
- self.memory.append({TEACHER: welcome_prompt()})
28
-
29
- def serialize(self):
30
- # Return memory as a dictionary
31
- return {
32
- "instructions": self.instructions,
33
- "starter_code": self.starter_code,
34
- "memory": self.memory,
35
- }
36
-
37
- def deserialize(self, data):
38
- # Make sure incoming data is a dictionary containing "memory"
39
- if isinstance(data, dict) and "memory" in data:
40
- self.memory = data["memory"]
41
- self.instructions = data["instructions"]
42
- self.starter_code = data["starter_code"]
43
- else:
44
- raise ValueError("Input must be a dictionary containing 'memory'")
45
-
46
- def chat(self, message, role=STUDENT, request=True):
47
- # Append incoming role and message to memory as a dictionary
48
- self.memory.append({role: message})
49
- # Make the call to OpenAI and append the response to memory
50
- if request:
51
- response = openai.ChatCompletion.create(
52
- model=self.model,
53
- api_key=self.api_key,
54
- temperature=self.temperature,
55
- messages=self._memory_as_openai_messages(),
56
- )
57
- self.memory.append({TEACHER: response.choices[0].message.content})
58
-
59
- def code(self, editor, output, request=True):
60
- # Run the code and append the output to memory
61
- self.memory.append({STUDENT: run_code_prompt(editor, output)})
62
- # Make the call to OpenAI and append the response to memory
63
- if request:
64
- response = openai.ChatCompletion.create(
65
- model=self.model,
66
- api_key=self.api_key,
67
- temperature=self.temperature,
68
- messages=self._memory_as_openai_messages(),
69
- )
70
- self.memory.append({TEACHER: response.choices[0].message.content})
71
-
72
- def _memory_as_string(self):
73
- # Convert memory to a formatted string
74
- memory_string = ""
75
- for entry in self.memory:
76
- for role, message in entry.items():
77
- memory_string += f"{role}: {message}\n"
78
- return memory_string
79
-
80
- def _memory_as_history(self):
81
- # Convert memory to a list of message pairs
82
- history = []
83
- for i in range(0, len(self.memory), 2): # Step by 2, as we need pairs
84
- # Get messages, ignoring role
85
- if i == 0:
86
- message1 = None # Skip the system prompt
87
- else:
88
- message1 = list(self.memory[i].values())[0]
89
- if message1 is not None and is_code_prompt(message1):
90
- message1 = "Running your code..."
91
- # If there's a next message, get it, else use an empty string
92
- message2 = (
93
- list(self.memory[i + 1].values())[0]
94
- if i + 1 < len(self.memory)
95
- else None
96
- )
97
- if message2 is not None and is_code_prompt(message2):
98
- message2 = "Running your code..."
99
- history.append([message1, message2])
100
- return history
101
-
102
- def _memory_as_openai_messages(self):
103
- # Convert memory to a list of OpenAI style messages
104
- messages = []
105
- messages.append(
106
- {
107
- "role": SYSTEM,
108
- "content": system_prompt(self.instructions, self.starter_code),
109
- }
110
- )
111
- for entry in self.memory:
112
- for role, message in entry.items():
113
- messages.append({"role": role, "content": message})
114
- return messages