Commit 969d7ed by simonguest · Parent: ef3b608

File move from GitHub repo
.gitignore ADDED
@@ -0,0 +1,5 @@
+ __pycache__
+ .vscode
+ .envrc
+ flagged
+ .python-version
app.py CHANGED
@@ -0,0 +1,114 @@
+ import gradio as gr
+ import sys
+ import json
+ from io import StringIO
+ from threading import Thread
+ from urllib import parse
+
+
+ from prompts import run_code_prompt, welcome_prompt
+ from tutor import chat, create_llm_chain
+ from gradio_callback import q, job_done
+
+
+ def user(user_message, history):
+     return "", history + [[user_message, None]]
+
+
+ def code(editor, output, history):
+     json_template = {"editor": editor, "output": output}
+     print(json.dumps(json_template))
+     return "", history + [[json.dumps(json_template), None]]
+
+
+ def bot(history):
+     user_message = history[-1][0]
+     try: # Check to see if the user_message contains code
+         json_template = json.loads(user_message)
+         editor = json_template["editor"]
+         output = json_template["output"]
+         history[-1][0] = "Running your code..."
+         yield history
+         user_message = run_code_prompt(editor, output)
+     except:
+         pass
+     history[-1][1] = ""
+     thread = Thread(target=chat, kwargs={"user_message": user_message})
+     thread.start()
+     while True:
+         next_token = q.get(block=True)
+         if next_token is job_done:
+             break
+         history[-1][1] += next_token
+         yield history
+     thread.join()
+
+
+ def run_code(code):
+     buffer = StringIO()
+     sys.stdout = buffer
+     try:
+         exec(code)
+         return buffer.getvalue()
+     except Exception as e:
+         print(e)
+         return buffer.getvalue()
+     finally:
+         sys.stdout = sys.__stdout__
+
+
+ def load_level(request: gr.Request):
+     try:
+         level = parse.parse_qs(parse.urlparse(request.headers["referer"]).query)[
+             "level"
+         ][0]
+     except:
+         level = "1"
+     with open(f"levels/{level}/metadata.json", "r") as f:
+         data = json.load(f)
+     instructions = open(f"levels/{level}/{data['instructions']}", "r").read()
+     starter_code = open(f"levels/{level}/{data['starter_code']}", "r").read()
+     return [instructions, starter_code]
+
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         instruction_panel = gr.Markdown()
+     with gr.Row():
+         with gr.Column(scale=1):
+             chatbot = gr.Chatbot(label="AI Tutor", value=[[None, welcome_prompt()]])
+             msg = gr.Textbox(show_label=False, placeholder="Type your message here...")
+             msg.submit(
+                 user, [msg, chatbot], [msg, chatbot], queue=False, scroll_to_output=True
+             ).then(bot, chatbot, chatbot)
+         with gr.Column(scale=2):
+             editor = gr.Code(
+                 value="Loading...",
+                 language="python",
+                 interactive=True,
+                 show_label=True,
+                 label="Code Editor",
+                 lines=15,
+             )
+             output = gr.Code(
+                 value="""Click "Run" to run your code.""",
+                 language="python",
+                 interactive=False,
+                 show_label=True,
+                 label="Output",
+             )
+             with gr.Row():
+                 run = gr.Button("Run")
+                 run.click(
+                     run_code, editor, output, queue=False, scroll_to_output=True
+                 ).then(
+                     code, [editor, output, chatbot], [msg, chatbot], queue=False
+                 ).then(
+                     bot, chatbot, chatbot
+                 )
+     demo.load(load_level, None, [instruction_panel, editor], queue=False).then(
+         create_llm_chain, [instruction_panel, editor]
+     )
+
+ demo.queue()
+ demo.launch()
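The `run_code` function above captures whatever the student's code prints by temporarily redirecting `sys.stdout` into an in-memory buffer while `exec` runs the submitted source. A minimal standalone sketch of that pattern (the `snippet` string here is a hypothetical stand-in for the editor contents):

```python
import sys
from io import StringIO

snippet = 'print("hello from the editor")'  # hypothetical student code

buffer = StringIO()
sys.stdout = buffer                  # route print() output into the buffer
try:
    exec(snippet)                    # run the submitted code
except Exception as e:
    print(e)                         # exceptions are captured as output too
finally:
    sys.stdout = sys.__stdout__      # always restore the real stdout

print(buffer.getvalue())             # -> hello from the editor
```

As in app.py, exec runs the code in-process with no sandboxing, so this approach only makes sense when the execution environment is disposable or trusted.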
gradio_callback.py ADDED
@@ -0,0 +1,36 @@
+ from langchain.callbacks.base import BaseCallbackHandler
+ from langchain.schema import LLMResult
+ from typing import Any, Union, Dict, List
+
+ from queue import Empty, SimpleQueue
+ q = SimpleQueue()
+
+ job_done = object()
+
+ class StreamingGradioCallbackHandler(BaseCallbackHandler):
+     def __init__(self, q: SimpleQueue):
+         self.q = q
+
+     def on_llm_start(
+         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+     ) -> None:
+         """Run when LLM starts running. Clean the queue."""
+         while not self.q.empty():
+             try:
+                 self.q.get(block=False)
+             except Empty:
+                 continue
+
+     def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+         """Run on new LLM token. Only available when streaming is enabled."""
+         self.q.put(token)
+
+     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
+         """Run when LLM ends running."""
+         self.q.put(job_done)
+
+     def on_llm_error(
+         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+     ) -> None:
+         """Run when LLM errors."""
+         self.q.put(job_done)
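This handler and the `bot` generator in app.py form a producer/consumer pair: the LLM callback pushes tokens onto `q` from a worker thread, and the Gradio generator drains the queue until it sees the `job_done` sentinel. A minimal sketch of that handoff, with a hypothetical `fake_stream` producer standing in for the real callback:

```python
from queue import SimpleQueue
from threading import Thread

q = SimpleQueue()
job_done = object()  # sentinel that marks the end of the stream

def fake_stream():
    # Stand-in for on_llm_new_token(): push tokens, then the sentinel.
    for token in ["Hel", "lo", ", ", "student", "!"]:
        q.put(token)
    q.put(job_done)  # on_llm_end / on_llm_error would do this

thread = Thread(target=fake_stream)
thread.start()

reply = ""
while True:
    next_token = q.get(block=True)  # block until the producer sends something
    if next_token is job_done:
        break
    reply += next_token  # app.py appends to history[-1][1] and yields here
thread.join()
print(reply)  # -> Hello, student!
```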
levels/1/instructions.md ADDED
@@ -0,0 +1,5 @@
+ # Level 1: Reverse a String using a For Loop
+
+ The objective of this exercise is to create a function that reverses a string.
+
+ The function must use a for loop to iterate through the original string, building a new string which is then returned. You can't use libraries, arrays, or slices.
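For illustration, one way an answer could satisfy these constraints is to prepend each character to an accumulator inside the for loop; this sketch is not part of the level files:

```python
def reverse_string(string):
    reversed_string = ""
    for character in string:
        # Prepending each character reverses the order without slices or libraries.
        reversed_string = character + reversed_string
    return reversed_string

print(reverse_string("Hello, world!"))  # -> !dlrow ,olleH
```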
levels/1/metadata.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "title": "A second example",
+     "language": "Python",
+     "instructions": "instructions.md",
+     "starter_code": "starter_code.py"
+ }
levels/1/starter_code.py ADDED
@@ -0,0 +1,4 @@
+ def reverse_string(string):
+     return string
+
+ print(reverse_string("Hello, world!"))
levels/2/instructions.md ADDED
@@ -0,0 +1,10 @@
+ # Level 2: Predict the Season
+
+ Your goal is to write a predict_season function that uses nested if statements to guess the season. You are given two variables, temp (an integer) and is_raining (a boolean).
+
+ - If temp is less than 65 and is_raining is True, it's Winter.
+ - If temp is more than 65 and is_raining is True, it's Spring.
+ - If temp is more than 65 and is_raining is False, it's Summer.
+ - If temp is less than 65 and is_raining is False, it's Fall.
+
+ Complete the predict_season function to predict the season and print it to the console.
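For illustration, a nested-if answer could look like the sketch below; the rules above leave a temp of exactly 65 unspecified, so grouping it with the "less than 65" cases is an assumption of this sketch, which is not part of the level files:

```python
def predict_season(temp, is_raining):
    # Assumption: a temp of exactly 65 is grouped with the "less than 65" cases.
    if temp <= 65:
        if is_raining:
            return "Winter"
        else:
            return "Fall"
    else:
        if is_raining:
            return "Spring"
        else:
            return "Summer"

print(predict_season(70, False))  # -> Summer
```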
levels/2/metadata.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "title": "Predict the Season",
+     "language": "Python",
+     "instructions": "instructions.md",
+     "starter_code": "starter_code.py"
+ }
levels/2/starter_code.py ADDED
@@ -0,0 +1,5 @@
+ def predict_season(temp, is_raining):
+     # Add your nested if statements here!
+     return "Summer"
+
+ print(predict_season(70, False))
levels/3/instructions.md ADDED
@@ -0,0 +1,20 @@
+ # Level 3: Cakes, Cookies, and Pies in 2D Arrays
+
+ The owner of the Project Mercury Pastries Food Truck wants to find the total inventory for each dessert. They have the following values:
+
+ ```
+ 25 17 22
+ 18 12 15
+ 21 19 27
+ 30 10 23
+ ```
+
+ Each row represents a unique food truck in the business. Each column represents the number of cakes, cookies, and pies, respectively.
+
+ Write a method that totals each column and displays the total for each category:
+
+ ```
+ Cakes: 94
+ Cookies: 58
+ Pies: 87
+ ```
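For illustration, the column totals can be produced with nested loops over the 2D list from starter_code.py; this sketch is not part of the level files, and the category names are taken from the expected output above:

```python
inventory = [[25, 17, 22], [18, 12, 15], [21, 19, 27], [30, 10, 23]]
categories = ["Cakes", "Cookies", "Pies"]

def print_totals(inventory):
    totals = [0] * len(categories)
    for truck in inventory:                     # each row is one food truck
        for column, count in enumerate(truck):  # each column is one dessert
            totals[column] += count
    for name, total in zip(categories, totals):
        print(f"{name}: {total}")

print_totals(inventory)  # Cakes: 94, Cookies: 58, Pies: 87
```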
levels/3/metadata.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "title": "Cakes, Cookies, and Pies in 2D Arrays",
+     "language": "Python",
+     "instructions": "instructions.md",
+     "starter_code": "starter_code.py"
+ }
levels/3/starter_code.py ADDED
@@ -0,0 +1,2 @@
+ inventory = [[25, 17, 22], [18, 12, 15], [21, 19, 27], [30, 10, 23]]
+ print(inventory)
prompts.py ADDED
@@ -0,0 +1,40 @@
+ from langchain import PromptTemplate
+
+
+ def system_prompt(instructions, starter_code):
+     template = f"""
+ You are a computer science teacher helping a student learn computer science. You are friendly and want your students to succeed.
+
+ The student has been asked to complete the following exercise in Python:
+ ```
+ {instructions}
+ ```
+ They have been given the following starter code:
+ ```
+ {starter_code}
+ ```
+ They will ask you for help. You may help them, but you must not give them the answer. You may only give them hints. You must take a socratic approach to helping them.
+
+ Do not let the student deviate from the task at hand. If they ask you a question that is not related to the task at hand, you must redirect them to the task at hand.
+
+ {{chat_history}}
+ Student: {{student_input}}
+ Teacher:"""
+     return PromptTemplate(
+         input_variables=["chat_history", "student_input"], template=template
+     )
+
+ def run_code_prompt(editor, output):
+     return f"""
+ I've written this code:
+ ```
+ {editor}
+ ```
+ And it produces this output:
+ ```
+ {output}
+ ```
+ """
+
+ def welcome_prompt():
+     return "Hi! I'm your AI-powered computer science tutor. Feel free to ask me any questions as you work on this level. I'll also try and provide help when you run your code."
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ openai==0.27.8
+ langchain==0.0.225
+ langchainplus-sdk==0.0.20
tutor.py ADDED
@@ -0,0 +1,32 @@
+ import os
+
+ from langchain.memory import ConversationBufferMemory
+ from langchain import LLMChain
+ from langchain.chat_models import ChatOpenAI
+
+ from gradio_callback import StreamingGradioCallbackHandler, q
+ from prompts import system_prompt
+
+ llm = ChatOpenAI(
+     streaming=True,
+     callbacks=[StreamingGradioCallbackHandler(q)],
+     temperature=0.0,
+     model="gpt-4",
+     openai_api_key=os.getenv("OPENAI_API_KEY"),
+ )
+
+ memory = ConversationBufferMemory(memory_key="chat_history")
+
+ llm_chain = None
+
+ def chat(user_message):
+     return llm_chain.predict(student_input=user_message)
+
+ def create_llm_chain(instructions, starter_code):
+     global llm_chain
+     llm_chain = LLMChain(
+         llm=llm,
+         prompt=system_prompt(instructions, starter_code),
+         verbose=True,
+         memory=memory,
+     )