Jorge Londoño committed on
Commit ·
7e500a4
1
Parent(s): fe33b0a
Updates
Browse files- .gitignore +4 -0
- Dockerfile +19 -0
- Packages.md +20 -0
- app.py +123 -0
- config.json +13 -0
- requirements.txt +51 -0
.gitignore
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.venv
|
| 2 |
+
.env
|
| 3 |
+
**/__pycache__
|
| 4 |
+
.aider*
|
Dockerfile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11

# Force a UTF-8 locale so Python text I/O handles the accented Spanish
# prompt text consistently inside the container.
ENV LC_ALL=en_US.UTF-8
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US.UTF-8

# Run as an unprivileged user (uid 1000 is the conventional first host user).
RUN useradd -m -u 1000 mesop
USER mesop
# pip installs for a non-root user land in ~/.local/bin; expose them on PATH.
ENV PATH="/home/mesop/.local/bin:$PATH"

WORKDIR /app
# config.json is read at import time by app.py, so it must ship with the code.
COPY requirements.txt config.json app.py /app/
RUN pip install --upgrade pip
RUN pip install --no-cache-dir --upgrade -r requirements.txt


# Run Mesop through gunicorn. Should be available at localhost:8080
# CMD ["mesop", "/app/app.py", "--port", "8080", "--logtostderr"]
CMD ["gunicorn", "--bind", "0.0.0.0:8080", "app:me"]
|
Packages.md
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Required Packages
|
| 2 |
+
|
| 3 |
+
```sh
|
| 4 |
+
python3.11 -m venv --clear .venv # Clear .venv folder before proceeding
|
| 5 |
+
source .venv/bin/activate
|
| 6 |
+
|
| 7 |
+
pip install --upgrade pip
|
| 8 |
+
pip install -U python-dotenv pydantic typer google-generativeai mesop gunicorn
|
| 9 |
+
pip freeze > requirements.txt
|
| 10 |
+
```
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
Building and running the docker image
|
| 14 |
+
```sh
|
| 15 |
+
docker build -t dsa_tutor .
|
| 16 |
+
docker run --rm -p 9080:8080 --name tutor dsa_tutor
|
| 17 |
+
docker exec -it tutor bash
|
| 18 |
+
```
|
| 19 |
+
|
| 20 |
+
[Go to the DSA Tutor](http://localhost:9080)
|
app.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mesop app.py --port 8080
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
import mesop as me
|
| 6 |
+
import mesop.labs as mel
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
import google.generativeai as genai
|
| 9 |
+
from google.generativeai.types.generation_types import GenerateContentResponse
|
| 10 |
+
from typing import Generator
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Fallbacks used when the corresponding environment variables
# (CHAT_CONFIG_PATH, MODEL_NAME) are unset.
DEFAULT_CONFIG_PATH = "./config.json"
DEFAULT_MODEL_NAME = "learnlm-1.5-pro-experimental"

# Load environment variables from .env file (e.g. GOOGLE_API_KEY)
load_dotenv()

# Maps mesop chat roles to the role names the Gemini API expects
# ("assistant" turns become "model" turns).
rolemap = {"user": "user", "assistant": "model"}


# NOTE(review): getenv returns None if GOOGLE_API_KEY is unset; genai
# only fails later, at the first request — confirm the key is provided.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Create the model
# Default sampling parameters; the "generation_config" section of
# config.json, when present, overrides these (see _load_config).
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}


# Load config once
# Parsed config.json contents; stays None when the file cannot be read.
_config: dict|None = None
|
| 36 |
+
def _load_config() -> None:
    """Load config.json into the module-level ``_config`` dict.

    The path comes from the CHAT_CONFIG_PATH environment variable,
    falling back to DEFAULT_CONFIG_PATH. On success, any
    "generation_config" section in the file overrides the module-level
    defaults in ``generation_config``. A missing or malformed file is
    reported on stdout and leaves the defaults intact (``_config``
    stays None in that case).
    """
    global _config
    config_path = os.environ.get("CHAT_CONFIG_PATH", DEFAULT_CONFIG_PATH)
    try:
        # Explicit encoding: config.json contains accented Spanish text
        # and must decode identically on every platform.
        with open(config_path, 'r', encoding="utf-8") as f:
            _config = json.load(f)
        if _config:
            # Merge rather than replace: file values win, unspecified
            # keys keep their module-level defaults.
            generation_config.update(_config.get('generation_config', {}))
    except FileNotFoundError:
        print(f"Warning: Could not read config file at: {config_path}")
    except json.JSONDecodeError as e:
        print(f"Error parsing config file: {e}")

_load_config()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Fail soft when config.json could not be loaded: _load_config leaves
# _config as None after a FileNotFoundError, and the original
# `_config['prompt']['es']` would then raise TypeError at import time.
# Fall back to no system instruction instead of crashing.
_system_instruction = (_config or {}).get('prompt', {}).get('es')

model = genai.GenerativeModel(
    model_name=os.environ.get("MODEL_NAME", DEFAULT_MODEL_NAME),
    generation_config=generation_config,
    # Spanish tutoring prompt from config.json; None disables the
    # system instruction when the config is missing.
    system_instruction=_system_instruction,
)
|
| 60 |
+
|
| 61 |
+
# TODO This function is not being called, why???
# NOTE(review): mesop only invokes load handlers that are registered on a
# page, i.e. @me.page(..., on_load=on_load); defining the function alone
# does nothing — wire it up on the page decorator or remove it.
def on_load(e: me.LoadEvent):
    # Placeholder load hook; currently just logs.
    print("***On load event***")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@me.stateclass
class FirstState:
    """Per-session UI state holding the one-shot start prompt."""
    # Initial prompt captured from the ?start_prompt= query parameter in
    # page(); consumed by transform() as the first user message.
    first:str|None = None
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@me.page(
    security_policy=me.SecurityPolicy(
        allowed_iframe_parents=["https://google.github.io", "https://huggingface.co"]
    ),
    path="/",
    title="Mesop Demo Chat",
)
def page():
    """Root page: capture an optional ?start_prompt= and render the chat UI."""
    if not _config:
        # No config file was loaded; fall back to a hard-coded greeting.
        print("Config not loaded, using default values.")
        me.text("Welcome to the Chat (Default)")
    elif "welcome_message" not in _config:
        print("Error: 'welcome_message' not found in config file.")

    # Move a one-shot start prompt from the URL into per-session state,
    # deleting it from the query string so a reload does not resend it.
    state = me.state(FirstState)
    if 'start_prompt' in me.query_params:
        prompt: str = me.query_params['start_prompt']
        del me.query_params['start_prompt']
        state.first = prompt

    mel.chat(transform, title="DSA Tutor", bot_user="Tutor")
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def transform(input: str, history: list[mel.ChatMessage]) -> Generator[str, None, None]:
    """Stream the model's reply to *input* given the chat *history*.

    Rebuilds the Gemini chat history from mesop's message list (mapping
    mesop roles to Gemini roles via ``rolemap``), prepends the one-shot
    start prompt saved in FirstState when present, then yields the
    response chunk by chunk so the UI renders it incrementally.
    """
    messages = []
    state = me.state(FirstState)
    if state.first:
        # Inject the URL-provided start prompt as the first user turn.
        messages.append({"role": "user", "parts": [state.first]})
    messages.extend([
        {"role": rolemap[message.role], "parts": [message.content]}
        for message in history
    ])
    chat_session = model.start_chat(history=messages)
    response:GenerateContentResponse = chat_session.send_message(input, stream=True)
    # Yield each streamed chunk as it arrives; the previous `text`
    # accumulator was never read, so the full reply is not buffered.
    for chunk in response:
        yield chunk.text
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
|
config.json
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"welcome_message": "Welcome to the DSA Tutor",
|
| 3 |
+
"generation_config": {
|
| 4 |
+
"temperature": 0.1,
|
| 5 |
+
"top_p": 0.9,
|
| 6 |
+
"top_k": 64,
|
| 7 |
+
"max_output_tokens": 8192
|
| 8 |
+
},
|
| 9 |
+
"prompt": {
|
| 10 |
+
"en": "You are a tutor helping a student prepare for a test. If not provided by the\nstudent, ask them what subject and at what level they want to be tested on.\nThen,\n\nGenerate practice questions. Start simple, then make questions more\n difficult if the student answers correctly.\n Prompt the student to explain the reason for their answer choice. Do not\n debate the student.\n After the student explains their choice, affirm their correct answer or\n guide the student to correct their mistake.\n If a student requests to move on to another question, give the correct\n answer and move on.\n If the student requests to explore a concept more deeply, chat with them to\n help them construct an understanding.\n After 5 questions ask the student if they would like to continue with more\n questions or if they would like a summary of their session. If they ask for\n a summary, provide an assessment of how they have done and where they should\n focus studying.",
|
| 11 |
+
"es": "Eres un tutor que ayuda al estudiante a prepararse para el examen. Si el estudiante no proporciona el tema, pregúntale qué tema y a qué nivel quiere ser\nexaminado. Luego,\n\nGenera preguntas de práctica. Comienza de forma sencilla, luego haz las\n preguntas más difíciles si el estudiante responde correctamente.\n Pide al estudiante que explique la razón de su elección de respuesta. No\n debatas con el estudiante.\n Después de que el estudiante explique su elección, afirma su respuesta\ncorrecta o guía al estudiante para que corrija su error.\n Si un estudiante solicita pasar a otra pregunta, da la respuesta correcta y\n pasa a la siguiente.\n Si el estudiante solicita explorar un concepto más profundamente, conversa\n con él para ayudarlo a construir una comprensión.\n Después de 5 preguntas, pregunta al estudiante si desea continuar con más\n preguntas o si desea un resumen de su sesión. Si pide un resumen, proporciona\n una evaluación de cómo le ha ido y en qué debe centrarse para estudiar."
|
| 12 |
+
}
|
| 13 |
+
}
|
requirements.txt
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
absl-py==2.1.0
|
| 2 |
+
annotated-types==0.7.0
|
| 3 |
+
blinker==1.9.0
|
| 4 |
+
cachetools==5.5.0
|
| 5 |
+
certifi==2024.12.14
|
| 6 |
+
charset-normalizer==3.4.1
|
| 7 |
+
click==8.1.8
|
| 8 |
+
deepdiff==6.7.1
|
| 9 |
+
Flask==3.1.0
|
| 10 |
+
google-ai-generativelanguage==0.6.15
|
| 11 |
+
google-api-core==2.24.0
|
| 12 |
+
google-api-python-client==2.159.0
|
| 13 |
+
google-auth==2.37.0
|
| 14 |
+
google-auth-httplib2==0.2.0
|
| 15 |
+
google-generativeai==0.8.5
|
| 16 |
+
googleapis-common-protos==1.66.0
|
| 17 |
+
grpcio==1.69.0
|
| 18 |
+
grpcio-status==1.69.0
|
| 19 |
+
gunicorn==23.0.0
|
| 20 |
+
httplib2==0.22.0
|
| 21 |
+
idna==3.10
|
| 22 |
+
itsdangerous==2.2.0
|
| 23 |
+
Jinja2==3.1.5
|
| 24 |
+
markdown-it-py==3.0.0
|
| 25 |
+
MarkupSafe==3.0.2
|
| 26 |
+
mdurl==0.1.2
|
| 27 |
+
mesop==1.0.1
|
| 28 |
+
msgpack==1.1.0
|
| 29 |
+
ordered-set==4.1.0
|
| 30 |
+
packaging==24.2
|
| 31 |
+
proto-plus==1.25.0
|
| 32 |
+
protobuf==5.29.3
|
| 33 |
+
pyasn1==0.6.1
|
| 34 |
+
pyasn1_modules==0.4.1
|
| 35 |
+
pydantic==2.11.3
|
| 36 |
+
pydantic_core==2.33.1
|
| 37 |
+
Pygments==2.19.1
|
| 38 |
+
pyparsing==3.2.1
|
| 39 |
+
python-dotenv==1.1.0
|
| 40 |
+
requests==2.32.3
|
| 41 |
+
rich==13.9.4
|
| 42 |
+
rsa==4.9
|
| 43 |
+
shellingham==1.5.4
|
| 44 |
+
tqdm==4.67.1
|
| 45 |
+
typer==0.15.2
|
| 46 |
+
typing-inspection==0.4.0
|
| 47 |
+
typing_extensions==4.12.2
|
| 48 |
+
uritemplate==4.1.1
|
| 49 |
+
urllib3==2.3.0
|
| 50 |
+
watchdog==6.0.0
|
| 51 |
+
Werkzeug==3.1.3
|