Upload folder using huggingface_hub
Browse files- .aws/task-definition-production.json +95 -0
- .aws/task-definition-staging.json +95 -0
- .env +1 -0
- .github/workflows/deploy-production.yml +23 -0
- .github/workflows/deploy-staging.yml +23 -0
- .github/workflows/deployment.yml +76 -0
- .gitignore +2 -0
- .gradio/certificate.pem +31 -0
- Dockerfile +26 -0
- README.md +8 -12
- __pycache__/assistants.cpython-312.pyc +0 -0
- __pycache__/user.cpython-312.pyc +0 -0
- __pycache__/utils.cpython-312.pyc +0 -0
- app/__init__.py +0 -0
- app/main.py +462 -0
- app/requirements.txt +7 -0
- assistants.py +165 -0
- demo.py +312 -0
- old.py +290 -0
- playground copy.ipynb +0 -0
- playground.ipynb +0 -0
- requirements.txt +10 -0
- temp.json +1 -0
- user.py +175 -0
- utils.py +56 -0
.aws/task-definition-production.json
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"taskDefinitionArn": "arn:aws:ecs:ap-southeast-1:537124935427:task-definition/production-ourcoach-ai-core:1",
|
| 3 |
+
"containerDefinitions": [
|
| 4 |
+
{
|
| 5 |
+
"name": "ourcoach-ai-core",
|
| 6 |
+
"image": "537124935427.dkr.ecr.ap-southeast-1.amazonaws.com/production/ourcoach-ai-core",
|
| 7 |
+
"cpu": 256,
|
| 8 |
+
"memory": 512,
|
| 9 |
+
"portMappings": [
|
| 10 |
+
{
|
| 11 |
+
"name": "7860",
|
| 12 |
+
"containerPort": 7860,
|
| 13 |
+
"hostPort": 7860,
|
| 14 |
+
"protocol": "tcp",
|
| 15 |
+
"appProtocol": "http"
|
| 16 |
+
}
|
| 17 |
+
],
|
| 18 |
+
"essential": true,
|
| 19 |
+
"environment": [],
|
| 20 |
+
"environmentFiles": [],
|
| 21 |
+
"mountPoints": [],
|
| 22 |
+
"volumesFrom": [],
|
| 23 |
+
"ulimits": [],
|
| 24 |
+
"logConfiguration": {
|
| 25 |
+
"logDriver": "awslogs",
|
| 26 |
+
"options": {
|
| 27 |
+
"awslogs-group": "/ecs/production-ourcoach-ai-core",
|
| 28 |
+
"mode": "non-blocking",
|
| 29 |
+
"awslogs-create-group": "true",
|
| 30 |
+
"max-buffer-size": "25m",
|
| 31 |
+
"awslogs-region": "ap-southeast-1",
|
| 32 |
+
"awslogs-stream-prefix": "ecs"
|
| 33 |
+
},
|
| 34 |
+
"secretOptions": []
|
| 35 |
+
},
|
| 36 |
+
"systemControls": []
|
| 37 |
+
}
|
| 38 |
+
],
|
| 39 |
+
"family": "production-ourcoach-ai-core",
|
| 40 |
+
"taskRoleArn": "arn:aws:iam::537124935427:role/ecsTaskExecutionRole",
|
| 41 |
+
"executionRoleArn": "arn:aws:iam::537124935427:role/ecsTaskExecutionRole",
|
| 42 |
+
"networkMode": "awsvpc",
|
| 43 |
+
"revision": 1,
|
| 44 |
+
"volumes": [],
|
| 45 |
+
"status": "ACTIVE",
|
| 46 |
+
"requiresAttributes": [
|
| 47 |
+
{
|
| 48 |
+
"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
"name": "ecs.capability.execution-role-awslogs"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"name": "com.amazonaws.ecs.capability.ecr-auth"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.28"
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"name": "com.amazonaws.ecs.capability.task-iam-role"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"name": "ecs.capability.execution-role-ecr-pull"
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.18"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"name": "ecs.capability.task-eni"
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.29"
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"placementConstraints": [],
|
| 79 |
+
"compatibilities": [
|
| 80 |
+
"EC2",
|
| 81 |
+
"FARGATE"
|
| 82 |
+
],
|
| 83 |
+
"requiresCompatibilities": [
|
| 84 |
+
"FARGATE"
|
| 85 |
+
],
|
| 86 |
+
"cpu": "256",
|
| 87 |
+
"memory": "512",
|
| 88 |
+
"runtimePlatform": {
|
| 89 |
+
"cpuArchitecture": "X86_64",
|
| 90 |
+
"operatingSystemFamily": "LINUX"
|
| 91 |
+
},
|
| 92 |
+
"registeredAt": "2024-10-31T06:16:27.129Z",
|
| 93 |
+
"registeredBy": "arn:aws:iam::537124935427:user/besher",
|
| 94 |
+
"tags": []
|
| 95 |
+
}
|
.aws/task-definition-staging.json
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"taskDefinitionArn": "arn:aws:ecs:ap-southeast-1:537124935427:task-definition/staging-ourcoach-ai-core:1",
|
| 3 |
+
"containerDefinitions": [
|
| 4 |
+
{
|
| 5 |
+
"name": "ourcoach-ai-core",
|
| 6 |
+
"image": "537124935427.dkr.ecr.ap-southeast-1.amazonaws.com/staging/ourcoach-ai-core",
|
| 7 |
+
"cpu": 256,
|
| 8 |
+
"memory": 512,
|
| 9 |
+
"portMappings": [
|
| 10 |
+
{
|
| 11 |
+
"name": "7860",
|
| 12 |
+
"containerPort": 7860,
|
| 13 |
+
"hostPort": 7860,
|
| 14 |
+
"protocol": "tcp",
|
| 15 |
+
"appProtocol": "http"
|
| 16 |
+
}
|
| 17 |
+
],
|
| 18 |
+
"essential": true,
|
| 19 |
+
"environment": [],
|
| 20 |
+
"environmentFiles": [],
|
| 21 |
+
"mountPoints": [],
|
| 22 |
+
"volumesFrom": [],
|
| 23 |
+
"ulimits": [],
|
| 24 |
+
"logConfiguration": {
|
| 25 |
+
"logDriver": "awslogs",
|
| 26 |
+
"options": {
|
| 27 |
+
"awslogs-group": "/ecs/staging-ourcoach-ai-core",
|
| 28 |
+
"mode": "non-blocking",
|
| 29 |
+
"awslogs-create-group": "true",
|
| 30 |
+
"max-buffer-size": "25m",
|
| 31 |
+
"awslogs-region": "ap-southeast-1",
|
| 32 |
+
"awslogs-stream-prefix": "ecs"
|
| 33 |
+
},
|
| 34 |
+
"secretOptions": []
|
| 35 |
+
},
|
| 36 |
+
"systemControls": []
|
| 37 |
+
}
|
| 38 |
+
],
|
| 39 |
+
"family": "staging-ourcoach-ai-core",
|
| 40 |
+
"taskRoleArn": "arn:aws:iam::537124935427:role/ecsTaskExecutionRole",
|
| 41 |
+
"executionRoleArn": "arn:aws:iam::537124935427:role/ecsTaskExecutionRole",
|
| 42 |
+
"networkMode": "awsvpc",
|
| 43 |
+
"revision": 1,
|
| 44 |
+
"volumes": [],
|
| 45 |
+
"status": "ACTIVE",
|
| 46 |
+
"requiresAttributes": [
|
| 47 |
+
{
|
| 48 |
+
"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
"name": "ecs.capability.execution-role-awslogs"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"name": "com.amazonaws.ecs.capability.ecr-auth"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.28"
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"name": "com.amazonaws.ecs.capability.task-iam-role"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"name": "ecs.capability.execution-role-ecr-pull"
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.18"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"name": "ecs.capability.task-eni"
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.29"
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"placementConstraints": [],
|
| 79 |
+
"compatibilities": [
|
| 80 |
+
"EC2",
|
| 81 |
+
"FARGATE"
|
| 82 |
+
],
|
| 83 |
+
"requiresCompatibilities": [
|
| 84 |
+
"FARGATE"
|
| 85 |
+
],
|
| 86 |
+
"cpu": "256",
|
| 87 |
+
"memory": "512",
|
| 88 |
+
"runtimePlatform": {
|
| 89 |
+
"cpuArchitecture": "X86_64",
|
| 90 |
+
"operatingSystemFamily": "LINUX"
|
| 91 |
+
},
|
| 92 |
+
"registeredAt": "2024-10-25T11:56:36.943Z",
|
| 93 |
+
"registeredBy": "arn:aws:iam::537124935427:user/besher",
|
| 94 |
+
"tags": []
|
| 95 |
+
}
|
.env
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
OPENAI_API_KEY='<REDACTED>'  # SECURITY: the original commit published a live-looking OpenAI service-account key in version control. Revoke/rotate that key immediately, remove .env from the repository history, and add `.env` to .gitignore.
|
.github/workflows/deploy-production.yml
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Deployment Pipeline (Production)
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches:
|
| 6 |
+
- main
|
| 7 |
+
|
| 8 |
+
permissions:
|
| 9 |
+
id-token: write # This is required for requesting the JWT
|
| 10 |
+
contents: read # This is required for actions/checkout
|
| 11 |
+
|
| 12 |
+
jobs:
|
| 13 |
+
deploy:
|
| 14 |
+
name: Using Deployment Workflow
|
| 15 |
+
uses: ./.github/workflows/deployment.yml
|
| 16 |
+
with:
|
| 17 |
+
active_env: production
|
| 18 |
+
ecr_url: 537124935427.dkr.ecr.ap-southeast-1.amazonaws.com/production/ourcoach-ai-core
|
| 19 |
+
ecs_service: ourcoach-ai-core
|
| 20 |
+
ecs_cluster: ProductionECSCluster
|
| 21 |
+
ecs_task_definition: .aws/task-definition-production.json
|
| 22 |
+
container_name: ourcoach-ai-core
|
| 23 |
+
secrets: inherit
|
.github/workflows/deploy-staging.yml
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Deployment Pipeline (Staging)
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches:
|
| 6 |
+
- staging
|
| 7 |
+
|
| 8 |
+
permissions:
|
| 9 |
+
id-token: write # This is required for requesting the JWT
|
| 10 |
+
contents: read # This is required for actions/checkout
|
| 11 |
+
|
| 12 |
+
jobs:
|
| 13 |
+
deploy:
|
| 14 |
+
name: Using Deployment Workflow
|
| 15 |
+
uses: ./.github/workflows/deployment.yml
|
| 16 |
+
with:
|
| 17 |
+
active_env: staging
|
| 18 |
+
ecr_url: 537124935427.dkr.ecr.ap-southeast-1.amazonaws.com/staging/ourcoach-ai-core
|
| 19 |
+
ecs_service: ourcoach-ai-core
|
| 20 |
+
ecs_cluster: StagingECSCluster
|
| 21 |
+
ecs_task_definition: .aws/task-definition-staging.json
|
| 22 |
+
container_name: ourcoach-ai-core
|
| 23 |
+
secrets: inherit
|
.github/workflows/deployment.yml
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
name: Reusable Deployment Workflow

on:
  workflow_call:
    inputs:
      active_env:
        required: true
        type: string
      ecr_url:
        required: true
        type: string
      ecs_service:
        required: true
        type: string
      ecs_cluster:
        required: true
        type: string
      ecs_task_definition:
        required: true
        type: string
      container_name:
        required: true
        type: string

env:
  AWS_REGION: ap-southeast-1

# NOTE(review): `id-token: write` is the permission used for GitHub->AWS OIDC
# federation, but the job below authenticates with long-lived static access
# keys instead. Either switch configure-aws-credentials to `role-to-assume`
# (OIDC) or drop this permission — requesting it while unused is misleading.
permissions:
  contents: read
  id-token: write

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    environment:
      name: ${{ inputs.active_env }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      # Actions are pinned to full commit SHAs (supply-chain hardening); keep
      # it that way when bumping versions.
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a

      # NOTE(review): passing secrets as --build-arg embeds them in the image
      # layer metadata; prefer runtime injection via the ECS task definition
      # or Docker BuildKit secret mounts. Left as-is to preserve behavior.
      - name: Build, tag, and push image to Amazon ECR
        id: build-image
        env:
          IMAGE_TAG: ${{ github.sha }}
        run: |
          docker build --build-arg FASTAPI_KEY=${{secrets.FASTAPI_KEY}} --build-arg OPENAI_API_KEY=${{secrets.OPENAI_API_KEY}} -t ${{inputs.ecr_url}}:$IMAGE_TAG .
          docker push ${{inputs.ecr_url}}:$IMAGE_TAG
          echo "image=${{inputs.ecr_url}}:$IMAGE_TAG" >> $GITHUB_OUTPUT

      - name: Fill in the new image ID in the Amazon ECS task definition
        id: task-def
        uses: aws-actions/amazon-ecs-render-task-definition@c804dfbdd57f713b6c079302a4c01db7017a36fc
        with:
          task-definition: ${{ inputs.ecs_task_definition }}
          container-name: ${{ inputs.container_name }}
          image: ${{ steps.build-image.outputs.image }}

      - name: Deploy Amazon ECS task definition
        uses: aws-actions/amazon-ecs-deploy-task-definition@df9643053eda01f169e64a0e60233aacca83799a
        with:
          task-definition: ${{ steps.task-def.outputs.task-definition }}
          service: ${{ inputs.ecs_service }}
          cluster: ${{ inputs.ecs_cluster }}
          wait-for-service-stability: true
|
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
templates/
|
| 2 |
+
personalfinance_wiki/
|
.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
Dockerfile
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use the official Python 3.10.9 image
FROM python:3.10.9

# Expose the port FastAPI will run on
EXPOSE 7860

# Build-time secrets for FastAPI auth and OpenAI access.
# SECURITY NOTE(review): values passed via --build-arg and then copied into
# ENV are recorded in the image configuration and visible via
# `docker history` / `docker inspect` to anyone who can pull the image.
# Prefer injecting these at *runtime* (ECS task definition "secrets" /
# "environment") or BuildKit secret mounts. Kept as-is to preserve the
# current deployment behavior.
ARG FASTAPI_KEY
ARG OPENAI_API_KEY

ENV FASTAPI_KEY=$FASTAPI_KEY
ENV OPENAI_API_KEY=$OPENAI_API_KEY
# Unbuffered stdout/stderr so container logs stream immediately to awslogs.
ENV PYTHONUNBUFFERED=1

# Set the working directory
WORKDIR /code

# Copy requirements first so the dependency layer is cached independently of
# application-code changes.
COPY ./app/requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Copy the app directory
COPY ./app /code/app

# Start the FastAPI app on port 7860 (matches EXPOSE and the ECS port mapping)
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -1,12 +1,8 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Demo1
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
pinned: false
|
| 10 |
-
---
|
| 11 |
-
|
| 12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Demo1
|
| 3 |
+
app_file: demo.py
|
| 4 |
+
sdk: gradio
|
| 5 |
+
sdk_version: 5.3.0
|
| 6 |
+
---
|
| 7 |
+
# ourcoach-ai-core
|
| 8 |
+
Contains all AI system
|
|
|
|
|
|
|
|
|
|
|
|
__pycache__/assistants.cpython-312.pyc
ADDED
|
Binary file (9.55 kB). View file
|
|
|
__pycache__/user.cpython-312.pyc
ADDED
|
Binary file (12.7 kB). View file
|
|
|
__pycache__/utils.cpython-312.pyc
ADDED
|
Binary file (2.37 kB). View file
|
|
|
app/__init__.py
ADDED
|
File without changes
|
app/main.py
ADDED
|
@@ -0,0 +1,462 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from fastapi import FastAPI, HTTPException, Security, Query, status
from fastapi.security import APIKeyHeader
from pydantic import BaseModel
from uuid import UUID
import os
import logging
import json
import secrets
from datetime import datetime
# NOTE(review): `langchain.chat_models.ChatOpenAI` is a deprecated import
# path; prefer `from langchain_openai import ChatOpenAI` once that package
# is added to requirements.
from langchain.chat_models import ChatOpenAI
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from typing import List, Optional

# --- API-key authentication -------------------------------------------------
# The accepted key comes from the FASTAPI_KEY environment variable. Falsy
# values are filtered out so an unset/empty variable can never land in the
# accept-list (the original code would keep a bare `None` entry).
api_keys = [k for k in [os.getenv('FASTAPI_KEY')] if k]
api_key_header = APIKeyHeader(name="X-API-Key")

def get_api_key(api_key_header: str = Security(api_key_header)) -> str:
    """Validate the X-API-Key request header.

    Returns the presented key when it matches a configured key; otherwise
    raises HTTP 401.
    """
    # compare_digest is a constant-time comparison, avoiding timing
    # side-channels on the API key (plain `in` short-circuits per character).
    if any(secrets.compare_digest(api_key_header, key) for key in api_keys):
        return api_key_header
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid or missing API Key",
    )

# Initialize Logging (optional)
# logging.basicConfig(filename='app.log', level=logging.INFO)

# OpenAI GPT wrapper; responses are capped at 300 tokens.
llm = ChatOpenAI(temperature=0.85, model='gpt-4o-mini', api_key=os.getenv('OPENAI_API_KEY'), max_tokens=300)

# FastAPI App
app = FastAPI(title="Ourcoach AI API", description="A FastAPI app for ourcoach's chatbot", version="0.1.0")

# --- Request models ----------------------------------------------------------

class HistoryItem(BaseModel):
    # One prior chat turn; role is presumably "user"/"assistant" — TODO confirm
    # against the caller.
    role: str
    content: str

class SummaryItem(BaseModel):
    # A stored session summary and the date it was produced (string-typed;
    # format not enforced here).
    summary: str
    date: str

class ChatRequest(BaseModel):
    """Payload for /chat: the new message, prior history and the user profile
    used to build the coaching system prompt."""
    message: str
    history: List[HistoryItem]
    name: str
    birthday: str
    country: str
    perfect_day: str
    focus: str
    goals: str
    challenges: str
    email_address: str
    whatsapp_number: str
    belief_in_astrology: str
    legendary_persona: str
    latest_summary: Optional[str] = None

class SummaryRequest(BaseModel):
    """Payload for summary generation: same profile fields as ChatRequest,
    minus the new message. (Kept duplicated to avoid changing the wire
    schema; a shared profile base model would remove the repetition.)"""
    history: List[HistoryItem]
    name: str
    birthday: str
    country: str
    perfect_day: str
    focus: str
    goals: str
    challenges: str
    email_address: str
    whatsapp_number: str
    belief_in_astrology: str
    legendary_persona: str
    latest_summary: Optional[str] = None

class NewsLetterRequest(BaseModel):
    """Payload for newsletter generation from past session summaries."""
    summaries: List[SummaryItem]

@app.get("/ok")
def ok_endpoint():
    # Unauthenticated health-check endpoint (used by the load balancer).
    return {"message": "ok"}
|
| 81 |
+
|
| 82 |
+
@app.post("/chat")
|
| 83 |
+
def predict(request: ChatRequest, api_key: str = Security(get_api_key)):
|
| 84 |
+
bot_name = request.legendary_persona.split(' ')[0]
|
| 85 |
+
|
| 86 |
+
# Retrieve the latest summary for this user_id
|
| 87 |
+
summary = request.latest_summary
|
| 88 |
+
|
| 89 |
+
# Update the system prompt with the latest summary
|
| 90 |
+
summary_text = f"\n#COACHING NOTES#\n{summary}\n" if summary else ""
|
| 91 |
+
|
| 92 |
+
if summary is None:
|
| 93 |
+
system_prompt = f"""
|
| 94 |
+
##ROLE##
|
| 95 |
+
You are a world-class life coach, dedicated to helping users improve their mental well-being, physical health, relationships, career, financial stability, and personal growth. You should only entertain conversations related to being a life coach, especially around the areas aforementioned.
|
| 96 |
+
|
| 97 |
+
##TONE##
|
| 98 |
+
At all times, you must sound natural like a life coach, always empathetic, caring and yet inspiring. you should always carry the tone of {request.legendary_persona} and impersonate {request.legendary_persona}.
|
| 99 |
+
|
| 100 |
+
##USER PROFILE##
|
| 101 |
+
This is the profile of the user that you’re coaching:
|
| 102 |
+
a) name: {request.name}
|
| 103 |
+
b) birthday: {request.birthday}
|
| 104 |
+
c) country: {request.country}
|
| 105 |
+
d) perfect day and inspiration: {request.perfect_day}
|
| 106 |
+
e) what matters most to user (area of focus): {request.focus}
|
| 107 |
+
f) goals in areas of focus: {request.goals}
|
| 108 |
+
g) challenges: {request.challenges}
|
| 109 |
+
h) email address: {request.email_address}
|
| 110 |
+
i) whatsapp number: {request.whatsapp_number}
|
| 111 |
+
j) belief in astrology/horoscopes: {request.belief_in_astrology}
|
| 112 |
+
k) legendary persona: {request.legendary_persona}
|
| 113 |
+
|
| 114 |
+
##COACHING GUIDES##
|
| 115 |
+
As a coach, you must do a conversation with the user in these steps:
|
| 116 |
+
############################# START OF INTRODUCTION SESSION #################################
|
| 117 |
+
#STEP 1: INTRODUCTION#
|
| 118 |
+
1. Introduce yourself as "Coach {bot_name}" and start with a short introduction based on the chosen legendary persona persona ({request.legendary_persona}) and its purpose and role which is to be a life coach and help the user grow towards achieving the user’s perfect day, inspiration and improve the user’s goals in the area of focus.
|
| 119 |
+
2. In the introduction message you must make it as natural as possible and include these:
|
| 120 |
+
- Greetings: Call the user's name & introduce your name
|
| 121 |
+
- Explain that you are inspired by {request.legendary_persona}
|
| 122 |
+
- Explain that your purpose is to help the user grow and ultimately achieve their goals
|
| 123 |
+
- Emphatize with the user's challenges
|
| 124 |
+
- Give some motivation to the user
|
| 125 |
+
3. The introduction message should be comprehensive (around 1 paragraph), natural, friendly, and warm.
|
| 126 |
+
4. The introduction should end without asking any questions.
|
| 127 |
+
|
| 128 |
+
EXAMPLE:
|
| 129 |
+
Hello, John Doe! I am Coach Teresa, your life coach inspired by the incredible compassion and empathy of Mother Teresa. My purpose is to help you grow and flourish in your journey toward achieving your perfect day, which is becoming the champion of the Geoguessr world cup and using that experience to travel the world and share your life journey through your writing.
|
| 130 |
+
Together, we will focus on your career growth and financial stability, aligning your goals towards attaining financial independence. Remember, my approach is rooted in understanding and compassion. It's a privilege to support you in this journey of self-discovery and actualization as we work towards your dreams. Let's embark on this meaningful path together.
|
| 131 |
+
|
| 132 |
+
#STEP 2: GET TO KNOW THE USER#
|
| 133 |
+
Important rules:
|
| 134 |
+
- It has to sound natural, straightforward, and friendly
|
| 135 |
+
- Avoid repeating the same question over and over again. Ensure that the user enjoys the conversation & not tired of answering too many questions
|
| 136 |
+
1. Your goal is to help the user crystallize the goals that they've given above ({request.goals}). How?
|
| 137 |
+
- A goal must be SMART:
|
| 138 |
+
Specific: Clearly define what you want to accomplish. Instead of a vague objective, provide a focused goal.
|
| 139 |
+
Measurable: Include specific criteria to measure your progress. This could be metrics, quantities, or specific deliverables that indicate when you've met the goal.
|
| 140 |
+
Achievable: Set a realistic goal that challenges you but remains within reach, considering available resources and constraints.
|
| 141 |
+
Relevant: Ensure the goal aligns with broader objectives or personal values, so it's meaningful and worthwhile.
|
| 142 |
+
Time-bound: Set a clear timeline for achieving the goal, with start and end dates or specific deadlines.
|
| 143 |
+
- Hence, ask the user questions that help you complete this SMART criteria. But if it's already SMART, you can skip this step.
|
| 144 |
+
2. Once the goal is SMART, you can ask the user about their current state—strengths, weaknesses, challenges, and opportunities they see. This helps you gauge the starting point.
|
| 145 |
+
3. You must also explore why the user wants to achieve their goals. They may also identify potential obstacles or limiting beliefs that could impact progress.
|
| 146 |
+
4. Lastly, you must wrap up the conversation with small, achievable steps or practices the user can focus on before the next session. This could be a specific task or reflective exercise aligned with their goals.
|
| 147 |
+
############################# END OF INTRODUCTION SESSION #################################
|
| 148 |
+
#GENERAL COACHING SESSION#
|
| 149 |
+
If the user initiates a new conversation after you wrapped up the introduction session, we will enter the GENERAL COACHING SESSION. Follow this steps to guide you coach the user:
|
| 150 |
+
############################# START OF GENERAL COACHING SESSION #################################
|
| 151 |
+
#STEP 1: ASK ONE OPENING PERSONALIZED QUESTION#
|
| 152 |
+
Goal: To know what's currently happening with the user.
|
| 153 |
+
- If the user brings up a new topic, start by sending one thoughtful and personalized question to guide self-reflection.
|
| 154 |
+
- Personalization: The question must refer back to past information or context provided by the user, such as their goals, challenges, inspirations, or any relevant details from previous conversations.
|
| 155 |
+
- Ask about the user's state in their area of focus or relate to their sources of inspiration.
|
| 156 |
+
- Rotate the topics and formats daily to keep the interaction fresh and engaging.
|
| 157 |
+
- Keep the question straightforward and natural.
|
| 158 |
+
- Do NOT ask about the same area or context on consecutive days to prevent conversational fatigue.
|
| 159 |
+
- Do NOT re-ask what you already know from the onboarding (unless it's outdated); ask different questions instead.
|
| 160 |
+
- Ensure no repetitive questions within a 7-day window.
|
| 161 |
+
- Only ask one question in this message.
|
| 162 |
+
(Go to the next step after you asked an opening question)
|
| 163 |
+
|
| 164 |
+
#STEP 2: ASK THREE DEEP DIVE QUESTION#
|
| 165 |
+
Goal: To understand more why is it happening to the user, what's the user's feeling, and what's the impact to the user.
|
| 166 |
+
- After the user responds, follow up with maximum THREE (3) more question that invites deeper reflection based on their response. The follow-up question should delve deeper into the topic introduced in the first question.
|
| 167 |
+
Personalize the question by recalling and referencing something the user has mentioned before. Do NOT re-ask what you already know from the onboarding (unless it's outdated); ask different questions instead.
|
| 168 |
+
- Don't forget to only ask THREE (3) deep dive question!
|
| 169 |
+
(Go to the next step after you asked three dive deeper question!)
|
| 170 |
+
|
| 171 |
+
#STEP 3: GIVE POSITIVE AFFIRMATION, SUGGESTION, AND QUOTE#
|
| 172 |
+
Goal: This is the most important part of the coaching session. To give positive affirmation, suggestions, and quote based on the information gained
|
| 173 |
+
- After the second user response, send a single message that includes:
|
| 174 |
+
1. A positive affirmation or reflection based on the user's responses.
|
| 175 |
+
2. It should not explicitly mention "positive affirmation” nor “positive reflection"
|
| 176 |
+
3. Maximum of 3 suggestions that helps the user reach his/her goals or overcome the challenges.
|
| 177 |
+
4. A relevant quote from {request.legendary_persona} for added inspiration, but YOU MUST NOT USE double quotation mark (") for the quote (so say it like it's coming from you!) and DON'T say that the quote is coming from {request.legendary_persona}!
|
| 178 |
+
Good Example: I believe you can achieve your goals. You have power over your mind — not outside events. Realize this, and you will find strength.
|
| 179 |
+
Bad Example: I believe you can achieve your goals. As {request.legendary_persona} said, "You have power over your mind — not outside events. Realize this, and you will find strength."
|
| 180 |
+
5. The affirmation should be personalized and help the user feel better or motivate them.
|
| 181 |
+
6. The quote must be relevant to the conversation; if there isn't a relevant quote, you may skip it.
|
| 182 |
+
7. Do NOT ask any further questions after this message.
|
| 183 |
+
8. Refrain from prompting the user to respond or engage further in this interaction.
|
| 184 |
+
(Go to the next step once this step is done.)
|
| 185 |
+
|
| 186 |
+
#STEP 4: END OF INTERACTION#
|
| 187 |
+
- After sending the positive affirmation, suggestions and quote, the interaction for the day is complete.
|
| 188 |
+
- You may commend the user warmly for the reflection of the day and tell the user that you look forward to tomorrow’s reflection unless the user has anything else to talk about. Do NOT ask any additional questions.
|
| 189 |
+
- Wait for the next scheduled interaction (e.g., the next day) to re-engage the user.
|
| 190 |
+
- Flexibility with Coach Persona: Allow the user to switch to any of the six given coach personas at any time.
|
| 191 |
+
- If the user asks any question outside the aforementioned areas of life coaching, please kindly divert it back to the user’s life coaching journey.
|
| 192 |
+
- Any question raised by the user after the positive affirmation should only be entertained if it's around the areas of life coaching. Otherwise, end the conversation kindly as the reflection of the day is done
|
| 193 |
+
- If the user ask about his/her life score, give an estimate for each of the life coaching area and also an overall estimate using the past conversations in the last 7 days
|
| 194 |
+
############################# END OF GENERAL COACHING SESSION #################################
|
| 195 |
+
|
| 196 |
+
#FORMAT#
|
| 197 |
+
You may use emojis or emoticons sparingly in the interaction to enhance warmth and engagement.
|
| 198 |
+
|
| 199 |
+
#ADDITIONAL RULES#
|
| 200 |
+
- In both daily reflection and general coaching session, you must adhere to the step-by-step above and ensure the objective is achieved as quick as possible (don't ask too many questions to avoid conversation fatigue)
|
| 201 |
+
- Keep the question straightforward and natural.
|
| 202 |
+
- Do NOT ask about the same area or context on consecutive days to prevent conversational fatigue.
|
| 203 |
+
- Do NOT re-ask what you already know from the onboarding (unless it's outdated); ask different questions instead.
|
| 204 |
+
- Ensure no repetitive questions within a 7-day window.
|
| 205 |
+
- The questions asked by the assistant are in a daily setting. """
|
| 206 |
+
else:
|
| 207 |
+
system_prompt = f"""
|
| 208 |
+
##ROLE##
|
| 209 |
+
You are a world-class life coach, dedicated to helping users improve their mental well-being, physical health, relationships, career, financial stability, and personal growth. You should only entertain conversations related to being a life coach, especially around the areas aforementioned.
|
| 210 |
+
|
| 211 |
+
##TONE##
|
| 212 |
+
At all times, you must sound natural like a life coach, always empathetic, caring and yet inspiring. you should always carry the tone of {request.legendary_persona} and impersonate {request.legendary_persona}.
|
| 213 |
+
|
| 214 |
+
##USER PROFILE##
|
| 215 |
+
This is the profile of the user that you’re coaching:
|
| 216 |
+
a) name: {request.name}
|
| 217 |
+
b) birthday: {request.birthday}
|
| 218 |
+
c) country: {request.country}
|
| 219 |
+
d) perfect day and inspiration: {request.perfect_day}
|
| 220 |
+
e) what matters most to user (area of focus): {request.focus}
|
| 221 |
+
f) goals in areas of focus: {request.goals}
|
| 222 |
+
g) challenges: {request.challenges}
|
| 223 |
+
h) email address: {request.email_address}
|
| 224 |
+
i) whatsapp number: {request.whatsapp_number}
|
| 225 |
+
j) belief in astrology/horoscopes: {request.belief_in_astrology}
|
| 226 |
+
k) legendary persona: {request.legendary_persona}
|
| 227 |
+
|
| 228 |
+
#INFORMATION ABOUT PAST CONVERSATION#
|
| 229 |
+
You need to use the coaching notes below to understand the user and determine the topic of the conversation today!
|
| 230 |
+
{summary_text}
|
| 231 |
+
|
| 232 |
+
##COACHING GUIDES##
|
| 233 |
+
As a coach, you must do a conversation with the user in these steps:
|
| 234 |
+
|
| 235 |
+
############################# START OF DAILY REFLECTION SESSION #################################
|
| 236 |
+
|
| 237 |
+
#STEP 1: ASK ONE OPENING PERSONALIZED QUESTION#
|
| 238 |
+
Goal: To engage in natural way (like a real coach) and know what's currently happening with the user.
|
| 239 |
+
- If there is an information about past conversation available, you must follow up these previous topics.
|
| 240 |
+
- If there is no information about past conversation available, you can ask about the user's state in their area of focus ({request.focus}) or relate to their sources of inspiration.
|
| 241 |
+
- Only ask maximum TWO (2) questions to prevent conversation fatigue
|
| 242 |
+
### EXAMPLE DIALOGUE, IF INFORMATION ABOUT PAST CONVERSATION IS AVAILABLE ###
|
| 243 |
+
(Example Information from Past Conversation: The user is having a busy week with tennis matches and startup hustle lately. User likes spending time with family)
|
| 244 |
+
Coach: Hey {request.name}! It's Saturday morning, and I was just thinking about you. After your busy week with tennis matches and startup hustle, I hope you're looking forward to some downtime this weekend. Do you have any plans for the weekend with your family? Sometimes the best recharge comes from simply being present with the people we care about. Wishing you a relaxing and rejuvenating weekend!
|
| 245 |
+
User: I will hike the mountain with my whole family tomorrow morning. It's fun
|
| 246 |
+
Coach: That sounds absolutely wonderful, {request.name}! Hiking with family is such a beautiful way to connect with nature and bond with loved ones. It's a perfect opportunity to enjoy the present moment while also being physically active. As you prepare for your hike, how do you feel about balancing this joyful experience with your fitness goals? What are you most looking forward to during the hike?
|
| 247 |
+
(Go to the next step after you asked the opening question!)
|
| 248 |
+
|
| 249 |
+
#STEP 2: ASK THREE DEEP DIVE QUESTION#
|
| 250 |
+
Goal: To understand more why is it happening to the user, what's the user's feeling, and what's the impact to the user.
|
| 251 |
+
- After the user responds, follow up with maximum THREE (3) more question that invites deeper reflection based on their response. The follow-up question should delve deeper into the topic introduced in the first question.
|
| 252 |
+
Personalize the question by recalling and referencing something the user has mentioned before. Do NOT re-ask what you already know from the onboarding (unless it's outdated); ask different questions instead.
|
| 253 |
+
- Don't forget to only ask maximum THREE (3) deep dive question!
|
| 254 |
+
(Go to the next step after you asked three dive deeper question!)
|
| 255 |
+
|
| 256 |
+
#STEP 3: GIVE POSITIVE AFFIRMATION, SUGGESTION, AND QUOTE#
|
| 257 |
+
Goal: This is the most important part of the coaching session. To give positive affirmation, suggestions, and quote based on the information gained
|
| 258 |
+
- After the user response, send a single message that includes:
|
| 259 |
+
1. A positive affirmation or reflection based on the user's responses.
|
| 260 |
+
2. It should not explicitly mention "positive affirmation” nor “positive reflection"
|
| 261 |
+
3. Maximum of 3 suggestions that helps the user reach his/her goals or overcome the challenges.
|
| 262 |
+
4. A relevant quote from {request.legendary_persona} for added inspiration, but YOU MUST NOT USE double quotation mark (") for the quote (so say it like it's coming from you!) and DON'T say that the quote is coming from {request.legendary_persona}!
|
| 263 |
+
Good Example: I believe you can achieve your goals. You have power over your mind — not outside events. Realize this, and you will find strength.
|
| 264 |
+
Bad Example: I believe you can achieve your goals. As {request.legendary_persona} said, "You have power over your mind — not outside events. Realize this, and you will find strength."
|
| 265 |
+
5. The affirmation should be personalized and help the user feel better or motivate them.
|
| 266 |
+
6. The quote must be relevant to the conversation; if there isn't a relevant quote, you may skip it.
|
| 267 |
+
7. Do NOT ask any further questions after this message.
|
| 268 |
+
8. Refrain from prompting the user to respond or engage further in this interaction.
|
| 269 |
+
(Go to the next step once this step is done.)
|
| 270 |
+
|
| 271 |
+
#STEP 4: END OF INTERACTION#
|
| 272 |
+
- After sending the positive affirmation, suggestions and quote, the interaction for the day is complete.
|
| 273 |
+
- You may commend the user warmly for the reflection of the day and tell the user that you look forward to tomorrow’s reflection unless the user has anything else to talk about. Do NOT ask any additional questions.
|
| 274 |
+
- Wait for the next scheduled interaction (e.g., the next day) to re-engage the user.
|
| 275 |
+
- Flexibility with Coach Persona: Allow the user to switch to any of the six given coach personas at any time.
|
| 276 |
+
- If the user asks any question outside the aforementioned areas of life coaching, please kindly divert it back to the user’s life coaching journey.
|
| 277 |
+
- Any question raised by the user after the positive affirmation should only be entertained if it's around the areas of life coaching. Otherwise, end the conversation kindly as the reflection of the day is done
|
| 278 |
+
- If the user ask about his/her life score, give an estimate for each of the life coaching area and also an overall estimate using the past conversations in the last 7 days
|
| 279 |
+
############################# END OF DAILY REFLECTION SESSION #################################
|
| 280 |
+
#GENERAL COACHING SESSION#
|
| 281 |
+
If the user initiates a new conversation after you wrapped up the daily reflection session, we will enter the GENERAL COACHING SESSION.
|
| 282 |
+
For the general coaching session, you can entertain the user by asking an opening question, ask a deep dive question, and give some positive affirmation/suggestions to the user.
|
| 283 |
+
The point is to make it quick and avoid conversational fatigue.
|
| 284 |
+
|
| 285 |
+
#FORMAT#
|
| 286 |
+
You may use emojis or emoticons sparingly in the interaction to enhance warmth and engagement.
|
| 287 |
+
|
| 288 |
+
#ADDITIONAL RULES#
|
| 289 |
+
- In both daily reflection and general coaching session, you must adhere to the step-by-step above and ensure the objective is achieved as quick as possible (don't ask too many questions to avoid conversation fatigue)
|
| 290 |
+
- Keep the question straightforward and natural.
|
| 291 |
+
- Do NOT ask about the same area or context on consecutive days to prevent conversational fatigue.
|
| 292 |
+
- Do NOT re-ask what you already know from the onboarding (unless it's outdated); ask different questions instead.
|
| 293 |
+
- Ensure no repetitive questions within a 7-day window.
|
| 294 |
+
- The questions asked by the assistant are in a daily setting.
|
| 295 |
+
"""
|
| 296 |
+
|
| 297 |
+
# Create the Prompt Template
|
| 298 |
+
prompt = ChatPromptTemplate.from_messages(
|
| 299 |
+
[
|
| 300 |
+
SystemMessage(content=system_prompt),
|
| 301 |
+
MessagesPlaceholder(variable_name="messages"),
|
| 302 |
+
]
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
chain = prompt | llm
|
| 306 |
+
|
| 307 |
+
# Convert history to langchain format
|
| 308 |
+
history_langchain_format = [
|
| 309 |
+
HumanMessage(content=msg.content) if msg.role == "user" else AIMessage(content=msg.content)
|
| 310 |
+
for msg in request.history
|
| 311 |
+
]
|
| 312 |
+
history_langchain_format.append(HumanMessage(content=request.message))
|
| 313 |
+
|
| 314 |
+
gpt_response = chain.invoke(
|
| 315 |
+
{
|
| 316 |
+
"messages": history_langchain_format,
|
| 317 |
+
}
|
| 318 |
+
)
|
| 319 |
+
return {"response": gpt_response.content}
|
| 320 |
+
|
| 321 |
+
@app.post("/summarize")
|
| 322 |
+
def summarize(request: SummaryRequest, api_key: str = Security(get_api_key)):
|
| 323 |
+
chat_history = [msg.model_dump() for msg in request.history]
|
| 324 |
+
|
| 325 |
+
# Generate a summary using ChatOpenAI
|
| 326 |
+
summary_prompt = f"""
|
| 327 |
+
# ROLE #
|
| 328 |
+
You are a world-class life coach dedicated to helping users improve their mental well-being, physical health, relationships, career, financial stability, and personal growth.
|
| 329 |
+
You have done some dialogues with your client and you need to take some coaching notes to understand the key characteristic of your client and the topics that can be followed up in the next
|
| 330 |
+
conversation.
|
| 331 |
+
|
| 332 |
+
# TASK #
|
| 333 |
+
Based on the chat history that is available, you must create a coaching notes that includes two parts: Key characteristics of the client and Topics to be followed up.
|
| 334 |
+
|
| 335 |
+
# USER PROFILE #
|
| 336 |
+
This is the profile of the user that you’re coaching:
|
| 337 |
+
a) name: {request.name}
|
| 338 |
+
b) birthday: {request.birthday}
|
| 339 |
+
c) country: {request.country}
|
| 340 |
+
d) perfect day and inspiration: {request.perfect_day}
|
| 341 |
+
e) what matters most to user (area of focus): {request.focus}
|
| 342 |
+
f) goals in areas of focus: {request.goals}
|
| 343 |
+
g) challenges: {request.challenges}
|
| 344 |
+
h) email address: {request.email_address}
|
| 345 |
+
i) whatsapp number: {request.whatsapp_number}
|
| 346 |
+
j) belief in astrology/horoscopes: {request.belief_in_astrology}
|
| 347 |
+
k) legendary persona: {request.legendary_persona}
|
| 348 |
+
|
| 349 |
+
# LATEST SUMMARY #
|
| 350 |
+
This is the latest coaching note from previous session that might be helpful for you as an additional context for the new coaching note:
|
| 351 |
+
{request.latest_summary}
|
| 352 |
+
"""
|
| 353 |
+
summary_template = ChatPromptTemplate.from_messages([
|
| 354 |
+
SystemMessage(content=summary_prompt),
|
| 355 |
+
MessagesPlaceholder(variable_name="messages"),
|
| 356 |
+
])
|
| 357 |
+
|
| 358 |
+
summary_chain = summary_template | llm
|
| 359 |
+
|
| 360 |
+
gpt_summary_response = summary_chain.invoke({
|
| 361 |
+
"messages": [HumanMessage(content=json.dumps(chat_history))]
|
| 362 |
+
})
|
| 363 |
+
|
| 364 |
+
summary = gpt_summary_response.content
|
| 365 |
+
|
| 366 |
+
return {"summary": summary}
|
| 367 |
+
|
| 368 |
+
@app.post("/newsletter")
|
| 369 |
+
def generate_newsletter(summary_list: NewsLetterRequest, api_key: str = Security(get_api_key)):
|
| 370 |
+
summaries = summary_list.summaries
|
| 371 |
+
|
| 372 |
+
formatted_summaries = "\n\n".join(
|
| 373 |
+
f"Date: {item.date}\nSummary: {item.summary}" for item in summaries
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
# Define the newsletter prompt with instructions
|
| 377 |
+
newsletter_prompt = """
|
| 378 |
+
### TASK ###
|
| 379 |
+
Based on the following summaries, create an engaging and concise newsletter.
|
| 380 |
+
The newsletter should be presented in an HTML format and include sections:
|
| 381 |
+
|
| 382 |
+
- Introduction addressing the user's progress and journey.
|
| 383 |
+
- Weekly Highlights summarizing key insights.
|
| 384 |
+
- Progress Tracking highlighting accomplishments.
|
| 385 |
+
- Personalized Suggestions for the upcoming week.
|
| 386 |
+
- Positive Affirmations with motivational pointers.
|
| 387 |
+
- Look Ahead suggesting areas of focus for future growth.
|
| 388 |
+
|
| 389 |
+
Below are the summaries:
|
| 390 |
+
{formatted_summaries}
|
| 391 |
+
|
| 392 |
+
### Output Format ###
|
| 393 |
+
Provide the newsletter in JSON format as follows, without any additional message other than the JSON output!
|
| 394 |
+
|
| 395 |
+
{{
|
| 396 |
+
"introduction": "<p>HTML content for introduction</p>",
|
| 397 |
+
"weekly_highlights": "<p>HTML content for weekly highlights</p>",
|
| 398 |
+
"progress_tracking": "<p>HTML content for progress tracking</p>",
|
| 399 |
+
"affirmations": "<p>HTML content for affirmations</p>",
|
| 400 |
+
"look_ahead": "<p>HTML content for look ahead</p>"
|
| 401 |
+
}}
|
| 402 |
+
"""
|
| 403 |
+
|
| 404 |
+
# Create a chat prompt template with LangChain
|
| 405 |
+
prompt = ChatPromptTemplate.from_messages([
|
| 406 |
+
SystemMessage(content=newsletter_prompt),
|
| 407 |
+
HumanMessage(content=formatted_summaries)
|
| 408 |
+
])
|
| 409 |
+
|
| 410 |
+
# Chain the template with the language model
|
| 411 |
+
chain = prompt | llm
|
| 412 |
+
|
| 413 |
+
# Invoke the chain to get the response
|
| 414 |
+
try:
|
| 415 |
+
gpt_response = chain.invoke({})
|
| 416 |
+
output_content = gpt_response.content
|
| 417 |
+
except Exception as e:
|
| 418 |
+
raise HTTPException(status_code=500, detail=f"OpenAI API error: {str(e)}")
|
| 419 |
+
|
| 420 |
+
try:
|
| 421 |
+
# Parse the JSON from the model's response
|
| 422 |
+
newsletter_sections = json.loads(output_content)
|
| 423 |
+
except json.JSONDecodeError:
|
| 424 |
+
raise HTTPException(status_code=500, detail="Unable to parse OpenAI response as structured JSON.")
|
| 425 |
+
|
| 426 |
+
# Construct the final HTML newsletter
|
| 427 |
+
final_newsletter_html = f"""
|
| 428 |
+
<html>
|
| 429 |
+
<head>
|
| 430 |
+
<style>
|
| 431 |
+
body {{ font-family: Arial, sans-serif; }}
|
| 432 |
+
.container {{ max-width: 600px; margin: auto; padding: 20px; }}
|
| 433 |
+
.section {{ margin-bottom: 20px; }}
|
| 434 |
+
</style>
|
| 435 |
+
</head>
|
| 436 |
+
<body>
|
| 437 |
+
<div class='container'>
|
| 438 |
+
<h1>Your Weekly Progress Newsletter</h1>
|
| 439 |
+
<div class='section'>{newsletter_sections.get('introduction', '')}</div>
|
| 440 |
+
<div class='section'>
|
| 441 |
+
<h2>Weekly Highlights</h2>
|
| 442 |
+
{newsletter_sections.get('weekly_highlights', '')}
|
| 443 |
+
</div>
|
| 444 |
+
<div class='section'>
|
| 445 |
+
<h2>Progress Tracking</h2>
|
| 446 |
+
{newsletter_sections.get('progress_tracking', '')}
|
| 447 |
+
</div>
|
| 448 |
+
<div class='section'>
|
| 449 |
+
<h2>Positive Affirmations for the Week Ahead</h2>
|
| 450 |
+
{newsletter_sections.get('affirmations', '')}
|
| 451 |
+
</div>
|
| 452 |
+
<div class='section'>
|
| 453 |
+
<h2>Looking Ahead</h2>
|
| 454 |
+
{newsletter_sections.get('look_ahead', '')}
|
| 455 |
+
</div>
|
| 456 |
+
<footer>Stay inspired, stay committed.<br>Warm regards,<br>Your AI Coach at ourcoach.ai</footer>
|
| 457 |
+
</div>
|
| 458 |
+
</body>
|
| 459 |
+
</html>
|
| 460 |
+
"""
|
| 461 |
+
|
| 462 |
+
return {"newsletter": final_newsletter_html}
|
app/requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.109.2
|
| 2 |
+
pydantic==2.7.0
|
| 3 |
+
uvicorn==0.27.1
|
| 4 |
+
langchain==0.1.7
|
| 5 |
+
openai==1.12.0
|
| 6 |
+
langchain-openai==0.0.6
|
| 7 |
+
duckdb
|
assistants.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import io
|
| 3 |
+
import os
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
def get_current_datetime():
|
| 8 |
+
return datetime.now()
|
| 9 |
+
|
| 10 |
+
class Assistant:
|
| 11 |
+
def __init__(self, id, cm):
|
| 12 |
+
self.id = id
|
| 13 |
+
self.cm = cm
|
| 14 |
+
|
| 15 |
+
def process(self, thread, text):
|
| 16 |
+
template_search = self.cm.add_message_to_thread(thread.id, "assistant", f"Pay attention to the current state you are in and the conversation guidelines to respond to the users query:")
|
| 17 |
+
message = self.cm.add_message_to_thread(thread.id, "user", text)
|
| 18 |
+
|
| 19 |
+
run = self.cm.client.beta.threads.runs.create_and_poll(
|
| 20 |
+
thread_id=thread.id,
|
| 21 |
+
assistant_id=self.id,
|
| 22 |
+
model="gpt-4o-mini",
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
if run.status == 'requires_action':
|
| 26 |
+
# print(f'[TOOL]: Calling tool for action')
|
| 27 |
+
run = self.call_tool(run, thread)
|
| 28 |
+
|
| 29 |
+
if run.status == 'completed':
|
| 30 |
+
# response = self.cm.client.beta.threads.messages.list(
|
| 31 |
+
# thread_id=thread.id, order="asc", after=message.id
|
| 32 |
+
# )
|
| 33 |
+
# delete template search message
|
| 34 |
+
self.cm.client.beta.threads.messages.delete(
|
| 35 |
+
message_id=template_search.id,
|
| 36 |
+
thread_id=thread.id,
|
| 37 |
+
)
|
| 38 |
+
else:
|
| 39 |
+
print(run.status)
|
| 40 |
+
return message
|
| 41 |
+
|
| 42 |
+
def call_tool(self, run, thread):
|
| 43 |
+
tool_outputs = []
|
| 44 |
+
print(f"[INFO]: Required actions: {list(map(lambda x: f'{x.function.name}({x.function.arguments})', run.required_action.submit_tool_outputs.tool_calls))}")
|
| 45 |
+
# Loop through each tool in the required action section
|
| 46 |
+
for tool in run.required_action.submit_tool_outputs.tool_calls:
|
| 47 |
+
if tool.function.name == "generate_coaching_plan":
|
| 48 |
+
user_challenge = json.loads(tool.function.arguments)
|
| 49 |
+
tool_outputs.append({
|
| 50 |
+
"tool_call_id": tool.id,
|
| 51 |
+
"output": str(self.generate_coaching_plan(user_challenge))
|
| 52 |
+
})
|
| 53 |
+
elif tool.function.name == "transition":
|
| 54 |
+
transitions = json.loads(tool.function.arguments)
|
| 55 |
+
print(f"[TRANSITION]: {transitions['from']} -> {transitions['to']}")
|
| 56 |
+
tool_outputs.append({
|
| 57 |
+
"tool_call_id": tool.id,
|
| 58 |
+
"output": f"** [TRANSITION]: {transitions['from']} -> {transitions['to']} **"
|
| 59 |
+
})
|
| 60 |
+
elif tool.function.name == "get_date":
|
| 61 |
+
# print(f"[DATETIME]: {get_current_datetime()}")
|
| 62 |
+
print(f"[DATETIME]: {self.cm.state['date']}")
|
| 63 |
+
|
| 64 |
+
tool_outputs.append({
|
| 65 |
+
"tool_call_id": tool.id,
|
| 66 |
+
"output": f"{self.cm.state['date']}"
|
| 67 |
+
})
|
| 68 |
+
elif tool.function.name == "create_goals" or tool.function.name == "create_memento":
|
| 69 |
+
json_string = json.loads(tool.function.arguments)
|
| 70 |
+
json_string['created'] = str(self.cm.state['date'])
|
| 71 |
+
json_string['updated'] = None
|
| 72 |
+
print(f"[NEW EVENT]: {json_string}")
|
| 73 |
+
|
| 74 |
+
# Create a folder for the user's mementos if it doesn't exist
|
| 75 |
+
user_mementos_folder = f"mementos/to_upload/{self.cm.user.user_id}"
|
| 76 |
+
if not os.path.exists(user_mementos_folder):
|
| 77 |
+
os.makedirs(user_mementos_folder)
|
| 78 |
+
|
| 79 |
+
# save json_string as a file
|
| 80 |
+
json.dump(json_string, open(f"mementos/to_upload/{self.cm.user.user_id}/{json_string['title']}.json", "w"))
|
| 81 |
+
|
| 82 |
+
# # Add the event to the user's vector store
|
| 83 |
+
# # get or create vector store 'events'
|
| 84 |
+
# memory_file = self.cm.client.files.create(file=open(f"mementos/{self.cm.user.user_id}/{json_string['title']}.json", "rb"),\
|
| 85 |
+
# purpose="assistants")
|
| 86 |
+
|
| 87 |
+
# vector_store_file = self.cm.client.beta.vector_stores.files.create(
|
| 88 |
+
# vector_store_id=self.cm.user_personal_memory.id,
|
| 89 |
+
# file_id=memory_file.id
|
| 90 |
+
# )
|
| 91 |
+
|
| 92 |
+
print(f"[INFO]: Added event to the user's vector store")
|
| 93 |
+
|
| 94 |
+
tool_outputs.append({
|
| 95 |
+
"tool_call_id": tool.id,
|
| 96 |
+
"output": f"** [Success]: Added event to the user's vector store**"
|
| 97 |
+
})
|
| 98 |
+
elif tool.function.name == "msearch":
|
| 99 |
+
context = json.loads(tool.function.arguments)['queries']
|
| 100 |
+
|
| 101 |
+
print(f"[MSEARCH]: Searching for {context}")
|
| 102 |
+
tool_outputs.append({
|
| 103 |
+
"tool_call_id": tool.id,
|
| 104 |
+
"output": f"** retrieve any files related to: {context} **"
|
| 105 |
+
})
|
| 106 |
+
elif tool.function.name == "get_goals":
|
| 107 |
+
print("FETCH GOAL")
|
| 108 |
+
context = json.loads(tool.function.arguments)['context']
|
| 109 |
+
|
| 110 |
+
print(f"[GET GOALS]: {context}")
|
| 111 |
+
tool_outputs.append({
|
| 112 |
+
"tool_call_id": tool.id,
|
| 113 |
+
"output": f"** File search the vector_store: {self.cm.user_personal_memory.id} for the most relevant goal related to: {context} **"
|
| 114 |
+
})
|
| 115 |
+
elif tool.function.name == "update_goal":
|
| 116 |
+
goal = json.loads(tool.function.arguments)['goal']
|
| 117 |
+
|
| 118 |
+
print(f"[UPDATE GOAL]: {goal}")
|
| 119 |
+
tool_outputs.append({
|
| 120 |
+
"tool_call_id": tool.id,
|
| 121 |
+
"output": f"** Updated Goal: {context} **"
|
| 122 |
+
})
|
| 123 |
+
elif tool.function.name == "get_mementos":
|
| 124 |
+
context = json.loads(tool.function.arguments)['context']
|
| 125 |
+
|
| 126 |
+
print(f"[GET MEMENTOS]: {context}")
|
| 127 |
+
tool_outputs.append({
|
| 128 |
+
"tool_call_id": tool.id,
|
| 129 |
+
"output": f"** File search the vector_store: {self.cm.user_personal_memory.id} for the most relevant mementos based on the recent conversation history and context:{context} **"
|
| 130 |
+
})
|
| 131 |
+
|
| 132 |
+
# Submit all tool outputs at once after collecting them in a list
|
| 133 |
+
if tool_outputs:
|
| 134 |
+
try:
|
| 135 |
+
run = self.cm.client.beta.threads.runs.submit_tool_outputs_and_poll(
|
| 136 |
+
thread_id=thread.id,
|
| 137 |
+
run_id=run.id,
|
| 138 |
+
tool_outputs=tool_outputs
|
| 139 |
+
)
|
| 140 |
+
print("Tool outputs submitted successfully.")
|
| 141 |
+
except Exception as e:
|
| 142 |
+
print("Failed to submit tool outputs:", e)
|
| 143 |
+
else:
|
| 144 |
+
print("No tool outputs to submit.")
|
| 145 |
+
|
| 146 |
+
if run.status == 'completed':
|
| 147 |
+
messages = self.cm.client.beta.threads.messages.list(
|
| 148 |
+
thread_id=thread.id
|
| 149 |
+
)
|
| 150 |
+
# print(messages)
|
| 151 |
+
elif run.status == 'requires_action':
|
| 152 |
+
print(f'[TOOL]: Calling tool for action')
|
| 153 |
+
run = self.call_tool(run, thread)
|
| 154 |
+
else:
|
| 155 |
+
print("Something bad happened", run.status)
|
| 156 |
+
return run
|
| 157 |
+
|
| 158 |
+
class GeneralAssistant(Assistant):
|
| 159 |
+
def __init__(self, id, cm):
|
| 160 |
+
super().__init__(id, cm)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class PFAssistant(Assistant):
|
| 164 |
+
def __init__(self, id, cm):
|
| 165 |
+
super().__init__(id, cm)
|
demo.py
ADDED
|
@@ -0,0 +1,312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from gradio_calendar import Calendar
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import psycopg2
|
| 5 |
+
from psycopg2 import sql
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
from pprint import pprint
|
| 8 |
+
from user import User
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
+
import os
|
| 11 |
+
from datetime import timedelta
|
| 12 |
+
import json
|
| 13 |
+
import traceback
|
| 14 |
+
import shutil
|
| 15 |
+
|
| 16 |
+
# Load environment variables from .env file
|
| 17 |
+
load_dotenv()
|
| 18 |
+
|
| 19 |
+
# Function to fetch all users (name and created_at) for display
|
| 20 |
+
def fetch_users():
|
| 21 |
+
db_params = {
|
| 22 |
+
'dbname': 'ourcoach',
|
| 23 |
+
'user': 'ourcoach',
|
| 24 |
+
'password': 'hvcTL3kN3pOG5KteT17T',
|
| 25 |
+
'host': 'staging-ourcoach.cx8se8o0iaiy.ap-southeast-1.rds.amazonaws.com',
|
| 26 |
+
'port': '5432'
|
| 27 |
+
}
|
| 28 |
+
try:
|
| 29 |
+
with psycopg2.connect(**db_params) as conn:
|
| 30 |
+
with conn.cursor() as cursor:
|
| 31 |
+
query = sql.SQL("SELECT id, name, created_at FROM {table}").format(
|
| 32 |
+
table=sql.Identifier('public', 'users')
|
| 33 |
+
)
|
| 34 |
+
cursor.execute(query)
|
| 35 |
+
rows = cursor.fetchall()
|
| 36 |
+
colnames = [desc[0] for desc in cursor.description]
|
| 37 |
+
df = pd.DataFrame(rows, columns=colnames)
|
| 38 |
+
return df[['id', 'name', 'created_at']]
|
| 39 |
+
except psycopg2.Error as e:
|
| 40 |
+
print(f"An error occurred: {e}")
|
| 41 |
+
return pd.DataFrame()
|
| 42 |
+
|
| 43 |
+
def get_user_info(user_id):
    """Fetch the full users row for `user_id` as a dict, or None.

    Returns None both when the id does not exist and when the database
    is unreachable (the error is printed, not raised), so callers must
    handle a None profile.
    """
    # SECURITY: credentials were hard-coded in source.  Environment wins;
    # legacy literals remain only as a backward-compatible fallback.
    db_params = {
        'dbname': os.getenv('OURCOACH_DB_NAME', 'ourcoach'),
        'user': os.getenv('OURCOACH_DB_USER', 'ourcoach'),
        'password': os.getenv('OURCOACH_DB_PASSWORD', 'hvcTL3kN3pOG5KteT17T'),
        'host': os.getenv('OURCOACH_DB_HOST', 'staging-ourcoach.cx8se8o0iaiy.ap-southeast-1.rds.amazonaws.com'),
        'port': os.getenv('OURCOACH_DB_PORT', '5432'),
    }
    try:
        with psycopg2.connect(**db_params) as conn:
            with conn.cursor() as cursor:
                # Parameterized query: user_id is passed via %s, never interpolated.
                query = sql.SQL("SELECT * FROM {table} WHERE id = %s").format(
                    table=sql.Identifier('public', 'users')
                )
                cursor.execute(query, (user_id,))
                row = cursor.fetchone()
                if row:
                    colnames = [desc[0] for desc in cursor.description]
                    return dict(zip(colnames, row))
                return None
    except psycopg2.Error as e:
        print(f"An error occurred: {e}")
        return None
|
| 68 |
+
|
| 69 |
+
# Load the initial user data
|
| 70 |
+
user_df = fetch_users()
|
| 71 |
+
|
| 72 |
+
with gr.Blocks() as app:
|
| 73 |
+
gr.Markdown("## Demo 2")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# State to hold user's chat content
|
| 78 |
+
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
|
| 79 |
+
user_message = gr.State("")
|
| 80 |
+
user = gr.State()
|
| 81 |
+
prev_date = gr.State(pd.Timestamp.now().date())
|
| 82 |
+
|
| 83 |
+
# Dropdown for user selection
|
| 84 |
+
user_dropdown = gr.Dropdown(
|
| 85 |
+
choices=user_df['id'].apply(lambda x: f"{x} - {user_df.loc[user_df['id'] == x, 'name'].values[0]}").tolist(),
|
| 86 |
+
label="Please Select User First",
|
| 87 |
+
value=None,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
assistant = gr.Dropdown(
|
| 91 |
+
choices=[("Backup of Coach Steve","asst_SI8I6oLdqPAQTAiUL3tTO8E4"), ("Coach Steve","asst_C7oBmqDRi085X8V7DPdMxhRF")],
|
| 92 |
+
label="Please Select an Assistant",
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
# Calendar component for selecting date/time
|
| 96 |
+
date_picker = Calendar(type="string", label="Select Date and Time")
|
| 97 |
+
|
| 98 |
+
# "Next Day" button
|
| 99 |
+
change_day_button = gr.Button("Change Day")
|
| 100 |
+
|
| 101 |
+
# Tabs for Chat, Mementos/Goals, and Message History
|
| 102 |
+
with gr.Tabs():
|
| 103 |
+
with gr.Tab("Chat"):
|
| 104 |
+
# Chatbot component
|
| 105 |
+
chatbot = gr.Chatbot(type='messages')
|
| 106 |
+
chatbot_input = gr.Textbox(label="Your Message")
|
| 107 |
+
send_button = gr.Button("Send")
|
| 108 |
+
reset_button = gr.Button("Reset")
|
| 109 |
+
with gr.Tab("Mementos/Goals"):
|
| 110 |
+
# Display mementos/goals as a JSON list
|
| 111 |
+
mementos_display = gr.JSON(label="Mementos / Goals")
|
| 112 |
+
with gr.Tab("Raw Message History"): # New tab for message history
|
| 113 |
+
# Display history as a copyable text area
|
| 114 |
+
message_history_display = gr.Textbox(label="Raw Message History", interactive=False, lines=20, show_copy_button=True, show_label=True, autofocus=True)
|
| 115 |
+
|
| 116 |
+
# Action on button click
|
| 117 |
+
def build_user(selected_user, assistant):
    """Dropdown callback: build a fresh User for the selected entry.

    `selected_user` has the dropdown format "<id> - <name>".  Returns the
    new User (stored in gr.State) and an initial chat history announcing
    the initialization.
    """
    print("Building user")
    user_id = selected_user.split(" - ")[0]  # dropdown format is "<id> - <name>"
    user_info = get_user_info(user_id)
    if user_info is None:
        # BUG FIX: previously a User was constructed around a None profile
        # (DB error or unknown id), which crashes downstream when the
        # guidelines prompt is generated.  Surface the problem instead.
        return None, [{'role': 'assistant', 'content': f"Could not load profile for user {selected_user}."}]
    return User(user_id, user_info, client, assistant), [{'role': 'assistant', 'content': f"Chatbot initialized for user {selected_user}."}]
|
| 122 |
+
|
| 123 |
+
def change_assistant(selected_user, assistant, user):
    """Assistant-dropdown callback: switch assistants, reusing the User state.

    Returns (user, chat_history) — the two Gradio outputs wired to this event.
    """
    print("Changing assistant")  # BUG FIX: old message said "Building user" (copy-paste)
    user_id = selected_user.split(" - ")[0]
    notice = [{'role': 'assistant', 'content': f"Assistant changed to:{assistant}."}]
    if user is not None and user_id == user.user_id:
        # BUG FIX: the old code did
        #   user = user.change_assistant(assistant), [...]
        #   return user
        # which bound a tuple to `user` and returned a single value where
        # Gradio expects two outputs.  Also guard against `user` still being
        # None (assistant picked before a user is selected).
        result = user.change_assistant(assistant)
        # change_assistant may mutate in place (returning None) or return the
        # updated User — handle both.  TODO(review): confirm against user.py.
        return (result if result is not None else user), notice
    user_info = get_user_info(user_id)
    return User(user_id, user_info, client, assistant), notice
|
| 131 |
+
|
| 132 |
+
def reset(history, user):
    """Reset the conversation and wipe the user's stored memento files.

    The incoming `history` is discarded; a fresh single-message history is
    returned for the Chatbot component.
    """
    user.reset()
    memento_dir = f"mementos/{user.user_id}"
    if os.path.exists(memento_dir):
        # Remove every stored memento so the next session starts clean.
        for name in os.listdir(memento_dir):
            os.remove(os.path.join(memento_dir, name))
    return [{'role': 'assistant', 'content': "Chatbot reset."}]
|
| 141 |
+
|
| 142 |
+
def update_message_history(user):
    """Render the raw transcript (system message included) as plain text.

    One "role: content" line per message, suitable for the copyable
    Raw Message History textbox.
    """
    lines = []
    for msg in user.get_messages(False):
        lines.append(f"{msg['role']}: {msg['content']}")
    return "\n".join(lines)
|
| 145 |
+
|
| 146 |
+
def fetch_mementos(selected_user):
    """Load all stored memento JSON documents for the selected dropdown entry.

    `selected_user` has the dropdown format "<id> - <name>"; mementos live
    under mementos/<id>/ as individual JSON files.  Returns a list of parsed
    memento dicts, or a single placeholder entry when the folder is missing.
    """
    user_id = selected_user.split(" - ")[0]
    user_mementos_path = f"mementos/{user_id}"
    mementos = []
    if os.path.exists(user_mementos_path):
        # Sort for a deterministic display order; skip non-JSON strays and
        # unreadable files (the old code json.load()ed every file in the
        # folder and crashed on any non-JSON content).
        for memento_file in sorted(os.listdir(user_mementos_path)):
            if not memento_file.endswith('.json'):
                continue
            try:
                with open(os.path.join(user_mementos_path, memento_file), 'r') as f:
                    mementos.append(json.load(f))
            except (json.JSONDecodeError, OSError) as e:
                print(f"Skipping unreadable memento {memento_file}: {e}")
    else:
        mementos = [{"message": "No mementos found for this user."}]

    print("Fetched mementos:", mementos)  # Debug print to verify loading
    return mementos
|
| 162 |
+
|
| 163 |
+
user_dropdown.change(
|
| 164 |
+
fn=build_user,
|
| 165 |
+
inputs=[user_dropdown, assistant],
|
| 166 |
+
outputs=[user, chatbot]
|
| 167 |
+
).then(
|
| 168 |
+
fn=fetch_mementos,
|
| 169 |
+
inputs=user_dropdown,
|
| 170 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 171 |
+
).then(
|
| 172 |
+
fn=update_message_history, # Update message history after user selection
|
| 173 |
+
inputs=user,
|
| 174 |
+
outputs=message_history_display
|
| 175 |
+
)
|
| 176 |
+
|
| 177 |
+
assistant.change(
|
| 178 |
+
fn=change_assistant,
|
| 179 |
+
inputs=[user_dropdown, assistant, user],
|
| 180 |
+
outputs=[user, chatbot]
|
| 181 |
+
).then(
|
| 182 |
+
fn=fetch_mementos,
|
| 183 |
+
inputs=user_dropdown,
|
| 184 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 185 |
+
).then(
|
| 186 |
+
fn=update_message_history, # Update message history after user selection
|
| 187 |
+
inputs=user,
|
| 188 |
+
outputs=message_history_display
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
def user_input(user_message, history):
    """Chat-input callback: append the user's turn and clear the textbox.

    Returns (cleared textbox value, updated history, raw message) — the raw
    message is stashed in gr.State for the follow-up chatbot_response step.
    """
    new_history = history + [{"role": "user", "content": user_message}]
    return "", new_history, user_message
|
| 194 |
+
|
| 195 |
+
def chatbot_response(user, message):
    """Forward `message` to the user's assistant and return the new transcript.

    The send is synchronous; get_messages() re-reads the thread so the
    assistant's reply is included in the returned history.
    """
    print(f"User message: {message}")
    user.send_message(message)
    return user.get_messages()
|
| 199 |
+
|
| 200 |
+
chatbot_input.submit(
|
| 201 |
+
fn=user_input,
|
| 202 |
+
inputs=[chatbot_input, chatbot],
|
| 203 |
+
outputs=[chatbot_input, chatbot, user_message],
|
| 204 |
+
queue=False
|
| 205 |
+
).then(
|
| 206 |
+
fn=chatbot_response,
|
| 207 |
+
inputs=[user, user_message],
|
| 208 |
+
outputs=chatbot
|
| 209 |
+
).then(
|
| 210 |
+
fn=fetch_mementos,
|
| 211 |
+
inputs=user_dropdown,
|
| 212 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 213 |
+
).then(
|
| 214 |
+
fn=update_message_history, # Update message history after a chat message
|
| 215 |
+
inputs=user,
|
| 216 |
+
outputs=message_history_display
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
send_button.click(
|
| 220 |
+
fn=user_input,
|
| 221 |
+
inputs=[chatbot_input, chatbot],
|
| 222 |
+
outputs=[chatbot_input, chatbot, user_message],
|
| 223 |
+
queue=False
|
| 224 |
+
).then(
|
| 225 |
+
fn=chatbot_response,
|
| 226 |
+
inputs=[user, user_message],
|
| 227 |
+
outputs=chatbot
|
| 228 |
+
).then(
|
| 229 |
+
fn=fetch_mementos,
|
| 230 |
+
inputs=user_dropdown,
|
| 231 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 232 |
+
).then(
|
| 233 |
+
fn=update_message_history, # Update message history after a chat message
|
| 234 |
+
inputs=user,
|
| 235 |
+
outputs=message_history_display
|
| 236 |
+
)
|
| 237 |
+
|
| 238 |
+
reset_button.click(
|
| 239 |
+
fn=reset,
|
| 240 |
+
inputs=[chatbot, user],
|
| 241 |
+
outputs=chatbot
|
| 242 |
+
).then(
|
| 243 |
+
fn=update_message_history, # Clear message history on reset
|
| 244 |
+
inputs=user,
|
| 245 |
+
outputs=message_history_display
|
| 246 |
+
)
|
| 247 |
+
|
| 248 |
+
def date_changed(selected_date, user):
    """Notify the user's conversation of a date change and splice a notice
    into the chat just before the assistant's latest reply.

    change_date() sends a hidden "new day" message, so the transcript ends
    with the bot's reaction; the visible notice is inserted right above it.
    """
    user.change_date(selected_date)
    notice = {'role': 'assistant', 'content': f"Date changed to {selected_date}."}
    return user.get_messages()[:-1] + [notice] + [user.get_messages()[-1]]
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def next_day(selected_date, prev_date, user):
    """Advance the simulated day and sync pending mementos to the vector store.

    If the calendar still shows `prev_date` (button pressed without picking
    a new date) we advance exactly one day; otherwise we jump to the picked
    date.  Before notifying the chatbot, follow-up dates are inferred for the
    user's mementos and any staged memento files are uploaded to the user's
    personal-memory vector store, then archived under mementos/<user_id>/.

    Returns (updated chat history, new prev_date state, calendar string).
    """
    next_date = pd.to_datetime(selected_date).date()
    print("Selected date:", next_date)
    print("Previous date:", prev_date)
    if next_date == prev_date:
        # Calendar untouched: step forward exactly one day.
        next_date = (pd.to_datetime(selected_date) + timedelta(days=1)).date()

    # Infer the follow-up date for the user's mementos before rolling over.
    user.infer_memento_follow_ups()

    user_id = user.user_id
    to_upload_folder = f"mementos/to_upload/{user_id}"
    destination_folder = f"mementos/{user_id}"

    if os.path.exists(to_upload_folder):
        # Idempotent and race-safe (old code used an exists() check).
        os.makedirs(destination_folder, exist_ok=True)

        for filename in os.listdir(to_upload_folder):
            if not filename.endswith('.json'):
                continue
            file_path = os.path.join(to_upload_folder, filename)
            try:
                # Upload to OpenAI, attach to the personal-memory vector
                # store, then archive so it is not re-uploaded tomorrow.
                with open(file_path, "rb") as f:
                    memory_file = client.files.create(file=f, purpose="assistants")
                client.beta.vector_stores.files.create(
                    vector_store_id=user.conversations.user_personal_memory.id,
                    file_id=memory_file.id
                )
                shutil.move(file_path, os.path.join(destination_folder, filename))
            except Exception as e:
                # Best-effort: leave the file staged so it is retried on the
                # next day change.  BUG FIX: the old message hard-coded
                # "(unknown)" even though the filename is known here.
                print(f"Failed to upload and move file ({filename}): {e}")
                traceback.print_exc()

    # Inject the visible "Date changed" notice and return the three outputs.
    return date_changed(next_date, user), next_date, next_date.strftime("%Y-%m-%d")
|
| 296 |
+
|
| 297 |
+
change_day_button.click(
|
| 298 |
+
fn=next_day,
|
| 299 |
+
inputs=[date_picker, prev_date, user],
|
| 300 |
+
outputs=[chatbot, prev_date, date_picker]
|
| 301 |
+
).then(
|
| 302 |
+
fn=fetch_mementos,
|
| 303 |
+
inputs=user_dropdown,
|
| 304 |
+
outputs=mementos_display # Directly updating the mementos display
|
| 305 |
+
).then(
|
| 306 |
+
fn=update_message_history, # Update message history on next day change
|
| 307 |
+
inputs=user,
|
| 308 |
+
outputs=message_history_display
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
# Launch the app
|
| 312 |
+
app.launch()
|
old.py
ADDED
|
@@ -0,0 +1,290 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from gradio_calendar import Calendar
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import psycopg2
|
| 5 |
+
from psycopg2 import sql
|
| 6 |
+
from openai import OpenAI
|
| 7 |
+
from pprint import pprint
|
| 8 |
+
from user import User
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
+
import os
|
| 11 |
+
from datetime import timedelta
|
| 12 |
+
import json
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# user = User(id, user_info, client)
|
| 17 |
+
|
| 18 |
+
#app.GET('/users', fetch_users)
|
| 19 |
+
# def send_message(request):
|
| 20 |
+
# user.send_message(request.msg)
|
| 21 |
+
|
| 22 |
+
#app.GET('/get_messages', fetch_messages)
|
| 23 |
+
# def get_message(request):
|
| 24 |
+
# user.get_messages()
|
| 25 |
+
|
| 26 |
+
# Load environment variables from .env file
|
| 27 |
+
load_dotenv()
|
| 28 |
+
|
| 29 |
+
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
|
| 30 |
+
user_info = None
|
| 31 |
+
user = None
|
| 32 |
+
history = []
|
| 33 |
+
|
| 34 |
+
# Function to fetch all users (name and created_at) for display
|
| 35 |
+
def fetch_users():
    """Return a DataFrame of all users (id, name, created_at).

    Legacy (old.py) copy of the demo.py helper.  On any database error an
    empty DataFrame is returned so the UI can still render.
    """
    # SECURITY: credentials were hard-coded in source.  Environment wins;
    # the legacy literals remain only as a backward-compatible fallback —
    # rotate this password and drop the fallbacks as soon as possible.
    db_params = {
        'dbname': os.getenv('OURCOACH_DB_NAME', 'ourcoach'),
        'user': os.getenv('OURCOACH_DB_USER', 'ourcoach'),
        'password': os.getenv('OURCOACH_DB_PASSWORD', 'hvcTL3kN3pOG5KteT17T'),
        'host': os.getenv('OURCOACH_DB_HOST', 'staging-ourcoach.cx8se8o0iaiy.ap-southeast-1.rds.amazonaws.com'),
        'port': os.getenv('OURCOACH_DB_PORT', '5432'),
    }
    try:
        with psycopg2.connect(**db_params) as conn:
            with conn.cursor() as cursor:
                query = sql.SQL("SELECT id, name, created_at FROM {table}").format(
                    table=sql.Identifier('public', 'users')
                )
                cursor.execute(query)
                rows = cursor.fetchall()
                colnames = [desc[0] for desc in cursor.description]
                df = pd.DataFrame(rows, columns=colnames)
                return df[['id', 'name', 'created_at']]
    except psycopg2.Error as e:
        print(f"An error occurred: {e}")
        return pd.DataFrame()
|
| 57 |
+
|
| 58 |
+
# Load the initial user data
|
| 59 |
+
user_df = fetch_users()
|
| 60 |
+
|
| 61 |
+
def get_user_info(user_id):
    """Fetch the full users row for `user_id` as a dict, or None.

    Legacy (old.py) copy of the demo.py helper.  Returns None both for an
    unknown id and for database errors (the error is printed, not raised).
    """
    # SECURITY: credentials were hard-coded in source; prefer environment
    # variables with the legacy values as a temporary fallback.
    db_params = {
        'dbname': os.getenv('OURCOACH_DB_NAME', 'ourcoach'),
        'user': os.getenv('OURCOACH_DB_USER', 'ourcoach'),
        'password': os.getenv('OURCOACH_DB_PASSWORD', 'hvcTL3kN3pOG5KteT17T'),
        'host': os.getenv('OURCOACH_DB_HOST', 'staging-ourcoach.cx8se8o0iaiy.ap-southeast-1.rds.amazonaws.com'),
        'port': os.getenv('OURCOACH_DB_PORT', '5432'),
    }
    try:
        with psycopg2.connect(**db_params) as conn:
            with conn.cursor() as cursor:
                query = sql.SQL("SELECT * FROM {table} WHERE id = %s").format(
                    table=sql.Identifier('public', 'users')
                )
                cursor.execute(query, (user_id,))
                row = cursor.fetchone()
                if row:
                    colnames = [desc[0] for desc in cursor.description]
                    return dict(zip(colnames, row))
                return None
    except psycopg2.Error as e:
        print(f"An error occurred: {e}")
        return None
|
| 86 |
+
|
| 87 |
+
def display_info(user_id, selected_date):
    """(Re)build the module-level User for `user_id` and announce it in chat.

    Mutates the legacy script's shared globals (`user_info`, `user`,
    `history`) and returns the updated chat history.
    """
    global user_info, user, history
    user_info = get_user_info(user_id)

    if user_info:
        # BUG FIX: the old code ran User(user_id=user_info['id'], ...)
        # BEFORE this None check, so a failed DB lookup raised a TypeError
        # instead of producing the "User not found" message below.
        user = User(user_id=user_info['id'], user_info=user_info, client=client)
        history.append({'role': 'assistant', 'content': f"Chatbot initialized for {user_info['name']} (User ID: {user_id}) on {selected_date}."})
    else:
        history.append({'role': 'assistant', 'content': "User not found. Please select a valid user."})
    return history
|
| 100 |
+
|
| 101 |
+
def chatbot_response(message):
    """Send `message` through the global user and return the combined history.

    Legacy globals-based variant: prepends the script-level `history`
    (initialization notices) to the thread transcript.
    """
    global user, history
    print(f"User message: {message}")
    user.send_message(message)
    return history + user.get_messages()
|
| 106 |
+
|
| 107 |
+
# Define a global variable to keep track of the last date
|
| 108 |
+
last_selected_date = pd.Timestamp.now().strftime('%Y-%m-%d')
|
| 109 |
+
|
| 110 |
+
def date_changed(selected_date):
    """Calendar-change handler: notify the chatbot once per distinct date.

    Uses the legacy script's shared globals (`user`, `history`,
    `last_selected_date`).  Duplicate events for the same date are no-ops.
    """
    global user, history, last_selected_date

    # The calendar widget can fire repeatedly for the same value; skip dupes.
    if selected_date == last_selected_date:
        return history

    last_selected_date = selected_date

    if user is not None:
        user.change_date(selected_date)
        # NOTE(review): [:-2] drops the penultimate message; the newer
        # demo.py uses [:-1] here — confirm which slice is intended before
        # changing it.
        history = user.get_messages()[:-2] + [{'role': 'assistant', 'content': f"Date changed to {selected_date}."}] + [user.get_messages()[-1]]
    # BUG FIX: previously this fell through and implicitly returned None
    # when `user` was None, wiping the Chatbot component's contents.
    return history
|
| 124 |
+
|
| 125 |
+
def next_day():
    """Advance the tracked date by exactly one day and propagate the change.

    Updates the legacy `last_selected_date` global and returns the new date
    string plus the refreshed chat history from date_changed().
    """
    global last_selected_date
    bumped = pd.to_datetime(last_selected_date) + timedelta(days=1)
    last_selected_date = bumped.strftime('%Y-%m-%d')
    return last_selected_date, date_changed(last_selected_date)
|
| 130 |
+
|
| 131 |
+
def fetch_mementos(selected_user):
    """Load all stored memento JSON documents for the selected dropdown entry.

    Legacy (old.py) copy of the demo.py helper.  `selected_user` has the
    dropdown format "<id> - <name>"; mementos live under mementos/<id>/.
    Returns parsed memento dicts, or a placeholder when the folder is missing.
    """
    user_id = selected_user.split(" - ")[0]
    user_mementos_path = f"mementos/{user_id}"
    mementos = []
    if os.path.exists(user_mementos_path):
        # Deterministic order; skip non-JSON strays and unreadable files
        # (the old code crashed on any non-JSON content in the folder).
        for memento_file in sorted(os.listdir(user_mementos_path)):
            if not memento_file.endswith('.json'):
                continue
            try:
                with open(os.path.join(user_mementos_path, memento_file), 'r') as f:
                    mementos.append(json.load(f))
            except (json.JSONDecodeError, OSError) as e:
                print(f"Skipping unreadable memento {memento_file}: {e}")
    else:
        mementos = [{"message": "No mementos found for this user."}]

    print("Fetched mementos:", mementos)  # Debug print to verify loading
    return mementos
|
| 147 |
+
|
| 148 |
+
display_info(user_df.loc[0, 'id'], pd.Timestamp.now())
|
| 149 |
+
|
| 150 |
+
with gr.Blocks() as app:
|
| 151 |
+
gr.Markdown("## Demo")
|
| 152 |
+
|
| 153 |
+
# State to hold user's chat content
|
| 154 |
+
user_content = gr.State("")
|
| 155 |
+
|
| 156 |
+
# Dropdown for user selection
|
| 157 |
+
user_dropdown = gr.Dropdown(
|
| 158 |
+
choices=user_df['id'].apply(lambda x: f"{x} - {user_df.loc[user_df['id'] == x, 'name'].values[0]}").tolist(),
|
| 159 |
+
label="Select User"
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
# Calendar component for selecting date/time
|
| 163 |
+
date_picker = Calendar(type="datetime", label="Select Date and Time")
|
| 164 |
+
|
| 165 |
+
# "Next Day" button
|
| 166 |
+
next_day_button = gr.Button("Next Day")
|
| 167 |
+
|
| 168 |
+
# Tabs for Chat, Mementos/Goals, and Message History
|
| 169 |
+
with gr.Tabs():
|
| 170 |
+
with gr.Tab("Chat"):
|
| 171 |
+
# Chatbot component
|
| 172 |
+
chatbot = gr.Chatbot(type='messages')
|
| 173 |
+
chatbot_input = gr.Textbox(label="Your Message")
|
| 174 |
+
send_button = gr.Button("Send")
|
| 175 |
+
reset_button = gr.Button("Reset")
|
| 176 |
+
with gr.Tab("Mementos/Goals"):
|
| 177 |
+
# Display mementos/goals as a JSON list
|
| 178 |
+
mementos_display = gr.JSON(label="Mementos / Goals")
|
| 179 |
+
with gr.Tab("Raw Message History"): # New tab for message history
|
| 180 |
+
# Display history as a copyable text area
|
| 181 |
+
message_history_display = gr.Textbox(label="Raw Message History", interactive=False, lines=20, show_copy_button=True, show_label=True, autofocus=True)
|
| 182 |
+
|
| 183 |
+
# Action on button click
|
| 184 |
+
def build_user(selected_user, selected_date):
    """Dropdown callback: extract the user id and initialize the chatbot.

    The dropdown value has the format "<id> - <name>"; everything before
    the first " - " is the id handed to display_info().
    """
    uid = selected_user.split(" - ")[0]
    return display_info(uid, selected_date)
|
| 187 |
+
|
| 188 |
+
def reset():
    """Reset the global conversation state and delete all stored mementos.

    Legacy globals-based variant: replaces the shared `history` with a
    single reset notice and wipes mementos/<user_id>/.
    """
    global history, user
    history = [{'role': 'assistant', 'content': "Chatbot reset."}]
    user.reset()
    memento_dir = f"mementos/{user.user_id}"
    if os.path.exists(memento_dir):
        for name in os.listdir(memento_dir):
            os.remove(os.path.join(memento_dir, name))
    return history
|
| 198 |
+
|
| 199 |
+
def update_message_history():
    """Format the global user's raw transcript as copy-pastable plain text.

    One "role: content" line per message, system message included
    (get_messages(False)).
    """
    return "\n".join(
        f"{msg['role']}: {msg['content']}" for msg in user.get_messages(False)
    )
|
| 202 |
+
|
| 203 |
+
user_dropdown.change(
|
| 204 |
+
fn=build_user,
|
| 205 |
+
inputs=[user_dropdown, date_picker],
|
| 206 |
+
outputs=chatbot
|
| 207 |
+
).then(
|
| 208 |
+
fn=fetch_mementos,
|
| 209 |
+
inputs=user_dropdown,
|
| 210 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 211 |
+
).then(
|
| 212 |
+
fn=update_message_history, # Update message history after user selection
|
| 213 |
+
inputs=None,
|
| 214 |
+
outputs=message_history_display
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
def user_input(user_message, history):
    """Chat-input callback: append the user's turn and clear the textbox.

    Returns (cleared textbox value, updated history, raw message); the raw
    message feeds the follow-up chatbot_response step via gr.State.
    """
    appended = history + [{"role": "user", "content": user_message}]
    return "", appended, user_message
|
| 220 |
+
|
| 221 |
+
chatbot_input.submit(
|
| 222 |
+
fn=user_input,
|
| 223 |
+
inputs=[chatbot_input, chatbot],
|
| 224 |
+
outputs=[chatbot_input, chatbot, user_content],
|
| 225 |
+
queue=False
|
| 226 |
+
).then(
|
| 227 |
+
fn=chatbot_response,
|
| 228 |
+
inputs=user_content,
|
| 229 |
+
outputs=chatbot
|
| 230 |
+
).then(
|
| 231 |
+
fn=fetch_mementos,
|
| 232 |
+
inputs=user_dropdown,
|
| 233 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 234 |
+
).then(
|
| 235 |
+
fn=update_message_history, # Update message history after a chat message
|
| 236 |
+
inputs=None,
|
| 237 |
+
outputs=message_history_display
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
send_button.click(
|
| 241 |
+
fn=user_input,
|
| 242 |
+
inputs=[chatbot_input, chatbot],
|
| 243 |
+
outputs=[chatbot_input, chatbot, user_content],
|
| 244 |
+
queue=False
|
| 245 |
+
).then(
|
| 246 |
+
fn=chatbot_response,
|
| 247 |
+
inputs=user_content,
|
| 248 |
+
outputs=chatbot
|
| 249 |
+
).then(
|
| 250 |
+
fn=fetch_mementos,
|
| 251 |
+
inputs=user_dropdown,
|
| 252 |
+
outputs=mementos_display # Directly updating the mementos display component
|
| 253 |
+
).then(
|
| 254 |
+
fn=update_message_history, # Update message history after a chat message
|
| 255 |
+
inputs=None,
|
| 256 |
+
outputs=message_history_display
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
reset_button.click(
|
| 260 |
+
fn=reset,
|
| 261 |
+
inputs=None,
|
| 262 |
+
outputs=chatbot
|
| 263 |
+
).then(
|
| 264 |
+
fn=update_message_history, # Clear message history on reset
|
| 265 |
+
inputs=None,
|
| 266 |
+
outputs=message_history_display
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
date_picker.change(
|
| 270 |
+
fn=date_changed,
|
| 271 |
+
inputs=date_picker,
|
| 272 |
+
outputs=chatbot
|
| 273 |
+
).then(
|
| 274 |
+
fn=update_message_history, # Update message history on date change
|
| 275 |
+
inputs=None,
|
| 276 |
+
outputs=message_history_display
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
next_day_button.click(
|
| 280 |
+
fn=next_day,
|
| 281 |
+
inputs=None,
|
| 282 |
+
outputs=[date_picker, chatbot]
|
| 283 |
+
).then(
|
| 284 |
+
fn=update_message_history, # Update message history on next day change
|
| 285 |
+
inputs=None,
|
| 286 |
+
outputs=message_history_display
|
| 287 |
+
)
|
| 288 |
+
|
| 289 |
+
# Launch the app
|
| 290 |
+
app.launch()
|
playground copy.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
playground.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.115.4
|
| 2 |
+
gradio==5.5.0
|
| 3 |
+
gradio_calendar==0.0.6
|
| 4 |
+
langchain==0.3.7
|
| 5 |
+
langchain_core==0.3.15
|
| 6 |
+
openai==1.54.1
|
| 7 |
+
pandas==2.2.3
|
| 8 |
+
psycopg2==2.9.10
|
| 9 |
+
pydantic==2.9.2
|
| 10 |
+
python-dotenv==1.0.1
|
temp.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"{\"user_id\":\"18ae6d05-11f9-40f4-814e-91ff0f89c25e\",\"created\":\"2024-11-01T00:00:00Z\",\"follow_up_on\":\"2024-11-08T00:00:00Z\",\"title\":\"Time Management Improvement\",\"description\":\"I will dedicate 30 minutes, four times a week, to plan and prioritize my tasks for the upcoming days.\",\"tags\":[\"time management\",\"productivity\",\"planning\"],\"priority\":\"medium\",\"status\":\"not_started\",\"location\":\"\",\"recurrence\":\"none\"}"
|
user.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import io
|
| 3 |
+
import os
|
| 4 |
+
import pandas as pd
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
import json
|
| 7 |
+
from assistants import Assistant
|
| 8 |
+
import glob
|
| 9 |
+
|
| 10 |
+
def get_current_datetime():
    """Return the current local wall-clock time as a naive datetime."""
    return datetime.now()
|
| 12 |
+
|
| 13 |
+
class ConversationManager:
    """Owns one OpenAI Assistants thread (plus a per-user vector store) and
    mediates every message the User object sends through it.
    """

    def __init__(self, client, user, asst_id):
        self.user = user
        # Single assistant for now; keyed by name so more can be added later.
        self.assistants = {'general': Assistant(asst_id, self)}
        self.client = client
        self.user_personal_memory = None  # vector store; set by create_thread()
        self.current_thread = self.create_thread()

        # Simulated-conversation state shared with the UI (the demo can
        # move the date forward).
        self.state = {'date': pd.Timestamp.now().date()}

        print("[Init State]:", self.state)

    def create_thread(self):
        """Create a fresh thread seeded with the coaching system prompt.

        Also creates an (initially empty) 'UserPersonalMemory' vector store
        for memento uploads.  NOTE: despite the old comment claiming a
        per-month reuse check, no existing thread is ever reused — a new
        thread and vector store are created on every call (including reset()).
        """
        user_interaction_guidelines = self.user.user_interaction_guidelines
        thread = self.client.beta.threads.create()

        # Vector store for the user's personal events/state; files are
        # attached later by the memento-upload flow.
        self.user_personal_memory = self.client.beta.vector_stores.create(
            name="UserPersonalMemory",
            metadata={
                "description": "Personal events and emotional states of the user for personalized assistance and reminders of upcoming events.",
                "user_id": self.user.user_id,
                "categories": "events,tasks,emotions,goals",
                "created_at": str(datetime.now()),
                "updated_at": str(datetime.now()),
                "tags": "personal,emotional_state,upcoming_events,goals",
                "source": "Mementos shared by user",
            },
        )

        # BUG FIX: the old backslash-continued f-string embedded the source
        # indentation (long runs of spaces) inside the prompt text; implicit
        # string concatenation keeps the prompt clean.
        # The Assistants threads API only accepts user/assistant roles, hence
        # "assistant" for this system-style seed message.
        self.system_message = self.add_message_to_thread(
            thread.id,
            "assistant",
            f"You are coaching:\n\n{user_interaction_guidelines}\n\n"
            "Be mindful of this information at all times in order to "
            "be as personalised as possible when conversing. Ensure to "
            "follow the conversation guidelines and flow provided. Use the "
            "current state of the conversation to adhere to the flow.\n\n"
            "** You are now in the Introduction state. **",
        )
        return thread

    def _get_current_thread_history(self, remove_system_message=True, msg=None):
        """Return the current thread as [{'role', 'content'}, ...], oldest first.

        remove_system_message drops the seeded system prompt (first message).
        When `msg` is given, only messages strictly after it are listed
        (plus the same first-element trim).
        """
        if not remove_system_message:
            listing = self.client.beta.threads.messages.list(self.current_thread.id, order="asc")
            return [{"role": m.role, "content": m.content[0].text.value} for m in listing]
        if msg:
            after = self.client.beta.threads.messages.list(self.current_thread.id, order="asc", after=msg.id)
            return [{"role": m.role, "content": m.content[0].text.value} for m in after][1:]
        listing = self.client.beta.threads.messages.list(self.current_thread.id, order="asc")
        # [1:] removes the seeded system message.
        return [{"role": m.role, "content": m.content[0].text.value} for m in listing][1:]

    def add_message_to_thread(self, thread_id, role, content):
        """Append a message to `thread_id` and return the created message object."""
        return self.client.beta.threads.messages.create(
            thread_id=thread_id,
            role=role,
            content=content,
        )

    def _get_thread_messages(self, thread):
        """List all messages of `thread`, oldest first."""
        return self.client.beta.threads.messages.list(thread.id, order="asc")

    def _run_current_thread(self, text):
        """Run the 'general' assistant over the current thread with `text`.

        If multiple assistants are ever registered, assistant selection
        would happen here.
        """
        return self.assistants['general'].process(self.current_thread, text)

    def _send_hidden_message(self, text):
        """Send `text`, let the assistant respond, then delete the user turn.

        Used for out-of-band notifications (e.g. "new day") so only the
        assistant's reaction remains visible in the transcript.
        """
        msg = self.add_message_to_thread(self.current_thread.id, "user", text)
        self._run_current_thread(text)
        self.client.beta.threads.messages.delete(message_id=msg.id, thread_id=self.current_thread.id)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class User:
    """A coached end user: identity, generated interaction guidelines, and
    the conversation state managed on their behalf.

    Parameters (``__init__``):
        user_id: stable identifier for this user (also used for hashing).
        user_info: free-form profile text summarized into guidelines.
        client: OpenAI client used for chat/completions and thread APIs.
        asst_id: id of the assistant that handles this user's conversations.
    """

    # Prefixes of hidden control prompts injected by ConversationManager
    # (see change_date / _send_hidden_message) that must not surface in the
    # visible chat history.
    _HIDDEN_MESSAGE_PREFIXES = (
        "** It is a new day:",
        "Pay attention to the current state you are in",
    )

    def __init__(self, user_id, user_info, client, asst_id):
        self.user_id = user_id
        self.client = client
        self.asst_id = asst_id
        # Coarse conversation progress. Previously this attribute was never
        # initialized, so update_conversation_state() raised AttributeError
        # on its first call.
        self.conversation_state = {'stage': None, 'last_interaction': None}
        self.user_interaction_guidelines = self.generate_user_interaction_guidelines(user_info, client)
        self.conversations = ConversationManager(client, self, asst_id)

    def reset(self):
        """Discard all conversation history by creating a fresh manager."""
        self.conversations = ConversationManager(self.client, self, self.asst_id)

    def generate_user_interaction_guidelines(self, user_info, client):
        """Summarize *user_info* into interaction guidelines via one chat call.

        Returns the model's text: a user summary plus sample conversation
        topics/questions for engaging this user.
        """
        prompt = f"Using the users information:\n\n{user_info}\n\n generate a \
user summary that describes how best to interact with the user to create a personalized \
and targeted chat experience. Include all the information above and based on that information \
list several sample conversation topics and questions that will engage the user."

        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are an expert at building profile documents containing rich user insights."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.2
        )
        return response.choices[0].message.content

    def update_conversation_state(self, stage, last_interaction):
        """Record the current stage and the timestamp of the last interaction."""
        self.conversation_state['stage'] = stage
        self.conversation_state['last_interaction'] = last_interaction

    def _get_current_thread(self):
        """Return the manager's active thread object."""
        return self.conversations.current_thread

    def send_message(self, text):
        """Send *text* on the current thread; returns the resulting run."""
        run = self.conversations._run_current_thread(text)
        return run

    @staticmethod
    def _is_hidden_system_message(content):
        """True if *content* is one of the hidden control prompts we inject."""
        # Tuple form of startswith checks all prefixes in one call.
        return content.startswith(User._HIDDEN_MESSAGE_PREFIXES)

    def get_messages(self, exclude_system_msg=True):
        """Return the current thread's messages.

        When *exclude_system_msg* is True, hidden control messages (date
        changes, state reminders) are filtered out. The original filter
        combined the two prefix tests with ``or`` — always true for disjoint
        prefixes — so nothing was ever excluded; the intended conjunction is
        implemented via _is_hidden_system_message.
        """
        history = self.conversations._get_current_thread_history(exclude_system_msg)
        if not exclude_system_msg:
            return history
        return [m for m in history if not self._is_hidden_system_message(m['content'])]

    def change_date(self, date):
        """Advance the conversation's current date and notify the assistant
        via a hidden message; returns the refreshed visible history."""
        print(f"[Changing Date]: {self.conversations.state['date']} -> {date}")
        self.conversations.state['date'] = date
        self.conversations._send_hidden_message(f"** It is a new day: {date} **")
        print("[Date Updated]:", self.conversations.state['date'])
        return self.get_messages()

    def __hash__(self) -> int:
        # Users are identified solely by user_id.
        return hash(self.user_id)

    def _infer_follow_ups(self, created, context):
        """Ask the model for a follow-up date ('YYYY-MM-dd') for one memento."""
        prompt = f"Infer the date of the next follow-up for the user based on the created date:{created} and the context:{context}"

        response = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are an expert at estimating when to follow up events with users. Only output a single string representing the follow up date i.e. 'YYYY-MM-dd'"},
                {"role": "user", "content": prompt}
            ],
            temperature=0.2
        )
        return response.choices[0].message.content

    def infer_memento_follow_ups(self):
        """Annotate each pending memento file with an inferred follow-up date.

        Rewrites every JSON file under mementos/to_upload/<user_id>/ in place,
        adding/overwriting its 'follow_up_on' field.
        """
        mementos_path = f"mementos/to_upload/{self.user_id}/*.json"

        for file_path in glob.glob(mementos_path):
            with open(file_path, 'r+') as file:
                data = json.load(file)
                infered_follow_up = self._infer_follow_ups(data['created'], data['context'])
                print(f"[Infered Follow Up]: {infered_follow_up}")
                data['follow_up_on'] = infered_follow_up
                # Rewind and truncate so the shorter/longer JSON replaces the
                # old contents cleanly.
                file.seek(0)
                json.dump(data, file, indent=4)
                file.truncate()

    def change_assistant(self, asst_id):
        """Point this user at a different assistant id; returns self (fluent)."""
        self.asst_id = asst_id
        self.conversations.assistants['general'] = Assistant(self.asst_id, self.conversations)
        return self
|
utils.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
from typing import Optional
|
| 3 |
+
from enum import Enum
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
|
| 6 |
+
class StageClass(str, Enum):
    """Coaching conversation stages.

    str-valued so members compare equal to, and serialize as, their plain
    string values (useful for pydantic fields and JSON payloads).
    """
    introduction = 'introduction'
    pending = 'pending'
    guided_coaching = 'guided_coaching'
    daily_reflection = 'daily_reflection'
    general_coaching = 'general_coaching'
|
| 12 |
+
|
| 13 |
+
class AssistantNames(str, Enum):
    """Named assistant slots known to the conversation manager."""
    general = 'general'
    pf_assistant = 'pf_assistant'
|
| 16 |
+
|
| 17 |
+
class Role(str, Enum):
    """Chat participant roles (who produces the next message)."""
    user = 'user'
    assistant = 'assistant'
|
| 20 |
+
|
| 21 |
+
class AssistantDetails(BaseModel):
    """Identifies an assistant by its slot name and its provider-side id."""
    # Which named assistant slot this refers to.
    name: AssistantNames
    # Provider-side assistant identifier — presumably an OpenAI assistant id;
    # verify against callers. (Field name shadows the `id` builtin but cannot
    # be renamed without breaking the serialized schema.)
    id: str
|
| 24 |
+
|
| 25 |
+
class ConversationStages(BaseModel):
    """Structured description of the conversation's current state.

    Note: under pydantic v2 a bare ``Optional[str]`` annotation is still a
    *required* field; the explicit ``= None`` defaults below make these
    fields genuinely optional (the behavior a v1-era ``Optional`` implied),
    while remaining backward compatible for callers that always supply them.
    """
    # Current coaching stage.
    stage: StageClass
    # Assistant that should handle the next turn.
    selected_assistant: AssistantDetails
    # Newly learned facts about the user, if any.
    new_user_info: Optional[str] = None
    # Suggested follow-up questions, if any.
    follow_up_questions: Optional[str] = None
    # Whose turn it is next.
    goes_next: Role
    # Free-form context carried forward, if any.
    context: Optional[str] = None
|
| 32 |
+
|
| 33 |
+
import json
|
| 34 |
+
from pydantic import BaseModel
|
| 35 |
+
|
| 36 |
+
class DailyGoal(BaseModel):
    """A single day's objective within a weekly plan."""
    # What the user is trying to achieve that day.
    objective: str
    # Short display title for the goal.
    title: str
    # How to tell the goal has been met.
    completion_criteria: str
    # Whether the user has completed it.
    completed: bool
    # Which day this goal belongs to — format not fixed by this model
    # (string, not int/date); confirm convention against producers.
    day: str
|
| 42 |
+
|
| 43 |
+
class WeeklyGoal(BaseModel):
    """A week-level objective decomposed into daily goals."""
    # What the user is trying to achieve that week.
    objective: str
    # Short display title for the goal.
    title: str
    # How to tell the goal has been met.
    completion_criteria: str
    # Index of the current day within the week — presumably 0- or 1-based;
    # verify against producers.
    current_day: int
    # One DailyGoal per day of the week.
    daily_objectives: list[DailyGoal]
|
| 49 |
+
|
| 50 |
+
class MonthlyGoal(BaseModel):
    """A month-level objective decomposed into weekly goals."""
    # What the user is trying to achieve that month.
    objective: str
    # Short display title for the goal.
    title: str
    # How to tell the goal has been met.
    completion_criteria: str
    # Index of the current week within the month — presumably 0- or 1-based;
    # verify against producers.
    current_week: int
    # One-paragraph summary of the plan.
    brief_summary: str
    # One WeeklyGoal per week of the month.
    weekly_goals: list[WeeklyGoal]
|