Spaces:
Sleeping
Sleeping
Upload 5 files
Browse files- Dockerfile +21 -0
- main.py +50 -0
- requirements.txt +0 -0
- show_page.py +49 -0
- show_personality.py +59 -0
Dockerfile
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use official Python image
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Copy requirements.txt first (for caching)
COPY requirements.txt .

# Install dependencies.
# Fix: the original used two separate RUN instructions, creating an extra
# image layer; chaining them keeps the image smaller and the cache behavior
# identical (both still invalidate only when requirements.txt changes).
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Copy app code and model files
COPY . .

# Expose port
EXPOSE 7860

# Run FastAPI app with Uvicorn
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
main.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI
|
| 2 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
+
from show_page import get_questions
|
| 4 |
+
from show_personality import show_my_personality
|
| 5 |
+
from pydantic import BaseModel
|
| 6 |
+
|
| 7 |
+
class user_ask(BaseModel):
    # Request body schema for POST /check_user.
    # `user` holds the Likert answers as ints — 1 agree, 0 neutral, -1 disagree
    # (encoding per the prompt in show_personality.py); presumably one value
    # per generated question (10 total) — TODO confirm against the frontend.
    user:list[int]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Module-level cache holding the most recently generated question set;
# written by /get_question and read by /check_user.
# NOTE(review): shared mutable state — not safe across multiple workers or
# replicas (each process has its own copy); confirm single-worker deployment.
result_list=[]
app=FastAPI()
# Open CORS so a separately-hosted frontend can call this API.
# NOTE(review): browsers ignore allow_credentials=True when the origin is a
# wildcard '*' — confirm whether credentialed requests are actually needed.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=['*'],
    allow_methods=['*'],
    allow_headers=['*']
)
|
| 20 |
+
|
| 21 |
+
@app.get('/')
def home():
    """Landing endpoint: confirms the service is up with a static greeting."""
    welcome_payload = {'CognitiveTrace': 'Welcome to CognitiveTrace!'}
    return welcome_payload
|
| 26 |
+
|
| 27 |
+
@app.get('/get_question')
def question_show():
    """Generate a fresh question set and cache it for later scoring.

    The generated questions replace whatever was previously stored in the
    module-level ``result_list`` cache, which /check_user reads.
    """
    questions = get_questions()
    # Slice assignment replaces the cache contents in one step
    # (equivalent to clear() followed by append()).
    result_list[:] = [questions]
    return {'CognitiveTrace': questions}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@app.post('/check_user')
def check_list(u:user_ask):
    """Score the user's answers against the cached question set.

    Delegates to show_my_personality() with the cached questions
    (``result_list``) and the answers from the request body.
    """
    verdict = show_my_personality(user_choice=u.user, result=result_list)
    return {'CognitiveTrace': verdict}
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
|
requirements.txt
ADDED
|
File without changes
|
show_page.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from groq import Groq
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
# Load variables from .env before constructing the client — presumably the
# Groq() constructor reads GROQ_API_KEY from the environment; TODO confirm.
load_dotenv()
client = Groq()
|
| 5 |
+
|
| 6 |
+
def get_questions():
    """Ask the LLM for 10 Big-Five (OCEAN) personality questions.

    Returns:
        str: the model's raw reply — per the prompt, a numbered list of
        exactly 10 Likert-scale questions (2 per trait), nothing else.
    """
    # Fix: the original opened the literal with FOUR quotes (`""""`), so a
    # stray `"` was sent to the model as the first character of the prompt.
    prompt="""
You are a psychology-based personality assessment engine.

Your task is to generate EXACTLY 10 unique personality assessment questions every time you are invoked.

The questions must be based on the Big Five (OCEAN) personality traits:
- Openness (O)
- Conscientiousness (C)
- Extraversion (E)
- Agreeableness (A)
- Neuroticism (N)

Rules:
1. Generate EXACTLY 2 questions for each trait (total = 10).
2. Questions must be psychologically meaningful and neutral.
3. Questions must measure stable personality tendencies, not temporary moods.
4. Questions must be written in simple, clear English.
5. Avoid repeating wording or structure from previous questions.
6. Do NOT mention the trait names in the questions.
7. Do NOT provide answers, explanations, or scoring.
8. Output ONLY the questions in a numbered list from 1 to 10.
9. Each question must be suitable for a Likert-scale response (Strongly Agree → Strongly Disagree).
10. Do NOT include any extra text, headers, emojis, or commentary.

Your output must contain ONLY the 10 questions.
"""

    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": prompt
            }
        ],
        # The language model which will generate the completion.
        model="llama-3.3-70b-versatile"
    )
    return chat_completion.choices[0].message.content
|
show_personality.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from groq import Groq
|
| 2 |
+
from dotenv import load_dotenv
|
| 3 |
+
|
| 4 |
+
# Load variables from .env before constructing the client — presumably the
# Groq() constructor reads GROQ_API_KEY from the environment; TODO confirm.
load_dotenv()
client = Groq()
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def show_my_personality(result:list,user_choice:list):
    """Ask the LLM to classify the user's dominant Big-Five trait.

    Args:
        result: the question set previously produced by get_questions()
            (as cached by the /get_question endpoint).
        user_choice: the user's answers, one int per question
            (1 agree, 0 neutral, -1 disagree).

    Returns:
        str: the model's reply — per the prompt, a single trait name.
    """
    # Fix: the original prompt claimed the input was "a list of exactly 3
    # user responses" and referenced the internal name `result_list`, but the
    # API actually sends ONE response list of 10 values; the description now
    # matches the real input, and the truncated instruction
    # ("Reply exactly one word with.") is completed.
    prompt="""You are a psychology-based personality assessor.

Your input consists of two things:
1. A list of 10 questions presented to the user.
2. A single list of 10 user responses corresponding to the questions in order:
   - 1 → Agree
   - 0 → Neutral
   - -1 → Disagree

Your task:

1. Based on the content of each question and the user's response, **decide the user's dominant personality trait** from the Big Five:
   - Openness
   - Conscientiousness
   - Extraversion
   - Agreeableness
   - Neuroticism

2. Reply with exactly one word: the trait name.

Rules:

- Do NOT include scores, abbreviations, or extra commentary.
"""

    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": prompt
            },
            {
                "role":'user',
                'content':f"Here is the Questions: {result}\n\n And here is the User response: {user_choice}"
            }
        ],
        # The language model which will generate the completion.
        model="llama-3.3-70b-versatile"
    )
    return chat_completion.choices[0].message.content
| 59 |
+
|