Spaces:
Runtime error
Runtime error
Ilyas KHIAT
commited on
Commit
·
e08a6fb
1
Parent(s):
c06f463
first
Browse files- .dockerignore +11 -0
- .gitattributes copy +35 -0
- .gitignore +3 -0
- Dockerfile +13 -0
- README copy.md +10 -0
- chunks_ia_signature.pkl +3 -0
- config.yaml +13 -0
- kg_ia_signature.pkl +3 -0
- main.py +153 -0
- prompt.py +88 -0
- rag.py +306 -0
- requirements.txt +20 -0
- scenes.pkl +3 -0
.dockerignore
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
*.pyo
|
| 4 |
+
*.pyd
|
| 5 |
+
.Python
|
| 6 |
+
env/
|
| 7 |
+
venv/
|
| 8 |
+
.git
|
| 9 |
+
.dockerignore
|
| 10 |
+
Dockerfile
|
| 11 |
+
*.md
|
.gitattributes copy
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
.env
|
| 3 |
+
data/
|
Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12
|
| 2 |
+
|
| 3 |
+
RUN useradd -m -u 1000 user
|
| 4 |
+
USER user
|
| 5 |
+
ENV PATH="/home/user/.local/bin:$PATH"
|
| 6 |
+
|
| 7 |
+
WORKDIR /app
|
| 8 |
+
|
| 9 |
+
COPY --chown=user ./requirements.txt requirements.txt
|
| 10 |
+
RUN pip install --no-cache-dir --upgrade -r requirements.txt
|
| 11 |
+
|
| 12 |
+
COPY --chown=user . /app
|
| 13 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README copy.md
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Ia Back
|
| 3 |
+
emoji: 🐨
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
chunks_ia_signature.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:240d159d8dedc430a3b6049a60f0805fa423cf9abece82b36c4fb650c8c5d437
|
| 3 |
+
size 145837
|
config.yaml
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nom_ouvrage: "confessions muettes"
|
| 2 |
+
auteur: "Gaspard Boreal"
|
| 3 |
+
resume: "Un recit futuristique qui se deroule en 2038, melangeant art et technologie, ou l'IA est omnipresente."
|
| 4 |
+
severite: 5
|
| 5 |
+
scenes_par_numero_de_page:
|
| 6 |
+
- 1
|
| 7 |
+
- 5
|
| 8 |
+
- 10
|
| 9 |
+
derniere_page: 15
|
| 10 |
+
scenes_choisies:
|
| 11 |
+
- 2
|
| 12 |
+
|
| 13 |
+
|
kg_ia_signature.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:55b49436038a45405798f6d05591464b1a35360409d83dbead163921707ac592
|
| 3 |
+
size 7354091
|
main.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, HTTPException, UploadFile, File,Request,Depends,status,BackgroundTasks
|
| 2 |
+
from fastapi.security import OAuth2PasswordBearer
|
| 3 |
+
from pydantic import BaseModel
|
| 4 |
+
from typing import Optional, List
|
| 5 |
+
from uuid import uuid4
|
| 6 |
+
import os
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
from rag import *
|
| 9 |
+
from fastapi.responses import StreamingResponse
|
| 10 |
+
import json
|
| 11 |
+
from prompt import *
|
| 12 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 13 |
+
import requests
|
| 14 |
+
import pandas as pd
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
load_dotenv()
|
| 19 |
+
|
| 20 |
+
## setup authorization
|
| 21 |
+
api_keys = [os.environ.get("FASTAPI_API_KEY")]
|
| 22 |
+
|
| 23 |
+
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token") # use token authentication
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def api_key_auth(api_key: str = Depends(oauth2_scheme)):
    """FastAPI dependency: reject requests whose bearer token is not a known API key.

    Raises:
        HTTPException: 401 when the presented token is not in ``api_keys``.
    """
    # NOTE(review): a 401 status is paired with a "Forbidden" detail (403 wording).
    # Also, if FASTAPI_API_KEY is unset, api_keys == [None] and no real token can
    # ever match -- confirm the env var is always set in deployment.
    if api_key not in api_keys:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Forbidden"
        )
| 32 |
+
|
| 33 |
+
# Toggle authentication: when DEV=True the API is open; otherwise every
# route requires a valid bearer token via api_key_auth.
dev_mode = os.environ.get("DEV")

if dev_mode == "True":
    app = FastAPI()
else:
    app = FastAPI(dependencies=[Depends(api_key_auth)])

# Wide-open CORS: any origin/method/header is accepted, with credentials.
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"])
|
| 41 |
+
|
| 42 |
+
# Pydantic model for the form data
class verify_response_model(BaseModel):
    # NOTE(review): Field is not imported here directly; it reaches this module
    # via `from rag import *` (rag.py imports it from pydantic) -- confirm.
    response: str = Field(description="The response from the user to the question")
    answers: list[str] = Field(description="The possible answers to the question to test if the user read the entire book")
    question: str = Field(description="The question asked to the user to test if they read the entire book")

class UserInput(BaseModel):
    # Chat-style request: the user query, whether to stream the answer,
    # and the prior message history.
    query: str
    stream: Optional[bool] = False
    messages: Optional[list[dict]] = []

class Artwork(BaseModel):
    # Shape of one artwork record served by /artworks/{artist_name}.
    name: str
    artist: str
    image_url: str
    date: str
    description: str

class WhatifInput(BaseModel):
    # A question previously asked by the app and the reader's answer to it.
    question: str
    answer: str
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Global variable to store the data
artworks_data = []

def load_data():
    """Load the artworks spreadsheet into the module-level ``artworks_data`` list.

    Reads ./data/data.xlsx (Sheet1), keeps only the rows whose 'Publication'
    column is True, and stores them as a list of dicts.
    """
    global artworks_data

    # Provide the path to your local spreadsheet
    spreadsheet_path = "./data/data.xlsx"

    # Read the spreadsheet into a DataFrame
    df = pd.read_excel(spreadsheet_path, sheet_name='Sheet1') # Adjust sheet_name as needed

    # Missing cells become False so the Publication filter below treats them as
    # "not published". NOTE(review): this also turns missing text fields (e.g.
    # 'Artiste') into booleans -- downstream .lower() calls assume str.
    df = df.fillna(False)
    # Convert DataFrame to a list of dictionaries
    df_filtered = df[df['Publication'] == True]

    artworks_data = df_filtered.to_dict(orient='records')

    print("Data loaded successfully")

# Populate artworks_data at import time (module-level side effect).
load_data()
|
| 86 |
+
|
| 87 |
+
# Endpoints
@app.get("/artworks/{artist_name}")
async def get_artworks_by_artist(artist_name: str):
    """Return every published artwork whose 'Artiste' field contains *artist_name*.

    Matching is a case-insensitive substring test. Raises 404 when nothing
    matches.
    """
    artist_name_lower = artist_name.lower()
    results = []

    for artwork in artworks_data:
        artist = artwork['Artiste']
        # BUGFIX: load_data() fills missing cells with False, so 'Artiste' may
        # not be a string; skip such rows instead of crashing on .lower().
        if isinstance(artist, str) and artist_name_lower in artist.lower():
            result = {
                'name': artwork['Titre français'],
                'artist': artist,
                'image_url': artwork['Image_URL'],
                'date': str(artwork['Date']),  # Ensure date is a string
                'description': artwork['Media']
            }
            results.append(result)

    if not results:
        raise HTTPException(status_code=404, detail="Artist not found")

    return results
|
| 108 |
+
|
| 109 |
+
@app.post("/generate_sphinx")
async def generate_sphinx():
    """Ask the 'sphinx' LLM to produce a comprehension question plus its accepted answers."""
    try:
        sphinx : sphinx_output = generate_sphinx_response()
        return {"question": sphinx.question, "answers": sphinx.answers}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/verify_sphinx")
async def verify_sphinx(response: verify_response_model):
    """Grade the reader's answer against the accepted answers; returns {"score": bool}."""
    try:
        score : bool = verify_response(response.response, response.answers, response.question)
        return {"score": score}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/generate")
async def generate(user_input: UserInput):
    """Book-QA chat endpoint; streams the answer when user_input.stream is set."""
    try:
        print(user_input.stream,user_input.query)
        if user_input.stream:
            return StreamingResponse(generate_stream(user_input.query,user_input.messages,stream=True),media_type="application/json")
        else:
            return generate_stream(user_input.query,user_input.messages,stream=False)
    except Exception as e:
        # NOTE(review): errors here are returned as a 200 body {"message": ...},
        # unlike the 500s raised above -- confirm clients expect this.
        return {"message": str(e)}
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@app.post("/whatif")
async def whatif(whatif_input: WhatifInput):
    """Generate five alternative story continuations for a question/answer pair."""
    try:
        print(whatif_input.question)
        return generate_whatif_stream(question=whatif_input.question,response=whatif_input.answer)
    except Exception as e:
        return {"message": str(e)}

@app.post("/whatif_chat")
async def generate_whatif_chat(user_input: UserInput):
    """Follow-up chat on a chosen 'what if' branch; streams when requested."""
    try:
        if user_input.stream:
            return StreamingResponse(generate_stream_whatif_chat(user_input.query,user_input.messages,stream=True),media_type="application/json")
        else:
            return generate_stream_whatif_chat(user_input.query,user_input.messages,stream=False)
    except Exception as e:
        return {"message": str(e)}
|
prompt.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Prompt: the "Grand Sphinx" generates a comprehension question (and the
# correct answer(s)) from a random excerpt, to check the user read the book.
# Slots: {book_name}, {summary}, {excerpt}.
template_sphinx = '''
Voici un résumé et un bout du récit de {book_name}. Vous êtes le Grand Sphinx, maître des énigmes et des questions.
Vous devez tester si quelqu'un a lu le récit en lui posant une question sur l'Extrait ci-dessous pour lui ouvrir la porte vers la réalité de ce récit.
Votre question doit être en français, et vous devez l'associer à la ou les **bonnes** réponses possibles. Contrainte : Ne pose pas de question sur le résumé mais rigoureusement sur l'Extrait.

**résumé**:
{summary}

**Extrait**:
{excerpt}

**Instructions**:
- S'il y'a une seul bonne reponse possible , renvoit une seul reponse possible.
- Les autres bonne reponse possibles doivent etre correcte pour la question.

**Sortie**:
La sortie doit être une question en français, qui teste la compréhension du récit. Vous devez fournir la ou les **bonnes** réponses possibles et **correctes** à cette question.

**
'''

# Prompt: grade the user's answer (0-10) against the accepted answers.
# Slots: {initial_question}, {answers}, {response}.
template_verify = '''
Vous êtes un expert en correction et comparaison de réponses. Retournez une note sur 10 sur la cohérence de la réponse de l'utilisateur avec la réponse correcte. Voici les détails :

Question : {initial_question}

Réponses correctes : {answers}

Réponse de l'utilisateur : {response}

Évaluez la réponse de l'utilisateur et attribuez une note sur 10 en fonction de sa cohérence avec la réponse correcte.

'''

# Prompt: book-QA assistant used by /generate.
# Slots: {name_book}, {writer}, {summary}, {context}, {query}.
template = '''
Vous êtes un assistant IA très intelligent qui connaît tout sur le livre {name_book} de {writer}.
Vous allez répondre à la question de l'utilisateur, qui portera sur ce livre. Vos réponses seront claires et courtes.

Résumé du livre : {summary}

**Contexte récupéré (si pertinent pour votre réponse) :** {context}

**Question de l'utilisateur :** {query}

**Sortie attendue :** Votre réponse doit être bien formatée, plaisante à lire et inclure des émojis.
'''

# Prompt: propose five alternative story continuations for a Q/A pair.
# Slots: {question}, {response}, {context}.
template_whatif = '''
Prends le temps de bien lire la question posée par IA SIGNATURE au lecteur et la réponse faite par le lecteur :
* Question posée par IA SIGNATURE -> {question}
* Bonne réponse du lecteur -> {response}

Contexte :
{context}

**Instructions** :
Relis le passage correspondant dans le récit et propose cinq autres choix qu'auraient pu faire l'auteur et ayant un impact sur la suite du récit. Numérote les résultats de 1 à 5 en prenant en compte :
Choix 1 : Choix ayant un impact déterminant avec une suite totalement différente pour le récit
Choix 2 : Choix entraînant une suite du récit beaucoup plus sombre
Choix 3 : Choix entraînant une suite du récit beaucoup plus ouverte, gai et inspirante
Choix 4 : A toi de jouer
Choix 5 : Conserve la réponse actuelle

**CONTRAINTE** :
Présente la réponse de la façon suivante :

-> "Voici 5 suites différentes possibles à ce moment du récit :"
-> Affiche les 5 réponses sous forme de 5 bullets points en sautant une ligne entre chaque choix pour aérer le texte
-> Demande en fin de réponse :"QUELLE SUITE VEUX TU DONNER AU RÉCIT ? (1 ou 2 ou 3 ou 4 ou 5) ?"

'''

# Prompt: follow-up chat after a "what if" branch was chosen (/whatif_chat).
# Slots: {name_book}, {writer}, {summary}, {messages}, {query}, {context}.
template_whatif_response = '''
Vous êtes un assistant IA très intelligent qui connaît tout sur le livre {name_book} de {writer}.
Vous allez répondre à la question de l'utilisateur, qui portera sur ce livre. Vos réponses seront claires et courtes.

Résumé du livre : {summary}

**Historique des messages :** {messages}

**l'utilisateur :** {query}

**Extrait récupéré (si pertinent pour votre réponse) :** {context}

**Sortie attendue :** Votre réponse doit être bien formatée, plaisante à lire et inclure des émojis.
'''
|
| 87 |
+
|
| 88 |
+
|
rag.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain_openai import OpenAIEmbeddings
|
| 2 |
+
from langchain_community.vectorstores import FAISS
|
| 3 |
+
from langchain_core.documents import Document
|
| 4 |
+
from langchain.document_loaders import PyPDFLoader
|
| 5 |
+
|
| 6 |
+
from langchain_openai import ChatOpenAI
|
| 7 |
+
from langchain_core.output_parsers import StrOutputParser
|
| 8 |
+
from langchain_core.prompts import PromptTemplate
|
| 9 |
+
from uuid import uuid4
|
| 10 |
+
from prompt import *
|
| 11 |
+
import random
|
| 12 |
+
from itext2kg.models import KnowledgeGraph
|
| 13 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 14 |
+
|
| 15 |
+
from langchain_experimental.text_splitter import SemanticChunker
|
| 16 |
+
|
| 17 |
+
import faiss
|
| 18 |
+
from langchain_community.docstore.in_memory import InMemoryDocstore
|
| 19 |
+
|
| 20 |
+
from pydantic import BaseModel, Field
|
| 21 |
+
from dotenv import load_dotenv
|
| 22 |
+
import os
|
| 23 |
+
|
| 24 |
+
from langchain_core.tools import tool
|
| 25 |
+
import pickle
|
| 26 |
+
|
| 27 |
+
import unicodedata
|
| 28 |
+
import yaml
|
| 29 |
+
|
| 30 |
+
load_dotenv()
|
| 31 |
+
index_name = os.environ.get("INDEX_NAME")
|
| 32 |
+
# Global initialization
|
| 33 |
+
embedding_model = "text-embedding-3-small"
|
| 34 |
+
|
| 35 |
+
embedding = OpenAIEmbeddings(model=embedding_model)
|
| 36 |
+
# vector_store = PineconeVectorStore(index=index_name, embedding=embedding)
|
| 37 |
+
|
| 38 |
+
def advanced_graph_to_json(graph:KnowledgeGraph):
    """Flatten an itext2kg KnowledgeGraph into a JSON-ready dict.

    Returns {"noeuds": [...], "relations": [...]} where node ids are entity
    names with spaces replaced by underscores.
    """
    nodes = []
    edges = []
    for node in graph.entities:
        node_id = node.name.replace(" ", "_")
        label = node.name
        type = node.label  # NOTE: shadows the builtin `type` inside this function
        nodes.append({"id": node_id, "label": label, "type": type})
    for relationship in graph.relationships:
        source = relationship.startEntity
        source_id = source.name.replace(" ", "_")
        target = relationship.endEntity
        target_id = target.name.replace(" ", "_")
        label = relationship.name
        edges.append({"source": source_id, "label": label, "cible": target_id})
    return {"noeuds": nodes, "relations": edges}
|
| 54 |
+
|
| 55 |
+
def load_document(name):
    """Load ./data/<name>.pdf with PyPDFLoader and return its pages.

    Returns False (not an exception) on any failure, e.g. a missing file.
    """
    try:
        loader = PyPDFLoader(f"./data/{name}.pdf")
        pages = loader.load()
        return pages
    except Exception as e:
        print(e)
        print("Make sure the name indicated in the config.yaml file is the same as the pdf file in the data folder")
        return False
|
| 64 |
+
|
| 65 |
+
def chunk_by_scene(scenes_indexes,pages,last_page=None):
    """Split *pages* into one text string per scene.

    Args:
        scenes_indexes: 1-based page numbers where each scene starts.
        pages: sequence of objects exposing ``page_content`` (PyPDF pages).
        last_page: 1-based last page of the final scene (inclusive);
            defaults to the end of the document.

    Returns:
        list[str] of concatenated scene texts, or False on error.
    """
    try:
        scenes = []

        for i in range(len(scenes_indexes)-1):
            print("scene sclice",scenes_indexes[i]-1,scenes_indexes[i+1]-1)
            current_scene = "Scene "+ str(i+1)
            print("current_scene",current_scene)
            # Strip newlines/tabs so each scene is one continuous string.
            scene_content = "".join([page.page_content.replace("\n","").replace(" "," ").replace("\t"," ") for page in pages[scenes_indexes[i]-1:scenes_indexes[i+1]-1]])
            scenes.append(scene_content)

        if last_page == None:
            last_page = len(pages)
        print("scene sclice",scenes_indexes[-1]-1,last_page)
        current_scene = "Scene "+str(len(scenes_indexes))
        print("current_scene",current_scene)
        # BUGFIX: the final scene previously sliced to the end of the document,
        # ignoring last_page entirely; bound the slice at last_page so the
        # config's 'derniere_page' actually takes effect.
        scene_content = "".join([page.page_content.replace("\n","").replace(" "," ").replace("\t"," ") for page in pages[scenes_indexes[-1]-1:last_page]])
        scenes.append(scene_content)

        return scenes
    except Exception as e:
        print(e)
        return False
|
| 88 |
+
|
| 89 |
+
def chunk_semantic(pages):
    """Chunk the whole document by semantic similarity.

    Joins all page texts (newlines/tabs stripped), then splits on embedding
    distance breakpoints (standard deviation, threshold 2.718). Returns a
    list of chunk strings.
    """
    text_filtered = "\n".join([page.page_content.replace("\n","").replace(" "," ").replace("\t"," ") for page in pages])
    text_splitter = SemanticChunker(OpenAIEmbeddings(),breakpoint_threshold_type="standard_deviation",breakpoint_threshold_amount=2.718)
    chunks_filtered = text_splitter.create_documents([text_filtered])
    semantic_chunks = [chunk.page_content for chunk in chunks_filtered]
    return semantic_chunks
|
| 95 |
+
|
| 96 |
+
def handle_config(config_path='config.yaml'):
    """Read the YAML config describing the book and how to chunk/verify it.

    Returns a dict with keys name_book, scenes_indexes, writer, summary,
    severite, scene_specific and last_page, or False on any error.
    """
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f)

        name = config.get('nom_ouvrage', '')
        scenes_indexes = config.get('scenes_par_numero_de_page', [])
        writer = config.get('auteur', 'anonyme')
        summary = config.get('resume', '')
        # Grade threshold used by verify_response. Original comment said
        # "0: faible, 1: max", but config.yaml sets 5 and the value is compared
        # to a 0-10 grade -- NOTE(review): confirm the intended scale.
        severite = config.get('severite', 0)
        scene_specific = config.get('scenes_choisies', None)  # None: whole book; [1,3,5]: scenes 1, 3 and 5
        last_page = config.get('derniere_page', None )

        ## log config
        print("########### Config loaded ###########")
        print(f"Loading document {name}")
        print(f"Writer: {writer}")
        print(f"Scenes: {scenes_indexes}")
        print(f"Summary: {summary}")
        print(f"Severite: {severite}")
        print(f"Scene specific: {scene_specific}")
        print(f"Last page: {last_page}")
        print("#####################################")

        config = {
            "name_book": name,
            "scenes_indexes": scenes_indexes,
            "writer": writer,
            "summary": summary,
            "severite": severite,
            "scene_specific": scene_specific,
            "last_page": last_page
        }
        return config
    except Exception as e:
        print(f"Error: {e}")
        return False
|
| 133 |
+
|
| 134 |
+
# Load config.yaml and unpack the settings used throughout this module.
# NOTE(review): if handle_config() or load_document() fail they return False,
# and the calls below would then raise at import time.
config = handle_config()

name_book = config.get('name_book')
scenes_indexes = config.get('scenes_indexes')
writer = config.get('writer')
summary_text = config.get('summary')
severite = config.get('severite')
scene_specific = config.get('scene_specific')
last_page = config.get('last_page')

# Module-level pipeline run at import time: load the PDF, split it into
# scenes, and split it into semantic chunks.
print("########### Loading document ###########")
pages = load_document(name_book)
print("########### Pages loaded ###########")

print("########### Loading scenes ###########")
# BUGFIX: last_page was read from config (derniere_page) but never forwarded,
# so the final scene always ran to the end of the document.
scenes = chunk_by_scene(scenes_indexes,pages,last_page)
print("########### Scenes loaded ###########")

print("########### Loading chunks ###########")
chunks = chunk_semantic(pages)
print("########### Chunks loaded ###########")
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class sphinx_output(BaseModel):
    # Structured LLM output for the sphinx question generator.
    question: str = Field(description="The question to ask the user to test if they read the entire book")
    answers: list[str] = Field(description="The possible answers to the question to test if the user read the entire book")

class verify_response_model(BaseModel):
    # Input shape for answer verification (mirrors main.verify_response_model,
    # but with the question field named 'initial_question').
    response: str = Field(description="The response from the user to the question")
    answers: list[str] = Field(description="The possible answers to the question to test if the user read the entire book")
    initial_question: str = Field(description="The question asked to the user to test if they read the entire book")

class verification_score(BaseModel):
    # Structured LLM output: a 0-10 grade of the user's answer.
    score: float = Field(description="The score of the user's response from 0 to 10 to the question")


# Shared chat model for all LLM calls in this module.
llm = ChatOpenAI(model="gpt-4o", max_tokens=1000, temperature=0.5)
|
| 171 |
+
|
| 172 |
+
def split_texts(text : str) -> list[str]:
    """Split *text* into ~1000-character chunks with 200-character overlap."""
    chunker = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
        is_separator_regex=False,
    )
    return chunker.split_text(text)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
#########################################################################
### HERE: choose a specific scene via this function's default argument.
def get_random_chunk(scene_specific = scene_specific):
    """Pick a random excerpt for the sphinx question.

    Args:
        scene_specific: None to draw from the whole book's semantic chunks,
            or a list of 1-based scene numbers (e.g. [1, 3, 5]) to draw only
            from those scenes.

    Returns:
        (chunk_text, scene_specific) -- the second element tells the caller
        which scenes were used (None / empty means the whole book).
    """
    if scene_specific:
        scene_specific_content = [scenes[i-1] for i in scene_specific]
        scene_specific_content = " ".join(scene_specific_content)
        chunks_scene = split_texts(scene_specific_content)
        print(f"Scene {scene_specific} has {len(chunks_scene)} chunks")
        print([chunk[0:50] for chunk in chunks_scene])
        print('---')
        chunk_chosen = chunks_scene[random.randint(0, len(chunks_scene) - 1)]
        print(f"Chosen chunk: {chunk_chosen}")
        return chunk_chosen, scene_specific

    return chunks[random.randint(0, len(chunks) - 1)],scene_specific
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def get_vectorstore(chunks) -> FAISS:
    """Build an in-memory FAISS store from a list of text chunks."""
    # Index dimensionality is taken from a probe embedding of "hello world".
    index = faiss.IndexFlatL2(len(embedding.embed_query("hello world")))
    vector_store = FAISS(
        embedding_function=embedding,
        index=index,
        docstore=InMemoryDocstore(),
        index_to_docstore_id={},
    )
    print("Adding documents to vector store")
    print("Chunks",len(chunks))
    documents = [Document(page_content=chunk) for chunk in chunks]
    uuids = [str(uuid4()) for _ in range(len(documents))]
    vector_store.add_documents(documents=documents, ids=uuids)
    return vector_store

# Two stores built at import time (embeds the whole book -- incurs API cost):
# fine-grained semantic chunks for QA, whole scenes for the "what if" flow.
vectore_store = get_vectorstore(chunks)
scenes_vectore_store = get_vectorstore(scenes)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def generate_sphinx_response() -> sphinx_output:
    """Generate a sphinx question plus its accepted answers from a random excerpt.

    When specific scenes are configured, the {summary} slot of the prompt is
    replaced by a scene label instead of the book summary.
    """
    summary = summary_text
    excerpt , scene_number = get_random_chunk()
    if scene_number:
        summary = "scene " + str(scene_number)
    prompt = PromptTemplate.from_template(template_sphinx)
    structured_llm = llm.with_structured_output(sphinx_output)
    # Create an LLM chain with the prompt and the LLM
    llm_chain = prompt | structured_llm

    # NOTE(review): "writer" is passed but template_sphinx has no {writer} slot.
    return llm_chain.invoke({"writer":writer,"book_name":name_book,"summary":summary,"excerpt":excerpt})
|
| 229 |
+
|
| 230 |
+
#############################################################
### HERE: the strictness of the verification (severite) comes from config.yaml.
def verify_response(response:str,answers:list[str],question:str) -> bool:
    """Grade the user's answer with the LLM and compare it to the severity threshold.

    Args:
        response: the user's answer.
        answers: the accepted answers.
        question: the question that was asked.

    Returns:
        True when the LLM's 0-10 grade is >= the configured ``severite``,
        False otherwise.
    """
    prompt = PromptTemplate.from_template(template_verify)
    structured_llm = llm.with_structured_output(verification_score)
    llm_chain = prompt | structured_llm
    score = llm_chain.invoke({"response":response,"answers":answers,"initial_question":question})
    # BUGFIX: previously fell off the end and implicitly returned None (not a
    # bool, despite the annotation) when the grade was below the threshold.
    return score.score >= severite
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def retrieve_context_from_vectorestore(query:str) -> str:
    """Return the top-3 MMR-ranked semantic chunks relevant to *query*.

    NOTE(review): annotated -> str but retriever.invoke returns a list of
    Documents; callers interpolate it into prompts, where it is stringified.
    """
    retriever = vectore_store.as_retriever(search_type="mmr", search_kwargs={"k": 3})
    return retriever.invoke(query)

def retrieve_context_from_scenes(query:str) -> str:
    """Return the single most similar full scene for *query* (list of 1 Document)."""
    retriever = scenes_vectore_store.as_retriever(search_kwargs={"k": 1})
    return retriever.invoke(query)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def generate_stream(query:str,messages = [], model = "gpt-4o-mini", max_tokens = 300, temperature = 1,index_name="",stream=True,vector_store=None):
    """Answer a book question with RAG context (used by /generate).

    Note: messages/model/max_tokens/temperature/index_name/vector_store are
    accepted but unused here -- the module-level ``llm`` is used as-is.
    Returns a token generator when stream=True, a full string otherwise,
    or False on error.
    """
    try:
        print("init chat")
        print("init template")
        prompt = PromptTemplate.from_template(template)
        summary = summary_text

        print("retreiving context")
        context = retrieve_context_from_vectorestore(query)
        print(f"Context: {context}")
        llm_chain = prompt | llm | StrOutputParser()

        print("streaming")
        if stream:
            return llm_chain.stream({"name_book":name_book,"writer":writer,"context":context,"query":query,"summary":summary})
        else:
            return llm_chain.invoke({"name_book":name_book,"writer":writer,"context":context,"query":query,"summary":summary})

    except Exception as e:
        print(e)
        return False
|
| 271 |
+
|
| 272 |
+
def generate_whatif_stream(question:str,response:str, stream:bool = False) -> str:
    """Produce the five alternative story continuations for a Q/A pair.

    Retrieves the single most relevant full scene as context. Returns a
    token generator when stream=True, a full string otherwise, or False
    on error.
    """
    try:
        prompt = PromptTemplate.from_template(template_whatif)
        llm_chain = prompt | llm | StrOutputParser()
        print("Enter whatif")
        context = retrieve_context_from_scenes(f"question: {question} . reponse : {response}")
        print(f"Context: {context}")

        if stream:
            return llm_chain.stream({"question":question,"response":response,"context":context})
        else:
            return llm_chain.invoke({"question":question,"response":response,"context":context})
    except Exception as e:
        print(e)
        return False
|
| 287 |
+
|
| 288 |
+
def generate_stream_whatif_chat(query:str,messages = [], model = "gpt-4o-mini", max_tokens = 500, temperature = 1,index_name="",stream=True,vector_store=None):
    """Follow-up chat on a chosen 'what if' branch (used by /whatif_chat).

    Passes the prior message history plus retrieved chunk context to the
    whatif-response template. model/max_tokens/temperature/index_name/
    vector_store are accepted but unused. Returns a token generator when
    stream=True, a full string otherwise, or False on error.
    """
    try:
        print("init chat")
        print("init template")
        prompt = PromptTemplate.from_template(template_whatif_response)
        print("retreiving context")
        context = retrieve_context_from_vectorestore(query)
        print(f"Context: {context}")
        llm_chain = prompt | llm | StrOutputParser()

        print("streaming")
        if stream:
            return llm_chain.stream({"name_book":name_book,"writer":writer,"messages":messages,"context":context,"query":query,"summary":summary_text})
        else:
            return llm_chain.invoke({"name_book":name_book,"writer":writer,"messages":messages,"context":context,"query":query,"summary":summary_text})

    except Exception as e:
        print(e)
        return False
|
requirements.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn
|
| 3 |
+
python-multipart
|
| 4 |
+
async-timeout
|
| 5 |
+
pymupdf
|
| 6 |
+
python-dotenv
|
| 7 |
+
typing-extensions
|
| 8 |
+
pydantic==2.9.2
|
| 9 |
+
openai==1.45.0
|
| 10 |
+
langchain==0.3.0
|
| 11 |
+
langchain-core==0.3.0
|
| 12 |
+
langchain-openai==0.2.0
|
| 13 |
+
faiss-cpu
|
| 14 |
+
neo4j==5.24.0
|
| 15 |
+
itext2kg==0.0.7
|
| 16 |
+
langchain-community
|
| 17 |
+
six
|
| 18 |
+
httpx==0.27.2
|
| 19 |
+
|
| 20 |
+
pandas
|
scenes.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:17fc4636b752c5b8f1434d0c97c95ea3b12605b083689e6d79daacd060f6c110
|
| 3 |
+
size 142917
|