Spaces:
Sleeping
Sleeping
DEBUG: punctuation + login_with_token
Browse files
- app.py +9 -1
- functions.py +3 -0
app.py
CHANGED
|
@@ -145,6 +145,13 @@ async def sign_in(email, password):
|
|
| 145 |
async def login_with_token(access_token: str, refresh_token: str):
|
| 146 |
try:
|
| 147 |
decoded_token = jwt.decode(access_token, options={"verify_signature": False})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 148 |
|
| 149 |
json = {
|
| 150 |
"code": status.HTTP_200_OK,
|
|
@@ -153,7 +160,8 @@ async def login_with_token(access_token: str, refresh_token: str):
|
|
| 153 |
"access_token": access_token,
|
| 154 |
"refresh_token": refresh_token,
|
| 155 |
"issued_at": decoded_token.get("iat"),
|
| 156 |
-
"expires_at": decoded_token.get("exp")
|
|
|
|
| 157 |
|
| 158 |
}
|
| 159 |
return json
|
|
|
|
| 145 |
async def login_with_token(access_token: str, refresh_token: str):
|
| 146 |
try:
|
| 147 |
decoded_token = jwt.decode(access_token, options={"verify_signature": False})
|
| 148 |
+
user_id_oauth = decoded_token.get("sub")
|
| 149 |
+
try:
|
| 150 |
+
user_id = supabase.table("ConversAI_UserInfo").select("*").filter("user_id", "eq", user_id_oauth).execute()
|
| 151 |
+
user_name = user_id.data[0]["username"]
|
| 152 |
+
|
| 153 |
+
except:
|
| 154 |
+
user_name = 'null'
|
| 155 |
|
| 156 |
json = {
|
| 157 |
"code": status.HTTP_200_OK,
|
|
|
|
| 160 |
"access_token": access_token,
|
| 161 |
"refresh_token": refresh_token,
|
| 162 |
"issued_at": decoded_token.get("iat"),
|
| 163 |
+
"expires_at": decoded_token.get("exp"),
|
| 164 |
+
"username": user_name
|
| 165 |
|
| 166 |
}
|
| 167 |
return json
|
functions.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import pymupdf
|
|
|
|
| 2 |
from concurrent.futures import ThreadPoolExecutor
|
| 3 |
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
|
| 4 |
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
|
@@ -118,6 +119,8 @@ def addDocuments(text: str, source: str, vectorstore: str):
|
|
| 118 |
chunk_overlap=250,
|
| 119 |
add_start_index=True
|
| 120 |
)
|
|
|
|
|
|
|
| 121 |
texts = [Document(page_content=text, metadata={"source": source})]
|
| 122 |
texts = splitter.split_documents(texts)
|
| 123 |
ids = [str(uuid4()) for _ in range(len(texts))]
|
|
|
|
| 1 |
import pymupdf
|
| 2 |
+
import string
|
| 3 |
from concurrent.futures import ThreadPoolExecutor
|
| 4 |
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
|
| 5 |
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
|
|
|
| 119 |
chunk_overlap=250,
|
| 120 |
add_start_index=True
|
| 121 |
)
|
| 122 |
+
text = text.replace("\n", " ")
|
| 123 |
+
text = text.translate(str.maketrans('', '', string.punctuation.replace(".", "")))
|
| 124 |
texts = [Document(page_content=text, metadata={"source": source})]
|
| 125 |
texts = splitter.split_documents(texts)
|
| 126 |
ids = [str(uuid4()) for _ in range(len(texts))]
|