Commit
·
ecf3087
1
Parent(s):
3ab87df
fix: add cache folder for transformers
Browse files
- app/api/endpoints/analysis.py +1 -4
- app/core/config.py +4 -0
- app/services/sentiment_service.py +6 -4
app/api/endpoints/analysis.py
CHANGED
|
@@ -2,18 +2,15 @@ from typing import Any, List, Dict
|
|
| 2 |
import uuid
|
| 3 |
from datetime import datetime
|
| 4 |
from fastapi import APIRouter, HTTPException, status, Request
|
|
|
|
| 5 |
|
| 6 |
import asyncio
|
| 7 |
from motor.motor_asyncio import AsyncIOMotorClient
|
| 8 |
from bson import ObjectId
|
| 9 |
|
| 10 |
-
import pandas as pd
|
| 11 |
-
from trendspy import Trends
|
| 12 |
-
|
| 13 |
from app.core.config import settings
|
| 14 |
from app.core.clients import qstash_client
|
| 15 |
from app.schemas.analysis_schema import (
|
| 16 |
-
WeeklyTrendResponseSchema,
|
| 17 |
WeeklyTrendListResponse,
|
| 18 |
TrendDetailResponseSchema,
|
| 19 |
OnDemandRequestSchema,
|
|
|
|
| 2 |
import uuid
|
| 3 |
from datetime import datetime
|
| 4 |
from fastapi import APIRouter, HTTPException, status, Request
|
| 5 |
+
from trendspy import Trends
|
| 6 |
|
| 7 |
import asyncio
|
| 8 |
from motor.motor_asyncio import AsyncIOMotorClient
|
| 9 |
from bson import ObjectId
|
| 10 |
|
|
|
|
|
|
|
|
|
|
| 11 |
from app.core.config import settings
|
| 12 |
from app.core.clients import qstash_client
|
| 13 |
from app.schemas.analysis_schema import (
|
|
|
|
| 14 |
WeeklyTrendListResponse,
|
| 15 |
TrendDetailResponseSchema,
|
| 16 |
OnDemandRequestSchema,
|
app/core/config.py
CHANGED
|
@@ -34,6 +34,10 @@ class Settings(BaseSettings):
|
|
| 34 |
CONSUMER_BATCH_SIZE: int = 32
|
| 35 |
CONSUMER_BATCH_TIMEOUT_SECONDS: int = 5
|
| 36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
# FastAPI
|
| 38 |
API_PREFIX: str = "/api"
|
| 39 |
API_VERSION: str = "/v1"
|
|
|
|
| 34 |
CONSUMER_BATCH_SIZE: int = 32
|
| 35 |
CONSUMER_BATCH_TIMEOUT_SECONDS: int = 5
|
| 36 |
|
| 37 |
+
# Cache folder
|
| 38 |
+
HF_HOME: str = "/data"
|
| 39 |
+
TRANSFORMERS_CACHE: str = "/data/transformers"
|
| 40 |
+
|
| 41 |
# FastAPI
|
| 42 |
API_PREFIX: str = "/api"
|
| 43 |
API_VERSION: str = "/v1"
|
app/services/sentiment_service.py
CHANGED
|
@@ -27,11 +27,13 @@ class SentimentService:
|
|
| 27 |
|
| 28 |
# Load model, tokenizer, and config (for id2label mapping)
|
| 29 |
model_name = settings.SENTIMENT_MODEL
|
| 30 |
-
self.tokenizer = AutoTokenizer.from_pretrained(
|
| 31 |
-
|
| 32 |
-
self.model = AutoModelForSequenceClassification.from_pretrained(model_name).to(
|
| 33 |
-
self.device
|
| 34 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
self.model.eval() # set model to inference mode
|
| 36 |
print("Sentiment model loaded successfully.")
|
| 37 |
|
|
|
|
| 27 |
|
| 28 |
# Load model, tokenizer, and config (for id2label mapping)
|
| 29 |
model_name = settings.SENTIMENT_MODEL
|
| 30 |
+
self.tokenizer = AutoTokenizer.from_pretrained(
|
| 31 |
+
model_name, cache_dir=settings.TRANSFORMERS_CACHE
|
|
|
|
|
|
|
| 32 |
)
|
| 33 |
+
self.config = AutoConfig.from_pretrained(model_name)
|
| 34 |
+
self.model = AutoModelForSequenceClassification.from_pretrained(
|
| 35 |
+
model_name, cache_dir=settings.TRANSFORMERS_CACHE
|
| 36 |
+
).to(self.device)
|
| 37 |
self.model.eval() # set model to inference mode
|
| 38 |
print("Sentiment model loaded successfully.")
|
| 39 |
|