win2win committed on
Commit
2ceb040
·
verified ·
1 Parent(s): 6db8e32

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -8
app.py CHANGED
@@ -3,6 +3,11 @@ from transformers import pipeline
3
  import torch
4
  from pydantic import BaseModel
5
  import os
 
 
 
 
 
6
 
7
  app = FastAPI()
8
 
@@ -11,17 +16,23 @@ torch.backends.cuda.enable_flash_sdp(False)
11
  torch.backends.cuda.enable_mem_efficient_sdp(False)
12
 
13
 
14
- # Set cache directory to a writable location
15
- os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface_cache"
16
  os.makedirs(os.environ["TRANSFORMERS_CACHE"], exist_ok=True)
17
 
18
 
19
- # Load model
20
- model = pipeline(
21
- "text-classification",
22
- model="win2win/3-epochs-classifier-ver2",
23
- device="cuda" if torch.cuda.is_available() else "cpu"
24
- )
 
 
 
 
 
 
25
 
26
  class Request(BaseModel):
27
  text: str
 
3
  import torch
4
  from pydantic import BaseModel
5
  import os
6
+ import numpy as np # Explicit numpy import
7
+
8
+ # Fix numpy initialization
9
+ np.zeros(1) # Force numpy load before model
10
+
11
 
12
  app = FastAPI()
13
 
 
16
  torch.backends.cuda.enable_mem_efficient_sdp(False)
17
 
18
 
19
+ # Configure paths and device
20
+ os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface"
21
  os.makedirs(os.environ["TRANSFORMERS_CACHE"], exist_ok=True)
22
 
23
 
24
+ # Load model with error handling
25
+ try:
26
+ model = pipeline(
27
+ "text-classification",
28
+ model="win2win/3-epochs-classifier-ver2",
29
+ device="cpu", # Force CPU for free tier
30
+ torch_dtype=torch.float32 # Avoid mixed precision
31
+ )
32
+ print("Model loaded successfully!")
33
+ except Exception as e:
34
+ print(f"Model loading failed: {str(e)}")
35
+ raise
36
 
37
  class Request(BaseModel):
38
  text: str