hamxaameer committed on
Commit
a2669ab
·
verified ·
1 Parent(s): ebf0eaf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -26
app.py CHANGED
@@ -4,7 +4,9 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import os
5
 
6
  # Model configuration
7
- MODEL_NAME = "model.safetensors" # Replace with your actual HF model repo
 
 
8
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
9
 
10
  # Global variables for model caching
@@ -22,34 +24,48 @@ def load_model():
22
  print(f"Loading model from: {MODEL_NAME}")
23
  print(f"Using device: {DEVICE}")
24
 
25
- # Load tokenizer
26
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
27
-
28
- # Set pad token if not set
29
- if tokenizer.pad_token is None:
30
- tokenizer.pad_token = tokenizer.eos_token
31
-
32
- # Load model with appropriate settings
33
- model = AutoModelForCausalLM.from_pretrained(
34
- MODEL_NAME,
35
- torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
36
- device_map="auto" if DEVICE == "cuda" else None,
37
- trust_remote_code=True
38
- )
39
-
40
- if DEVICE == "cpu":
41
- model = model.to(DEVICE)
42
-
43
- print("✅ Model and tokenizer loaded successfully!")
44
-
45
- # Cache the loaded model and tokenizer
46
- _model = model
47
- _tokenizer = tokenizer
 
 
 
48
 
49
- return model, tokenizer
 
 
 
 
 
 
50
 
51
  # Initialize model and tokenizer
52
- model, tokenizer = load_model()
 
 
 
 
 
53
 
54
  def generate_code(pseudocode, indent=1, line=1, temperature=0.7, top_p=0.9, max_length=128):
55
  """
@@ -67,6 +83,20 @@ def generate_code(pseudocode, indent=1, line=1, temperature=0.7, top_p=0.9, max_
67
  Generated code string
68
  """
69
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  # Validate inputs
71
  if not pseudocode.strip():
72
  return "❌ Error: Please enter some pseudocode."
 
4
  import os
5
 
6
  # Model configuration
7
+ # Loading from current directory since model files are uploaded to Space root
8
+ MODEL_NAME = "." # Current directory contains model.safetensors and tokenizer files
9
+
10
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
11
 
12
  # Global variables for model caching
 
24
  print(f"Loading model from: {MODEL_NAME}")
25
  print(f"Using device: {DEVICE}")
26
 
27
+ try:
28
+ # Load tokenizer
29
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
30
+
31
+ # Set pad token if not set
32
+ if tokenizer.pad_token is None:
33
+ tokenizer.pad_token = tokenizer.eos_token
34
+
35
+ # Load model with appropriate settings
36
+ model = AutoModelForCausalLM.from_pretrained(
37
+ MODEL_NAME,
38
+ torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
39
+ device_map="auto" if DEVICE == "cuda" else None,
40
+ trust_remote_code=True
41
+ )
42
+
43
+ if DEVICE == "cpu":
44
+ model = model.to(DEVICE)
45
+
46
+ print("✅ Model and tokenizer loaded successfully!")
47
+
48
+ # Cache the loaded model and tokenizer
49
+ _model = model
50
+ _tokenizer = tokenizer
51
+
52
+ return model, tokenizer
53
 
54
+ except Exception as e:
55
+ print(f"❌ Error loading model: {e}")
56
+ print("\n🔧 Troubleshooting:")
57
+ print("1. If using HF repository: Make sure MODEL_NAME is 'username/model-name'")
58
+ print("2. If using local files: Make sure model files are in the correct folder")
59
+ print("3. For private repos: Add authentication token")
60
+ raise e
61
 
62
  # Initialize model and tokenizer
63
+ try:
64
+ model, tokenizer = load_model()
65
+ except Exception as e:
66
+ print(f"Failed to load model: {e}")
67
+ # Create dummy objects to prevent further errors
68
+ model, tokenizer = None, None
69
 
70
  def generate_code(pseudocode, indent=1, line=1, temperature=0.7, top_p=0.9, max_length=128):
71
  """
 
83
  Generated code string
84
  """
85
  try:
86
+ # Check if model is loaded
87
+ if model is None or tokenizer is None:
88
+ return """❌ Model not loaded. Please check:
89
+
90
+ 1. MODEL_NAME in app.py - should be either:
91
+ - Your HF repository: "username/model-name"
92
+ - Local path: "./model" (if files uploaded to Space)
93
+
94
+ 2. If using HF repository, make sure it exists and is public
95
+
96
+ 3. If using local files, ensure model files are in correct folder
97
+
98
+ Current MODEL_NAME: """ + MODEL_NAME
99
+
100
  # Validate inputs
101
  if not pseudocode.strip():
102
  return "❌ Error: Please enter some pseudocode."