shaheerawan3 committed on
Commit
4430bc2
·
verified ·
1 Parent(s): e30b15f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -3
app.py CHANGED
@@ -52,14 +52,15 @@ def load_model_in_background():
52
  global MODEL, TOKENIZER, PIPE, MODEL_LOADING, MODEL_LOADED
53
  try:
54
  MODEL_LOADING = True
 
55
 
56
  # Model identifier
57
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
58
 
59
- # Load tokenizer
60
  TOKENIZER = AutoTokenizer.from_pretrained(model_id)
61
 
62
- # Configure model loading (with lower precision for efficiency)
63
  MODEL = AutoModelForCausalLM.from_pretrained(
64
  model_id,
65
  torch_dtype=torch.float16,
@@ -67,7 +68,7 @@ def load_model_in_background():
67
  low_cpu_mem_usage=True
68
  )
69
 
70
- # Create text generation pipeline
71
  PIPE = pipeline(
72
  "text-generation",
73
  model=MODEL,
@@ -77,9 +78,11 @@ def load_model_in_background():
77
 
78
  MODEL_LOADING = False
79
  MODEL_LOADED = True
 
80
  return "Model loaded successfully!"
81
  except Exception as e:
82
  MODEL_LOADING = False
 
83
  return f"Error loading model: {str(e)}"
84
 
85
  # Function to generate response using the model
 
52
  global MODEL, TOKENIZER, PIPE, MODEL_LOADING, MODEL_LOADED
53
  try:
54
  MODEL_LOADING = True
55
+ print("Starting model loading...")
56
 
57
  # Model identifier
58
  model_id = "mistralai/Mistral-7B-Instruct-v0.3"
59
 
60
+ print("Loading tokenizer...")
61
  TOKENIZER = AutoTokenizer.from_pretrained(model_id)
62
 
63
+ print("Loading model (this may take several minutes)...")
64
  MODEL = AutoModelForCausalLM.from_pretrained(
65
  model_id,
66
  torch_dtype=torch.float16,
 
68
  low_cpu_mem_usage=True
69
  )
70
 
71
+ print("Creating pipeline...")
72
  PIPE = pipeline(
73
  "text-generation",
74
  model=MODEL,
 
78
 
79
  MODEL_LOADING = False
80
  MODEL_LOADED = True
81
+ print("Model loaded successfully!")
82
  return "Model loaded successfully!"
83
  except Exception as e:
84
  MODEL_LOADING = False
85
+ print(f"Error loading model: {str(e)}")
86
  return f"Error loading model: {str(e)}"
87
 
88
  # Function to generate response using the model