Rajan Sharma committed on
Commit
7bece4d
·
verified ·
1 Parent(s): e88005b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -9
app.py CHANGED
@@ -8,17 +8,21 @@ from huggingface_hub.utils import RepositoryNotFoundError, HfHubHTTPError
8
  def initialize_model():
9
  try:
10
  # Login to Hugging Face Hub
11
- login(token=os.getenv("HUGGING_FACE_HUB_TOKEN"))
 
 
 
 
12
 
13
- # Initialize the model and tokenizer with auth
14
  model_id = "CohereLabs/c4ai-command-a-03-2025"
15
  tokenizer = AutoTokenizer.from_pretrained(
16
  model_id,
17
- use_auth_token=True
18
  )
19
  model = AutoModelForCausalLM.from_pretrained(
20
  model_id,
21
- use_auth_token=True
22
  )
23
  return True, model, tokenizer
24
  except RepositoryNotFoundError:
@@ -27,7 +31,7 @@ def initialize_model():
27
  if e.response.status_code == 401:
28
  return False, "Authentication failed. Please check your token permissions.", None
29
  elif e.response.status_code == 403:
30
- return False, "Access denied. Please ensure you have access to this model.", None
31
  else:
32
  return False, f"An error occurred: {str(e)}", None
33
  except Exception as e:
@@ -37,7 +41,6 @@ def initialize_model():
37
  success, result, tokenizer = initialize_model()
38
  if not success:
39
  print(f"Error initializing model: {result}")
40
- # You might want to raise an exception here or handle the error differently
41
  else:
42
  model = result
43
 
@@ -85,7 +88,7 @@ def chat(message, history):
85
  except Exception as e:
86
  return [(message, f"Error during chat: {str(e)}")]
87
 
88
- # Create the Gradio interface
89
  demo = gr.ChatInterface(
90
  fn=chat,
91
  title="Medical Decision Support AI",
@@ -95,8 +98,7 @@ demo = gr.ChatInterface(
95
  "What are the symptoms of hypertension?",
96
  "What are common drug interactions with aspirin?",
97
  "What are the warning signs of diabetes?",
98
- ],
99
- retry_on_error=True
100
  )
101
 
102
  demo.launch()
 
8
  def initialize_model():
9
  try:
10
  # Login to Hugging Face Hub
11
+ token = os.getenv("HUGGING_FACE_HUB_TOKEN")
12
+ if not token:
13
+ return False, "No token found. Please set HUGGING_FACE_HUB_TOKEN in Space secrets.", None
14
+
15
+ login(token=token)
16
 
17
+ # Initialize the model and tokenizer with token (not use_auth_token)
18
  model_id = "CohereLabs/c4ai-command-a-03-2025"
19
  tokenizer = AutoTokenizer.from_pretrained(
20
  model_id,
21
+ token=token # Updated from use_auth_token
22
  )
23
  model = AutoModelForCausalLM.from_pretrained(
24
  model_id,
25
+ token=token # Updated from use_auth_token
26
  )
27
  return True, model, tokenizer
28
  except RepositoryNotFoundError:
 
31
  if e.response.status_code == 401:
32
  return False, "Authentication failed. Please check your token permissions.", None
33
  elif e.response.status_code == 403:
34
+ return False, "Access denied. Please request access at https://huggingface.co/CohereLabs/c4ai-command-a-03-2025", None
35
  else:
36
  return False, f"An error occurred: {str(e)}", None
37
  except Exception as e:
 
41
  success, result, tokenizer = initialize_model()
42
  if not success:
43
  print(f"Error initializing model: {result}")
 
44
  else:
45
  model = result
46
 
 
88
  except Exception as e:
89
  return [(message, f"Error during chat: {str(e)}")]
90
 
91
+ # Create the Gradio interface - removed retry_on_error
92
  demo = gr.ChatInterface(
93
  fn=chat,
94
  title="Medical Decision Support AI",
 
98
  "What are the symptoms of hypertension?",
99
  "What are common drug interactions with aspirin?",
100
  "What are the warning signs of diabetes?",
101
+ ]
 
102
  )
103
 
104
  demo.launch()