sejalkishan committed on
Commit
3788fd7
·
verified ·
1 Parent(s): 185448b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -10,7 +10,7 @@ from huggingface_hub import login
10
  import spaces
11
 
12
  # πŸ” Login (Hugging Face token should be set as HF_TOKEN env variable)
13
- login(token=os.environ.get("HF_TOKEN"))
14
 
15
  # ✅ Check for GPU
16
  if not torch.cuda.is_available():
@@ -19,12 +19,12 @@ print(f"✅ Using GPU: {torch.cuda.get_device_name(0)}")
19
 
20
  # Model setup
21
  model_id = "mistralai/Mistral-7B-Instruct-v0.2"
22
- tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.environ.get("HF_TOKEN"))
23
  model = AutoModelForCausalLM.from_pretrained(
24
  model_id,
25
  device_map="auto",
26
  torch_dtype=torch.float16,
27
- token=os.environ.get("HF_TOKEN"),
28
  trust_remote_code=True
29
  )
30
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
10
  import spaces
11
 
12
  # πŸ” Login (Hugging Face token should be set as HF_TOKEN env variable)
13
+ login(token=os.environ.get("token"))
14
 
15
  # ✅ Check for GPU
16
  if not torch.cuda.is_available():
 
19
 
20
  # Model setup
21
  model_id = "mistralai/Mistral-7B-Instruct-v0.2"
22
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.environ.get("token"))
23
  model = AutoModelForCausalLM.from_pretrained(
24
  model_id,
25
  device_map="auto",
26
  torch_dtype=torch.float16,
27
+ token=os.environ.get("token"),
28
  trust_remote_code=True
29
  )
30
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)