FlawedLLM committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,6 +5,10 @@ import re
|
|
| 5 |
import spaces
|
| 6 |
import gradio as gr
|
| 7 |
import torch
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
|
| 9 |
from huggingface_hub import login, HfFolder
|
| 10 |
# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged16bit_clean_final", trust_remote_code=True)
|
|
|
|
| 5 |
import spaces
|
| 6 |
import gradio as gr
|
| 7 |
import torch
|
| 8 |
+
print(f"Is CUDA available: {torch.cuda.is_available()}")
|
| 9 |
+
# True
|
| 10 |
+
print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
|
| 11 |
+
# Tesla T4
|
| 12 |
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
|
| 13 |
from huggingface_hub import login, HfFolder
|
| 14 |
# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged16bit_clean_final", trust_remote_code=True)
|