Spaces:
Sleeping
Sleeping
Commit ·
eb56811
1
Parent(s): 294b16d
added accelerate support
Browse files
- app.py +1 -1
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -5,7 +5,7 @@ import torch
|
|
| 5 |
# Load your model and tokenizer
|
| 6 |
model_name = "modelsmafia/punjabi_Gemma-2B" # Replace with your model name
|
| 7 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 8 |
-
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="
|
| 9 |
|
| 10 |
# Create a chat function
|
| 11 |
def chat_with_model(message, history):
|
|
|
|
| 5 |
# Load your model and tokenizer
|
| 6 |
model_name = "modelsmafia/punjabi_Gemma-2B" # Replace with your model name
|
| 7 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 8 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="cpu",low_cpu_mem_usage=True, trust_remote_code=True)
|
| 9 |
|
| 10 |
# Create a chat function
|
| 11 |
def chat_with_model(message, history):
|
requirements.txt
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
huggingface_hub==0.25.2
|
| 2 |
gradio
|
| 3 |
transformers
|
| 4 |
-
torch
|
|
|
|
|
|
| 1 |
huggingface_hub==0.25.2
|
| 2 |
gradio
|
| 3 |
transformers
|
| 4 |
+
torch
|
| 5 |
+
accelerate>=0.26.0
|