FlawedLLM committed on
Update app.py
app.py CHANGED
@@ -31,10 +31,14 @@ from huggingface_hub import login, HfFolder
 # model = PeftModel.from_pretrained(model, lora_weights)
 # Load model directly
 # from transformers import AutoTokenizer, AutoModelForCausalLM
-
+bnb_config = BitsAndBytesConfig(
+    load_in_8bit=True,
+    llm_int8_threshold=6.0,
+)
 tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged4bit_clean_final")
 model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_gemma_merged4bit_clean_final",
-                                             device_map="auto")
+                                             device_map="auto",
+                                             quantization_config=bnb_config,)
 # alpaca_prompt = You MUST copy from above!
 @spaces.GPU(duration=300)
 def chunk_it(input_command, item_list):
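For context, the commit replaces a plain device-mapped load with an 8-bit bitsandbytes load. Below is a minimal, self-contained sketch of the loading path this diff introduces, assuming BitsAndBytesConfig comes from transformers and the bitsandbytes package is installed in the Space; the import itself is not visible in the hunk above (the transformers import there is still commented out), so treat it as an assumption.

# Sketch of the 8-bit loading path this commit introduces.
# Assumes `bitsandbytes` is installed; the BitsAndBytesConfig
# import is not shown in the hunk above.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,       # quantize linear-layer weights to int8 at load time
    llm_int8_threshold=6.0,  # outlier threshold; larger activations stay in fp16
)

tokenizer = AutoTokenizer.from_pretrained(
    "FlawedLLM/Bhashini_gemma_merged4bit_clean_final"
)
model = AutoModelForCausalLM.from_pretrained(
    "FlawedLLM/Bhashini_gemma_merged4bit_clean_final",
    device_map="auto",               # let accelerate place layers on available devices
    quantization_config=bnb_config,
)

Passing a quantization_config object at load time keeps the quantization settings in one place, rather than spreading flags like load_in_8bit across the from_pretrained call; the trailing `,)` in the committed code is legal Python, just unidiomatic.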