ghitaben committed
Commit 4e19176 · 1 Parent(s): 936bc6b

fix loader

notebooks/kaggle_medic_demo.ipynb CHANGED
@@ -69,15 +69,7 @@
  "id": "4c637bc0",
  "metadata": {},
  "outputs": [],
- "source": [
-  "%%bash\n",
-  "if [ ! -d /kaggle/working/AMR-Guard ]; then\n",
-  " git clone \"$GITHUB_REPO\" /kaggle/working/AMR-Guard\n",
-  "else\n",
-  " echo \"Repo already present — pulling latest\"\n",
-  " git -C /kaggle/working/AMR-Guard pull\n",
-  "fi"
- ]
+ "source": "%%bash\n# Always start fresh to avoid stale code from previous runs\nrm -rf /kaggle/working/AMR-Guard\ngit clone \"$GITHUB_REPO\" /kaggle/working/AMR-Guard"
  },
  {
  "cell_type": "code",
src/loader.py CHANGED
@@ -33,7 +33,8 @@ def _get_local_causal_lm(model_name: TextModelName):
 
     load_kwargs: Dict[str, Any] = {"device_map": "auto"}
     if settings.quantization == "4bit":
-        load_kwargs["load_in_4bit"] = True
+        from transformers import BitsAndBytesConfig
+        load_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_4bit=True)
 
     tokenizer = AutoTokenizer.from_pretrained(model_path)
     model = AutoModelForCausalLM.from_pretrained(model_path, **load_kwargs)
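
For context, a minimal, self-contained sketch of what the patched 4-bit path amounts to. The function name, the model_path argument, and the quantization flag are illustrative stand-ins for the repo's _get_local_causal_lm and settings object, and it assumes transformers, accelerate, and bitsandbytes are installed; passing load_in_4bit directly to from_pretrained is deprecated in favor of a BitsAndBytesConfig.

    # Hypothetical sketch, not the repo's exact loader.
    from typing import Any, Dict, Optional

    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig


    def load_causal_lm(model_path: str, quantization: Optional[str] = None):
        # device_map="auto" lets accelerate place weights across available devices.
        load_kwargs: Dict[str, Any] = {"device_map": "auto"}
        if quantization == "4bit":
            # 4-bit loading is requested via a quantization_config object
            # rather than a bare load_in_4bit kwarg.
            load_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_4bit=True)

        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForCausalLM.from_pretrained(model_path, **load_kwargs)
        return tokenizer, model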