eddddyy committed on
Commit
c07da70
·
verified ·
1 Parent(s): 7883432

Update model_loader.py

Browse files
Files changed (1) hide show
  1. model_loader.py +29 -35
model_loader.py CHANGED
@@ -1,38 +1,32 @@
1
- import os
 
 
 
 
 
 
 
2
 
3
- # Hugging Face Token
4
- HF_TOKEN = os.getenv("HF_TOKEN")
5
- if not HF_TOKEN:
6
- raise ValueError("❌ HF_TOKEN not set in environment variables.")
 
 
 
 
7
 
8
- # Model to Load
9
- MODEL_ID = os.getenv("MODEL_ID", "meta-llama/Meta-Llama-3-8B-Instruct") # πŸ‘ˆ updated here
 
 
 
 
 
 
 
 
10
 
11
- # Paths and Settings
12
- TEMP_DIR = os.getenv("TEMP_DIR", "/tmp") # For screenshots, temporary files, etc.
13
- ENABLE_EXECUTION = os.getenv("ENABLE_EXECUTION", "true").lower() == "true" # toggle command execution
14
-
15
- # OS Commands (unchanged)
16
- OS_COMMANDS = {
17
- "open": {
18
- "chrome": "google-chrome",
19
- "firefox": "firefox",
20
- "notepad": "notepad",
21
- "calculator": "gnome-calculator",
22
- "terminal": "gnome-terminal",
23
- "vscode": "code"
24
- },
25
- "close": {
26
- "chrome": "pkill chrome",
27
- "firefox": "pkill firefox",
28
- "notepad": "pkill notepad",
29
- "calculator": "pkill gnome-calculator",
30
- "vscode": "pkill code"
31
- },
32
- "screenshot": {
33
- "default": "import -window root screenshot.png"
34
- },
35
- "shutdown": "shutdown now",
36
- "restart": "reboot",
37
- "lock": "gnome-screensaver-command -l"
38
- }
 
1
def load_model():
    """Load the configured model and tokenizer and return a text-generation pipeline.

    Reads the module-level ``MODEL_ID`` and ``HF_TOKEN`` globals.
    NOTE(review): this commit appears to delete the definitions of
    ``MODEL_ID`` and ``HF_TOKEN`` from earlier in the file — confirm they
    are still defined somewhere, otherwise this raises ``NameError``.

    Returns:
        A ``transformers`` text-generation pipeline configured for sampling
        (max_new_tokens=150, temperature=0.7, top_p=0.9).

    Raises:
        RuntimeError: if tokenizer or model loading fails; the original
            exception is chained as the cause.
    """
    try:
        print(f"🔄 Loading tokenizer and model: {MODEL_ID}")
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_ID,
            token=HF_TOKEN,
            trust_remote_code=True,
        )

        # Prefer GPU with fp16 when CUDA is available; otherwise CPU with fp32.
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            token=HF_TOKEN,
            trust_remote_code=True,
            device_map="auto" if torch.cuda.is_available() else "cpu",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            low_cpu_mem_usage=True,
        )

        print("✅ Model loaded successfully.")
        return pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )

    except Exception as e:
        print(f"❌ Failed to load model: {e}")
        # Chain the cause so the original traceback is preserved for callers.
        raise RuntimeError(f"Model loading failed: {e}") from e