EGYADMIN committed on
Commit
406fa52
Β·
verified Β·
1 Parent(s): a0f6119

Fix indentation issues in download_model.py

Browse files
Files changed (1) hide show
  1. download_model.py +30 -63
download_model.py CHANGED
@@ -1,90 +1,58 @@
1
  #!/usr/bin/env python3
2
  """
3
  Model Downloader Script for Kimi-K2-Instruct
4
- This script pre-downloads the Kimi-K2-Instruct model from Hugging Face
5
- during the container build to speed up initialization.
6
  """""
7
 
8
  import os
9
  import sys
10
- import logging
11
- from pathlib import Path
12
-
13
- # Setup logging
14
- logging.basicConfig(
15
- level=logging.INFO,
16
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
17
- )
18
- logger = logging.getLogger(__name__)
19
 
20
  def download_model():
21
  """Download the Kimi-K2-Instruct model from Hugging Face Hub"""""
22
-
23
  try:
24
  from transformers import AutoTokenizer, AutoModelForCausalLM
25
  import torch
26
 
27
  MODEL_NAME = "moonshotai/Kimi-K2-Instruct"
28
 
29
- logger.info("=" * 60)
30
- logger.info("πŸ€– Starting Model Download")
31
- logger.info("=" * 60)
32
 
33
- # Check CUDA availability
34
- logger.info(f"CUDA Available: {torch.cuda.is_available()}")
35
  if torch.cuda.is_available():
36
- logger.info(f"GPU Count: {torch.cuda.device_count()}")
37
- for i in range(torch.cuda.device_count()):
38
- logger.info(f" GPU {i}: {torch.cuda.get_device_name(i)}")
39
 
40
- logger.info(f"\nπŸ“₯ Downloading Model: {MODEL_NAME}")
41
- logger.info(f"Using HF_TOKEN: {'Yes' if os.environ.get('HF_TOKEN') else 'No'}")
42
 
43
  # Download tokenizer
44
- logger.info("\n[1/2] Downloading Tokenizer...")
45
- try:
46
- tokenizer = AutoTokenizer.from_pretrained(
47
- MODEL_NAME,
48
- trust_remote_code=True,
49
- token=os.environ.get("HF_TOKEN")
50
- )
51
- logger.info("βœ“ Tokenizer downloaded successfully")
52
- except Exception as e:
53
- logger.error(f"βœ— Failed to download tokenizer: {str(e)}")
54
- raise
55
 
56
- # Download model
57
- logger.info("\n[2/2] Downloading Model Weights (this may take a while)...")
58
- try:
59
- model = AutoModelForCausalLM.from_pretrained(
60
- MODEL_NAME,
61
- torch_dtype=torch.bfloat16,
62
- device_map="auto",
63
- trust_remote_code=True,
64
- token=os.environ.get("HF_TOKEN")
65
- )
66
- logger.info("βœ“ Model downloaded successfully")
67
- except Exception as e:
68
- logger.error(f"βœ— Failed to download model: {str(e)}")
69
- raise
70
 
71
- # Verify model is cached
72
- cache_dir = Path.home() / ".cache" / "huggingface" / "hub"
73
- logger.info(f"\nπŸ“ Cache Directory: {cache_dir}")
74
- logger.info(f"βœ“ Model cached at: {cache_dir}")
75
-
76
- logger.info("\n" + "=" * 60)
77
- logger.info("βœ“ Model download completed successfully!")
78
- logger.info("=" * 60)
79
 
80
  return True
81
 
82
- except ImportError as e:
83
- logger.error(f"βœ— Import Error: {str(e)}")
84
- logger.error("Make sure all required packages are installed")
85
- return False
86
  except Exception as e:
87
- logger.error(f"βœ— Error: {str(e)}")
88
  import traceback
89
  traceback.print_exc()
90
  return False
@@ -92,6 +60,5 @@ def download_model():
92
  if __name__ == "__main__":
93
  success = download_model()
94
  sys.exit(0 if success else 1)
95
- )
96
- )
97
- )
 
1
#!/usr/bin/env python3
"""
Model Downloader Script for Kimi-K2-Instruct
This script pre-downloads the model from Hugging Face.
"""

import os
import sys


def download_model():
    """Download the Kimi-K2-Instruct model from Hugging Face Hub.

    Populates the local Hugging Face cache so later container startups
    skip the download. Authentication is taken from the ``HF_TOKEN``
    environment variable when set.

    Returns:
        bool: True when tokenizer and weights were fetched, False on any
        failure (including missing ``transformers``/``torch``).
    """
    try:
        # Deferred third-party imports: the module stays importable (and
        # returns False cleanly) when these packages are absent.
        from transformers import AutoTokenizer, AutoModelForCausalLM
        import torch

        MODEL_NAME = "moonshotai/Kimi-K2-Instruct"

        print("=" * 60)
        print("Starting Model Download")
        print("=" * 60)

        print(f"CUDA Available: {torch.cuda.is_available()}")
        if torch.cuda.is_available():
            print(f"GPU Count: {torch.cuda.device_count()}")

        print(f"Downloading Model: {MODEL_NAME}")

        # Download tokenizer. The return value is discarded on purpose:
        # from_pretrained's useful effect here is filling the HF cache.
        print("Downloading Tokenizer...")
        AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            token=os.environ.get("HF_TOKEN")
        )
        print("Tokenizer downloaded successfully")

        # Download model weights (same cache-filling rationale as above).
        print("Downloading Model Weights...")
        AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
            token=os.environ.get("HF_TOKEN")
        )
        print("Model downloaded successfully")

        print("=" * 60)
        print("Model download completed!")
        print("=" * 60)

        return True

    except Exception as e:
        # Broad catch is deliberate: this build-time helper's only contract
        # is a boolean success flag plus a printed traceback for the log.
        print(f"Error: {str(e)}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = download_model()
    sys.exit(0 if success else 1)