3v324v23 committed on
Commit
8ac8443
·
1 Parent(s): edc52b4
Files changed (1) hide show
  1. run_test.py +29 -3
run_test.py CHANGED
@@ -1,5 +1,5 @@
1
  #!/usr/bin/env python3
2
- import os, sys
3
 
4
  STORE_WEIGHTS = False
5
  DEDUPLICATE_SAFETENSORS = True
@@ -12,6 +12,8 @@ user, model = model_id.split('/')
12
  prompt = 'Once upon a time,'
13
  fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.{"DEDUPLICATED.safetensors" if DEDUPLICATE_SAFETENSORS else "safetensors"}'
14
 
 
 
15
  import torch, numpy as np, random
16
  torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
17
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -23,12 +25,34 @@ os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
23
  torch.manual_seed(0)
24
  random.seed(0)
25
  np.random.seed(0)
 
 
26
  if FAKE_H100:
27
  torch.cuda.is_available = lambda: True
28
  torch.cuda.get_device_capability = lambda: [9,0]
 
29
  import accelerate, safetensors.torch, transformers, tqdm
30
  from _safetensors import WritingSafeTensors
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
33
  tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
34
  Model = transformers.AutoModelForCausalLM
@@ -43,12 +67,14 @@ max_memory['cpu'] //= 3
43
  model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
44
  torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
45
  device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
46
- model = Model.from_pretrained(**model_kwparams)
 
47
 
48
  if config.model_type == 'deepseek_v3':
49
  model._supports_cache_class = False
50
 
51
- pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)
 
52
 
53
  SafeTensors = WritingSafeTensors(
54
  fn,
 
1
  #!/usr/bin/env python3
2
+ import contextlib, os, sys
3
 
4
  STORE_WEIGHTS = False
5
  DEDUPLICATE_SAFETENSORS = True
 
12
  prompt = 'Once upon a time,'
13
  fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.{"DEDUPLICATED.safetensors" if DEDUPLICATE_SAFETENSORS else "safetensors"}'
14
 
15
+
16
+ ## Increase determinism, from torch docs
17
  import torch, numpy as np, random
18
  torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
19
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 
25
  torch.manual_seed(0)
26
  random.seed(0)
27
  np.random.seed(0)
28
+
29
+
30
  if FAKE_H100:
31
  torch.cuda.is_available = lambda: True
32
  torch.cuda.get_device_capability = lambda: [9,0]
33
+
34
  import accelerate, safetensors.torch, transformers, tqdm
35
  from _safetensors import WritingSafeTensors
36
 
37
+
38
## Show progress for slow internal functions (model layer enumerations)
import builtins
builtin_range = range
def range_progress(*params):
    # Drop-in replacement for builtin range(): yields the same values but
    # wrapped in a transient tqdm bar. The bar label combines the current
    # phase description (range_progress.desc, set elsewhere) with the
    # literal range() arguments so the user can see which loop is running.
    arg_text = ','.join(repr(p) for p in params)
    bar_label = range_progress.desc + ' range(' + arg_text + ')'
    return tqdm.tqdm(builtin_range(*params), leave=False, desc=bar_label)
43
@contextlib.contextmanager
def range_progress_description(desc):
    """Label all range_progress() progress bars with *desc* while the
    context is active.

    Fixes vs. original: the parameter was named ``str`` (shadowing the
    builtin), and the ``finally`` clause reset the description to ``''``
    unconditionally — restoring the *previous* value instead makes nested
    use safe. Behavior is identical for the non-nested calls in this file
    (the previous value is always ``''`` there). Callers pass the
    description positionally, so the rename is call-compatible.
    """
    previous = range_progress.desc
    range_progress.desc = desc
    try:
        yield
    finally:
        # Restore rather than clear, so an enclosing context keeps its label.
        range_progress.desc = previous
50
# Initialize the bar label before the patch goes live, so an unlabelled
# range() call doesn't hit a missing attribute.
range_progress.desc = ''
# Monkey-patch the builtin range() globally so slow library-internal loops
# (e.g. per-layer enumeration inside transformers) show a progress bar.
builtins.range = range_progress
# Exempt tqdm's own internals from the patch — they must use the real range().
tqdm.std.range = builtin_range
# Exempt the regex parser too; it calls range() during pattern compilation.
# NOTE(review): sre_parse is deprecated since Python 3.11 — confirm the
# target interpreter version still exposes it.
import sre_parse; sre_parse.range = builtin_range
54
+
55
+
56
  config = transformers.AutoConfig.from_pretrained(model_id, revision=revision, trust_remote_code=True)
57
  tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
58
  Model = transformers.AutoModelForCausalLM
 
67
  model_kwparams = dict(pretrained_model_name_or_path=model_id, revision=revision, trust_remote_code=True,
68
  torch_dtype=getattr(torch, TORCH_DTYPE, TORCH_DTYPE), use_safetensors=True, low_cpu_mem_usage=True,
69
  device_map=DEVICE_MAP, max_memory=max_memory, offload_state_dict=True, offload_buffers=True)
70
+ with range_progress_description('constructing model'):
71
+ model = Model.from_pretrained(**model_kwparams)
72
 
73
  if config.model_type == 'deepseek_v3':
74
  model._supports_cache_class = False
75
 
76
+ with range_progress_description('constructing pipeline'):
77
+ pipe = transformers.pipeline('text-generation', model=model, config=config, tokenizer=tokenizer)
78
 
79
  SafeTensors = WritingSafeTensors(
80
  fn,