sheep33333 committed on
Commit 9310c94 · verified · 1 Parent(s): 0ba727b

Upload modelforseminat_v5.py with huggingface_hub

Files changed (1):
  1. modelforseminat_v5.py +3 -25
modelforseminat_v5.py CHANGED
@@ -20,36 +20,12 @@ from transformers.utils import LossKwargs
 from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
 
 from torch.nn.functional import cosine_similarity
-import time
-import os
-import sys
-import json
 import pdb
-import torch.distributed as dist
-from tqdm import tqdm
-from torch.utils.data.distributed import DistributedSampler
-import transformers
-import pickle
 from dataset import *
-# from peft import (get_peft_model, PeftModel)
-import random
-from config import *
-from datasets import Dataset, DatasetDict, load_dataset
-import wandb
-import argparse
 import torch
-import torch.nn as nn
 import torch.nn.functional as F
-import torch.optim as optim
 import functools
-from torch.optim.lr_scheduler import StepLR
-import torch.nn.functional as F
 import torch.distributed as dist
-import torch.multiprocessing as mp
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.utils.data.distributed import DistributedSampler
-from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
-    checkpoint_wrapper, CheckpointImpl)
 from torch.distributed.fsdp import (
     FullyShardedDataParallel as FSDP,
     MixedPrecision,
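Most of this hunk deletes imports the module never uses (time, os, wandb, a duplicate torch.nn.functional import, and so on). As a side note, candidates like these can be surfaced with a rough static scan; the sketch below is my own addition for illustration, not part of the commit, and it deliberately skips star imports such as `from dataset import *`, whose bound names cannot be resolved statically.

# unused_imports.py — a minimal sketch (not from this commit) that lists
# imported names a module never references. It is a rough check: names used
# only in strings, docstrings, or __all__ will show up as false positives.
import ast
import sys

def unused_imports(path):
    tree = ast.parse(open(path, encoding="utf-8").read())
    imported = {}  # bound name -> line where it was imported
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                # `import torch.distributed as dist` binds "dist";
                # a plain `import torch.nn` binds the top package "torch"
                imported[alias.asname or alias.name.split(".")[0]] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                if alias.name == "*":
                    continue  # star imports are opaque to a static scan
                imported[alias.asname or alias.name] = node.lineno
    used = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}
    return sorted((line, name) for name, line in imported.items() if name not in used)

if __name__ == "__main__":
    for line, name in unused_imports(sys.argv[1]):
        print(f"line {line}: '{name}' appears unused")

Run as `python unused_imports.py modelforseminat_v5.py`; treat the output as candidates to review rather than a verdict.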
@@ -1524,13 +1500,15 @@ class Olmo2ForCausalLMForSemiNAT(Olmo2ForCausalLM):
     ) -> Union[GenerateOutput, torch.LongTensor]:
 
         # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
-        self._validate_model_class()
+        self._validate_model_class()  # a model that can run generate
         tokenizer = kwargs.pop(
             "tokenizer",
             None)  # Pull this out first, we only use it for stopping criteria
         assistant_tokenizer = kwargs.pop(
             "assistant_tokenizer", None)  # only used for assisted generation
 
+
+        pdb.set_trace()
         generation_config, model_kwargs = self._prepare_generation_config(
             generation_config, **kwargs)
 
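Beyond the import cleanup, this hunk makes two functional changes: it restores the self._validate_model_class() call, the standard GenerationMixin guard that rejects model classes not set up for generation (the inline comment, translated from Chinese, reads "a model that can run generate"), and it plants an unconditional pdb.set_trace() at the top of generate(). The breakpoint stops every generate() call at an interactive prompt, which will hang non-interactive or multi-process runs such as the FSDP setups this file imports. A hedged alternative, using an environment flag of my own naming rather than anything defined in the commit, is to make the breakpoint opt-in:

import os
import pdb

# SEMINAT_DEBUG is a hypothetical switch, not defined anywhere in this commit;
# set it to "1" to drop into the debugger at this point in generate().
if os.environ.get("SEMINAT_DEBUG") == "1":
    pdb.set_trace()  # otherwise generate() runs straight through

Python's built-in breakpoint() offers a similar opt-out for free: it honors PYTHONBREAKPOINT=0, so the hook can stay in the code and be silenced per run.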