"""Setup for a seq2seq summarization script: configures the Hugging Face
token, fetches NLTK tokenizer data, selects a compute device, and provides
a helper to release model memory."""

import gc
import io
import json
import os
import sys

import nltk
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# An optional Hugging Face Hub token may be passed as the first CLI argument;
# exporting it lets `transformers` download gated/private models.
if len(sys.argv) > 1:
    os.environ["HUGGING_FACE_HUB_TOKEN"] = sys.argv[1]

# Sentence/word tokenizer data used by nltk's tokenize functions.
# "punkt_tab" is required by newer NLTK releases alongside "punkt".
nltk.download("punkt")
nltk.download('punkt_tab')

# transformers' pipeline() expects a CUDA device index (0) or -1 for CPU.
device = 0 if torch.cuda.is_available() else -1


def cleanup_model_resource(model):
    """Release the memory held by *model*.

    Drops the local reference, forces a garbage-collection pass, and
    returns any cached CUDA blocks to the driver.

    Note: ``del model`` only removes this function's reference. The
    caller must also drop (or rebind) its own reference for the model
    object to actually become collectable.

    Args:
        model: Any object (typically a loaded transformers model) to
            release. It is not usable by the caller afterwards if the
            caller also discards its reference.
    """
    del model
    gc.collect()
    # Safe no-op when CUDA is unavailable/uninitialized.
    torch.cuda.empty_cache()