# File metadata (extraction residue, kept as a comment): 3,349 bytes, commit c94c8c9, 100 lines.
# from transformers import AutoTokenizer, AutoModel
# torch.set_float32_matmul_precision("high")
# model_id = "FacebookAI/roberta-large"
# tokenizer = AutoTokenizer.from_pretrained(model_id)
# model = AutoModel.from_pretrained(model_id).to("cuda")
# text = "The capital of France is [MASK]."
# inputs = tokenizer(text, return_tensors="pt").to("cuda")
# outputs = model(**inputs)
# # To get predictions for the mask:
# masked_index = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
# predicted_token_id = outputs.logits[0, masked_index].argmax(axis=-1)
# predicted_token = tokenizer.decode(predicted_token_id)
# print("Predicted token:", predicted_token)
# from transformers import AutoModel, AutoTokenizer
# model_name = "chandar-lab/NeoBERT"
# tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
# text = "NeoBERT is the most efficient model of its kind!"
# inputs = tokenizer(text, return_tensors="pt")
# # Generate embeddings
# outputs = model(**inputs)
# embedding = outputs.last_hidden_state[:, 0, :]
# print(embedding.shape)
# import sys
# # Add the folder to sys.path
# sys.path.append("/gpfs/home/ym621/UniPointMap")
# import torch
# from PIL import Image
# sys.path.append("/home/m50048399/transfered/ye_project/UniPointMap")
# import open_clip
# # Create model & transforms
# model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-16-quickgelu', pretrained='dfn2b')
# model.eval() # Set model to eval mode
# # Get tokenizer
# tokenizer = open_clip.get_tokenizer('ViT-B-16-quickgelu')
# image = preprocess(Image.open("docs/CLIP.png")).unsqueeze(0)
# text = tokenizer(["a diagram", "a dog", "a cat"])
# with torch.no_grad(), torch.autocast("cuda"):
# image_features = model.encode_image(image)
# text_features = model.encode_text(text)
# image_features /= image_features.norm(dim=-1, keepdim=True)
# text_features /= text_features.norm(dim=-1, keepdim=True)
# text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
# print("Label probs:", text_probs) # prints: [[1., 0., 0.]]
# import torch
# from PIL import Image
# from transformers import (
# AutoImageProcessor,
# AutoTokenizer,
# AutoModelForCausalLM,
# )
# model_root = "jina"
# image_size=224
# model = AutoModelForCausalLM.from_pretrained(model_root,trust_remote_code=True).cuda()
# device = model.device
# from transformers import AutoModel, AutoTokenizer, AutoImageProcessor
# model_root = 'fg-clip-base'
# tokenizer = AutoTokenizer.from_pretrained(model_root)
# image_processor = AutoImageProcessor.from_pretrained(model_root)
# text_encoder = AutoModel.from_pretrained('jinaai/jina-clip-v2', trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained('jinaai/jina-clip-v2', trust_remote_code=True)
# pip install -U huggingface_hub
# Requires: pip install -U huggingface_hub
from huggingface_hub import snapshot_download

# Download ONLY the light_3rscan and light_arkitscenes subfolders of the
# MatchLab/ScenePoint dataset into the local Hugging Face cache
# (~/.cache/huggingface by default) and report where they landed.
#
# NOTE(review): the deprecated `resume_download=True` kwarg was dropped —
# huggingface_hub (>= 0.22) always resumes interrupted downloads, so
# re-running this script remains safe.
local_path = snapshot_download(
    repo_id="MatchLab/ScenePoint",
    repo_type="dataset",  # repo is a dataset, not a model
    allow_patterns=["light_3rscan/**", "light_arkitscenes/**"],  # only these subfolders
    max_workers=8,  # parallel file downloads
)
# snapshot_download returns the local snapshot directory; print it so the
# user knows exactly where the files are instead of a vague cache message.
print(f"Downloaded to Hugging Face cache: {local_path}")