Spaces: Running on Zero
# Standard library
import os

# Third-party
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

# Hugging Face Hub model ID for the DeepSeek OCR checkpoint under test.
model_name = 'deepseek-ai/DeepSeek-OCR-2'
def test_inference():
    """Smoke-test that the DeepSeek-OCR-2 model and tokenizer are usable.

    Loads the tokenizer and model weights for ``model_name`` (downloading
    or reading from the local Hugging Face cache), then performs a trivial
    tokenization to confirm the pipeline works. Progress is printed to
    stdout.

    Returns:
        tuple: ``(model, tokenizer)`` so callers can reuse the loaded
        objects instead of loading them again.

    Raises:
        RuntimeError: If the tokenizer produces no tokens for the test input.
    """
    print(f"Loading tokenizer for {model_name}...")
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

    print(f"Loading model for {model_name}...")
    # NOTE(review): no device_map / .to() call here, so weights land on CPU
    # by default — confirm this is intended for the ZeroGPU environment.
    model = AutoModel.from_pretrained(
        model_name,
        trust_remote_code=True,
        use_safetensors=True,
    )
    print("Model loaded successfully.")
    print(f"Model type: {type(model)}")

    # Trivial tokenization round-trip; verify the result is non-empty so
    # "Success" actually means something (the original discarded `inputs`).
    inputs = tokenizer("Hello", return_tensors="pt")
    if inputs["input_ids"].numel() == 0:
        raise RuntimeError("Tokenizer produced no tokens for test input")
    print("Tokenizer test: Success")

    print("DeepSeek-OCR-2 is ready for use.")
    return model, tokenizer
| if __name__ == "__main__": | |
| test_inference() | |