| | import torch |
| | from sentence_transformers import SentenceTransformer |
| | import os |
| |
|
def convert_to_torchscript(
    model_path: str = ".",
    output_path: str = "model.pt",
    max_length: int = 256,
) -> str:
    """Trace a SentenceTransformer's underlying transformer to TorchScript.

    Loads the model from *model_path* on CPU, traces the first pipeline
    module's Hugging Face ``auto_model`` with one concrete example input,
    and saves the traced graph to *output_path*.

    NOTE: only the transformer itself is traced; the SentenceTransformer
    pooling and normalization layers are NOT included in the output.

    Args:
        model_path: Directory containing the SentenceTransformer model.
        output_path: Destination file for the traced TorchScript model.
        max_length: Tokenizer truncation length for the example input.

    Returns:
        Absolute path of the saved TorchScript file.

    Raises:
        Whatever ``SentenceTransformer``, the tokenizer, or
        ``torch.jit.trace`` raise on failure — handled at the CLI boundary
        below, not here.
    """
    print(f"Loading model from: {model_path}")
    model = SentenceTransformer(model_path, device='cpu')
    # eval() disables dropout etc. so the recorded trace is deterministic.
    model.eval()

    sample_text = "This is an example sentence to encode."
    print(f"Creating example input with text: '{sample_text}'")

    # Tracing records the ops executed for this one concrete input, so we
    # tokenize a representative sentence to drive the forward pass.
    inputs = model.tokenizer(
        sample_text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=max_length,
    )

    print("Tracing model - this may take a moment...")
    with torch.no_grad():
        # model[0] is the Transformer pipeline module; .auto_model is the
        # raw Hugging Face model. strict=False tolerates non-tensor (dict)
        # outputs from the forward pass.
        traced_model = torch.jit.trace(
            model[0].auto_model,
            (inputs["input_ids"], inputs["attention_mask"]),
            strict=False,
        )

    traced_model.save(output_path)
    full_path = os.path.abspath(output_path)

    print(f"Model successfully converted to TorchScript and saved as {output_path}")
    print(f"Full path: {full_path}")
    print("Note: This traces only the transformer model. Pooling and normalization layers are not included.")
    return full_path


if __name__ == "__main__":
    try:
        convert_to_torchscript()
    except Exception as e:
        # Top-level boundary: report the failure, print the traceback, and
        # exit nonzero so shells/CI can detect that the conversion failed
        # (the original script always exited 0).
        print(f"Error converting model: {str(e)}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
| |
|