"""Demo: format a translation prompt for TranslateGemma via a chat template.

Builds a single-message chat with source/target language codes, renders it
through the model's chat template, and prints the resulting prompt string.
The actual model load/generation is intentionally commented out.
"""
import os
import json

# GPU selection must happen BEFORE `torch` is imported: CUDA reads these
# environment variables once, at initialization time.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

import torch
from transformers import AutoModelForImageTextToText, AutoProcessor

model_id = "google/translategemma-27b-it"
processor = AutoProcessor.from_pretrained(model_id)
# model = AutoModelForImageTextToText.from_pretrained(model_id, device_map="auto")

# ---- Text Translation ----
# Message schema with `source_lang_code` / `target_lang_code` follows the
# TranslateGemma chat-template convention (cs -> de-DE here).
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "source_lang_code": "cs",
                "target_lang_code": "de-DE",
                "text": "V nejhorším případě i k prasknutí čočky.",
            }
        ],
    }
]

# BUG FIX: the original passed `tokenize=False` together with
# `return_dict=True` — transformers raises a ValueError for that
# combination, and `return_tensors="pt"` only applies when tokenizing.
# Since the model call above is commented out and this script just
# inspects the prompt, keep `tokenize=False` and drop the
# tokenization-only kwargs; `inputs` is the rendered prompt string.
inputs = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(inputs)