"""Extractive question answering demo.

Loads a DistilBERT model fine-tuned on SQuAD and extracts the answer
span for a question about a fixed context passage.
"""
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Load model and tokenizer
model_name = "distilbert-base-cased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model.eval()  # inference mode (disables dropout)

# Example text
context = """BOOK I. Concerning Discipline. The end of Sciences; association with the aged; restraint of the organs of sense; the creation of ministers; the creation of councillors and priests; ascertaining by temptations purity or impurity in the character of ministers; the institution of spies. Protection of parties for or against one's own cause in one's own state; winning over the factions for or against an enemy's cause in an enemy's state; the business of council meeting; the mission of envoys; protection of princes; the conduct of a prince kept under restraint; treatment of a prince kept under restraint; the duties of a king; duty towards the harem; personal safety."""
question = "What is the end of Sciences?"

# Tokenize input.
# NOTE: do NOT pass return_offsets_mapping=True here — the resulting
# `offset_mapping` tensor would be forwarded to model(**inputs), which
# does not accept it and raises TypeError. The offsets were never used.
inputs = tokenizer(
    question,
    context,
    return_tensors="pt",
    truncation=True,
    padding=True,
    max_length=512,
)

# Perform inference without tracking gradients (faster, less memory).
with torch.no_grad():
    outputs = model(**inputs)

# Get start and end logits (batch size is 1, so flat argmax is safe).
start_index = torch.argmax(outputs.start_logits)
end_index = torch.argmax(outputs.end_logits)

# Guard against a degenerate span (end before start) which would
# otherwise produce an empty or nonsensical answer slice.
if end_index < start_index:
    end_index = start_index

# Decode answer, dropping [CLS]/[SEP] if the span touches them.
answer_ids = inputs["input_ids"][0][start_index : end_index + 1]
answer = tokenizer.decode(answer_ids, skip_special_tokens=True)

# Print the result
print(f"Question: {question}")
print(f"Answer: {answer}")