import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
# Load the tokenizer and model
model_name = "synCAI-144k-gpt2.5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
# Move the model to GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
|
|
def generate_text(prompt, model, tokenizer, device, max_length=100, temperature=0.7, top_p=0.9, top_k=50):
    try:
        # Tokenize the prompt and move the tensors to the same device as the model
        inputs = tokenizer(prompt, return_tensors="pt")
        inputs = {key: value.to(device) for key, value in inputs.items()}

        # Sample a continuation; do_sample=True is required for temperature/top_p/top_k
        # to take effect, and pad_token_id is set explicitly since GPT-2 has no pad token
        outputs = model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            max_length=max_length,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            pad_token_id=tokenizer.eos_token_id
        )

        # Decode the generated token IDs back into text
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return generated_text
    except Exception as e:
        print(f"Error generating text for prompt '{prompt}': {e}")
        return None
|
|
# Prompts to run through the model
input_prompts = [
    "Explain the significance of the project:",
    "What methodologies were used in the research?",
    "What are the future implications of the findings?"
]
|
|
# Generate and print a completion for each prompt
for prompt in input_prompts:
    generated_text = generate_text(prompt, model, tokenizer, device)
    if generated_text:
        print(f"Prompt: {prompt}")
        print(f"Generated Text: {generated_text}\n")
|
|