#!/usr/bin/env python3
"""Zenith-7B Inference Script for Standard GPUs"""
import torch
import argparse
from pathlib import Path
from typing import Optional, Dict, Any
# Add current directory to path for imports
import sys
sys.path.append(str(Path(__file__).parent))
from configs.zenith_config import get_7b_config
from models.zenith_model import ZenithForCausalLM
from data.advanced_tokenizer import AdvancedTokenizer
def load_model(checkpoint_path: str, device: str = "cuda"):
"""Load trained model from checkpoint."""
config = get_7b_config()
# Initialize tokenizer
tokenizer = AdvancedTokenizer.from_pretrained(checkpoint_path)
config.vocab_size = tokenizer.get_vocab_size()
# Load model
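    # device_map="auto" lets Hugging Face accelerate place model layers across the
    # available GPUs (offloading to CPU if needed); for CPU-only runs we load normally.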
model = ZenithForCausalLM.from_pretrained(
checkpoint_path,
config=config,
device_map="auto" if device == "cuda" else None
)
model.eval()
return model, tokenizer
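# Example (checkpoint path is illustrative):
#   model, tokenizer = load_model("checkpoints/zenith-7b", device="cuda")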
def generate(
model: ZenithForCausalLM,
tokenizer: AdvancedTokenizer,
prompt: str,
max_new_tokens: int = 512,
temperature: float = 0.7,
top_p: float = 0.9,
top_k: int = 50,
repetition_penalty: float = 1.1,
do_sample: bool = True,
stream: bool = False
):
"""Generate text from the model."""
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
if stream:
# Streaming generation
from transformers import TextIteratorStreamer
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
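            # TextIteratorStreamer yields decoded text chunks as generate() produces them;
            # this assumes AdvancedTokenizer exposes a Hugging Face-compatible decode() interface.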
generation_kwargs = dict(
input_ids=input_ids,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_p=top_p,
top_k=top_k,
repetition_penalty=repetition_penalty,
                do_sample=do_sample,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
                streamer=streamer
)
from threading import Thread
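            # Run generation in a background thread so the caller can iterate over the
            # streamer and print tokens as they arrive.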
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
return streamer
else:
outputs = model.generate(
input_ids=input_ids,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_p=top_p,
top_k=top_k,
repetition_penalty=repetition_penalty,
do_sample=do_sample,
pad_token_id=tokenizer.pad_token_id,
eos_token_id=tokenizer.eos_token_id
)
            # Return only the newly generated continuation, not the echoed prompt
            return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
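# Example usage (illustrative; assumes load_model() has already been called):
#   reply = generate(model, tokenizer, "Summarize attention in one sentence.",
#                    max_new_tokens=128, temperature=0.8)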
def interactive_mode(model, tokenizer):
"""Run interactive chat session."""
print("=" * 60)
print("Zenith-7B Interactive Mode")
print("Type 'quit' to exit, 'clear' to clear history")
print("=" * 60)
history = []
while True:
try:
user_input = input("\nYou: ").strip()
if user_input.lower() == 'quit':
break
if user_input.lower() == 'clear':
history = []
print("History cleared.")
continue
# Build prompt with history
prompt = ""
for user_msg, assistant_msg in history[-4:]: # Keep last 4 exchanges
prompt += f"User: {user_msg}\nAssistant: {assistant_msg}\n"
prompt += f"User: {user_input}\nAssistant:"
print("\nZenith: ", end="", flush=True)
response = generate(model, tokenizer, prompt, stream=True)
full_response = ""
for token in response:
print(token, end="", flush=True)
full_response += token
print()
history.append((user_input, full_response))
except KeyboardInterrupt:
print("\n\nInterrupted. Type 'quit' to exit.")
except Exception as e:
print(f"\nError: {e}")
def main():
parser = argparse.ArgumentParser(description="Zenith-7B Inference")
parser.add_argument(
"--checkpoint",
type=str,
required=True,
help="Path to model checkpoint directory"
)
parser.add_argument(
"--prompt",
type=str,
default=None,
help="Prompt for generation (if not provided, enters interactive mode)"
)
parser.add_argument(
"--max_new_tokens",
type=int,
default=512,
help="Maximum new tokens to generate"
)
parser.add_argument(
"--temperature",
type=float,
default=0.7,
help="Sampling temperature"
)
parser.add_argument(
"--top_p",
type=float,
default=0.9,
help="Top-p (nucleus) sampling"
)
parser.add_argument(
"--top_k",
type=int,
default=50,
help="Top-k sampling"
)
parser.add_argument(
"--device",
type=str,
default="cuda",
choices=["cuda", "cpu"],
help="Device to run inference on"
)
parser.add_argument(
"--stream",
action="store_true",
help="Stream output token by token"
)
args = parser.parse_args()
# Load model
print(f"Loading model from {args.checkpoint}...")
model, tokenizer = load_model(args.checkpoint, args.device)
print("Model loaded successfully!")
if args.prompt:
# Single generation
response = generate(
model, tokenizer, args.prompt,
max_new_tokens=args.max_new_tokens,
temperature=args.temperature,
top_p=args.top_p,
top_k=args.top_k,
stream=args.stream
)
if args.stream:
for token in response:
print(token, end="", flush=True)
print()
else:
print(f"\nResponse: {response}")
else:
# Interactive mode
interactive_mode(model, tokenizer)
if __name__ == "__main__":
    main()