#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "vllm>=0.11.0",
# "spacy>=3.7.0",
# "mistral_common>=1.5.0",
# "en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.8.0/en_core_web_sm-3.8.0-py3-none-any.whl",
# ]
#
# [tool.uv]
# no-build = true
# index-strategy = "unsafe-best-match"
# extra-index-url = ["https://download.pytorch.org/whl/cu128"]
# ///
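# The block above is PEP 723 inline script metadata: `uv run` reads it and
# creates an ephemeral environment with the listed dependencies before running.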
"""
Minimal end-to-end example for sui-1-24b summarization.
Usage:
    # Summarize a file
    uv run example.py document.txt

    # Summarize inline text
    uv run example.py --text "Your long text here..."

    # With custom parameters
    uv run example.py document.txt --words 300 --tags 8 --language en
"""
import argparse
import hashlib
import json
import re
import sys
from pathlib import Path


# Lazy imports for faster --help
def main():
    parser = argparse.ArgumentParser(
        description="Summarize text using sui-1-24b with source grounding",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument("input", nargs="?", help="Input file path (or use --text)")
    parser.add_argument("--text", "-t", help="Input text directly")
parser.add_argument("--words", "-w", type=int, default=250, help="Target word count (default: 400)")
parser.add_argument("--tags", "-n", type=int, default=4, help="Number of XML tags to cite (default: 10)")
parser.add_argument("--language", "-l", default="en", choices=["en", "de", "es", "fr", "it"], help="Language (default: en)")
parser.add_argument("--model", "-m", default="ellamind/sui-1-24b", help="Model path or HF repo")
parser.add_argument("--tensor-parallel", "-tp", type=int, default=1, help="Tensor parallel size (default: 1)")
parser.add_argument("--raw", action="store_true", help="Print raw JSON output instead of formatted")
args = parser.parse_args()
# Get input text
if args.text:
text = args.text
elif args.input:
text = Path(args.input).read_text()
else:
parser.error("Provide input file or --text")

    # Import heavy dependencies only when needed
    import spacy
    from vllm import LLM, SamplingParams

    # Load spaCy model for sentence segmentation
    # Note: Only English is bundled. For other languages, install the model first:
    # pip install https://github.com/explosion/spacy-models/releases/download/de_core_news_sm-3.8.0/de_core_news_sm-3.8.0-py3-none-any.whl
    spacy_models = {
        "en": "en_core_web_sm",
        "de": "de_core_news_sm",
        "es": "es_core_news_sm",
        "fr": "fr_core_news_sm",
        "it": "it_core_news_sm",
    }
    try:
        nlp = spacy.load(spacy_models[args.language])
    except OSError:
        print(f"Error: spaCy model '{spacy_models[args.language]}' not found.")
        print("For English, this should be bundled automatically.")
        print("For other languages, install the model first:")
        print(f"  pip install https://github.com/explosion/spacy-models/releases/download/{spacy_models[args.language]}-3.8.0/{spacy_models[args.language]}-3.8.0-py3-none-any.whl")
        sys.exit(1)

    # Tag sentences with unique XML identifiers
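    # Each tag is the first 8 hex chars of an md5 over the sentence's index and
    # prefix, so tags are stable across runs and collisions are unlikely at
    # typical document sizes. Example (tag values illustrative):
    #   <3f2a9c1e>First sentence.</3f2a9c1e><b4d07e6a>Second sentence.</b4d07e6a>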
print("Tagging sentences...")
doc = nlp(text)
tagged_text = ""
tag_mapping = {}
for i, sent in enumerate(doc.sents):
sentence = sent.text.strip()
if sentence:
tag = hashlib.md5(f"{i}_{sentence[:50]}".encode()).hexdigest()[:8]
tag_mapping[tag] = sentence
tagged_text += f"<{tag}>{sentence}</{tag}>"
print(f"Tagged {len(tag_mapping)} sentences")

    # Build prompt
    language_names = {"en": "English", "de": "German", "es": "Spanish", "fr": "French", "it": "Italian"}
    prompt = f"""You are a professional summarizer, following all given instructions with the utmost care.
<text>
{tagged_text}
</text>

# Output Format
The output must be in JSON format with the following structure:
1. A "structure" string containing your thoughts about the content and structure of the summary
2. An "xml_tags" list containing the XML tag identifiers from the tagged text (e.g., "<a1b2c3d4>")
3. A "summary" string containing the actual summary with inline XML tag references

# Instructions
1. Start by thinking about and explaining the structure and content of your summary. Select {args.tags} XML tags from the tagged text that capture the most significant data and facts.
2. Begin with an executive summary introducing the title, author (if available), and key findings.
3. Structure the summary in coherent paragraphs. Every paragraph should contain at least one XML tag reference.
4. Reference XML tags inline in square brackets (e.g., [<a1b2c3d4>]) immediately after the statement they support.
5. Each XML tag must appear exactly once in the summary.
6. Avoid a concluding paragraph that merely restates points.
7. Do not use bullet points or headings unless explicitly requested.

Parameters:
- Word count (excl. XML tags): {args.words}
- Number of XML tags: {args.tags}
- Language: {language_names[args.language]}
"""

    # Load model and generate
    print(f"Loading model: {args.model}")
    llm = LLM(
        model=args.model,
        tensor_parallel_size=args.tensor_parallel,
        dtype="bfloat16",
        tokenizer_mode="mistral",
        trust_remote_code=True,
        limit_mm_per_prompt={"image": 0},  # Disable vision encoder for text-only
    )
print("Generating summary...")
    sampling_params = SamplingParams(max_tokens=4096, temperature=0.0)
    outputs = llm.chat([[{"role": "user", "content": prompt}]], sampling_params)
    result = outputs[0].outputs[0].text

    # Parse and display output
    if args.raw:
        print(result)
        return

    try:
        # Extract JSON from response
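        # A greedy match from the first "{" to the last "}" tolerates any text
        # the model emits before or after the JSON object.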
        json_match = re.search(r'\{[\s\S]*\}', result)
        if json_match:
            data = json.loads(json_match.group())
            print("\n" + "=" * 60)
            print("SUMMARY")
            print("=" * 60 + "\n")
            summary = data.get("summary", "")
            # Normalize inline citations from [<tag>] to [tag]; the full source
            # sentences are listed under SOURCES below.
            def replace_tag(match):
                return f"[{match.group(1)}]"

            clean_summary = re.sub(r'\[<([a-f0-9]{8})>\]', replace_tag, summary)
            print(clean_summary)

            print("\n" + "-" * 60)
            print("SOURCES")
            print("-" * 60)

            # Show referenced sources
            # Handle both formats: ["<tag>"] or [{"xml_tag": "<tag>"}]
            xml_tags = data.get("xml_tags", [])
            for tag in xml_tags:
                if isinstance(tag, str):
                    clean_tag = tag.strip("<>")
                elif isinstance(tag, dict) and "xml_tag" in tag:
                    clean_tag = tag["xml_tag"].strip("<>")
                else:
                    continue
                source = tag_mapping.get(clean_tag, "Not found")
                if len(source) > 100:
                    source = source[:97] + "..."
                print(f"[{clean_tag}] {source}")
        else:
            print("Could not parse JSON response:")
            print(result)
    except json.JSONDecodeError as e:
        print(f"JSON parse error: {e}")
        print(result)


if __name__ == "__main__":
    main()