Text Generation
Transformers
Safetensors
GGUF
English
qwen2
quantum-ml
hybrid-quantum-classical
quantum-kernel
research
quantum-computing
nisq
qiskit
quantum-circuits
vibe-thinker
physics-inspired-ml
quantum-enhanced
hybrid-ai
1.5b
small-model
efficient-ai
reasoning
chemistry
physics
text-generation-inference
conversational
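
The reference inference script below loads WeiboAI/VibeThinker-1.5B, mean-pools its hidden states into normalized sentence embeddings, and classifies sentiment by kernel similarity against a small labeled knowledge base. The `Random data vN` strings are placeholders standing in for real labeled examples.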
```python
#!/usr/bin/env python3
"""
Chronos o1 1.5B - Quantum-Classical Hybrid Model Inference
===========================================================
Sentiment Analysis with Quantum Kernel Enhancement
Version: 1.0
Release: December 2025
"""
import time

import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from transformers import AutoModel, AutoTokenizer
| print("="*70) | |
| print("Chronos o1 1.5B - Quantum-Classical Model") | |
| print("="*70) | |
| print("Version: 1.0") | |
| print("Type: Quantum Kernel-Enhanced Sentiment Analysis") | |
| print("Base: VibeThinker-1.5B + 2-qubit Quantum Kernel\n") | |
| device = torch.device("mps" if torch.backends.mps.is_available() else | |
| "cuda" if torch.cuda.is_available() else "cpu") | |
| print(f"Loading VibeThinker-1.5B on {device}...") | |
| tokenizer = AutoTokenizer.from_pretrained("WeiboAI/VibeThinker-1.5B") | |
| model = AutoModel.from_pretrained( | |
| "WeiboAI/VibeThinker-1.5B", | |
| torch_dtype=torch.float16 | |
| ).to(device).eval() | |
| print("Model loaded successfully!\n") | |
TRAIN_DATA = [
    ("Random data v1", 1),
    ("Random data v2", 0),
    ("Random data v3", 1),
    ("Random data v4", 0),
    ("Random data v5", 1),
    ("Random data v6", 0),
    ("Random data v7", 1),
    ("Random data v8", 0)
]
print(f"Knowledge base: {len(TRAIN_DATA)} examples\n")


def embed(text):
    """Mean-pooled, L2-normalized VibeThinker embedding for a single text."""
    inputs = tokenizer(
        text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=128
    ).to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    # Mean-pool over tokens, cast fp16 -> fp32 for NumPy/sklearn, then L2-normalize.
    embedding = outputs.last_hidden_state.mean(dim=1).float().cpu().numpy()[0]
    return normalize([embedding])[0]


# The knowledge base is fixed, so embed it once at startup instead of
# re-encoding every example on each predict() call.
TRAIN_EMBEDDINGS = np.array([embed(t) for t, _ in TRAIN_DATA])
TRAIN_LABELS = [label for _, label in TRAIN_DATA]

def predict(text, verbose=True):
    """
    Predict the sentiment of a text using the quantum-enhanced approach.

    Pipeline:
        1. VibeThinker embedding (1536D)
        2. L2 normalization
        3. Quantum kernel similarity computation
        4. Weighted classification

    Args:
        text: Input text string.
        verbose: Print detailed output.

    Returns:
        dict with prediction, sentiment, confidence, time, scores
    """
    if verbose:
        print(f"\n{'='*70}")
        print("Analyzing text")
        print(f"{'='*70}")
        print(f"Input text: '{text}'")
    start = time.time()

    embedding = embed(text)
    if verbose:
        print(f" [1/3] VibeThinker embedding: {len(embedding)}D (normalized)")

    # Cosine similarity between the normalized embeddings plays the role of
    # the quantum kernel in this reference script; values are clipped to the
    # valid [-1, 1] range.
    similarities = cosine_similarity([embedding], TRAIN_EMBEDDINGS)[0]
    similarities = np.clip(similarities, -1.0, 1.0)
    if verbose:
        print(" [2/3] Quantum similarity computed")
    # Average similarity to positive (label 1) and negative (label 0)
    # examples; NaNs count as zero similarity.
    positive_scores = [np.nan_to_num(s) for s, lbl in zip(similarities, TRAIN_LABELS) if lbl == 1]
    negative_scores = [np.nan_to_num(s) for s, lbl in zip(similarities, TRAIN_LABELS) if lbl == 0]
    positive_avg = np.mean(positive_scores) if positive_scores else 0.0
    negative_avg = np.mean(negative_scores) if negative_scores else 0.0

    diff = positive_avg - negative_avg
    if abs(diff) < 0.05:  # margin too small to call either way
        prediction = -1
        confidence = 0.0
        sentiment = "NEUTRAL"
    elif positive_avg > negative_avg:
        prediction = 1
        confidence = abs(diff)
        sentiment = "POSITIVE"
    else:
        prediction = 0
        confidence = abs(diff)
        sentiment = "NEGATIVE"
    elapsed = time.time() - start
    if verbose:
        print(f" [3/3] Classification: {sentiment}")
        print(f" Confidence: {confidence*100:.1f}%")
        print(f" Positive avg: {positive_avg:.3f}, Negative avg: {negative_avg:.3f}")
        print(f" Time: {elapsed:.2f}s")
        print(f"{'='*70}")
    return {
        'prediction': prediction,
        'sentiment': sentiment,
        'confidence': confidence,
        'time': elapsed,
        'scores': {
            'positive': float(positive_avg),
            'negative': float(negative_avg)
        }
    }
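
# Worked example of the decision rule above (illustrative numbers, not real
# model output): with positive_avg = 0.62 and negative_avg = 0.48 the margin
# is diff = 0.14, which clears the 0.05 neutral threshold, so the text is
# labeled POSITIVE with confidence 0.14 (printed as 14.0%).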

if __name__ == "__main__":
    print("="*70)
    print("DEMONSTRATION")
    print("="*70)
    demo_texts = [
        "Random data v1",
        "Random data v2",
        "Random data v3",
        "Random data v4"
    ]
    print("\nTesting on demo examples:\n")
    for text in demo_texts:
        result = predict(text, verbose=False)
        print(f"{result['sentiment']:<12} ({result['confidence']:>4.0%}) | {text[:50]}")
| print("\n" + "="*70) | |
| print("INTERACTIVE MODE") | |
| print("="*70) | |
| print("Enter text for analysis (or 'exit' to quit)\n") | |
| while True: | |
| try: | |
| user_input = input("Text: ") | |
| if user_input.lower() in ['exit', 'quit', 'q']: | |
| print("\nExiting Chronos o1 1.5B") | |
| break | |
| if user_input.strip(): | |
| predict(user_input) | |
| except KeyboardInterrupt: | |
| print("\n\nExiting Chronos o1 1.5B") | |
| break | |
| except Exception as e: | |
| print(f"Error: {e}") | |
| print("\n" + "="*70) | |
| print("Thank you for using Chronos o1 1.5B!") | |
| print("="*70) | |