| import gradio as gr |
| import random |
| import re |
| import torch |
| from datetime import datetime |
| from typing import List, Dict, Optional |
| import warnings |
| warnings.filterwarnings('ignore') |
|
|
| |
| try: |
| from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, TextStreamer |
| from transformers import BitsAndBytesConfig |
| import accelerate |
| TRANSFORMERS_AVAILABLE = True |
| except ImportError: |
| TRANSFORMERS_AVAILABLE = False |
| print("⚠️ Transformers kütüphanesi yüklü değil. pip install transformers torch accelerate") |
|
|
| |
| try: |
| from datasets import load_dataset, DatasetDict |
| DATASETS_AVAILABLE = True |
| except ImportError: |
| DATASETS_AVAILABLE = False |
| print("⚠️ 'datasets' kütüphanesi yüklü değil. pip install datasets") |
|
|
| |
| |
| |
class TurkishGemmaManager:
    """Manages the Turkish-Gemma-9b causal language model.

    If the transformers stack is unavailable or the model fails to load,
    the manager stays in demo mode and serves canned template responses.
    """

    def __init__(self):
        self.model = None          # AutoModelForCausalLM once loaded
        self.tokenizer = None      # matching AutoTokenizer
        self.generator = None      # transformers text-generation pipeline
        self.model_loaded = False  # True only after a successful load_model()

    def load_model(self, use_4bit=True):
        """Load the Turkish-Gemma model and build a generation pipeline.

        Args:
            use_4bit: quantize weights to 4-bit NF4 to cut VRAM usage.

        Returns:
            True on success, False otherwise (demo mode stays active).
        """
        if not TRANSFORMERS_AVAILABLE:
            print("❌ Transformers kütüphanesi yüklü değil")
            return False

        try:
            model_name = "ytu-ce-cosmos/Turkish-Gemma-9b-T1"
            print(f"🚀 Turkish-Gemma-9b modeli yükleniyor: {model_name}")

            # Optional 4-bit NF4 quantization (with double quantization) so
            # the 9B-parameter model fits on consumer GPUs.
            if use_4bit:
                bnb_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_compute_dtype=torch.float16,
                    bnb_4bit_use_double_quant=True
                )
            else:
                bnb_config = None

            self.tokenizer = AutoTokenizer.from_pretrained(
                model_name,
                trust_remote_code=True
            )

            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                quantization_config=bnb_config if use_4bit else None,
                # fp16 only when not quantized; bitsandbytes manages the
                # compute dtype itself in the 4-bit case.
                torch_dtype=torch.float16 if not use_4bit else None,
                device_map="auto",
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )

            self.generator = pipeline(
                "text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
                max_new_tokens=100,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.eos_token_id
            )

            self.model_loaded = True
            print("✅ Turkish-Gemma-9b modeli başarıyla yüklendi!")
            return True

        except Exception as e:
            print(f"❌ Turkish-Gemma modeli yüklenemedi: {e}")
            print("⚠️ Demo modu kullanılacak")
            return False

    def generate_turkish_text(self, prompt, max_length=150):
        """Generate Turkish text for *prompt*; demo text if model is absent.

        Args:
            prompt: topic/instruction text.
            max_length: upper bound on newly generated tokens.

        Returns:
            The cleaned completion string.
        """
        if not self.model_loaded:
            return self._demo_generate(prompt)

        try:
            optimized_prompt = self._optimize_prompt(prompt)

            # BUGFIX: the original passed max_length=..., which counts prompt
            # tokens too and conflicts with the pipeline's max_new_tokens
            # setting; cap only the newly generated tokens instead.
            outputs = self.generator(
                optimized_prompt,
                max_new_tokens=max_length,
                num_return_sequences=1,
                truncation=True
            )

            generated_text = outputs[0]['generated_text']

            # The pipeline echoes the prompt; keep only the completion.
            if generated_text.startswith(optimized_prompt):
                generated_text = generated_text[len(optimized_prompt):].strip()

            return self._clean_generated_text(generated_text)

        except Exception as e:
            print(f"❌ Turkish-Gemma ile üretim hatası: {e}")
            return self._demo_generate(prompt)

    def _optimize_prompt(self, prompt):
        """Wrap *prompt* in a randomly chosen tweet-writing instruction."""
        tweet_prompts = [
            f"Bir tweet yaz: {prompt}",
            f"Twitter için kısa bir mesaj yaz: {prompt}",
            f"Sosyal medya paylaşımı oluştur: {prompt}",
            f"Tweet: {prompt}"
        ]

        return random.choice(tweet_prompts)

    def _clean_generated_text(self, text):
        """Trim *text* to tweet length and limit hashtag spam."""
        # Hard tweet limit is 280 characters; leave room for the ellipsis.
        if len(text) > 280:
            text = text[:275] + "..."

        # Collapse runs of blank lines into a single blank line.
        text = re.sub(r'\n\s*\n', '\n\n', text)

        # BUGFIX: keep the first three hashtags instead of stripping every
        # one (the original deleted all of them and left stray spaces).
        hashtags = re.findall(r'#\w+', text)
        if len(hashtags) > 3:
            seen = [0]

            def _keep_first_three(match):
                seen[0] += 1
                return match.group(0) if seen[0] <= 3 else ''

            text = re.sub(r'#\w+', _keep_first_three, text)
            # Tidy the double spaces left where tags were removed.
            text = re.sub(r'[ \t]{2,}', ' ', text)

        return text.strip()

    def _demo_generate(self, prompt):
        """Return a canned tweet template mentioning *prompt* (demo mode)."""
        demo_responses = [
            f"{prompt} hakkında ilginç gelişmeler var! #gündem",
            f"Bugün {prompt} konusunu tartışıyoruz. Ne düşünüyorsunuz?",
            f"{prompt} alanında yeni bir dönem başlıyor. Heyecanlıyım!",
            f"Merak ediyorum: {prompt} hakkında ne biliyorsunuz?",
            f"{prompt} ile ilgili önemli bir haber geldi."
        ]

        return random.choice(demo_responses)

    def analyze_sentiment_with_gemma(self, text):
        """Classify *text* sentiment via the LLM.

        Returns:
            Dict with 'label' (positive/negative/neutral) and a heuristic
            'score' confidence; neutral/0.5 in demo mode or on failure.
        """
        if not self.model_loaded:
            return {"label": "neutral", "score": 0.5}

        try:
            sentiment_prompt = f"Aşağıdaki metnin duygusunu analiz et (positive, negative, neutral):\n{text}\nDuygu:"

            # BUGFIX: use max_new_tokens — the original max_length=50 was
            # routinely shorter than the prompt itself.
            outputs = self.generator(
                sentiment_prompt,
                max_new_tokens=10,
                num_return_sequences=1
            )

            response = outputs[0]['generated_text']
            # BUGFIX: the pipeline echoes the prompt, and the prompt itself
            # contains the words "positive, negative, neutral", so checking
            # the full output always matched "positive". Inspect only the
            # model's completion.
            if response.startswith(sentiment_prompt):
                response = response[len(sentiment_prompt):]
            response_lower = response.lower()

            if 'positive' in response_lower or 'pozitif' in response_lower:
                return {"label": "positive", "score": 0.85}
            elif 'negative' in response_lower or 'negatif' in response_lower:
                return {"label": "negative", "score": 0.85}
            else:
                return {"label": "neutral", "score": 0.75}

        except Exception as e:
            print(f"❌ Turkish-Gemma ile duygu analizi hatası: {e}")
            return {"label": "neutral", "score": 0.5}
|
|
| |
| |
| |
class TurkishDatasetManager:
    """Loads Turkish Hugging Face datasets, with an in-memory demo fallback."""

    def __init__(self):
        self.datasets = {}   # name -> dataset (HF DatasetDict or demo dict)
        self.loaded = False  # True once real or demo data is installed

    def load_all_datasets(self):
        """Load all datasets; fall back to demo data on any failure.

        Returns:
            True when real Hugging Face datasets were loaded, False when
            the demo fallback had to be used.
        """
        if not DATASETS_AVAILABLE:
            print("⚠️ Datasets kütüphanesi yok, demo veriler kullanılacak")
            return self._create_demo_datasets()

        try:
            print("📥 Türkçe veri setleri yükleniyor...")

            # Turkish sentiment corpus.
            self.datasets['sentiment'] = load_dataset(
                "turkish_sentiment",
                trust_remote_code=True
            )

            # Turkish split of the MLSUM news summarization corpus.
            self.datasets['news'] = load_dataset(
                "mlsum",
                "tr",
                trust_remote_code=True
            )

            self.loaded = True
            print("✅ Veri setleri yüklendi")
            # BUGFIX: the success path previously returned None while the
            # failure path returned the fallback's result; report success.
            return True

        except Exception as e:
            print(f"❌ Veri setleri yüklenemedi: {e}")
            return self._create_demo_datasets()

    def _create_demo_datasets(self):
        """Install tiny hard-coded demo datasets.

        Returns:
            False, signalling that only demo data is available.
        """
        self.datasets = {
            'sentiment': {
                'train': [
                    {'text': 'Harika bir gün!', 'sentiment': 'positive'},
                    {'text': 'Çok kötü bir durum', 'sentiment': 'negative'},
                ]
            },
            'news': {
                'train': [
                    {'text': 'Teknoloji haberleri', 'summary': 'teknoloji'},
                    {'text': 'Spor haberleri', 'summary': 'spor'},
                ]
            }
        }
        self.loaded = True
        return False
|
|
| |
| |
| |
class EnhancedTurkishTweetGenerator:
    """Builds tweets with Turkish-Gemma and attaches lightweight analytics."""

    def __init__(self, dataset_manager, gemma_manager):
        self.dataset_manager = dataset_manager  # TurkishDatasetManager
        self.gemma_manager = gemma_manager      # TurkishGemmaManager
        self.trends = self._extract_trends_from_datasets()

    def _extract_trends_from_datasets(self):
        """Return trending topic seeds.

        NOTE(review): currently a static list; presumably intended to be
        derived from self.dataset_manager eventually — confirm.
        """
        return ["teknoloji", "spor", "siyaset", "ekonomi", "sağlık",
                "eğitim", "sinema", "müzik", "yapayzeka", "çevre"]

    def generate_tweet_with_gemma(self, topic, mood, custom_topic,
                                  include_hashtags=True, include_emoji=True):
        """Generate a tweet and compute sentiment/engagement estimates.

        Args:
            topic: dropdown choice ("Özel" means no preset topic).
            mood: "positive" | "negative" | "neutral" (unknown -> neutral).
            custom_topic: free-text topic; takes priority when non-empty.
            include_hashtags: append topic hashtags (kept within 280 chars).
            include_emoji: prefix a mood emoji.

        Returns:
            Dict with the tweet text and its analysis fields.
        """
        # Topic priority: explicit custom topic > dropdown choice > random trend.
        # BUGFIX: guard against custom_topic being None before .strip().
        custom_topic = (custom_topic or "").strip()
        if custom_topic:
            final_topic = custom_topic
        elif topic and topic != "Özel":
            final_topic = topic
        else:
            final_topic = random.choice(self.trends)

        mood_prompts = {
            "positive": f"{final_topic} hakkında olumlu ve heyecan verici bir tweet",
            "negative": f"{final_topic} hakkında eleştirel ve düşündürücü bir tweet",
            "neutral": f"{final_topic} hakkında bilgilendirici ve nötr bir tweet"
        }

        prompt = mood_prompts.get(mood, mood_prompts["neutral"])

        tweet_text = self.gemma_manager.generate_turkish_text(prompt)

        # BUGFIX: only append hashtags while the tweet stays within the
        # 280-character limit; the original appended unconditionally and
        # could overflow a tweet that was already near the cap.
        if include_hashtags:
            primary_tag = f"\n\n#{final_topic.capitalize()}"
            if len(tweet_text) + len(primary_tag) <= 280:
                tweet_text += primary_tag
            secondary_tag = f" #{final_topic}Gündem"
            if len(tweet_text) < 250 and len(tweet_text) + len(secondary_tag) <= 280:
                tweet_text += secondary_tag

        if include_emoji:
            emojis = {"positive": "😊", "negative": "🤔", "neutral": "📊"}
            tweet_text = emojis.get(mood, "📝") + " " + tweet_text

        sentiment = self.gemma_manager.analyze_sentiment_with_gemma(tweet_text)
        engagement = self._predict_engagement(tweet_text)

        return {
            "tweet": tweet_text,
            "topic": final_topic,
            "mood": mood,
            "length": len(tweet_text),
            "sentiment": sentiment["label"],
            "confidence": f"%{sentiment['score']*100:.1f}",
            "predicted_likes": engagement["likes"],
            "predicted_retweets": engagement["retweets"],
            "engagement_score": engagement["score"]
        }

    def _predict_engagement(self, tweet):
        """Heuristic engagement score (0-100) plus like/retweet estimates."""
        score = 50  # baseline

        # Up to +20 for hashtags (5 points each).
        hashtag_count = len(re.findall(r'#\w+', tweet))
        score += min(20, hashtag_count * 5)

        # +10 when any recognised emoji is present.
        if any(emoji in tweet for emoji in ["😊", "🎉", "🌟", "😔", "😢", "🤔"]):
            score += 10

        # +15 for the "sweet spot" tweet length.
        length = len(tweet)
        if 80 <= length <= 160:
            score += 15

        score = min(100, max(0, score))

        return {
            "score": score,
            "likes": int(score * 10),
            "retweets": int(score * 4)
        }

    def get_trending_topics(self):
        """Return up to 10 mock trending topics sorted by score (desc)."""
        topics = random.sample(self.trends, min(10, len(self.trends)))

        trending = []
        for i, topic in enumerate(topics, 1):
            trending.append({
                "rank": i,
                "topic": topic,
                "hashtag": f"#{topic}",
                "tweet_count": random.randint(1000, 50000),
                "score": random.randint(30, 100)
            })

        return sorted(trending, key=lambda x: x["score"], reverse=True)
|
|
| |
| |
| |
class PhoenixRecommender:
    """Recommends generated tweets based on the user's stated interests."""

    def __init__(self, gemma_manager):
        self.gemma_manager = gemma_manager  # TurkishGemmaManager (or demo mode)

    def recommend_tweets(self, user_interests, num_recommendations=5):
        """Generate and rank tweet recommendations.

        Args:
            user_interests: iterable of interest strings (blanks ignored,
                lower-cased); defaults are used when none remain.
            num_recommendations: maximum number of results; float values
                (e.g. from a UI slider) are accepted and truncated.

        Returns:
            List of dicts (tweet, score, interest, hashtags) sorted by
            score descending.
        """
        interests = [i.strip().lower() for i in user_interests if i.strip()]

        if not interests:
            interests = ['teknoloji', 'spor', 'haber']

        # BUGFIX: Gradio sliders can deliver floats; list slicing below
        # requires an int index.
        num_recommendations = int(num_recommendations)

        recommendations = []
        # Only the first three interests are used to keep generation cheap.
        for interest in interests[:3]:
            prompt = f"{interest} hakkında popüler bir tweet"
            tweet = self.gemma_manager.generate_turkish_text(prompt)

            # NOTE(review): the score is a random placeholder ranking.
            score = random.randint(60, 95)
            recommendations.append({
                "tweet": tweet,
                "score": score,
                "interest": interest,
                "hashtags": re.findall(r'#\w+', tweet)
            })

        recommendations.sort(key=lambda x: x["score"], reverse=True)
        return recommendations[:num_recommendations]
|
|
| |
| |
| |
def main():
    """Load the model and datasets, build the Gradio UI, return the app."""
    print("🚀 Turkish-Gemma Tweet AI Başlatılıyor...")

    # Load the LLM first; on failure the manager stays in demo mode.
    print("🤖 Turkish-Gemma modeli yükleniyor...")
    gemma_manager = TurkishGemmaManager()
    gemma_manager.load_model(use_4bit=True)

    print("📥 Veri setleri yükleniyor...")
    dataset_manager = TurkishDatasetManager()
    dataset_manager.load_all_datasets()

    # Feature layers share the managers created above.
    tweet_gen = EnhancedTurkishTweetGenerator(dataset_manager, gemma_manager)
    phoenix = PhoenixRecommender(gemma_manager)

    # ---- UI callbacks (closures over the managers above) ----

    def generate_tweet_ui(topic, mood, custom_topic, include_hashtags, include_emoji):
        """Generate a tweet; returns (markdown report, plain tweet text)."""
        result = tweet_gen.generate_tweet_with_gemma(
            topic=topic,
            mood=mood,
            custom_topic=custom_topic,
            include_hashtags=include_hashtags,
            include_emoji=include_emoji
        )

        output = f"""
🎯 **TURKISH-GEMMA İLE OLUŞTURULDU**

📝 **TWEET:**
{result['tweet']}

📊 **ANALİZ:**
• 🏷️ Konu: {result['topic']}
• 🎭 Ruh Hali: {result['mood']}
• 😊 Duygu: {result['sentiment']} ({result['confidence']})
• 📏 Uzunluk: {result['length']}/280 karakter

📈 **TAHMİNLER:**
• ❤️ Beğeni: {result['predicted_likes']}
• 🔄 Retweet: {result['predicted_retweets']}
• ⭐ Engagement: {result['engagement_score']}/100
"""

        return output, result['tweet']

    def analyze_tweet_ui(tweet_text):
        """Analyze a tweet; returns a markdown report with suggestions."""
        sentiment = gemma_manager.analyze_sentiment_with_gemma(tweet_text)

        # Cheap lexical statistics, independent of the model.
        hashtag_count = len(re.findall(r'#\w+', tweet_text))
        question_count = tweet_text.count('?')
        # NOTE(review): this character class covers only one emoji range;
        # many emoji fall outside U+1F600..U+1F64F — confirm if full
        # coverage is needed.
        emoji_count = len(re.findall(r'[😀-🙏]', tweet_text))

        output = f"""
🤖 **TURKISH-GEMMA ANALİZİ**

🎭 **DUYGU ANALİZİ:**
• Sonuç: {sentiment['label'].upper()}
• Güven: %{sentiment['score']*100:.1f}

🔍 **TEKNİK ANALİZ:**
• 📏 Karakter: {len(tweet_text)}/280
• 🏷️ Hashtag: {hashtag_count}
• 😊 Emoji: {emoji_count}
• ❓ Soru: {question_count}

💡 **ÖNERİLER:**
"""

        # Append actionable suggestions only when applicable.
        if len(tweet_text) > 280:
            output += "• ⚠️ Tweet çok uzun, kısaltın\n"
        if hashtag_count == 0:
            output += "• 💡 Hashtag ekleyin\n"
        if emoji_count == 0:
            output += "• 😊 Emoji ekleyin\n"

        return output

    def show_trends_ui():
        """Render trending topics plus a short system-status footer."""
        trends = tweet_gen.get_trending_topics()

        output = "🔥 **TREND KONULAR**\n\n"

        for trend in trends:
            # Unicode bar chart scaled to score/10.
            bar = "█" * (trend["score"] // 10) + "░" * (10 - trend["score"] // 10)
            output += f"{trend['rank']:2d}. {trend['hashtag']:15} {bar} {trend['score']}/100\n"
            output += f" 📊 {trend['tweet_count']:,} tweet\n\n"

        output += f"🤖 **Sistem Durumu:**\n"
        output += f"• Turkish-Gemma: {'✅ Aktif' if gemma_manager.model_loaded else '❌ Demo'}\n"
        output += f"• Veri Setleri: {'✅ Yüklü' if dataset_manager.loaded else '❌ Demo'}\n"

        return output

    def recommend_tweets_ui(user_interests, num_recommendations):
        """Generate ranked suggestions from comma-separated interests."""
        interests = [i.strip() for i in user_interests.split(',') if i.strip()]

        # NOTE(review): slider values may arrive as float — verify that
        # recommend_tweets tolerates a non-int count.
        recommendations = phoenix.recommend_tweets(interests, num_recommendations)

        output = f"⚡ **TURKISH-GEMMA ÖNERİLERİ**\n\n"
        output += f"👤 İlgi Alanlarınız: {', '.join(interests) if interests else 'Varsayılan'}\n"
        output += f"🎯 Öneri Sayısı: {num_recommendations}\n\n"

        for i, rec in enumerate(recommendations, 1):
            # Five-star rating derived from the 0-100 score.
            stars = "★" * (rec["score"] // 20) + "☆" * (5 - rec["score"] // 20)
            output += f"**{i}. Tweet** ({stars} {rec['score']}/100)\n"
            output += f"🏷️ Konu: {rec['interest']}\n"
            output += f"{rec['tweet']}\n"
            output += f"🏷️ Hashtag'ler: {', '.join(rec['hashtags']) if rec['hashtags'] else 'Yok'}\n"
            output += "─" * 40 + "\n\n"

        # Plain-text copy of all suggested tweets for the textbox output.
        all_tweets = "\n\n".join([r["tweet"] for r in recommendations])

        return output, all_tweets

    # ---- Gradio layout ----
    with gr.Blocks(title="🤖 Turkish-Gemma Tweet AI", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🤖 Turkish-Gemma Tweet AI")
        gr.Markdown(f"### Model Durumu: {'✅ Turkish-Gemma-9b Aktif' if gemma_manager.model_loaded else '⚠️ Demo Modu'}")

        with gr.Tabs():

            # Tab 1: tweet generation.
            with gr.TabItem("🎯 Gemma ile Tweet Oluştur"):
                with gr.Row():
                    with gr.Column():
                        topic = gr.Dropdown(
                            choices=["Özel"] + tweet_gen.trends,
                            value="teknoloji",
                            label="Konu"
                        )
                        custom_topic = gr.Textbox(
                            label="Özel Konu",
                            placeholder="Kendi konunuzu yazın..."
                        )
                        mood = gr.Radio(
                            choices=["positive", "neutral", "negative"],
                            value="neutral",
                            label="Ruh Hali"
                        )
                        with gr.Row():
                            include_hashtags = gr.Checkbox(label="Hashtag Ekle", value=True)
                            include_emoji = gr.Checkbox(label="Emoji Ekle", value=True)

                        btn1 = gr.Button("🤖 Gemma ile Oluştur", variant="primary")

                    with gr.Column():
                        output1 = gr.Markdown(label="Sonuçlar")
                        tweet_output1 = gr.Textbox(label="Tweet Metni", lines=5)

            # Tab 2: tweet analysis.
            with gr.TabItem("📊 Gemma ile Analiz"):
                with gr.Row():
                    with gr.Column():
                        analyze_input = gr.Textbox(
                            label="Tweet Metni",
                            placeholder="Analiz etmek istediğiniz tweet...",
                            lines=4
                        )
                        btn2 = gr.Button("🤖 Gemma ile Analiz Et", variant="primary")

                    with gr.Column():
                        output2 = gr.Markdown(label="Analiz Sonuçları")

            # Tab 3: trending topics.
            with gr.TabItem("🔥 Trendler"):
                with gr.Row():
                    with gr.Column():
                        btn3 = gr.Button("Trendleri Göster", variant="primary")

                    with gr.Column():
                        output3 = gr.Markdown(label="Trendler")

            # Tab 4: personalized recommendations.
            with gr.TabItem("⚡ Gemma Önerileri"):
                with gr.Row():
                    with gr.Column():
                        user_interests = gr.Textbox(
                            label="İlgi Alanlarınız (virgülle ayırın)",
                            placeholder="teknoloji, spor, müzik",
                            value="teknoloji, spor, haber"
                        )
                        rec_count = gr.Slider(1, 10, 5, label="Öneri Sayısı")
                        btn4 = gr.Button("🤖 Gemma ile Öner", variant="primary")

                    with gr.Column():
                        output4 = gr.Markdown(label="Öneriler")
                        tweet_output4 = gr.Textbox(label="Önerilen Tweet'ler", lines=8)

            # Tab 5: static system status page.
            with gr.TabItem("🛠️ Sistem Durumu"):
                gr.Markdown(f"""
## 🖥️ Sistem Bilgileri

### 🤖 Turkish-Gemma-9b:
- **Durum:** {'✅ Aktif' if gemma_manager.model_loaded else '❌ Demo Modu'}
- **Model:** ytu-ce-cosmos/Turkish-Gemma-9b-T1
- **Quantization:** 4-bit
- **Parametre:** 9 Milyar

### 📊 Veri Setleri:
- **Durum:** {'✅ Yüklü' if dataset_manager.loaded else '❌ Demo'}
- **Kaynak:** Hugging Face
- **Türkçe Sentiment:** {'✅' if 'sentiment' in dataset_manager.datasets else '❌'}
- **Haber Verisi:** {'✅' if 'news' in dataset_manager.datasets else '❌'}

### 🚀 Özellikler:
1. **Akıllı Tweet Üretimi:** Turkish-Gemma ile
2. **Duygu Analizi:** Gerçek zamanlı analiz
3. **Trend Takibi:** Güncel konular
4. **Kişiselleştirilmiş Öneriler:** İlgi alanlarına göre

### ⚠️ Not:
{'' if gemma_manager.model_loaded else 'Turkish-Gemma modeli yüklenemedi, demo modu aktif.'}
""")

        # ---- Event bindings ----
        btn1.click(
            generate_tweet_ui,
            inputs=[topic, mood, custom_topic, include_hashtags, include_emoji],
            outputs=[output1, tweet_output1]
        )

        btn2.click(
            analyze_tweet_ui,
            inputs=[analyze_input],
            outputs=[output2]
        )

        btn3.click(
            show_trends_ui,
            outputs=[output3]
        )

        btn4.click(
            recommend_tweets_ui,
            inputs=[user_interests, rec_count],
            outputs=[output4, tweet_output4]
        )

        # Pre-populate the trends tab when the page loads.
        demo.load(show_trends_ui, outputs=[output3])

    return demo
|
|
| |
| |
| |
if __name__ == "__main__":
    # Startup banner (Turkish).
    print("""
========================================
🚀 TURKISH-GEMMA TWEET AI
========================================

🤖 Model: ytu-ce-cosmos/Turkish-Gemma-9b-T1
📊 Veri Setleri: Hugging Face
🎯 Özellik: Türkçe Tweet Üretimi ve Analizi

⚠️ NOT: İlk başlatma sırasında model yüklenecektir.
Bu işlem internet hızınıza bağlı olarak zaman alabilir.
""")

    # Build the UI; main() also loads the model and datasets.
    demo = main()

    # Late warning when the LLM stack is missing (app runs in demo mode).
    if not TRANSFORMERS_AVAILABLE:
        print("\n⚠️ UYARI: Transformers kütüphanesi yüklü değil!")
        print("📦 Kurulum için: pip install transformers torch accelerate")

    # Listen on all interfaces so the app is reachable inside containers.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )