# contexto / app.py — uploaded by therayz1 (commit 5b7d6e5, verified)
import gradio as gr
import json
import time
import os
from datetime import datetime
from llm_utils import LLM_PROVIDERS, get_llm_models, validate_api_key, call_llm, parse_llm_output, create_zip_from_files
from project_types import PROJECT_TYPES
from advanced_prompts import (
get_advanced_context_prompt,
get_advanced_commands_prompt,
get_advanced_prompts_prompt,
calculate_project_complexity,
get_industry_specific_requirements
)
# Global mutable state shared across Gradio event handlers.
# generated_files maps each section name to a {filename: file_content} dict.
generated_files = {'context': {}, 'commands': {}, 'prompts': {}}
# Per-section generation statistics: estimated token usage and elapsed seconds.
stats = {'context': {'tokens': 0, 'time': 0}, 'commands': {'tokens': 0, 'time': 0}, 'prompts': {'tokens': 0, 'time': 0}}
# Result of the most recent analyze_project() call; read by download_all_professional().
project_analysis = {}
# Industry choices for the "Sektör" dropdown (labels intentionally in Turkish).
INDUSTRIES = [
    "Teknoloji/Yazılım", "Finans/Fintech", "Sağlık/Healthcare",
    "E-ticaret", "Eğitim/EdTech", "Oyun/Gaming", "Sosyal Medya",
    "IoT/Akıllı Cihazlar", "Blockchain/Web3", "AI/ML Servisleri",
    "SaaS B2B", "SaaS B2C", "Marketplace", "Diğer"
]
# Project feature choices for the checkbox group (labels intentionally in Turkish).
PROJECT_FEATURES = [
    "Kullanıcı Yönetimi", "Ödeme İşlemleri", "Gerçek Zamanlı Özellikler",
    "AI/ML Entegrasyonu", "Çoklu Platform", "3. Parti Entegrasyonlar",
    "Offline Çalışma", "Çoklu Dil Desteği", "Bildirim Sistemi",
    "Analytics/Raporlama", "Sosyal Özellikler", "Güvenlik/Compliance"
]
def analyze_project(project_type, project_idea, industry, features):
    """Analyze the project and produce recommendations.

    Args:
        project_type: Selected project-type label (from PROJECT_TYPES).
        project_idea: Free-text project description.
        industry: Selected industry label (from INDUSTRIES).
        features: List of selected feature labels (from PROJECT_FEATURES).

    Returns:
        dict with keys: ``complexity`` (per-section scores),
        ``industry_requirements`` (list of requirement strings),
        ``estimated_files`` (reserved, currently unused),
        ``tech_recommendations`` (list of technology names) and
        ``architecture_suggestion`` (short architecture label).
    """
    analysis = {
        "complexity": calculate_project_complexity(project_type, project_idea, features),
        "industry_requirements": get_industry_specific_requirements(industry),
        "estimated_files": {},
        "tech_recommendations": [],
        "architecture_suggestion": ""
    }
    # Technology recommendations keyed off substrings of the (Turkish)
    # project-type label. Order matters: "AI/ML" is matched before the
    # broader "Web"/"Mobil" labels.
    if "AI/ML" in project_type:
        analysis["tech_recommendations"] = ["Python", "TensorFlow/PyTorch", "MLflow", "Docker", "Kubernetes"]
        analysis["architecture_suggestion"] = "Microservices with ML Pipeline"
    elif "Web" in project_type:
        analysis["tech_recommendations"] = ["React/Next.js", "Node.js/Python", "PostgreSQL", "Redis", "Docker"]
        analysis["architecture_suggestion"] = "Modern Jamstack Architecture"
    elif "Mobil" in project_type:
        analysis["tech_recommendations"] = ["React Native/Flutter", "Firebase", "GraphQL", "Redux/MobX"]
        analysis["architecture_suggestion"] = "Clean Architecture with BLoC/MVVM"
    else:
        # Fix: previously any other project type produced an empty
        # recommendation list and a blank architecture string, leaving the
        # "Önerilen Teknolojiler" / "Mimari" report rows empty. Fall back
        # to a sensible general-purpose stack instead.
        analysis["tech_recommendations"] = ["Python", "PostgreSQL", "Docker", "Git/CI-CD"]
        analysis["architecture_suggestion"] = "Layered (Clean) Architecture"
    return analysis
def generate_section_advanced(provider, model, api_key, project_type, project_idea,
                              section, industry, features, tech_details, progress=gr.Progress()):
    """Generate the files for one section via the selected LLM.

    Args:
        provider: LLM provider name (a key of LLM_PROVIDERS).
        model: Model identifier for that provider.
        api_key: User-supplied API key.
        project_type: Project-type label.
        project_idea: Free-text project description.
        section: One of 'context', 'commands', 'prompts'.
        industry: Industry label; "Diğer" (Other) skips industry requirements.
        features: Selected feature labels.
        tech_details: Optional free-text technical details.
        progress: Gradio progress tracker (injected by Gradio).

    Returns:
        (html_message, file_component_value, analysis): on error the last
        two elements are None; on success the middle element is also None
        (no downloadable file is produced here) and the last is the
        analysis dict.
    """
    try:
        start_time = time.time()
        # Run the project analysis up front; it is also cached in the
        # module-level project_analysis for the later download step.
        progress(0.05, desc="Proje analiz ediliyor...")
        analysis = analyze_project(project_type, project_idea, industry, features)
        global project_analysis
        project_analysis = analysis
        progress(0.1, desc=f"{section.capitalize()} için akıllı prompt hazırlanıyor...")
        # Build the section-specific prompt.
        # NOTE(review): an unexpected `section` value leaves `prompt` unbound;
        # the resulting NameError is surfaced through the blanket except below.
        if section == 'context':
            prompt = get_advanced_context_prompt(project_type, project_idea, tech_details)
        elif section == 'commands':
            context_summary = f"Proje {len(generated_files['context'])} context dosyası içeriyor"
            prompt = get_advanced_commands_prompt(project_type, project_idea, context_summary)
        elif section == 'prompts':
            prompt = get_advanced_prompts_prompt(project_type, project_idea, tech_details)
        # Append industry-specific requirements unless "Diğer" (Other) is chosen.
        if industry != "Diğer":
            prompt += f"\n\nSEKTÖR GEREKSİNİMLERİ ({industry}):\n"
            prompt += "\n".join(f"- {req}" for req in analysis["industry_requirements"])
        # Rough token estimate: whitespace word count scaled by 1.3.
        prompt_tokens = len(prompt.split()) * 1.3
        progress(0.3, desc="AI ile iletişim kuruluyor...")
        # Call the configured LLM provider.
        llm_response = call_llm(provider, model, api_key, prompt)
        # NOTE(review): treats any response containing the substring "hata"
        # (Turkish: "error") as a failure — can false-positive on valid output.
        if not llm_response or "hata" in llm_response.lower():
            return f"<p style='color:red;'>❌ Hata: {llm_response}</p>", None, None
        progress(0.6, desc="Yanıt işleniyor ve optimize ediliyor...")
        # Estimate response tokens the same way and total them.
        response_tokens = len(llm_response.split()) * 1.3
        total_tokens = prompt_tokens + response_tokens
        # Split the raw LLM output into a {filename: content} mapping.
        parsed_files = parse_llm_output(llm_response)
        if not parsed_files:
            return f"<p style='color:red;'>❌ Dosyalar ayrıştırılamadı.</p>", None, None
        # Store the parsed files in the module-level cache for later download.
        generated_files[section] = parsed_files
        # Update per-section statistics.
        elapsed_time = time.time() - start_time
        stats[section] = {'tokens': int(total_tokens), 'time': round(elapsed_time, 2)}
        progress(0.9, desc="Kalite kontrolü yapılıyor...")
        # Simple quality heuristic: 60 base points + 4 per file, capped at 100.
        file_count = len(parsed_files)
        quality_score = min(100, 60 + (file_count * 4))
        # Build a scrollable HTML list of the generated files with sizes.
        file_list_html = "<div style='max-height: 200px; overflow-y: auto;'>"
        for i, (file, content) in enumerate(parsed_files.items()):
            size_kb = len(content.encode('utf-8')) / 1024
            file_list_html += f"""
            <div style='padding: 5px; border-bottom: 1px solid #e5e7eb;'>
                <span style='color: #10b981;'>✅</span>
                <strong>{file}</strong>
                <span style='color: #6b7280; font-size: 0.9em;'>({size_kb:.1f} KB)</span>
            </div>
            """
        file_list_html += "</div>"
        # Success card: file count, token estimate, elapsed time, quality score.
        success_msg = f"""
        <div style='background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 20px; border-radius: 12px; color: white;'>
            <h3 style='margin-top: 0;'>✨ {section.capitalize()} Başarıyla Oluşturuldu!</h3>
            <div style='display: grid; grid-template-columns: repeat(3, 1fr); gap: 15px; margin: 15px 0;'>
                <div style='background: rgba(255,255,255,0.2); padding: 10px; border-radius: 8px;'>
                    <div style='font-size: 24px; font-weight: bold;'>{file_count}</div>
                    <div style='font-size: 12px; opacity: 0.9;'>Dosya Oluşturuldu</div>
                </div>
                <div style='background: rgba(255,255,255,0.2); padding: 10px; border-radius: 8px;'>
                    <div style='font-size: 24px; font-weight: bold;'>~{int(total_tokens)}</div>
                    <div style='font-size: 12px; opacity: 0.9;'>Token Kullanıldı</div>
                </div>
                <div style='background: rgba(255,255,255,0.2); padding: 10px; border-radius: 8px;'>
                    <div style='font-size: 24px; font-weight: bold;'>{elapsed_time:.1f}s</div>
                    <div style='font-size: 12px; opacity: 0.9;'>İşlem Süresi</div>
                </div>
            </div>
            <div style='margin-top: 15px;'>
                <div style='font-weight: bold; margin-bottom: 10px;'>📁 Oluşturulan Dosyalar:</div>
                {file_list_html}
            </div>
            <div style='margin-top: 15px; padding: 10px; background: rgba(255,255,255,0.1); border-radius: 8px;'>
                <span style='font-weight: bold;'>Kalite Skoru:</span>
                <span style='font-size: 20px;'>{quality_score}/100</span>
            </div>
        </div>
        """
        # Project-analysis report appended after the success card.
        analysis_report = f"""
        <div style='margin-top: 20px; padding: 15px; background: #f3f4f6; border-radius: 8px;'>
            <h4 style='margin-top: 0;'>📊 Proje Analizi</h4>
            <ul style='margin: 10px 0;'>
                <li><strong>Karmaşıklık:</strong> {analysis['complexity']['context'] + analysis['complexity']['commands'] + analysis['complexity']['prompts']} puan</li>
                <li><strong>Sektör:</strong> {industry}</li>
                <li><strong>Önerilen Teknolojiler:</strong> {', '.join(analysis['tech_recommendations'][:3])}</li>
                <li><strong>Mimari:</strong> {analysis['architecture_suggestion']}</li>
            </ul>
        </div>
        """
        return success_msg + analysis_report, None, analysis
    except Exception as e:
        # Show the full traceback in the UI so users can report failures.
        import traceback
        error_detail = traceback.format_exc()
        return f"<p style='color:red;'>❌ Hata: {str(e)}</p><pre>{error_detail}</pre>", None, None
def download_all_professional():
    """Bundle all generated files into a ZIP and return it with a summary.

    Collects every file from all three sections, adds a generated README.md
    and manifest.json, writes the ZIP to the working directory and returns
    (gr.File update, HTML summary). Returns (None, error HTML) when nothing
    has been generated yet.
    """
    all_files = {}
    # Collect the files from every generated section.
    for section in ['context', 'commands', 'prompts']:
        if generated_files[section]:
            all_files.update(generated_files[section])
    if not all_files:
        return None, "<p style='color:red;'>❌ İndirilecek dosya yok.</p>"
    # Build the README.md for the package.
    # NOTE(review): analyze_project() never sets 'project_name',
    # 'project_type' or 'industry' in project_analysis, so these .get()
    # calls always fall back to their defaults — confirm intended.
    readme_content = f"""# {project_analysis.get('project_name', 'Project')} - Context Engineering
## 📋 Proje Özeti
- **Tür:** {project_analysis.get('project_type', 'N/A')}
- **Sektör:** {project_analysis.get('industry', 'N/A')}
- **Karmaşıklık:** {project_analysis.get('complexity', {}).get('context', 0) + project_analysis.get('complexity', {}).get('commands', 0) + project_analysis.get('complexity', {}).get('prompts', 0)} puan
## 📁 Dosya Yapısı
- `context/` - Proje bağlamı ve dokümantasyon ({len(generated_files['context'])} dosya)
- `commands/` - Otomasyon ve yönetim komutları ({len(generated_files['commands'])} dosya)
- `prompts/` - AI asistan prompt şablonları ({len(generated_files['prompts'])} dosya)
## 🚀 Başlangıç
1. Context dosyalarını okuyarak projeyi anlayın
2. Commands klasöründeki setup komutlarını çalıştırın
3. Prompts şablonlarını AI asistanınızla kullanın
## 📊 İstatistikler
- Toplam Dosya: {len(all_files)}
- Toplam Token: ~{sum(s['tokens'] for s in stats.values())}
- Oluşturma Süresi: {sum(s['time'] for s in stats.values()):.1f} saniye
---
Generated by Context Engineer v2.0
"""
    all_files['README.md'] = readme_content
    # Machine-readable manifest with stats and per-section file counts.
    manifest = {
        "version": "2.0",
        "generated_at": datetime.now().isoformat(),
        "statistics": stats,
        "project_analysis": project_analysis,
        "file_count": {
            "context": len(generated_files['context']),
            "commands": len(generated_files['commands']),
            "prompts": len(generated_files['prompts'])
        }
    }
    all_files['manifest.json'] = json.dumps(manifest, indent=2)
    # Build the ZIP bytes and write them to a timestamped file in cwd.
    zip_data = create_zip_from_files(all_files)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    zip_file_path = f"./context_engineering_pro_{timestamp}.zip"
    with open(zip_file_path, "wb") as f:
        f.write(zip_data)
    # HTML summary card shown next to the download component.
    total_size_kb = sum(len(content.encode('utf-8')) for content in all_files.values()) / 1024
    summary = f"""
    <div style='background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 30px; border-radius: 16px; color: white;'>
        <h2 style='margin-top: 0;'>🎉 Proje Paketi Hazır!</h2>
        <div style='display: grid; grid-template-columns: repeat(2, 1fr); gap: 20px; margin: 20px 0;'>
            <div style='background: rgba(255,255,255,0.2); padding: 20px; border-radius: 12px;'>
                <h3 style='margin-top: 0;'>📊 Genel İstatistikler</h3>
                <ul style='list-style: none; padding: 0;'>
                    <li>📁 <strong>Toplam Dosya:</strong> {len(all_files)}</li>
                    <li>💾 <strong>Paket Boyutu:</strong> {total_size_kb:.1f} KB</li>
                    <li>🎯 <strong>Token Kullanımı:</strong> ~{sum(s['tokens'] for s in stats.values())}</li>
                    <li>⏱️ <strong>Toplam Süre:</strong> {sum(s['time'] for s in stats.values()):.1f}s</li>
                </ul>
            </div>
            <div style='background: rgba(255,255,255,0.2); padding: 20px; border-radius: 12px;'>
                <h3 style='margin-top: 0;'>📂 Klasör Detayları</h3>
                <ul style='list-style: none; padding: 0;'>
                    <li>📘 <strong>Context:</strong> {len(generated_files['context'])} dosya</li>
                    <li>⚡ <strong>Commands:</strong> {len(generated_files['commands'])} dosya</li>
                    <li>💡 <strong>Prompts:</strong> {len(generated_files['prompts'])} dosya</li>
                    <li>📄 <strong>Ekstra:</strong> README.md, manifest.json</li>
                </ul>
            </div>
        </div>
        <div style='background: rgba(255,255,255,0.1); padding: 15px; border-radius: 8px; margin-top: 20px;'>
            <p style='margin: 0; text-align: center; font-size: 18px;'>
                ✨ Profesyonel context engineering paketiniz başarıyla oluşturuldu!
            </p>
        </div>
    </div>
    """
    return gr.File(value=zip_file_path, visible=True), summary
# Main interface: four tabs (project info, LLM config, generation, download).
with gr.Blocks(theme=gr.themes.Soft(), css="""
    .gradio-container {
        font-family: 'Inter', sans-serif;
    }
    .gr-button-primary {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        border: none;
    }
    .gr-button-secondary {
        background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
        border: none;
    }
""") as demo:
    gr.Markdown("""
    # 🚀 Context Engineer Pro v2.0
    ### Enterprise-Grade AI-Powered Project Structure Generator
    Projeleriniz için **akıllı**, **dinamik** ve **sektöre özgü** context engineering yapıları oluşturun.
    """)
    # Tab 1: project type, industry, features and free-text descriptions.
    with gr.Tab("🎯 Proje Bilgileri"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 🏢 Proje Detayları")
                project_type = gr.Dropdown(
                    choices=PROJECT_TYPES,
                    label="Proje Türü",
                    value=PROJECT_TYPES[0]
                )
                industry = gr.Dropdown(
                    choices=INDUSTRIES,
                    label="Sektör",
                    value="Teknoloji/Yazılım"
                )
                features = gr.CheckboxGroup(
                    choices=PROJECT_FEATURES,
                    label="Proje Özellikleri",
                    value=["Kullanıcı Yönetimi"]
                )
            with gr.Column(scale=2):
                gr.Markdown("### 📝 Proje Açıklaması")
                project_idea_input = gr.Textbox(
                    label="Proje Fikri",
                    lines=6,
                    value="Detaylı proje açıklamanızı buraya yazın..."
                )
                tech_details_input = gr.Textbox(
                    label="Teknik Detaylar (Opsiyonel)",
                    lines=4,
                    value="Kullanmayı düşündüğünüz teknolojiler, özel gereksinimler..."
                )
    # Tab 2: provider/model selection and API-key validation.
    with gr.Tab("🔧 LLM Konfigürasyonu"):
        with gr.Row():
            with gr.Column():
                llm_provider = gr.Dropdown(
                    choices=list(LLM_PROVIDERS.keys()),
                    label="AI Sağlayıcı",
                    value="Gemini"
                )
                # Model list starts empty; populated by update_models/validate_api.
                llm_model = gr.Dropdown(
                    choices=[],
                    label="Model",
                    interactive=False
                )
            with gr.Column():
                api_key_input = gr.Textbox(
                    label="API Anahtarı",
                    type="password"
                )
                validate_button = gr.Button("🔐 API Doğrula", variant="secondary")
                validation_status = gr.Markdown()
    # Tab 3: per-section generation buttons plus "generate all" and analysis.
    with gr.Tab("🏗️ Yapı Oluşturma"):
        gr.Markdown("""
        ### 🎨 Akıllı Dosya Üretimi
        Her bölüm, projenizin özelliklerine göre **dinamik olarak** oluşturulur.
        """)
        with gr.Row():
            context_btn = gr.Button("📘 Context Oluştur", variant="primary", size="lg")
            commands_btn = gr.Button("⚡ Commands Oluştur", variant="primary", size="lg")
            prompts_btn = gr.Button("💡 Prompts Oluştur", variant="primary", size="lg")
        with gr.Row():
            generate_all_btn = gr.Button("🎯 Hepsini Oluştur (Sırayla)", variant="secondary")
            analyze_btn = gr.Button("🔍 Proje Analizi", variant="secondary")
        output_display = gr.Markdown()
        analysis_display = gr.Markdown()
    # Tab 4: download the ZIP package or clear all generated state.
    with gr.Tab("📦 İndirme & İstatistikler"):
        with gr.Row():
            download_btn = gr.Button("📥 Profesyonel Paket İndir", variant="primary", size="lg")
            clear_btn = gr.Button("🗑️ Temizle", variant="secondary")
        download_file = gr.File(label="İndirilebilir Paket", visible=False)
        stats_display = gr.Markdown()

    # Event handlers
    def update_models(provider):
        """Refresh the model dropdown when the provider selection changes."""
        if provider in LLM_PROVIDERS:
            return gr.Dropdown(
                choices=LLM_PROVIDERS[provider].get("models", []),
                interactive=True
            )
        return gr.Dropdown(choices=[], interactive=False)

    def validate_api(provider, api_key):
        """Validate the API key and, on success, load the provider's models."""
        if not provider or not api_key:
            return gr.Markdown("<p style='color:orange;'>⚠️ Sağlayıcı ve API anahtarı gerekli.</p>"), gr.Dropdown()
        is_valid, message = validate_api_key(provider, api_key)
        if is_valid:
            models = get_llm_models(provider, api_key)
            # Pre-select the first model when any are available.
            return (
                gr.Markdown(f"<p style='color:green;'>✅ {message}</p>"),
                gr.Dropdown(choices=models, value=models[0] if models else None, interactive=True)
            )
        return (
            gr.Markdown(f"<p style='color:red;'>❌ {message}</p>"),
            gr.Dropdown(choices=[], interactive=False)
        )

    def analyze_project_details(project_type, project_idea, industry, features):
        """Run analyze_project() and render its result as an HTML report."""
        analysis = analyze_project(project_type, project_idea, industry, features)
        report = f"""
        <div style='background: #f9fafb; padding: 20px; border-radius: 12px; border: 1px solid #e5e7eb;'>
            <h3>🔍 Proje Analiz Raporu</h3>
            <div style='display: grid; grid-template-columns: repeat(2, 1fr); gap: 20px;'>
                <div>
                    <h4>📊 Karmaşıklık Analizi</h4>
                    <ul>
                        <li>Context dosyaları: ~{analysis['complexity']['context']} adet</li>
                        <li>Command dosyaları: ~{analysis['complexity']['commands']} adet</li>
                        <li>Prompt şablonları: ~{analysis['complexity']['prompts']} adet</li>
                    </ul>
                </div>
                <div>
                    <h4>🛠️ Teknoloji Önerileri</h4>
                    <ul>
                        {"".join(f"<li>{tech}</li>" for tech in analysis['tech_recommendations'][:5])}
                    </ul>
                </div>
            </div>
            <div style='margin-top: 20px;'>
                <h4>🏗️ Önerilen Mimari</h4>
                <p>{analysis['architecture_suggestion']}</p>
            </div>
            {f'''
            <div style='margin-top: 20px;'>
                <h4>🏢 Sektör Gereksinimleri ({industry})</h4>
                <ul>
                    {"".join(f"<li>{req}</li>" for req in analysis['industry_requirements'])}
                </ul>
            </div>
            ''' if analysis['industry_requirements'] else ''}
        </div>
        """
        return report

    def generate_all_sequential(provider, model, api_key, project_type, project_idea,
                                industry, features, tech_details):
        """Generate all three sections in order, concatenating the status HTML."""
        results = []
        for section in ['context', 'commands', 'prompts']:
            result, _, _ = generate_section_advanced(
                provider, model, api_key, project_type, project_idea,
                section, industry, features, tech_details
            )
            results.append(result)
            time.sleep(2)  # Crude rate limiting between LLM calls.
        return "<br><br>".join(results)

    def clear_all():
        """Reset all module-level state and clear the related UI components."""
        global generated_files, stats, project_analysis
        generated_files = {'context': {}, 'commands': {}, 'prompts': {}}
        stats = {'context': {'tokens': 0, 'time': 0}, 'commands': {'tokens': 0, 'time': 0}, 'prompts': {'tokens': 0, 'time': 0}}
        project_analysis = {}
        return gr.Markdown("✅ Tüm veriler temizlendi."), gr.File(visible=False), gr.Markdown("")

    # Wire up events
    llm_provider.change(fn=update_models, inputs=llm_provider, outputs=llm_model)
    validate_button.click(fn=validate_api, inputs=[llm_provider, api_key_input], outputs=[validation_status, llm_model])
    # Analyze button
    analyze_btn.click(
        fn=analyze_project_details,
        inputs=[project_type, project_idea_input, industry, features],
        outputs=analysis_display
    )
    # Section buttons: each lambda fixes the section argument.
    context_btn.click(
        fn=lambda p, m, a, pt, pi, i, f, t: generate_section_advanced(p, m, a, pt, pi, 'context', i, f, t),
        inputs=[llm_provider, llm_model, api_key_input, project_type, project_idea_input, industry, features, tech_details_input],
        outputs=[output_display, download_file, analysis_display]
    )
    commands_btn.click(
        fn=lambda p, m, a, pt, pi, i, f, t: generate_section_advanced(p, m, a, pt, pi, 'commands', i, f, t),
        inputs=[llm_provider, llm_model, api_key_input, project_type, project_idea_input, industry, features, tech_details_input],
        outputs=[output_display, download_file, analysis_display]
    )
    prompts_btn.click(
        fn=lambda p, m, a, pt, pi, i, f, t: generate_section_advanced(p, m, a, pt, pi, 'prompts', i, f, t),
        inputs=[llm_provider, llm_model, api_key_input, project_type, project_idea_input, industry, features, tech_details_input],
        outputs=[output_display, download_file, analysis_display]
    )
    generate_all_btn.click(
        fn=generate_all_sequential,
        inputs=[llm_provider, llm_model, api_key_input, project_type, project_idea_input, industry, features, tech_details_input],
        outputs=output_display
    )
    # Download and clear
    download_btn.click(fn=download_all_professional, outputs=[download_file, stats_display])
    clear_btn.click(fn=clear_all, outputs=[output_display, download_file, stats_display])

if __name__ == "__main__":
    # Local launch; share=False keeps the app off Gradio's public tunnel.
    demo.launch(debug=True, share=False)