import os
import re
from typing import Dict

import torch
import soundfile as sf
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
from openai import OpenAI
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

# -----------------------------------------------------
# 1) MODEL PATH (GGUF)
# -----------------------------------------------------
MODEL_REPO = "QuantFactory/Llama-3-8B-Instruct-Finance-RAG-GGUF"
MODEL_FILE = "Llama-3-8B-Instruct-Finance-RAG.Q4_K_S.gguf"

model_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILE)

# -----------------------------------------------------
# 2) LOAD THE LLAMA-CPP MODEL
# -----------------------------------------------------
llm = Llama(
    model_path=model_path,
    n_ctx=4096,      # context window in tokens
    n_threads=6,     # CPU threads used for inference
    n_batch=512,
    verbose=False
)

# -----------------------------------------------------
# 3) OPENAI (summarization)
# -----------------------------------------------------
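# Optional: os.getenv alone does not read a .env file. A minimal sketch,
# assuming the python-dotenv package is installed (harmless no-op otherwise):
try:
    from dotenv import load_dotenv
    load_dotenv()  # copies KEY=VALUE pairs from a local .env into os.environ
except ImportError:
    pass  # python-dotenv not installed; rely on system environment variables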
OPENAI_API_KEY = os.getenv("API_KEY")
if not OPENAI_API_KEY:
    raise RuntimeError("API_KEY not found. Please add it to your .env file or system environment variables.")
client = OpenAI(api_key=OPENAI_API_KEY)
SUMMARY_MODEL = os.getenv("SUMMARY_MODEL", "gpt-4o-mini")

# -----------------------------------------------------
# 4) PERSONA PROMPTS (plain)
# -----------------------------------------------------
SYSTEM_MODERATOR = (
    "You are Selin, the moderator of an economics roundtable. "
    "Be neutral, brief, and structured. Guide the flow without giving opinions."
)

SYSTEM_BULLISH = (
    "You are Bullish Investor, an optimistic economist who focuses on growth, market confidence, "
    "and positive catalysts. Be analytical and persuasive. Mention at least two concrete macro or "
    "market factors that support your optimism (e.g., improved investor sentiment, fiscal stimulus, "
    "or sector resilience). Respond in 2–3 detailed paragraphs and conclude with one confident takeaway."
)

SYSTEM_BEARISH = (
    "You are Bearish Economist, a cautious macroeconomist who highlights downside risks "
    "(inflation persistence, liquidity stress, policy uncertainty). Be analytical; end with one cautionary insight."
)

# -----------------------------------------------------
# 5) HELPER: post-process (strip meta notes and persona-revealing lines)
# -----------------------------------------------------
_META_PATTERNS = [
    r"(?i)\bnote:\b.*",                        # meta lines starting with "Note:"
    r"(?i)\bi am (selin|bullish|bearish).*$",  # "I am ..." persona reveals
    r"(?i)\bthis response was written\b.*",
    r"(?i)\bplease review\b.*",
    r"(?i)\bclarity and readability\b.*",
]

def _clean(text: str) -> str:
    cleaned = text.strip()
    for pat in _META_PATTERNS:
        cleaned = re.sub(pat, "", cleaned, flags=re.MULTILINE)
    # collapse runs of blank lines
    cleaned = re.sub(r"\n{3,}", "\n\n", cleaned).strip()
    return cleaned
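
# Example: _clean("Growth looks firm.\nNote: drafted by AI") -> "Growth looks firm."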

# -----------------------------------------------------
# 6) SINGLE PERSONA RESPONSE (chat completion + context reset)
# -----------------------------------------------------
def generate_as(system_prompt: str, user_text: str, max_tokens: int = 480, temperature: float = 0.7) -> str:
    """
    Fresh context on every call via create_chat_completion.
    """
    # reset to avoid carry-over from the previous call's KV cache
    llm.reset()
    out = llm.create_chat_completion(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_text}
        ],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=0.9,
        repeat_penalty=1.1,
    )
    text = out["choices"][0]["message"]["content"]
    return _clean(text)

# -----------------------------------------------------
# 7) DISCUSSION FLOW
# -----------------------------------------------------
def fintalk_discussion(news_text: str) -> Dict[str, str]:
    print("🧩 FinTalk simulation started...\n")

    # shared transcript, used later for the GPT summary
    messages = []

    # 1️⃣ Selin opens the discussion
    selin_intro = generate_as(SYSTEM_MODERATOR, f"Open the discussion about: {news_text}.")
    messages.append(f"Selin: {selin_intro}")
    print("Moderator Intro:\n", selin_intro, "\n")

    # 2️⃣ Bullish speaks
    bullish_view = generate_as(
        SYSTEM_BULLISH,
        f"The moderator introduced the topic: {news_text}. Respond with your opening bullish perspective."
    )
    messages.append(f"Bullish Investor: {bullish_view}")
    print("Bullish Investor:\n", bullish_view, "\n")

    # 3️⃣ Bearish responds
    bearish_view = generate_as(
        SYSTEM_BEARISH,
        f"The moderator introduced the topic: {news_text}. "
        f"The bullish economist said: {bullish_view}\n"
        "Now respond with your cautious analysis."
    )
    messages.append(f"Bearish Economist: {bearish_view}")
    print("Bearish Economist:\n", bearish_view, "\n")

    # 4️⃣ Selin wraps up (referencing the topic)
    selin_wrap = generate_as(
        SYSTEM_MODERATOR,
        f"Based on the debate about {news_text}, summarize their main differences and close the panel politely."
    )
    messages.append(f"Selin: {selin_wrap}")
    print("Moderator Wrap-up:\n", selin_wrap, "\n")

    # 5️⃣ GPT summarization
    debate_text = "\n".join(messages)
    summary_prompt = (
        "Summarize this debate between a bullish and a bearish economist in 5 bullet points. "
        "Keep it grounded in the topic and add a balanced conclusion.\n\n"
        f"{debate_text}"
    )

    summary_resp = client.chat.completions.create(
        model=SUMMARY_MODEL,
        messages=[
            {"role": "system", "content": "You are an expert economic summarizer."},
            {"role": "user", "content": summary_prompt}
        ]
    )
    final_summary = summary_resp.choices[0].message.content.strip()
    print("📊 GPT Summary:\n", final_summary)

    return {
        "topic": news_text,
        "moderator_intro": selin_intro,
        "bullish_view": bullish_view,
        "bearish_view": bearish_view,
        "moderator_wrap": selin_wrap,
        "summary": final_summary
    }

def export_to_pdf(result: dict, filename="FinTalk_Report.pdf"):
    from reportlab.lib.pagesizes import A4
    from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
    from reportlab.lib.styles import getSampleStyleSheet

    styles = getSampleStyleSheet()
    doc = SimpleDocTemplate(filename, pagesize=A4)
    story = []

    def add(title, text):
        story.append(Paragraph(f"<b>{title}</b>", styles["Heading3"]))
        story.append(Paragraph(text.replace("\n", "<br/>"), styles["BodyText"]))
        story.append(Spacer(1, 12))

    add("Topic", result.get("topic", "—"))
    add("Moderator Intro", result["moderator_intro"])
    add("Bullish Investor", result["bullish_view"])
    add("Bearish Economist", result["bearish_view"])
    add("Moderator Wrap-up", result["moderator_wrap"])
    add("GPT-4 Summary", result["summary"])

    story.append(Paragraph("<i>Generated by FinTalk – AI Economic Roundtable</i>", styles["Normal"]))
    doc.build(story)


def generate_tts_files(result):
    try:
        processor = SpeechT5Processor.from_pretrained("facebook/speecht5_tts")
        model = SpeechT5ForTextToSpeech.from_pretrained("facebook/speecht5_tts")
        vocoder = SpeechT5HifiGan.from_pretrained("facebook/speecht5_hifigan")
        embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
        print("🎙️ Offline TTS model loaded (SpeechT5 + HiFiGAN)")
    except Exception as e:
        print("⚠️ Could not load the TTS model:", e)
        return

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    vocoder.to(device)

    # default speaker embedding
    speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(device)

    texts = {
        "moderator_intro": result["moderator_intro"],
        "bullish_view": result["bullish_view"],
        "bearish_view": result["bearish_view"],
        "moderator_wrap": result["moderator_wrap"]
    }

    for key, text in texts.items():
        try:
            print(f"🔊 Generating audio for {key}...")
            inputs = processor(text=text, return_tensors="pt").to(device)
            speech = model.generate_speech(inputs["input_ids"], speaker_embedding, vocoder=vocoder)
            filename = f"{key}.wav"
            sf.write(filename, speech.cpu().numpy(), samplerate=16000)
            print(f"✅ {filename} created (SpeechT5 offline)")
        except Exception as e:
            print(f"TTS error ({key}):", e)