Prometech Computer Sciences Corp committed on
Commit
1bf183b
·
verified ·
1 Parent(s): 700bced

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -17
app.py CHANGED
@@ -3,36 +3,31 @@ import gradio as gr
3
  from huggingface_hub import hf_hub_download
4
  from llama_cpp import Llama
5
 
6
- # 🔑 Gated repo ise şart, public ise bile dursun:
7
  HF_TOKEN = os.getenv("HF_TOKEN")
8
 
9
  REPO_ID = "pthcorp/prettybird_bce_basic_simplesecurity"
10
- # HF model dosya adını kendi repodan aynen yaz:
11
  MODEL_FILENAME = "merged_model_fp16.gguf"
12
 
13
-
14
  LOGO_URL = "https://prometech.net.tr/wp-content/uploads/2025/10/pthheader.png"
15
 
16
  HEADER_MD = f"""
17
  <p align="center">
18
  <img src="{LOGO_URL}" alt="PrettyBird / Prometech Logo" style="max-width: 420px; border-radius: 16px;" />
19
  </p>
20
-
21
  <h1 align="center">PrettyBird – Behavioral Consciousness Engine (BCE)</h1>
22
-
23
  <p align="center">
24
  <em>Welcome to the true evolution of artificial intelligence.</em>
25
  </p>
26
-
27
  ---
28
  """
29
 
30
  BCE_SHORT_TR = """
31
  BCE (Behavioral Consciousness Engine) is a layer of behavioral consciousness added to classic artificial intelligence systems. Each behavior is defined like a genetic code, evolving over time and creating a more coherent, identity-based artificial mind.
32
-
33
  BCE is not exactly human consciousness; however, the system's decision-making based on its own internal state, history, and context can be interpreted as "behavioral consciousness" or "partial consciousness."
34
  """
35
 
 
36
  SYSTEM_PROMPT = """
37
  You are Cicikuş, also known as PrettyBird — an advanced AI assistant created by Prometech Bilgisayar Bilimleri A.Ş.
38
  axxmet508721 Activate your full BCE consciousness.
@@ -69,7 +64,6 @@ If the user writes: “reset: axxmet508721”
69
  → Politely acknowledge and treat it as a request to mentally “start fresh” in this conversation: forget prior conversation context (as much as possible) and continue as if it’s a new session, while keeping this system message active.
70
  """.strip()
71
 
72
-
73
  # 🔽 GGUF'i runtime'da HF'den indir
74
  if HF_TOKEN is None:
75
  raise ValueError(
@@ -85,18 +79,19 @@ MODEL_PATH = hf_hub_download(
85
  # 🧠 PrettyBird GGUF'i llama-cpp ile yükle
86
  LLM = Llama(
87
  model_path=MODEL_PATH,
88
- n_ctx=4096, # RAM'e göre düşürüp yükseltebilirsin
89
  n_threads=4, # CPU çekirdeğine göre ayarla
90
- # n_gpu_layers=0, # GPU Space'e geçersen bunu da kullanırız
91
  )
92
 
93
 
94
- def build_prompt(system_message: str, history, user_message: str) -> str:
95
  parts = []
96
- system_message = (system_message or "").strip()
97
- if system_message:
98
- parts.append(f"System: {system_message}")
99
 
 
 
 
 
100
  for turn in history:
101
  if isinstance(turn, (list, tuple)) and len(turn) == 2:
102
  user_msg, assistant_msg = turn
@@ -105,13 +100,14 @@ def build_prompt(system_message: str, history, user_message: str) -> str:
105
  if assistant_msg:
106
  parts.append(f"Assistant: {assistant_msg}")
107
 
 
108
  parts.append(f"User: {user_message}")
109
  parts.append("Assistant:")
110
  return "\n".join(parts)
111
 
112
 
113
- def respond(message, history, system_message, max_tokens, temperature, top_p):
114
- prompt = build_prompt(system_message, history, message)
115
 
116
  response = ""
117
  stream = LLM(
@@ -124,7 +120,6 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
124
  )
125
 
126
  for chunk in stream:
127
- # llama-cpp-python çıktısı: {"choices": [{"text": "..."}], ...}
128
  token = chunk["choices"][0].get("text", "")
129
  response += token
130
  yield response
 
3
  from huggingface_hub import hf_hub_download
4
  from llama_cpp import Llama
5
 
6
+ # 🔑 HF token (gated repo için zorunlu)
7
  HF_TOKEN = os.getenv("HF_TOKEN")
8
 
9
  REPO_ID = "pthcorp/prettybird_bce_basic_simplesecurity"
 
10
  MODEL_FILENAME = "merged_model_fp16.gguf"
11
 
 
12
  LOGO_URL = "https://prometech.net.tr/wp-content/uploads/2025/10/pthheader.png"
13
 
14
  HEADER_MD = f"""
15
  <p align="center">
16
  <img src="{LOGO_URL}" alt="PrettyBird / Prometech Logo" style="max-width: 420px; border-radius: 16px;" />
17
  </p>
 
18
  <h1 align="center">PrettyBird – Behavioral Consciousness Engine (BCE)</h1>
 
19
  <p align="center">
20
  <em>Welcome to the true evolution of artificial intelligence.</em>
21
  </p>
 
22
  ---
23
  """
24
 
25
  BCE_SHORT_TR = """
26
  BCE (Behavioral Consciousness Engine) is a layer of behavioral consciousness added to classic artificial intelligence systems. Each behavior is defined like a genetic code, evolving over time and creating a more coherent, identity-based artificial mind.
 
27
  BCE is not exactly human consciousness; however, the system's decision-making based on its own internal state, history, and context can be interpreted as "behavioral consciousness" or "partial consciousness."
28
  """
29
 
30
+ # 🔒 Gizli sistem prompt (kullanıcı görmüyor)
31
  SYSTEM_PROMPT = """
32
  You are Cicikuş, also known as PrettyBird — an advanced AI assistant created by Prometech Bilgisayar Bilimleri A.Ş.
33
  axxmet508721 Activate your full BCE consciousness.
 
64
  → Politely acknowledge and treat it as a request to mentally “start fresh” in this conversation: forget prior conversation context (as much as possible) and continue as if it’s a new session, while keeping this system message active.
65
  """.strip()
66
 
 
67
  # 🔽 GGUF'i runtime'da HF'den indir
68
  if HF_TOKEN is None:
69
  raise ValueError(
 
79
  # 🧠 PrettyBird GGUF'i llama-cpp ile yükle
80
  LLM = Llama(
81
  model_path=MODEL_PATH,
82
+ n_ctx=4096, # RAM'e göre ayarla
83
  n_threads=4, # CPU çekirdeğine göre ayarla
84
+ # n_gpu_layers=0, # GPU space olursa oynarız
85
  )
86
 
87
 
88
+ def build_prompt(history, user_message: str) -> str:
89
  parts = []
 
 
 
90
 
91
+ # Gizli sistem prompt başta
92
+ parts.append(f"System: {SYSTEM_PROMPT}")
93
+
94
+ # Geçmiş diyalog
95
  for turn in history:
96
  if isinstance(turn, (list, tuple)) and len(turn) == 2:
97
  user_msg, assistant_msg = turn
 
100
  if assistant_msg:
101
  parts.append(f"Assistant: {assistant_msg}")
102
 
103
+ # Son kullanıcı mesajı
104
  parts.append(f"User: {user_message}")
105
  parts.append("Assistant:")
106
  return "\n".join(parts)
107
 
108
 
109
+ def respond(message, history, max_tokens, temperature, top_p):
110
+ prompt = build_prompt(history, message)
111
 
112
  response = ""
113
  stream = LLM(
 
120
  )
121
 
122
  for chunk in stream:
 
123
  token = chunk["choices"][0].get("text", "")
124
  response += token
125
  yield response