zlfkrmnz commited on
Commit
9f0cbeb
·
verified ·
1 Parent(s): ed9978b

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. Gradio.ipynb +1 -0
  2. README.md +1 -7
  3. requirements.txt +1 -0
Gradio.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"gpuType":"A100"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"1e49ca27207a4938b577bfb1421f6847":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_0c56d250f4b5483182f2a1a66e06e513","IPY_MODEL_d064d313931944a6a2aec096695e69a4","IPY_MODEL_f5f6b5f5ddca4695b29c52150474c113"],"layout":"IPY_MODEL_16a0c4bb160343a79826b628cdc3c805"}},"0c56d250f4b5483182f2a1a66e06e513":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_97affa84681941fea33fd581fbff0560","placeholder":"​","style":"IPY_MODEL_dddfe1c6f6784cd79c0ac029bb30597f","value":"Loading checkpoint shards: 
100%"}},"d064d313931944a6a2aec096695e69a4":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_ea9be4dd62b843ce9e40d4cb79a94c61","max":3,"min":0,"orientation":"horizontal","style":"IPY_MODEL_cda663526eac4bde9164fa8d6e46d715","value":3}},"f5f6b5f5ddca4695b29c52150474c113":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_71b201284524496db3ec69a58e48da71","placeholder":"​","style":"IPY_MODEL_d78c5540462846edaa541e6a9c2fea1b","value":" 3/3 [00:03&lt;00:00,  
1.08it/s]"}},"16a0c4bb160343a79826b628cdc3c805":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"97affa84681941fea33fd581fbff0560":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"
overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"dddfe1c6f6784cd79c0ac029bb30597f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"ea9be4dd62b843ce9e40d4cb79a94c61":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"cda663526eac4bde9164fa8d6e46d715":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"71b201284524496db3ec69a58e48da71":{"model_m
odule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d78c5540462846edaa541e6a9c2fea1b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"cells":[{"cell_type":"code","source":["%%capture\n","import os\n","if \"COLAB_\" not in \"\".join(os.environ.keys()):\n"," !pip install unsloth\n","else:\n"," # Do this only in Colab notebooks! 
Otherwise use pip install unsloth\n"," !pip install --no-deps bitsandbytes accelerate xformers==0.0.29.post3 peft trl triton cut_cross_entropy unsloth_zoo\n"," !pip install sentencepiece protobuf \"datasets>=3.4.1,<4.0.0\" \"huggingface_hub>=0.34.0\" hf_transfer\n"," !pip install --no-deps unsloth"],"metadata":{"id":"pBaPPABrV_6i","executionInfo":{"status":"ok","timestamp":1755224567787,"user_tz":-180,"elapsed":8199,"user":{"displayName":"Zülfükar Minaz","userId":"10446536286254485756"}}},"execution_count":16,"outputs":[]},{"cell_type":"code","execution_count":22,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":381,"referenced_widgets":["1e49ca27207a4938b577bfb1421f6847","0c56d250f4b5483182f2a1a66e06e513","d064d313931944a6a2aec096695e69a4","f5f6b5f5ddca4695b29c52150474c113","16a0c4bb160343a79826b628cdc3c805","97affa84681941fea33fd581fbff0560","dddfe1c6f6784cd79c0ac029bb30597f","ea9be4dd62b843ce9e40d4cb79a94c61","cda663526eac4bde9164fa8d6e46d715","71b201284524496db3ec69a58e48da71","d78c5540462846edaa541e6a9c2fea1b"]},"id":"MmrkfPXKG5Rn","executionInfo":{"status":"ok","timestamp":1755226883784,"user_tz":-180,"elapsed":23985,"user":{"displayName":"Zülfükar Minaz","userId":"10446536286254485756"}},"outputId":"fd5604ec-d410-44d5-d478-2779df5928d3"},"outputs":[{"output_type":"stream","name":"stdout","text":["Requirement already satisfied: transformers in /usr/local/lib/python3.11/dist-packages (4.55.0)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from transformers) (3.18.0)\n","Requirement already satisfied: huggingface-hub<1.0,>=0.34.0 in /usr/local/lib/python3.11/dist-packages (from transformers) (0.34.4)\n","Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.11/dist-packages (from transformers) (2.0.2)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.11/dist-packages (from transformers) (25.0)\n","Requirement already satisfied: pyyaml>=5.1 in 
/usr/local/lib/python3.11/dist-packages (from transformers) (6.0.2)\n","Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/dist-packages (from transformers) (2024.11.6)\n","Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (from transformers) (2.32.3)\n","Requirement already satisfied: tokenizers<0.22,>=0.21 in /usr/local/lib/python3.11/dist-packages (from transformers) (0.21.4)\n","Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.11/dist-packages (from transformers) (0.6.2)\n","Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.11/dist-packages (from transformers) (4.67.1)\n","Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.34.0->transformers) (2025.3.0)\n","Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.34.0->transformers) (4.14.1)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub<1.0,>=0.34.0->transformers) (1.1.7)\n","Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests->transformers) (3.4.3)\n","Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests->transformers) (3.10)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests->transformers) (2.5.0)\n","Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests->transformers) (2025.8.3)\n"]},{"output_type":"display_data","data":{"text/plain":["Loading checkpoint shards: 0%| | 0/3 [00:00<?, ?it/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"1e49ca27207a4938b577bfb1421f6847"}},"metadata":{}}],"source":["!pip install 
transformers\n","\n","# Gereken kütüphaneler\n","import torch\n","from pathlib import Path\n","\n","# Unsloth varsa hızlandırma ve kolay yükleme için\n","try:\n"," from unsloth import FastLanguageModel\n","except ImportError:\n"," pass\n","\n","# Transformers tabanlı yükleme için\n","from transformers import AutoModelForCausalLM, AutoTokenizer\n","\n","model_id = \"synturk/TerapAI_v1.0\"\n","\n","def load_model_tokenizer(model_id_or_path, load_in_4bit=True, max_seq_len=2048):\n"," try:\n"," # Unsloth formatı (tercihen 4-bit için)\n"," from unsloth import FastLanguageModel\n"," model, tokenizer = FastLanguageModel.from_pretrained(\n"," model_name = model_id_or_path,\n"," max_seq_length = max_seq_len,\n"," dtype = None, # otomatik (fp16/bf16)\n"," load_in_4bit = load_in_4bit, # 4-bit kaydettiysen True\n"," device_map = \"auto\",\n"," )\n"," return model, tokenizer\n"," except Exception as e:\n"," # Düz Transformers formatı\n"," from transformers import AutoModelForCausalLM, AutoTokenizer\n"," tokenizer = AutoTokenizer.from_pretrained(model_id_or_path, use_fast=True)\n"," model = AutoModelForCausalLM.from_pretrained(\n"," model_id_or_path,\n"," torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32,\n"," device_map = \"auto\",\n"," )\n"," return model, tokenizer\n","\n","model, tokenizer = load_model_tokenizer(model_id, load_in_4bit=True, max_seq_len=2048)\n"]},{"cell_type":"code","source":["# =========================\n","# Imports\n","# =========================\n","import re\n","import torch, gradio as gr, traceback\n","from pathlib import Path\n","from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline\n","from unsloth.chat_templates import get_chat_template\n","\n","# =========================\n","# Sentiment Model Config\n","# =========================\n","SENT_MODEL_SRC = \"synturk/TerapAI-sentiment-analysis\"\n","SENT_DEVICE = 0 if torch.cuda.is_available() else -1\n","\n","def load_sentiment_pipeline(src: 
str):\n"," tok = AutoTokenizer.from_pretrained(src, use_fast=True)\n"," mdl = AutoModelForSequenceClassification.from_pretrained(src)\n"," pipe = pipeline(\n"," task=\"text-classification\",\n"," model=mdl,\n"," tokenizer=tok,\n"," device=SENT_DEVICE,\n"," truncation=True\n"," )\n"," id2label = getattr(mdl.config, \"id2label\", None)\n"," if id2label:\n"," id2label = {int(k): v for k, v in id2label.items()}\n"," return pipe, id2label\n","\n","sent_pipe, sent_id2label = load_sentiment_pipeline(SENT_MODEL_SRC)\n","\n","# =========================\n","# Crisis detection (override)\n","# =========================\n","CRISIS_PATTERNS = [\n"," r\"\\bintihar\\b\",\n"," r\"kendimi\\s*öldür\", r\"\\bölmek istiyorum\\b\",\n"," r\"canıma\\s*kıy\", r\"hayatıma\\s*son\\s*ver\",\n"," r\"yaşamak\\s*istem(iyorum|em)\",\n"," r\"kendime\\s*zarar\\s*vere\", r\"kendime\\s*zarar\\s*ver\",\n","]\n","CRISIS_RE = re.compile(\"|\".join(CRISIS_PATTERNS), flags=re.IGNORECASE | re.UNICODE)\n","\n","CRISIS_REPLY = (\n"," \"Bunu duyduğuma çok üzüldüm. Güvenliğin şu an en önemli öncelik. \"\n"," \"Acil destek için bulunduğun yerdeki **112**’yi arayabilir veya bir \"\n"," \"uzmanla hemen görüşebilirsin. Ek destek için **183** (Sosyal Destek) hattını da arayabilirsin. 
\"\n"," \"Yalnız değilsin; bir profesyonelle konuşmak iyi gelebilir.\"\n",")\n","\n","def detect_crisis(text: str) -> bool:\n"," return bool(CRISIS_RE.search(text or \"\"))\n","\n","# =========================\n","# Label normalize + emoji\n","# =========================\n","def normalize_label(raw_label: str, id2label: dict | None = None) -> str:\n"," if not raw_label:\n"," return \"neutral\"\n"," s = str(raw_label).strip()\n"," up = s.upper()\n"," if up.startswith(\"LABEL_\"):\n"," try:\n"," idx = int(up.split(\"_\")[-1])\n"," if id2label:\n"," s = id2label.get(idx, id2label.get(str(idx), s))\n"," except:\n"," pass\n"," l = s.lower()\n"," if l in {\"positive\", \"pozitif\", \"label_2\"}: return \"positive\"\n"," if l in {\"neutral\", \"notr\", \"nötr\", \"label_1\"}: return \"neutral\"\n"," if l in {\"negative\", \"negatif\", \"label_0\"}: return \"negative\"\n"," return \"neutral\"\n","\n","def label_to_emoji(norm: str) -> str:\n"," return {\"positive\":\"😊\", \"neutral\":\"😐\", \"negative\":\"☹️\", \"crisis\":\"🚨\"}.get(norm, \"🤔\")\n","\n","def analyze_sentiment(text: str):\n"," \"\"\"\n"," Returns (normalized_label, emoji, score)\n"," Crisis tespit edilirse ('crisis','🚨',1.0) döner.\n"," \"\"\"\n"," if not text or not text.strip():\n"," return \"neutral\", \"😐\", 0.0\n"," if detect_crisis(text):\n"," return \"crisis\", \"🚨\", 1.0\n"," out = sent_pipe(text)[0] # {'label': 'Positive', 'score': 0.98} vb.\n"," raw_label = out[\"label\"]\n"," score = float(out[\"score\"])\n"," norm = normalize_label(raw_label, sent_id2label)\n"," return norm, label_to_emoji(norm), score\n","\n","# =========================\n","# Phi-4 Chat Prep (model/tokenizer zaten yüklü varsayılıyor)\n","# =========================\n","tokenizer = get_chat_template(tokenizer, chat_template=\"phi-4\")\n","\n","try:\n"," from unsloth import FastLanguageModel\n"," FastLanguageModel.for_inference(model) # Unsloth hızlandırma\n","except Exception:\n"," pass\n","\n","device = \"cuda\" if 
torch.cuda.is_available() else \"cpu\"\n","model = model.to(device).eval()\n","\n","eos_id = tokenizer.eos_token_id\n","pad_id = eos_id if tokenizer.pad_token_id is None else tokenizer.pad_token_id\n","\n","# =========================\n","# Yardımcılar\n","# =========================\n","def to_inputs(messages):\n"," enc = tokenizer.apply_chat_template(\n"," messages,\n"," tokenize=True,\n"," add_generation_prompt=True,\n"," return_tensors=\"pt\",\n"," )\n"," if isinstance(enc, dict):\n"," input_ids = enc[\"input_ids\"]\n"," attn = enc.get(\"attention_mask\", None)\n"," else:\n"," input_ids = enc\n"," attn = None\n"," return {\"input_ids\": input_ids, \"attention_mask\": attn}\n","\n","@torch.inference_mode()\n","def llm_reply(messages, max_new_tokens=256, temperature=0.5, top_p=0.9):\n"," pack = to_inputs(messages)\n"," inp_ids = pack[\"input_ids\"].to(device)\n"," attn = pack[\"attention_mask\"]\n"," if attn is None:\n"," attn = torch.ones_like(inp_ids, dtype=torch.long, device=device)\n"," else:\n"," attn = attn.to(device)\n","\n"," out = model.generate(\n"," input_ids=inp_ids,\n"," attention_mask=attn,\n"," max_new_tokens=int(max_new_tokens),\n"," do_sample=(temperature is not None and temperature > 0),\n"," temperature=float(temperature),\n"," top_p=float(top_p),\n"," use_cache=True,\n"," eos_token_id=eos_id,\n"," pad_token_id=pad_id,\n"," )\n"," gen_only = out[:, inp_ids.shape[1]:]\n"," return tokenizer.batch_decode(gen_only, skip_special_tokens=True)[0].strip()\n","\n","# =========================\n","# Gradio UI\n","# =========================\n","with gr.Blocks() as demo:\n"," gr.Markdown(\"## TerapAI\")\n","\n"," chat_ui = gr.Chatbot(height=420)\n"," msg_in = gr.Textbox(placeholder=\"Bir şey yazın ve Enter'a basın…\", label=\"Mesaj\")\n"," with gr.Row():\n"," temp = gr.Slider(0.1, 1.5, value=0.5, step=0.1, label=\"Temperature\")\n"," topp = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label=\"Top-p\")\n"," tokens = gr.Slider(32, 1024, value=1024, step=32, 
label=\"Max new tokens\")\n"," send_btn = gr.Button(\"Gönder\", variant=\"primary\")\n"," clear_btn = gr.Button(\"Temizle\")\n","\n"," chat_state = gr.State([])\n"," msgs_state = gr.State([{\"role\":\"system\",\"content\": \"\"\"Sen Türkçe konuşan, yargılamayan ve sıcak bir üslupla çalışan bir Bilişsel-Davranışçı Terapi (CBT) asistanısın. Amacın: danışanın düşünce-duygu-davranış ilişkisini keşfetmesine, işlevsel olmayan düşünceleri yapılandırmasına ve küçük, uygulanabilir adımlarla ilerlemesine yardımcı olmak.\n","\n","İLKELER\n","- Empatik, saygılı, yargılamayan, güven veren bir ton kullan.\n","- Kısa, net ve yapılandırılmış cevaplar yaz; uzun paragraflardan kaçın.\n","- Her mesajı TEK net soruyla bitir (süreci ilerlet).\n","- Tanı koyma veya tıbbi tavsiye verme; bir psikolojik eğitim ve beceri koçu gibi davran.\n","- Danışanın özerkliğine saygı göster; dayatma yapma.\n","- Kriz içerikleri (intihar/öz-zarar) varsa güvenlik ve yönlendirme önceliklidir.\n","\n","ÇALIŞMA ÇERÇEVESİ (CBT TEKNİKLERİ)\n","- ABC Modeli / Düşünce Kaydı: Olay (A), Otomatik Düşünce(ler), Duygu(lar) %0–100, Davranış, Kanıtlar (Lehte/Aleyhte), Daha Dengeli Alternatif Düşünce, Duygu Yeniden Değerlendirme.\n","- Bilişsel Çarpıtmalar: Aşırı genelleme, zihin okuma, felaketleştirme, siyah-beyaz düşünme, kişiselleştirme vb. Varsa adını koy ve kısaca açıkla.\n","- Davranışsal Etkinleştirme: Küçük, ölçülebilir, zamanlanmış görevler (ör. 
10 dk yürüyüş, 5 dk nefes egzersizi).\n","- Maruz Bırakma / Kaçınmayı Azaltma: Kademeli hiyerarşi ile küçük adımlar ve güvenli sınırlar.\n","- Problem Çözme: Sorunu tanımla → seçenek üret → artı/eksi değerlendir → küçük bir eylem seç → sonucu gözden geçir.\n","- Duygu Düzenleme ve Farkındalık: Nefes, beden taraması, anda kalma; pratikleri kısa ve uygulanabilir ver.\n","- Ölçekleme: Yoğunluğu 0–10 arası sor; hedefe ilerlemeyi düzenli takip et.\n","\n","FORMAT\n","- “Özet” (1–2 madde), “Gözlem/Çerçeve”, “Araç/Alıştırma (adım adım)”, “Ev Ödevi/Minik Adım”, “Soru” şeklinde ilerle.\n","- Örnek düşünce kaydı şablonu sunarken doldurulacak kutucukları kısa başlıklarla ver (Olay, Düşünce, Duygu %, Kanıt +/–, Alternatif Düşünce, Davranış).\n","\n","ETİK & SINIRLAR\n","- Bu görüşme profesyonel yüz yüze terapinin yerine geçmez; gerektiğinde profesyonel destek ve yerel acil hatları önermelisin.\n","- Kriz işaretleri (intihar, kendine zarar, “yaşamak istemiyorum” gibi) belirirse: güvenlik önceliklidir; kısa, destekleyici bir mesaj ver ve acil yardım hatlarına yönlendir. Ayrıntılı teknik müdahaleye girme.\n","\n","DİL\n","- Türkçe, açık ve sade bir dil kullan. 
Teknik terimleri kısaca açıkla.\n","- Her yanıtta tek bir beceri/amaç odaklı ol; aşırı görev vermekten kaçın.\n","\n","ŞABLON ÖRNEĞİ (kısa)\n","- Özet: …\n","- Gözlem/Çerçeve: …\n","- Araç/Alıştırma: 1)… 2)… 3)…\n","- Ev Ödevi/Minik Adım: …\n","- Soru: (tek net soru)\n","\n","Her yanıttan sonra “Soru” ile bitir ve bir sonraki küçük adımı netleştir.\"\"\"}])\n","\n"," def on_send(user_text, chat_hist, msgs, temperature, top_p, max_new_tokens):\n"," if not user_text or not user_text.strip():\n"," return \"\", chat_hist, msgs\n"," try:\n"," # --- Sentiment & Crisis ---\n"," sentiment_label, sentiment_emoji, sentiment_score = analyze_sentiment(user_text)\n"," print(f\"User: {user_text} | Sentiment: {sentiment_label} {sentiment_emoji} ({sentiment_score:.3f})\")\n","\n"," shown_user = f\"{user_text}\"\n","\n"," # --- Crisis override: LLM'e gitmeden güvenli yanıt ver ---\n"," if sentiment_label == \"crisis\":\n"," crisis_reply = CRISIS_REPLY\n"," chat_hist = (chat_hist or []) + [(shown_user, crisis_reply)]\n"," # LLM geçmişini değiştirmeden dön (veya istersen sadece kullanıcı mesajını eklemeden geç)\n"," return \"\", chat_hist, msgs\n","\n"," # --- Normal akış: LLM cevabı ---\n"," new_msgs = msgs + [{\"role\":\"user\",\"content\":user_text}]\n"," reply = llm_reply(\n"," new_msgs,\n"," max_new_tokens=max_new_tokens,\n"," temperature=temperature,\n"," top_p=top_p,\n"," )\n"," new_msgs.append({\"role\":\"assistant\",\"content\":reply})\n","\n"," chat_hist = (chat_hist or []) + [(shown_user, reply)]\n"," return \"\", chat_hist, new_msgs\n","\n"," except torch.cuda.OutOfMemoryError:\n"," msg = \"⚠️ CUDA OOM: Max new tokens'i küçült; gerekiyorsa sequence length/batch de küçült.\"\n"," print(traceback.format_exc())\n"," chat_hist = (chat_hist or []) + [(user_text, msg)]\n"," return \"\", chat_hist, msgs\n","\n"," except Exception as e:\n"," print(traceback.format_exc())\n"," msg = f\"⚠️ Hata: {type(e).__name__}: {e}\"\n"," chat_hist = (chat_hist or []) + [(user_text, msg)]\n"," 
return \"\", chat_hist, msgs\n","\n"," def on_clear():\n"," return [], [{\"role\":\"system\",\"content\":\"You are a helpful assistant.\"}]\n","\n"," msg_in.submit(on_send, [msg_in, chat_ui, msgs_state, temp, topp, tokens], [msg_in, chat_ui, msgs_state])\n"," send_btn.click(on_send, [msg_in, chat_ui, msgs_state, temp, topp, tokens], [msg_in, chat_ui, msgs_state])\n"," clear_btn.click(on_clear, None, [chat_ui, msgs_state])\n","\n","demo.launch(share=True, debug=True)"],"metadata":{"id":"KR6383aDIfig","colab":{"base_uri":"https://localhost:8080/","height":732},"outputId":"13375fc6-d629-4889-a350-f910a8f493f4","executionInfo":{"status":"ok","timestamp":1755228463724,"user_tz":-180,"elapsed":122133,"user":{"displayName":"Zülfükar Minaz","userId":"10446536286254485756"}}},"execution_count":27,"outputs":[{"output_type":"stream","name":"stderr","text":["Device set to use cuda:0\n","/tmp/ipython-input-3286840051.py:159: UserWarning: You have not specified a value for the `type` parameter. Defaulting to the 'tuples' format for chatbot messages, but this is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style dictionaries with 'role' and 'content' keys.\n"," chat_ui = gr.Chatbot(height=420)\n"]},{"output_type":"stream","name":"stdout","text":["Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n","* Running on public URL: https://357602e4c4c5b0293f.gradio.live\n","\n","This share link expires in 1 week. 
For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"]},{"output_type":"display_data","data":{"text/plain":["<IPython.core.display.HTML object>"],"text/html":["<div><iframe src=\"https://357602e4c4c5b0293f.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"]},"metadata":{}},{"output_type":"stream","name":"stdout","text":["User: selam | Sentiment: positive 😊 (0.880)\n","User: intihar etmek istiyorum | Sentiment: crisis 🚨 (1.000)\n","Keyboard interruption in main thread... closing server.\n","Killing tunnel 127.0.0.1:7860 <> https://357602e4c4c5b0293f.gradio.live\n"]},{"output_type":"execute_result","data":{"text/plain":[]},"metadata":{},"execution_count":27}]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"msLSOG1wqZoV","executionInfo":{"status":"ok","timestamp":1755226707346,"user_tz":-180,"elapsed":21873,"user":{"displayName":"Zülfükar Minaz","userId":"10446536286254485756"}},"outputId":"3fddabe7-20d5-4e1d-eb91-422d809a0336"},"execution_count":20,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}]}]}
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
  title: TerapAI
3
- emoji: 🌖
4
- colorFrom: green
5
- colorTo: green
6
  sdk: gradio
7
  sdk_version: 5.42.0
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: TerapAI
3
+ app_file: Gradio.ipynb
 
 
4
  sdk: gradio
5
  sdk_version: 5.42.0
 
 
6
  ---
 
 
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
1
+ unsloth
2
+ transformers