ZakyF committed on
Commit
ce56fb1
·
1 Parent(s): 1f0a662
.gitignore ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ .venv/
6
+ venv/
7
+
8
+ # Environment
9
+ .env
10
+ .secrets
11
+
12
+ # AI/ML
13
+ checkpoint-*/
14
+ logs/
15
+ results/
16
+ wandb/
17
+
18
+ # OS
19
+ .DS_Store
20
+ Thumbs.db
Archon_Development.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
app.py CHANGED
@@ -1,79 +1,58 @@
1
- import gradio as gr
2
- import torch
3
  import os
 
4
  from transformers import pipeline
 
5
 
6
- # Konfigurasi Kategori sesuai Proposal Archon
 
7
  CATEGORIES = ["Income", "Bills", "Transport", "Retail/E-commerce", "Cash Withdrawal", "Transfer Out", "General Debit"]
8
 
9
- class ArchonEngine:
10
- def __init__(self, model_path="archon_v1"):
11
- # Pilar 1: NLP Transaction Classifier [cite: 83]
12
- self.classifier = pipeline(
13
- "text-classification",
14
- model=model_path,
15
- tokenizer=model_path
16
- )
17
 
18
- def process(self, text, amount, income, monthly_spending):
19
- # 1. Klasifikasi Transaksi
20
- pred = self.classifier(text)[0]
21
- label_id = int(pred['label'].split('_')[-1])
22
- category = CATEGORIES[label_id]
23
- conf = pred['score']
24
 
25
- # 2. Pilar 2: Machine Learning Predictive Model (Risk) [cite: 85]
26
- risk_score = 0.05
27
- ratio = amount / income if income > 0 else 0
28
- if ratio >= 0.25: risk_score += 0.45
29
 
30
- spend_rate = monthly_spending / income if income > 0 else 0
31
- if spend_rate >= 0.85: risk_score += 0.35
 
 
32
 
33
- if category in ["Cash Withdrawal", "Transfer Out"]: risk_score += 0.10
 
34
 
35
- risk_level = "High" if risk_score >= 0.6 else ("Medium" if risk_score >= 0.3 else "Low")
36
-
37
- # 3. Pilar 3: Next Best Offer (NBO) Engine [cite: 87]
38
- if risk_level == "High":
39
- recommendation = "Set immediate budget alert + suggest emergency saving plan; show debt counseling resources."
40
- elif category == "Income":
41
- recommendation = "Recommend automatic split: 10% to Emergency Fund, 5% to Investments."
42
- elif category in ["Retail/E-commerce", "General Debit"]:
43
- recommendation = "Offer discount coupons / loyalty suggestion or roundup saving feature."
44
- else:
45
- recommendation = "Maintain current budget; propose small Auto-Save (Rp20k/day)."
46
 
47
  return {
48
- "Kategori (Pilar 1)": f"{category} ({conf*100:.2f}%)",
49
- "Level Risiko (Pilar 2)": f"{risk_level} (Score: {risk_score:.2f})",
50
- "Rekomendasi NBO (Pilar 3)": recommendation
51
  }
52
 
53
- # Inisialisasi Engine
54
- # Pastikan folder 'archon_v1' ada di direktori yang sama
55
- engine = ArchonEngine("archon_v1")
56
-
57
- # UI Interface Gradio
58
- with gr.Blocks(title="Archon-AI: Financial Resilience Engine") as demo:
59
- gr.Markdown("# 🛡️ Archon-AI")
60
- gr.Markdown("### Financial Resilience Engine berbasis AI untuk Perbankan Indonesia [cite: 8]")
61
-
62
- with gr.Row():
63
- with gr.Column():
64
- input_text = gr.Textbox(label="Narasi Transaksi", placeholder="Contoh: GAJI PT MAJU JAYA")
65
- input_amount = gr.Number(label="Jumlah Transaksi (Rp)")
66
- input_income = gr.Number(label="Total Pendapatan Bulanan (Rp)")
67
- input_spending = gr.Number(label="Total Pengeluaran Bulan Ini (Rp)")
68
- btn = gr.Button("Analisis dengan Archon", variant="primary")
69
-
70
- with gr.Column():
71
- output = gr.JSON(label="Hasil Analisis AI")
72
-
73
- btn.click(
74
- fn=engine.process,
75
- inputs=[input_text, input_amount, input_income, input_spending],
76
- outputs=output
77
- )
78
-
79
- demo.launch()
 
 
 
1
  import os
2
+ import gradio as gr
3
  from transformers import pipeline
4
+ from huggingface_hub import InferenceClient
5
 
6
+ # Konfigurasi Pilar 1: Classifier (IndoBERT)
7
+ MODEL_PATH = "archon_v1"
8
  CATEGORIES = ["Income", "Bills", "Transport", "Retail/E-commerce", "Cash Withdrawal", "Transfer Out", "General Debit"]
9
 
10
+ # Konfigurasi Pilar 3: Generative NBO (Mistral-7B)
11
+ HF_TOKEN = os.getenv("HF_TOKEN")
12
+ llm_client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=HF_TOKEN)
 
 
 
 
 
13
 
14
+ class ArchonSystem:
15
+ def __init__(self):
16
+ # Pilar 1: Load Local Classifier
17
+ self.classifier = pipeline("text-classification", model=MODEL_PATH, tokenizer=MODEL_PATH)
 
 
18
 
19
+ def analyze(self, text, amount, income, monthly_spending):
20
+ # 1. NLP Classification (Pilar 1)
21
+ pred = self.classifier(text)[0]
22
+ cat = CATEGORIES[int(pred['label'].split('_')[-1])]
23
 
24
+ # 2. Risk Prediction (Pilar 2: Early Warning System)
25
+ # Menggunakan logika rasio pengeluaran adaptif
26
+ risk_score = (amount / income if income > 0 else 0) + (monthly_spending / income if income > 0 else 0)
27
+ risk_level = "High" if risk_score > 0.8 else ("Medium" if risk_score > 0.4 else "Low")
28
 
29
+ # 3. Generative NBO Engine (Pilar 3: Personal Recommendation) [cite: 87, 136]
30
+ prompt = f"Role: Financial Advisor Bank Profesional. Nasabah bertransaksi {cat} sebesar Rp{amount:,.0f}. Risiko: {risk_level}. Beri 1 saran finansial singkat, natural, dan ramah dalam Bahasa Indonesia (Maks 20 kata)."
31
 
32
+ try:
33
+ nbo_msg = llm_client.chat_completion(messages=[{"role": "user", "content": prompt}], max_tokens=80).choices[0].message.content
34
+ except:
35
+ nbo_msg = "Pertahankan kebiasaan menabung Anda dan pantau pengeluaran melalui dashboard Archon."
 
 
 
 
 
 
 
36
 
37
  return {
38
+ "Analysis": f"Category: {cat} | Resilience Status: {risk_level}",
39
+ "Archon's Personalized Advice": nbo_msg
 
40
  }
41
 
42
+ # UI Skala Industri
43
+ archon = ArchonSystem()
44
+ demo = gr.Interface(
45
+ fn=archon.analyze,
46
+ inputs=[
47
+ gr.Textbox(label="Transaction Narrative"),
48
+ gr.Number(label="Amount (Rp)"),
49
+ gr.Number(label="Monthly Income (Rp)"),
50
+ gr.Number(label="Total Current Spending (Rp)")
51
+ ],
52
+ outputs="json",
53
+ title="Archon-AI: Financial Resilience Engine",
54
+ description="Managed Services Provider (MSP) solution for Indonesian Banking."
55
+ )
56
+
57
+ if __name__ == "__main__":
58
+ demo.launch()
 
 
 
 
 
 
 
 
 
 
archon_v1/archon_v1_folder_backup/config.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_num_labels": 7,
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "directionality": "bidi",
9
+ "dtype": "float32",
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "id2label": {
14
+ "0": "LABEL_0",
15
+ "1": "LABEL_1",
16
+ "2": "LABEL_2",
17
+ "3": "LABEL_3",
18
+ "4": "LABEL_4",
19
+ "5": "LABEL_5",
20
+ "6": "LABEL_6"
21
+ },
22
+ "initializer_range": 0.02,
23
+ "intermediate_size": 3072,
24
+ "label2id": {
25
+ "LABEL_0": 0,
26
+ "LABEL_1": 1,
27
+ "LABEL_2": 2,
28
+ "LABEL_3": 3,
29
+ "LABEL_4": 4,
30
+ "LABEL_5": 5,
31
+ "LABEL_6": 6
32
+ },
33
+ "layer_norm_eps": 1e-12,
34
+ "max_position_embeddings": 512,
35
+ "model_type": "bert",
36
+ "num_attention_heads": 12,
37
+ "num_hidden_layers": 12,
38
+ "output_past": true,
39
+ "pad_token_id": 0,
40
+ "pooler_fc_size": 768,
41
+ "pooler_num_attention_heads": 12,
42
+ "pooler_num_fc_layers": 3,
43
+ "pooler_size_per_head": 128,
44
+ "pooler_type": "first_token_transform",
45
+ "position_embedding_type": "absolute",
46
+ "problem_type": "single_label_classification",
47
+ "transformers_version": "4.57.3",
48
+ "type_vocab_size": 2,
49
+ "use_cache": true,
50
+ "vocab_size": 50000
51
+ }
archon_v1/archon_v1_folder_backup/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c0685f1b560a84bcddfd6c9723a3f1d4b2ba1ea5e616a9159b6e8a8a82698e5
3
+ size 497810452
archon_v1/archon_v1_folder_backup/runs/Jan16_09-24-07_f599b1c76637/events.out.tfevents.1768555450.f599b1c76637.430.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce27747e29d2b852b01069e98f4e11fc04aa61fcdcccb300d99c2133cb2a93bb
3
+ size 6886
archon_v1/archon_v1_folder_backup/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
archon_v1/archon_v1_folder_backup/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
archon_v1/archon_v1_folder_backup/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "BertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
archon_v1/archon_v1_folder_backup/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt CHANGED
@@ -1,4 +1,7 @@
1
  transformers
2
  torch
 
 
3
  pandas
4
- numpy
 
 
1
  transformers
2
  torch
3
+ huggingface_hub
4
+ gradio
5
  pandas
6
+ numpy
7
+ accelerate