robertkm23 commited on
Commit
c772beb
·
verified ·
1 Parent(s): 1cf1af6

uploading streamlit logic

Browse files
Files changed (3) hide show
  1. requirements.txt +5 -0
  2. serve_gru.py +90 -0
  3. streamlit_app.py +22 -0
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ streamlit==1.33.0
2
+ tensorflow==2.15.0
3
+ numpy>=1.20.0
4
+ requests>=2.0
5
+ huggingface-hub==0.32.0
serve_gru.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# serve_gru.py ────────────────────────────────────────────────
# Inference wrapper for a seq2seq GRU chatbot: downloads the trained
# Keras model and fitted tokenizer from the Hugging Face Hub at import
# time, then exposes a greedy-decoding `reply()` function.
import re, numpy as np, tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import tokenizer_from_json
from huggingface_hub import hf_hub_download

# --- download the artifacts from the HF model repo ---
# NOTE: these calls hit the network on import (cached locally by
# huggingface_hub after the first download).
MODEL_PATH = hf_hub_download(
    repo_id="robertkm23/chat_bot", filename="chatbot_seq2seq.keras",
    repo_type="model"
)
TOK_PATH = hf_hub_download(
    repo_id="robertkm23/chat_bot", filename="tokenizer.json",
    repo_type="model"
)
# Sequence length used for both encoder input padding and the decoding
# loop bound (presumably the length the model was trained with — TODO
# confirm against the training script).
MAXLEN = 22
# Sentence-boundary markers; their ids are looked up in the tokenizer
# vocabulary below, so the vocab must contain them.
START, END = "<start>", "<end>"
19
+ # ── utilidades ------------------------------------------------
20
+ def _norm(s: str) -> str:
21
+ s = re.sub(r"[^a-zA-Z0-9?!.]+", " ", s.lower())
22
+ s = re.sub(r"([?.!])", r" \1 ", s)
23
+ return re.sub(r"\s+", " ", s).strip()
24
+
25
def _pad(seq):
    """Post-pad (or truncate) each id sequence in *seq* to MAXLEN steps."""
    pad = tf.keras.preprocessing.sequence.pad_sequences
    return pad(seq, maxlen=MAXLEN, padding="post")
29
+
30
# ── load model and tokenizer ----------------------------------
print("β€£ cargando modelo y tokenizer…", end="", flush=True)
model = load_model(MODEL_PATH)
with open(TOK_PATH, encoding="utf-8") as f:
    tok = tokenizer_from_json(f.read())

# Recover the individual layers by name; assumes the training script
# named them "emb", "enc_gru", "dec_gru" and "dense" — TODO confirm
# against the training code (get_layer raises ValueError otherwise).
emb_layer = model.get_layer("emb")
enc_gru = model.get_layer("enc_gru")
dec_gru = model.get_layer("dec_gru")
dense = model.get_layer("dense")

# Encoder sub-model: first model input → enc_gru.output[1], presumably
# the final hidden state of a GRU built with return_state=True — TODO
# confirm output ordering.
enc_model = tf.keras.Model(model.input[0], enc_gru.output[1])
# Bare GRU cell so the decoder can be stepped one token at a time.
dec_cell = dec_gru.cell

# Special-token ids; a missing token raises KeyError here, at import.
UNK_ID = tok.word_index["<unk>"]
START_ID = tok.word_index[START]
END_ID = tok.word_index[END]

print(" listo 🟒")
49
+
50
# ── single decoder step ---------------------------------------
def _step(tok_id, state):
    """Run one decoder step.

    tok_id: int id of the previously emitted token.
    state:  one-element list holding the decoder GRU state tensor.
    Returns (logits, new_state) where logits is a 1-D numpy array over
    the vocabulary with <unk> masked to -1e9, and new_state is a
    one-element list with the updated GRU state.
    """
    # token -> embedding
    x = tf.constant([[tok_id]], dtype=tf.int32)  # (1,1)
    x = emb_layer(x)  # (1,1,emb)
    x = tf.squeeze(x, axis=1)  # (1,emb)
    h, _ = dec_cell(x, states=state)  # (1,units)
    logits = dense(h)[0].numpy()  # (vocab,)
    logits[UNK_ID] = -1e9  # never emit <unk>
    return logits, [h]
60
+
61
# ── greedy inference function ---------------------------------
def reply(msg: str, max_len: int = MAXLEN) -> str:
    """Generate a chatbot response for *msg* by greedy decoding.

    The message is normalised, wrapped in the <start>/<end> markers,
    encoded once, then decoded token by token until <end> (or a stray
    <start>), a three-in-a-row repeat, or *max_len* steps.
    """
    # Normalise + tokenise the prompt, then run the encoder once.
    ids = tok.texts_to_sequences([f"{START} {_norm(msg)} {END}"])
    h_enc = enc_model.predict(_pad(ids), verbose=0)  # (1,units)
    state = [tf.convert_to_tensor(h_enc)]            # [(1,units)]

    out_ids = []
    cur = START_ID
    for _ in range(max_len):
        logits, state = _step(cur, state)
        cur = int(np.argmax(logits))  # greedy: most probable token

        # Stop conditions: sentence end / degenerate 3x repetition.
        if cur in (END_ID, START_ID):
            break
        if out_ids[-2:] == [cur, cur]:
            break

        out_ids.append(cur)

    # Map ids back to words.
    words = [tok.index_word[i] for i in out_ids]
    return " ".join(words) if words else "(sin respuesta)"
84
+
85
# ── CLI demo (optional) ---------------------------------------
if __name__ == "__main__":
    # Simple read-eval loop for manual testing from a terminal.
    while True:
        try:
            q = input("TΓΊ: ").strip()
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of a traceback.
            break
        if not q:
            continue
        print("Bot:", reply(q))
streamlit_app.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Minimal Streamlit chat UI over the GRU chatbot.
import streamlit as st
from serve_gru import reply  # model download + load happen on first import

st.set_page_config(page_title="Chatbot GRU", page_icon="πŸ€–")
st.title("πŸ’¬ Chatbot GRU (Cornell Movie Dialogs)")

# Initialise the per-session chat history on first run.
if "history" not in st.session_state:
    st.session_state.history = []

# Built-in chat input box at the bottom of the page.
msg = st.chat_input("Escribe tu mensaje...")
if msg:
    # Append the user's message
    st.session_state.history.append(("user", msg))
    # Query the model for a response
    bot_resp = reply(msg)
    st.session_state.history.append(("assistant", bot_resp))

# Render the whole conversation (Streamlit reruns the script each event).
for role, text in st.session_state.history:
    st.chat_message(role).markdown(text)