from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
import numpy as np
import os

app = Flask(__name__)
CORS(app)

# Model and tokenizer are loaded lazily, either at startup or on the first /predict request.
model = None
tokenizer = None
MAX_LEN = 300  # maximum sequence length used when padding model inputs

# Load the trained model (registering its custom Attention layer) and the fitted tokenizer.
def load_resources():
    global model, tokenizer
    model_path = 'chatbot_performance_advanced.h5'
    tokenizer_path = 'tokenizer_advanced.pickle'
    
    # Custom Attention layer; it must be passed via custom_objects so the saved
    # model can be deserialized.
    class Attention(tf.keras.layers.Layer):
        def __init__(self, **kwargs):
            super(Attention, self).__init__(**kwargs)

        def build(self, input_shape):
            # Learnable scoring projection (features -> 1) plus a per-timestep bias.
            self.W = self.add_weight(name='attention_weight', shape=(input_shape[-1], 1),
                                     initializer='random_normal', trainable=True)
            self.b = self.add_weight(name='attention_bias', shape=(input_shape[1], 1),
                                     initializer='zeros', trainable=True)
            super(Attention, self).build(input_shape)

        def call(self, x):
            # Score each timestep, softmax over the time axis, and return the
            # attention-weighted sum of the inputs.
            e = tf.keras.backend.tanh(tf.keras.backend.dot(x, self.W) + self.b)
            a = tf.keras.backend.softmax(e, axis=1)
            output = x * a
            return tf.keras.backend.sum(output, axis=1)

    if os.path.exists(model_path) and os.path.exists(tokenizer_path):
        model = tf.keras.models.load_model(model_path, custom_objects={'Attention': Attention})
        with open(tokenizer_path, 'rb') as handle:
            tokenizer = pickle.load(handle)
        print("Advanced Model and Tokenizer loaded successfully.")
        return True
    return False

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/predict', methods=['POST'])
def predict():
    global model, tokenizer
    if model is None or tokenizer is None:
        if not load_resources():
            return jsonify({
                'error': 'Model is still training. Please wait a few minutes.',
                'status': 'training'
            }), 503

    data = request.get_json(silent=True) or {}

    # A {"ping": ...} payload is just a readiness check; no prediction is made.
    if data.get('ping'):
        return jsonify({'status': 'ready'})

    facts = data.get('facts', 'No context provided.')
    question = data.get('question', '')
    response = data.get('response', '')
    
    try:
        # Combine facts, question and candidate response into one tagged string,
        # then tokenize and pad to the length the model expects.
        text = f"[FACTS] {facts} [QUERY] {question} [RES] {response}".lower()
        seq = tokenizer.texts_to_sequences([text])
        pad = pad_sequences(seq, maxlen=MAX_LEN)
        
        # Single score in [0, 1]; threshold at 0.5 to flag the best response.
        prediction = model.predict(pad)[0][0]
        is_best = bool(prediction > 0.5)
        
        return jsonify({
            'probability': float(prediction),
            'is_best': is_best
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
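
# Example exchange (illustrative only; the payload and score below are made up,
# not taken from any real data):
#   POST /predict  {"facts": "The store opens at 9am.",
#                   "question": "When does the store open?",
#                   "response": "It opens at 9am."}
#   -> {"probability": 0.93, "is_best": true}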



if __name__ == '__main__':
    # Try to load the model at startup; /predict will retry lazily if the
    # artifacts are not present yet.
    load_resources()
    app.run(debug=True, port=5000)
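
# Running locally (a minimal sketch; assumes this file is saved as app.py and that
# chatbot_performance_advanced.h5 and tokenizer_advanced.pickle sit next to it):
#   python app.py
#   curl -X POST http://localhost:5000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"facts": "...", "question": "...", "response": "..."}'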