File size: 2,409 Bytes
2218f3b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52e0b19
 
2218f3b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = Flask(__name__)

# Load Hugging Face DialoGPT model
# NOTE: both downloads happen at import time, so the first start may be slow
# (weights are fetched from the Hugging Face hub and cached locally).
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

# Simulated new releases (could be replaced with TMDB API or scraped data)
# Each entry: "title" (matched case-insensitively against user messages),
# "genre" (a display string combining genre | platform | release date),
# and "description" (short synopsis shown in replies).
latest_releases = [
    {
        "title": "Neon Drift",
        "genre": "Sci-Fi | Netflix | April 10, 2025",
        "description": "A futuristic racer enters a deadly tournament to save his sister from a megacorp."
    },
    {
        "title": "The Forgotten Bloom",
        "genre": "Drama | Prime Video | April 9, 2025",
        "description": "A woman unearths family secrets while restoring her grandmother’s abandoned greenhouse."
    },
    {
        "title": "Shadow Protocol",
        "genre": "Action-Thriller | Disney+ | April 12, 2025",
        "description": "An ex-agent is forced back into action to prevent a digital war between superpowers."
    }
]

# Helper function to generate a reply
def generate_reply(user_input: str) -> str:
    """Generate a single-turn DialoGPT reply to *user_input*.

    The input is terminated with the tokenizer's EOS token (DialoGPT's
    turn separator), the model generates up to a total sequence length of
    1000 tokens, and only the newly generated portion (everything after
    the prompt) is decoded and returned.
    """
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
    # Inference only: no_grad avoids building an autograd graph, which
    # would waste memory on every request.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_length=1000,  # total length cap (prompt + generated tokens)
            pad_token_id=tokenizer.eos_token_id,
        )
    # Slice off the prompt tokens so only the model's reply is decoded.
    return tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

@app.route("/chat", methods=["POST"])
def chat():
    user_message = request.json.get("message", "").lower()

    if "new" in user_message or "release" in user_message:
        movie_list = "\n\n".join([
            f"🎬 *{movie['title']}*\n{movie['genre']}\n_{movie['description']}_" 
            for movie in latest_releases
        ])
        return jsonify({"response": f"Here are the latest releases:\n\n{movie_list}"})

    elif any(title.lower() in user_message for title in [m["title"].lower() for m in latest_releases]):
        movie = next(m for m in latest_releases if m["title"].lower() in user_message)
        return jsonify({
            "response": f"🎬 *{movie['title']}*\n{movie['genre']}\n\n_{movie['description']}_"
        })

    else:
        reply = generate_reply(user_message)
        return jsonify({"response": reply})

if __name__ == "__main__":
    app.run(debug=True)