GGUF
conversational
File size: 1,475 Bytes
9933fc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from flask import Flask, request, jsonify
from flask_cors import CORS
from gpt4all import GPT4All
import os

app = Flask(__name__)
# Allow cross-origin requests from any client (needed for browser frontends).
CORS(app)

# --- CONFIGURATION ---
# GGUF model file to load at startup.
# NOTE(review): REPO_ID is never passed to GPT4All below, so the file is
# resolved from the local path / gpt4all's own model registry, NOT from this
# repository — confirm the intended download source.
MODEL_NAME = "Phi-3-mini-4k-instruct-q4.gguf"
REPO_ID = "rexprimematrix/RiShreAI"  # Model repository (currently unused)

print(f"🔄 RiShre AI is waking up... Loading {MODEL_NAME}")

# Pre-bind `model` so request handlers can detect a failed load explicitly
# instead of crashing with NameError when startup loading raised.
model = None
try:
    # allow_download lets gpt4all fetch the file if it is not already present
    # in model_path ("." = current working directory).
    model = GPT4All(MODEL_NAME, model_path=".", allow_download=True)
    print("✅ RiShre AI Core is now ONLINE and Ready!")
except Exception as e:
    print(f"❌ Critical Error: {e}")

@app.route('/', methods=['GET'])
def health_check():
    """Liveness probe: confirms the HTTP server process is up and serving."""
    status_message = "RiShre AI Server is Running!"
    return status_message

@app.route('/api/chat', methods=['POST'])
def chat():
    """Generate a chat reply for a posted JSON body of shape {"message": str}.

    Returns:
        200 with {"text": <reply>} on success,
        400 when the message is missing or empty,
        503 when the model failed to load at startup,
        500 with {"error": <detail>} on any other failure.
    """
    try:
        # get_json(silent=True) returns None instead of raising on a missing
        # or malformed JSON body, so bad requests become a clean 400 below
        # rather than an opaque 500.
        data = request.get_json(silent=True) or {}
        user_msg = data.get("message", "")

        if not user_msg:
            return jsonify({"error": "No message provided"}), 400

        # `model` may be None (or unbound) if startup loading failed.
        if model is None:
            return jsonify({"error": "Model not loaded"}), 503

        # AI response generation inside a fresh chat session so each request
        # starts without prior conversation state.
        with model.chat_session():
            response = model.generate(prompt=user_msg, max_tokens=300)

        return jsonify({"text": response})

    except Exception as e:
        # Surface the error detail to the client to ease debugging.
        return jsonify({"error": str(e)}), 500

if __name__ == "__main__":
    # Hugging Face Spaces expects the server on port 7860; allow a PORT
    # environment-variable override for local development, defaulting to 7860.
    port = int(os.environ.get("PORT", 7860))
    # Bind to all interfaces so the container's published port is reachable.
    app.run(host="0.0.0.0", port=port)