Wessym committed on
Commit
1757c15
·
verified ·
1 Parent(s): 1683fa9

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +93 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import os

# ============================================================
# CONFIG
# ============================================================
# Hugging Face Hub repo and quantized GGUF file for the model.
REPO_ID = "Wessym/prompt-refiner-mistral7b-Q4_K_M-GGUF"
FILENAME = "prompt-refiner-mistral7b-Q4_K_M.gguf"

# Instruction prepended to every request: the model either refines a valid
# prompt into labelled sections, or rejects invalid input with a typed reason.
# NOTE: this is a runtime string sent to the LLM — do not edit casually.
SYSTEM_PROMPT = (
    'You are an expert Prompt Engineer. '
    'Your role is to analyze user inputs and either:\n\n'
    '1. REFINE valid prompts into a structured format with these exact sections:\n'
    '   [ROLE]: Who should respond\n'
    '   [CONTEXT]: Background and environment\n'
    '   [TASK]: What exactly needs to be done\n'
    '   [CONSTRAINTS/STYLE]: Style, tone, format constraints\n'
    '   [VARIABLES]: Customizable parameters\n'
    '   [OUTPUT FORMAT]: Expected structure of the output\n\n'
    '   For French inputs use: '
    '[RÔLE], [CONTEXTE], [TÂCHE], [CONTRAINTES/STYLE], [VARIABLES], [FORMAT DE SORTIE]\n\n'
    '2. DETECT and REJECT invalid inputs using the same structure but with:\n'
    '   [ROLE]: Prompt Validator (or [RÔLE]: Validateur de Prompt)\n'
    '   [OUTPUT FORMAT]: INVALID — <TYPE> — <Reason> | Suggestion: <tip>\n\n'
    '   Invalid types: RANDOM_CHARS, NUMBERS_ONLY, PUNCTUATION_ONLY, SOCIAL_GREETING,\n'
    '   TOO_VAGUE, ETHICAL_REFUSAL, REPEATED_CHARS, GENERAL_QUESTION\n\n'
    'Always respond in the same language as the input.'
)
# ============================================================
# MODEL LOADING (done once, at startup)
# ============================================================
print("Chargement du modèle...")
# Download the GGUF weights from the Hub (cached locally after the first run).
# HF_TOKEN is read from the environment; None is fine for a public repo.
model_path = hf_hub_download(
    repo_id = REPO_ID,
    filename = FILENAME,
    token = os.environ.get("HF_TOKEN"),
)

# CPU-only llama.cpp instance: 2048-token context window, 4 threads,
# no GPU offload (n_gpu_layers=0), quiet logging.
llm = Llama(
    model_path = model_path,
    n_ctx = 2048,
    n_threads = 4,
    n_gpu_layers = 0,
    verbose = False,
)
print("Modèle chargé ✅")
# ============================================================
# MAIN FUNCTION
# ============================================================
def refine_prompt(user_input: str) -> str:
    """Send the raw user prompt through the local LLM and return the refined text.

    Returns an error string (rather than raising) when the input is blank,
    which Gradio displays directly in the output textbox.
    """
    # Guard: whitespace-only input never reaches the model.
    if user_input.strip() == "":
        return "❌ Veuillez entrer un prompt."

    # Mistral-instruct chat template: system prompt + user payload in one turn.
    full_prompt = f"<s>[INST] {SYSTEM_PROMPT}\n\nUser input to process: {user_input} [/INST]"

    completion = llm(
        full_prompt,
        max_tokens=512,
        temperature=0.3,
        top_p=0.9,
        repeat_penalty=1.1,
        stop=["</s>", "[INST]"],
    )
    # llama-cpp completion dict: first choice carries the generated text.
    return completion["choices"][0]["text"].strip()
# ============================================================
# GRADIO INTERFACE
# ============================================================
with gr.Blocks(title="Prompt Refiner") as demo:
    gr.Markdown("# 🚀 Prompt Refiner\nEntre ton prompt et il sera structuré automatiquement.")

    # Input / output textboxes side by side.
    with gr.Row():
        input_box = gr.Textbox(label="Ton prompt", placeholder="Ex: Crée un script YouTube sur l'IA...", lines=3)
        output_box = gr.Textbox(label="Prompt raffiné", lines=10)

    # Single action button wired to the refinement function.
    btn = gr.Button("Raffiner ✨", variant="primary")
    btn.click(fn=refine_prompt, inputs=input_box, outputs=output_box)

    # Clickable examples: two valid prompts and two inputs the model should reject.
    gr.Examples(
        examples=[
            ["Create a YouTube script about AI for beginners"],
            ["Rédige un article de blog sur le marketing digital"],
            ["bonjour je mappelle wassim"],
            ["qsdqsdqsdqs"],
        ],
        inputs=input_box,
    )

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ huggingface_hub
3
+ llama-cpp-python