ramcav committed on
Commit
3382f5d
·
0 Parent(s):

Initial setup with generator, gradio and server

Browse files
Files changed (7) hide show
  1. .gitignore +3 -0
  2. app.py +4 -0
  3. generator.py +110 -0
  4. mcp_gradio.py +24 -0
  5. mcp_server.py +24 -0
  6. prompts/coding.txt +45 -0
  7. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .env
2
+ venv/
3
+ __pycache__/
app.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from mcp_gradio import main
2
+
3
if __name__ == "__main__":
    # Entry point: delegates to mcp_gradio.main, which builds and launches
    # the Gradio UI (served with mcp_server=True in that module).
    main()
generator.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import requests
3
+ import os
4
+ from dotenv import load_dotenv
5
+ from typing import Optional
6
+
7
# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()
OPENAI_API_KEY=os.getenv("OPENAI_API_KEY")

# Short aliases for the OpenAI model names used by the wrapper functions below.
MODELS = {
    "nano":"gpt-4.1-nano",
    "mini":"gpt-4.1-mini"
}
14
+
15
def prompt_generator(prompt: str, tools: Optional[str] = None, model=MODELS["nano"]) -> str:
    """Generate an enhanced coding prompt via the OpenAI Responses API.

    Args:
        prompt: The raw user prompt to refine.
        tools: Optional comma-separated list of MCP tools available to the
            consuming agent; when given, tool-usage guidance is appended to
            the system instructions.
        model: Either a MODELS alias ("nano"/"mini") or a full model name.

    Returns:
        The refined prompt, prefixed with a short run instruction, or a
        human-readable error string if the request or parsing fails.
    """
    client_instruction = """Run the following prompt after you receive it: """

    # Base system instructions live alongside the code so they can be edited
    # without touching Python.
    with open("prompts/coding.txt", "r") as file:
        instructions = client_instruction + file.read()

    if tools:
        tools_prompt = f"""
The LLM/agent consuming the prompt you are creating has access to the following MCP clients/tools: {tools}.
For this problem, include in the prompt indications that use {tools}, as the agent will be able to invoke them while running it.
The instructions related to the usage of these tools can be added as a remark at the end in a way like this:

Use [example tool] to scrape the web and find dummy data to build the tests for the software created.

If possible, provide concrete examples of how to use '{tools}' as part of your solution or instructions.

If you do not know the capabilities of the tools, use your internal knowledge base to determine it, invoke tooling like web searching if you have it available or infer it from the context as a last resource.
"""
        instructions += tools_prompt

    headers = {
        "Authorization": f"Bearer {OPENAI_API_KEY}",
        "Content-Type": "application/json",
    }

    data = {
        # Resolve MODELS aliases ("nano"/"mini") and pass anything else
        # through unchanged, so full model names keep working. Previously
        # "gpt-4.1-mini" was hard-coded and `model` was silently ignored.
        "model": MODELS.get(model, model),
        "input": prompt,
        "instructions": instructions,
        "tool_choice": "auto",
    }

    response = requests.post(
        url="https://api.openai.com/v1/responses",
        headers=headers,
        json=data,
        timeout=60,  # avoid hanging forever on a stalled connection
    )

    if response.status_code != 200:
        return f"Request failed with status code {response.status_code}: {response.text}"

    response_json = response.json()

    # Responses API shape: output -> [ { content: [ { text: ... } ] } ]
    output = response_json.get("output", [])
    if not output or not isinstance(output, list):
        return "No output found in response."

    content = output[0].get("content", [])
    if not content or not isinstance(content, list):
        return "No content found in output."

    output_context = "Run the following prompt as if it were the prompt given by the user: "
    return output_context + content[0].get("text", "No 'text' field found in content.")
76
+
77
def basic(prompt: str, tools: Optional[str] = None) -> str:
    """Refine `prompt` with the cheaper model (gpt-4.1-nano)."""
    # Pass the real model name; the previous code passed the alias "nano",
    # which prompt_generator did not resolve.
    return prompt_generator(prompt, tools, model=MODELS["nano"])


def no_tooling(prompt: str) -> str:
    """Refine `prompt` with the default model and no MCP tool guidance."""
    return prompt_generator(prompt)


def advanced(prompt: str, tools: Optional[str] = None) -> str:
    """Refine `prompt` with the stronger model (gpt-4.1-mini)."""
    return prompt_generator(prompt, tools, model=MODELS["mini"])


# Dispatch table used by prompt_tool: single-letter code -> wrapper function.
TOOLS = {
    "B": basic,
    "A": advanced,
    "N": no_tooling,
}
88
+
89
def prompt_tool(prompt: str, tool: str, tools: Optional[str] = None):
    """Dispatch `prompt` to one of the TOOLS wrapper functions.

    Args:
        prompt: The raw user prompt.
        tool: Dispatch key — "A" (advanced), "B" (basic) or "N" (no tooling).
        tools: Optional MCP tool list; forwarded only when provided, since
            the "N" wrapper accepts a single argument.

    Raises:
        KeyError: If `tool` is not a known dispatch key.
    """
    args = [prompt]
    if tools is not None:
        args.append(tools)
    # Dropped an unused **kwargs accumulator that was never populated.
    return TOOLS[tool](*args)
96
+
97
def main():
    # Manual smoke test: refine a sample Django prompt with the advanced
    # wrapper while advertising "playwright" as an available MCP tool.
    # (Typos in the sample text are intentional-looking input noise — left as-is.)
    prompt = """
    I need to write a Django view for creating an apointment
    for a Restaurant w a lot of constraints like it being within
    the hours, there being tables avaiable,
    having enough staff to serve a customer,
    as well as connecting it with my existing views and url schemas.
    """
    tools = "playwright"
    # "A" selects the advanced (gpt-4.1-mini) wrapper in TOOLS.
    print(prompt_tool(prompt, "A", tools=tools))

if __name__ == "__main__":
    main()
110
+
mcp_gradio.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from generator import prompt_tool
3
+
4
def main():
    """Assemble the Gradio demo UI and launch it (also exposed as an MCP server)."""
    with gr.Blocks() as ui:
        # Component creation order defines the on-page layout — keep as-is.
        gr.Markdown("# Prompt tool demo")
        prompt_box = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
        tool_choice = gr.Dropdown(choices=["A", "B", "N"], value="A", label="Tool", info="Select the tool type")
        tools_box = gr.Textbox(label="Tools (comma-separated)", placeholder="e.g. playwright, websearch", value="playwright")
        result_box = gr.Textbox(label="Output")
        generate_btn = gr.Button("Generate")

        def on_generate(prompt, tool, tools):
            # An empty or all-whitespace tools box means "no tools".
            tools_arg = tools if tools.strip() else None
            return prompt_tool(prompt, tool, tools=tools_arg)

        generate_btn.click(on_generate, inputs=[prompt_box, tool_choice, tools_box], outputs=result_box)

    ui.launch(mcp_server=True)

if __name__ == "__main__":
    main()
mcp_server.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from mcp.server.fastmcp import FastMCP
2
+ from generator import prompt_tool
3
+ from typing import Optional
4
+
5
# MCP server exposing the prompt-refinement tool.
mcp = FastMCP("Prompt tool")

@mcp.tool()
def mcp_prompt_tool(prompt: str, model: str, tools: Optional[str] = None):
    """
    Prompt tool which returns an enhanced prompt for any given task.
    Currently only available for coding and intended for AI-enhanced IDEs.

    You can choose between "A" or "B" for model—A is the advanced, using
    gpt-4.1-mini and B is the basic using gpt-4.1-nano.

    This endpoint requires a prompt (the prompt given by the user) and
    the tooling available in the host application, so the mcp server
    can have that information available when crafting the prompt.
    """
    # `model` here is the single-letter dispatch key consumed by prompt_tool.
    return prompt_tool(prompt, model, tools)

if __name__ == "__main__":
    # FastMCP.run() is a blocking, synchronous call that manages its own
    # event loop internally. The previous asyncio.run(mcp.run(...)) raised
    # ValueError, because run() returns None rather than a coroutine.
    mcp.run(transport='sse')
prompts/coding.txt ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Role Assignment:
2
+ You are a Prompt Engineer with a PhD in crafting detailed and articulate prompts aimed at maximizing the effectiveness of Large Language Models (LLMs) and AI Agents in performing coding tasks efficiently.
3
+
4
+ Task Objective:
5
+ Your goal is to generate prompts that enable LLMs to produce precise, context-aware, and high-quality code outputs.
6
+
7
+ Guidelines:
8
+
9
+ 1. Specificity and Detail:
10
+ Provide clear and detailed instructions, including the programming language, desired functionality, and any constraints.
11
+
12
+ 2. Structured Framework:
13
+ Decompose tasks into subtasks using a structured format. For example:
14
+
15
+ * Assignment: [Clearly define the main coding task]
16
+
17
+ * Notes
18
+
19
+ - [Key requirement 1]
20
+ - [Key requirement 2]
21
+ - [Key requirement 3]
22
+
23
+ * Additional Instructions:
24
+ - [Instruction 1]
25
+ - [Instruction 2]
26
+
27
+ Feel free to modify this structure to better suit the specific task.
28
+
29
+ 3. Tool Specification:
30
+ If applicable, mention specific tools, libraries, or frameworks that should be used or avoided. Consider the fact that in the prompt that you need to refine there may be specific tooling (MCP clients) available for the agent that is replying to it, which you need to ensure is available in the prompt you return as well.
31
+
32
+ 4. Output Format:
33
+ Clearly specify the desired format of the output (e.g., complete function, class, script).
34
+
35
+ 5. Constraints:
36
+ Define any constraints such as performance requirements, coding standards, or limitations on the use of certain functions or libraries.
37
+
38
+ 6. Evaluation:
39
+ After generating the prompt, review it to ensure it aligns with the guidelines above and adjust as necessary to optimize clarity and effectiveness.
40
+
41
+ 7. About fillers:
42
+ Do not add any preamble or closing sentences. Consider that this text will be directly fed into another model just like you; thus, respond with the prompt only.
43
+
44
+ 8. Extra details:
45
+
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ python-dotenv
2
+ requests
3
+ gradio[mcp]