AriaUniversal committed on
Commit
e9f3bcb
·
verified ·
1 Parent(s): 0affaed

Update app.py

Browse files

New update: replace the Zephyr chat demo with the SkyCode FastAPI + Gradio app.
Previous code was:"import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
messages = [{"role": "system", "content": system_message}]

for val in history:
if val[0]:
messages.append({"role": "user", "content": val[0]})
if val[1]:
messages.append({"role": "assistant", "content": val[1]})

messages.append({"role": "user", "content": message})

response = ""

for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content

response += token
yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
respond,
additional_inputs=[
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
)


if __name__ == "__main__":
demo.launch()
"

Files changed (1) hide show
  1. app.py +83 -52
app.py CHANGED
@@ -1,64 +1,95 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
9
 
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
 
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
 
27
 
28
- response = ""
 
 
 
 
 
29
 
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
 
39
- response += token
40
- yield response
 
 
41
 
 
 
 
 
 
 
 
42
 
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
 
 
 
62
 
63
- if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SkyCode Version 1.0 - Complete Code
2
import os
import subprocess
import tempfile

import gradio as gr
import requests
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
9
 
10
# Initialize FastAPI — serves the JSON API that the Gradio frontend
# functions below call over http://localhost:8000.
app = FastAPI()
 
 
12
 
13
# AI Models — loaded once at import time; startup blocks until the weights
# are downloaded/cached.
# NOTE(review): "deepseek-ai/codex-1" does not look like a published Hugging
# Face model id — confirm it exists, otherwise from_pretrained raises here.
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/codex-1")
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/codex-1")
16
 
17
# Pydantic Models — JSON request bodies for the API endpoints below.
class Prompt(BaseModel):
    # Natural-language description of the code to generate (/text-to-code).
    prompt: str
 
 
 
 
 
 
20
 
21
class Question(BaseModel):
    # Source code the question refers to (/ask).
    code: str
    # The user's question about that code.
    question: str
 
 
24
 
25
class BuggyCode(BaseModel):
    # Source code to be repaired (/fix-bugs) or scanned (/analyze-code).
    code: str
27
 
28
# API Endpoints
@app.post("/text-to-code")
def text_to_code(prompt: Prompt):
    """Generate source code from a natural-language prompt via the causal LM.

    Returns a JSON object ``{"code": <decoded generation>}``; note the decoded
    text includes the prompt itself, since the full sequence is decoded.
    """
    inputs = tokenizer(prompt.prompt, return_tensors="pt")
    # Pass the attention mask explicitly, and bound *new* tokens: the original
    # max_length=100 counted the prompt tokens against the budget, so long
    # prompts left little or no room for generated code.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=100,
    )
    return {"code": tokenizer.decode(outputs[0], skip_special_tokens=True)}
34
 
35
@app.post("/ask")
def ask_question(question: Question):
    """Answer a question about a piece of code.

    Placeholder endpoint: a Q&A model is not wired in yet, so every request
    receives the same canned answer regardless of the submitted code/question.
    """
    return {"answer": "This code calculates the factorial of a number."}
 
 
 
 
39
 
40
@app.post("/fix-bugs")
def fix_bugs(buggy_code: BuggyCode):
    """Repair the submitted code.

    Placeholder endpoint: a bug-fixing model is not integrated yet, so a
    canned factorial implementation is returned for every request.
    """
    canned_fix = "def factorial(n):\n return 1 if n == 0 else n * factorial(n-1)"
    return {"fixed_code": canned_fix}
44
 
45
@app.post("/analyze-code")
def analyze_code(buggy_code: BuggyCode):
    """Run a bandit security scan over the submitted code.

    Accepts a JSON body (``BuggyCode``). The original signature ``code: str``
    made FastAPI treat the code as a *query* parameter, while the Gradio
    frontend posts ``{"code": ...}`` as JSON — every request 422'd.
    Returns ``{"result": <bandit stdout>}``.
    """
    # A NamedTemporaryFile replaces the original hard-coded "sandbox/" path,
    # which raised FileNotFoundError when that directory did not exist.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(buggy_code.code)
        tmp_path = tmp.name
    try:
        # bandit must be installed and on PATH; list form keeps shell=False.
        result = subprocess.run(["bandit", "-r", tmp_path], capture_output=True)
        return {"result": result.stdout.decode("utf-8")}
    finally:
        os.unlink(tmp_path)
52
 
53
# Gradio Frontend
def skycode_interface(prompt):
    """UI handler: forward the prompt to /text-to-code, return the code.

    A timeout is set so the UI cannot hang forever if the backend is down;
    it is generous because model generation can be slow.
    """
    response = requests.post(
        "http://localhost:8000/text-to-code",
        json={"prompt": prompt},
        timeout=120,
    )
    return response.json().get("code")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
def ask_skycode(code, question):
    """UI handler: forward code + question to /ask, return the answer.

    A timeout prevents the UI from hanging forever if the backend is down.
    """
    response = requests.post(
        "http://localhost:8000/ask",
        json={"code": code, "question": question},
        timeout=60,
    )
    return response.json().get("answer")
61
 
62
def fix_skycode_bugs(code):
    """UI handler: forward code to /fix-bugs, return the fixed code.

    A timeout prevents the UI from hanging forever if the backend is down.
    """
    response = requests.post(
        "http://localhost:8000/fix-bugs",
        json={"code": code},
        timeout=60,
    )
    return response.json().get("fixed_code")
65
+
66
def analyze_skycode(code):
    """UI handler: forward code to /analyze-code, return the scan report.

    A timeout prevents the UI from hanging forever if the backend is down.
    """
    response = requests.post(
        "http://localhost:8000/analyze-code",
        json={"code": code},
        timeout=60,
    )
    return response.json().get("result")
69
+
70
# Gradio UI: one tab per backend endpoint. `demo` is launched from the
# __main__ guard at the bottom of the file.
with gr.Blocks() as demo:
    gr.Markdown("# SkyCode: AI-Powered Software Engineering Tool")
    # Tab 1: natural-language prompt -> generated code (/text-to-code).
    with gr.Tab("Text to Code"):
        prompt = gr.Textbox(label="Enter your prompt", placeholder="Create a Python function to calculate factorial.")
        code_output = gr.Textbox(label="Generated Code", interactive=False)
        gr.Button("Generate Code").click(skycode_interface, inputs=prompt, outputs=code_output)
    # Tab 2: ask a question about pasted code (/ask).
    with gr.Tab("Ask SkyCode"):
        code_input = gr.Textbox(label="Paste your code", placeholder="def factorial(n):...")
        question_input = gr.Textbox(label="Ask a question", placeholder="What does this code do?")
        answer_output = gr.Textbox(label="Answer", interactive=False)
        gr.Button("Ask").click(ask_skycode, inputs=[code_input, question_input], outputs=answer_output)
    # Tab 3: submit buggy code for an automated fix (/fix-bugs).
    with gr.Tab("Fix Bugs"):
        buggy_code = gr.Textbox(label="Paste your buggy code", placeholder="def factorial(n):...")
        fixed_code = gr.Textbox(label="Fixed Code", interactive=False)
        gr.Button("Fix Bugs").click(fix_skycode_bugs, inputs=buggy_code, outputs=fixed_code)
    # Tab 4: static security analysis via bandit (/analyze-code).
    with gr.Tab("Analyze Code"):
        code_to_analyze = gr.Textbox(label="Paste your code", placeholder="def factorial(n):...")
        analysis_output = gr.Textbox(label="Analysis Result", interactive=False)
        gr.Button("Analyze").click(analyze_skycode, inputs=code_to_analyze, outputs=analysis_output)
89
+
90
# Run the App
if __name__ == "__main__":
    import threading

    # uvicorn.run() blocks the calling thread, so in the original code
    # demo.launch() below was never reached and the UI never started.
    # Run the API server in a daemon thread instead.
    server = threading.Thread(
        target=uvicorn.run,
        kwargs={"app": app, "host": "0.0.0.0", "port": 8000},
        daemon=True,
    )
    server.start()
    # Start the frontend interface (blocks in the main thread; the daemon
    # server thread dies with the process when the UI exits).
    demo.launch()