from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from huggingface_hub import InferenceClient
import json
app = FastAPI()
# Initialize the Hugging Face Inference Client
client = InferenceClient()
# Serve the static HTML file
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/")
async def get():
    """Serve the ASIC design code generator single-page UI.

    Returns:
        HTMLResponse containing the contents of the static HTML file.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original open(...).read() leaked the handle until GC).
    with open("static/asic-design-code-generator.html") as f:
        return HTMLResponse(f.read())
@app.websocket("/ws/generate_code")
async def generate_code(websocket: WebSocket):
    """Stream LLM-generated ASIC design code over a WebSocket.

    Protocol: the client sends a JSON text frame with keys
    "description" and "language"; the server streams back the
    cumulative generated code (full text so far) one frame per
    model chunk. Loops until the client disconnects.
    """
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            description = request_data.get("description")
            language = request_data.get("language")
            # Build the prompt for ASIC design code generation
            prompt = f"Generate {language} code only for the following ASIC design description. Provide the code without any additional explanation or comments.\n\n\n{description}"
            messages = [{"role": "user", "content": prompt}]
            # Accumulate and stream content as it is generated
            generated_code = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                max_tokens=4000,
                top_p=0.7,
                stream=True,
            )
            # Send the cumulative text after each chunk so the client can
            # simply replace its display buffer.
            for chunk in stream:
                # In OpenAI-style streaming the final chunk's delta.content
                # is None; the original `+=` raised TypeError there and
                # tore down the socket. Guard before appending.
                delta = chunk.choices[0].delta.content
                if delta:
                    generated_code += delta
                    await websocket.send_text(generated_code)
    except WebSocketDisconnect:
        print("Client disconnected")
@app.websocket("/ws/refine_code")
async def refine_code(websocket: WebSocket):
    """Stream an LLM-refined version of client-supplied code.

    Protocol: the client sends a JSON text frame with keys
    "existing_code" and "language"; the server streams back the
    cumulative refined code one frame per model chunk. Loops until
    the client disconnects.
    """
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            existing_code = request_data.get("existing_code")
            language = request_data.get("language")
            # Build the refinement prompt
            prompt = f"Refine the following {language} code based on the provided details. Return only the updated code, without explanations or comments.\n\n\n{existing_code}"
            messages = [{"role": "user", "content": prompt}]
            # Accumulate and stream refined code content
            refined_code = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                max_tokens=4000,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # Final streamed chunk carries delta.content = None;
                # guard to avoid TypeError on string concatenation.
                delta = chunk.choices[0].delta.content
                if delta:
                    refined_code += delta
                    await websocket.send_text(refined_code)
    except WebSocketDisconnect:
        print("Client disconnected")
@app.websocket("/ws/optimize_code")
async def optimize_code(websocket: WebSocket):
    """Stream LLM analysis (syntax/lint/optimization notes) for code.

    Protocol: the client sends a JSON text frame with keys "code" and
    "language"; the server streams back the cumulative analysis text
    one frame per model chunk.
    """
    await websocket.accept()
    try:
        # Loop like the sibling endpoints so the connection can serve
        # multiple requests instead of closing after the first one.
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            code = request_data.get("code")
            language = request_data.get("language")
            prompt = f"Analyze the following {language} code for syntax, linting, and optimization suggestions:\n\n{code}"
            messages = [{"role": "user", "content": prompt}]
            analysis = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                max_tokens=4000,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # Guard: the terminal streamed chunk has delta.content = None,
                # which previously raised TypeError on `+=`.
                delta = chunk.choices[0].delta.content
                if delta:
                    analysis += delta
                    await websocket.send_text(analysis)
    except WebSocketDisconnect:
        print("Client disconnected")
# WebSocket for Failure Mode Analysis and Test Bench Generation
@app.websocket("/ws/generate_test_bench")
async def generate_test_bench(websocket: WebSocket):
    """Stream LLM-generated test benches and failure-mode analysis.

    Protocol: the client sends a JSON text frame with keys "code" and
    "language"; the server streams back the cumulative generated text
    one frame per model chunk.
    """
    await websocket.accept()
    try:
        # Loop like the sibling endpoints so the connection can serve
        # multiple requests instead of closing after the first one.
        while True:
            data = await websocket.receive_text()
            request_data = json.loads(data)
            code = request_data.get("code")
            language = request_data.get("language")
            prompt = f"Generate test benches and identify potential failure modes for the following {language} code:\n\n{code}"
            messages = [{"role": "user", "content": prompt}]
            test_bench = ""
            stream = client.chat.completions.create(
                model="Qwen/Qwen2.5-Coder-32B-Instruct",
                messages=messages,
                temperature=0.5,
                # NOTE(review): 512 is far smaller than the 4000 used by the
                # other endpoints and may truncate test benches — confirm
                # whether this cap is intentional.
                max_tokens=512,
                top_p=0.7,
                stream=True,
            )
            for chunk in stream:
                # Guard: the terminal streamed chunk has delta.content = None,
                # which previously raised TypeError on `+=`.
                delta = chunk.choices[0].delta.content
                if delta:
                    test_bench += delta
                    await websocket.send_text(test_bench)
    except WebSocketDisconnect:
        print("Client disconnected")