wyt2000 committed on
Commit
d25f628
·
verified ·
1 Parent(s): 211ca46

Upload inference.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. inference.py +65 -0
inference.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from vllm import LLM, SamplingParams
2
+ from transformers import AutoTokenizer
3
+
4
+ MODEL_DIR = "path/to/CodeV-SVA/"
5
+
6
def get_nl2sva_prompt(testbench: str, problem: str, reset_signal=None) -> str:
    """Build the natural-language-to-SVA translation prompt for the model.

    Args:
        testbench: SystemVerilog testbench source shown to the model as context.
        problem: Natural-language description of the assertion to create.
        reset_signal: Optional name of the disable-condition (reset) signal.
            When given, the prompt instructs the model to use it in a
            ``disable iff`` clause and the worked example includes that clause.

    Returns:
        The fully assembled user prompt string, ending with "Answer:".
    """
    # Header: testbench context followed by the question.
    # (Assembled as one grouped f-string instead of repeated `+=`.)
    prompt = (
        "Here is the testbench to perform your translation:\n"
        f"{testbench}"
        "\nQuestion: Create a SVA assertion that checks: "
        f"{problem}\n"
    )

    if reset_signal is not None:
        # Tell the model which signal gates the assertion, and show it
        # in the example's `disable iff` clause.
        prompt += f"You should use `{reset_signal}` as the disable condition signal. "
        example = (
            f"asrt: assert property (@(posedge clk) disable iff ({reset_signal})\n"
            "(a && b) != 1'b1\n"
            ");"
        )
    else:
        example = (
            "asrt: assert property (@(posedge clk)\n"
            "(a && b) != 1'b1\n"
            ");"
        )

    # Output-format instructions plus the worked example, then the answer cue.
    prompt += f"""Do not add code to output an error message string.
Enclose your SVA code with ```systemverilog and ```. Only output the code snippet and do NOT output anything else.

For example,
```systemverilog
{example}
```
Answer:"""
    return prompt
+
32
if __name__ == "__main__":
    # Fixed system role: the model acts as an RTL formal-verification assistant.
    system_prompt = "You are an AI assistant tasked with formal verification of register transfer level (RTL) designs.\nYour job is to translate a description of an assertion to concrete SystemVerilog Assertion (SVA) implementation.\n"
    # Example FIFO (1-read/1-write) testbench the assertion is written against.
    testbench = "module fifo_1r1w_tb (\n clk,\n reset_,\n wr_vld,\n wr_data,\n wr_ready,\n rd_vld,\n rd_data,\n rd_ready\n );\n \n parameter FIFO_DEPTH = 4;\n parameter DATA_WIDTH = 1;\n\nlocalparam FIFO_DEPTH_log2 = $clog2(FIFO_DEPTH); \nlocalparam DATA_WIDTH_log2 = $clog2(DATA_WIDTH); \n\n input clk;\n input reset_;\n input wr_vld;\n input [DATA_WIDTH-1:0] wr_data;\n input wr_ready;\n input rd_vld;\n input [DATA_WIDTH-1:0] rd_data;\n input rd_ready;\n\nwire wr_push;\nwire rd_pop;\n\nwire tb_reset;\nassign tb_reset = (reset_ == 1'b0);\n\nwire fifo_full;\nassign wr_push = wr_vld && wr_ready;\nassign rd_pop = rd_vld && rd_ready; \n\nreg [DATA_WIDTH-1:0] fifo_array [FIFO_DEPTH-1:0]; //fifo array - shift register\nreg [FIFO_DEPTH_log2-1:0] fifo_rd_ptr; //fifo array - rd_ptr\nwire actual_fifo_pop; // actual pop == pop\nreg fifo_empty; // fifo empty\nwire [DATA_WIDTH-1:0] fifo_out_data; // dout\n\n// ---- shift register code start ----\nalways @(posedge clk) begin\n if (!reset_) fifo_array[0] <= 'd0;\n else if (wr_push) begin\n fifo_array[0] <= wr_data;\n end else fifo_array[0] <= fifo_array[0];\nend\nfor (genvar i = 1; i < FIFO_DEPTH; i++ ) begin : loop_id\n always @(posedge clk) begin\n if (!reset_) fifo_array[i] <= 'd0;\n else if (wr_push) begin\n fifo_array[i] <= fifo_array[i-1];\n end else fifo_array[i] <= fifo_array[i];\n end\nend\n\n// ---- read pointer/fifo empty code start ----\nalways @(posedge clk) begin\n if (!reset_) begin\n fifo_rd_ptr <= 'd0;\n end else if (wr_push && fifo_empty) begin\n fifo_rd_ptr <= 'd0;\n end else if (rd_pop && !fifo_empty && (fifo_rd_ptr == 'd0)) begin\n fifo_rd_ptr <= 'd0;\n end else begin\n fifo_rd_ptr <= fifo_rd_ptr + wr_push - rd_pop;\n end\n if (!reset_) begin\n fifo_empty <= 'd1;\n end else if (rd_pop && !fifo_empty && (fifo_rd_ptr == 'd0) && !wr_push) begin\n fifo_empty <= 'd1;\n end else if ((fifo_rd_ptr != 'd0) || wr_push && !rd_pop) begin\n fifo_empty <= 'd0;\n end\nend\n// ---- fifo full and dout code start ----\nassign fifo_full = (fifo_rd_ptr == (FIFO_DEPTH - 1)) && !fifo_empty;\nassign fifo_out_data = fifo_array[fifo_rd_ptr];\n\nendmodule"
    problem = "When there is a write push to the FIFO, data is eventually popped. Use the signals 'rd_pop' and 'wr_push'."
    reset_signal = "tb_reset"
    user_prompt = get_nl2sva_prompt(testbench, problem, reset_signal)

    dialog = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]

    # Render the chat messages into a single generation-ready prompt string.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
    prompt = tokenizer.apply_chat_template(dialog, tokenize=False, add_generation_prompt=True)
    print(f"### Prompt:\n{prompt}")

    model = LLM(
        MODEL_DIR,
        tensor_parallel_size=4  # NOTE(review): assumes 4 GPUs on this host — confirm
    )

    sampling_params = SamplingParams(
        temperature=0.8,
        top_p=0.95,
        max_tokens=32768,
        n=1
    )

    responses = model.generate(prompt, sampling_params)
    formal_statements = []
    # One prompt was submitted, so all sampled completions live in responses[0].
    for i, output in enumerate(responses[0].outputs):
        response_text = output.text
        # BUG FIX: collect each completion; previously the list was created
        # but never populated, so it stayed empty.
        formal_statements.append(response_text)
        print(f"### Response:\n{response_text}")