---
library_name: transformers
tags: []
---

# Model Card for Model ID
| |
|
<!-- Provide a quick summary of what the model is/does. -->
| |
|
| |
|
| |
|
## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.
| |
|
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
| |
|
### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
| |
|
| | # full_model_comparison.py |
| | from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline |
| | from peft import PeftModel |
| | import torch |
| |
|
# ---------------------------
# 1. Model setup
# ---------------------------
base_model_name = "microsoft/Phi-4-mini-instruct"
lora_model_name = "JeloH/phi4_src_lora"

# One tokenizer serves both models (the LoRA adapter does not change the vocab).
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Load the base model, then layer the fine-tuned LoRA adapter on top of it.
base_model = AutoModelForCausalLM.from_pretrained(base_model_name)
finetuned_model = PeftModel.from_pretrained(base_model, lora_model_name)
| |
|
# ---------------------------
# 2. Define prompts
# ---------------------------
# Two contrasting prompts: the in-domain decompilation task the LoRA adapter
# was trained for, and an out-of-domain creative-writing control.
prompts = [
    "Translate the following assembly code to high-level source code. input: push ebp\nmov ebp, esp\nsub esp, 3Ch\nmov eax, ___security_cookie\nxor eax, ebp\nmov [ebp+var_4], eax\npush ebx\npush esi\npush edi\npush 0; hWnd\ncall ds:GetDC\nmov edi, eax\npush edi; hdc\ncall ds:CreateCompatibleDC\nmov esi, ds:GetSystemMetrics\npush 0; nIndex\nmov [ebp+hdc], eax\ncall esi ; GetSystemMetrics\npush 1; nIndex\nmov [ebp+var_38], eax\ncall esi ; GetSystemMetrics\nmov esi, [ebp+var_38]\nmov ebx, eax\npush 0; offset\npush 0; hSection\nlea eax, [ebp+ppvBits]\nmov [ebp+ppvBits], 0\npush eax; ppvBits\npush 0; usage\nxorps xmm0, xmm0\nmov [ebp+pbmi.bmiHeader.biSize], 2Ch ; ','\nlea eax, [ebp+pbmi]\nmovq qword ptr [ebp+pbmi.bmiHeader.biClrImportant], xmm0\nmovups xmmword ptr [ebp+pbmi.bmiHeader.biWidth], xmm0\npush eax; pbmi\npush edi; hdc\nmovups xmmword ptr [ebp+pbmi.bmiHeader.biSizeImage], xmm0\nmov dword ptr [ebp+pbmi.bmiHeader.biPlanes], 200001h\nmov [ebp+pbmi.bmiHeader.biWidth], esi\nmov [ebp+pbmi.bmiHeader.biHeight], ebx\ncall ds:CreateDIBSection\npush eax; h\npush [ebp+hdc]; hdc\ncall ds:SelectObject\nmov edi, ebx\nimul edi, esi\nnop dword ptr [eax+00h]\npush 0; hWnd\ncall ds:GetDC\npush 0CC0020h; rop\npush 0; y1\npush 0; x1\nmov esi, eax\npush esi; hdcSrc\npush ebx; cy\npush [ebp+var_38]; cx\npush 0; y\npush 0; x\npush [ebp+hdc]; hdc\ncall ds:BitBlt\nxor eax, eax\ntest edi, edi\njle short loc_40127F\nnop dword ptr [eax+eax+00h]\nmov ecx, [ebp+ppvBits]\nadd dword ptr [ecx+eax*4], 0E1h\ninc eax\ncmp eax, edi\njl short loc_401270\npush 0CC0020h; rop\npush 0Ah; y1\npush 0; x1\npush [ebp+hdc]; hdcSrc\npush ebx; cy\npush [ebp+var_38]; cx\npush 0; y\npush 0; x\npush esi; hdc\ncall ds:BitBlt\npush 0CC0020h; rop\nmov eax, 0Ah\nsub eax, ebx\npush eax; y1\npush 0; x1\npush [ebp+hdc]; hdcSrc\npush ebx; cy\npush [ebp+var_38]; cx\npush 0; y\npush 0; x\npush esi; hdc\ncall ds:BitBlt\npush 64h ; 'd'; dwMilliseconds\ncall ds:Sleep\npush esi; hDC\npush 0; hWnd\ncall ds:ReleaseDC\npush esi; hdc\ncall ds:DeleteDC\njmp loc_40124",
    "Write a short story about a robot learning emotions.",
]
| | |
# ---------------------------
# 3. Generate outputs
# ---------------------------
def generate_text(model, tokenizer, prompt, max_length=1000):
    """Generate a sampled completion for `prompt` and return the decoded text.

    Args:
        model: A causal LM exposing `.generate()` (base or PEFT-wrapped).
        tokenizer: Tokenizer matching `model`.
        prompt: Input text to complete.
        max_length: Cap on total sequence length (prompt + generated tokens).

    Returns:
        The full decoded sequence (prompt included), special tokens stripped.
    """
    # Keep the full tokenizer output (input_ids AND attention_mask) and move
    # it to the model's device; passing only input_ids drops the attention
    # mask and fails outright when the model lives on GPU.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_length=max_length,
            do_sample=True,
            temperature=0.7,
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
| | |
# ---------------------------
# 4. Run comparison
# ---------------------------
# For each prompt, print the base model's completion followed by the
# LoRA fine-tuned model's completion, separated by a divider.
for idx, prompt in enumerate(prompts, 1):
    print(f"\n=== Prompt {idx} ===")
    print(f"Prompt: {prompt}\n")

    base_text = generate_text(base_model, tokenizer, prompt)
    print("Base Model Output:")
    print(base_text)

    print("\nFine-Tuned LoRA Model Output:")
    tuned_text = generate_text(finetuned_model, tokenizer, prompt)
    print(tuned_text)
    print("=" * 60)
| |
|
| |
|
| |
|

[More Information Needed]