Text Generation
Transformers
Safetensors
English
qwen2
code
NextJS
conversational
text-generation-inference
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("nirusanan/Qwen2.5-1.5B-NextJs-code")
model = AutoModelForCausalLM.from_pretrained("nirusanan/Qwen2.5-1.5B-NextJs-code")

messages = [
    {"role": "user", "content": "Who are you?"},
]

# Render the chat template to a dict of tensors ("pt") and move them to
# whichever device the model was loaded on.
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
# Slice off the prompt tokens so only the newly generated text is decoded.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
Model Information
The Qwen2.5-1.5B-NextJs-code is a quantized, fine-tuned version of the Qwen2.5-1.5B-Instruct model designed specifically for generating NextJs code.
- Base model: Qwen/Qwen2.5-1.5B-Instruct
How to use
Starting with transformers version 4.44.0 and later, you can run conversational inference using the Transformers pipeline.
Make sure to update your transformers installation via pip install --upgrade transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
def get_pipline():
    """Build the text-generation pipeline for the fine-tuned NextJs model.

    Loads the tokenizer and the fp16 model onto cuda:0 and wraps both in a
    transformers pipeline with a 3500-token total-length cap.
    """
    repo_id = "nirusanan/Qwen2.5-1.5B-NextJs-code"

    tok = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
    # No pad token ships with the checkpoint; reuse EOS for padding.
    tok.pad_token = tok.eos_token

    lm = AutoModelForCausalLM.from_pretrained(
        repo_id,
        torch_dtype=torch.float16,
        device_map="cuda:0",
        trust_remote_code=True,
    )

    return pipeline(
        task="text-generation",
        model=lm,
        tokenizer=tok,
        max_length=3500,
    )


pipe = get_pipline()
def generate_prompt(project_title, description):
    """Assemble the instruction-style prompt expected by the model.

    Produces the fixed template — instruction header, project title,
    project description — terminated by the "### Response:" marker and a
    trailing newline, ready to hand to the generation pipeline.
    """
    header = (
        "Below is an instruction that describes a project. "
        "Write Nextjs 14 code to accomplish the project described below."
    )
    sections = [
        header,
        "### Instruction:",
        "Project:",
        project_title,
        "Project Description:",
        description,
        "### Response:",
        "",  # keep the trailing newline of the original template
    ]
    return "\n".join(sections)
# Build a prompt for an example project, run the pipeline, and print
# everything the model produced before its "### End" marker.
prompt = generate_prompt(
    project_title="Your NextJs project",
    description="Your NextJs project description",
)
result = pipe(prompt)
completion = result[0]["generated_text"]
print(completion.split("### End")[0])
- Downloads last month
- 19
Model tree for nirusanan/Qwen2.5-1.5B-NextJs-code
Base model
Qwen/Qwen2.5-1.5B
Finetuned
Qwen/Qwen2.5-1.5B-Instruct
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="nirusanan/Qwen2.5-1.5B-NextJs-code")

messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)