# Load the LoRA adapter model directly.
# NOTE(review): this repo is a PEFT/LoRA adapter, so loading it via
# AutoModel relies on transformers' built-in PEFT integration (the `peft`
# package must be installed). For explicit control, load the base model
# and attach the adapter as shown in the full example below.
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "adamabuhamdan/tinyllama-sql-lora",
    dtype="auto",  # `dtype` requires a recent transformers; older versions use torch_dtype="auto"
)
🤗 TinyLlama Text-to-SQL (LoRA Adapter)
This model is a LoRA adapter trained to fine-tune the behavior of the TinyLlama-1.1B model, specializing it in converting natural-language questions into accurate SQL code based on the table schema it is given, without any extra chatter.
- Developed by: Adam Abu Hamdan
- Model type: PEFT (LoRA)
- Language: English (Text & SQL)
- Finetuned from model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
💡 How the Model Works (How to Get Started)
You can run this model by merging the adapter (~20 MB) with the base model using the following Python code:
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
ADAPTER_MODEL = "adamabuhamdan/tinyllama-sql-lora"

# 1. Load the tokenizer and the base model.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    device_map="auto",
)

# 2. Attach the LoRA weights we trained on top of the base model.
model = PeftModel.from_pretrained(base_model, ADAPTER_MODEL)
model.eval()

# 3. Try generating a SQL query.
schema = "CREATE TABLE employees (id INT, name TEXT, department TEXT, salary INT);"
question = "List the names of employees in Engineering earning more than 100000."

# TinyLlama chat format: system / user / assistant turns delimited by </s>.
prompt = f"<|system|>\nYou are a SQL assistant. Given a table schema and a question, reply with ONLY the SQL query, nothing else.</s>\n<|user|>\nSchema:\n{schema}\n\nQuestion: {question}</s>\n<|assistant|>\n"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    # Greedy decoding (do_sample=False) keeps the SQL output deterministic.
    outputs = model.generate(**inputs, max_new_tokens=100, do_sample=False)
# Decode only the newly generated tokens, skipping the prompt portion.
print(tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True).strip())
- Downloads last month
- 44
Model tree for adamabuhamdan/tinyllama-sql-lora
Base model
TinyLlama/TinyLlama-1.1B-Chat-v1.0
# Use a pipeline as a high-level helper.
from transformers import pipeline

pipe = pipeline("text-generation", model="adamabuhamdan/tinyllama-sql-lora")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)