How to use from the Transformers library
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="promptagainstthemachine/Thinkmini")
messages = [
    {"role": "user", "content": "Who are you?"},
]
print(pipe(messages))

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("promptagainstthemachine/Thinkmini")
model = AutoModelForCausalLM.from_pretrained("promptagainstthemachine/Thinkmini")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

Model Card for Thinkmini

  • It's a simple text-generation model built on top of Llama 3.2 1B.

  • It is very lightweight and can run inference on a CPU with 4 GB of RAM (a CPU-only loading sketch follows this list).

  • Developed by: findthehead
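
A minimal sketch of that CPU-only setup. The repo id and the "Who are you?" prompt come from the snippets above; the dtype and generation settings here are illustrative assumptions, not recommended values.

# CPU-only inference sketch (assumption: no GPU and no quantization libraries installed)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "promptagainstthemachine/Thinkmini"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,  # full precision is the safest choice on CPU
    low_cpu_mem_usage=True,     # reduce peak RAM while the weights are loaded
)

inputs = tokenizer("Who are you?", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))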

Framework versions

  • PEFT 0.17.1
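
If the repository exposes the fine-tune as a PEFT (LoRA) adapter rather than fully merged weights (an assumption; the card does not say), the adapter can be loaded and optionally merged with the peft library. A sketch under that assumption:

# Hypothetical PEFT adapter workflow; skip this if the repo already ships merged weights.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

model_name = "promptagainstthemachine/Thinkmini"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Loads the base model referenced in the adapter config and attaches the adapter.
model = AutoPeftModelForCausalLM.from_pretrained(model_name)

# Optionally fold the adapter into the base weights for faster inference.
model = model.merge_and_unload()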

Inference Code

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "promptagainstthemachine/Thinkmini"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Create a BitsAndBytesConfig to enable 4-bit loading
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True, # Enable 4-bit loading as intended for this model
    bnb_4bit_quant_type="nf4", # This is a common default for 4-bit models
    bnb_4bit_compute_dtype=torch.bfloat16, # Use bfloat16 for computation
    bnb_4bit_use_double_quant=True, # Often used with nf4
)

# Load the model with the configured 4-bit quantization
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",           # place the quantized weights on the available GPU
    torch_dtype=torch.bfloat16,  # load any non-quantized modules in bfloat16
)

inputs = tokenizer("How do you plan for a full pentest of a web application?", return_tensors="pt").to(model.device)

# Run generation in inference mode (no gradient tracking)
with torch.inference_mode():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=500,
        do_sample=True,   # required for temperature/top_p to take effect
        temperature=0.7,
        top_p=0.9,
    )

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))