How to use lightblue/karasu-7B with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("text-generation", model="lightblue/karasu-7B")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B")
model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

How to use lightblue/karasu-7B with vLLM:
# Install vLLM from pip:
pip install vllm
# Start the vLLM server:
vllm serve "lightblue/karasu-7B"
# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "lightblue/karasu-7B",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
]
}'docker model run hf.co/lightblue/karasu-7B
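The same OpenAI-compatible endpoint can also be called from Python. Below is a minimal sketch using the openai client, assuming the vLLM server above is running on localhost:8000 (for the SGLang server in the next section, only the port changes to 30000); the api_key value is a placeholder, since a local server does not require one by default.

# Minimal sketch: call the local OpenAI-compatible server with the openai client.
# Assumes `pip install openai` and the vLLM server above running on port 8000.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # placeholder key

response = client.chat.completions.create(
    model="lightblue/karasu-7B",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=100,
)
print(response.choices[0].message.content)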
How to use lightblue/karasu-7B with SGLang:
# Install SGLang from pip:
pip install sglang
# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "lightblue/karasu-7B" \
  --host 0.0.0.0 \
  --port 30000
# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "lightblue/karasu-7B",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
]
}'docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "lightblue/karasu-7B" \
    --host 0.0.0.0 \
    --port 30000
# Call the server with the same curl command shown above.

How to use lightblue/karasu-7B with Docker Model Runner:
docker model run hf.co/lightblue/karasu-7B
# Chatting in Japanese with Transformers (pipeline)
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B")
model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B", torch_dtype=torch.bfloat16, device_map="auto")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]  # "You are an AI assistant."
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})  # "Who is the Prime Minister of the UK?"

prompt = tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)
pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_text=False)
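To print the reply token by token instead of waiting for the full pipeline output, a TextStreamer can be attached to model.generate. This is a minimal illustrative sketch, not part of the original example; it reuses the model, tokenizer, and prompt built above.

# Minimal sketch (assumes model, tokenizer, and prompt from the block above):
# stream the generated tokens to stdout with transformers' TextStreamer.
from transformers import TextStreamer

# The chat template already adds special tokens, so skip them here.
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(model.device)
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=100, do_sample=False)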
# Chatting in Japanese with vLLM (offline inference)
from vllm import LLM, SamplingParams

sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
llm = LLM(model="lightblue/karasu-7B")

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]  # "You are an AI assistant."
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})  # "Who is the Prime Minister of the UK?"

prompt = llm.llm_engine.tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)
prompts = [prompt]

outputs = llm.generate(prompts, sampling_params)
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
Base model: augmxnt/shisa-7b-v1
Team:
Peter Devine
Sho Higuchi
Yuuki Yamanaka
Atom Sonoda
Shunichi Taniguchi
Renju Aoki