code-gemma
Google's gemma-2b-it fine-tuned on the code_instructions_122k_alpaca_style dataset.
Usage
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("text-generation", model="gnumanth/code-gemma")
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("gnumanth/code-gemma")
model = AutoModelForCausalLM.from_pretrained("gnumanth/code-gemma")
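Illustrative generation with the directly loaded model and tokenizer. This is a minimal sketch: the prompt and max_new_tokens are example values, and it assumes the tokenizer carries the chat template inherited from gemma-2b-it.
# Build a chat-style prompt (example prompt is illustrative)
messages = [{"role": "user", "content": "Write a Python function that checks if a number is prime."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
# Generate and decode only the newly produced tokens
outputs = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))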
Install vLLM from pip and serve the model
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "gnumanth/code-gemma"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "gnumanth/code-gemma",
    "messages": [
      { "role": "user", "content": "What is the capital of France?" }
    ]
  }'
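The OpenAI-compatible endpoint can also be called from Python. This is a minimal sketch assuming the openai client package is installed; vLLM does not require an API key by default, so a placeholder value is used.
# Call the vLLM server with the OpenAI Python client (pip install openai)
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # placeholder key

response = client.chat.completions.create(
    model="gnumanth/code-gemma",
    messages=[{"role": "user", "content": "Write a Python function that reverses a string."}],
)
print(response.choices[0].message.content)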