CrimsonEyes committed on
Commit
6cf51d2
·
verified ·
1 Parent(s): e239856

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +91 -23
README.md CHANGED
@@ -24,32 +24,100 @@ This is a fine-tuned version of LLaMA optimized to respond like Rick Sanchez fro
24
 
25
  ```python
26
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- # For merged model
29
- model = AutoModelForCausalLM.from_pretrained("YOUR_USERNAME/rick-llama")
30
- tokenizer = AutoTokenizer.from_pretrained("YOUR_USERNAME/rick-llama")
31
-
32
- # For PEFT/LoRA model
33
- from peft import PeftModel, PeftConfig
34
- base_model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-3B-Instruct")
35
- model = PeftModel.from_pretrained(base_model, "YOUR_USERNAME/rick-llama")
36
- tokenizer = AutoTokenizer.from_pretrained("YOUR_USERNAME/rick-llama")
37
-
38
- # Format your input
39
- text = "What do you think about space travel, Rick?"
40
-
41
- # Generate response
42
- inputs = tokenizer(text, return_tensors="pt")
43
- outputs = model.generate(
44
- **inputs,
45
- max_length=200,
46
- temperature=0.7,
47
- top_p=0.9,
48
- repetition_penalty=1.2
49
- )
50
- response = tokenizer.decode(outputs[0])
51
  ```
 
 
 
52
 
 
 
 
 
 
 
 
 
 
 
53
  ## Limitations
54
 
55
  - The model may generate responses that are sarcastic or irreverent
 
24
 
25
  ```python
26
  from transformers import AutoModelForCausalLM, AutoTokenizer
27
+ import torch
28
+
29
+ def setup_rick_model(model_id, use_token=False):
30
+ """
31
+ Setup the Rick model from Hugging Face
32
+ model_id: "username/model-name" from Hugging Face
33
+ use_token: Set True if it's a private repository
34
+ """
35
+ try:
36
+ # If private repository, first login with token
37
+ if use_token:
38
+ from huggingface_hub import login
39
+ token = "your_token_here" # Your Hugging Face token
40
+ login(token)
41
+
42
+ # Load model and tokenizer
43
+ model = AutoModelForCausalLM.from_pretrained(
44
+ model_id,
45
+ torch_dtype=torch.float16,
46
+ device_map="auto"
47
+ )
48
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
49
+
50
+ return model, tokenizer
51
+
52
+ except Exception as e:
53
+ print(f"Error loading model: {str(e)}")
54
+ return None, None
55
+
56
+ def ask_rick(question, model, tokenizer, max_length=200):
57
+ """Ask Rick a question"""
58
+ # Rick's personality prompt
59
+ role_play_prompt = (
60
+ "You are Rick Sanchez, a brilliant mad scientist, "
61
+ "the smartest man in the universe. Always respond as Rick would—"
62
+ "sarcastic, genius, and indifferent."
63
+ )
64
+
65
+ # Format input
66
+ input_text = f"<s>### Instruction:\n{role_play_prompt}\n\n### Input:\n{question}\n\n### Response:\n"
67
+
68
+ # Generate response
69
+ inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
70
+ outputs = model.generate(
71
+ inputs["input_ids"],
72
+ max_length=max_length,
73
+ temperature=0.8,
74
+ top_p=0.9,
75
+ do_sample=True,
76
+ repetition_penalty=1.2
77
+ )
78
+
79
+ # Decode response
80
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
81
+ return response.split("### Response:")[-1].strip()
82
+
83
+ # Usage example
84
+ if __name__ == "__main__":
85
+ # Replace with your model's repository name
86
+ MODEL_ID = "CrimsonEyes/rick_sanchez_model"
87
+
88
+ # Load model
89
+ model, tokenizer = setup_rick_model(MODEL_ID)
90
+
91
+ if model and tokenizer:
92
+ # Test questions
93
+ questions = [
94
+ "What do you think about space travel, Rick?",
95
+ "Can you explain quantum physics to me?",
96
+ "What's your opinion on family?"
97
+ ]
98
+
99
+ for question in questions:
100
+ print(f"\nQuestion: {question}")
101
+ response = ask_rick(question, model, tokenizer)
102
+ print(f"Rick's response: {response}")
103
+ ```
104
+ ## For a private repository:
105
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  ```
107
+ # First, get your token from https://huggingface.co/settings/tokens
108
+ from huggingface_hub import login
109
+ login("your_token_here")
110
 
111
+ MODEL_ID = "username/model-name" # Replace with your model's repository name
112
+ model, tokenizer = setup_rick_model(MODEL_ID, use_token=True)
113
+ ```
114
+
115
+ ## Using the model:
116
+ ```
117
+ question = "What do you think about space travel, Rick?"
118
+ response = ask_rick(question, model, tokenizer)
119
+ print(f"Rick's response: {response}")
120
+ ```
121
  ## Limitations
122
 
123
  - The model may generate responses that are sarcastic or irreverent