Kumar955 committed on
Commit
871325c
·
verified ·
1 Parent(s): 57a40df

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -1
README.md CHANGED
@@ -50,7 +50,13 @@ model = "Kumar955/Hemanth-llm"
50
  messages = [{"role": "user", "content": "What is a large language model?"}]
51
 
52
  tokenizer = AutoTokenizer.from_pretrained(model)
53
- prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
 
 
 
 
 
54
  pipeline = transformers.pipeline(
55
  "text-generation",
56
  model=model,
@@ -60,4 +66,5 @@ pipeline = transformers.pipeline(
60
 
61
  outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
62
  print(outputs[0]["generated_text"])
 
63
  ```
 
50
  messages = [{"role": "user", "content": "What is a large language model?"}]
51
 
52
  tokenizer = AutoTokenizer.from_pretrained(model)
53
+
54
+ # Define a chat template
55
+ chat_template = """<s>{% for message in messages %}<|{{ message['role'] }}|>{{ message['content'] }}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}"""
56
+
57
+ # Use the chat template in apply_chat_template
58
+ prompt = tokenizer.apply_chat_template(messages, chat_template=chat_template, tokenize=False, add_generation_prompt=True)
59
+
60
  pipeline = transformers.pipeline(
61
  "text-generation",
62
  model=model,
 
66
 
67
  outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
68
  print(outputs[0]["generated_text"])
69
+
70
  ```