jaeyoungk committed on
Commit
4965ba7
·
verified ·
1 Parent(s): e47a857

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -0
README.md CHANGED
@@ -35,10 +35,14 @@ This is the model card of a 🤗 transformers model that has been pushed on the
35
 
36
  ## Uses
37
 
 
 
38
  import torch
39
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
40
  import re
41
 
 
 
42
  model_id = "jaeyoungk/albatross"
43
  bnb_config = BitsAndBytesConfig(
44
  load_in_4bit=True,
@@ -50,6 +54,7 @@ bnb_config = BitsAndBytesConfig(
50
  tokenizer = AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B-Instruct')
51
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map='auto')
52
 
 
53
 
54
  def gen(x):
55
  system_prompt = f"""
@@ -95,6 +100,7 @@ def gen(x):
95
 
96
  return extracted_text
97
 
 
98
  gen('input your text here')
99
 
100
 
 
35
 
36
  ## Uses
37
 
38
+ # load library
39
+
40
  import torch
41
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
42
  import re
43
 
44
+ # load model
45
+
46
  model_id = "jaeyoungk/albatross"
47
  bnb_config = BitsAndBytesConfig(
48
  load_in_4bit=True,
 
54
  tokenizer = AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B-Instruct')
55
  model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map='auto')
56
 
57
+ # define model generation function
58
 
59
  def gen(x):
60
  system_prompt = f"""
 
100
 
101
  return extracted_text
102
 
103
+ # test the model
104
  gen('input your text here')
105
 
106