coolAI committed on
Commit
398310d
·
verified ·
1 Parent(s): 5317fc7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -42,8 +42,8 @@ base_model = AutoModelForCausalLM.from_pretrained(
42
  )
43
 
44
  # Load LoRA adapters
45
- model = PeftModel.from_pretrained(base_model, "coolAI/precis-granite")
46
- tokenizer = AutoTokenizer.from_pretrained("coolAI/precis-granite")
47
 
48
  # Generate summary
49
  document = """Your long document here..."""
@@ -77,7 +77,7 @@ print(summary)
77
  from unsloth import FastLanguageModel
78
 
79
  model, tokenizer = FastLanguageModel.from_pretrained(
80
- model_name="coolAI/precis-granite",
81
  max_seq_length=2048,
82
  load_in_4bit=True, # For lower memory usage
83
  )
@@ -117,7 +117,7 @@ llm = LLM(
117
  lora_request = LoRARequest(
118
  "precis-granite",
119
  1,
120
- "coolAI/precis-granite"
121
  )
122
 
123
  # Sampling parameters
 
42
  )
43
 
44
  # Load LoRA adapters
45
+ model = PeftModel.from_pretrained(base_model, "cernis-intelligence/precis")
46
+ tokenizer = AutoTokenizer.from_pretrained("cernis-intelligence/precis")
47
 
48
  # Generate summary
49
  document = """Your long document here..."""
 
77
  from unsloth import FastLanguageModel
78
 
79
  model, tokenizer = FastLanguageModel.from_pretrained(
80
+ model_name="cernis-intelligence/precis",
81
  max_seq_length=2048,
82
  load_in_4bit=True, # For lower memory usage
83
  )
 
117
  lora_request = LoRARequest(
118
  "precis-granite",
119
  1,
120
+ "cernis-intelligence/precis"
121
  )
122
 
123
  # Sampling parameters