Safetensors
Korean
gpt_neox
juungwon committed on
Commit
4d4a05b
·
verified ·
1 Parent(s): 9b55459

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -0
README.md CHANGED
@@ -10,6 +10,10 @@ language:
10
 
11
  ## Inference example
12
  ```python
 
 
 
 
13
  import torch
14
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
15
 
@@ -22,6 +26,12 @@ quantization_config = BitsAndBytesConfig(
22
  model = AutoModelForCausalLM.from_pretrained(repo_name, quantization_config=quantization_config, device_map='cuda')
23
  tokenizer = AutoTokenizer.from_pretrained(repo_name)
24
 
 
 
 
 
 
 
25
 
26
  def ask(x, context='', is_input_full=False):
27
  ans = pipe(
 
10
 
11
  ## Inference example
12
  ```python
13
+ !pip install -U transformers
14
+ !pip install git+https://github.com/huggingface/peft
15
+ !pip install -U bitsandbytes
16
+
17
  import torch
18
  from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
19
 
 
26
  model = AutoModelForCausalLM.from_pretrained(repo_name, quantization_config=quantization_config, device_map='cuda')
27
  tokenizer = AutoTokenizer.from_pretrained(repo_name)
28
 
29
+ pipe = pipeline(
30
+ 'text-generation',
31
+ model=model,
32
+ tokenizer=tokenizer,
33
+ device_map='cuda',
34
+ )
35
 
36
  def ask(x, context='', is_input_full=False):
37
  ans = pipe(