Monimoy committed on
Commit
f7b71c7
·
verified ·
1 Parent(s): d5fef5d

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +0 -5
  2. requirements.txt +0 -2
app.py CHANGED
@@ -1,5 +1,3 @@
1
- import torch
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import openvino_genai
4
  import gradio as gr
5
 
@@ -19,9 +17,6 @@ pipe = openvino_genai.LLMPipeline(model=base_model_name, device=device, adapters
19
  print("Generate with LoRA adapter and alpha set to 0.75:")
20
  #print(pipe.generate(args.prompt, max_new_tokens=100, adapters=openvino_genai.AdapterConfig(adapter, 0.75)))
21
 
22
- # Load tokenizer
23
- tokenizer = AutoTokenizer.from_pretrained(base_model_name)
24
- tokenizer.pad_token = tokenizer.eos_token
25
 
26
  # Define prediction function
27
  def generate_response(prompt):
 
 
 
1
  import openvino_genai
2
  import gradio as gr
3
 
 
17
  print("Generate with LoRA adapter and alpha set to 0.75:")
18
  #print(pipe.generate(args.prompt, max_new_tokens=100, adapters=openvino_genai.AdapterConfig(adapter, 0.75)))
19
 
 
 
 
20
 
21
  # Define prediction function
22
  def generate_response(prompt):
requirements.txt CHANGED
@@ -1,5 +1,3 @@
1
- torch
2
  gradio
3
- transformers
4
  huggingface_hub
5
  openvino-genai
 
 
1
  gradio
 
2
  huggingface_hub
3
  openvino-genai