FadQ committed on
Commit
8bbf586
·
verified ·
1 Parent(s): 296b978

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -1,21 +1,21 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  from peft import PeftModel # Ensure PEFT is installed: pip install peft
4
  import os
5
 
6
  # Define the model and base paths
7
- model = "FadQ/gemma-2b-diary-consultaton-chatbot"
8
- # base_model = "google/gemma-2b"
9
 
10
- # # Use your Hugging Face token
11
- # hf_token = os.getenv('HF_TOKEN')
12
 
13
- # # Load tokenizer with authentication
14
- # tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token, force_download=True)
15
 
16
- # # Load the base model and apply adapter with authentication
17
- # base_model = AutoModelForCausalLM.from_pretrained(base_model, device_map="auto", token=hf_token)
18
- # model = PeftModel.from_pretrained(base_model, model_path)
19
 
20
 
21
  # Create pipeline
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, AutoModelForSequenceClassification
3
  from peft import PeftModel # Ensure PEFT is installed: pip install peft
4
  import os
5
 
6
  # Define the model and base paths
7
+ model_path = "FadQ/gemma-2b-diary-consultaton-chatbot"
8
+ base_model = "google/gemma-2b"
9
 
10
+ # Use your Hugging Face token
11
+ hf_token = os.getenv('HF_TOKEN')
12
 
13
+ # Load tokenizer with authentication
14
+ tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token, force_download=True)
15
 
16
+ # Load the base model and apply adapter with authentication
17
+ base_model = AutoModelForCausalLM.from_pretrained(base_model, device_map="auto", token=hf_token)
18
+ model = PeftModel.from_pretrained(base_model, model_path)
19
 
20
 
21
  # Create pipeline