Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -9,11 +9,11 @@ import json
|
|
# --- Model setup (pre-quantization revision) ---
# NOTE(review): AutoTokenizer / AutoModelForCausalLM / openai are imported in
# app.py lines 1-8, outside this hunk — confirm against the full file.

# Load the Vicuna 7B model and tokenizer (full precision; ~14 GB of weights —
# presumably the cause of the Space's "Runtime error" out-of-memory status).
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")

# Load the LLaMA 2 7B chat model and tokenizer.
llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf")

# Configure the OpenAI client from the environment. This raises KeyError at
# import time if OPENAI_API_KEY is unset — fail-fast is intentional, so the
# former redundant bare lookup (`os.environ['OPENAI_API_KEY']` as a standalone
# statement) has been removed; it was dead code with the identical effect.
openai.api_key = os.environ['OPENAI_API_KEY']
|
|
|
|
# --- Model setup (4-bit quantized revision) ---
# NOTE(review): AutoTokenizer / AutoModelForCausalLM / openai are imported in
# app.py lines 1-8, outside this hunk — confirm against the full file.
# NOTE(review): the bare `load_in_4bit=True` kwarg requires the bitsandbytes
# package and is deprecated in recent transformers in favor of
# `quantization_config=BitsAndBytesConfig(load_in_4bit=True)` — verify the
# pinned transformers version before changing it.

# Load the Vicuna 7B model and tokenizer, quantized to 4-bit to fit the
# Space's memory budget.
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3", load_in_4bit=True)

# Load the LLaMA 2 7B chat model and tokenizer, likewise in 4-bit.
llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf", load_in_4bit=True)

# Configure the OpenAI client from the environment. This raises KeyError at
# import time if OPENAI_API_KEY is unset — fail-fast is intentional, so the
# former redundant bare lookup (`os.environ['OPENAI_API_KEY']` as a standalone
# statement) has been removed; it was dead code with the identical effect.
openai.api_key = os.environ['OPENAI_API_KEY']
|