add google token
app.py
CHANGED
@@ -1,4 +1,5 @@
 import io
+import os
 import streamlit as st
 from PyPDF2 import PdfReader
 from langchain.text_splitter import CharacterTextSplitter
@@ -37,10 +38,15 @@ def create_knowledge_base(chunks):
 
 # Load the Hugging Face model
 def load_model():
-    model_name = "google/gemma-2-2b" #
-
-
-
+    model_name = "google/gemma-2-2b"  # Hugging Face model ID
+    access_token = os.getenv("HF_TOKEN")
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token, clean_up_tokenization_spaces=False)
+        model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=access_token)
+        return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150, temperature=0.1)
+    except Exception as e:
+        print(f"Error loading model: {e}")
+        return None
 
 # Set up the QA chain
 def setup_qa_chain():
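
The change reads the access token from the HF_TOKEN environment variable (on a Space this would typically be a repository secret) so the gated google/gemma-2-2b weights can be downloaded when the model is loaded. Below is a minimal standalone sketch of what the updated load_model() builds, assuming the AutoTokenizer, AutoModelForCausalLM, and pipeline imports that app.py presumably has elsewhere, and a hypothetical prompt string; recent transformers releases also accept token= in place of the deprecated use_auth_token= used in the diff.

import os
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Assumption: HF_TOKEN is set in the environment and grants access to the
# gated google/gemma-2-2b repository.
access_token = os.getenv("HF_TOKEN")
model_name = "google/gemma-2-2b"

tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token)
model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=access_token)

# Same generation settings as the diff: short, low-temperature completions.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer,
                     max_new_tokens=150, temperature=0.1)

# Hypothetical prompt, only to show how the returned pipeline is called.
print(generator("Summarize the uploaded PDF in one sentence.")[0]["generated_text"])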
|