Update app.py
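This commit stops loading the Unsloth PEFT model eagerly at import time and instead loads it lazily inside classify_website(), on the first request.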
app.py CHANGED
@@ -18,14 +18,6 @@ load_in_4bit = True # Use 4-bit quantization to reduce memory usage
 
 peft_model_name = "limitedonly41/website_qwen2_7b_2"
 
-# Load the model and tokenizer during initialization (in the main process)
-model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name=peft_model_name,
-    max_seq_length=max_seq_length,
-    dtype=dtype,
-    load_in_4bit=load_in_4bit,
-)
-FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
 
 
 def fetch_data(url):
@@ -110,6 +102,18 @@ def classify_website(url):
 
     global model, tokenizer  # Declare model and tokenizer as global variables
 
+    if model is None or tokenizer is None:
+
+        # Load the model and tokenizer on first use (lazy initialization)
+        model, tokenizer = FastLanguageModel.from_pretrained(
+            model_name=peft_model_name,
+            max_seq_length=max_seq_length,
+            dtype=dtype,
+            load_in_4bit=load_in_4bit,
+        )
+        FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
+
+
     urls = [url]
     results_shop = main(urls)
 
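The net effect: nothing is loaded when app.py is imported; classify_website() loads the PEFT model on its first call and caches it in module globals. Note that for the new if model is None check to succeed, model and tokenizer must already be bound at module level (e.g., initialized to None), which the hunks above do not show; without those bindings the first call would raise NameError. Below is a minimal sketch of the pattern under that assumption, using the same FastLanguageModel calls as the diff; the max_seq_length and dtype values and the get_model() helper are illustrative, not part of app.py.

# Minimal sketch of the lazy-initialization pattern in this commit.
# Assumptions: the `model = None` / `tokenizer = None` bindings and the
# `get_model` helper are not shown in the diff and are added here for clarity.
from unsloth import FastLanguageModel

max_seq_length = 2048   # assumed value; defined earlier in app.py
dtype = None            # assumed; None lets Unsloth auto-detect the dtype
load_in_4bit = True     # Use 4-bit quantization to reduce memory usage
peft_model_name = "limitedonly41/website_qwen2_7b_2"

model = None            # deferred: nothing is loaded at import time
tokenizer = None

def get_model():
    """Load the PEFT model once, on first use, and cache it in module globals."""
    global model, tokenizer
    if model is None or tokenizer is None:
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=peft_model_name,
            max_seq_length=max_seq_length,
            dtype=dtype,
            load_in_4bit=load_in_4bit,
        )
        FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
    return model, tokenizer

The trade-off is the usual one for lazy initialization: startup (and anything that merely imports app.py) stays light, while the first classification request absorbs the full load time; if the app ever serves concurrent requests, two simultaneous first calls could race on the load unless guarded by a lock.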