Update app.py
app.py
CHANGED
@@ -87,23 +87,34 @@ class SafeGeocoder:
 def load_model():
     global tokenizer, model
     try:
-
-
+        # Try to import Qwen2 components from modelscope
+        try:
+            from modelscope import AutoTokenizer as MSAutoTokenizer
+            tokenizer = MSAutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
+            model = AutoModelForCausalLM.from_pretrained(
+                MODEL_NAME,
+                torch_dtype=TORCH_DTYPE,
+                trust_remote_code=True
+            ).to(DEVICE).eval()
+            print("Loaded model using modelscope AutoTokenizer")
+        except:
+            # If modelscope approach fails, try with specific revision
             tokenizer = AutoTokenizer.from_pretrained(
                 MODEL_NAME,
-                trust_remote_code=True
+                trust_remote_code=True,
+                revision="main"  # Try specifying a revision
             )
 
-        # Load model exactly as shown in the usage example
             model = AutoModelForCausalLM.from_pretrained(
                 MODEL_NAME,
                 torch_dtype=TORCH_DTYPE,
-                trust_remote_code=True
+                trust_remote_code=True,
+                revision="main"  # Try specifying a revision
             ).to(DEVICE).eval()
 
-
-
-
+        print(f"✅ Loaded {MODEL_NAME} on {DEVICE}")
+
+        # Test the model
         test_text = "Test in Berlin."
         test_template = '{"test_location": ""}'
         test_template_formatted = json.dumps(json.loads(test_template), indent=4)

@@ -128,13 +139,13 @@ def load_model():
         return "✅ Modell erfolgreich geladen und getestet!"
 
         return "⚠️ Modell-Test nicht erfolgreich. Bitte versuchen Sie es erneut."
-
+
     except Exception as e:
         import traceback
         trace = traceback.format_exc()
         print(f"Error loading model: {e}\n{trace}")
         return f"❌ Fehler beim Laden des Modells: {str(e)}"
-
+
 @spaces.GPU
 def extract_info(template, text):
     global tokenizer, model
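For reference, the loading strategy this commit introduces can be exercised outside the Space as well: try the modelscope tokenizer first, then fall back to the plain transformers classes pinned to revision="main". The sketch below is a minimal, self-contained version of that pattern, not the app itself: the model id, dtype, and device values are illustrative assumptions (app.py defines MODEL_NAME, TORCH_DTYPE, and DEVICE elsewhere in the file), and it catches Exception where the commit uses a bare except:.

# Minimal sketch of the commit's loading strategy.
# Assumptions (not from app.py): the model id below is a placeholder,
# and Exception is caught instead of the commit's bare "except:".
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "Qwen/Qwen2-0.5B-Instruct"   # placeholder model id
TORCH_DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

def load_model():
    try:
        # First attempt: load the tokenizer through modelscope.
        from modelscope import AutoTokenizer as MSAutoTokenizer
        tokenizer = MSAutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=TORCH_DTYPE,
            trust_remote_code=True,
        ).to(DEVICE).eval()
        print("Loaded model using modelscope AutoTokenizer")
    except Exception:
        # Fallback: plain transformers, pinned to the "main" revision.
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
            revision="main",
        )
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=TORCH_DTYPE,
            trust_remote_code=True,
            revision="main",
        ).to(DEVICE).eval()
    print(f"Loaded {MODEL_NAME} on {DEVICE}")
    return tokenizer, model

tokenizer, model = load_model()

Note that revision="main" simply makes the default branch explicit; the point of the fallback is to retry the standard transformers path whenever the modelscope import or download fails.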