sksameermujahid committed on
Commit
5ceec4b
·
verified ·
1 Parent(s): a7987ea

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +384 -100
  2. templates/index.html +96 -0
app.py CHANGED
@@ -5,8 +5,20 @@ import os
5
  import logging
6
  import requests
7
  import threading
8
- from queue import Queue
9
  from functools import lru_cache
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  app = Flask(__name__)
12
  logging.basicConfig(level=logging.INFO)
@@ -14,14 +26,133 @@ logging.basicConfig(level=logging.INFO)
14
  CACHE_DIR = "/data"
15
  model_name = "Qwen/Qwen2.5-0.5B-Instruct"
16
 
17
- # Load model and tokenizer
18
- logging.info("Loading tokenizer...")
19
- tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=CACHE_DIR, trust_remote_code=True)
20
- logging.info("Loading model...")
21
- model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir=CACHE_DIR, trust_remote_code=True)
22
- logging.info("Creating pipeline...")
23
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
24
- logging.info("Model ready.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  # Fallback description text
27
  def get_fallback_description(details):
@@ -35,81 +166,135 @@ def get_fallback_description(details):
35
  nearby_info = ""
36
  lat = details.get('latitude')
37
  lon = details.get('longitude')
38
- if lat and lon:
39
- try:
40
- lat_f, lon_f = float(lat), float(lon)
41
- result_queue = Queue()
42
- thread = threading.Thread(target=fetch_nearby_amenities_thread, args=(lat_f, lon_f, 2500, result_queue))
43
- thread.start()
44
- thread.join(timeout=8)
45
- landmarks = result_queue.get() if not result_queue.empty() else []
46
-
47
- if landmarks:
48
- amenities = [x for x in landmarks if any(term in x.lower() for term in ['school', 'hospital', 'restaurant', 'pharmacy', 'bank', 'atm', 'cafe', 'bus', 'fuel', 'supermarket', 'police', 'post'])]
49
- landmarks = [x for x in landmarks if x not in amenities]
50
 
51
- if amenities:
52
- nearby_info += f" The property is conveniently located near {', '.join(amenities[:2])}."
53
  if landmarks:
54
- nearby_info += f" Notable landmarks include {', '.join(landmarks[:2])}."
55
- except Exception as e:
56
- logging.error(f"Fallback amenity fetch error: {str(e)}")
57
- pass
 
 
 
 
 
 
58
 
59
  if category in ['agricultural', 'commercial']:
60
  return f'''A prime agricultural property located at {address}. This expansive land spans {area} sq. ft., making it ideal for farming or agricultural development. The property is priced at ₹{price} and features {rooms} storage/utility rooms. The land is well-irrigated and offers excellent soil quality, perfect for various agricultural activities.{nearby_info} Whether you're looking to start a farm or expand your agricultural operations, this property provides the perfect foundation for your agricultural ventures.'''
61
  else: # Commercial
62
  return f'''A premium commercial property situated in the heart of {address}. This modern commercial space spans {area} sq. ft., offering a versatile layout with {rooms} rooms for optimal business operations. Priced at ₹{price}, this property features excellent visibility and accessibility.{nearby_info} The space is designed to accommodate various business needs, from retail to office use, with modern amenities and professional atmosphere. Whether you're establishing a new business or expanding existing operations, this location provides the perfect blend of convenience and functionality.'''
63
 
64
- @lru_cache(maxsize=100)
65
- def fetch_nearby_amenities_thread(lat, lon, radius, result_queue):
66
- try:
67
- overpass_url = "https://overpass-api.de/api/interpreter"
68
- query = f"""
69
- [out:json][timeout:10];
70
- (
71
- node["amenity"~"school|hospital|restaurant|temple|mandir|hotel|pharmacy|bank|atm|cafe|bus_station|fuel|supermarket|police|post_office"](around:{radius},{lat},{lon});
72
- node["tourism"~"hotel|museum|gallery|viewpoint|attraction"](around:{radius},{lat},{lon});
73
- node["historic"~"monument|memorial|archaeological_site|castle|ruins"](around:{radius},{lat},{lon});
74
- node["leisure"~"park|garden|sports_centre|stadium"](around:{radius},{lat},{lon});
75
- );
76
- out body;
77
- """
78
- response = requests.post(overpass_url, data={'data': query}, timeout=10)
79
- response.raise_for_status()
80
- data = response.json()
81
- landmarks = []
82
- seen_names = set()
 
 
 
 
 
 
 
 
 
 
83
 
84
- for element in data.get('elements', []):
85
- tags = element.get('tags', {})
86
- name = tags.get('name')
87
- if not name or name in seen_names:
88
- continue
89
- seen_names.add(name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
- place_type = None
92
- if tags.get('tourism'):
93
- place_type = tags['tourism'].replace('_', ' ').title()
94
- elif tags.get('historic'):
95
- place_type = tags['historic'].replace('_', ' ').title()
96
- elif tags.get('leisure'):
97
- place_type = tags['leisure'].replace('_', ' ').title()
98
- elif tags.get('amenity'):
99
- place_type = tags['amenity'].replace('_', ' ').title()
100
 
101
- if place_type:
102
- landmarks.append((f"{place_type} '{name}'", len(name)))
103
-
104
- # Sort by name length and take top results
105
- landmarks.sort(key=lambda x: x[1], reverse=True)
106
- result_queue.put([x[0] for x in landmarks])
107
- except Exception as e:
108
- logging.error(f"Overpass API error: {str(e)}")
109
- result_queue.put([])
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
111
  def generate_description(details):
 
 
112
  try:
 
 
 
 
 
 
 
113
  category = details.get('parent_category', '').lower()
114
 
115
  # Basic info - only include beds/rooms for residential properties
@@ -128,19 +313,16 @@ def generate_description(details):
128
  if baths: parts.append(bath_info)
129
  extra_info = "This property includes " + " and ".join(parts) + "."
130
 
131
- # Amenities thread with shorter timeout
132
  amenities_str = "None found nearby"
133
  landmarks_str = "None found nearby"
134
  lat = details.get('latitude')
135
  lon = details.get('longitude')
 
136
  if lat and lon:
137
  try:
138
  lat_f, lon_f = float(lat), float(lon)
139
- result_queue = Queue()
140
- thread = threading.Thread(target=fetch_nearby_amenities_thread, args=(lat_f, lon_f, 2000, result_queue))
141
- thread.start()
142
- thread.join(timeout=8)
143
- landmarks = result_queue.get() if not result_queue.empty() else []
144
 
145
  # Split into amenities and landmarks
146
  amenities = [x for x in landmarks if any(term in x.lower() for term in ['school', 'hospital', 'restaurant', 'pharmacy', 'bank', 'atm', 'cafe', 'bus', 'fuel', 'supermarket', 'police', 'post'])]
@@ -181,34 +363,36 @@ def generate_description(details):
181
  prompt += f"\n\n{extra_info}"
182
  prompt += "\nWrite a natural, complete property description focusing on urban and property characteristics only.\nDescription:"
183
 
184
- # Run generation in thread with longer timeout
185
- result_queue = Queue()
186
- def run_generation(q):
187
- try:
188
- out = generator(prompt, max_new_tokens=200, temperature=0.5)[0]['generated_text']
189
- q.put(out)
190
- except Exception as e:
191
- q.put(f"Error: {e}")
192
-
193
- gen_thread = threading.Thread(target=run_generation, args=(result_queue,))
194
- gen_thread.start()
195
- gen_thread.join(timeout=45) # Increased timeout to 45 seconds
196
-
197
- # Return dynamic fallback if generation takes too long or fails
198
- if result_queue.empty():
199
  logging.warning("Generation timeout - using fallback description")
200
- return get_fallback_description(details)
201
-
202
- result = result_queue.get()
203
- if result.startswith("Error:"):
204
- logging.warning(f"Generation error - using fallback description: {result}")
205
- return get_fallback_description(details)
206
-
207
- return result.split('Description:')[-1].strip() if 'Description:' in result else result.strip()
 
 
 
 
208
 
209
  except Exception as e:
210
  logging.error(f"Generation error: {str(e)}")
211
- return FALLBACK_DESCRIPTION
 
212
 
213
  @app.route('/', methods=['GET', 'POST'])
214
  def index():
@@ -258,6 +442,106 @@ def generate():
258
 
259
  return {"description": desc + extra_info}
260
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
  if __name__ == '__main__':
262
  port = int(os.environ.get('PORT', 7860))
263
  logging.info(f"Starting app on port {port}")
 
5
  import logging
6
  import requests
7
  import threading
8
+ from queue import Queue, PriorityQueue
9
  from functools import lru_cache
10
+ import concurrent.futures
11
+ import asyncio
12
+ import aiohttp
13
+ import hashlib
14
+ import pickle
15
+ import time
16
+ import psutil
17
+ from threading import Lock
18
+ from dataclasses import dataclass
19
+ from enum import Enum
20
+ import heapq
21
+ import json
22
 
23
  app = Flask(__name__)
24
  logging.basicConfig(level=logging.INFO)
 
26
  CACHE_DIR = "/data"
27
  model_name = "Qwen/Qwen2.5-0.5B-Instruct"
28
 
29
+ # Performance monitoring
30
+ class PerformanceMonitor:
31
+ def __init__(self):
32
+ self.metrics = {
33
+ 'request_count': 0,
34
+ 'avg_response_time': 0,
35
+ 'memory_usage': 0,
36
+ 'cpu_usage': 0,
37
+ 'cache_hits': 0,
38
+ 'cache_misses': 0
39
+ }
40
+ self.lock = Lock()
41
+
42
+ def track_request(self, start_time, end_time, cache_hit=False):
43
+ with self.lock:
44
+ response_time = end_time - start_time
45
+ self.metrics['request_count'] += 1
46
+ self.metrics['avg_response_time'] = (
47
+ (self.metrics['avg_response_time'] * (self.metrics['request_count'] - 1) + response_time)
48
+ / self.metrics['request_count']
49
+ )
50
+ self.metrics['memory_usage'] = psutil.virtual_memory().percent
51
+ self.metrics['cpu_usage'] = psutil.cpu_percent()
52
+ if cache_hit:
53
+ self.metrics['cache_hits'] += 1
54
+ else:
55
+ self.metrics['cache_misses'] += 1
56
+
57
+ # Advanced caching system
58
+ class SmartCache:
59
+ def __init__(self, max_size=1000):
60
+ self.cache = {}
61
+ self.max_size = max_size
62
+ self.lock = Lock()
63
+ self.access_times = {}
64
+
65
+ def cache_key(self, data):
66
+ """Generate cache key from data"""
67
+ return hashlib.md5(str(sorted(data.items())).encode()).hexdigest()
68
+
69
+ def get(self, key):
70
+ """Get from cache with LRU eviction"""
71
+ with self.lock:
72
+ if key in self.cache:
73
+ self.access_times[key] = time.time()
74
+ return self.cache[key]
75
+ return None
76
+
77
+ def set(self, key, value):
78
+ """Set cache with LRU eviction"""
79
+ with self.lock:
80
+ if len(self.cache) >= self.max_size and key not in self.cache:
81
+ # Remove least recently used item
82
+ lru_key = min(self.access_times.keys(), key=lambda k: self.access_times[k])
83
+ del self.cache[lru_key]
84
+ del self.access_times[lru_key]
85
+
86
+ self.cache[key] = value
87
+ self.access_times[key] = time.time()
88
+
89
+ # Model Manager with Thread Pool
90
+ class ModelManager:
91
+ def __init__(self, model_name, max_workers=3):
92
+ self.model_name = model_name
93
+ self.model_pool = Queue(maxsize=max_workers)
94
+ self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
95
+ self._initialize_models(max_workers)
96
+
97
+ def _initialize_models(self, max_workers):
98
+ """Pre-load multiple model instances"""
99
+ logging.info(f"Initializing {max_workers} model instances...")
100
+ for i in range(max_workers):
101
+ try:
102
+ tokenizer = AutoTokenizer.from_pretrained(self.model_name, cache_dir=CACHE_DIR, trust_remote_code=True)
103
+ model = AutoModelForCausalLM.from_pretrained(self.model_name, cache_dir=CACHE_DIR, trust_remote_code=True)
104
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
105
+ self.model_pool.put(generator)
106
+ logging.info(f"Model instance {i+1} loaded successfully")
107
+ except Exception as e:
108
+ logging.error(f"Failed to load model instance {i+1}: {e}")
109
+
110
+ def generate_async(self, prompt, max_new_tokens=200, temperature=0.5):
111
+ """Submit generation task to thread pool"""
112
+ def _generate_with_model():
113
+ try:
114
+ generator = self.model_pool.get()
115
+ result = generator(prompt, max_new_tokens=max_new_tokens, temperature=temperature)[0]['generated_text']
116
+ self.model_pool.put(generator) # Return model to pool
117
+ return result
118
+ except Exception as e:
119
+ logging.error(f"Model generation error: {e}")
120
+ return f"Error: {e}"
121
+
122
+ return self.executor.submit(_generate_with_model)
123
+
124
+ def warmup_models(self):
125
+ """Warm up models with sample prompts"""
126
+ sample_prompts = [
127
+ "Generate a property description for a residential apartment",
128
+ "Generate a property description for a commercial office space",
129
+ "Generate a property description for agricultural land"
130
+ ]
131
+
132
+ futures = []
133
+ for prompt in sample_prompts:
134
+ future = self.generate_async(prompt, max_new_tokens=50)
135
+ futures.append(future)
136
+
137
+ # Wait for warmup to complete
138
+ for future in futures:
139
+ try:
140
+ future.result(timeout=30)
141
+ except Exception as e:
142
+ logging.warning(f"Warmup failed: {e}")
143
+
144
+ logging.info("Model warmup completed")
145
+
146
+ # Initialize components
147
+ performance_monitor = PerformanceMonitor()
148
+ cache = SmartCache()
149
+ model_manager = ModelManager(model_name)
150
+
151
+ # Warmup models in background
152
+ warmup_thread = threading.Thread(target=model_manager.warmup_models, daemon=True)
153
+ warmup_thread.start()
154
+
155
+ logging.info("Enhanced system initialized.")
156
 
157
  # Fallback description text
158
  def get_fallback_description(details):
 
166
  nearby_info = ""
167
  lat = details.get('latitude')
168
  lon = details.get('longitude')
169
+ if lat and lon:
170
+ try:
171
+ lat_f, lon_f = float(lat), float(lon)
172
+ landmarks = amenity_fetcher.fetch_amenities_parallel(lat_f, lon_f, 2500)
 
 
 
 
 
 
 
 
173
 
 
 
174
  if landmarks:
175
+ amenities = [x for x in landmarks if any(term in x.lower() for term in ['school', 'hospital', 'restaurant', 'pharmacy', 'bank', 'atm', 'cafe', 'bus', 'fuel', 'supermarket', 'police', 'post'])]
176
+ landmarks = [x for x in landmarks if x not in amenities]
177
+
178
+ if amenities:
179
+ nearby_info += f" The property is conveniently located near {', '.join(amenities[:2])}."
180
+ if landmarks:
181
+ nearby_info += f" Notable landmarks include {', '.join(landmarks[:2])}."
182
+ except Exception as e:
183
+ logging.error(f"Fallback amenity fetch error: {str(e)}")
184
+ pass
185
 
186
  if category in ['agricultural', 'commercial']:
187
  return f'''A prime agricultural property located at {address}. This expansive land spans {area} sq. ft., making it ideal for farming or agricultural development. The property is priced at ₹{price} and features {rooms} storage/utility rooms. The land is well-irrigated and offers excellent soil quality, perfect for various agricultural activities.{nearby_info} Whether you're looking to start a farm or expand your agricultural operations, this property provides the perfect foundation for your agricultural ventures.'''
188
  else: # Commercial
189
  return f'''A premium commercial property situated in the heart of {address}. This modern commercial space spans {area} sq. ft., offering a versatile layout with {rooms} rooms for optimal business operations. Priced at ₹{price}, this property features excellent visibility and accessibility.{nearby_info} The space is designed to accommodate various business needs, from retail to office use, with modern amenities and professional atmosphere. Whether you're establishing a new business or expanding existing operations, this location provides the perfect blend of convenience and functionality.'''
190
 
191
+ # Enhanced Parallel Amenity Fetcher
192
+ class ParallelAmenityFetcher:
193
+ def __init__(self, max_workers=5):
194
+ self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
195
+ self.cache = {}
196
+ self.cache_lock = Lock()
197
+
198
+ def fetch_amenities_parallel(self, lat, lon, radius=2000):
199
+ """Fetch amenities using parallel requests with caching"""
200
+ cache_key = f"{lat}_{lon}_{radius}"
201
+
202
+ # Check cache first
203
+ with self.cache_lock:
204
+ if cache_key in self.cache:
205
+ return self.cache[cache_key]
206
+
207
+ # Create multiple parallel requests for different amenity types
208
+ amenity_types = [
209
+ "school|hospital",
210
+ "restaurant|pharmacy|bank|atm",
211
+ "tourism|historic|leisure",
212
+ "fuel|supermarket|police|post_office",
213
+ "temple|mandir|hotel|cafe|bus_station"
214
+ ]
215
+
216
+ futures = []
217
+ for amenity_type in amenity_types:
218
+ future = self.executor.submit(self._fetch_amenity_type, lat, lon, radius, amenity_type)
219
+ futures.append(future)
220
 
221
+ # Collect results
222
+ all_amenities = []
223
+ for future in futures:
224
+ try:
225
+ result = future.result(timeout=8)
226
+ if result:
227
+ all_amenities.extend(result)
228
+ except Exception as e:
229
+ logging.error(f"Amenity fetch error: {e}")
230
+
231
+ # Cache result
232
+ with self.cache_lock:
233
+ self.cache[cache_key] = all_amenities
234
+
235
+ return all_amenities
236
+
237
+ def _fetch_amenity_type(self, lat, lon, radius, amenity_type):
238
+ """Fetch specific amenity type"""
239
+ try:
240
+ overpass_url = "https://overpass-api.de/api/interpreter"
241
+ query = f"""
242
+ [out:json][timeout:8];
243
+ (
244
+ node["amenity"~"{amenity_type}"](around:{radius},{lat},{lon});
245
+ node["tourism"~"{amenity_type}"](around:{radius},{lat},{lon});
246
+ node["historic"~"{amenity_type}"](around:{radius},{lat},{lon});
247
+ node["leisure"~"{amenity_type}"](around:{radius},{lat},{lon});
248
+ );
249
+ out body;
250
+ """
251
+ response = requests.post(overpass_url, data={'data': query}, timeout=8)
252
+ response.raise_for_status()
253
+ data = response.json()
254
+ landmarks = []
255
+ seen_names = set()
256
 
257
+ for element in data.get('elements', []):
258
+ tags = element.get('tags', {})
259
+ name = tags.get('name')
260
+ if not name or name in seen_names:
261
+ continue
262
+ seen_names.add(name)
 
 
 
263
 
264
+ place_type = None
265
+ if tags.get('tourism'):
266
+ place_type = tags['tourism'].replace('_', ' ').title()
267
+ elif tags.get('historic'):
268
+ place_type = tags['historic'].replace('_', ' ').title()
269
+ elif tags.get('leisure'):
270
+ place_type = tags['leisure'].replace('_', ' ').title()
271
+ elif tags.get('amenity'):
272
+ place_type = tags['amenity'].replace('_', ' ').title()
273
+
274
+ if place_type:
275
+ landmarks.append((f"{place_type} '{name}'", len(name)))
276
+
277
+ # Sort by name length and take top results
278
+ landmarks.sort(key=lambda x: x[1], reverse=True)
279
+ return [x[0] for x in landmarks]
280
+ except Exception as e:
281
+ logging.error(f"Overpass API error for {amenity_type}: {str(e)}")
282
+ return []
283
+
284
+ # Initialize amenity fetcher
285
+ amenity_fetcher = ParallelAmenityFetcher()
286
 
287
  def generate_description(details):
288
+ start_time = time.time()
289
+
290
  try:
291
+ # Check cache first
292
+ cache_key = cache.cache_key(details)
293
+ cached_result = cache.get(cache_key)
294
+ if cached_result:
295
+ performance_monitor.track_request(start_time, time.time(), cache_hit=True)
296
+ return cached_result
297
+
298
  category = details.get('parent_category', '').lower()
299
 
300
  # Basic info - only include beds/rooms for residential properties
 
313
  if baths: parts.append(bath_info)
314
  extra_info = "This property includes " + " and ".join(parts) + "."
315
 
316
+ # Parallel amenity fetching
317
  amenities_str = "None found nearby"
318
  landmarks_str = "None found nearby"
319
  lat = details.get('latitude')
320
  lon = details.get('longitude')
321
+
322
  if lat and lon:
323
  try:
324
  lat_f, lon_f = float(lat), float(lon)
325
+ landmarks = amenity_fetcher.fetch_amenities_parallel(lat_f, lon_f, 2000)
 
 
 
 
326
 
327
  # Split into amenities and landmarks
328
  amenities = [x for x in landmarks if any(term in x.lower() for term in ['school', 'hospital', 'restaurant', 'pharmacy', 'bank', 'atm', 'cafe', 'bus', 'fuel', 'supermarket', 'police', 'post'])]
 
363
  prompt += f"\n\n{extra_info}"
364
  prompt += "\nWrite a natural, complete property description focusing on urban and property characteristics only.\nDescription:"
365
 
366
+ # Use model manager for async generation
367
+ try:
368
+ future = model_manager.generate_async(prompt, max_new_tokens=200, temperature=0.5)
369
+ result = future.result(timeout=30) # Reduced timeout to 30 seconds
370
+
371
+ if result.startswith("Error:"):
372
+ logging.warning(f"Generation error - using fallback description: {result}")
373
+ result = get_fallback_description(details)
374
+ else:
375
+ result = result.split('Description:')[-1].strip() if 'Description:' in result else result.strip()
376
+
377
+ except concurrent.futures.TimeoutError:
 
 
 
378
  logging.warning("Generation timeout - using fallback description")
379
+ result = get_fallback_description(details)
380
+ except Exception as e:
381
+ logging.error(f"Generation error: {str(e)}")
382
+ result = get_fallback_description(details)
383
+
384
+ # Cache the result
385
+ cache.set(cache_key, result)
386
+
387
+ # Track performance
388
+ performance_monitor.track_request(start_time, time.time(), cache_hit=False)
389
+
390
+ return result
391
 
392
  except Exception as e:
393
  logging.error(f"Generation error: {str(e)}")
394
+ performance_monitor.track_request(start_time, time.time(), cache_hit=False)
395
+ return get_fallback_description(details)
396
 
397
  @app.route('/', methods=['GET', 'POST'])
398
  def index():
 
442
 
443
  return {"description": desc + extra_info}
444
 
445
+ @app.route('/generate_batch', methods=['POST'])
446
+ def generate_batch():
447
+ """Generate descriptions for multiple properties in parallel"""
448
+ if not request.is_json:
449
+ return {"error": "Request must be JSON"}, 400
450
+
451
+ data = request.get_json()
452
+ properties = data.get('properties', [])
453
+
454
+ if not properties:
455
+ return {"error": "No properties provided"}, 400
456
+
457
+ if len(properties) > 10:
458
+ return {"error": "Maximum 10 properties allowed per batch"}, 400
459
+
460
+ # Validate all properties
461
+ for i, prop in enumerate(properties):
462
+ if not prop.get('property_name'):
463
+ return {"error": f"Property {i+1} missing required field: property_name"}, 400
464
+
465
+ # Generate descriptions in parallel
466
+ with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(properties), 5)) as executor:
467
+ futures = {executor.submit(generate_description, prop): prop for prop in properties}
468
+ results = []
469
+
470
+ for future in concurrent.futures.as_completed(futures):
471
+ prop = futures[future]
472
+ try:
473
+ desc = future.result()
474
+ category = prop.get('parent_category', '').lower()
475
+
476
+ # Add extra info based on category
477
+ if category not in ['agricultural', 'commercial']:
478
+ info_lines = []
479
+ beds = prop.get('beds', '')
480
+ rooms = prop.get('total_rooms', '')
481
+ baths = prop.get('baths', '')
482
+ if beds and beds != '0':
483
+ info_lines.append(f"Beds: {beds}")
484
+ if rooms and rooms != '0':
485
+ info_lines.append(f"Rooms: {rooms}")
486
+ if baths and baths != '0':
487
+ info_lines.append(f"Baths: {baths}")
488
+ info_lines.append(f"Year: {prop.get('year', '')}")
489
+ info_lines.append(f"Price: ₹{prop.get('price', '')}")
490
+ extra_info = "\n" + "\n".join(info_lines)
491
+ else:
492
+ extra_info = f"\nYear: {prop.get('year', '')}\nPrice: ₹{prop.get('price', '')}"
493
+
494
+ results.append({
495
+ "property_name": prop.get('property_name'),
496
+ "description": desc + extra_info
497
+ })
498
+ except Exception as e:
499
+ results.append({
500
+ "property_name": prop.get('property_name'),
501
+ "error": str(e)
502
+ })
503
+
504
+ return {"results": results}
505
+
506
+ @app.route('/metrics', methods=['GET'])
507
+ def get_metrics():
508
+ """Get performance metrics"""
509
+ with performance_monitor.lock:
510
+ metrics = performance_monitor.metrics.copy()
511
+
512
+ # Add cache statistics
513
+ with cache.lock:
514
+ metrics['cache_size'] = len(cache.cache)
515
+ metrics['cache_max_size'] = cache.max_size
516
+
517
+ # Add system metrics
518
+ metrics['system_memory_percent'] = psutil.virtual_memory().percent
519
+ metrics['system_cpu_percent'] = psutil.cpu_percent()
520
+
521
+ return jsonify(metrics)
522
+
523
+ @app.route('/health', methods=['GET'])
524
+ def health_check():
525
+ """Health check endpoint"""
526
+ try:
527
+ # Test model availability
528
+ test_prompt = "Test prompt"
529
+ future = model_manager.generate_async(test_prompt, max_new_tokens=10)
530
+ future.result(timeout=5)
531
+
532
+ return {
533
+ "status": "healthy",
534
+ "timestamp": time.time(),
535
+ "model_ready": True,
536
+ "cache_ready": True
537
+ }
538
+ except Exception as e:
539
+ return {
540
+ "status": "unhealthy",
541
+ "timestamp": time.time(),
542
+ "error": str(e)
543
+ }, 500
544
+
545
  if __name__ == '__main__':
546
  port = int(os.environ.get('PORT', 7860))
547
  logging.info(f"Starting app on port {port}")
templates/index.html CHANGED
@@ -216,6 +216,7 @@
216
  </div>
217
 
218
  <button type="button" id="generate-btn">Generate Description</button>
 
219
  </form>
220
  <div id="loading" style="display:none; color:#2980b9; margin-top:10px;">Generating description...</div>
221
  <div id="error" style="display:none; color:red; margin-top:10px;"></div>
@@ -223,6 +224,14 @@
223
  <h3>Generated Description:</h3>
224
  <p id="description-text"></p>
225
  </div>
 
 
 
 
 
 
 
 
226
  <script>
227
  document.getElementById('generate-btn').addEventListener('click', async function() {
228
  const form = document.querySelector('form');
@@ -232,6 +241,7 @@
232
  document.getElementById('loading').style.display = 'block';
233
  document.getElementById('error').style.display = 'none';
234
  document.getElementById('description-container').style.display = 'none';
 
235
  try {
236
  const response = await fetch('/generate', {
237
  method: 'POST',
@@ -253,6 +263,92 @@
253
  document.getElementById('error').style.display = 'block';
254
  }
255
  });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
256
  </script>
257
  </div>
258
  </body>
 
216
  </div>
217
 
218
  <button type="button" id="generate-btn">Generate Description</button>
219
+ <button type="button" id="batch-btn" style="background-color: #27ae60; margin-top: 10px;">Generate Batch (Demo)</button>
220
  </form>
221
  <div id="loading" style="display:none; color:#2980b9; margin-top:10px;">Generating description...</div>
222
  <div id="error" style="display:none; color:red; margin-top:10px;"></div>
 
224
  <h3>Generated Description:</h3>
225
  <p id="description-text"></p>
226
  </div>
227
+ <div id="batch-container" style="display:none; margin-top:20px;">
228
+ <h3>Batch Generation Results:</h3>
229
+ <div id="batch-results"></div>
230
+ </div>
231
+ <div id="metrics-container" style="display:none; margin-top:20px; padding:15px; background-color:#f8f9fa; border-radius:4px;">
232
+ <h3>Performance Metrics:</h3>
233
+ <div id="metrics-content"></div>
234
+ </div>
235
  <script>
236
  document.getElementById('generate-btn').addEventListener('click', async function() {
237
  const form = document.querySelector('form');
 
241
  document.getElementById('loading').style.display = 'block';
242
  document.getElementById('error').style.display = 'none';
243
  document.getElementById('description-container').style.display = 'none';
244
+ document.getElementById('batch-container').style.display = 'none';
245
  try {
246
  const response = await fetch('/generate', {
247
  method: 'POST',
 
263
  document.getElementById('error').style.display = 'block';
264
  }
265
  });
266
+
267
+ document.getElementById('batch-btn').addEventListener('click', async function() {
268
+ const form = document.querySelector('form');
269
+ const formData = new FormData(form);
270
+ const baseData = {};
271
+ formData.forEach((value, key) => { baseData[key] = value; });
272
+
273
+ // Create sample batch data
274
+ const batchData = {
275
+ properties: [
276
+ { ...baseData, property_name: baseData.property_name + ' - Unit A' },
277
+ { ...baseData, property_name: baseData.property_name + ' - Unit B' },
278
+ { ...baseData, property_name: baseData.property_name + ' - Unit C' }
279
+ ]
280
+ };
281
+
282
+ document.getElementById('loading').style.display = 'block';
283
+ document.getElementById('error').style.display = 'none';
284
+ document.getElementById('description-container').style.display = 'none';
285
+ document.getElementById('batch-container').style.display = 'none';
286
+
287
+ try {
288
+ const response = await fetch('/generate_batch', {
289
+ method: 'POST',
290
+ headers: { 'Content-Type': 'application/json' },
291
+ body: JSON.stringify(batchData)
292
+ });
293
+ const result = await response.json();
294
+ document.getElementById('loading').style.display = 'none';
295
+
296
+ if (response.ok && result.results) {
297
+ const resultsDiv = document.getElementById('batch-results');
298
+ resultsDiv.innerHTML = '';
299
+
300
+ result.results.forEach((item, index) => {
301
+ const itemDiv = document.createElement('div');
302
+ itemDiv.style.marginBottom = '20px';
303
+ itemDiv.style.padding = '15px';
304
+ itemDiv.style.border = '1px solid #ddd';
305
+ itemDiv.style.borderRadius = '4px';
306
+ itemDiv.style.backgroundColor = '#f9f9f9';
307
+
308
+ if (item.error) {
309
+ itemDiv.innerHTML = `<h4>${item.property_name}</h4><p style="color: red;">Error: ${item.error}</p>`;
310
+ } else {
311
+ itemDiv.innerHTML = `<h4>${item.property_name}</h4><p>${item.description}</p>`;
312
+ }
313
+
314
+ resultsDiv.appendChild(itemDiv);
315
+ });
316
+
317
+ document.getElementById('batch-container').style.display = 'block';
318
+ } else {
319
+ document.getElementById('error').textContent = result.error || 'Failed to generate batch descriptions.';
320
+ document.getElementById('error').style.display = 'block';
321
+ }
322
+ } catch (e) {
323
+ document.getElementById('loading').style.display = 'none';
324
+ document.getElementById('error').textContent = 'An error occurred during batch generation.';
325
+ document.getElementById('error').style.display = 'block';
326
+ }
327
+ });
328
+
329
+ // Load metrics on page load
330
+ async function loadMetrics() {
331
+ try {
332
+ const response = await fetch('/metrics');
333
+ const metrics = await response.json();
334
+
335
+ const metricsContent = document.getElementById('metrics-content');
336
+ metricsContent.innerHTML = `
337
+ <p><strong>Total Requests:</strong> ${metrics.request_count || 0}</p>
338
+ <p><strong>Average Response Time:</strong> ${(metrics.avg_response_time || 0).toFixed(2)}s</p>
339
+ <p><strong>Cache Hit Rate:</strong> ${metrics.cache_hits || 0}/${(metrics.cache_hits || 0) + (metrics.cache_misses || 0)}</p>
340
+ <p><strong>Cache Size:</strong> ${metrics.cache_size || 0}/${metrics.cache_max_size || 0}</p>
341
+ <p><strong>Memory Usage:</strong> ${(metrics.system_memory_percent || 0).toFixed(1)}%</p>
342
+ <p><strong>CPU Usage:</strong> ${(metrics.system_cpu_percent || 0).toFixed(1)}%</p>
343
+ `;
344
+ document.getElementById('metrics-container').style.display = 'block';
345
+ } catch (e) {
346
+ console.log('Could not load metrics:', e);
347
+ }
348
+ }
349
+
350
+ // Load metrics when page loads
351
+ window.addEventListener('load', loadMetrics);
352
  </script>
353
  </div>
354
  </body>