Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-import gradio as gr
import pandas as pd
import json
from sentence_transformers import SentenceTransformer, util
@@ -6,9 +6,13 @@ import torch
import requests
import re
import urllib.parse
-import itertools
import os
-import io

# --- Configuration ---
CATEGORY_JSON_PATH = "categories.json"
@@ -19,104 +23,23 @@ MAX_TECHNOLOGIES_TO_SHOW = 8 # Max technologies relevant to the problem (selecte
MAX_TECHNOLOGY_PAIRS_TO_SEARCH = 5 # Max pairs (from the relevant tech) to use for solution search
MAX_SEARCH_REFERENCES_PER_PAIR = 5 # Max references from the API per pair
SEARCH_API_URL = "https://ychkhan-ptt-endpoints.hf.space/search"

-# --- Global Variables ---
-categories_data = {}
-category_names = []
-category_embeddings = None
-technologies_df = pd.DataFrame()
-technology_embeddings = None # Will store pre-computed embeddings for descriptions
-model = None
-
-###- GOOGLE DRIVE API
-# Check if running in an environment where Google Credentials are set
-# Use placeholder credentials if not found, but functionality will fail
-GOOGLE_CREDENTIALS = os.environ.get("GOOGLE_CREDENTIALS")
-FOLDER_ID = os.getenv("FOLDER_ID") # Optional: Folder ID for uploads
-
-# Only import Google libraries if credentials are potentially available
-if GOOGLE_CREDENTIALS:
-    try:
-        from google.oauth2 import service_account
-        from googleapiclient.discovery import build
-        from googleapiclient.http import MediaIoBaseDownload, MediaIoBaseUpload
-        GOOGLE_API_AVAILABLE = True
-        print("Google API libraries loaded.")
-    except ImportError:
-        print("Warning: Google API libraries not found. Google Drive upload will be disabled.")
-        GOOGLE_API_AVAILABLE = False
-else:
-    print("Warning: GOOGLE_CREDENTIALS environment variable not set. Google Drive upload will be disabled.")
-    GOOGLE_API_AVAILABLE = False
-    # Define dummy functions or handle calls gracefully if needed elsewhere
-    def create_new_file_in_drive(*args, **kwargs):
-        print("Google Drive upload skipped: Credentials not configured.")
-        return None
-
-if GOOGLE_API_AVAILABLE:
-    def create_new_file_in_drive(username, dataframe_to_upload, credentials_json_str, folder_id):
-        """Creates a new CSV file in Google Drive from a pandas DataFrame."""
-        print(f"Attempting to upload results for user: {username}")
-        if not credentials_json_str:
-            print("Error: Google Credentials JSON string is empty.")
-            return None
-        if not folder_id:
-            print("Warning: Google Drive FOLDER_ID not specified. Upload might fail or go to root.")
-            # Decide if you want to default to root or fail
-            # return None # Option: Fail if no folder ID
-
-        try:
-            creds_dict = json.loads(credentials_json_str)
-        except json.JSONDecodeError as e:
-            print(f"Error decoding Google Credentials JSON: {e}")
-            return None
-
-        try:
-            # Load the service account credentials
-            creds = service_account.Credentials.from_service_account_info(creds_dict)
-
-            # Build the Drive API service
-            service = build('drive', 'v3', credentials=creds)
-
-            csv_buffer = io.BytesIO()
-            # Ensure UTF-8 encoding, especially with BOM for Excel compatibility if needed
-            dataframe_to_upload.to_csv(csv_buffer, index=False, sep=';', encoding='utf-8-sig')
-            csv_buffer.seek(0)
-
-            filename = f"rating-results-{username}.csv" # Consider adding a timestamp
-            file_metadata = {'name': filename}
-            if folder_id:
-                file_metadata['parents'] = [folder_id]
-
-            media = MediaIoBaseUpload(csv_buffer, mimetype='text/csv')
-            file = service.files().create(body=file_metadata, media_body=media, fields='id, name, webViewLink').execute()
-
-            print(f"File '{file.get('name')}' created successfully in Google Drive. ID: {file.get('id')}")
-            print(f"Link: {file.get('webViewLink')}") # Optional: print link
-            return file.get('id')
-
-        except Exception as e:
-            print(f"Error during Google Drive upload: {e}")
-            # Consider more specific error handling (e.g., authentication errors)
-            return None
-
-###-
-
-# --- Load Data and Model (Load once at startup) ---
def load_data_and_model():
-    print("Loading data and model...")
    try:
        # Load Categories
-        with open(CATEGORY_JSON_PATH, 'r', encoding='utf-8') as f:
            categories_data = json.load(f)["Category"]
            category_names = list(categories_data.keys())
            category_texts = [f"{name}: {', '.join(keywords)}" for name, keywords in categories_data.items()]
@@ -124,15 +47,12 @@ def load_data_and_model():

        # Load Technologies
        technologies_df = pd.read_excel(TECHNOLOGY_EXCEL_PATH)
-        # Clean column names (remove leading/trailing spaces)
        technologies_df.columns = technologies_df.columns.str.strip()
-        # Ensure required columns exist
        if 'technology' not in technologies_df.columns or 'description' not in technologies_df.columns:
-        technologies_df['category'] = technologies_df.get('category', '').fillna('').astype(str)
        technologies_df['description_clean'] = technologies_df['description'].fillna('').astype(str)
-        technologies_df['tech_id'] = technologies_df.index
        print(f"Loaded {len(technologies_df)} technologies.")

        # Load Sentence Transformer Model
@@ -141,281 +61,180 @@ def load_data_and_model():

        # Pre-compute category embeddings
        print("Computing category embeddings...")
-        category_embeddings = model.encode(category_texts, convert_to_tensor=True)
        print("Category embeddings computed.")

        # Pre-compute technology description embeddings
        print("Computing technology description embeddings...")
        valid_descriptions = technologies_df['description_clean'].tolist()
-        technology_embeddings = model.encode(valid_descriptions, convert_to_tensor=True)
        print(f"Technology description embeddings computed (shape: {technology_embeddings.shape}).")

    except FileNotFoundError as e:
        print(f"ERROR: File not found - {e}. Please ensure '{CATEGORY_JSON_PATH}' and '{TECHNOLOGY_EXCEL_PATH}' exist.")
    except Exception as e:
        print(f"ERROR loading data or model: {e}")

-# --- Helper Functions ---

-    Finds the most relevant category using pre-computed embeddings.
-    This is now primarily for informational output.
-    """
    if not problem_description or not category_names or category_embeddings is None:
-        return None, 0.0
    try:
        problem_embedding = model.encode(problem_description, convert_to_tensor=True)
        cosine_scores = util.pytorch_cos_sim(problem_embedding, category_embeddings)[0]
        best_score, best_idx = torch.max(cosine_scores, dim=0)
-        # Return the best category regardless of threshold, but indicate confidence
        best_category_name = category_names[best_idx.item()]
        best_category_score = best_score.item()

-        # Decide if the match is confident enough to strongly suggest
        is_confident = best_category_score >= CATEGORY_SIMILARITY_THRESHOLD

        return best_category_name, best_category_score, is_confident

    except Exception as e:
        print(f"Error during category finding: {e}")
        return None, 0.0, False

-    """
-    Calculates similarity between the problem description and ALL technology
-    descriptions using pre-computed embeddings, sorts, and returns the top results.
-    Category is no longer used for filtering here.
-    """
    all_tech_data = []
    if technologies_df.empty or technology_embeddings is None or not problem_description:
        print("Warning: Technologies DF, embeddings, or problem description missing.")
        return pd.DataFrame()

    try:
        problem_embedding = model.encode(problem_description, convert_to_tensor=True)

-            # Ensure tech_id is within the bounds of the embeddings tensor
-            if tech_id >= technology_embeddings.shape[0]:
-                print(f"Warning: tech_id {tech_id} is out of bounds for technology_embeddings (shape: {technology_embeddings.shape}). Skipping.")
-                continue
-
-            # Retrieve pre-computed embedding using tech_id
-            tech_embedding = technology_embeddings[tech_id]
-
-            # Calculate similarity score with the problem
-            # Ensure embeddings are compatible (e.g., both are single vectors)
-            if problem_embedding.ndim == 1:
-                problem_embedding_exp = problem_embedding.unsqueeze(0) # Add batch dimension if needed
-            else:
-                problem_embedding_exp = problem_embedding
-
-            if tech_embedding.ndim == 1:
-                tech_embedding_exp = tech_embedding.unsqueeze(0)
-            else:
-                tech_embedding_exp = tech_embedding
-
-            similarity_score = util.pytorch_cos_sim(problem_embedding_exp, tech_embedding_exp)[0][0].item()
-
-            # Store the original row data and the similarity score
-            all_tech_data.append({'data': row.to_dict(), 'similarity_score_problem': similarity_score})
-
-        # Sort technologies based on similarity to the problem (descending)
-        all_tech_data.sort(key=lambda item: item['similarity_score_problem'], reverse=True)
-
-        if not all_tech_data:
-            print("No technologies found or scored.")
-            return pd.DataFrame()
-
-        # Create DataFrame from the top N results
-        # Extract the 'data' part (which is a dict) for DataFrame creation
-        top_tech_rows = [item['data'] for item in all_tech_data[:MAX_TECHNOLOGIES_TO_SHOW]]
-        # Extract the corresponding scores
-        top_tech_scores = [item['similarity_score_problem'] for item in all_tech_data[:MAX_TECHNOLOGIES_TO_SHOW]]
-
-        if not top_tech_rows:
-            return pd.DataFrame()
-
-        relevant_df = pd.DataFrame(top_tech_rows)
-        # Important: Ensure the index aligns if you add the score column later
-        relevant_df = relevant_df.reset_index(drop=True)
-        relevant_df['similarity_score_problem'] = top_tech_scores # Add scores as a new column

        # print(f"Top relevant technologies DF head:\n{relevant_df.head()}") # Debug print
-        return relevant_df

    except Exception as e:
        print(f"Error during technology finding/scoring: {e}")
-        import traceback
        traceback.print_exc() # Print full traceback for debugging
        return pd.DataFrame()

-def find_top_technology_pairs(relevant_technologies_df):
-    """
-    Calculates similarity between pairs of the identified relevant technologies
-    (which were selected based on problem similarity) and returns the top pairs.
-    Uses pre-computed embeddings.
-    """
    if relevant_technologies_df.empty or len(relevant_technologies_df) < 2 or technology_embeddings is None:
-        # print("Warning: Not enough relevant technologies (<2) or embeddings missing for pairing.")
        return []

    pairs_with_scores = []
-    # Use tech_id (which should be the original index) to reliably get embeddings
-    # Check if 'tech_id' column exists in the relevant_technologies_df
    if 'tech_id' not in relevant_technologies_df.columns:

    tech_ids = relevant_technologies_df['tech_id'].tolist()
-    # Create a mapping from tech_id back to the technology name in the relevant subset for easy lookup
    tech_id_to_name = pd.Series(relevant_technologies_df['technology'].values, index=relevant_technologies_df['tech_id']).to_dict()

-    # Generate unique pairs of tech_ids from the relevant list
    for id_a, id_b in itertools.combinations(tech_ids, 2):
        try:
-            # Add boundary checks again just in case
            if id_a >= technology_embeddings.shape[0] or id_b >= technology_embeddings.shape[0]:

            embedding_a = technology_embeddings[id_a]
            embedding_b = technology_embeddings[id_b]

-            # Ensure embeddings are 1D or correctly shaped for cos_sim
-            if embedding_a.ndim > 1: embedding_a = embedding_a.squeeze()
-            if embedding_b.ndim > 1: embedding_b = embedding_b.squeeze()
-            if embedding_a.ndim == 0 or embedding_b.ndim == 0: # Check if squeeze resulted in 0-dim tensor
-                print(f"Warning: Invalid embedding dimension after squeeze for pair ({id_a}, {id_b}). Skipping.")
-                continue

            # Calculate inter-technology similarity
-            inter_similarity = util.pytorch_cos_sim(embedding_a, embedding_b)[0][0].item()

-            # Get technology names using the mapping created earlier
            tech_name_a = tech_id_to_name.get(id_a, f"Unknown Tech (ID:{id_a})")
            tech_name_b = tech_id_to_name.get(id_b, f"Unknown Tech (ID:{id_b})")

-            # Clean names for display/use
            clean_tech_name_a = re.sub(r'^- Title\s*:\s*', '', str(tech_name_a)).strip()
            clean_tech_name_b = re.sub(r'^- Title\s*:\s*', '', str(tech_name_b)).strip()

            pairs_with_scores.append(((clean_tech_name_a, clean_tech_name_b), inter_similarity))

-        except IndexError:
-            print(f"Warning: Could not find pre-computed embedding for index {id_a} or {id_b}. Skipping pair.")
-            continue
        except Exception as e:
            print(f"Error calculating similarity for pair ({id_a}, {id_b}): {e}")
-            import traceback
            traceback.print_exc()
            continue

-    # Sort pairs by inter-similarity score (descending)
    pairs_with_scores.sort(key=lambda item: item[1], reverse=True)

-    return

def search_solutions_for_pairs(problem_description, top_pairs):
-    """
-        return "No relevant technology pairs were identified (need at least 2 relevant technologies). Cannot search for solutions.\n"
-    else: # problem_description must be missing
-        return "Problem description is missing. Cannot search for solutions.\n"

    headers = {'accept': 'application/json'}

    for pair_info in top_pairs:
        pair_names, pair_score = pair_info
        tech_a_name, tech_b_name = pair_names

-        # Construct query for the API
-        # Focus query on tech combination and context (patent/research)
-        # Keep problem description out of the API query unless the API is designed for it
-        # query = f'"{tech_a_name}" AND "{tech_b_name}" patent OR research paper OR application'
-        # More targeted query:
-        query = f'research paper or patent on {tech_a_name} and {tech_b_name} of {problem_description}' # Use snippet of problem

        params = {
            'query': query,
            'max_references': MAX_SEARCH_REFERENCES_PER_PAIR
        }

-        pair_key = f"{tech_a_name} + {tech_b_name}" # Key for storing results
-        print(f"Calling API for pair ({pair_key}): POST {SEARCH_API_URL} with query: {query}") # Log query separately

        try:
-            response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)

-            try:
-                api_response = response.json()
-            except json.JSONDecodeError:
-                err_msg = f"API Error: Invalid JSON response. Status: {response.status_code}, Response text: {response.text[:200]}"
-                print(f"Error decoding JSON response for pair '{pair_key}'. {err_msg}")
-                results[pair_key] = {"score": pair_score, "error": err_msg}
-                continue # Skip to next pair

            search_results = []
            # --- Adapt based on actual API response structure ---
            if isinstance(api_response, list):
-                search_results = api_response
-            elif isinstance(api_response, dict):
            else:
-                # Attempt to extract links if possible, otherwise mark as no results
-                # This part needs adjustment based on observed API responses
-                search_results = [] # Default to empty if format unknown

            # --- End adaptation ---

            valid_links = []
            for r in search_results:

-            results[pair_key] = {
-                "score": pair_score, # Store pair score for context
-                "links": valid_links
-            }

        except requests.exceptions.Timeout:
            print(f"Error: API call timed out for pair '{pair_key}'")

@@ -426,55 +245,57 @@ def search_solutions_for_pairs(problem_description, top_pairs):
        except requests.exceptions.RequestException as e:
            print(f"Error calling search API for pair '{pair_key}': {e}")
            results[pair_key] = {"score": pair_score, "error": f"API Request Error: {e}"}
        except Exception as e:
            err_msg = f"Unexpected Error during API call: {e}"
            print(f"Unexpected error during API call for pair '{pair_key}': {e}")
-            import traceback
            traceback.print_exc()
            results[pair_key] = {"score": pair_score, "error": err_msg}

    # Format results for display
-    output = f"### Potential Solutions & Patents (Found using Top {len(results)} Technology Pairs):\n\n"
    if not results:
-        return

-    # Display results in the order they were searched (already sorted by pair score)
    for pair_key, search_data in results.items():
        pair_score = search_data.get('score', 0.0)
        if "error" in search_data:
        elif "links" in search_data:
            links = search_data["links"]
            if links:
                for link_info in links:
-                    title_sanitized = title_str.replace('[','(').replace(']',')')
-                    output += f"- [{title_sanitized}]({link_info.get('link', '#')})\n"
            else:
        else:

-    return

# --- Main Processing Function ---
-def process_problem(problem_description):
    """
-    Main function called by
    """
-    print(f"\n--- Processing request for: '{problem_description[:100]}...' ---")
-    if not

    if category_name:
        confidence_text = "(Confident Match)" if is_confident else "(Possible Match)"
        category_output = f"**Best Matching Category:** {category_name} {confidence_text} (Similarity Score: {cat_score:.3f})"

@@ -482,110 +303,127 @@ def process_problem(problem_description):
        category_output = "**Could not identify a matching category.**"
    print(f"Category identified: {category_name} (Score: {cat_score:.3f}, Confident: {is_confident})")

-    # 2. Find Relevant Technologies
-    relevant_technologies_df = find_relevant_technologies(problem_description)
    print(f"Found {len(relevant_technologies_df)} relevant technologies based on problem similarity.")

    tech_output = ""
    if not relevant_technologies_df.empty:
        tech_output += f"### Top {len(relevant_technologies_df)} Most Relevant Technologies (selected based on similarity to your problem):\n\n"
        for _, row in relevant_technologies_df.iterrows():
-            original_cats = str(row.get('category', 'Unknown')).strip()
-            if original_cats:
-                tech_output += f"  *Original Category listed as: {original_cats}*\n"
-        tech_output += "\n---\n" # Add separator
-    else:
-        tech_output = "Could not identify any relevant technologies based on the problem description.\n\n---\n"

    pairs_output = ""
    if top_pairs:
        pairs_output += f"### Top {len(top_pairs)} Technology Pairs (selected from the relevant technologies above, based on their inter-similarity):\n\n"
        for pair_names, score in top_pairs:
            pairs_output += f"- **{pair_names[0]} + {pair_names[1]}** (Inter-Similarity: {score:.3f})\n"
        pairs_output += "\n---\n"

    # 4. Search for Solutions using the Top Pairs
    print("API search for solutions completed.")

-    # 5. Combine Outputs
    final_output = (
        f"## Analysis Results for: \"{problem_description[:150]}...\"\n\n"
        f"{category_output}\n\n"
        f"{tech_output}"
-        # Intentionally left blank line above for structure
    )

-    # Add the pairs section conditionally - This avoids the backslash issue
    if top_pairs:
-        final_output += pairs_output
    else:
-        final_output += "No technology pairs identified to search with.\n\n---\n"

-    final_output += solution_output
-    # --- END OF CORRECTION ---

    print("--- Processing finished ---")

-    iface = gr.Interface(
-        fn=process_problem,
-        inputs=gr.Textbox(lines=5, label="Enter Technical Problem Description", placeholder="Describe your technical challenge or requirement here... e.g., 'Develop low-latency communication protocols for 6G networks'"),
-        outputs=gr.Markdown(label="Analysis and Potential Solutions"),
-        title="Technical Problem Analyzer v4 (Cross-Category Relevance)",
-        description=(
-            "Enter a technical problem. The app:\n"
-            "1. Identifies the best matching **category** (for informational purposes).\n"
-            "2. Finds the **most relevant technologies** based *directly on your problem description* (across all categories).\n"
-            "3. Identifies **promising pairs** among these relevant technologies based on their similarity to each other.\n"
-            "4. Searches for **patents/research** using these pairs via an external API."
-        ),
-        examples=[
-            ["How can I establish reliable communication between low-orbit satellites for continuous global monitoring?"],
-            ["Need a system to automatically detect anomalies in sensor data from industrial machinery using machine learning."],
-            ["Develop low-latency communication protocols for 6G networks"],
-            ["Design efficient routing algorithms for large scale mesh networks in smart cities"],
-            ["Create biodegradable packaging material from agricultural waste"], # Example crossing categories potentially
-            ["Develop a method for real-time traffic prediction using heterogeneous data sources"]
-        ],
-        allow_flagging='never',
-        # Add theme for better visuals if desired
-        # theme=gr.themes.Soft()
    )
-else:
-    # Provide a dummy interface indicating failure
-    def error_fn():
-        return "Application failed to initialize. Please check the logs for errors (e.g., missing files or model issues)."
-    iface = gr.Interface(fn=error_fn, inputs=[], outputs=gr.Markdown(), title="Initialization Failed")

-# ---
if __name__ == "__main__":
-    # Consider adding share=True for public link if running on appropriate infra
-    # debug=True can be helpful during development
-    iface.launch()
Updated version of app.py:

+import streamlit as st
import pandas as pd
import json
from sentence_transformers import SentenceTransformer, util

import requests
import re
import urllib.parse
+import itertools # For generating pairs
import os
+import io # Keep for potential future use (e.g., local download)
+import traceback # Keep for error logging
+
+# -- Fix SSL error
+os.environ['REQUESTS_CA_BUNDLE'] = '/etc/ssl/certs/ca-certificates.crt'

# --- Configuration ---
CATEGORY_JSON_PATH = "categories.json"
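A brief note on the SSL fix above: REQUESTS_CA_BUNDLE is an environment variable that the requests library honours when verifying TLS certificates, and /etc/ssl/certs/ca-certificates.crt is the usual CA-bundle location on Debian/Ubuntu images. A minimal sketch of the equivalent per-call form (the URL below is only a placeholder, not part of this app):

import requests

# Same effect for a single call: point requests at an explicit CA bundle.
# The path is the Debian/Ubuntu default; adjust it on other systems.
resp = requests.get("https://example.com", verify="/etc/ssl/certs/ca-certificates.crt", timeout=10)
print(resp.status_code)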
MAX_TECHNOLOGY_PAIRS_TO_SEARCH = 5 # Max pairs (from the relevant tech) to use for solution search
MAX_SEARCH_REFERENCES_PER_PAIR = 5 # Max references from the API per pair
SEARCH_API_URL = "https://ychkhan-ptt-endpoints.hf.space/search"
+# --- Removed Google Drive Config ---

+# --- Global Variables (will be managed by Streamlit's caching) ---
+# These are loaded once via the cached function below

+# --- Removed Google Drive API Setup ---

+# --- Removed Google Drive Function ---

+# --- Load Data and Model (Cached) ---
+@st.cache_resource # Cache the model and embeddings
def load_data_and_model():
+    """Loads data files and the Sentence Transformer model once."""
+    print("Attempting to load data and model...")
    try:
        # Load Categories
+        with open(CATEGORY_JSON_PATH, 'r', encoding='utf-8') as f:
            categories_data = json.load(f)["Category"]
            category_names = list(categories_data.keys())
            category_texts = [f"{name}: {', '.join(keywords)}" for name, keywords in categories_data.items()]

        # Load Technologies
        technologies_df = pd.read_excel(TECHNOLOGY_EXCEL_PATH)
        technologies_df.columns = technologies_df.columns.str.strip()
        if 'technology' not in technologies_df.columns or 'description' not in technologies_df.columns:
+            raise ValueError("Missing required columns 'technology' or 'description' in technologies.xlsx")
+        technologies_df['category'] = technologies_df.get('category', '').fillna('').astype(str)
        technologies_df['description_clean'] = technologies_df['description'].fillna('').astype(str)
+        technologies_df['tech_id'] = technologies_df.index # Use index as unique ID
        print(f"Loaded {len(technologies_df)} technologies.")

        # Load Sentence Transformer Model

        # Pre-compute category embeddings
        print("Computing category embeddings...")
+        category_embeddings = model.encode(category_texts, convert_to_tensor=True)
        print("Category embeddings computed.")

        # Pre-compute technology description embeddings
        print("Computing technology description embeddings...")
        valid_descriptions = technologies_df['description_clean'].tolist()
+        technology_embeddings = model.encode(valid_descriptions, convert_to_tensor=True)
        print(f"Technology description embeddings computed (shape: {technology_embeddings.shape}).")

+        return (model, categories_data, category_names, category_embeddings,
+                technologies_df, technology_embeddings)
+
    except FileNotFoundError as e:
+        st.error(f"ERROR: File not found - {e}. Please ensure '{CATEGORY_JSON_PATH}' and '{TECHNOLOGY_EXCEL_PATH}' are in the same directory as the script.")
        print(f"ERROR: File not found - {e}. Please ensure '{CATEGORY_JSON_PATH}' and '{TECHNOLOGY_EXCEL_PATH}' exist.")
+        return None # Indicate failure
    except Exception as e:
+        st.error(f"ERROR loading data or model: {e}")
        print(f"ERROR loading data or model: {e}")
+        traceback.print_exc()
+        return None # Indicate failure
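Worth noting for the loader above: st.cache_resource runs the decorated function once per server process and then reuses the returned objects across reruns and sessions, so the model and embeddings are not reloaded on every interaction. A minimal sketch of the pattern in isolation (the model name below is a placeholder, not taken from this Space):

import streamlit as st
from sentence_transformers import SentenceTransformer

@st.cache_resource  # runs once per process; later reruns reuse the cached object
def get_model():
    return SentenceTransformer("all-MiniLM-L6-v2")  # placeholder model name

model = get_model()  # cheap after the first call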
+# --- Helper Functions (unchanged, use loaded_data) ---
+def find_best_category(problem_description, model, category_names, category_embeddings):
+    """Finds the most relevant category using pre-computed embeddings."""
    if not problem_description or not category_names or category_embeddings is None:
+        return None, 0.0, False
    try:
        problem_embedding = model.encode(problem_description, convert_to_tensor=True)
        cosine_scores = util.pytorch_cos_sim(problem_embedding, category_embeddings)[0]
        best_score, best_idx = torch.max(cosine_scores, dim=0)
        best_category_name = category_names[best_idx.item()]
        best_category_score = best_score.item()
        is_confident = best_category_score >= CATEGORY_SIMILARITY_THRESHOLD
        return best_category_name, best_category_score, is_confident
    except Exception as e:
        print(f"Error during category finding: {e}")
        return None, 0.0, False

+def find_relevant_technologies(problem_description, model, technologies_df, technology_embeddings):
+    """Calculates similarity between the problem and ALL technology descriptions."""
    all_tech_data = []
    if technologies_df.empty or technology_embeddings is None or not problem_description:
        print("Warning: Technologies DF, embeddings, or problem description missing.")
        return pd.DataFrame()
    try:
        problem_embedding = model.encode(problem_description, convert_to_tensor=True)
+        # Efficiently calculate all similarities at once
+        cosine_scores = util.pytorch_cos_sim(problem_embedding, technology_embeddings)[0]

+        # Add scores to the dataframe temporarily
+        temp_df = technologies_df.copy()
+        temp_df['similarity_score_problem'] = cosine_scores.cpu().numpy() # Move scores to CPU and numpy

+        # Sort by similarity and get top N
+        relevant_df = temp_df.nlargest(MAX_TECHNOLOGIES_TO_SHOW, 'similarity_score_problem')

        # print(f"Top relevant technologies DF head:\n{relevant_df.head()}") # Debug print
+        return relevant_df

    except Exception as e:
        print(f"Error during technology finding/scoring: {e}")
        traceback.print_exc() # Print full traceback for debugging
        return pd.DataFrame()
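The rewritten find_relevant_technologies above replaces the old per-row loop with one vectorized cosine-similarity call followed by nlargest. A self-contained sketch of that pattern on toy data (the model name and rows are illustrative only):

import pandas as pd
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")  # placeholder model name
df = pd.DataFrame({
    "technology": ["Solar cells", "Mesh routing", "Gene editing"],
    "description_clean": ["photovoltaic energy conversion",
                          "wireless multi-hop packet routing",
                          "targeted DNA modification"],
})

problem_emb = model.encode("low-power networking for remote sensors", convert_to_tensor=True)
corpus_emb = model.encode(df["description_clean"].tolist(), convert_to_tensor=True)

scores = util.pytorch_cos_sim(problem_emb, corpus_emb)[0]   # one similarity per row
df["similarity_score_problem"] = scores.cpu().numpy()
top = df.nlargest(2, "similarity_score_problem")            # top-N most relevant rows
print(top[["technology", "similarity_score_problem"]])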
+def find_top_technology_pairs(relevant_technologies_df, technology_embeddings):
+    """Calculates similarity between pairs of relevant technologies."""
    if relevant_technologies_df.empty or len(relevant_technologies_df) < 2 or technology_embeddings is None:
        return []

    pairs_with_scores = []
    if 'tech_id' not in relevant_technologies_df.columns:
+        print("Error: 'tech_id' column missing in relevant_technologies_df.")
+        return []

    tech_ids = relevant_technologies_df['tech_id'].tolist()
    tech_id_to_name = pd.Series(relevant_technologies_df['technology'].values, index=relevant_technologies_df['tech_id']).to_dict()

    for id_a, id_b in itertools.combinations(tech_ids, 2):
        try:
+            # Boundary checks
            if id_a >= technology_embeddings.shape[0] or id_b >= technology_embeddings.shape[0]:
+                print(f"Warning: tech_id {id_a} or {id_b} out of bounds. Skipping pair.")
+                continue

            embedding_a = technology_embeddings[id_a]
            embedding_b = technology_embeddings[id_b]

            # Calculate inter-technology similarity
+            inter_similarity = util.pytorch_cos_sim(embedding_a.unsqueeze(0), embedding_b.unsqueeze(0))[0][0].item()

            tech_name_a = tech_id_to_name.get(id_a, f"Unknown Tech (ID:{id_a})")
            tech_name_b = tech_id_to_name.get(id_b, f"Unknown Tech (ID:{id_b})")

            clean_tech_name_a = re.sub(r'^- Title\s*:\s*', '', str(tech_name_a)).strip()
            clean_tech_name_b = re.sub(r'^- Title\s*:\s*', '', str(tech_name_b)).strip()

            pairs_with_scores.append(((clean_tech_name_a, clean_tech_name_b), inter_similarity))

        except Exception as e:
            print(f"Error calculating similarity for pair ({id_a}, {id_b}): {e}")
            traceback.print_exc()
            continue

    pairs_with_scores.sort(key=lambda item: item[1], reverse=True)
+    pairs_with_scores_min_max = []
+    pairs_with_scores_min_max.extend(pairs_with_scores[:MAX_TECHNOLOGY_PAIRS_TO_SEARCH-2])
+    pairs_with_scores_min_max.extend(pairs_with_scores[MAX_TECHNOLOGY_PAIRS_TO_SEARCH-3:])
+    return pairs_with_scores_min_max
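For context on the pairing loop above: itertools.combinations yields each unordered pair of the selected tech_ids exactly once and produces no self-pairs, so the number of candidate pairs grows as n*(n-1)/2. A tiny sketch (the ids below are made up):

import itertools

tech_ids = [3, 7, 12, 20]
# Each unordered pair appears once: (3, 7), (3, 12), (3, 20), (7, 12), (7, 20), (12, 20)
for id_a, id_b in itertools.combinations(tech_ids, 2):
    print(id_a, id_b)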
def search_solutions_for_pairs(problem_description, top_pairs):
+    """Searches for solutions/patents using pairs of technologies via the API."""
+    results = {}
+    if not top_pairs:
+        # Return value modified for clarity
+        return "No relevant technology pairs were identified (need at least 2 relevant technologies). Cannot search for solutions.\n", results
+    if not problem_description:
+        return "Problem description is missing. Cannot search for solutions.\n", results

    headers = {'accept': 'application/json'}
+    api_output = f"### Potential Solutions & Patents (Found using Top {len(top_pairs)} Technology Pairs):\n\n"

    for pair_info in top_pairs:
        pair_names, pair_score = pair_info
        tech_a_name, tech_b_name = pair_names
+        if not tech_a_name or not tech_b_name: continue

+        query = f'research paper or patent on {tech_a_name} and {tech_b_name} related to {problem_description[:100]}...' # Keep query focused
        params = {
            'query': query,
            'max_references': MAX_SEARCH_REFERENCES_PER_PAIR
        }
+        pair_key = f"{tech_a_name} + {tech_b_name}"
+        print(f"Calling API for pair ({pair_key}): POST {SEARCH_API_URL} with query snippet: {query[:100]}...")

        try:
+            response = requests.post(SEARCH_API_URL, headers=headers, params=params, timeout=45)
+            response.raise_for_status()
+            api_response = response.json() # Assume JSON response

            search_results = []
            # --- Adapt based on actual API response structure ---
            if isinstance(api_response, list):
+                search_results = api_response
+            elif isinstance(api_response, dict):
+                # Try common keys for results lists
+                if 'results' in api_response and isinstance(api_response.get('results'), list):
+                    search_results = api_response['results']
+                elif 'references' in api_response and isinstance(api_response.get('references'), list):
+                    search_results = api_response['references']
+                elif 'links' in api_response and isinstance(api_response.get('links'), list): # Another possibility
+                    search_results = api_response['links']
+                else: # Check if the dict itself contains title/url
+                    if 'title' in api_response and ('url' in api_response or 'link' in api_response):
+                        search_results = [api_response] # Wrap it in a list
+                    else:
+                        print(f"Warning: Unexpected API response format for pair '{pair_key}'. Response keys: {list(api_response.keys())}")
            else:
+                print(f"Warning: Unexpected API response type for pair '{pair_key}'. Type: {type(api_response)}")
            # --- End adaptation ---

            valid_links = []
            for r in search_results:
+                if isinstance(r, dict):
+                    title = r.get('title', 'N/A')
+                    url = r.get('url', r.get('link')) # Check for 'url' or 'link'
+                    if url and isinstance(url, str) and url.startswith(('http://', 'https://')):
+                        valid_links.append({'title': title, 'link': url})
+                    elif url:
+                        print(f"Warning: Invalid or missing URL for result '{title}' in pair '{pair_key}': {url}")
+
+            results[pair_key] = {"score": pair_score, "links": valid_links}

        except requests.exceptions.Timeout:
            print(f"Error: API call timed out for pair '{pair_key}'")
        except requests.exceptions.RequestException as e:
            print(f"Error calling search API for pair '{pair_key}': {e}")
            results[pair_key] = {"score": pair_score, "error": f"API Request Error: {e}"}
+        except json.JSONDecodeError:
+            err_msg = f"API Error: Invalid JSON response. Status: {response.status_code}, Response text: {response.text[:200]}"
+            print(f"Error decoding JSON response for pair '{pair_key}'. {err_msg}")
+            results[pair_key] = {"score": pair_score, "error": err_msg}
        except Exception as e:
            err_msg = f"Unexpected Error during API call: {e}"
            print(f"Unexpected error during API call for pair '{pair_key}': {e}")
            traceback.print_exc()
            results[pair_key] = {"score": pair_score, "error": err_msg}

    # Format results for display
    if not results:
+        api_output += "No search results could be retrieved from the API for the generated technology pairs."
+        return api_output, results # Return formatted string and raw results dict

    for pair_key, search_data in results.items():
        pair_score = search_data.get('score', 0.0)
+        api_output += f"**For Technology Pair: {pair_key}** (Inter-Similarity Score: {pair_score:.3f})\n"
        if "error" in search_data:
+            api_output += f"- *Search failed: {search_data['error']}*\n"
        elif "links" in search_data:
            links = search_data["links"]
            if links:
                for link_info in links:
+                    title_str = str(link_info.get('title', 'N/A'))
+                    title_sanitized = title_str.replace('[','(').replace(']',')')
+                    api_output += f"- [{title_sanitized}]({link_info.get('link', '#')})\n"
            else:
+                api_output += "- *No specific results found by the API for this technology pair.*\n"
        else:
+            api_output += "- *Unknown search result state.*\n"
+        api_output += "\n"

+    return api_output, results # Return formatted string and raw results dict
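One detail of the search call above that is easy to miss: because query and max_references are passed via params, requests appends them to the URL as a query string even though the request is a POST with no body. A minimal sketch of the same call shape (endpoint and parameter names are copied from the code above; the handling is reduced to the essentials):

import requests

SEARCH_API_URL = "https://ychkhan-ptt-endpoints.hf.space/search"

params = {"query": "research paper or patent on A and B related to ...", "max_references": 5}
resp = requests.post(SEARCH_API_URL, headers={"accept": "application/json"}, params=params, timeout=45)
resp.raise_for_status()      # raises on 4xx/5xx
data = resp.json()           # may be a list of results or a dict wrapping them
print(type(data))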
# --- Main Processing Function ---
+def process_problem(problem_description, loaded_data):
    """
+    Main function called by Streamlit interface. Orchestrates the process.
+    Returns the formatted output string AND the relevant technologies DataFrame.
    """
+    print(f"\n--- Processing request for: '{problem_description[:100]}...' ---")
+    if not loaded_data:
+        # This case should ideally be handled before calling process_problem
+        return "Error: Model and data not loaded.", pd.DataFrame()

+    (model, categories_data, category_names, category_embeddings,
+     technologies_df, technology_embeddings) = loaded_data
+
+    # 1. Categorize Problem
+    category_name, cat_score, is_confident = find_best_category(problem_description, model, category_names, category_embeddings)
    if category_name:
        confidence_text = "(Confident Match)" if is_confident else "(Possible Match)"
        category_output = f"**Best Matching Category:** {category_name} {confidence_text} (Similarity Score: {cat_score:.3f})"

        category_output = "**Could not identify a matching category.**"
    print(f"Category identified: {category_name} (Score: {cat_score:.3f}, Confident: {is_confident})")

+    # 2. Find Relevant Technologies
+    relevant_technologies_df = find_relevant_technologies(problem_description, model, technologies_df, technology_embeddings)
    print(f"Found {len(relevant_technologies_df)} relevant technologies based on problem similarity.")
    tech_output = ""
    if not relevant_technologies_df.empty:
        tech_output += f"### Top {len(relevant_technologies_df)} Most Relevant Technologies (selected based on similarity to your problem):\n\n"
+        # Create a list for display, keeping relevant data
+        display_tech_list = []
        for _, row in relevant_technologies_df.iterrows():
+            tech_name = re.sub(r'^- Title\s*:\s*', '', str(row.get('technology', 'N/A'))).strip()
+            problem_relevance = row.get('similarity_score_problem', 0.0)
+            original_cats = str(row.get('category', 'Unknown')).strip()

+            tech_output += f"- **{tech_name}** (Problem Relevance: {problem_relevance:.3f})\n"
+            if original_cats:
+                tech_output += f"  *Original Category listed as: {original_cats}*\n"

+        tech_output += "\n---\n"
+    else:
+        tech_output = "Could not identify any relevant technologies based on the problem description.\n\n---\n"

+    # 3. Find Top Technology Pairs
+    top_pairs = find_top_technology_pairs(relevant_technologies_df, technology_embeddings)
+    print(f"Identified {len(top_pairs)} top technology pairs for searching.")
    pairs_output = ""
    if top_pairs:
        pairs_output += f"### Top {len(top_pairs)} Technology Pairs (selected from the relevant technologies above, based on their inter-similarity):\n\n"
        for pair_names, score in top_pairs:
            pairs_output += f"- **{pair_names[0]} + {pair_names[1]}** (Inter-Similarity: {score:.3f})\n"
        pairs_output += "\n---\n"
+    # No 'else' needed here, handled in final assembly

    # 4. Search for Solutions using the Top Pairs
+    solution_output_text, _ = search_solutions_for_pairs(problem_description, top_pairs) # Ignore raw results dict here
    print("API search for solutions completed.")

+    # 5. Combine Outputs
    final_output = (
        f"## Analysis Results for: \"{problem_description[:150]}...\"\n\n"
        f"{category_output}\n\n"
        f"{tech_output}"
    )
    if top_pairs:
+        final_output += pairs_output
    else:
+        final_output += "No technology pairs identified (need >= 2 relevant technologies to form pairs).\n\n---\n"

+    final_output += solution_output_text

    print("--- Processing finished ---")
+    # Return both the formatted text and the DataFrame (might be useful later)
+    return final_output, relevant_technologies_df
+
+# --- Streamlit UI ---
+def main():
+    st.set_page_config(page_title="Technical Problem Analyzer", layout="wide")
+    st.title("🔧 Technical Problem Analyzer v4 (Local Streamlit)")
+
+    st.markdown(
+        """
+        Enter a technical problem. The app will:
+        1. Identify the best matching **category** (for informational purposes).
+        2. Find the **most relevant technologies** based *directly on your problem description*.
+        3. Identify **promising pairs** among these relevant technologies based on their similarity.
+        4. Search for **patents/research** using these pairs via an external API.
+        """
+    )

+    # Load data and model (cached)
+    loaded_data = load_data_and_model()
+
+    if loaded_data is None:
+        st.error("Application initialization failed. Check logs for details.")
+        st.stop() # Stop execution if loading failed
+
+    # Example problems (optional)
+    st.subheader("Example Problems:")
+    examples = [
+        "How can I establish reliable communication between low-orbit satellites for continuous global monitoring?",
+        "Need a system to automatically detect anomalies in sensor data from industrial machinery using machine learning.",
+        "Develop low-latency communication protocols for 6G networks",
+        "Design efficient routing algorithms for large scale mesh networks in smart cities",
+        "Create biodegradable packaging material from agricultural waste",
+        "Develop a method for real-time traffic prediction using heterogeneous data sources"
+    ]
+    selected_example = st.selectbox("Select an example or enter your own below:", [""] + examples)
+
+    # User input
+    problem_description_input = st.text_area(
+        "Enter Technical Problem Description:",
+        height=150,
+        placeholder="Describe your technical challenge or requirement here...",
+        value=selected_example # Use selected example if chosen
+    )
+
+    # Button to trigger analysis
+    analyze_button = st.button("Analyze Problem")
+
+    if analyze_button and problem_description_input:
+        with st.spinner("Analyzing problem and searching for solutions..."):
+            # Run the main processing function
+            analysis_output, relevant_tech_df = process_problem(problem_description_input, loaded_data)
+
+            # Display results
+            st.markdown("---") # Separator
+            st.markdown(analysis_output) # Display formatted text results
+
+            # --- Removed Google Drive Upload Section ---
+            # You could potentially add other actions here using relevant_tech_df,
+            # like displaying it as a table or offering a local download.
+            # Example: Display relevant technologies table
+            if not relevant_tech_df.empty:
+                st.markdown("---")
+                st.subheader("Relevant Technologies Data")
+                st.dataframe(relevant_tech_df[['technology', 'description', 'category', 'similarity_score_problem']])
+
+    elif analyze_button and not problem_description_input:
+        st.warning("Please enter a problem description.")

+# --- Run the App ---
if __name__ == "__main__":
+    main()
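To run this updated version locally, the standard Streamlit entry point applies: streamlit run app.py (assuming streamlit, sentence-transformers, torch, pandas, openpyxl and the categories.json and technologies Excel files referenced in the configuration are available alongside the script).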