Spaces:
Runtime error
Runtime error
Vaishnav14220
committed on
Commit
·
40e0218
1
Parent(s):
7ac4139
Deploy app with syntax issue to test on Hugging Face - the error may be environment-specific
Browse files
app.py
CHANGED
|
@@ -212,32 +212,38 @@ def _build_db_url(db_name: str, query: str, phase: str | None) -> str:
|
|
| 212 |
|
| 213 |
|
| 214 |
def fetch_specific_db(db_name, formula):
    """Query the NIST WebBook database *db_name* for *formula*.

    Parameters:
        db_name: key into the module-level DB_TABS mapping; selects which
            WebBook section to query (its 'param' and 'summary' fields).
        formula: chemical name or formula to look up.

    Returns:
        A 3-tuple ``(markdown, df, None)`` where *markdown* summarizes the
        result and *df* is the first HTML table on the page parsed into a
        DataFrame (or ``None`` if the page has no tables). On any failure
        returns ``(error_message, None, None)``.
    """
    if db_name not in DB_TABS:
        return "Invalid database.", None, None

    config = DB_TABS[db_name]
    url = f"https://webbook.nist.gov/cgi/cbook.cgi?Name={quote_plus(formula)}&Units=SI&{config['param']}"
    try:
        # timeout= prevents the request from hanging indefinitely;
        # raise_for_status() surfaces HTTP errors instead of silently
        # parsing an error page as if it were results.
        response = requests.get(url, timeout=20)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Parse the first table if one exists. The original indexed
        # tables[0] unconditionally, raising IndexError on table-less
        # pages (masked by the broad except below).
        tables = soup.find_all('table')
        df = pd.read_html(StringIO(str(tables[0])))[0] if tables else None

        # Collect links to downloadable spectra/structure files.
        links = [a['href'] for a in soup.find_all('a', href=True)
                 if any(ext in a['href'] for ext in ['.pdf', '.sd', '.jdx'])]
        link_text = f"Download links: {links}" if links else ""

        md_content = f"### {db_name}\n{config['summary']}\n\n**Query:** {formula}\n\n{link_text}\n\n**Extracted Data:**"
        if df is not None:
            md_content += "\n" + df.to_markdown(index=False)
        else:
            md_content += "\nNo tabular data found."
        return md_content, df, None

    except Exception as e:
        return f"Error fetching {db_name}: {e}", None, None
|
|
|
|
| 212 |
|
| 213 |
|
| 214 |
def fetch_specific_db(db_name, formula):
    """Look up *formula* in the NIST WebBook section named *db_name*.

    Returns ``(markdown, dataframe_or_None, None)`` on success; on any
    failure the first element carries an error message and the other two
    are ``None``.
    """
    # Reject unknown database names before touching the network.
    if db_name not in DB_TABS:
        return "Invalid database.", None, None

    cfg = DB_TABS[db_name]
    target = f"https://webbook.nist.gov/cgi/cbook.cgi?Name={quote_plus(formula)}&Units=SI&{cfg['param']}"

    try:
        # Bounded fetch; non-2xx responses become exceptions handled below.
        resp = requests.get(target, timeout=20)
        resp.raise_for_status()
        page = BeautifulSoup(resp.text, 'html.parser')

        # First HTML table (if any) becomes a DataFrame.
        frame = None
        found_tables = page.find_all('table')
        if found_tables:
            frame = pd.read_html(StringIO(str(found_tables[0])))[0]

        # Gather links to downloadable artifacts (spectra, structures).
        wanted = ['.pdf', '.sd', '.jdx']
        hits = [a['href'] for a in page.find_all('a', href=True)
                if any(ext in a['href'] for ext in wanted)]
        link_text = f"Download links: {hits}" if hits else ""

        # Assemble the markdown report.
        md_content = f"### {db_name}\n{cfg['summary']}\n\n**Query:** {formula}\n\n{link_text}\n\n**Extracted Data:**"
        if frame is None:
            md_content += "\nNo tabular data found."
        else:
            md_content += "\n" + frame.to_markdown(index=False)

        return md_content, frame, None

    except Exception as e:
        return f"Error fetching {db_name}: {e}", None, None
|