instruction
stringlengths
0
1k
input
stringlengths
1
1k
output
stringlengths
10
6.51k
class SimpleTypesenseIndexer:
    """Simplified Typesense indexer for markdown documentation files.

    NOTE(review): relies on module-level names not visible in this
    chunk -- ``typesense``, ``TYPESENSE_CONFIG`` and ``ACRONYMS`` --
    presumably defined near the top of the file; confirm at
    integration time.
    """

    def __init__(self):
        # Client is built eagerly; connection problems surface on the
        # first API call, not here.
        self.client = typesense.Client(TYPESENSE_CONFIG)

    def smart_title_case(self, name: str) -> str:
        """Title-case *name* word by word, upper-casing known acronyms."""
        return " ".join(
            word.upper() if word.upper() in ACRONYMS else word.capitalize()
            for word in name.split(" ")
        )

    def clean_name(self, name: str) -> str:
        """Turn a filename like ``api_reference.md`` into a display title."""
        if name.lower().endswith(".md"):
            name = name[:-3]
        name = name.replace("_", " ").replace("-", " ").strip()
        return self.smart_title_case(name)

    def clean_markdown(self, text: str) -> str:
        """Strip markdown/HTML markup from *text*, leaving readable plain text.

        Order matters: fenced code blocks must be removed before inline
        code spans, and heading/link markers are unwrapped before blank
        lines are collapsed.
        """
        # YAML front matter (``---`` ... ``---``). NOTE(review): with
        # MULTILINE this also matches a ``---`` pair mid-document, not
        # only at the top -- behavior preserved from the original.
        text = re.sub(r"^---[\s\S]*?---\s*", "", text, flags=re.MULTILINE)
        # Fenced code blocks. (The original ran an equivalent
        # ``` ```.*?``` ``` DOTALL pattern a second time; that pass was
        # a guaranteed no-op after this one and has been dropped.)
        text = re.sub(r"```[\s\S]*?```", "", text)
        # Raw <div> wrappers, case-insensitively.
        text = re.sub(r"<div[\s\S]*?</div>", "", text, flags=re.IGNORECASE)
        # Inline code spans.
        text = re.sub(r"`[^`]+`", "", text)
        # Heading markers (keep the heading text itself).
        text = re.sub(r"^#+\s*", "", text, flags=re.MULTILINE)
        # Links: keep the label, drop the URL.
        text = re.sub(r"\[([^\]]+)\]\([^)]+\)", r"\1", text)
        # HTML comments.
        text = re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
        # Collapse runs of blank lines into a single blank line.
        text = re.sub(r"\n\s*\n+", "\n\n", text)
        return text.strip()

    def extract_headings(self, content: str) -> List[str]:
        """Extract heading texts (``#``-prefixed lines) from markdown content.

        Attribute blocks such as ``{#anchor}`` are stripped from the
        heading text; headings that end up empty are skipped.
        """
        headings = []
        for line in content.split("\n"):
            line = line.strip()
            if line.startswith("#"):
                heading_text = re.sub(r"^#+\s*", "", line)
                heading_text = re.sub(r"\{[^}]*\}", "", heading_text)
                if heading_text:
                    headings.append(heading_text.strip())
        return headings

    def summarize_markdown(self, md_path: str, max_lines: int = 100) -> str:
        """Return the first *max_lines* cleaned lines of the file at *md_path*."""
        with open(md_path, "r", encoding="utf-8") as f:
            content = f.read()
        cleaned = self.clean_markdown(content)
        # The original branched on whether truncation occurred but
        # returned the identical value on both paths; the dead branch
        # has been removed.
        return "\n".join(cleaned.splitlines()[:max_lines]).strip()

    def name_from_url(self, url: str) -> str:
        """Derive a human-readable page name from a docs URL path."""
        parts = url.strip("/").split("/")
        # str.split always yields at least one element, so parts[0] is safe.
        if parts[0] == "docs":
            parts = parts[1:]
        # Special case: a trailing "low" segment means "low-level".
        if parts and parts[-1] == "low":
            parts[-1] = "low-level"
        readable = " ".join(part.replace("-", " ") for part in parts)
        return self.smart_title_case(readable)

    def extract_components(self, file_path: str) -> List[str]:
        """Extract component names from a raw markdown file.

        Matches ``rx.Name`` / ``reflex.Name`` references and CamelCase
        identifiers ending in ``Component``. Result order is
        unspecified (set-backed).
        """
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        patterns = [
            r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)",
            r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b",
        ]
        components = set()
        for pattern in patterns:
            components.update(re.findall(pattern, content))
        return list(components)
return truncated_text def name_from_url(self, url: str) -> str: parts = url.strip("/").split("/") if parts[0] == "docs": parts = parts[1:] if parts and parts[-1] == "low": parts[-1] = "low-level" readable = " ".join(part.replace("-", " ") for part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optiona
if len(truncated_lines) < max_lines: return truncated_text
-> List[str]: """Extract headings from markdown content.""" headings = [] lines = content.split("\n") for line in lines: line = line.strip() if line.startswith("#"): heading_text = re.sub(r"^#+\s*", "", line) heading_text = re.sub(r"\{[^}]*\}", "", heading_text) if heading_text: headings.append(heading_text.strip()) return headings def summarize_markdown(self, md_path: str, max_lines: int = 100) -> str: """Your existing summarize function - simplified.""" with open(md_path, "r", encoding="utf-8") as f: content = f.read() cleaned = self.clean_markdown(content) lines = cleaned.splitlines() truncated_lines = lines[:max_lines] truncated_text = "\n".join(truncated_lines).strip() if len(truncated_lines) < max_lines: return truncated_text return truncated_text
def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(p
def name_from_url(self, url: str) -> str:
    """Derive a human-readable page name from a docs URL path.

    Drops a leading ``docs`` segment, expands a trailing ``low`` segment
    to ``low-level``, replaces hyphens with spaces, and title-cases the
    result (acronym-aware via ``smart_title_case``).
    """
    segments = url.strip("/").split("/")
    if segments[0] == "docs":
        del segments[0]
    if segments and segments[-1] == "low":
        segments[-1] = "low-level"
    words = [segment.replace("-", " ") for segment in segments]
    return self.smart_title_case(" ".join(words))
def extract_components(self, file_path: str) -> List[str]:
    """Collect candidate Reflex component names referenced in a markdown file.

    Scans the raw file text for ``rx.Name`` / ``reflex.Name`` references and
    for CamelCase identifiers ending in ``Component``.

    Args:
        file_path: Path to the markdown file to scan.

    Returns:
        De-duplicated component names (order unspecified).
    """
    with open(file_path, "r", encoding="utf-8") as fh:
        text = fh.read()
    patterns = (
        r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)",
        r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b",
    )
    found = set()
    for pat in patterns:
        found.update(re.findall(pat, text))
    return list(found)
def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields
def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, }
ts = url.strip("/").split("/") if parts[0] == "docs": parts = parts[1:] if parts and parts[-1] == "low": parts[-1] = "low-level" readable = " ".join(part.replace("-", " ") for part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense."""
rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_
file_path = os.path.join(root, file)
ts[0] == "docs": parts = parts[1:] if parts and parts[-1] == "low": parts[-1] = "low-level" readable = " ".join(part.replace("-", " ") for part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file)
parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read())
rel_path = os.path.relpath(file_path, docs_path)
parts and parts[-1] == "low": parts[-1] = "low-level" readable = " ".join(part.replace("-", " ") for part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path)
filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else ""
parts = pathlib.Path(rel_path).parts
ts[-1] = "low-level" readable = " ".join(part.replace("-", " ") for part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts
parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluste
filename_no_ext = file.rsplit(".", 1)[0]
art.replace("-", " ") for part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0]
for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLU
parts_clean = []
part in parts) return self.smart_title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = []
url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "se
for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p))
title_case(readable) def extract_components(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts):
if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = c
is_last = i == len(parts) - 1
onents(self, file_path: str) -> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1
url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "se
if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p))
-> List[str]: """Extract components from raw markdown file content.""" with open(file_path, "r", encoding="utf-8") as f: content = f.read() patterns = [ r"(?:rx|reflex)\.([A-Z][a-zA-Z0-9_]*)", r"\b([A-Z][a-z]+(?:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last:
else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings,
if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext))
:[A-Z][a-z]*)*Component?)\b", ] components = set() for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p))
if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else No
url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts]
for pattern in patterns: matches = re.findall(pattern, content) components.update(matches) return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts]
url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _i
if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low")
return list(components) def process_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low")
name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict
url = "/" + "/".join(url_parts)
s_doc_file(self, docs_path: str, file: str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts)
full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing pr
name = " ".join(parts_clean)
str, root: str) -> Optional[dict]: """Your existing process_file function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean)
components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API referen
full_content = self.summarize_markdown(file_path, max_lines=100)
function adapted for Typesense.""" file_path = os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100)
headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if i
components = self.extract_components(file_path)
os.path.join(root, file) rel_path = os.path.relpath(file_path, docs_path) parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path)
parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module
headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read())
parts = pathlib.Path(rel_path).parts filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read())
cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extr
parent = parts[0] if parts else ""
filename_no_ext = file.rsplit(".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else ""
for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules:
cluster = "Uncategorized"
".", 1)[0] parts_clean = [] for i, p in enumerate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized"
return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_field
for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break
ate(parts): is_last = i == len(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items():
return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_field
if parent in folder_list: cluster = cluster_name break
(parts) - 1 if is_last: if filename_no_ext.lower().endswith("-ll"): parts_clean.append("Low Level") else: parts_clean.append(self.clean_name(filename_no_ext)) else: parts_clean.append(self.clean_name(p)) url_parts = [p.replace("_", "-").rsplit(".", 1)[0] for p in parts] if url_parts and url_parts[-1].endswith("-ll"): url_parts[-1] = url_parts[-1].replace("-ll", "/low") url = "/" + "/".join(url_parts) name = " ".join(parts_clean) full_content = self.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list:
break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else:
cluster = cluster_name
.summarize_markdown(file_path, max_lines=100) components = self.extract_components(file_path) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...")
# Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get(
documents = []
ath) headings = self.extract_headings(open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules:
s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(ext
if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None
open(file_path, "r", encoding="utf-8").read()) parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple):
extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") descript
module, *extra_modules = module
parent = parts[0] if parts else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module
for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "")
extra_fields = []
else "" cluster = "Uncategorized" for cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = []
else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields()
for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields())
cluster_name, folder_list in CLUSTERS.items(): if parent in folder_list: cluster = cluster_name break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules:
extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.a
s_extra = Source(module=extra_module)
break return { "id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else:
s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(ext
extra_fields = None
"id": str(rel_path), "title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None
name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields:
s = Source(module=module)
"title": name, "content": full_content, "components": components, "headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module)
# Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fiel
name = module.__name__.lower()
"headings": headings, "path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly
headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields:
content_parts = []
"path": str(rel_path), "url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = []
overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.g
headings = []
"url": f"docs{url}", "section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = []
if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not pro
overview = s.get_overview()
"section": parent, "subsection": parts[1] if len(parts) > 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview()
class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = geta
if overview: content_parts.append(overview)
> 1 else None, "cluster": cluster, "is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview)
if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") descrip
class_fields = s.get_class_fields()
"is_blog": False, "parts": parts_clean, } def _index_programmatic_docs(self) -> List[dict]: logger.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields()
fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description",
if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name)
r.info("Processing programmatic docs...") documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields")
fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description",
for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name)
documents = [] # Process API reference pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields:
if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods:
prop = field.get("prop")
rence pages for module in modules: if isinstance(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop")
prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n")
if not prop: continue
ce(module, tuple): module, *extra_modules = module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue
description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method
prop_name = getattr(prop, "name", "")
= module extra_fields = [] for extra_module in extra_modules: s_extra = Source(module=extra_module) extra_fields.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "")
content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name"
description = field.get("description", "")
lds.extend(s_extra.get_fields()) else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name)
if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_part
fields = s.get_fields()
else: extra_fields = None s = Source(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields()
if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{descri
if extra_fields: fields.extend(extra_fields)
(module=module) name = module.__name__.lower() # Get the content from the source object directly content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields)
methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings
if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name)
content_parts = [] headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields")
methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings
for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name)
headings = [] overview = s.get_overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields:
if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}"
prop = field.get("prop")
overview() if overview: content_parts.append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop")
prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") pat
if not prop: continue
append(overview) class_fields = s.get_class_fields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue
description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append(
prop_name = getattr(prop, "name", "")
ields() if class_fields: content_parts.append("\n## Class Fields\n") headings.append("Class Fields") for field in class_fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "")
content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path,
description = field.get("description", "")
ields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name)
if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path,
methods = s.get_methods()
d.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods()
content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path
if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}")
"name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods")
content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path
for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}")
= field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods:
signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False,
method_name = method.get("name", "")
nt_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "")
description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title],
signature = method.get("signature", "")
headings.append(prop_name) fields = s.get_fields() if extra_fields: fields.extend(extra_fields) if fields: content_parts.append("\n## Fields\n") headings.append("Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "")
content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } )
description = method.get("description", "")
"Fields") for field in fields: prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}")
url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables"
content = "\n".join(content_parts)
prop = field.get("prop") if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts)
title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars()
url_path = f"/api-reference/{name}"
if not prop: continue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}"
path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number
title = self.name_from_url(f"docs{url_path}")
nue prop_name = getattr(prop, "name", "") description = field.get("description", "") content_parts.append(f"### {prop_name}\n{description}\n") headings.append(prop_name) methods = s.get_methods() if methods: content_parts.append("\n## Methods\n") headings.append("Methods") for method in methods: method_name = method.get("name", "") signature = method.get("signature", "") description = method.get("description", "") content_parts.append( f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}")
documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used t
path = f"api-reference/{name}"
f"### {method_name}{signature}\n{description}\n" ) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page
env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.d
env_var_url_path = "/api-reference/environment-variables"
) headings.append(f"{method_name}{signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables"
env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name)
env_var_title = self.name_from_url(f"docs{env_var_url_path}")
signature}") content = "\n".join(content_parts) url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}")
all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) document
env_var_path = "api-reference/environment-variables"
url_path = f"/api-reference/{name}" title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables"
content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) documents.append( { "id": env_
all_vars = EnvVarDocs.get_all_env_vars()
title = self.name_from_url(f"docs{url_path}") path = f"api-reference/{name}" documents.append( { "id": path, "title": title, "content": self.clean_markdown(content), "headings": headings, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars()
headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environ
content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ]
ngs, "path": path, "url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ]
for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "
headings = ["Environment Variables"]
"url": f"docs{url_path}", "section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"]
content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", env_var_title], "components": [], } ) logger.info(f"Found {len(documents)} programmatic docs.") return documents def extract_frontmatter(self, md_path: str) -> dict: """Your existing frontmatter extraction.""" with open(md_path, "r", encoding="utf-8") as f: content = f.read() match = re.match(r"---\n(.*?)\n---", content, re.DOTALL)
for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name)