"section": "API Reference", "subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars:
content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", env_var_title], "components": [], } ) logger.info(f"Found {len(documents)} programmatic docs.") return documents def extract_frontmatter(self, md_path: str) -> dict: """Your existing frontmatter extraction.""" with open(md_path, "r", encoding="utf-8") as f: content = f.read() match = re.match(r"---\n(.*?)\n---", content, re.DOTALL)
if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name)
"subsection": name, "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False):
var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", env_var_title], "components": [], }
docstring = EnvVarDocs.get_env_var_docstring(name) or ""
rence", "is_blog": False, "parts": ["API Reference", title], "components": [], } ) # Process Environment Variables page env_var_url_path = "/api-reference/environment-variables" env_var_title = self.name_from_url(f"docs{env_var_url_path}") env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or ""
content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", env_var_title], "components": [], } ) logger.info(f"Found {len(documents)} programmatic docs.") return documents def extract_frontmatter(self, md_path: str) -> dict: """Your existin
var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) )
env_var_path = "api-reference/environment-variables" all_vars = EnvVarDocs.get_all_env_vars() content_parts = [ "Reflex provides a number of environment variables that can be used to configure the behavior of your application. These environment variables can be set in your shell environment or in a .env file. This page documents all available environment variables in Reflex." ] headings = ["Environment Variables"] for name, var in all_vars: if not getattr(var, "internal", False): docstring = EnvVarDocs.get_env_var_docstring(name) or "" var_type = ( var.type_.__name__ if hasattr(var.type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name)
documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", env_var_title], "components": [], } ) logger.info(f"Found {len(documents)} programmatic docs.") return documents def extract_frontmatter(self, md_path: str) -> dict: """Your existing frontmatter extraction.""" with open(md_path, "r", encoding="utf-8") as f: content = f.read() match = re.match(r"---\n(.*?)\n---", content, re.DOTALL) if not match: return {}
content = "\n".join(content_parts)
type_, "__name__") else str(var.type_) ) content_parts.append( f"{var.name}: {docstring} (Type: {var_type}, Default: {var.default})" ) headings.append(var.name) content = "\n".join(content_parts) documents.append( { "id": env_var_path, "title": env_var_title, "content": self.clean_markdown(content), "headings": headings, "path": env_var_path, "url": f"docs{env_var_url_path}", "section": "API Reference", "subsection": "Environment Variables", "cluster": "API Reference", "is_blog": False, "parts": ["API Reference", env_var_title], "components": [], } ) logger.info(f"Found {len(documents)} programmatic docs.") return documents
    def extract_frontmatter(self, md_path: str) -> dict:
        """Extract the YAML frontmatter from a markdown file."""
        with open(md_path, "r", encoding="utf-8") as f:
            content = f.read()

        match = re.match(r"---\n(.*?)\n---", content, re.DOTALL)
        if not match:
            return {}

        try:
            frontmatter = yaml.safe_load(match.group(1))
            return frontmatter
        except yaml.YAMLError:
            return {}

    def process_blog_file(
        self, blog_root: str, file: str, root: str
    ) -> Optional[dict]:
        """Process a blog post markdown file into a Typesense document."""
        file_path = os.path.join(root, file)
        fm = self.extract_frontmatter(file_path)
        if not fm or not all(
            k in fm for k in ("title", "author", "date", "description")
        ):
            return None

        rel_path = os.path.relpath(file_path, blog_root)
        slug = pathlib.Path(file_path).stem.lower().replace("_", "-")
        url = f"/blog/{slug}"

        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()

        full_content = self.clean_markdown(content)
        headings = self.extract_headings(content)

        return {
            "id": str(rel_path),
            "title": fm["title"],
            "image": fm["image"],
            "date": fm["date"].strftime("%b %d, %Y")
            if isinstance(fm["date"], (datetime.date, datetime.datetime))
            else fm["date"],
            "author": fm["author"],
            "content": full_content,
            "headings": headings,
            "path": str(rel_path),
            "url": url,
            "section": "Blog",
            "subsection": fm["author"],
            "cluster": "Blog Posts",
            "is_blog": True,
            "parts": ["Blog", fm["author"], fm["title"]],
        }

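    # Illustrative only: a minimal blog frontmatter block that would pass the
    # required-key check in process_blog_file ("title", "author", "date",
    # "description"). Note that "image" is also read when building the document,
    # so posts are expected to define it as well.
    #
    #   ---
    #   title: "Example Post Title"
    #   description: "One-line summary of the post."
    #   author: "Author Name"
    #   date: 2024-01-15
    #   image: "/blog/example.png"
    #   ---
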
    def create_collection(self, force_recreate: bool = False) -> bool:
        """Create or recreate the collection."""
        try:
            try:
                self.client.collections["docs"].retrieve()
                if force_recreate:
                    logger.info("Deleting existing collection...")
                    self.client.collections["docs"].delete()
                else:
                    logger.info("Collection already exists. Use --force to recreate.")
                    return True
            except typesense.exceptions.ObjectNotFound:
                pass

            logger.info("Creating collection...")
            self.client.collections.create(COLLECTION_SCHEMA)
            logger.info("Collection created successfully.")
            return True
        except Exception:
            logger.exception("Error creating collection")
            return False

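    # COLLECTION_SCHEMA is defined elsewhere in this module. For reference, a minimal
    # Typesense schema sketch; the field names below are assumptions based on the
    # documents built above, not necessarily the exact schema used:
    #
    #   COLLECTION_SCHEMA = {
    #       "name": "docs",
    #       "fields": [
    #           {"name": "title", "type": "string"},
    #           {"name": "content", "type": "string"},
    #           {"name": "headings", "type": "string[]", "optional": True},
    #           {"name": "section", "type": "string", "facet": True},
    #           {"name": "subsection", "type": "string", "facet": True, "optional": True},
    #           {"name": "cluster", "type": "string", "facet": True},
    #           {"name": "is_blog", "type": "bool", "facet": True},
    #           {"name": "url", "type": "string"},
    #       ],
    #   }
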
    def index_documents(
        self,
        docs_path: str,
        blog_path: str,
        max_workers: int = 4,
        batch_size: int = 100,
    ) -> bool:
        """Index both docs and blog files."""
        try:
            programmatic_docs = self._index_programmatic_docs()

            docs_files = []
            for root, _, files in os.walk(docs_path):
                docs_files.extend(
                    [
                        (docs_path, file, root, False)
                        for file in files
                        if file.endswith(".md")
                    ]
                )

            blog_files = []
            if os.path.exists(blog_path):
                for root, _, files in os.walk(blog_path):
                    blog_files.extend(
                        (blog_path, file, root, True)
                        for file in files
                        if file.endswith(".md")
                    )

            all_files = docs_files + blog_files
            logger.info(
                f"Found {len(docs_files)} docs and {len(blog_files)} blog files"
            )

            documents = programmatic_docs
            processed = 0

            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = []
                for file_args in all_files:
                    future = executor.submit(self._process_file_wrapper, *file_args)
                    futures.append(future)

                for future in as_completed(futures):
                    doc = future.result()
                    if doc:
                        documents.append(doc)
                    processed += 1

                    if len(documents) >= batch_size:
                        self._index_batch(documents)
                        documents = []
                        logger.info(
                            f"Processed {processed}/{len(all_files)} files (plus programmatic docs)"
                        )

            if documents:
                self._index_batch(documents)
                logger.info(
                    f"Processed {processed}/{len(all_files)} files (plus programmatic docs)"
                )

            logger.info("Indexing completed successfully!")
            return True
        except Exception:
            logger.exception("Error during indexing")
            return False

def _index_batch(self, documents: List[Dict[str, Any]]) -> None: """Index a batch of documents.""" try: clean_docs = [] for doc in documents: clean_doc = {k: v for k, v in doc.items() if v is not None} if "subsection" not in clean_doc: clean_doc["subsection"] = "" clean_docs.append(clean_doc) results = self.client.collections["docs"].documents.import_( clean_docs, {"action": "upsert"} ) for result in results: if not result.get("success", False): logger.warning(f"Failed to index document: {result}") except Exception: logger.exception("Error indexing batch") def main(): """Main function to run the indexing process.""" import argparse parser = argparse.ArgumentParser( description="Simple Typesense indexer for Reflex docs" ) parser.add_ar
def _process_file_wrapper( self, path: str, file: str, root: str, is_blog: bool ) -> Optional[dict]: """Wrapper to route to correct processing function.""" try: if is_blog: return self.process_blog_file(path, file, root) else: return self.process_doc_file(path, file, root) except Exception: logger.exception(f"Error processing {file}") return None
ments.append(doc) processed += 1 if len(documents) >= batch_size: self._index_batch(documents) documents = [] logger.info( f"Processed {processed}/{len(all_files)} files (plus programmatic docs)" ) if documents: self._index_batch(documents) logger.info( f"Processed {processed}/{len(all_files)} files (plus programmatic docs)" ) logger.info("Indexing completed successfully!") return True except Exception: logger.exception("Error during indexing") return False def _process_file_wrapper( self, path: str, file: str, root: str, is_blog: bool ) -> Optional[dict]: """Wrapper to route to correct processing function.""" try:
except Exception: logger.exception(f"Error processing {file}") return None def _index_batch(self, documents: List[Dict[str, Any]]) -> None: """Index a batch of documents.""" try: clean_docs = [] for doc in documents: clean_doc = {k: v for k, v in doc.items() if v is not None} if "subsection" not in clean_doc: clean_doc["subsection"] = "" clean_docs.append(clean_doc) results = self.client.collections["docs"].documents.import_( clean_docs, {"action": "upsert"} ) for result in results: if not result.get("success", False): logger.warning(f"Failed to index document: {result}") except Exception: logger.exception("Error indexing batch") def main(): """Main function to run the indexing process.""" import argparse parser = argp
if is_blog: return self.process_blog_file(path, file, root) else: return self.process_doc_file(path, file, root)
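The executor loop above streams results back through as_completed and flushes a batch whenever batch_size processed documents have accumulated, which keeps memory bounded for a large docs tree. A self-contained sketch of that submit/drain/flush pattern, independent of the indexer (the work items and the flush function are illustrative stand-ins):

from concurrent.futures import ThreadPoolExecutor, as_completed

def flush(batch):
    # Stand-in for _index_batch: just report how many items were flushed.
    print(f"flushing {len(batch)} items")

work_items = list(range(10))
batch, batch_size = [], 4

with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(lambda n: n * n, n) for n in work_items]
    for future in as_completed(futures):
        batch.append(future.result())
        if len(batch) >= batch_size:
            flush(batch)
            batch = []

if batch:
    flush(batch)  # final partial batch, mirroring the trailing "if documents:" check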
    def _index_batch(self, documents: List[Dict[str, Any]]) -> None:
        """Index a batch of documents."""
        try:
            clean_docs = []
            for doc in documents:
                clean_doc = {k: v for k, v in doc.items() if v is not None}
                if "subsection" not in clean_doc:
                    clean_doc["subsection"] = ""
                clean_docs.append(clean_doc)

            results = self.client.collections["docs"].documents.import_(
                clean_docs, {"action": "upsert"}
            )

            for result in results:
                if not result.get("success", False):
                    logger.warning(f"Failed to index document: {result}")

        except Exception:
            logger.exception("Error indexing batch")
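The cleaning step in _index_batch is plain dictionary work: fields whose value is None are dropped so the import payload never carries explicit nulls, and a missing "subsection" is re-added as an empty string so the field is always present. A minimal sketch of that transformation on a single document (the field values are illustrative only; setdefault is equivalent to the membership check used above):

sample_doc = {
    "id": "docs/example-page",
    "title": "Example Page",
    "subsection": None,  # explicit None gets stripped...
    "components": [],
}
clean_doc = {k: v for k, v in sample_doc.items() if v is not None}
clean_doc.setdefault("subsection", "")  # ...then the field comes back as ""
# clean_doc == {"id": "docs/example-page", "title": "Example Page",
#               "components": [], "subsection": ""}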
def main():
    """Main function to run the indexing process."""
    import argparse

    parser = argparse.ArgumentParser(
        description="Simple Typesense indexer for Reflex docs"
    )
    parser.add_argument("--docs-path", default="./docs", help="Path to docs directory")
    parser.add_argument("--blog-path", default="./blog", help="Path to blog directory")
    parser.add_argument(
        "--force", action="store_true", help="Force recreate collection"
    )
    parser.add_argument(
        "--batch-size", type=int, default=100, help="Batch size for indexing"
    )
    parser.add_argument("--max-workers", type=int, default=4, help="Max worker threads")

    args = parser.parse_args()

    if not os.getenv("TYPESENSE_HOST"):
        logger.error("TYPESENSE_HOST environment variable is required")
        return False

    if not os.getenv("TYPESENSE_ADMIN_API_KEY"):
        logger.error("TYPESENSE_ADMIN_API_KEY environment variable is required")
        return False

    docs_path = pathlib.Path(args.docs_path)
    if not docs_path.exists():
        logger.error(f"Docs path does not exist: {docs_path}")
        return False

    blog_path = pathlib.Path(args.blog_path)
    if not blog_path.exists():
        logger.warning(f"Blog path does not exist: {blog_path} - skipping blogs")
        args.blog_path = None

    indexer = SimpleTypesenseIndexer()

    if not indexer.create_collection(force_recreate=args.force):
        return False

    success = indexer.index_documents(
        str(docs_path),
        str(blog_path) if blog_path.exists() else "",
        max_workers=args.max_workers,
        batch_size=args.batch_size,
    )

    return success
if __name__ == "__main__":
    success = main()
    exit(0 if success else 1)
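As a usage sketch (hostname, API key, script name, and paths below are placeholders rather than values from this project), the indexer can also be driven programmatically by setting the required environment variables before calling main(); this is equivalent to running the script with the same flags on the command line:

import os
import sys

os.environ["TYPESENSE_HOST"] = "localhost"            # placeholder
os.environ["TYPESENSE_ADMIN_API_KEY"] = "admin-key"    # placeholder

# Same as: python indexer.py --docs-path ./docs --blog-path ./blog --force
sys.argv = ["indexer", "--docs-path", "./docs", "--blog-path", "./blog", "--force"]
ok = main()
print("indexing succeeded" if ok else "indexing failed")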
import sys
from pathlib import Path

import pytest
from reflex.testing import AppHarness

# Add tests directory to Python path for absolute imports
sys.path.insert(0, str(Path(__file__).parent))


@pytest.fixture(scope="session")
def reflex_web_app():
    app_root = Path(__file__).parent.parent

    from pcweb.whitelist import WHITELISTED_PAGES

    WHITELISTED_PAGES.extend(
        [
            "/docs/events",
            "/docs/vars",
            "/docs/getting-started",
            "/docs/library/graphing",
            "/docs/api-reference/special-events",
        ]
    )

    with AppHarness.create(root=app_root) as harness:
        yield harness
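A minimal sketch of a test that could consume this session-scoped harness together with pytest-playwright's page fixture; the test name, the route, and the frontend_url attribute are used here for illustration and are not defined in this conftest:

def test_docs_events_page_loads(reflex_web_app, page):
    # reflex_web_app is the AppHarness yielded above; page comes from pytest-playwright.
    assert reflex_web_app.frontend_url is not None
    page.goto(f"{reflex_web_app.frontend_url}/docs/events")
    page.wait_for_load_state("networkidle")
    assert page.title()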
@pytest.fixture
def browser_context_args():
    """Configure browser context with video recording."""
    return {
        "record_video_dir": "test-videos/",
        "record_video_size": {"width": 1280, "height": 720},
    }
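pytest-playwright reads browser_context_args when it creates each browser context, so every test run through these fixtures records a 1280x720 video under test-videos/. If a single module needed different settings, the usual pytest approach would be to shadow the fixture locally, building on the shared values; a sketch (the larger recording size is only an example):

import pytest

@pytest.fixture
def browser_context_args(browser_context_args):
    # Start from the shared settings and change only the recording size for this module.
    return {**browser_context_args, "record_video_size": {"width": 1920, "height": 1080}}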
# Add tests directory to Python path for absolute imports sys.path.insert(0, str(Path(__file__).parent)) @pytest.fixture(scope="session") def reflex_web_app(): app_root = Path(__file__).parent.parent from pcweb.whitelist import WHITELISTED_PAGES WHITELISTED_PAGES.extend( [ "/docs/events", "/docs/vars", "/docs/getting-started", "/docs/library/graphing", "/docs/api-reference/special-events", ] ) with AppHarness.create(root=app_root) as harness: yield harness @pytest.fixture def browser_context_args(): """Configure browser context with video recording.""" return { "record_video_dir": "test-videos/", "record_video_size": {"width": 1280, "height": 720}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests."""
report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try: import time video_path = None for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break excep
outcome = yield
y to Python path for absolute imports sys.path.insert(0, str(Path(__file__).parent)) @pytest.fixture(scope="session") def reflex_web_app(): app_root = Path(__file__).parent.parent from pcweb.whitelist import WHITELISTED_PAGES WHITELISTED_PAGES.extend( [ "/docs/events", "/docs/vars", "/docs/getting-started", "/docs/library/graphing", "/docs/api-reference/special-events", ] ) with AppHarness.create(root=app_root) as harness: yield harness @pytest.fixture def browser_context_args(): """Configure browser context with video recording.""" return { "record_video_dir": "test-videos/", "record_video_size": {"width": 1280, "height": 720}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield
# Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try: import time video_path = None for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break except Exception:
report = outcome.get_result()
def reflex_web_app():
    app_root = Path(__file__).parent.parent

    from pcweb.whitelist import WHITELISTED_PAGES

    WHITELISTED_PAGES.extend(
        [
            "/docs/events",
            "/docs/vars",
            "/docs/getting-started",
            "/docs/library/graphing",
            "/docs/api-reference/special-events",
        ]
    )

    with AppHarness.create(root=app_root) as harness:
        yield harness
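A test can combine the harness above with pytest-playwright's page fixture. The sketch below is illustrative only: it assumes the running AppHarness exposes a frontend_url attribute and that the docs app serves /docs/events; the locator is a placeholder, not part of the original suite.

from playwright.sync_api import Page, expect


def test_docs_events_page_loads(reflex_web_app, page: Page):
    # Navigate to one of the whitelisted docs pages served by the harness.
    page.goto(f"{reflex_web_app.frontend_url}/docs/events")
    # Placeholder assertion; a real test would target a specific element.
    expect(page.locator("body")).to_be_visible()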
"/docs/api-reference/special-events", ] ) with AppHarness.create(root=app_root) as harness: yield harness @pytest.fixture def browser_context_args(): """Configure browser context with video recording.""" return { "record_video_dir": "test-videos/", "record_video_size": {"width": 1280, "height": 720}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values():
if page and hasattr(page, "video") and page.video: try: import time video_path = None for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break except Exception: time.sleep(0.5) if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_file = (
if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break
st.fixture def browser_context_args(): """Configure browser context with video recording.""" return { "record_video_dir": "test-videos/", "record_video_size": {"width": 1280, "height": 720}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ):
break if page and hasattr(page, "video") and page.video: try: import time video_path = None for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break except Exception: time.sleep(0.5) if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_
page = fixture_value.page
s/", "record_video_size": {"width": 1280, "height": 720}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try:
video_path = None for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break except Exception: time.sleep(0.5) if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_file = ( Path("test-videos") / f"video_metadata_{split_index}.json" ) m
import time
ze": {"width": 1280, "height": 720}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try: import time
for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break except Exception: time.sleep(0.5) if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_file = ( Path("test-videos") / f"video_metadata_{split_index}.json" ) metadata_file.parent.mkdir(exist_ok=
video_path = None
}, } @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try: import time video_path = None
if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_file = ( Path("test-videos") / f"video_metadata_{split_index}.json" ) metadata_file.parent.mkdir(exist_ok=True) with metadata_file.open("a+") as f: fcntl.flock(f.fileno(), fcntl.LOCK_EX) f.seek(0) try: content = f.read() metadata = json.loads(content) i
for _ in range(3): try: video_path = page.video.path() if video_path and Path(video_path).exists(): break except Exception: time.sleep(0.5)
est_runtest_makereport(item, call): """Create metadata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try: import time video_path = None for _ in range(3): try:
if video_path and Path(video_path).exists(): break except Exception: time.sleep(0.5) if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_file = ( Path("test-videos") / f"video_metadata_{split_index}.json" ) metadata_file.parent.mkdir(exist_ok=True) with metadata_file.open("a+") as f: fcntl.flock(f.fileno(), fcnt
video_path = page.video.path()
ata mapping for video files on test failure and clean up videos for passed tests.""" outcome = yield report = outcome.get_result() # Handle test completion (both pass and fail) if report.when == "call": page = None if hasattr(item, "funcargs"): if "page" in item.funcargs: page = item.funcargs["page"] else: # Look for page object in other fixtures for fixture_value in item.funcargs.values(): if hasattr(fixture_value, "page") and hasattr( fixture_value.page, "video" ): page = fixture_value.page break if page and hasattr(page, "video") and page.video: try: import time video_path = None for _ in range(3): try: video_path = page.video.path()
except Exception: time.sleep(0.5) if not video_path: print(f"Failed to get video path for test: {item.name}") return video_file = Path(video_path) if report.failed: # Test failed - keep video and create metadata test_name = item.name import fcntl import json import os split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1") metadata_file = ( Path("test-videos") / f"video_metadata_{split_index}.json" ) metadata_file.parent.mkdir(exist_ok=True) with metadata_file.open("a+") as f: fcntl.flock(f.fileno(), fcntl.LOCK_EX) f.seek(0) try: c
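The dict returned by browser_context_args is forwarded by pytest-playwright to Browser.new_context(), which is where Playwright's video recording is actually configured. A rough sketch of the equivalent manual setup, assuming Chromium and an arbitrary URL:

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    context = browser.new_context(
        record_video_dir="test-videos/",
        record_video_size={"width": 1280, "height": 720},
    )
    page = context.new_page()
    page.goto("https://example.com")
    # The .webm file is finalized once the context is closed.
    context.close()
    browser.close()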
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Create metadata mapping for video files on test failure and clean up videos for passed tests."""
    outcome = yield
    report = outcome.get_result()

    # Handle test completion (both pass and fail)
    if report.when == "call":
        page = None
        if hasattr(item, "funcargs"):
            if "page" in item.funcargs:
                page = item.funcargs["page"]
            else:
                # Look for page object in other fixtures
                for fixture_value in item.funcargs.values():
                    if hasattr(fixture_value, "page") and hasattr(
                        fixture_value.page, "video"
                    ):
                        page = fixture_value.page
                        break

        if page and hasattr(page, "video") and page.video:
            try:
                import time

                video_path = None
                for _ in range(3):
                    try:
                        video_path = page.video.path()
                        if video_path and Path(video_path).exists():
                            break
                    except Exception:
                        time.sleep(0.5)

                if not video_path:
                    print(f"Failed to get video path for test: {item.name}")
                    return

                video_file = Path(video_path)

                if report.failed:
                    # Test failed - keep video and create metadata
                    test_name = item.name

                    import fcntl
                    import json
                    import os

                    split_index = os.environ.get("PYTEST_SPLIT_INDEX", "1")
                    metadata_file = (
                        Path("test-videos") / f"video_metadata_{split_index}.json"
                    )
                    metadata_file.parent.mkdir(exist_ok=True)

                    with metadata_file.open("a+") as f:
                        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
                        f.seek(0)
                        try:
                            content = f.read()
                            metadata = json.loads(content) if content.strip() else {}
                        except (json.JSONDecodeError, ValueError):
                            metadata = {}

                        video_filename = video_file.name
                        metadata[video_filename] = test_name

                        f.seek(0)
                        f.truncate()
                        json.dump(metadata, f, indent=2)
                else:
                    # Test passed - remove video file
                    if video_file.exists():
                        video_file.unlink()
            except Exception as e:
                print(f"Failed to process video for test {item.name}: {e}")
                import traceback

                traceback.print_exc()
        else:
            if report.failed:
                print(f"No video available for failed test: {item.name}")
                video_dir = Path("test-videos")
                if video_dir.exists():
                    import time

                    recent_videos = [
                        f
                        for f in video_dir.glob("*.webm")
                        if f.stat().st_mtime > (time.time() - 60)
                    ]
                    print(
                        f"Recent video files found: {[f.name for f in recent_videos]}"
                    )
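Downstream tooling can use the per-split video_metadata_*.json files to give the surviving videos readable names before uploading them as CI artifacts. This is a hypothetical post-processing step, not part of the conftest above; only the test-videos/ directory and the metadata file naming scheme are taken from the hook, everything else is illustrative.

import json
import re
from pathlib import Path

video_dir = Path("test-videos")
for metadata_file in video_dir.glob("video_metadata_*.json"):
    metadata = json.loads(metadata_file.read_text() or "{}")
    for video_filename, test_name in metadata.items():
        video_path = video_dir / video_filename
        if not video_path.exists():
            continue
        # Sanitize the test name so it can be used as a filename.
        safe_name = re.sub(r"[^\w.-]+", "_", test_name)
        video_path.rename(video_dir / f"{safe_name}{video_path.suffix}")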