instruction
stringlengths
0
1k
input
stringlengths
1
1k
output
stringlengths
10
6.51k
lines[1]]) ).strip() # Create the code snippet. cb = code_block_dark if theme == "dark" else code_block return cb( code=code, language=language, ) def docdemo( code: str, state: str | None = None, comp: rx.Component | None = None, context: bool = False, demobox_props: dict[str, Any] | None = None, theme: str | None = None, **props, ) -> rx.Component: """Create a documentation demo with code and output. Args: code: The code to render the component. state: Code for any state needed for the component. comp: The pre-rendered component. context: Whether to wrap the render code in a function. demobox_props: Props to apply to the demo box. theme: The theme for the code snippet. props: Additional props to apply to the component. Returns: The styled demo. """ demobox_props = demobox_props or {} # Render the component if necessary.
# Wrap the render code in a function if needed. if context: code = f"""def index(): return {code} """ # Add the state code if state is not None: code = state + code if demobox_props.pop("toggle", False): return rx.tabs.root( rx.tabs.list( rx.tabs.trigger( rx.box( "UI", ), value="tab1", class_name="tab-style", ), rx.tabs.trigger( rx.box( "Code", ), value="tab2", class_name="tab-style", ), class_name="justify-end", ), rx.tabs.content( rx.box(docdemobox(comp, **(demobox_props or {})), class_name="my-4"), value="tab1", ), rx.tabs.content(
if comp is None: comp = eval(code)
ip() # Create the code snippet. cb = code_block_dark if theme == "dark" else code_block return cb( code=code, language=language, ) def docdemo( code: str, state: str | None = None, comp: rx.Component | None = None, context: bool = False, demobox_props: dict[str, Any] | None = None, theme: str | None = None, **props, ) -> rx.Component: """Create a documentation demo with code and output. Args: code: The code to render the component. state: Code for any state needed for the component. comp: The pre-rendered component. context: Whether to wrap the render code in a function. demobox_props: Props to apply to the demo box. theme: The theme for the code snippet. props: Additional props to apply to the component. Returns: The styled demo. """ demobox_props = demobox_props or {} # Render the component if necessary. if comp is None:
# Wrap the render code in a function if needed. if context: code = f"""def index(): return {code} """ # Add the state code if state is not None: code = state + code if demobox_props.pop("toggle", False): return rx.tabs.root( rx.tabs.list( rx.tabs.trigger( rx.box( "UI", ), value="tab1", class_name="tab-style", ), rx.tabs.trigger( rx.box( "Code", ), value="tab2", class_name="tab-style", ), class_name="justify-end", ), rx.tabs.content( rx.box(docdemobox(comp, **(demobox_props or {})), class_name="my-4"), value="tab1", ), rx.tabs.content(
comp = eval(code)
dark" else code_block return cb( code=code, language=language, ) def docdemo( code: str, state: str | None = None, comp: rx.Component | None = None, context: bool = False, demobox_props: dict[str, Any] | None = None, theme: str | None = None, **props, ) -> rx.Component: """Create a documentation demo with code and output. Args: code: The code to render the component. state: Code for any state needed for the component. comp: The pre-rendered component. context: Whether to wrap the render code in a function. demobox_props: Props to apply to the demo box. theme: The theme for the code snippet. props: Additional props to apply to the component. Returns: The styled demo. """ demobox_props = demobox_props or {} # Render the component if necessary. if comp is None: comp = eval(code) # Wrap the render code in a function if needed.
# Add the state code if state is not None: code = state + code if demobox_props.pop("toggle", False): return rx.tabs.root( rx.tabs.list( rx.tabs.trigger( rx.box( "UI", ), value="tab1", class_name="tab-style", ), rx.tabs.trigger( rx.box( "Code", ), value="tab2", class_name="tab-style", ), class_name="justify-end", ), rx.tabs.content( rx.box(docdemobox(comp, **(demobox_props or {})), class_name="my-4"), value="tab1", ), rx.tabs.content( rx.box(doccode(code, theme=theme), class_name="my-4"), value="tab2", ), default_value="tab1",
if context: code = f"""def index(): return {code} """
k return cb( code=code, language=language, ) def docdemo( code: str, state: str | None = None, comp: rx.Component | None = None, context: bool = False, demobox_props: dict[str, Any] | None = None, theme: str | None = None, **props, ) -> rx.Component: """Create a documentation demo with code and output. Args: code: The code to render the component. state: Code for any state needed for the component. comp: The pre-rendered component. context: Whether to wrap the render code in a function. demobox_props: Props to apply to the demo box. theme: The theme for the code snippet. props: Additional props to apply to the component. Returns: The styled demo. """ demobox_props = demobox_props or {} # Render the component if necessary. if comp is None: comp = eval(code) # Wrap the render code in a function if needed. if context:
# Add the state code if state is not None: code = state + code if demobox_props.pop("toggle", False): return rx.tabs.root( rx.tabs.list( rx.tabs.trigger( rx.box( "UI", ), value="tab1", class_name="tab-style", ), rx.tabs.trigger( rx.box( "Code", ), value="tab2", class_name="tab-style", ), class_name="justify-end", ), rx.tabs.content( rx.box(docdemobox(comp, **(demobox_props or {})), class_name="my-4"), value="tab1", ), rx.tabs.content( rx.box(doccode(code, theme=theme), class_name="my-4"), value="tab2", ), default_value="tab1",
code = f"""def index(): return {code} """
code: str, state: str | None = None, comp: rx.Component | None = None, context: bool = False, demobox_props: dict[str, Any] | None = None, theme: str | None = None, **props, ) -> rx.Component: """Create a documentation demo with code and output. Args: code: The code to render the component. state: Code for any state needed for the component. comp: The pre-rendered component. context: Whether to wrap the render code in a function. demobox_props: Props to apply to the demo box. theme: The theme for the code snippet. props: Additional props to apply to the component. Returns: The styled demo. """ demobox_props = demobox_props or {} # Render the component if necessary. if comp is None: comp = eval(code) # Wrap the render code in a function if needed. if context: code = f"""def index(): return {code} """ # Add the state code
if demobox_props.pop("toggle", False): return rx.tabs.root( rx.tabs.list( rx.tabs.trigger( rx.box( "UI", ), value="tab1", class_name="tab-style", ), rx.tabs.trigger( rx.box( "Code", ), value="tab2", class_name="tab-style", ), class_name="justify-end", ), rx.tabs.content( rx.box(docdemobox(comp, **(demobox_props or {})), class_name="my-4"), value="tab1", ), rx.tabs.content( rx.box(doccode(code, theme=theme), class_name="my-4"), value="tab2", ), default_value="tab1", ) # Create the demo. return rx.box( docdemobox(comp, **(
if state is not None: code = state + code
ne = None, comp: rx.Component | None = None, context: bool = False, demobox_props: dict[str, Any] | None = None, theme: str | None = None, **props, ) -> rx.Component: """Create a documentation demo with code and output. Args: code: The code to render the component. state: Code for any state needed for the component. comp: The pre-rendered component. context: Whether to wrap the render code in a function. demobox_props: Props to apply to the demo box. theme: The theme for the code snippet. props: Additional props to apply to the component. Returns: The styled demo. """ demobox_props = demobox_props or {} # Render the component if necessary. if comp is None: comp = eval(code) # Wrap the render code in a function if needed. if context: code = f"""def index(): return {code} """ # Add the state code if state is not None:
if demobox_props.pop("toggle", False): return rx.tabs.root( rx.tabs.list( rx.tabs.trigger( rx.box( "UI", ), value="tab1", class_name="tab-style", ), rx.tabs.trigger( rx.box( "Code", ), value="tab2", class_name="tab-style", ), class_name="justify-end", ), rx.tabs.content( rx.box(docdemobox(comp, **(demobox_props or {})), class_name="my-4"), value="tab1", ), rx.tabs.content( rx.box(doccode(code, theme=theme), class_name="my-4"), value="tab2", ), default_value="tab1", ) # Create the demo. return rx.box( docdemobox(comp, **(
code = state + code
"""Template for documentation pages."""
icon_margins = { "h1": "10px", "h2": "5px", "h3": "2px", "h4": "0px", } def h_comp_common( text: str, heading: str, style: dict | None = None, mt: str = "4", class_name: str = "", ) -> rx.Component: id_ = text.lower().split(" ").join("-") href = rx.State.router.page.full_path + "#" + id_ return rx.link( rx.heading( text, id=id_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer
import reflex as rx
"""Template for documentation pages.""" import reflex as rx
def h_comp_common( text: str, heading: str, style: dict | None = None, mt: str = "4", class_name: str = "", ) -> rx.Component: id_ = text.lower().split(" ").join("-") href = rx.State.router.page.full_path + "#" + id_ return rx.link( rx.heading( text, id=id_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component:
icon_margins = { "h1": "10px", "h2": "5px", "h3": "2px", "h4": "0px", }
"""Template for documentation pages.""" import reflex as rx icon_margins = { "h1": "10px", "h2": "5px", "h3": "2px", "h4": "0px", } def h_comp_common( text: str, heading: str, style: dict | None = None, mt: str = "4", class_name: str = "", ) -> rx.Component:
href = rx.State.router.page.full_path + "#" + id_ return rx.link( rx.heading( text, id=id_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: retur
id_ = text.lower().split(" ").join("-")
"""Template for documentation pages.""" import reflex as rx icon_margins = { "h1": "10px", "h2": "5px", "h3": "2px", "h4": "0px", } def h_comp_common( text: str, heading: str, style: dict | None = None, mt: str = "4", class_name: str = "", ) -> rx.Component: id_ = text.lower().split(" ").join("-")
return rx.link( rx.heading( text, id=id_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading
href = rx.State.router.page.full_path + "#" + id_
5px", "h3": "2px", "h4": "0px", } def h_comp_common( text: str, heading: str, style: dict | None = None, mt: str = "4", class_name: str = "", ) -> rx.Component: id_ = text.lower().split(" ").join("-") href = rx.State.router.page.full_path + "#" + id_ return rx.link( rx.heading( text, id=id_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo
@rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo def h4_comp(text: str) -> rx.Component: return h
def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", )
rx.Component: id_ = text.lower().split(" ").join("-") href = rx.State.router.page.full_path + "#" + id_ return rx.link( rx.heading( text, id=id_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo
@rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", ) @rx.memo def h4_comp_xd(text: list[str]) -> rx.Component: return h_c
def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", )
d_, as_=heading, style=style if style is not None else {}, class_name=class_name + " scroll-m-[5rem] mt-" + mt, ), rx.icon( tag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo
@rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", ) @rx.memo def h4_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h4", mt="2", class_name="font-md-smbold", )
def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", )
ag="link", size=18, class_name="!text-violet-11 invisible transition-[visibility_0.075s_ease-out] group-hover:visible mt-" + mt, ), underline="none", href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo
@rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", ) @rx.memo def h4_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h4", mt="2", class_name="font-md-smbold", )
def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", )
href=href, on_click=lambda: rx.set_clipboard(href), # as_child=True, class_name="flex flex-row items-center gap-6 hover:!text-violet-11 text-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo
@rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", ) @rx.memo def h4_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h4", mt="2", class_name="font-md-smbold", )
def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", )
-slate-12 cursor-pointer mb-2 transition-colors group", ) @rx.memo def h1_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo
@rx.memo def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", ) @rx.memo def h4_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h4", mt="2", class_name="font-md-smbold", )
def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", )
class_name="font-x-large lg:font-xx-large", ) @rx.memo def h1_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo
@rx.memo def h4_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h4", mt="2", class_name="font-md-smbold", )
def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", )
"h1", class_name="font-x-large lg:font-xx-large", ) @rx.memo def h2_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h2_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h2", mt="8", class_name="font-large lg:font-x-large", ) @rx.memo def h3_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h3", mt="4", class_name="font-large", ) @rx.memo def h3_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h3", mt="4", class_name="font-large", ) @rx.memo def h4_comp(text: str) -> rx.Component: return h_comp_common( text=text, heading="h4", mt="2", class_name="font-md-smbold", ) @rx.memo
def h4_comp_xd(text: list[str]) -> rx.Component: return h_comp_common( text=text[0], heading="h4", mt="2", class_name="font-md-smbold", )
"""Typography blocks for doc pages."""
from pcweb.styles import fonts def definition(title: str, *children) -> rx.Component: """Create a definition for a doc page. Args: title: The title of the definition. children: The children to display. Returns: The styled definition. """ return rx.vstack( rx.heading( title, font_size="1em", font_weight="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Com
import reflex as rx
"""Typography blocks for doc pages.""" import reflex as rx
def definition(title: str, *children) -> rx.Component: """Create a definition for a doc page. Args: title: The title of the definition. children: The children to display. Returns: The styled definition. """ return rx.vstack( rx.heading( title, font_size="1em", font_weight="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text,
from pcweb.styles import fonts
"""Typography blocks for doc pages.""" import reflex as rx from pcweb.styles import fonts def definition(title: str, *children) -> rx.Component: """Create a definition for a doc page. Args: title: The title of the definition. children: The children to display. Returns: The styled definition. """ return rx.vstack( rx.heading( title, font_size="1em", font_weight="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo
@rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 max-w-[80%] mb-10") @rx.memo def list_comp(text: rx.Var[str]) -> rx.Component: return rx.list_item(text, class_name="font-normal text-slate-12 mb-4") @rx.memo def unordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.unordered(items, class_name="mb-6") @rx.memo def ordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.ordered(items, class_name="mb-6") @rx.memo def code_comp(text: rx.Var[str]) -> rx.Component: return rx.code(text, class_name="code-style") def doclink(text: str, href: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link( text, underline="always", href=href, **pro
def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7")
locks for doc pages.""" import reflex as rx from pcweb.styles import fonts def definition(title: str, *children) -> rx.Component: """Create a definition for a doc page. Args: title: The title of the definition. children: The children to display. Returns: The styled definition. """ return rx.vstack( rx.heading( title, font_size="1em", font_weight="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo
@rx.memo def list_comp(text: rx.Var[str]) -> rx.Component: return rx.list_item(text, class_name="font-normal text-slate-12 mb-4") @rx.memo def unordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.unordered(items, class_name="mb-6") @rx.memo def ordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.ordered(items, class_name="mb-6") @rx.memo def code_comp(text: rx.Var[str]) -> rx.Component: return rx.code(text, class_name="code-style") def doclink(text: str, href: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link( text, underline="always", href=href, **props, class_name="text-violet-10" ) def doclink2(text: str, **props) -> rx.Component: """Create a styled link for doc pages. Args:
def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 max-w-[80%] mb-10")
e a definition for a doc page. Args: title: The title of the definition. children: The children to display. Returns: The styled definition. """ return rx.vstack( rx.heading( title, font_size="1em", font_weight="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 max-w-[80%] mb-10") @rx.memo
@rx.memo def unordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.unordered(items, class_name="mb-6") @rx.memo def ordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.ordered(items, class_name="mb-6") @rx.memo def code_comp(text: rx.Var[str]) -> rx.Component: return rx.code(text, class_name="code-style") def doclink(text: str, href: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link( text, underline="always", href=href, **props, class_name="text-violet-10" ) def doclink2(text: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The s
def list_comp(text: rx.Var[str]) -> rx.Component: return rx.list_item(text, class_name="font-normal text-slate-12 mb-4")
turns: The styled definition. """ return rx.vstack( rx.heading( title, font_size="1em", font_weight="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 max-w-[80%] mb-10") @rx.memo def list_comp(text: rx.Var[str]) -> rx.Component: return rx.list_item(text, class_name="font-normal text-slate-12 mb-4") @rx.memo
@rx.memo def ordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.ordered(items, class_name="mb-6") @rx.memo def code_comp(text: rx.Var[str]) -> rx.Component: return rx.code(text, class_name="code-style") def doclink(text: str, href: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link( text, underline="always", href=href, **props, class_name="text-violet-10" ) def doclink2(text: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link(text, underline="always", **props, style=fonts.base)
def unordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.unordered(items, class_name="mb-6")
ht="bold", color=rx.color("mauve", 12) ), *children, color=rx.color("mauve", 10), padding="1em", border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 max-w-[80%] mb-10") @rx.memo def list_comp(text: rx.Var[str]) -> rx.Component: return rx.list_item(text, class_name="font-normal text-slate-12 mb-4") @rx.memo def unordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.unordered(items, class_name="mb-6") @rx.memo
@rx.memo def code_comp(text: rx.Var[str]) -> rx.Component: return rx.code(text, class_name="code-style") def doclink(text: str, href: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link( text, underline="always", href=href, **props, class_name="text-violet-10" ) def doclink2(text: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link(text, underline="always", **props, style=fonts.base)
def ordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.ordered(items, class_name="mb-6")
border=f"1px solid {rx.color('mauve', 4)}", background_color=rx.color("mauve", 2), border_radius="8px", _hover={ "border": f"1px solid {rx.color('mauve', 5)}", "background_color": rx.color("mauve", 3), }, align_items="start", ) @rx.memo def text_comp(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 mb-4 leading-7") @rx.memo def text_comp_2(text: rx.Var[str]) -> rx.Component: return rx.text(text, class_name="font-normal text-slate-12 max-w-[80%] mb-10") @rx.memo def list_comp(text: rx.Var[str]) -> rx.Component: return rx.list_item(text, class_name="font-normal text-slate-12 mb-4") @rx.memo def unordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.unordered(items, class_name="mb-6") @rx.memo def ordered_list_comp(items: rx.Var[list[str]]) -> rx.Component: return rx.list.ordered(items, class_name="mb-6") @rx.memo
def doclink(text: str, href: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link( text, underline="always", href=href, **props, class_name="text-violet-10" ) def doclink2(text: str, **props) -> rx.Component: """Create a styled link for doc pages. Args: text: The text to display. href: The link to go to. props: Props to apply to the link. Returns: The styled link. """ return rx.link(text, underline="always", **props, style=fonts.base)
def code_comp(text: rx.Var[str]) -> rx.Component: return rx.code(text, class_name="code-style")
import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex
from datetime import datetime
from datetime import datetime
from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4",
import reflex as rx
from datetime import datetime import reflex as rx
from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/",
from pcweb.components.icons.icons import get_icon
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, )
from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg
from pcweb.pages.blog import blogs
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs
from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text
from pcweb.pages.docs import getting_started, hosting
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting
from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflo
from pcweb.pages.docs.library import library
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library
from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + clas
from pcweb.pages.errors import errors
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors
from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url,
from pcweb.pages.faq import faq
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq
from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url, is_external=True, ) def menu_soc
from pcweb.pages.gallery import gallery
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery
from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url, is_external=True, ) def menu_socials() -> rx.Component: return rx.box( rx.box(
from pcweb.pages.use_cases.use_cases import use_cases_page
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page
def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url, is_external=True, ) def menu_socials() -> rx.Component: return rx.box( rx.box( social_menu_item("github
from pcweb.signup import IndexState
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState
def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", ) def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url, is_external=True, ) def menu_socials() -> rx.Component: return rx.box( rx.box( social_menu_item("github", GITHUB_URL), social_menu_item( "twitter", TWITTER_URL, class_name="border-l border-slate-5 border-solid border-y-0 border-r-0", ),
def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", )
from datetime import datetime import reflex as rx from pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", )
def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url, is_external=True, ) def menu_socials() -> rx.Component: return rx.box( rx.box( social_menu_item("github", GITHUB_URL), social_menu_item( "twitter", TWITTER_URL, class_name="border-l border-slate-5 border-solid border-y-0 border-r-0", ), social_menu_item( "forum", FORUM_URL, class_name="!border-l !border-r border-slate-5 border-solid border-y-0", ), social_menu_item("discord", DISCORD_URL), social_menu_item(
def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", )
om pcweb.components.icons.icons import get_icon from pcweb.constants import ( DISCORD_URL, FORUM_URL, GITHUB_URL, LINKEDIN_URL, ROADMAP_URL, TWITTER_URL, ) from pcweb.pages.blog import blogs from pcweb.pages.docs import getting_started, hosting from pcweb.pages.docs.library import library from pcweb.pages.errors import errors from pcweb.pages.faq import faq from pcweb.pages.gallery import gallery from pcweb.pages.use_cases.use_cases import use_cases_page from pcweb.signup import IndexState def footer_link(text: str, href: str) -> rx.Component: return rx.link( text, href=href, class_name="font-small text-slate-9 hover:!text-slate-11 no-underline transition-color", ) def footer_link_flex(heading: str, links: list[rx.Component]) -> rx.Component: return rx.box( rx.el.h4( heading, class_name="font-smbold text-slate-12", ), *links, class_name="flex flex-col gap-4", )
def menu_socials() -> rx.Component: return rx.box( rx.box( social_menu_item("github", GITHUB_URL), social_menu_item( "twitter", TWITTER_URL, class_name="border-l border-slate-5 border-solid border-y-0 border-r-0", ), social_menu_item( "forum", FORUM_URL, class_name="!border-l !border-r border-slate-5 border-solid border-y-0", ), social_menu_item("discord", DISCORD_URL), social_menu_item( "linkedin", LINKEDIN_URL, class_name="!border-l !border-r border-slate-5 border-solid border-y-0", ), class_name="flex flex-row h-full align-center divide-x divide-slate-5 border-solid", ), class_name="border-slate-5 bg-slate-1 shadow-large border rounded-full h-6 overflow-hidden", ) def newsletter_form() -> rx.Compone
def social_menu_item(icon: str, url: str = "/", class_name: str = "") -> rx.Component: return rx.link( get_icon(icon=icon), class_name="flex justify-center items-center gap-2 bg-slate-1 hover:bg-slate-3 p-[0.125rem_0.75rem] text-slate-9 hover:!text-slate-9 transition-bg cursor-pointer overflow-hidden" + " " + class_name, href=url, is_external=True, )
input_email", type="email", class_name="box-border border-slate-5 focus:border-violet-9 focus:border-1 bg-white-1 p-[0.5rem_0.75rem] border rounded-[10px] font-small text-slate-11 placeholder:text-slate-9 outline-none focus:outline-none", ), rx.form.submit( rx.el.button( "Subscribe", class_name="flex justify-center items-center bg-slate-4 hover:bg-slate-5 p-[0.5rem_0.875rem] rounded-[10px] font-smbold text-slate-9 transition-bg cursor-pointer", ), as_child=True, ), class_name="flex flex-row gap-2 align-center", ), on_submit=IndexState.signup, ), ), class_name="flex flex-col gap-4 align-start", ), )
@rx.memo def footer() -> rx.Component: from pcweb.pages.framework.views.footer_index import dark_mode_toggle return rx.el.footer( rx.box( rx.box( rx.box( menu_socials(), ph_1(), class_name="flex flex-col gap-6", ), rx.text( f"© {datetime.now().year} Pynecone, Inc.", class_name="font-small text-slate-9", ), class_name="flex flex-col justify-between items-start gap-4 self-stretch", ), footer_link_flex( "Links", [ footer_link("Home", "/"), footer_link("Templates", gallery.path), footer_link("Blog", blogs.path), footer_link( "Changelog", "https://github.com/reflex-dev/reflex/releases" ), ]
def ph_1() -> rx.Component: return rx.fragment( rx.image( src="/logos/dark/ph_1.svg", class_name="hidden dark:block h-[40px] w-fit", alt="1st product of the day logo", ), rx.image( src="/logos/light/ph_1.svg", class_name="dark:hidden block h-[40px] w-fit", alt="1st product of the day logo", ), )
class_name="flex justify-center items-center bg-slate-4 hover:bg-slate-5 p-[0.5rem_0.875rem] rounded-[10px] font-smbold text-slate-9 transition-bg cursor-pointer", ), as_child=True, ), class_name="flex flex-row gap-2 align-center", ), on_submit=IndexState.signup, ), ), class_name="flex flex-col gap-4 align-start", ), ) def ph_1() -> rx.Component: return rx.fragment( rx.image( src="/logos/dark/ph_1.svg", class_name="hidden dark:block h-[40px] w-fit", alt="1st product of the day logo", ), rx.image( src="/logos/light/ph_1.svg", class_name="dark:hidden block h-[40px] w-fit", alt="1st product of the day logo", ), ) @rx.memo def footer() -> rx.Component:
return rx.el.footer( rx.box( rx.box( rx.box( menu_socials(), ph_1(), class_name="flex flex-col gap-6", ), rx.text( f"© {datetime.now().year} Pynecone, Inc.", class_name="font-small text-slate-9", ), class_name="flex flex-col justify-between items-start gap-4 self-stretch", ), footer_link_flex( "Links", [ footer_link("Home", "/"), footer_link("Templates", gallery.path), footer_link("Blog", blogs.path), footer_link( "Changelog", "https://github.com/reflex-dev/reflex/releases" ), ], ), footer_link_flex( "Documentation", [
from pcweb.pages.framework.views.footer_index import dark_mode_toggle
from pcweb.views.bottom_section.get_started import get_started from pcweb.views.bottom_section.newsletter import newsletter @rx.memo def bottom_section() -> rx.Component: return rx.box( newsletter(), get_started(), class_name="flex flex-col items-center gap-20 lg:gap-32 pt-8 lg:pt-[6.5rem] w-[22rem] lg:w-[25rem]", )
import reflex as rx
import reflex_ui as ui from pcweb.components.hint import hint from pcweb.components.icons.icons import get_icon from pcweb.pages.docs import getting_started def code_block() -> rx.Component: return rx.box( # Glow rx.html( """<svg xmlns="http://www.w3.org/2000/svg" width="598" height="247" viewBox="0 0 598 247" fill="none"> <path d="M598 123.5C598 191.707 464.133 247 299 247C133.867 247 0 191.707 0 123.5C0 55.2928 133.867 0 299 0C464.133 0 598 55.2928 598 123.5Z" fill="url(#paint0_radial_10744_8795)"/> <defs> <radialGradient id="paint0_radial_10744_8795" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(299 123.5) rotate(90) scale(123.5 299)"> <stop stop-color="var(--violet-3)"/> <stop offset="1" stop-color="var(--c-slate-1)" stop-opacity="0"/> </radialGradient> </defs> </svg>""", class_name="w-[37.375rem] h-[15.4375rem] shrink-0 absolute top-1/2 left-1/2 transform -translate-x-1/2 -t
import reflex as rx
import reflex as rx
from pcweb.components.hint import hint from pcweb.components.icons.icons import get_icon from pcweb.pages.docs import getting_started def code_block() -> rx.Component: return rx.box( # Glow rx.html( """<svg xmlns="http://www.w3.org/2000/svg" width="598" height="247" viewBox="0 0 598 247" fill="none"> <path d="M598 123.5C598 191.707 464.133 247 299 247C133.867 247 0 191.707 0 123.5C0 55.2928 133.867 0 299 0C464.133 0 598 55.2928 598 123.5Z" fill="url(#paint0_radial_10744_8795)"/> <defs> <radialGradient id="paint0_radial_10744_8795" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(299 123.5) rotate(90) scale(123.5 299)"> <stop stop-color="var(--violet-3)"/> <stop offset="1" stop-color="var(--c-slate-1)" stop-opacity="0"/> </radialGradient> </defs> </svg>""", class_name="w-[37.375rem] h-[15.4375rem] shrink-0 absolute top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 z-[-1]",
import reflex_ui as ui
import reflex as rx import reflex_ui as ui
from pcweb.components.icons.icons import get_icon from pcweb.pages.docs import getting_started def code_block() -> rx.Component: return rx.box( # Glow rx.html( """<svg xmlns="http://www.w3.org/2000/svg" width="598" height="247" viewBox="0 0 598 247" fill="none"> <path d="M598 123.5C598 191.707 464.133 247 299 247C133.867 247 0 191.707 0 123.5C0 55.2928 133.867 0 299 0C464.133 0 598 55.2928 598 123.5Z" fill="url(#paint0_radial_10744_8795)"/> <defs> <radialGradient id="paint0_radial_10744_8795" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(299 123.5) rotate(90) scale(123.5 299)"> <stop stop-color="var(--violet-3)"/> <stop offset="1" stop-color="var(--c-slate-1)" stop-opacity="0"/> </radialGradient> </defs> </svg>""", class_name="w-[37.375rem] h-[15.4375rem] shrink-0 absolute top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 z-[-1]", ), # Copy button
from pcweb.components.hint import hint
import reflex as rx import reflex_ui as ui from pcweb.components.hint import hint
from pcweb.pages.docs import getting_started def code_block() -> rx.Component: return rx.box( # Glow rx.html( """<svg xmlns="http://www.w3.org/2000/svg" width="598" height="247" viewBox="0 0 598 247" fill="none"> <path d="M598 123.5C598 191.707 464.133 247 299 247C133.867 247 0 191.707 0 123.5C0 55.2928 133.867 0 299 0C464.133 0 598 55.2928 598 123.5Z" fill="url(#paint0_radial_10744_8795)"/> <defs> <radialGradient id="paint0_radial_10744_8795" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(299 123.5) rotate(90) scale(123.5 299)"> <stop stop-color="var(--violet-3)"/> <stop offset="1" stop-color="var(--c-slate-1)" stop-opacity="0"/> </radialGradient> </defs> </svg>""", class_name="w-[37.375rem] h-[15.4375rem] shrink-0 absolute top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 z-[-1]", ), # Copy button rx.box( rx.box( rx.e
from pcweb.components.icons.icons import get_icon
import reflex as rx import reflex_ui as ui from pcweb.components.hint import hint from pcweb.components.icons.icons import get_icon
def code_block() -> rx.Component: return rx.box( # Glow rx.html( """<svg xmlns="http://www.w3.org/2000/svg" width="598" height="247" viewBox="0 0 598 247" fill="none"> <path d="M598 123.5C598 191.707 464.133 247 299 247C133.867 247 0 191.707 0 123.5C0 55.2928 133.867 0 299 0C464.133 0 598 55.2928 598 123.5Z" fill="url(#paint0_radial_10744_8795)"/> <defs> <radialGradient id="paint0_radial_10744_8795" cx="0" cy="0" r="1" gradientUnits="userSpaceOnUse" gradientTransform="translate(299 123.5) rotate(90) scale(123.5 299)"> <stop stop-color="var(--violet-3)"/> <stop offset="1" stop-color="var(--c-slate-1)" stop-opacity="0"/> </radialGradient> </defs> </svg>""", class_name="w-[37.375rem] h-[15.4375rem] shrink-0 absolute top-1/2 left-1/2 transform -translate-x-1/2 -translate-y-1/2 z-[-1]", ), # Copy button rx.box( rx.box( rx.el.p("$ pip install reflex"),
from pcweb.pages.docs import getting_started
from pcweb.components.button import button from pcweb.signup import IndexState def newletter_input() -> rx.Component: return rx.box( rx.cond( IndexState.signed_up, rx.box( rx.box( rx.icon( tag="circle-check", size=16, class_name="!text-violet-9", ), rx.text( "Thanks for subscribing!", class_name="font-base text-slate-11", ), class_name="flex flex-row items-center gap-2", ), button( "Sign up for another email", variant="muted", on_click=IndexState.signup_for_another_user, ), class_name="flex flex-col flex-wrap gap-2", ), rx.form( # Glow rx
import reflex as rx
import reflex as rx
from pcweb.signup import IndexState def newletter_input() -> rx.Component: return rx.box( rx.cond( IndexState.signed_up, rx.box( rx.box( rx.icon( tag="circle-check", size=16, class_name="!text-violet-9", ), rx.text( "Thanks for subscribing!", class_name="font-base text-slate-11", ), class_name="flex flex-row items-center gap-2", ), button( "Sign up for another email", variant="muted", on_click=IndexState.signup_for_another_user, ), class_name="flex flex-col flex-wrap gap-2", ), rx.form( # Glow rx.box( # Glow
from pcweb.components.button import button
import reflex as rx from pcweb.components.button import button
def newletter_input() -> rx.Component: return rx.box( rx.cond( IndexState.signed_up, rx.box( rx.box( rx.icon( tag="circle-check", size=16, class_name="!text-violet-9", ), rx.text( "Thanks for subscribing!", class_name="font-base text-slate-11", ), class_name="flex flex-row items-center gap-2", ), button( "Sign up for another email", variant="muted", on_click=IndexState.signup_for_another_user, ), class_name="flex flex-col flex-wrap gap-2", ), rx.form( # Glow rx.box( # Glow rx.html(
from pcweb.signup import IndexState
1.4375rem] h-[5.5625rem] -translate-y-1/2 right-[-2.5rem] top-1/2 z-[-1]", ), button( "Subscribe", type="submit", variant="muted", ), class_name="relative", ), class_name="flex flex-row gap-2 align-center", on_submit=IndexState.signup, ), ), class_name="w-full", ) def newsletter_card() -> rx.Component: return rx.box( rx.box( rx.el.h2("Newsletter", class_name="font-large text-slate-12"), rx.el.p( """Get the latest updates and news about Reflex""", class_name="font-base text-slate-10 whitespace-pre", ), class_name="flex flex-col items-center text-center gap-2", ), newletter_input(), class_name="flex flex-col gap-4 w-full items-center", )
def newsletter() -> rx.Component: return rx.el.section( newsletter_card(), class_name="flex items-center justify-center w-full", )
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """
import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url):
import argparse
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse
import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): ""
import sys
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time
from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parse
import xml.etree.ElementTree as ET
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET
from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) norm
from collections import deque
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque
import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc
from urllib.parse import urljoin, urlparse
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
import requests
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests
class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query:
from bs4 import BeautifulSoup
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker:
def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = sel
def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} )
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5):
self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link
self.base_url = base_url.rstrip("/")
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/")
self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_l
self.domain = urlparse(base_url).netloc
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc
self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True
self.max_pages = max_pages
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages
self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url)
self.timeout = timeout
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout
self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse
self.delay = delay
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay
self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["
self.visited_pages = set()
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set()
self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstat
self.checked_links = set()
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set()
self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return
self.dead_links = []
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = []
self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.s
self.pages_to_visit = deque([base_url])
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url])
self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=s
self.session = requests.Session()
#!/usr/bin/env python3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} )
def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twit
def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == ""
hon3 """Dead link checker for the Reflex website. Crawls the deployed site and checks for broken links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain."""
return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if respo
parsed = urlparse(url)
n links. """ import argparse import sys import time import xml.etree.ElementTree as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == ""
def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400:
def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized
as ET from collections import deque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison."""
normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com",
parsed = urlparse(url)
eque from urllib.parse import urljoin, urlparse import requests from bs4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url)
if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by b
normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
s4 import BeautifulSoup class DeadLinkChecker: def __init__(self, base_url, max_pages=None, timeout=10, delay=0.5): self.base_url = base_url.rstrip("/") self.domain = urlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if resp
if parsed.query: normalized += f"?{parsed.query}"
rlparse(base_url).netloc self.max_pages = max_pages self.timeout = timeout self.delay = delay self.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working."""
self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code
if url in self.checked_links: return True
.visited_pages = set() self.checked_links = set() self.dead_links = [] self.pages_to_visit = deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url)
try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } )
if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True
deque([base_url]) self.session = requests.Session() self.session.headers.update( {"User-Agent": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try:
if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( {
response = self.session.head( url, timeout=self.timeout, allow_redirects=True )
t": "Mozilla/5.0 (compatible; DeadLinkChecker/1.0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True )
if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), }
if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True )
0)"} ) def is_internal_url(self, url): """Check if URL is internal to our domain.""" parsed = urlparse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405:
if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), }
response = self.session.get( url, timeout=self.timeout, allow_redirects=True )
rse(url) return parsed.netloc == self.domain or parsed.netloc == "" def normalize_url(self, url): """Normalize URL for comparison.""" parsed = urlparse(url) normalized = f"{parsed.scheme}://{parsed.netloc}{parsed.path}" if parsed.query: normalized += f"?{parsed.query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True )
if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None
if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True
query}" return normalized def check_link(self, url, source_page): """Check if a single link is working.""" if url in self.checked_links: return True self.checked_links.add(url) parsed = urlparse(url) if parsed.netloc in ["fonts.googleapis.com", "fonts.gstatic.com"]: return True try: response = self.session.head( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 405: response = self.session.get( url, timeout=self.timeout, allow_redirects=True ) if response.status_code == 403 and parsed.netloc in [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True
except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url)
if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False
n [ "twitter.com", "www.twitter.com", "x.com", "www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True
def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay) return lin
def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links
"www.x.com", ]: print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content."""
links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get
soup = BeautifulSoup(html, "html.parser")
print(f"Warning: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser")
for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", ""
links = []
: Twitter/X link may be blocked by bot detection: {url}") return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = []
return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay)
for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url)
return True if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]):
if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return []
url = None
if response.status_code >= 400: self.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None
if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link):
if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src")
.dead_links.append( { "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link":
elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links:
url = tag.get("href")
{ "url": url, "status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href")
if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link):
elif tag.name == "img" or tag.name == "script": url = tag.get("src")
"status_code": response.status_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script":
if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link):
url = tag.get("src")
_code, "source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src")
return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay)
if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url)
"source_page": source_page, "error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url:
if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normal
absolute_url = urljoin(page_url, url)
"error": f"HTTP {response.status_code}", } ) return False except requests.exceptions.RequestException as e: self.dead_links.append( { "url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url)
return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay)
if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url)
"url": url, "status_code": None, "source_page": source_page, "error": str(e), } ) return False return True def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links."""
self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay) return links except requests.exceptions.RequestException as e: print(f"Error crawling {url}: {e}") return [] def get_sitemap_urls(self): """Try to get URLs from sitemap.xml.""" sitemap_url
if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return []
def extract_links(self, html, page_url): """Extract all links from HTML content.""" soup = BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try:
response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay) return links except requests.exceptions.RequestException as e: print(f"Error crawling {url}: {e}") return [] def get_sitemap_urls(self): """Try to get URLs from sitemap.xml.""" sitemap_url = f"{self.base_url}/sitemap.xml" print(f"Checking for sitemap at: {sitemap_url}") try: response = self.session.get(sitemap_
response = self.session.get(url, timeout=self.timeout)
BeautifulSoup(html, "html.parser") links = [] for tag in soup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status()
if "text/html" not in content_type: return [] links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay) return links except requests.exceptions.RequestException as e: print(f"Error crawling {url}: {e}") return [] def get_sitemap_urls(self): """Try to get URLs from sitemap.xml.""" sitemap_url = f"{self.base_url}/sitemap.xml" print(f"Checking for sitemap at: {sitemap_url}") try: response = self.session.get(sitemap_url, timeout=self.timeout) if response.status_code == 200: print("✅ Found sitemap.xml, pa
content_type = response.headers.get("content-type", "").lower()
oup.find_all(["a", "link", "img", "script"]): url = None if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower()
links = self.extract_links(response.text, url) for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay) return links except requests.exceptions.RequestException as e: print(f"Error crawling {url}: {e}") return [] def get_sitemap_urls(self): """Try to get URLs from sitemap.xml.""" sitemap_url = f"{self.base_url}/sitemap.xml" print(f"Checking for sitemap at: {sitemap_url}") try: response = self.session.get(sitemap_url, timeout=self.timeout) if response.status_code == 200: print("✅ Found sitemap.xml, parsing URLs...") root = ET.fromstring(response.content)
if "text/html" not in content_type: return []
if tag.name == "a" or tag.name == "link": url = tag.get("href") elif tag.name == "img" or tag.name == "script": url = tag.get("src") if url: absolute_url = urljoin(page_url, url) if not absolute_url.startswith(("javascript:", "mailto:", "tel:")): links.append(absolute_url) return links def crawl_page(self, url): """Crawl a single page and extract links.""" if url in self.visited_pages or ( self.max_pages and len(self.visited_pages) >= self.max_pages ): return [] self.visited_pages.add(url) print(f"Crawling: {url}") try: response = self.session.get(url, timeout=self.timeout) response.raise_for_status() content_type = response.headers.get("content-type", "").lower() if "text/html" not in content_type: return []
for link in links: self.check_link(link, url) if self.is_internal_url(link): normalized = self.normalize_url(link) if normalized not in self.visited_pages: self.pages_to_visit.append(normalized) time.sleep(self.delay) return links except requests.exceptions.RequestException as e: print(f"Error crawling {url}: {e}") return [] def get_sitemap_urls(self): """Try to get URLs from sitemap.xml.""" sitemap_url = f"{self.base_url}/sitemap.xml" print(f"Checking for sitemap at: {sitemap_url}") try: response = self.session.get(sitemap_url, timeout=self.timeout) if response.status_code == 200: print("✅ Found sitemap.xml, parsing URLs...") root = ET.fromstring(response.content) urls = [] for url_elem in root
links = self.extract_links(response.text, url)