Spaces:
Running
Running
require login
Browse files
app.py
CHANGED
|
@@ -66,10 +66,10 @@ GRADIO_DOCS_LAST_UPDATE_FILE = ".gradio_docs_last_update.txt"
|
|
| 66 |
GRADIO_DOCS_UPDATE_ON_APP_UPDATE = True # Only update when app is updated, not on a timer
|
| 67 |
|
| 68 |
# Global variable to store the current Gradio documentation
|
| 69 |
-
_gradio_docs_content:
|
| 70 |
_gradio_docs_last_fetched: Optional[datetime] = None
|
| 71 |
|
| 72 |
-
def fetch_gradio_docs() ->
|
| 73 |
"""Fetch the latest Gradio documentation from llms.txt"""
|
| 74 |
try:
|
| 75 |
response = requests.get(GRADIO_LLMS_TXT_URL, timeout=10)
|
|
@@ -79,7 +79,7 @@ def fetch_gradio_docs() -> Optional[str]:
|
|
| 79 |
print(f"Warning: Failed to fetch Gradio docs from {GRADIO_LLMS_TXT_URL}: {e}")
|
| 80 |
return None
|
| 81 |
|
| 82 |
-
def load_cached_gradio_docs() ->
|
| 83 |
"""Load cached Gradio documentation from file"""
|
| 84 |
try:
|
| 85 |
if os.path.exists(GRADIO_DOCS_CACHE_FILE):
|
|
@@ -894,7 +894,7 @@ def _ensure_video_dir_exists() -> None:
|
|
| 894 |
pass
|
| 895 |
|
| 896 |
|
| 897 |
-
def _register_video_for_session(session_id:
|
| 898 |
if not session_id or not file_path:
|
| 899 |
return
|
| 900 |
with _VIDEO_FILES_LOCK:
|
|
@@ -903,7 +903,7 @@ def _register_video_for_session(session_id: Optional[str], file_path: str) -> No
|
|
| 903 |
_SESSION_VIDEO_FILES[session_id].append(file_path)
|
| 904 |
|
| 905 |
|
| 906 |
-
def cleanup_session_videos(session_id:
|
| 907 |
if not session_id:
|
| 908 |
return
|
| 909 |
with _VIDEO_FILES_LOCK:
|
|
@@ -952,7 +952,7 @@ def _ensure_audio_dir_exists() -> None:
|
|
| 952 |
pass
|
| 953 |
|
| 954 |
|
| 955 |
-
def _register_audio_for_session(session_id:
|
| 956 |
if not session_id or not file_path:
|
| 957 |
return
|
| 958 |
with _AUDIO_FILES_LOCK:
|
|
@@ -961,7 +961,7 @@ def _register_audio_for_session(session_id: Optional[str], file_path: str) -> No
|
|
| 961 |
_SESSION_AUDIO_FILES[session_id].append(file_path)
|
| 962 |
|
| 963 |
|
| 964 |
-
def cleanup_session_audio(session_id:
|
| 965 |
if not session_id:
|
| 966 |
return
|
| 967 |
with _AUDIO_FILES_LOCK:
|
|
@@ -2561,7 +2561,7 @@ def infer_svelte_dependencies(files: Dict[str, str]) -> Dict[str, str]:
|
|
| 2561 |
|
| 2562 |
return deps
|
| 2563 |
|
| 2564 |
-
def build_svelte_package_json(existing_json_text:
|
| 2565 |
"""Create or merge a package.json for Svelte spaces.
|
| 2566 |
|
| 2567 |
- If existing_json_text is provided, merge detected deps into its dependencies.
|
|
@@ -2781,7 +2781,7 @@ def _ensure_media_dir_exists() -> None:
|
|
| 2781 |
except Exception:
|
| 2782 |
pass
|
| 2783 |
|
| 2784 |
-
def track_session_media_file(session_id:
|
| 2785 |
"""Track a media file for session-based cleanup."""
|
| 2786 |
if not session_id or not file_path:
|
| 2787 |
return
|
|
@@ -2790,7 +2790,7 @@ def track_session_media_file(session_id: Optional[str], file_path: str) -> None:
|
|
| 2790 |
_SESSION_MEDIA_FILES[session_id] = []
|
| 2791 |
_SESSION_MEDIA_FILES[session_id].append(file_path)
|
| 2792 |
|
| 2793 |
-
def cleanup_session_media(session_id:
|
| 2794 |
"""Clean up media files for a specific session."""
|
| 2795 |
if not session_id:
|
| 2796 |
return
|
|
@@ -2879,7 +2879,7 @@ def cleanup_all_temp_media_on_shutdown() -> None:
|
|
| 2879 |
# Register shutdown cleanup handler
|
| 2880 |
atexit.register(cleanup_all_temp_media_on_shutdown)
|
| 2881 |
|
| 2882 |
-
def create_temp_media_url(media_bytes: bytes, filename: str, media_type: str = "image", session_id:
|
| 2883 |
"""Create a temporary file and return a local URL for preview.
|
| 2884 |
|
| 2885 |
Args:
|
|
@@ -3325,7 +3325,7 @@ def generate_image_to_image(input_image_data, prompt: str, token: gr.OAuthToken
|
|
| 3325 |
print(f"Image-to-image generation error: {str(e)}")
|
| 3326 |
return f"Error generating image (image-to-image): {str(e)}"
|
| 3327 |
|
| 3328 |
-
def generate_video_from_image(input_image_data, prompt: str, session_id:
|
| 3329 |
"""Generate a video from an input image and prompt using Hugging Face InferenceClient.
|
| 3330 |
|
| 3331 |
Returns an HTML <video> tag whose source points to a local file URL (file://...).
|
|
@@ -3452,7 +3452,7 @@ def generate_video_from_image(input_image_data, prompt: str, session_id: Optiona
|
|
| 3452 |
print(f"Image-to-video generation error: {str(e)}")
|
| 3453 |
return f"Error generating video (image-to-video): {str(e)}"
|
| 3454 |
|
| 3455 |
-
def generate_video_from_text(prompt: str, session_id:
|
| 3456 |
"""Generate a video from a text prompt using Hugging Face InferenceClient.
|
| 3457 |
|
| 3458 |
Returns an HTML <video> tag with compressed data URI for deployment compatibility.
|
|
@@ -3522,7 +3522,7 @@ def generate_video_from_text(prompt: str, session_id: Optional[str] = None, toke
|
|
| 3522 |
print(f"Text-to-video generation error: {str(e)}")
|
| 3523 |
return f"Error generating video (text-to-video): {str(e)}"
|
| 3524 |
|
| 3525 |
-
def generate_video_from_video(input_video_data, prompt: str, session_id:
|
| 3526 |
"""Generate a video from an input video and prompt using Decart AI's Lucy Pro V2V API.
|
| 3527 |
|
| 3528 |
Returns an HTML <video> tag whose source points to a temporary file URL.
|
|
@@ -3630,7 +3630,7 @@ def generate_video_from_video(input_video_data, prompt: str, session_id: Optiona
|
|
| 3630 |
print(f"Video-to-video generation error: {str(e)}")
|
| 3631 |
return f"Error generating video (video-to-video): {str(e)}"
|
| 3632 |
|
| 3633 |
-
def generate_music_from_text(prompt: str, music_length_ms: int = 30000, session_id:
|
| 3634 |
"""Generate music from a text prompt using ElevenLabs Music API and return an HTML <audio> tag.
|
| 3635 |
|
| 3636 |
Returns compressed data URI for deployment compatibility.
|
|
@@ -3842,7 +3842,7 @@ class WanAnimateApp:
|
|
| 3842 |
print(f"[WanAnimate] {error_msg}")
|
| 3843 |
return None, error_msg
|
| 3844 |
|
| 3845 |
-
def generate_animation_from_image_video(input_image_data, input_video_data, prompt: str, model_id: str = "wan2.2-animate-move", model: str = "wan-pro", session_id:
|
| 3846 |
"""Generate animated video from reference image and template video using Wan2.2-Animate.
|
| 3847 |
|
| 3848 |
Returns an HTML <video> tag whose source points to a temporary file URL.
|
|
@@ -4179,7 +4179,7 @@ def create_image_replacement_blocks_text_to_image_single(html_content: str, prom
|
|
| 4179 |
# If no <body>, just append
|
| 4180 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{image_html}\n{REPLACE_END}"
|
| 4181 |
|
| 4182 |
-
def create_video_replacement_blocks_text_to_video(html_content: str, prompt: str, session_id:
|
| 4183 |
"""Create search/replace blocks that generate and insert ONLY ONE text-to-video result.
|
| 4184 |
|
| 4185 |
Replaces the first detected <img> placeholder; if none found, inserts one video near the top of <body>.
|
|
@@ -4290,7 +4290,7 @@ def create_video_replacement_blocks_text_to_video(html_content: str, prompt: str
|
|
| 4290 |
# If no <body>, just append
|
| 4291 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{video_html}\n{REPLACE_END}"
|
| 4292 |
|
| 4293 |
-
def create_music_replacement_blocks_text_to_music(html_content: str, prompt: str, session_id:
|
| 4294 |
"""Create search/replace blocks that insert ONE generated <audio> near the top of <body>.
|
| 4295 |
|
| 4296 |
Unlike images/videos which replace placeholders, music doesn't map to an <img> tag.
|
|
@@ -4430,7 +4430,7 @@ def create_image_replacement_blocks_from_input_image(html_content: str, user_pro
|
|
| 4430 |
|
| 4431 |
return '\n\n'.join(replacement_blocks)
|
| 4432 |
|
| 4433 |
-
def create_video_replacement_blocks_from_input_image(html_content: str, user_prompt: str, input_image_data, session_id:
|
| 4434 |
"""Create search/replace blocks that replace the first <img> (or placeholder) with a generated <video>.
|
| 4435 |
|
| 4436 |
Uses generate_video_from_image to produce a single video and swaps it in.
|
|
@@ -4516,7 +4516,7 @@ def create_video_replacement_blocks_from_input_image(html_content: str, user_pro
|
|
| 4516 |
print("[Image2Video] No <body> tag; appending video via replacement block")
|
| 4517 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{video_html}\n{REPLACE_END}"
|
| 4518 |
|
| 4519 |
-
def create_video_replacement_blocks_from_input_video(html_content: str, user_prompt: str, input_video_data, session_id:
|
| 4520 |
"""Create search/replace blocks that replace the first <video> (or placeholder) with a generated <video>.
|
| 4521 |
|
| 4522 |
Uses generate_video_from_video to produce a single video and swaps it in.
|
|
@@ -4603,7 +4603,7 @@ def create_video_replacement_blocks_from_input_video(html_content: str, user_pro
|
|
| 4603 |
print("[Video2Video] No <body> tag; appending video via replacement block")
|
| 4604 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{video_html}\n{REPLACE_END}"
|
| 4605 |
|
| 4606 |
-
def apply_generated_media_to_html(html_content: str, user_prompt: str, enable_text_to_image: bool, enable_image_to_image: bool, input_image_data, image_to_image_prompt: str | None = None, text_to_image_prompt: str | None = None, enable_image_to_video: bool = False, image_to_video_prompt: str | None = None, session_id:
|
| 4607 |
"""Apply text/image/video/music replacements to HTML content.
|
| 4608 |
|
| 4609 |
- Works with single-document HTML strings
|
|
@@ -4614,7 +4614,7 @@ def apply_generated_media_to_html(html_content: str, user_prompt: str, enable_te
|
|
| 4614 |
# Detect multi-page sections and choose an entry HTML to modify
|
| 4615 |
is_multipage = False
|
| 4616 |
multipage_files: Dict[str, str] = {}
|
| 4617 |
-
entry_html_path:
|
| 4618 |
try:
|
| 4619 |
multipage_files = parse_multipage_html_output(html_content) or {}
|
| 4620 |
if multipage_files:
|
|
@@ -5837,7 +5837,62 @@ The HTML code above contains the complete original website structure with all im
|
|
| 5837 |
stop_generation = False
|
| 5838 |
|
| 5839 |
|
| 5840 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5841 |
if query is None:
|
| 5842 |
query = ''
|
| 5843 |
if _history is None:
|
|
@@ -7028,7 +7083,7 @@ def deploy_to_spaces_static(code):
|
|
| 7028 |
full_url = f"{base_url}?{params}&{files_params}"
|
| 7029 |
webbrowser.open_new_tab(full_url)
|
| 7030 |
|
| 7031 |
-
def check_hf_space_url(url: str) -> Tuple[bool,
|
| 7032 |
"""Check if URL is a valid Hugging Face Spaces URL and extract username/project"""
|
| 7033 |
import re
|
| 7034 |
|
|
@@ -7264,7 +7319,7 @@ def _parse_repo_or_model_url(url: str) -> Tuple[str, Optional[dict]]:
|
|
| 7264 |
pass
|
| 7265 |
return "unknown", None
|
| 7266 |
|
| 7267 |
-
def _fetch_hf_model_readme(repo_id: str) ->
|
| 7268 |
"""Fetch README.md (model card) for a Hugging Face model repo."""
|
| 7269 |
try:
|
| 7270 |
api = HfApi()
|
|
@@ -7281,7 +7336,7 @@ def _fetch_hf_model_readme(repo_id: str) -> Optional[str]:
|
|
| 7281 |
except Exception:
|
| 7282 |
return None
|
| 7283 |
|
| 7284 |
-
def _fetch_github_readme(owner: str, repo: str) ->
|
| 7285 |
"""Fetch README.md from a GitHub repo via raw URLs, trying HEAD/main/master."""
|
| 7286 |
bases = [
|
| 7287 |
f"https://raw.githubusercontent.com/{owner}/{repo}/HEAD/README.md",
|
|
@@ -7297,7 +7352,7 @@ def _fetch_github_readme(owner: str, repo: str) -> Optional[str]:
|
|
| 7297 |
continue
|
| 7298 |
return None
|
| 7299 |
|
| 7300 |
-
def _extract_transformers_or_diffusers_snippet(markdown_text: str) -> Tuple[
|
| 7301 |
"""Extract the most relevant Python code block referencing transformers/diffusers from markdown.
|
| 7302 |
|
| 7303 |
Returns (language, code). If not found, returns (None, None).
|
|
@@ -7335,7 +7390,7 @@ def _extract_transformers_or_diffusers_snippet(markdown_text: str) -> Tuple[Opti
|
|
| 7335 |
return scored[0][0] or None, scored[0][1]
|
| 7336 |
return None, None
|
| 7337 |
|
| 7338 |
-
def _infer_task_from_context(snippet:
|
| 7339 |
"""Infer a task string for transformers pipeline; fall back to provided pipeline_tag or 'text-generation'."""
|
| 7340 |
if pipeline_tag:
|
| 7341 |
return pipeline_tag
|
|
@@ -7779,6 +7834,24 @@ with gr.Blocks(
|
|
| 7779 |
#beta_chat .message.user {
|
| 7780 |
background: rgba(70, 70, 70, 0.95);
|
| 7781 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7782 |
"""
|
| 7783 |
) as demo:
|
| 7784 |
history = gr.State([])
|
|
@@ -7791,6 +7864,14 @@ with gr.Blocks(
|
|
| 7791 |
|
| 7792 |
with gr.Sidebar() as sidebar:
|
| 7793 |
login_button = gr.LoginButton()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7794 |
beta_toggle = gr.Checkbox(
|
| 7795 |
value=False,
|
| 7796 |
label="Beta: Chat UI",
|
|
@@ -7867,9 +7948,10 @@ with gr.Blocks(
|
|
| 7867 |
|
| 7868 |
input = gr.Textbox(
|
| 7869 |
label="What would you like to build?",
|
| 7870 |
-
placeholder="
|
| 7871 |
lines=3,
|
| 7872 |
-
visible=True
|
|
|
|
| 7873 |
)
|
| 7874 |
# Language dropdown for code generation (add Streamlit and Gradio as first-class options)
|
| 7875 |
language_choices = [
|
|
@@ -7908,7 +7990,7 @@ with gr.Blocks(
|
|
| 7908 |
visible=False
|
| 7909 |
)
|
| 7910 |
with gr.Row():
|
| 7911 |
-
btn = gr.Button("Generate", variant="
|
| 7912 |
clear_btn = gr.Button("Clear", variant="secondary", size="sm", scale=1, visible=True)
|
| 7913 |
# --- Move deploy/app name/sdk here, right before web search ---
|
| 7914 |
space_name_input = gr.Textbox(
|
|
@@ -8661,7 +8743,7 @@ with gr.Blocks(
|
|
| 8661 |
show_progress="hidden",
|
| 8662 |
).then(
|
| 8663 |
generation_code,
|
| 8664 |
-
inputs=[input, image_input, generation_image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt, image_to_video_toggle, image_to_video_prompt, text_to_video_toggle, text_to_video_prompt, video_to_video_toggle, video_to_video_prompt, video_input, text_to_music_toggle, text_to_music_prompt, image_video_to_animation_toggle, animation_mode_dropdown, animation_quality_dropdown, animation_video_input],
|
| 8665 |
outputs=[code_output, history, sandbox, history_output]
|
| 8666 |
).then(
|
| 8667 |
end_generation_ui,
|
|
@@ -8702,7 +8784,7 @@ with gr.Blocks(
|
|
| 8702 |
show_progress="hidden",
|
| 8703 |
).then(
|
| 8704 |
generation_code,
|
| 8705 |
-
inputs=[input, image_input, generation_image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt, image_to_video_toggle, image_to_video_prompt, text_to_video_toggle, text_to_video_prompt, video_to_video_toggle, video_to_video_prompt, video_input, text_to_music_toggle, text_to_music_prompt],
|
| 8706 |
outputs=[code_output, history, sandbox, history_output]
|
| 8707 |
).then(
|
| 8708 |
end_generation_ui,
|
|
@@ -8975,7 +9057,7 @@ with gr.Blocks(
|
|
| 8975 |
show_progress="hidden",
|
| 8976 |
).then(
|
| 8977 |
generation_code,
|
| 8978 |
-
inputs=[input, image_input, generation_image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt, image_to_video_toggle, image_to_video_prompt, text_to_video_toggle, text_to_video_prompt, video_to_video_toggle, video_to_video_prompt, video_input, text_to_music_toggle, text_to_music_prompt, image_video_to_animation_toggle, animation_mode_dropdown, animation_quality_dropdown, animation_video_input],
|
| 8979 |
outputs=[code_output, history, sandbox, history_output]
|
| 8980 |
).then(
|
| 8981 |
end_generation_ui,
|
|
@@ -9735,6 +9817,21 @@ with gr.Blocks(
|
|
| 9735 |
)
|
| 9736 |
# Keep the old deploy method as fallback (if not logged in, user can still use the old method)
|
| 9737 |
# Optionally, you can keep the old deploy_btn.click for the default method as a secondary button.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9738 |
|
| 9739 |
if __name__ == "__main__":
|
| 9740 |
# Initialize Gradio documentation system
|
|
|
|
| 66 |
GRADIO_DOCS_UPDATE_ON_APP_UPDATE = True # Only update when app is updated, not on a timer
|
| 67 |
|
| 68 |
# Global variable to store the current Gradio documentation
|
| 69 |
+
_gradio_docs_content: str | None = None
|
| 70 |
_gradio_docs_last_fetched: Optional[datetime] = None
|
| 71 |
|
| 72 |
+
def fetch_gradio_docs() -> str | None:
|
| 73 |
"""Fetch the latest Gradio documentation from llms.txt"""
|
| 74 |
try:
|
| 75 |
response = requests.get(GRADIO_LLMS_TXT_URL, timeout=10)
|
|
|
|
| 79 |
print(f"Warning: Failed to fetch Gradio docs from {GRADIO_LLMS_TXT_URL}: {e}")
|
| 80 |
return None
|
| 81 |
|
| 82 |
+
def load_cached_gradio_docs() -> str | None:
|
| 83 |
"""Load cached Gradio documentation from file"""
|
| 84 |
try:
|
| 85 |
if os.path.exists(GRADIO_DOCS_CACHE_FILE):
|
|
|
|
| 894 |
pass
|
| 895 |
|
| 896 |
|
| 897 |
+
def _register_video_for_session(session_id: str | None, file_path: str) -> None:
|
| 898 |
if not session_id or not file_path:
|
| 899 |
return
|
| 900 |
with _VIDEO_FILES_LOCK:
|
|
|
|
| 903 |
_SESSION_VIDEO_FILES[session_id].append(file_path)
|
| 904 |
|
| 905 |
|
| 906 |
+
def cleanup_session_videos(session_id: str | None) -> None:
|
| 907 |
if not session_id:
|
| 908 |
return
|
| 909 |
with _VIDEO_FILES_LOCK:
|
|
|
|
| 952 |
pass
|
| 953 |
|
| 954 |
|
| 955 |
+
def _register_audio_for_session(session_id: str | None, file_path: str) -> None:
|
| 956 |
if not session_id or not file_path:
|
| 957 |
return
|
| 958 |
with _AUDIO_FILES_LOCK:
|
|
|
|
| 961 |
_SESSION_AUDIO_FILES[session_id].append(file_path)
|
| 962 |
|
| 963 |
|
| 964 |
+
def cleanup_session_audio(session_id: str | None) -> None:
|
| 965 |
if not session_id:
|
| 966 |
return
|
| 967 |
with _AUDIO_FILES_LOCK:
|
|
|
|
| 2561 |
|
| 2562 |
return deps
|
| 2563 |
|
| 2564 |
+
def build_svelte_package_json(existing_json_text: str | None, detected_dependencies: Dict[str, str]) -> str:
|
| 2565 |
"""Create or merge a package.json for Svelte spaces.
|
| 2566 |
|
| 2567 |
- If existing_json_text is provided, merge detected deps into its dependencies.
|
|
|
|
| 2781 |
except Exception:
|
| 2782 |
pass
|
| 2783 |
|
| 2784 |
+
def track_session_media_file(session_id: str | None, file_path: str) -> None:
|
| 2785 |
"""Track a media file for session-based cleanup."""
|
| 2786 |
if not session_id or not file_path:
|
| 2787 |
return
|
|
|
|
| 2790 |
_SESSION_MEDIA_FILES[session_id] = []
|
| 2791 |
_SESSION_MEDIA_FILES[session_id].append(file_path)
|
| 2792 |
|
| 2793 |
+
def cleanup_session_media(session_id: str | None) -> None:
|
| 2794 |
"""Clean up media files for a specific session."""
|
| 2795 |
if not session_id:
|
| 2796 |
return
|
|
|
|
| 2879 |
# Register shutdown cleanup handler
|
| 2880 |
atexit.register(cleanup_all_temp_media_on_shutdown)
|
| 2881 |
|
| 2882 |
+
def create_temp_media_url(media_bytes: bytes, filename: str, media_type: str = "image", session_id: str | None = None) -> str:
|
| 2883 |
"""Create a temporary file and return a local URL for preview.
|
| 2884 |
|
| 2885 |
Args:
|
|
|
|
| 3325 |
print(f"Image-to-image generation error: {str(e)}")
|
| 3326 |
return f"Error generating image (image-to-image): {str(e)}"
|
| 3327 |
|
| 3328 |
+
def generate_video_from_image(input_image_data, prompt: str, session_id: str | None = None, token: gr.OAuthToken | None = None) -> str:
|
| 3329 |
"""Generate a video from an input image and prompt using Hugging Face InferenceClient.
|
| 3330 |
|
| 3331 |
Returns an HTML <video> tag whose source points to a local file URL (file://...).
|
|
|
|
| 3452 |
print(f"Image-to-video generation error: {str(e)}")
|
| 3453 |
return f"Error generating video (image-to-video): {str(e)}"
|
| 3454 |
|
| 3455 |
+
def generate_video_from_text(prompt: str, session_id: str | None = None, token: gr.OAuthToken | None = None) -> str:
|
| 3456 |
"""Generate a video from a text prompt using Hugging Face InferenceClient.
|
| 3457 |
|
| 3458 |
Returns an HTML <video> tag with compressed data URI for deployment compatibility.
|
|
|
|
| 3522 |
print(f"Text-to-video generation error: {str(e)}")
|
| 3523 |
return f"Error generating video (text-to-video): {str(e)}"
|
| 3524 |
|
| 3525 |
+
def generate_video_from_video(input_video_data, prompt: str, session_id: str | None = None, token: gr.OAuthToken | None = None) -> str:
|
| 3526 |
"""Generate a video from an input video and prompt using Decart AI's Lucy Pro V2V API.
|
| 3527 |
|
| 3528 |
Returns an HTML <video> tag whose source points to a temporary file URL.
|
|
|
|
| 3630 |
print(f"Video-to-video generation error: {str(e)}")
|
| 3631 |
return f"Error generating video (video-to-video): {str(e)}"
|
| 3632 |
|
| 3633 |
+
def generate_music_from_text(prompt: str, music_length_ms: int = 30000, session_id: str | None = None, token: gr.OAuthToken | None = None) -> str:
|
| 3634 |
"""Generate music from a text prompt using ElevenLabs Music API and return an HTML <audio> tag.
|
| 3635 |
|
| 3636 |
Returns compressed data URI for deployment compatibility.
|
|
|
|
| 3842 |
print(f"[WanAnimate] {error_msg}")
|
| 3843 |
return None, error_msg
|
| 3844 |
|
| 3845 |
+
def generate_animation_from_image_video(input_image_data, input_video_data, prompt: str, model_id: str = "wan2.2-animate-move", model: str = "wan-pro", session_id: str | None = None, token: gr.OAuthToken | None = None) -> str:
|
| 3846 |
"""Generate animated video from reference image and template video using Wan2.2-Animate.
|
| 3847 |
|
| 3848 |
Returns an HTML <video> tag whose source points to a temporary file URL.
|
|
|
|
| 4179 |
# If no <body>, just append
|
| 4180 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{image_html}\n{REPLACE_END}"
|
| 4181 |
|
| 4182 |
+
def create_video_replacement_blocks_text_to_video(html_content: str, prompt: str, session_id: str | None = None) -> str:
|
| 4183 |
"""Create search/replace blocks that generate and insert ONLY ONE text-to-video result.
|
| 4184 |
|
| 4185 |
Replaces the first detected <img> placeholder; if none found, inserts one video near the top of <body>.
|
|
|
|
| 4290 |
# If no <body>, just append
|
| 4291 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{video_html}\n{REPLACE_END}"
|
| 4292 |
|
| 4293 |
+
def create_music_replacement_blocks_text_to_music(html_content: str, prompt: str, session_id: str | None = None) -> str:
|
| 4294 |
"""Create search/replace blocks that insert ONE generated <audio> near the top of <body>.
|
| 4295 |
|
| 4296 |
Unlike images/videos which replace placeholders, music doesn't map to an <img> tag.
|
|
|
|
| 4430 |
|
| 4431 |
return '\n\n'.join(replacement_blocks)
|
| 4432 |
|
| 4433 |
+
def create_video_replacement_blocks_from_input_image(html_content: str, user_prompt: str, input_image_data, session_id: str | None = None) -> str:
|
| 4434 |
"""Create search/replace blocks that replace the first <img> (or placeholder) with a generated <video>.
|
| 4435 |
|
| 4436 |
Uses generate_video_from_image to produce a single video and swaps it in.
|
|
|
|
| 4516 |
print("[Image2Video] No <body> tag; appending video via replacement block")
|
| 4517 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{video_html}\n{REPLACE_END}"
|
| 4518 |
|
| 4519 |
+
def create_video_replacement_blocks_from_input_video(html_content: str, user_prompt: str, input_video_data, session_id: str | None = None) -> str:
|
| 4520 |
"""Create search/replace blocks that replace the first <video> (or placeholder) with a generated <video>.
|
| 4521 |
|
| 4522 |
Uses generate_video_from_video to produce a single video and swaps it in.
|
|
|
|
| 4603 |
print("[Video2Video] No <body> tag; appending video via replacement block")
|
| 4604 |
return f"{SEARCH_START}\n\n{DIVIDER}\n{video_html}\n{REPLACE_END}"
|
| 4605 |
|
| 4606 |
+
def apply_generated_media_to_html(html_content: str, user_prompt: str, enable_text_to_image: bool, enable_image_to_image: bool, input_image_data, image_to_image_prompt: str | None = None, text_to_image_prompt: str | None = None, enable_image_to_video: bool = False, image_to_video_prompt: str | None = None, session_id: str | None = None, enable_text_to_video: bool = False, text_to_video_prompt: str | None = None, enable_video_to_video: bool = False, video_to_video_prompt: str | None = None, input_video_data = None, enable_text_to_music: bool = False, text_to_music_prompt: str | None = None, enable_image_video_to_animation: bool = False, animation_mode: str = "wan2.2-animate-move", animation_quality: str = "wan-pro", animation_video_data = None, token: gr.OAuthToken | None = None) -> str:
|
| 4607 |
"""Apply text/image/video/music replacements to HTML content.
|
| 4608 |
|
| 4609 |
- Works with single-document HTML strings
|
|
|
|
| 4614 |
# Detect multi-page sections and choose an entry HTML to modify
|
| 4615 |
is_multipage = False
|
| 4616 |
multipage_files: Dict[str, str] = {}
|
| 4617 |
+
entry_html_path: str | None = None
|
| 4618 |
try:
|
| 4619 |
multipage_files = parse_multipage_html_output(html_content) or {}
|
| 4620 |
if multipage_files:
|
|
|
|
| 5837 |
stop_generation = False
|
| 5838 |
|
| 5839 |
|
| 5840 |
+
def check_authentication(profile: gr.OAuthProfile | None = None, token: gr.OAuthToken | None = None) -> tuple[bool, str]:
|
| 5841 |
+
"""Check if user is authenticated and return status with message."""
|
| 5842 |
+
if not profile or not token:
|
| 5843 |
+
return False, "Please log in with your Hugging Face account to use AnyCoder."
|
| 5844 |
+
|
| 5845 |
+
if not token.token:
|
| 5846 |
+
return False, "Authentication token is invalid. Please log in again."
|
| 5847 |
+
|
| 5848 |
+
return True, f"Authenticated as {profile.username}"
|
| 5849 |
+
|
| 5850 |
+
|
| 5851 |
+
def update_ui_for_auth_status(profile: gr.OAuthProfile | None = None, token: gr.OAuthToken | None = None):
|
| 5852 |
+
"""Update UI components based on authentication status."""
|
| 5853 |
+
is_authenticated, auth_message = check_authentication(profile, token)
|
| 5854 |
+
|
| 5855 |
+
if is_authenticated:
|
| 5856 |
+
# User is authenticated - enable all components
|
| 5857 |
+
return {
|
| 5858 |
+
# Enable main input and button
|
| 5859 |
+
input: gr.update(interactive=True, placeholder="Describe your application..."),
|
| 5860 |
+
btn: gr.update(interactive=True, variant="primary"),
|
| 5861 |
+
# Show authentication status
|
| 5862 |
+
auth_status: gr.update(
|
| 5863 |
+
value=f"β
{auth_message}",
|
| 5864 |
+
visible=True
|
| 5865 |
+
)
|
| 5866 |
+
}
|
| 5867 |
+
else:
|
| 5868 |
+
# User not authenticated - disable main components
|
| 5869 |
+
return {
|
| 5870 |
+
# Disable main input and button with clear messaging
|
| 5871 |
+
input: gr.update(
|
| 5872 |
+
interactive=False,
|
| 5873 |
+
placeholder="π Please log in with Hugging Face to use AnyCoder..."
|
| 5874 |
+
),
|
| 5875 |
+
btn: gr.update(interactive=False, variant="secondary"),
|
| 5876 |
+
# Show authentication requirement
|
| 5877 |
+
auth_status: gr.update(
|
| 5878 |
+
value=f"π {auth_message}",
|
| 5879 |
+
visible=True
|
| 5880 |
+
)
|
| 5881 |
+
}
|
| 5882 |
+
|
| 5883 |
+
|
| 5884 |
+
def generation_code(query: str | None, vlm_image: Optional[gr.Image], gen_image: Optional[gr.Image], file: str | None, website_url: str | None, _setting: Dict[str, str], _history: Optional[History], _current_model: Dict, enable_search: bool = False, language: str = "html", provider: str = "auto", enable_image_generation: bool = False, enable_image_to_image: bool = False, image_to_image_prompt: str | None = None, text_to_image_prompt: str | None = None, enable_image_to_video: bool = False, image_to_video_prompt: str | None = None, enable_text_to_video: bool = False, text_to_video_prompt: str | None = None, enable_video_to_video: bool = False, video_to_video_prompt: str | None = None, input_video_data = None, enable_text_to_music: bool = False, text_to_music_prompt: str | None = None, enable_image_video_to_animation: bool = False, animation_mode: str = "wan2.2-animate-move", animation_quality: str = "wan-pro", animation_video_data = None, profile: gr.OAuthProfile | None = None, token: gr.OAuthToken | None = None):
|
| 5885 |
+
# Check authentication first
|
| 5886 |
+
is_authenticated, auth_message = check_authentication(profile, token)
|
| 5887 |
+
if not is_authenticated:
|
| 5888 |
+
error_message = f"π Authentication Required\n\n{auth_message}\n\nPlease click the 'Sign in with Hugging Face' button in the sidebar to continue."
|
| 5889 |
+
yield {
|
| 5890 |
+
code_output: error_message,
|
| 5891 |
+
history_output: history_to_chatbot_messages(_history or []),
|
| 5892 |
+
sandbox: f"<div style='padding:2em;text-align:center;color:#e74c3c;font-size:1.2em;'><h3>π Authentication Required</h3><p>{auth_message}</p><p>Please log in to use AnyCoder.</p></div>",
|
| 5893 |
+
}
|
| 5894 |
+
return
|
| 5895 |
+
|
| 5896 |
if query is None:
|
| 5897 |
query = ''
|
| 5898 |
if _history is None:
|
|
|
|
| 7083 |
full_url = f"{base_url}?{params}&{files_params}"
|
| 7084 |
webbrowser.open_new_tab(full_url)
|
| 7085 |
|
| 7086 |
+
def check_hf_space_url(url: str) -> Tuple[bool, str | None, str | None]:
|
| 7087 |
"""Check if URL is a valid Hugging Face Spaces URL and extract username/project"""
|
| 7088 |
import re
|
| 7089 |
|
|
|
|
| 7319 |
pass
|
| 7320 |
return "unknown", None
|
| 7321 |
|
| 7322 |
+
def _fetch_hf_model_readme(repo_id: str) -> str | None:
|
| 7323 |
"""Fetch README.md (model card) for a Hugging Face model repo."""
|
| 7324 |
try:
|
| 7325 |
api = HfApi()
|
|
|
|
| 7336 |
except Exception:
|
| 7337 |
return None
|
| 7338 |
|
| 7339 |
+
def _fetch_github_readme(owner: str, repo: str) -> str | None:
|
| 7340 |
"""Fetch README.md from a GitHub repo via raw URLs, trying HEAD/main/master."""
|
| 7341 |
bases = [
|
| 7342 |
f"https://raw.githubusercontent.com/{owner}/{repo}/HEAD/README.md",
|
|
|
|
| 7352 |
continue
|
| 7353 |
return None
|
| 7354 |
|
| 7355 |
+
def _extract_transformers_or_diffusers_snippet(markdown_text: str) -> Tuple[str | None, str | None]:
|
| 7356 |
"""Extract the most relevant Python code block referencing transformers/diffusers from markdown.
|
| 7357 |
|
| 7358 |
Returns (language, code). If not found, returns (None, None).
|
|
|
|
| 7390 |
return scored[0][0] or None, scored[0][1]
|
| 7391 |
return None, None
|
| 7392 |
|
| 7393 |
+
def _infer_task_from_context(snippet: str | None, pipeline_tag: str | None) -> str:
|
| 7394 |
"""Infer a task string for transformers pipeline; fall back to provided pipeline_tag or 'text-generation'."""
|
| 7395 |
if pipeline_tag:
|
| 7396 |
return pipeline_tag
|
|
|
|
| 7834 |
#beta_chat .message.user {
|
| 7835 |
background: rgba(70, 70, 70, 0.95);
|
| 7836 |
}
|
| 7837 |
+
/* Authentication status styling */
.auth-status {
    padding: 8px 12px;
    border-radius: 6px;
    margin: 8px 0;
    font-weight: 500;
    text-align: center;
}
/*
 * FIX(review): the original rules used `.auth-status:has-text("...")`,
 * but `:has-text()` is not a standard CSS pseudo-class (it exists only
 * in query/testing tools such as Playwright). An unrecognized
 * pseudo-class invalidates the entire rule, so browsers dropped both
 * state rules and the red/green styling never rendered. CSS cannot
 * match on text content; instead, expose state modifier classes that
 * the Python side can toggle via elem_classes, e.g.
 * ["auth-status", "auth-status-error"] when logged out and
 * ["auth-status", "auth-status-success"] when logged in.
 */
.auth-status.auth-status-error {
    background: rgba(231, 76, 60, 0.1);
    border: 1px solid rgba(231, 76, 60, 0.3);
    color: #e74c3c;
}
.auth-status.auth-status-success {
    background: rgba(46, 204, 113, 0.1);
    border: 1px solid rgba(46, 204, 113, 0.3);
    color: #2ecc71;
}
|
| 7855 |
"""
|
| 7856 |
) as demo:
|
| 7857 |
history = gr.State([])
|
|
|
|
| 7864 |
|
| 7865 |
with gr.Sidebar() as sidebar:
|
| 7866 |
login_button = gr.LoginButton()
|
| 7867 |
+
|
| 7868 |
+
# Authentication status display — shown until the user logs in.
# Styled by the `.auth-status` class declared in the Blocks css string;
# its text is later replaced by the login/logout handler (it appears in
# the outputs of `login_button.login(...)` below in this file).
# NOTE(review): the leading "π " in the value looks like a mis-encoded
# emoji (likely a lock glyph) from a bad copy/paste — verify against the
# repo; left byte-identical here because it is a runtime string.
auth_status = gr.Markdown(
    value="π Please log in with your Hugging Face account to use AnyCoder.",
    visible=True,
    elem_classes=["auth-status"]
)
|
| 7874 |
+
|
| 7875 |
beta_toggle = gr.Checkbox(
|
| 7876 |
value=False,
|
| 7877 |
label="Beta: Chat UI",
|
|
|
|
| 7948 |
|
| 7949 |
# Main prompt box for the build request.
# NOTE(review): the name `input` shadows the Python builtin; it cannot be
# renamed safely here because later event wiring (e.g. the
# `login_button.login(...)` outputs) references this component by name.
# Starts non-interactive; it is listed in the outputs of the login/logout
# handlers, so it is presumably re-enabled after authentication — confirm
# in `update_ui_for_auth_status`.
# NOTE(review): "π " in the placeholder looks like a mis-encoded emoji —
# runtime string, kept byte-identical.
input = gr.Textbox(
    label="What would you like to build?",
    placeholder="π Please log in with Hugging Face to use AnyCoder...",
    lines=3,
    visible=True,
    interactive=False
)
|
| 7956 |
# Language dropdown for code generation (add Streamlit and Gradio as first-class options)
|
| 7957 |
language_choices = [
|
|
|
|
| 7990 |
visible=False
|
| 7991 |
)
|
| 7992 |
with gr.Row():
|
| 7993 |
+
# Generate starts disabled (interactive=False); it is an output of the
# login/logout handlers below, so it is presumably enabled once the user
# authenticates — confirm in `update_ui_for_auth_status`. Clear is always
# clickable.
btn = gr.Button("Generate", variant="secondary", size="lg", scale=2, visible=True, interactive=False)
clear_btn = gr.Button("Clear", variant="secondary", size="sm", scale=1, visible=True)
|
| 7995 |
# --- Move deploy/app name/sdk here, right before web search ---
|
| 7996 |
space_name_input = gr.Textbox(
|
|
|
|
| 8743 |
show_progress="hidden",
|
| 8744 |
).then(
|
| 8745 |
generation_code,
|
| 8746 |
+
inputs=[input, image_input, generation_image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt, image_to_video_toggle, image_to_video_prompt, text_to_video_toggle, text_to_video_prompt, video_to_video_toggle, video_to_video_prompt, video_input, text_to_music_toggle, text_to_music_prompt, image_video_to_animation_toggle, animation_mode_dropdown, animation_quality_dropdown, animation_video_input, login_button, login_button],
|
| 8747 |
outputs=[code_output, history, sandbox, history_output]
|
| 8748 |
).then(
|
| 8749 |
end_generation_ui,
|
|
|
|
| 8784 |
show_progress="hidden",
|
| 8785 |
).then(
|
| 8786 |
generation_code,
|
| 8787 |
+
inputs=[input, image_input, generation_image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt, image_to_video_toggle, image_to_video_prompt, text_to_video_toggle, text_to_video_prompt, video_to_video_toggle, video_to_video_prompt, video_input, text_to_music_toggle, text_to_music_prompt, login_button, login_button],
|
| 8788 |
outputs=[code_output, history, sandbox, history_output]
|
| 8789 |
).then(
|
| 8790 |
end_generation_ui,
|
|
|
|
| 9057 |
show_progress="hidden",
|
| 9058 |
).then(
|
| 9059 |
generation_code,
|
| 9060 |
+
inputs=[input, image_input, generation_image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt, image_to_video_toggle, image_to_video_prompt, text_to_video_toggle, text_to_video_prompt, video_to_video_toggle, video_to_video_prompt, video_input, text_to_music_toggle, text_to_music_prompt, image_video_to_animation_toggle, animation_mode_dropdown, animation_quality_dropdown, animation_video_input, login_button, login_button],
|
| 9061 |
outputs=[code_output, history, sandbox, history_output]
|
| 9062 |
).then(
|
| 9063 |
end_generation_ui,
|
|
|
|
| 9817 |
)
|
| 9818 |
# Keep the old deploy method as fallback (if not logged in, user can still use the old method)
|
| 9819 |
# Optionally, you can keep the old deploy_btn.click for the default method as a secondary button.
|
| 9820 |
+
|
| 9821 |
+
# Handle login status changes to update UI
# Wires the OAuth login/logout events to `update_ui_for_auth_status`,
# which rewrites the prompt box, Generate button, and status banner.
# NOTE(review): `gr.LoginButton` does not expose `.login`/`.logout`
# event listeners in every Gradio release — confirm these exist in the
# pinned gradio version, otherwise this raises AttributeError at
# startup.
# NOTE(review): `login_button` is passed twice in `inputs`, so the
# handler receives the same component value for two parameters — verify
# this matches `update_ui_for_auth_status`'s signature (the same
# duplicated pattern appears in the `generation_code` wiring above).
# queue=False so the UI toggles immediately, bypassing the job queue.
login_button.login(
    update_ui_for_auth_status,
    inputs=[login_button, login_button],
    outputs=[input, btn, auth_status],
    queue=False
)

login_button.logout(
    update_ui_for_auth_status,
    inputs=[login_button, login_button],
    outputs=[input, btn, auth_status],
    queue=False
)
|
| 9835 |
|
| 9836 |
if __name__ == "__main__":
|
| 9837 |
# Initialize Gradio documentation system
|