Hugging Face Spaces — status: Runtime error (reported by the Spaces page header)
import os
import json
import time

import gradio as gr
from transformers import Tool
from huggingface_hub import create_repo, upload_folder

# from gradio import forms

#########
import streamlit as st

st.title("Drop files onto the chat input field")
def handle_file_drop(files):
    """Handle a chat-input submission (e.g. process or store dropped files)."""
    # Placeholder behavior: just report what arrived on stdout.
    message = f"Dropped files: {files}"
    print(message)
# Chat input field with the submit handler attached.
chat_input = st.chat_input(
    placeholder="Type your message or drop a file...",
    key="chat_input",
    # accept_drops=False,
    on_submit=handle_file_drop,
)

# if __name__ == "__main__":
#     st.launch(function_name="app")
#########
###############################
# os.environ["DISPLAY"] = ":99.0"
import streamlit as st
import pyautogui
import os

st.title("Chat Interface with Screenshot Capability")

# BUG FIX: the first demo above already registered a widget with
# key="chat_input"; Streamlit raises a DuplicateWidgetID error when two
# widgets share a key, so this input needs a distinct one.
chat_input = st.chat_input(
    placeholder="Type your message or click the button to take a screenshot...",
    key="screenshot_chat_input",
)

# Button intended to trigger a screen capture (wired up further below).
screenshot_button = st.button("Take Screenshot")
# Define a function to capture the screenshot
def capture_screenshot():
    """Grab the full screen with pyautogui and render it in the app.

    Returns:
        None; the capture is displayed as a side effect.
    """
    # NOTE(review): the original passed "EntireScreen" as the first argument,
    # which pyautogui treats as a filename to save to; with no image extension
    # Pillow cannot infer a format. Capture in memory instead.
    img = pyautogui.screenshot()
    # BUG FIX: st.chat_input returns a str (or None), which has no
    # insert_image() method — display the capture with st.image instead.
    st.image(img)
# BUG FIX: st.button returns a bool (True on the rerun right after a click);
# it has no .do() method, so screenshot_button.do(...) raised AttributeError.
# Invoke the capture conditionally instead.
if screenshot_button:
    capture_screenshot()

# Echo the current widget values (message text / button state) for debugging.
st.write(chat_input)
st.write(screenshot_button)
####################
def generate_files(
    title="Text Generation Tool",
    tool_description=(
        "This is a tool that chats with a user. "
        "It takes an input named `prompt` which contains a system_role, "
        "user_message, context and history. It returns a text message."
    ),
):
    """Scaffold a Hugging Face Space for a transformers Tool and upload it.

    Generates README.md, tool_config.json, app.py, requirements.txt and the
    tool implementation module in a local folder derived from *title*, creates
    the Space repo on the Hub, then uploads the folder.

    Args:
        title: Human-readable tool title; drives all derived names.
        tool_description: Description stored in tool_config.json and embedded
            in the generated tool class.

    Returns:
        The full repo id ("Chris4K/<repo-name>") the files were uploaded to.
    """
    tool_name = title.replace(" ", "_").lower()   # generated module / tool name
    tool_class = title.replace(" ", "")           # base name for the class & folder
    tool_repo_id = title.replace(" ", "-")        # Space repo name on the Hub

    # Space front-matter. (The emoji value was already mojibake in the
    # original source and is kept verbatim.)
    readme_content = '''
---
title: {}
emoji: ๐
colorFrom: blue
colorTo: blue
sdk: gradio
sdk_version: 4.3.0
app_file: app.py
pinned: false
tags:
- tool
---
'''.format(title)

    # Tool config consumed by transformers' tool loader.
    tool_config = {
        "description": tool_description,
        "name": tool_name,
        "tool_class": "{}Tool".format(tool_class),
    }
    tool_config_json = json.dumps(tool_config, indent=4)

    # BUG FIX: the original emitted a truncated "from transformers.tools.base"
    # line (a SyntaxError in the generated file) and a bogus
    # "import launch_gradio_demo"; import the demo launcher properly and load
    # the Tool subclass from the generated module.
    app_py_content = '''
from transformers.tools.base import launch_gradio_demo
from {0} import {1}Tool

launch_gradio_demo({1}Tool)
'''.format(tool_name, tool_class)

    requirements_content = '''
transformers>=4.29.0
# diffusers
accelerate
torch
'''

    # BUG FIX: the generated class is referenced everywhere else as "<X>Tool"
    # (tool_config.json above, app.py launcher) but was defined as plain "<X>",
    # and it used Tool without importing it. Both are fixed here.
    tool_module_content = '''
import os

from transformers import Tool, pipeline


class {}Tool(Tool):
    name = "{}"
    description = (
        "{}"
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str):
        token = os.environ['hf']
        text_generator = pipeline(model="microsoft/Orca-2-13b", token=token)
        generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
        print(generated_text)
        return generated_text
'''.format(tool_class, tool_name, tool_description)

    # Write everything into a local folder named after the tool class.
    os.makedirs(tool_class, exist_ok=True)
    with open(f"{tool_class}/README.md", "w") as readme_file:
        readme_file.write(readme_content)
    with open(f"{tool_class}/tool_config.json", "w") as tool_config_file:
        tool_config_file.write(tool_config_json)
    with open(f"{tool_class}/app.py", "w") as app_py_file:
        app_py_file.write(app_py_content)
    with open(f"{tool_class}/requirements.txt", "w") as requirements_file:
        requirements_file.write(requirements_content)
    # BUG FIX: the tool implementation was written to app.py, clobbering the
    # launcher written just above. It belongs in "<tool_name>.py", the module
    # the generated app.py imports from.
    with open(f"{tool_class}/{tool_name}.py", "w") as tool_module_file:
        tool_module_file.write(tool_module_content)

    # NOTE(review): create_repo with a bare name creates the Space under the
    # authenticated account — assumed to be "Chris4K" to match the upload
    # target below; confirm against the token in use.
    create_repo(repo_id=tool_repo_id, repo_type="space", space_sdk="gradio")

    # Give the Hub a moment to finish creating the repo before uploading.
    time.sleep(5)
    print("Slept for 5 seconds!")

    # Upload the folder to the Hugging Face Hub
    upload_folder(
        folder_path=tool_class,
        repo_id=f"Chris4K/{tool_repo_id}",
        repo_type="space",
    )
    # BUG FIX: the original returned "Chris4K/<tool_class>" although the
    # upload went to "Chris4K/<tool_repo_id>"; return the real repo id.
    return f"Chris4K/{tool_repo_id}"
# Gradio front-end for generate_files.
# BUG FIX: the original labels were a typo ("Titel") plus a copy-paste from an
# unrelated text-comparison demo ("Text 2" / "The fast brown fox...") even
# though the second field feeds the tool description; labels, help text and
# default now describe what the inputs actually do. Also renamed the
# interface variable so it no longer shadows the stdlib `io` module.
demo = gr.Interface(
    generate_files,
    inputs=[
        gr.Textbox(
            label="Title",
            info="Tool title; also used to derive the repo, module and class names",
            lines=1,
            value="Cool Tool3",
        ),
        gr.Textbox(
            label="Tool description",
            info="Description stored in tool_config.json and the generated tool class",
            lines=3,
            value=(
                "This is a tool that chats with a user. It takes an input "
                "named `prompt` which contains a system_role, user_message, "
                "context and history. It returns a text message."
            ),
        ),
    ],
    outputs=["text"],
)

# Launch the Gradio interface
demo.launch()