Spaces:
Runtime error
Runtime error
File size: 5,306 Bytes
1ddef71 93a9f5e a67f9ac cde3364 c888fa1 177edd4 cd0007b 7d9fd2e cd0007b bb5d0c4 cd0007b c5f8baf 9b51164 2e4de11 c5f8baf cd0007b c5f8baf 93a9f5e 2a0f189 03b4c35 93a9f5e 2a0f189 213cac4 2a0f189 93a9f5e 9fdc9e5 ffbd4f4 93a9f5e ffbd4f4 93a9f5e 213cac4 ffbd4f4 93a9f5e 213cac4 2a0f189 93a9f5e 213cac4 dc96452 2a0f189 213cac4 ffbd4f4 dc96452 ffbd4f4 f66c336 93a9f5e ffbd4f4 93a9f5e ccebec7 ffbd4f4 93a9f5e ccebec7 ffbd4f4 93a9f5e ffbd4f4 93a9f5e ffbd4f4 93a9f5e ccebec7 ffbd4f4 5ae2229 177edd4 dc96452 ffbd4f4 700862a dc96452 7fd533f caffa1e 93a9f5e ffbd4f4 ccebec7 93a9f5e 7f48d07 f0fb392 213cac4 f0fb392 f129734 ccebec7 93a9f5e 26cc307 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 | import os
import json
import gradio as gr
from transformers import Tool
from huggingface_hub import upload_folder
from huggingface_hub import create_repo
#from gradio import forms
import time
#########
import streamlit as st

# --- Demo 1: chat input that logs dropped files --------------------------
st.title("Drop files onto the chat input field")
def handle_file_drop(files):
    """Placeholder submit handler: log whatever files were dropped.

    Intended as the hook where dropped files would be processed or stored;
    currently it only echoes them to stdout.
    """
    message = f"Dropped files: {files}"
    print(message)
# Create a chat input field with file upload capabilities.
# NOTE(review): st.chat_input's on_submit parameter only exists in newer
# Streamlit releases — confirm the pinned version supports it.
chat_input = st.chat_input(
    placeholder="Type your message or drop a file...",
    key="chat_input",
    # accept_drops=False,
    on_submit=handle_file_drop
)
# if __name__ == "__main__":
#     st.launch(function_name="app")
#########
###############################
#os.environ["DISPLAY"] = ":99.0"
import streamlit as st
import pyautogui
import os
# --- Demo 2: chat interface with a screenshot button ----------------------
st.title("Chat Interface with Screenshot Capability")

# Chat input for this demo.
# The widget key must be unique app-wide; the first demo already uses
# key="chat_input", so reusing it here raised DuplicateWidgetID.
chat_input = st.chat_input(
    placeholder="Type your message or click the button to take a screenshot...",
    key="screenshot_chat_input",
)

# st.button returns a plain bool (True only on the rerun where it was
# clicked) — it is not a widget object with callback methods.
screenshot_button = st.button("Take Screenshot")


def capture_screenshot():
    """Capture the entire screen and render it in the app.

    NOTE(review): pyautogui.screenshot needs a display server; on a headless
    host (e.g. a Space) this fails unless a virtual display such as Xvfb is
    running — confirm the deployment environment.
    """
    # No filename argument: we want the PIL image object back. The original
    # passed "EntireScreen", an extension-less path that cannot be saved.
    img = pyautogui.screenshot()
    # st.chat_input returns the submitted text (or None) and has no
    # insert_image method, so display the capture with st.image instead.
    st.image(img, caption="Screenshot")


# bools have no .do() callback API — run the capture when clicked instead.
if screenshot_button:
    capture_screenshot()

# Echo the last submitted chat message, if any.
if chat_input:
    st.write(chat_input)
####################
def generate_files(title="Text Generation Tool", tool_description="This is a tool that chats with a user. "
                   "It takes an input named `prompt` which contains a system_role, user_message, context and history. It returns a text message."):
    """Scaffold and publish a Hugging Face Space hosting a transformers Tool.

    Generates README.md, tool_config.json, app.py, requirements.txt and the
    tool module itself into a local folder named after the tool class, creates
    a Gradio Space on the Hub, and uploads the folder.

    Args:
        title: Human-readable tool title; drives the module, class and
            repository names.
        tool_description: Description embedded in the generated tool class
            and its tool_config.json.

    Returns:
        The Hub repo id ("Chris4K/<repo-name>") the files were uploaded to.
    """
    # Derived identifiers: "Cool Tool" -> module "cool_tool",
    # class "CoolTool", repo "Cool-Tool".
    tool_name = title.replace(" ", "_").lower()
    tool_class = title.replace(" ", "")
    tool_repo_id = title.replace(" ", "-")

    # Space metadata front-matter for the Hub.
    readme_content = '''
---
title: {}
emoji: 🌖
colorFrom: blue
colorTo: blue
sdk: gradio
sdk_version: 4.3.0
app_file: app.py
pinned: false
tags:
- tool
---
'''.format(title)

    # tool_config.json must reference the class name actually generated below
    # (the original wrote "<Class>Tool" although the class is named "<Class>").
    tool_config = {
        "description": tool_description,
        "name": tool_name,
        "tool_class": tool_class,
    }
    tool_config_json = json.dumps(tool_config, indent=4)

    # Space entry point. The original emitted a syntax error
    # ("from transformers.tools.base" with no import list) and launched an
    # undefined "<Class>Tool"; import and launch the generated class instead.
    app_py_content = '''
from transformers import launch_gradio_demo

from {} import {}

launch_gradio_demo({})
'''.format(tool_name, tool_class, tool_class)

    requirements_content = '''
transformers>=4.29.0
# diffusers
accelerate
torch
'''

    # The tool implementation module. It subclasses Tool, so Tool must be
    # imported here (the original module content never imported it).
    tool_module_content = '''
import os

from transformers import Tool, pipeline


class {}(Tool):
    name = "{}"
    description = (
        "{}"
    )
    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str):
        token = os.environ['hf']
        text_generator = pipeline(model="microsoft/Orca-2-13b", token=token)
        generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
        print(generated_text)
        return generated_text
'''.format(tool_class, tool_name, tool_description)

    # Stage all generated files in a local folder.
    os.makedirs(tool_class, exist_ok=True)
    generated = {
        "README.md": readme_content,
        "tool_config.json": tool_config_json,
        "app.py": app_py_content,
        "requirements.txt": requirements_content,
        # Must be <tool_name>.py so app.py's "from <tool_name> import ..."
        # resolves. The original wrote this content over app.py a second
        # time, so the tool module was never created at all.
        f"{tool_name}.py": tool_module_content,
    }
    for filename, content in generated.items():
        # utf-8 explicitly: the README contains an emoji.
        with open(f"{tool_class}/{filename}", "w", encoding="utf-8") as fh:
            fh.write(content)

    # Create the Space under the authenticated account.
    # NOTE(review): assumes the logged-in user is "Chris4K" — the upload
    # below targets that namespace explicitly; confirm the token matches.
    create_repo(repo_id=tool_repo_id, repo_type="space", space_sdk="gradio")

    # Give the Hub a moment to finish provisioning the new repo before upload.
    time.sleep(5)
    print("Slept for 5 seconds!")

    # Upload the staged folder to the new Space.
    upload_folder(
        folder_path=tool_class,
        repo_id=f"Chris4K/{tool_repo_id}",
        repo_type="space"
    )

    # Return the repo actually uploaded to (the original returned the
    # class-based name, which differs whenever the title contains spaces).
    return f"Chris4K/{tool_repo_id}"
# Define the inputs for the Gradio interface
# Gradio front-end for generate_files. The original labels ("Titel",
# "Text 2", "Text to compare", a pangram default) were copy-paste residue
# from a text-comparison demo and did not describe the actual parameters
# (title, tool_description).
io = gr.Interface(
    generate_files,
    inputs=[
        gr.Textbox(
            label="Tool title",
            info="Drives the Space name and the generated module/class names",
            lines=1,
            value="Cool Tool3",
        ),
        gr.Textbox(
            label="Tool description",
            info="Embedded in the generated tool class and tool_config.json",
            lines=3,
            value="This is a tool that chats with a user.",
        ),
    ],
    outputs=["text"],
)

# Launch the Gradio interface
io.launch()
|