from interpreter import interpreter
import streamlit as st

# Ask open-interpreter a question and render whatever it returns.
# chat() returns the full conversation as a list of message dicts, so
# st.write displays that raw structure, not just the assistant's reply text.
output = interpreter.chat("hi, how are you")
st.write(output)
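# A hedged sketch, not part of the original app: stream the assistant's reply
# into the page instead of dumping the raw message list. Assumes the installed
# open-interpreter supports chat(..., stream=True, display=False) yielding
# chunk dicts with "type" and "content" keys; verify against your version.
# placeholder = st.empty()
# streamed = ""
# for chunk in interpreter.chat("hi, how are you", stream=True, display=False):
#     if chunk.get("type") == "message" and chunk.get("content"):
#         streamed += chunk["content"]
#         placeholder.write(streamed)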
# import subprocess

# def run_terminal_command(command):
#     try:
#         # Run the terminal command and capture its output
#         output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
#         return output.decode("utf-8")  # Decode bytes to string
#     except subprocess.CalledProcessError as e:
#         # Handle errors if the command fails
#         return f"Error: {e.output.decode('utf-8')}"

# # Example command: list files in the current directory
# command = "ls"
# output = run_terminal_command(command)
# print(output)
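# A hedged alternative, not in the original snippet: subprocess.run with an
# argument list sidesteps shell=True, so user-supplied strings are never
# interpreted by a shell (no injection via metacharacters).
# result = subprocess.run(["ls", "-l"], capture_output=True, text=True)
# print(result.stdout if result.returncode == 0 else f"Error: {result.stderr}")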
# import streamlit as st
# import torch
# from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
# from huggingface_hub import hf_hub_download
# from safetensors.torch import load_file

# # Model Path/Repo Information
# base = "stabilityai/stable-diffusion-xl-base-1.0"
# repo = "ByteDance/SDXL-Lightning"
# ckpt = "sdxl_lightning_4step_unet.safetensors"

# # Load model (executed only once for efficiency)
# @st.cache_resource
# def load_sdxl_pipeline():
#     unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cpu", torch.float32)
#     unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cpu"))
#     pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float32, variant="fp16").to("cpu")
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
#     return pipe

# # Streamlit UI
# st.title("Image Generation")
# prompt = st.text_input("Enter your image prompt:")

# if st.button("Generate Image"):
#     if not prompt:
#         st.warning("Please enter a prompt.")
#     else:
#         pipe = load_sdxl_pipeline()  # Load the pipeline from cache
#         with torch.no_grad():
#             image = pipe(prompt).images[0]
#         st.image(image)
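# A hedged note, not from the original app: per the ByteDance/SDXL-Lightning
# model card, the 4-step UNet is meant to be sampled with exactly 4 steps and
# guidance disabled, while the call above relies on pipeline defaults. On a
# GPU Space the pipeline would also normally run in fp16:
# pipe = load_sdxl_pipeline().to("cuda", torch.float16)
# image = pipe(prompt, num_inference_steps=4, guidance_scale=0).images[0]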
# import json
# import google.generativeai as genai  # both imports were missing from the original snippet

# GOOGLE_API_KEY = ""
# genai.configure(api_key=GOOGLE_API_KEY)
# model = genai.GenerativeModel('gemini-pro')

# def add_to_json(goal):
#     # Append a goal to test.json, creating the file on first use.
#     try:
#         with open("test.json", "r") as file:
#             data = json.load(file)
#     except FileNotFoundError:
#         data = {"goals": []}  # Start with an empty 'goals' list if the file doesn't exist
#     new_item = {"Goal": goal}
#     data["goals"].append(new_item)
#     with open("test.json", "w") as file:
#         json.dump(data, file, indent=4)
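# For reference, after add_to_json("run a 5k") and add_to_json("read daily"),
# test.json would contain (the goal strings are hypothetical examples):
# {
#     "goals": [
#         {"Goal": "run a 5k"},
#         {"Goal": "read daily"}
#     ]
# }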
# def main():
#     if prompt := st.chat_input("Hi, how can I help you?"):
#         goals_prompt = f"""Act as a personal assistant... {prompt} """
#         completion = model.generate_content(goals_prompt)
#         add_to_json(prompt)
#         with st.chat_message("Assistant"):
#             st.write(completion.text)

#     # Display JSON Data
#     if st.button("Show JSON Data"):
#         with open("test.json", "r") as file:
#             data = json.load(file)
#         st.json(data)  # Streamlit's way to display JSON

# if __name__ == "__main__":
#     main()
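# A hedged sketch, not in the original snippet: the "Show JSON Data" branch in
# main() reads test.json without the FileNotFoundError guard used in
# add_to_json, so clicking the button before any goal is saved would crash.
# A defensive variant:
# if st.button("Show JSON Data"):
#     try:
#         with open("test.json", "r") as file:
#             st.json(json.load(file))
#     except (FileNotFoundError, json.JSONDecodeError):
#         st.info("No goals saved yet.")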