{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": { "machine_shape": "hm", "gpuType": "T4", "provenance": [] },
    "accelerator": "GPU",
    "kaggle": { "accelerator": "gpu" },
    "language_info": { "name": "python" },
    "kernelspec": { "name": "python3", "display_name": "Python 3" }
  },
  "cells": [
    {
      "cell_type": "code",
      "source": [
        "# Use %pip (not !pip) so packages install into the running kernel's environment.\n",
        "%pip install --upgrade diffusers[torch] safetensors opencv-python\n",
        "%pip install transformers"
      ],
      "metadata": { "id": "gMbeyk56crpp" },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Local Inference on GPU\n",
        "Model page: https://huggingface.co/Sami-AI-Lab/historikklavvo\n",
        "\n",
        "⚠️ If the generated code snippets do not work, please open an issue on the [model repo](https://huggingface.co/Sami-AI-Lab/historikklavvo)\n",
        "and/or on [huggingface.js](https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries-snippets.ts) 🙏"
      ],
      "metadata": { "id": "-HfTBfzhcrpq" }
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "from diffusers import StableDiffusionPipeline\n",
        "from diffusers import (\n",
        "    DDPMScheduler,\n",
        "    DDIMScheduler,\n",
        "    PNDMScheduler,\n",
        "    LMSDiscreteScheduler,\n",
        "    EulerDiscreteScheduler,\n",
        "    EulerAncestralDiscreteScheduler,\n",
        "    DPMSolverMultistepScheduler,\n",
        ")\n",
        "\n",
        "import matplotlib.pyplot as plt\n",
        "import time\n",
        "\n",
        "begin = time.time()\n",
        "\n",
        "# For advanced users, see the StableDiffusionPipeline document for more information on adjusting settings:\n",
        "# https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/text2img\n",
        "pipe = StableDiffusionPipeline.from_pretrained(\n",
        "    \"Lykon/dreamshaper-8\",\n",
        "    # use_safetensors=True,\n",
        "    # local_files_only=True,\n",
        "    # device_map=\"auto\",\n",
        "    # safety_checker=True,\n",
        "    cache_dir=\"models\",\n",
        ")\n",
        "# NOTE: adapter_name is an argument of load_lora_weights(), not from_pretrained().\n",
        "pipe.load_lora_weights(\"Sami-AI-Lab/historikklavvo\", adapter_name=\"lora\")\n",
        "# Adjust adapter_weights to tune the strength of the LoRA with numbers between 0 - 1\n",
        "pipe.set_adapters([\"lora\"], adapter_weights=[0.90])\n",
        "\n",
        "# by default uses the PNDM scheduler with 50 steps, you can change the scheduler here\n",
        "# pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)\n",
        "\n",
        "print(f\"Load : {time.time()-begin}\")\n",
        "pipe = pipe.to(\"cuda\")\n",
        "print(f\"Load+cuda : {time.time()-begin}\")\n",
        "\n",
        "# This is your text-2-Image prompt below, adjust within quotation marks to try different generations.\n",
        "prompt = \"historikklavvo, a traditional Sámi lavvo, tent\"\n",
        "image = pipe(prompt, num_inference_steps=25).images[0]\n",
        "\n",
        "# If the image is a torch tensor, convert it to numpy array\n",
        "if torch.is_tensor(image):\n",
        "    image = image.cpu().numpy()\n",
        "\n",
        "# Display the image\n",
        "plt.imshow(image)\n",
        "plt.axis('off')  # To turn off axis numbers\n",
        "plt.show()"
      ],
      "metadata": { "id": "tufsR6jyc0U0" },
      "execution_count": null,
      "outputs": []
    }
  ]
}