{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"machine_shape": "hm",
"gpuType": "T4",
"provenance": []
},
"accelerator": "GPU",
"kaggle": {
"accelerator": "gpu"
},
"language_info": {
"name": "python"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
}
},
"cells": [
{
"cell_type": "code",
"source": [
"# %pip (not !pip) installs into the active kernel's environment\n",
"%pip install --upgrade diffusers[torch] safetensors opencv-python\n",
"%pip install transformers"
],
"metadata": {
"id": "gMbeyk56crpp"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"## Local Inference on GPU\n",
"Model page: https://huggingface.co/Sami-AI-Lab/Guovdageaidnu_male_gakti\n",
"\n",
"⚠️ If the generated code snippets do not work, please open an issue on the [model repo](https://huggingface.co/Sami-AI-Lab/Guovdageaidnu_male_gakti)\n",
"\t\t\tand/or on [huggingface.js](https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries-snippets.ts) 🙏"
],
"metadata": {
"id": "-HfTBfzhcrpq"
}
},
{
"cell_type": "code",
"source": [
"import torch\n",
"from diffusers import StableDiffusionPipeline\n",
"from diffusers import (\n",
"    DDPMScheduler,\n",
"    DDIMScheduler,\n",
"    PNDMScheduler,\n",
"    LMSDiscreteScheduler,\n",
"    EulerDiscreteScheduler,\n",
"    EulerAncestralDiscreteScheduler,\n",
"    DPMSolverMultistepScheduler,\n",
")\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import time\n",
"\n",
"begin = time.time()\n",
"\n",
"# For advanced users, see the StableDiffusionPipeline docs for more settings:\n",
"# https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/text2img\n",
"# NOTE: `adapter_name` is not a valid `from_pretrained` keyword (it belongs to\n",
"# `load_lora_weights`); diffusers would warn and ignore it, so it is omitted here.\n",
"pipe = StableDiffusionPipeline.from_pretrained(\"Lykon/dreamshaper-8\",\n",
"                                               # use_safetensors=True,\n",
"                                               # local_files_only=True,\n",
"                                               # device_map=\"auto\",\n",
"                                               # safety_checker=True\n",
"                                               cache_dir=\"models\")\n",
"pipe.load_lora_weights(\"Sami-AI-Lab/historikklavvo\", adapter_name=\"lora\")\n",
"# Adjust adapter_weights to tune the strength of the LoRA (values between 0 and 1)\n",
"pipe.set_adapters([\"lora\"], adapter_weights=[0.90])\n",
"\n",
"# By default the pipeline uses the PNDM scheduler with 50 steps; swap it here if desired\n",
"# pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)\n",
"\n",
"print(f\"Load : {time.time()-begin}\")\n",
"pipe = pipe.to(\"cuda\")\n",
"print(f\"Load+cuda : {time.time()-begin}\")\n",
"\n",
"# This is your text-to-image prompt; adjust within the quotation marks to try different generations.\n",
"prompt = \"historikklavvo, a traditional Sámi lavvo, tent\"\n",
"image = pipe(prompt, num_inference_steps=25).images[0]\n",
"\n",
"# The pipeline normally returns a PIL image; convert defensively if it is a torch tensor\n",
"if torch.is_tensor(image):\n",
"    image = image.cpu().numpy()\n",
"\n",
"# Display the image\n",
"plt.imshow(image)\n",
"plt.axis('off')  # Turn off axis numbers\n",
"plt.show()"
],
"metadata": {
"id": "tufsR6jyc0U0"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "5YzjFlUIc88M"
},
"execution_count": null,
"outputs": []
}
]
}