multimodalart HF Staff commited on
Commit
d66c4de
·
verified ·
1 Parent(s): ff1715b

Upload Import_Replicate_SDXL_LoRA_to_Hugging_Face_🤗.ipynb

Browse files
Import_Replicate_SDXL_LoRA_to_Hugging_Face_🤗.ipynb ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "private_outputs": true,
7
+ "provenance": []
8
+ },
9
+ "kernelspec": {
10
+ "name": "python3",
11
+ "display_name": "Python 3"
12
+ },
13
+ "language_info": {
14
+ "name": "python"
15
+ }
16
+ },
17
+ "cells": [
18
+ {
19
+ "cell_type": "code",
20
+ "execution_count": null,
21
+ "metadata": {
22
+ "id": "EzgI2tPTO97I"
23
+ },
24
+ "outputs": [],
25
+ "source": [
26
+ "#@markdown Install OS dependencies\n",
27
+ "!apt-get install -y skopeo\n",
28
+ "!apt-get install -y jq"
29
+ ]
30
+ },
31
+ {
32
+ "cell_type": "code",
33
+ "source": [
34
+ "#@markdown Install python dependencies\n",
35
+ "!pip install huggingface_hub"
36
+ ],
37
+ "metadata": {
38
+ "id": "jitE6IrayFDH"
39
+ },
40
+ "execution_count": null,
41
+ "outputs": []
42
+ },
43
+ {
44
+ "cell_type": "code",
45
+ "source": [
46
+ "#@markdown Choose the Replicate SDXL LoRA repository you would like to upload to Hugging Face (you don't need to be the author). Grab your Replicate token [here](https://replicate.com/account/api-tokens)\n",
47
+ "import requests\n",
48
+ "import json\n",
49
+ "\n",
50
+ "replicate_model = \"fofr/sdxl-emoji\" #@param {type: \"string\"}\n",
51
+ "replicate_token = \"r8_***\" #@param {type: \"string\"}\n",
52
+ "\n",
53
+ "headers = { \"Authorization\": f\"Token {replicate_token}\" }\n",
54
+ "url = f\"https://api.replicate.com/v1/models/{replicate_model}\"\n",
55
+ "\n",
56
+ "response = requests.get(url, headers=headers)\n",
57
+ "model_data = response.json()\n",
58
+ "model_latest_version = model_data['latest_version']['id']\n",
59
+ "lora_name = model_data['name']\n",
60
+ "lora_author = model_data['owner']\n",
61
+ "lora_description = model_data['description']\n",
62
+ "lora_url = model_data['url']\n",
63
+ "lora_image = model_data['cover_image_url']\n",
64
+ "lora_docker_image = f\"{lora_name}@sha256:{model_latest_version}\"\n",
65
+ "default_prompt = model_data[\"default_example\"][\"input\"][\"prompt\"]"
66
+ ],
67
+ "metadata": {
68
+ "id": "1SNPPvVVUk5T"
69
+ },
70
+ "execution_count": null,
71
+ "outputs": []
72
+ },
73
+ {
74
+ "cell_type": "code",
75
+ "source": [
76
+ "!skopeo inspect docker://r8.im/lucataco/sdxl-panoramic@sha256:76acc4075d0633dcb3823c1fed0419de21d42001b65c816c7b5b9beff30ec8cd"
77
+ ],
78
+ "metadata": {
79
+ "id": "_EYVnbuUV5yd"
80
+ },
81
+ "execution_count": null,
82
+ "outputs": []
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "source": [
87
+ "!sh data.sh"
88
+ ],
89
+ "metadata": {
90
+ "id": "1wv2AxI2eVp9"
91
+ },
92
+ "execution_count": null,
93
+ "outputs": []
94
+ },
95
+ {
96
+ "cell_type": "code",
97
+ "source": [
98
+ "#@markdown Grab the trained LoRA and unTAR to a folder\n",
99
+ "cmd = f'skopeo inspect docker://r8.im/{replicate_model}@sha256:{model_latest_version} --config | jq -r \\'.config.Env[] | select(startswith(\"COG_WEIGHTS=\"))\\' | awk -F= \\'{{print $2}}\\''\n",
100
+ "print(cmd)\n",
101
+ "url = !{cmd}\n",
102
+ "print(url)\n",
103
+ "url = url[0]\n",
104
+ "tar_name = url.split(\"/\")[-1]\n",
105
+ "folder_name = \"lora_folder\" #@param {type:\"string\"}\n",
106
+ "!mkdir {folder_name}\n",
107
+ "!wget {url}\n",
108
+ "!tar -xvf {tar_name} -C {folder_name}"
109
+ ],
110
+ "metadata": {
111
+ "id": "cdtnTm0GPLFH"
112
+ },
113
+ "execution_count": null,
114
+ "outputs": []
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "source": [
119
+ "#@markdown Login with Hugging Face Hub (pick a `write` token)\n",
120
+ "from huggingface_hub import notebook_login, upload_folder, create_repo\n",
121
+ "notebook_login()"
122
+ ],
123
+ "metadata": {
124
+ "id": "eV6ApIY6dU4K"
125
+ },
126
+ "execution_count": null,
127
+ "outputs": []
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "source": [
132
 + "#@markdown Insert the `hf_repo` you would like to upload this model to. It has to be either the username you logged in with above (e.g.: `fofr`, `zeke`, `nateraw`) or an organization you are part of (e.g.: `replicate`)\n",
133
+ "hf_repo = \"multimodalart\" #@param {type: \"string\"}"
134
+ ],
135
+ "metadata": {
136
+ "id": "5LCbCbjZdnZZ"
137
+ },
138
+ "execution_count": null,
139
+ "outputs": []
140
+ },
141
+ {
142
+ "cell_type": "code",
143
+ "source": [
144
+ "#@markdown Create HF model repo `hf_repo/lora_name`\n",
145
+ "hf_model_slug = f\"{hf_repo}/{lora_name}\"\n",
146
+ "create_repo(hf_model_slug, repo_type=\"model\")"
147
+ ],
148
+ "metadata": {
149
+ "id": "ZVWoAy1U2cis"
150
+ },
151
+ "execution_count": null,
152
+ "outputs": []
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "source": [
157
+ "#@markdown Set up the `README.md` for the HF model repo.\n",
158
+ "\n",
159
+ "#Replaces the nicename token with the\n",
160
+ "#model's token as specified in the `special_params.json`\n",
161
+ "replaced_prompt = default_prompt\n",
162
+ "activation_triggers = []\n",
163
+ "with open(f'{folder_name}/special_params.json', 'r') as f:\n",
164
+ " token_data = json.load(f)\n",
165
+ "for key, value in token_data.items():\n",
166
+ " replaced_prompt = replaced_prompt.replace(key, value)\n",
167
+ " activation_triggers.append(value)\n",
168
+ "comma_activation_triggers = ', '.join(map(str, activation_triggers))\n",
169
+ "README_TEXT = f'''---\n",
170
+ "license: creativeml-openrail-m\n",
171
+ "tags:\n",
172
+ " - text-to-image\n",
173
+ " - stable-diffusion\n",
174
+ " - lora\n",
175
+ " - diffusers\n",
176
+ " - pivotal-tuning\n",
177
+ "base_model: stabilityai/stable-diffusion-xl-base-1.0\n",
178
+ "pivotal_tuning: true\n",
179
+ "textual_embeddings: embeddings.pti\n",
180
+ "instance_prompt: {comma_activation_triggers}\n",
181
+ "inference: true\n",
182
+ "---\n",
183
+ "# {lora_name} LoRA by [{lora_author}](https://replicate.com/{lora_author})\n",
184
+ "### {lora_description}\n",
185
+ "\n",
186
+ "![lora_image]({lora_image})\n",
187
+ ">\n",
188
+ "\n",
189
+ "## Inference with Replicate API\n",
190
+ "Grab your replicate token [here](https://replicate.com/account)\n",
191
+ "```bash\n",
192
+ "pip install replicate\n",
193
+ "export REPLICATE_API_TOKEN=r8_*************************************\n",
194
+ "```\n",
195
+ "\n",
196
+ "```py\n",
197
+ "import replicate\n",
198
+ "\n",
199
+ "output = replicate.run(\n",
200
+ " \"{lora_docker_image}\",\n",
201
+ " input={{\"prompt\": \"{default_prompt}\"}}\n",
202
+ ")\n",
203
+ "print(output)\n",
204
+ "```\n",
205
+ "You may also do inference via the API with Node.js or curl, and locally with COG and Docker, [check out the Replicate API page for this model]({lora_url}/api)\n",
206
+ "\n",
207
+ "## Inference with 🧨 diffusers\n",
208
+ "Replicate SDXL LoRAs are trained with Pivotal Tuning, which combines training a concept via Dreambooth LoRA with training a new token with Textual Inversion.\n",
209
 + "As `diffusers` doesn't yet support textual inversion for SDXL, we will use the cog-sdxl `TokenEmbeddingsHandler` class.\n",
210
+ "\n",
211
+ "The trigger tokens for your prompt will be `{comma_activation_triggers}`\n",
212
+ "\n",
213
+ "```shell\n",
214
+ "pip install diffusers transformers accelerate safetensors huggingface_hub\n",
215
+ "git clone https://github.com/replicate/cog-sdxl cog_sdxl\n",
216
+ "```\n",
217
+ "\n",
218
+ "```py\n",
219
+ "import torch\n",
220
+ "from huggingface_hub import hf_hub_download\n",
221
+ "from diffusers import DiffusionPipeline\n",
222
+ "from safetensors.torch import load_file\n",
223
+ "from diffusers.models import AutoencoderKL\n",
224
+ "\n",
225
+ "pipe = DiffusionPipeline.from_pretrained(\n",
226
+ " \"stabilityai/stable-diffusion-xl-base-1.0\",\n",
227
+ " torch_dtype=torch.float16,\n",
228
+ " variant=\"fp16\",\n",
229
+ ").to(\"cuda\")\n",
230
+ "\n",
231
+ "pipe.load_lora_weights(\"{hf_model_slug}\", weight_name=\"lora.safetensors\")\n",
232
+ "\n",
233
+ "embedding_path = hf_hub_download(repo_id=\"{hf_model_slug}\", filename=\"embeddings.pti\", repo_type=\"model\")\n",
234
+ "\n",
235
+ "state_dict = load_file(embedding_path)\n",
236
+ "\n",
237
 + "pipe.load_textual_inversion(state_dict[\"text_encoders_0\"], token=[\"<s0>\", \"<s1>\"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)\n",
238
 + "pipe.load_textual_inversion(state_dict[\"text_encoders_1\"], token=[\"<s0>\", \"<s1>\"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)\n",
239
+ "\n",
240
+ "prompt=\"{replaced_prompt}\"\n",
241
+ "images = pipe(\n",
242
+ " prompt,\n",
243
+ " cross_attention_kwargs={{\"scale\": 0.8}},\n",
244
+ ").images\n",
245
+ "#your output image\n",
246
+ "images[0]\n",
247
+ "```\n",
248
+ "'''\n",
249
+ "\n",
250
+ "with open(f'{folder_name}/README.md', 'w') as f:\n",
251
+ " f.write(README_TEXT)"
252
+ ],
253
+ "metadata": {
254
+ "id": "tEaGfGz0RRMK"
255
+ },
256
+ "execution_count": null,
257
+ "outputs": []
258
+ },
259
+ {
260
+ "cell_type": "code",
261
+ "source": [
262
+ "#@markdown Upload the repo to HF!\n",
263
+ "upload_folder(\n",
264
+ " folder_path=folder_name,\n",
265
+ " repo_id=hf_model_slug,\n",
266
+ " repo_type=\"model\"\n",
267
+ ")"
268
+ ],
269
+ "metadata": {
270
+ "id": "_8MGlxgBgKyT"
271
+ },
272
+ "execution_count": null,
273
+ "outputs": []
274
+ }
275
+ ]
276
+ }