Upload fusion_t2i_CLIP_interrogator.ipynb
Browse files
Google Colab Jupyter Notebooks/fusion_t2i_CLIP_interrogator.ipynb
CHANGED
|
@@ -132,7 +132,9 @@
|
|
| 132 |
"log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
|
| 133 |
"prompt_strength = math.pow(10 ,log_strength-1)\n",
|
| 134 |
"reference = torch.zeros(768)\n",
|
| 135 |
-
"\n",
|
|
|
|
|
|
|
| 136 |
"references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
|
| 137 |
"reference = torch.add(reference, C * references[index][0].dequantize())\n",
|
| 138 |
"reference = torch.add(reference, (1-C) * references[index][1].dequantize())\n",
|
|
@@ -214,7 +216,7 @@
|
|
| 214 |
" for _index in range(list_size):\n",
|
| 215 |
" output = output + prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}'] + '|'\n",
|
| 216 |
" #---------#\n",
|
| 217 |
-
"        output = (output + '}').replace('|}' , '}')\n",
|
| 218 |
" for iter in range(N):\n",
|
| 219 |
" print(output)\n",
|
| 220 |
"#-------#\n",
|
|
|
|
| 132 |
"log_strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.01}\n",
|
| 133 |
"prompt_strength = math.pow(10 ,log_strength-1)\n",
|
| 134 |
"reference = torch.zeros(768)\n",
|
| 135 |
+
"\n",
|
| 136 |
+
"\n",
|
| 137 |
+
"\n",
|
| 138 |
"references = torch.load('reference_text_and_image_encodings.pt' , weights_only=False)\n",
|
| 139 |
"reference = torch.add(reference, C * references[index][0].dequantize())\n",
|
| 140 |
"reference = torch.add(reference, (1-C) * references[index][1].dequantize())\n",
|
|
|
|
| 216 |
" for _index in range(list_size):\n",
|
| 217 |
" output = output + prompts[f'{indices[min(_index+start_at_index,NUM_VOCAB_ITEMS-1)].item()}'] + '|'\n",
|
| 218 |
" #---------#\n",
|
| 219 |
+
" output = (output + '}').replace('|}' , '} ')\n",
|
| 220 |
" for iter in range(N):\n",
|
| 221 |
" print(output)\n",
|
| 222 |
"#-------#\n",
|