ZhouZJ36DL commited on
Commit
a6f7d55
·
1 Parent(s): c28416b

new file: .gitignore

Browse files
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.pyc
4
+ heatmap/
5
+ src/gradio_utils/gradio_outputs/
app_test.py DELETED
@@ -1,37 +0,0 @@
1
- import gradio as gr
2
-
3
def calculator(num1, operation, num2):
    """Apply the named arithmetic operation to *num1* and *num2*.

    Returns the numeric result for "add", "subtract", "multiply" or
    "divide"; returns None for any other operation name (matching the
    original if/elif chain falling through). Division by zero raises
    ZeroDivisionError, as in the original.
    """
    ops = {
        "add": lambda a, b: a + b,
        "subtract": lambda a, b: a - b,
        "multiply": lambda a, b: a * b,
        "divide": lambda a, b: a / b,
    }
    op = ops.get(operation)
    return op(num1, num2) if op is not None else None
12
-
13
# Calculator demo UI: number/operation inputs on the left, result on the right.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            num_1 = gr.Number(value=4)
            operation = gr.Radio(["add", "subtract", "multiply", "divide"])
            num_2 = gr.Number(value=0)
            submit_btn = gr.Button(value="Calculate")
        with gr.Column():
            result = gr.Number()

    # Wire the button to the calculator; api_name=False keeps this event
    # out of the auto-generated API surface.
    submit_btn.click(
        calculator,
        inputs=[num_1, operation, num_2],
        outputs=[result],
        api_name=False,
    )

    # Clickable example rows that pre-fill the three inputs.
    examples = gr.Examples(
        examples=[
            [5, "add", 3],
            [4, "divide", 2],
            [-4, "multiply", 2.5],
            [0, "subtract", 1.2],
        ],
        inputs=[num_1, operation, num_2],
    )

if __name__ == "__main__":
    demo.launch(show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ
 
src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ
 
src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ
 
src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ
 
src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ
 
src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ
 
src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ
 
src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ
 
src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
 
src/flux/modules/conditioner.py CHANGED
@@ -4,6 +4,7 @@ from transformers import (CLIPTextModel, CLIPTokenizer, T5EncoderModel,
4
  import os
5
  import torch
6
 
 
7
  class HFEmbedder(nn.Module):
8
  def __init__(self, version: str, max_length: int, is_clip, **hf_kwargs):
9
  super().__init__()
@@ -133,3 +134,38 @@ class HFEmbedder(nn.Module):
133
  raise # Re-raise the error after logging
134
 
135
  return outputs[self.output_key]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  import os
5
  import torch
6
 
7
+ '''
8
  class HFEmbedder(nn.Module):
9
  def __init__(self, version: str, max_length: int, is_clip, **hf_kwargs):
10
  super().__init__()
 
134
  raise # Re-raise the error after logging
135
 
136
  return outputs[self.output_key]
137
+ '''
138
class HFEmbedder(nn.Module):
    """Wrap a frozen CLIP or T5 text encoder behind one forward(text) call.

    Args:
        version: model id or path passed to ``from_pretrained``.
        max_length: tokenizer truncation/padding length.
        is_clip: True selects CLIP (pooled output); otherwise T5
            (per-token hidden states).
        **hf_kwargs: forwarded to the encoder's ``from_pretrained``.
    """

    def __init__(self, version: str, max_length: int, is_clip, **hf_kwargs):
        super().__init__()
        self.is_clip = is_clip
        self.max_length = max_length
        # CLIP yields a pooled sentence embedding; T5 yields token states.
        self.output_key = "pooler_output" if is_clip else "last_hidden_state"

        if is_clip:
            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
        else:
            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)

        # Inference-only: freeze the encoder and switch to eval mode.
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> Tensor:
        """Encode a batch of strings; returns the tensor at ``output_key``."""
        tokens = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )
        # NOTE(review): attention_mask is deliberately None, so padded
        # positions are attended to — presumably mirroring the reference
        # FLUX implementation; confirm before "fixing".
        encoded = self.hf_module(
            input_ids=tokens["input_ids"].to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )
        return encoded[self.output_key]