manbeast3b committed
Commit 7a0ec59
Parent(s): 84fa3a0
add

Files changed:
- RobertML.png (+0 -3)
- loss_params.pth (+0 -3)
- src/main.py (+1 -1)
- src/pipeline.py (+4 -3)
RobertML.png
DELETED (Git LFS file)
loss_params.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b0ee6fa5873dbc8df9daeeb105e220266bcf6634c6806b69da38fdc0a5c12b81
-size 3184
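
For reference, the three removed lines are a standard Git LFS pointer: the spec version, the SHA-256 digest of the real blob, and its size in bytes (3184 here). A minimal parsing sketch using only the pointer text shown above; parse_lfs_pointer is a hypothetical helper, not part of this repo or of Git LFS tooling:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space only.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:b0ee6fa5873dbc8df9daeeb105e220266bcf6634c6806b69da38fdc0a5c12b81\n"
    "size 3184\n"
)
print(parse_lfs_pointer(pointer)["size"])  # -> 3184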
src/main.py
CHANGED
@@ -23,7 +23,7 @@ def main():
     atexit.register(at_exit)
 
     print(f"Loading pipeline")
-    pipeline =
+    pipeline = load_pipeline()
 
     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
 
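
The fix wires the pipeline variable to load_pipeline() from src/pipeline.py. A minimal sketch of the surrounding flow; only the four statements visible in the diff come from the source, while at_exit, SOCKET, and the load_pipeline body are stand-ins for definitions that live elsewhere in the repo:

import atexit

SOCKET = "/tmp/pipeline.sock"  # placeholder; the real value is not in the diff

def at_exit():
    pass  # placeholder cleanup hook registered below

def load_pipeline():
    raise NotImplementedError  # the real loader is defined in src/pipeline.py

def main():
    atexit.register(at_exit)

    print(f"Loading pipeline")
    pipeline = load_pipeline()

    print(f"Pipeline loaded, creating socket at '{SOCKET}'")

if __name__ == "__main__":
    main()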
src/pipeline.py
CHANGED
@@ -95,10 +95,11 @@ def load_pipeline() -> Pipeline:
 
     return pipeline
 
-_inference_count = 0
+#_inference_count = 0
 
 @torch.inference_mode()
 def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+    '''
     global _inference_count
 
     # Clear on first inference
@@ -110,8 +111,8 @@ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
     if _inference_count >= 4:
         empty_cache()
         _inference_count = 0
-
-    torch.cuda.reset_peak_memory_stats()
+    '''
+    #torch.cuda.reset_peak_memory_stats()
     generator = Generator("cuda").manual_seed(request.seed)
     try:
         return pipeline(
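
The net effect: the per-call cache-clearing logic is disabled without being deleted. Commenting out _inference_count = 0 and wrapping the counter logic in a bare '''...''' string turns those statements into an unevaluated string expression, so infer now runs straight to the generator setup. For reference, a sketch of the pattern that was switched off, reconstructed from the removed lines; the counter increment and the empty_cache import are assumptions, since those lines fall outside the visible hunks:

import torch
from torch import Generator
from torch.cuda import empty_cache  # assumption: source of the bare empty_cache()

_inference_count = 0

@torch.inference_mode()
def infer(request, pipeline):  # real signature: (TextToImageRequest, Pipeline) -> Image
    global _inference_count
    _inference_count += 1  # assumption: counter advances once per inference

    # Every fourth call, release cached GPU memory and restart the count.
    if _inference_count >= 4:
        empty_cache()
        _inference_count = 0

    torch.cuda.reset_peak_memory_stats()
    generator = Generator("cuda").manual_seed(request.seed)
    return pipeline(generator=generator)  # other call arguments are elided in the diff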