niplinig committed on
Commit
112d8f5
·
verified ·
1 Parent(s): a1c44e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +135 -133
app.py CHANGED
@@ -1,146 +1,148 @@
 
 
1
  import gradio as gr
2
- import numpy as np
3
- import random
4
- from diffusers import DiffusionPipeline
5
- import torch
6
 
7
- device = "cuda" if torch.cuda.is_available() else "cpu"
8
 
9
- if torch.cuda.is_available():
10
- torch.cuda.max_memory_allocated(device=device)
11
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
12
- pipe.enable_xformers_memory_efficient_attention()
13
- pipe = pipe.to(device)
14
- else:
15
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
16
- pipe = pipe.to(device)
 
 
 
 
 
17
 
18
- MAX_SEED = np.iinfo(np.int32).max
19
- MAX_IMAGE_SIZE = 1024
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 
 
 
 
 
 
 
 
 
 
22
 
23
- if randomize_seed:
24
- seed = random.randint(0, MAX_SEED)
25
-
26
- generator = torch.Generator().manual_seed(seed)
27
-
28
- image = pipe(
29
- prompt = prompt,
30
- negative_prompt = negative_prompt,
31
- guidance_scale = guidance_scale,
32
- num_inference_steps = num_inference_steps,
33
- width = width,
34
- height = height,
35
- generator = generator
36
- ).images[0]
37
-
38
- return image
39
 
40
- examples = [
41
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
42
- "An astronaut riding a green horse",
43
- "A delicious ceviche cheesecake slice",
44
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
- css="""
47
- #col-container {
48
- margin: 0 auto;
49
- max-width: 520px;
50
- }
51
- """
 
 
 
 
 
 
 
 
52
 
53
- if torch.cuda.is_available():
54
- power_device = "GPU"
55
- else:
56
- power_device = "CPU"
57
 
58
- with gr.Blocks(css=css) as demo:
59
-
60
- with gr.Column(elem_id="col-container"):
61
- gr.Markdown(f"""
62
- # Text-to-Image Gradio Template
63
- Currently running on {power_device}.
64
- """)
65
-
66
- with gr.Row():
67
-
68
- prompt = gr.Text(
69
- label="Prompt",
70
- show_label=False,
71
- max_lines=1,
72
- placeholder="Enter your prompt",
73
- container=False,
74
- )
75
-
76
- run_button = gr.Button("Run", scale=0)
77
-
78
- result = gr.Image(label="Result", show_label=False)
79
 
80
- with gr.Accordion("Advanced Settings", open=False):
81
-
82
- negative_prompt = gr.Text(
83
- label="Negative prompt",
84
- max_lines=1,
85
- placeholder="Enter a negative prompt",
86
- visible=False,
87
- )
88
-
89
- seed = gr.Slider(
90
- label="Seed",
91
- minimum=0,
92
- maximum=MAX_SEED,
93
- step=1,
94
- value=0,
95
- )
96
-
97
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
98
-
99
- with gr.Row():
100
-
101
- width = gr.Slider(
102
- label="Width",
103
- minimum=256,
104
- maximum=MAX_IMAGE_SIZE,
105
- step=32,
106
- value=512,
107
- )
108
-
109
- height = gr.Slider(
110
- label="Height",
111
- minimum=256,
112
- maximum=MAX_IMAGE_SIZE,
113
- step=32,
114
- value=512,
115
- )
116
-
117
- with gr.Row():
118
-
119
- guidance_scale = gr.Slider(
120
- label="Guidance scale",
121
- minimum=0.0,
122
- maximum=10.0,
123
- step=0.1,
124
- value=0.0,
125
- )
126
-
127
- num_inference_steps = gr.Slider(
128
- label="Number of inference steps",
129
- minimum=1,
130
- maximum=12,
131
- step=1,
132
- value=2,
133
- )
134
-
135
- gr.Examples(
136
- examples = examples,
137
- inputs = [prompt]
138
- )
139
 
140
- run_button.click(
141
- fn = infer,
142
- inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
143
- outputs = [result]
144
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
- demo.queue().launch()
 
 
 
 
1
+ import sys
2
+ from datetime import date
3
  import gradio as gr
4
+ import pandas as pd
5
+ from pickle import load
6
+ from radiomics import featureextractor
 
7
 
8
# Radiomics feature extractor configured for 3-D volumes via the YAML parameter file.
extractor3D = featureextractor.RadiomicsFeatureExtractor("3DParams.yaml")

# Deserialize the pre-trained classifier once at startup.
# NOTE(review): pickle executes arbitrary code on load — model.pickle must come
# from a trusted source.
with open("model.pickle", "rb") as model_file:
    loaded_model = load(model_file)
12
+
13
class TextStream:
    """Minimal in-memory, file-like sink for captured ``print`` output.

    Collects every non-blank write, stripped of surrounding whitespace,
    in ``self.data`` in arrival order.
    """

    def __init__(self):
        # Stripped, non-empty chunks in the order they were written.
        self.data: list = []

    def write(self, s):
        stripped = s.strip()
        if stripped:
            self.data.append(stripped)

    def flush(self):
        # Nothing is buffered; present only to satisfy the stream protocol.
        pass
23
 
24
def image_classifier(image, segment):
    """Classify a tumour from a NIfTI image and its segmentation mask.

    Parameters
    ----------
    image : str
        Path to the NIfTI image volume.
    segment : str
        Path to the matching segmentation mask.

    Returns
    -------
    dict
        ``{"Grade 1": p1, "Grade 2": p2}`` — class probabilities from the
        pre-trained model.
    """
    features3D = extractor3D.execute(imageFilepath=image, maskFilepath=segment)
    # Sort features by name so the column order is deterministic and matches
    # the layout the model was trained on.
    sortedValues = [value for _key, value in
                    sorted(features3D.items(), key=lambda kv: kv[0])]
    # Render each value to stripped text, dropping empties.  This replaces the
    # previous sys.stdout-redirection hack: swapping the global stdout is not
    # thread-safe and breaks under concurrent Gradio requests, while producing
    # the same strings as print() did.
    rendered = [str(value).strip() for value in sortedValues]
    rendered = [text for text in rendered if text]
    # Keep only the feature columns the model expects.
    # NOTE(review): these positional slices assume a fixed extractor output
    # ordering — confirm against 3DParams.yaml if the parameter file changes.
    selected = rendered[4:7] + rendered[15:17] + rendered[22:]
    dataframe = pd.DataFrame(data=selected).transpose()
    prediction = loaded_model.predict_proba(dataframe).tolist()[0]
    return {"Grade 1": prediction[0], "Grade 2": prediction[1]}
42
 
43
def logging(image, label_output):
    """Flag handler: persist one classification result to ``flagged/log.csv``.

    Parameters
    ----------
    image : str
        Path of the flagged image file.
    label_output : dict
        Mapping of grade label -> probability, as produced by
        ``image_classifier``.
    """
    # dict.values() is a view and is NOT subscriptable in Python 3 —
    # materialize it before indexing (the original `label_output.values()[0]`
    # raised TypeError).
    grades = list(label_output.values())
    # Wrap every scalar in a one-element list: pandas raises
    # "If using all scalar values, you must pass an index" otherwise.
    dataframe = pd.DataFrame(data={
        "Imagen": [image],
        "Grado 1": [grades[0]],
        "Grado 2": [grades[1]],
        "Observación": [""],
        "Fecha": [date.today()],
        "Acción": [f"[Descarga]({image})"],
    })
    # NOTE(review): this overwrites the log on every flag; appending
    # (mode="a") may be the intent — confirm.
    dataframe.to_csv(path_or_buf="flagged/log.csv", sep=";")
    print(dataframe)
54
 
55
+ # Logger = gr.SimpleCSVLogger()
56
+
57
# "Historial" view: table of previous diagnoses, one row per study.
with gr.Blocks(title="Historial de diagnósticos") as ViewingHistory:
    gr.Dataframe(
        type="pandas",
        wrap=True,
        headers=["Imagen", "Grado 1", "Grado 2", "Observación", "Fecha", "Acción"],
        # "markdown" on the last column renders the download link.
        datatype=["str", "number", "number", "str", "date", "markdown"],
        # Start with 3 rows; both dimensions may grow at runtime.
        row_count=(3, "dynamic"),
        col_count=(6, "dynamic"),
    )
 
 
 
 
 
66
 
67
# "Base de datos" view: choose a table from the dropdown, browse it read-only below.
with gr.Blocks(title="Base de datos") as Database:
    with gr.Row():
        with gr.Column():
            gr.Dropdown(
                label="Tabla",
                scale=2,
                filterable=True,
                choices=["Usuarios", "Imágenes", "Resultados"],
            )
    with gr.Row():
        gr.Dataframe(
            type="pandas",
            wrap=True,
            interactive=False,  # browsing only — no inline edits
            headers=["Imagen", "Grado 1", "Grado 2", "Observación", "Fecha"],
            datatype=["str", "number", "number", "str", "date"],
            row_count=(3, "dynamic"),
            col_count=(5, "dynamic"),
        )
85
 
86
# "Administrador" view: profile card for the signed-in admin user.
with gr.Blocks(title="Información de usuario") as AdminInformation:
    # Keyword arguments shared by every profile text field below.
    _field_kwargs = dict(show_copy_button=True, max_lines=1, container=False)
    with gr.Row():
        with gr.Column():
            gr.Image(interactive=True)  # profile picture, user-editable
        with gr.Column():
            gr.Textbox(value="Nicolás Andrés", label="Nombres", interactive=True, type="text", **_field_kwargs)
            # Username and role are fixed by the backend, hence read-only.
            gr.Textbox(value="niplinig", label="Usuario", interactive=False, type="text", **_field_kwargs)
            gr.Textbox(value="Administrador", label="Rol", interactive=False, type="text", **_field_kwargs)
        with gr.Column():
            gr.Textbox(value="Plaza Iñiguez", label="Apellidos", interactive=True, type="text", **_field_kwargs)
            gr.Textbox(value="niplinig@espol.edu.ec", label="Correo electrónico", interactive=True, type="email", **_field_kwargs)
            gr.Textbox(value="0939552946", label="Número de teléfono", interactive=True, type="text", **_field_kwargs)
    with gr.Row():
        gr.Button(value="Guardar")
100
 
101
+ # def on_select(event : gr.SelectData):
102
+ # print(event.value, event.index, event.target, sep=",")
103
+ # return f"You selected {event.value} at {event.index} from {event.target}"
 
104
 
105
+ # image_file.select(fn=on_select, inputs=None, outputs=None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
# "Aplicación" view: upload an image + segmentation, run the classifier,
# and optionally flag the result for review.
with gr.Blocks(title="Clasificación") as MyModel:
    nifti_types = [".nii.gz", ".nii"]
    with gr.Row():
        with gr.Column():
            image_file = gr.File(label="Imagen", type="filepath", file_count="single", file_types=nifti_types)
            segment_file = gr.File(label="Segmento", type="filepath", file_count="single", file_types=nifti_types)
        with gr.Column():
            label_output = gr.Label(label="Resultado")
    with gr.Row():
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    clear_button = gr.ClearButton(value="Borrar", components=[image_file, segment_file, label_output])
                with gr.Column():
                    submit_button = gr.Button(value="Enviar", variant="primary")
        with gr.Column():
            flag_button = gr.Button(value="Marcar")
    # Wire the buttons: "Marcar" logs the current result, "Enviar" runs inference.
    flag_button.click(fn=logging, inputs=[image_file, label_output])
    submit_button.click(fn=image_classifier, inputs=[image_file, segment_file], outputs=[label_output])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
+ # MainModel = gr.Interface(
127
+ # fn=image_classifier,
128
+ # inputs=[image_file, segment_file],
129
+ # outputs=[gr.Label(label="Resultado")],
130
+ # title="Clasificación",
131
+ # description="Clasificación",
132
+ # allow_flagging="manual",
133
+ # flagging_callback=Logger,
134
+ # submit_btn="Enviar",
135
+ # stop_btn="Suspender",
136
+ # clear_btn="Borrar",
137
+ # show_progress="full",
138
+ # )
139
+
140
# Tabbed shell: one tab per top-level view, in display order.
demo = gr.TabbedInterface(
    interface_list=[MyModel, ViewingHistory, Database, AdminInformation],
    tab_names=["Aplicación", "Historial", "Base de datos", "Administrador"],
)

# share=True publishes a temporary public gradio.live URL;
# debug=True keeps the process attached and streams errors to the console.
demo.launch(debug=True, share=True)