LiKenun committed on
Commit
979bbdf
·
1 Parent(s): b4d819b

Initial gallery

Browse files
Files changed (4) hide show
  1. .cursorignore +2 -0
  2. .gitignore +24 -0
  3. app.py +105 -10
  4. requirements.txt +6 -0
.cursorignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Environment variables
2
+ .env
.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ .venv/
8
+ venv/
9
+ ENV/
10
+ env/
11
+
12
+ # Environment variables
13
+ .env
14
+
15
+ # IDE
16
+ .vscode/
17
+ .idea/
18
+ *.swp
19
+ *.swo
20
+ *~
21
+
22
+ # OS
23
+ .DS_Store
24
+ Thumbs.db
app.py CHANGED
@@ -1,14 +1,109 @@
 
1
  import gradio as gr
2
- import spaces
3
- import torch
 
 
 
 
 
 
4
 
5
- zero = torch.Tensor([0]).cuda()
6
- print(zero.device) # <-- 'cpu' 🤔
7
 
8
- @spaces.GPU
9
- def greet(n):
10
- print(zero.device) # <-- 'cuda:0' 🤗
11
- return f"Hello {zero + n} Tensor"
12
 
13
- demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
14
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dotenv import load_dotenv
2
  import gradio as gr
3
+ from huggingface_hub import InferenceClient
4
+ from io import BytesIO
5
+ from os import path, unlink
6
+ import pandas as pd
7
+ from pandas import DataFrame
8
+ from PIL.Image import Image, open as open_image
9
+ import requests
10
+ import tempfile
11
 
12
# Seconds to wait for an outbound HTTP request before giving up.
REQUEST_TIMEOUT = 45

# Hugging Face model IDs backing the two gallery tabs.
TEXT_TO_IMAGE_MODEL = "black-forest-labs/FLUX.1-dev"
IMAGE_CLASSIFICATION_MODEL = "prithivMLmods/Trash-Net"

# Load environment variables from .env file
load_dotenv()

# Shared inference client used by both tabs.
# NOTE(review): constructed with no arguments, so it presumably picks up the
# HF token from the environment — which is why load_dotenv() runs first.
# Confirm the expected variable name (e.g. HF_TOKEN) against the deployment.
client = InferenceClient()
21
+
22
def save_image_to_temp_file(image: "Image") -> str:
    """Write *image* to a uniquely named temporary file and return its path.

    The caller is responsible for deleting the file when finished with it.

    Args:
        image: PIL image to persist. Its original format is preserved when
            known; PIL only sets ``.format`` on images loaded from a
            file/stream, so in-memory images (e.g. Gradio uploads) fall back
            to PNG.

    Returns:
        Filesystem path of the newly written temporary file.
    """
    # `image.format` is either a truthy format string or None, so a single
    # `or` fallback suffices (the original second `if image_format` check was
    # dead code — image_format could never be falsy at that point).
    image_format = image.format or "PNG"
    suffix = f".{image_format.lower()}"
    # delete=False keeps the file after close(); we close before saving so the
    # path can be re-opened for writing on platforms (Windows) that refuse to
    # reopen a still-open NamedTemporaryFile.
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    temp_path = temp_file.name
    temp_file.close()
    image.save(temp_path, format=image_format)
    return temp_path
30
+
31
def text_to_image(prompt: str) -> Image:
    """Generate a PIL image from *prompt* using the configured FLUX model."""
    generated = client.text_to_image(prompt, model=TEXT_TO_IMAGE_MODEL)
    return generated
33
+
34
def image_classification(image_url: str | None, uploaded_image: Image | None) -> tuple[Image | None, DataFrame]:
    """Classify an image supplied either as a URL or as an uploaded PIL image.

    Exactly one of the two inputs must be provided.

    Args:
        image_url: URL of the image to fetch and classify, or None/blank.
        uploaded_image: Already-loaded PIL image, or None.

    Returns:
        A ``(preview_image, results)`` tuple: the fetched image (None when the
        image was uploaded, since the upload widget already shows it) and a
        DataFrame with "Label" and "Probability" columns.

    Raises:
        gr.Error: When both or neither input is supplied, or the URL fetch fails.
    """
    temp_file_path = None
    try:
        has_url = bool(image_url and image_url.strip())
        if uploaded_image is not None and has_url:
            raise gr.Error("Both an image URL and an uploaded image were provided. Please provide only one or the other.")
        elif uploaded_image is not None:
            temp_file_path = save_image_to_temp_file(uploaded_image)
            classifications = client.image_classification(temp_file_path, model=IMAGE_CLASSIFICATION_MODEL)
            image = None  # Upload widget already previews the image.
        elif has_url:
            # Keep this try narrow: only fetch/decode errors should be reported
            # as URL failures. (Previously the classification call sat inside
            # this try, so a model error was mislabeled as a fetch error.)
            try:
                response = requests.get(image_url, timeout=REQUEST_TIMEOUT)
                response.raise_for_status()
                image = open_image(BytesIO(response.content))
            except Exception as e:
                # Chain the cause so the original traceback is preserved.
                raise gr.Error(f"Failed to fetch image from URL: {str(e)}") from e
            temp_file_path = save_image_to_temp_file(image)
            classifications = client.image_classification(temp_file_path, model=IMAGE_CLASSIFICATION_MODEL)
        else:
            raise gr.Error("Please either provide an image URL or upload an image.")
        df = pd.DataFrame([
            {"Label": classification.label, "Probability": f"{classification.score:.2%}"}
            for classification in classifications
        ])
        return image, df
    finally:
        # Best-effort removal of the temporary file.
        if temp_file_path and path.exists(temp_file_path):
            try:
                unlink(temp_file_path)
            except OSError:
                pass  # Ignore clean-up errors.
66
+
67
# --- Gradio UI --------------------------------------------------------------
# `demo` is the conventional top-level Blocks name (Hugging Face Spaces
# launches it automatically when present) — do not rename.
# NOTE(review): the original indentation was lost in the diff rendering; the
# Row/Column nesting below is a reconstruction — verify the intended layout.
with gr.Blocks(title="AI Building Blocks") as demo:
    gr.Markdown("# AI Building Blocks")
    gr.Markdown("A gallery of building blocks for building AI applications")
    with gr.Tabs():
        # Tab 1: free-text prompt -> generated image (text_to_image).
        with gr.Tab("Text-to-image Generation"):
            gr.Markdown("Generate an image from a text prompt.")
            text_to_image_prompt = gr.Textbox(label="Prompt", value="A panda under a giant mushroom next to a pumpkin")
            text_to_image_generate_button = gr.Button("Generate")
            text_to_image_output = gr.Image(label="Image", type="pil")
            text_to_image_generate_button.click(
                fn=text_to_image,
                inputs=text_to_image_prompt,
                outputs=text_to_image_output
            )
        # Tab 2: URL or upload -> Trash-Net classification (image_classification).
        with gr.Tab("Image Classification"):
            gr.Markdown("Classify a recyclable item as one of: cardboard, glass, metal, paper, plastic, or other using [Trash-Net](https://huggingface.co/prithivMLmods/Trash-Net).")
            with gr.Row():
                with gr.Column():
                    # Either a URL...
                    image_classification_url_input = gr.Textbox(
                        label="Image URL",
                        value="https://campuslifeservices.ucsf.edu/upload/facilities/galleries/cardboard_0.jpg",
                        placeholder="Enter the URL of the image to classify",
                        scale=2
                    )
                    # Preview of the image fetched from the URL (filled by the
                    # classify handler's first output).
                    image_classification_image_preview = gr.Image(label="Image Preview", type="pil")
                    # ...or a direct upload (providing both is rejected by the
                    # handler).
                    image_classification_upload_input = gr.Image(
                        label="Or Upload Image",
                        type="pil",
                        scale=2
                    )
            image_classification_button = gr.Button("Classify")
            image_classification_output = gr.Dataframe(
                label="Classification Results",
                headers=["Label", "Probability"],
                interactive=False
            )
            image_classification_button.click(
                fn=image_classification,
                inputs=[image_classification_url_input, image_classification_upload_input],
                outputs=[image_classification_image_preview, image_classification_output]
            )

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio>=5.49.1
2
+ huggingface-hub>=1.0.1
3
+ python-dotenv>=1.0.0
4
+ pandas>=2.0.0
5
+ pillow>=10.0.0
6
+ requests>=2.31.0