ryfluk01 committed
Commit 379a946 · verified · 1 Parent(s): 711743f

Upload 310 files

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. .gitattributes +20 -0
  2. .history/app_20240520154754.py +42 -0
  3. .history/app_20240520174348.py +42 -0
  4. .history/app_20240520175521.py +42 -0
  5. .history/app_20240520175656.py +42 -0
  6. .history/app_20240521123743.py +42 -0
  7. .ipynb_checkpoints/main-checkpoint.ipynb +203 -0
  8. app.py +42 -0
  9. best_model.keras +3 -0
  10. main.ipynb +246 -0
  11. pokemon/Bulbasaur/00000000.png +0 -0
  12. pokemon/Bulbasaur/00000002.PNG +0 -0
  13. pokemon/Bulbasaur/00000003.png +0 -0
  14. pokemon/Bulbasaur/00000004.png +0 -0
  15. pokemon/Bulbasaur/00000005.png +0 -0
  16. pokemon/Bulbasaur/00000006.jpg +0 -0
  17. pokemon/Bulbasaur/00000006.png +0 -0
  18. pokemon/Bulbasaur/00000007.jpg +0 -0
  19. pokemon/Bulbasaur/00000007.png +0 -0
  20. pokemon/Bulbasaur/00000008.png +0 -0
  21. pokemon/Bulbasaur/00000009.png +0 -0
  22. pokemon/Bulbasaur/00000010.png +0 -0
  23. pokemon/Bulbasaur/00000011.png +0 -0
  24. pokemon/Bulbasaur/00000012.png +0 -0
  25. pokemon/Bulbasaur/00000013.png +3 -0
  26. pokemon/Bulbasaur/00000014.jpg +0 -0
  27. pokemon/Bulbasaur/00000014.png +0 -0
  28. pokemon/Bulbasaur/00000015.jpg +0 -0
  29. pokemon/Bulbasaur/00000015.png +0 -0
  30. pokemon/Bulbasaur/00000016.png +0 -0
  31. pokemon/Bulbasaur/00000017.png +0 -0
  32. pokemon/Bulbasaur/00000018.jpg +0 -0
  33. pokemon/Bulbasaur/00000019.jpg +0 -0
  34. pokemon/Bulbasaur/00000019.png +0 -0
  35. pokemon/Bulbasaur/00000020.png +0 -0
  36. pokemon/Bulbasaur/00000021.png +0 -0
  37. pokemon/Bulbasaur/00000023.jpg +0 -0
  38. pokemon/Bulbasaur/00000024.png +0 -0
  39. pokemon/Bulbasaur/00000025.jpg +0 -0
  40. pokemon/Bulbasaur/00000027.jpg +0 -0
  41. pokemon/Bulbasaur/00000027.png +0 -0
  42. pokemon/Bulbasaur/00000028.png +0 -0
  43. pokemon/Bulbasaur/00000029.jpg +0 -0
  44. pokemon/Bulbasaur/00000030.png +0 -0
  45. pokemon/Bulbasaur/00000031.jpg +0 -0
  46. pokemon/Bulbasaur/00000031.png +0 -0
  47. pokemon/Bulbasaur/00000032.jpg +0 -0
  48. pokemon/Bulbasaur/00000032.png +0 -0
  49. pokemon/Bulbasaur/00000034.png +0 -0
  50. pokemon/Bulbasaur/00000035.jpg +0 -0
.gitattributes CHANGED
@@ -53,3 +53,23 @@ PIcture/pokemon/Rhyhorn/00000073.png filter=lfs diff=lfs merge=lfs -text
  PIcture/pokemon/Rhyhorn/00000094.png filter=lfs diff=lfs merge=lfs -text
  PIcture/pokemon/Rhyhorn/00000108.png filter=lfs diff=lfs merge=lfs -text
  PIcture/pokemon/Rhyhorn/00000111.png filter=lfs diff=lfs merge=lfs -text
+ best_model.keras filter=lfs diff=lfs merge=lfs -text
+ pokemon_classifier_model.keras filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000013.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000039.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000040.jpg filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000058.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000059.jpg filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000082.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000140.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Bulbasaur/00000145.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Jigglypuff/00000014.jpg filter=lfs diff=lfs merge=lfs -text
+ pokemon/Jigglypuff/00000019.PNG filter=lfs diff=lfs merge=lfs -text
+ pokemon/Jigglypuff/00000081.jpg filter=lfs diff=lfs merge=lfs -text
+ pokemon/Jigglypuff/00000099.jpg filter=lfs diff=lfs merge=lfs -text
+ pokemon/Rhyhorn/00000012.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Rhyhorn/00000027.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Rhyhorn/00000073.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Rhyhorn/00000094.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Rhyhorn/00000108.png filter=lfs diff=lfs merge=lfs -text
+ pokemon/Rhyhorn/00000111.png filter=lfs diff=lfs merge=lfs -text
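
Every added rule follows the same pattern: route the matching path through the Git LFS filter so the repository keeps a small text pointer while the binary payload lives in LFS storage. Such entries are normally generated by running git lfs track on each oversized file. As a minimal sketch of how one might shortlist candidates, the hypothetical helper below walks a working tree and flags everything above a size threshold; the 1 MB cutoff and the function name are illustrative assumptions, not part of this commit.

    import os

    # Hypothetical helper: shortlist files that may be worth tracking with Git LFS.
    # The 1 MB threshold is an illustrative assumption, not a Git LFS requirement.
    def lfs_candidates(root, threshold_bytes=1_000_000):
        for dirpath, dirnames, filenames in os.walk(root):
            dirnames[:] = [d for d in dirnames if d != ".git"]  # skip git metadata
            for name in filenames:
                path = os.path.join(dirpath, name)
                if os.path.getsize(path) > threshold_bytes:
                    yield os.path.relpath(path, root)

    # Each reported path could then be tracked via `git lfs track <path>`,
    # which appends a "filter=lfs diff=lfs merge=lfs -text" rule like those above.
    for path in lfs_candidates("."):
        print(path)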
.history/app_20240520154754.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+
+ model_path = "pokemon_classifier_model.keras"
+ model = tf.keras.models.load_model(model_path)
+
+
+ labels = ['Pikachu', 'Sandshrew', 'Squirtle']
+
+ def predict_image(image):
+     # Preprocess image
+     image = Image.fromarray(image.astype('uint8'))  # Convert numpy array to PIL image
+     image = image.resize((224, 224))  # Resize the image to 224x224 pixels
+     image = np.array(image) / 255.0  # Convert to float and normalize
+
+     # Ensure the image has 3 color channels
+     if image.ndim == 2:  # If grayscale, convert to RGB
+         image = np.stack((image,) * 3, axis=-1)
+
+     prediction = model.predict(image[None, ...])  # Adding batch dimension
+     confidences = {labels[i]: float(prediction[0][i]) for i in range(len(labels))}
+     return confidences
+
+
+ input_image = gr.Image()
+ output_text = gr.Textbox(label="Predicted Value")
+
+
+ iface = gr.Interface(
+     fn=predict_image,
+     inputs=input_image,
+     outputs=gr.Label(),
+     title="Pokémon Classifier",
+     examples=["images/pikachu.png", "images/squirtle.png", "images/sandshrew.png"],
+     description="Upload an image of Pikachu, Sandshrew, or Squirtle and the classifier will predict which one it is."
+ )
+
+
+
+ iface.launch()
.history/app_20240520174348.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+
+ model_path = "pokemon_classifier_model.keras"
+ model = tf.keras.models.load_model(model_path)
+
+
+ labels = ['Bulbasaur', 'Jigglypuff', 'Rhyhorn']
+
+ def predict_image(image):
+     # Preprocess image
+     image = Image.fromarray(image.astype('uint8'))  # Convert numpy array to PIL image
+     image = image.resize((224, 224))  # Resize the image to 224x224 pixels
+     image = np.array(image) / 255.0  # Convert to float and normalize
+
+     # Ensure the image has 3 color channels
+     if image.ndim == 2:  # If grayscale, convert to RGB
+         image = np.stack((image,) * 3, axis=-1)
+
+     prediction = model.predict(image[None, ...])  # Adding batch dimension
+     confidences = {labels[i]: float(prediction[0][i]) for i in range(len(labels))}
+     return confidences
+
+
+ input_image = gr.Image()
+ output_text = gr.Textbox(label="Predicted Value")
+
+
+ iface = gr.Interface(
+     fn=predict_image,
+     inputs=input_image,
+     outputs=gr.Label(),
+     title="Pokémon Classifier",
+     examples=["images/pikachu.png", "images/squirtle.png", "images/sandshrew.png"],
+     description="Upload an image of Pikachu, Sandshrew, or Squirtle and the classifier will predict which one it is."
+ )
+
+
+
+ iface.launch()
.history/app_20240520175521.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+
+ model_path = "pokemon_classifier_model.keras"
+ model = tf.keras.models.load_model(model_path)
+
+
+ labels = ['Bulbasaur', 'Jigglypuff', 'Rhyhorn']
+
+ def predict_image(image):
+     # Preprocess image
+     image = Image.fromarray(image.astype('uint8'))  # Convert numpy array to PIL image
+     image = image.resize((224, 224))  # Resize the image to 224x224 pixels
+     image = np.array(image) / 255.0  # Convert to float and normalize
+
+     # Ensure the image has 3 color channels
+     if image.ndim == 2:  # If grayscale, convert to RGB
+         image = np.stack((image,) * 3, axis=-1)
+
+     prediction = model.predict(image[None, ...])  # Adding batch dimension
+     confidences = {labels[i]: float(prediction[0][i]) for i in range(len(labels))}
+     return confidences
+
+
+ input_image = gr.Image()
+ output_text = gr.Textbox(label="Predicted Value")
+
+
+ iface = gr.Interface(
+     fn=predict_image,
+     inputs=input_image,
+     outputs=gr.Label(),
+     title="Pokémon Classifier",
+     examples=["images/pikachu.png", "images/squirtle.png", "images/sandshrew.png"],
+     description="Upload an image of Pikachu, Sandshrew, or Squirtle and the classifier will predict which one it is."
+ )
+
+
+
+ iface.launch()
.history/app_20240520175656.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+
+ model_path = "pokemon_classifier_model.keras"
+ model = tf.keras.models.load_model(model_path)
+
+
+ labels = ['Bulbasaur', 'Jigglypuff', 'Rhyhorn']
+
+ def predict_image(image):
+     # Preprocess image
+     image = Image.fromarray(image.astype('uint8'))  # Convert numpy array to PIL image
+     image = image.resize((224, 224))  # Resize the image to 224x224 pixels
+     image = np.array(image) / 255.0  # Convert to float and normalize
+
+     # Ensure the image has 3 color channels
+     if image.ndim == 2:  # If grayscale, convert to RGB
+         image = np.stack((image,) * 3, axis=-1)
+
+     prediction = model.predict(image[None, ...])  # Adding batch dimension
+     confidences = {labels[i]: float(prediction[0][i]) for i in range(len(labels))}
+     return confidences
+
+
+ input_image = gr.Image()
+ output_text = gr.Textbox(label="Predicted Value")
+
+
+ iface = gr.Interface(
+     fn=predict_image,
+     inputs=input_image,
+     outputs=gr.Label(),
+     title="Pokémon Classifier",
+     examples=["images/pikachu.png", "images/squirtle.png", "images/sandshrew.png"],
+     description="Upload an image of Pikachu, Sandshrew, or Squirtle and the classifier will predict which one it is."
+ )
+
+
+
+ iface.launch()
.history/app_20240521123743.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+
+ model_path = "pokemon_classifier_model.keras"
+ model = tf.keras.models.load_model(model_path)
+
+
+ labels = ['Bulbasaur', 'Jigglypuff', 'Rhyhorn']
+
+ def predict_image(image):
+     # Preprocess image
+     image = Image.fromarray(image.astype('uint8'))  # Convert numpy array to PIL image
+     image = image.resize((224, 224))  # Resize the image to 224x224 pixels
+     image = np.array(image) / 255.0  # Convert to float and normalize
+
+     # Ensure the image has 3 color channels
+     if image.ndim == 2:  # If grayscale, convert to RGB
+         image = np.stack((image,) * 3, axis=-1)
+
+     prediction = model.predict(image[None, ...])  # Adding batch dimension
+     confidences = {labels[i]: float(prediction[0][i]) for i in range(len(labels))}
+     return confidences
+
+
+ input_image = gr.Image()
+ output_text = gr.Textbox(label="Predicted Value")
+
+
+ iface = gr.Interface(
+     fn=predict_image,
+     inputs=input_image,
+     outputs=gr.Label(),
+     title="Pokémon Classifier",
+     examples=["pokemon/Bulbasur.png", "pokemon/Jigglypuff.png", "pokemon/Rhyhorn.png"],
+     description="Upload an image of Bulbasur, Jigglypuff, or Rhyhorn and the classifier will predict which one it is."
+ )
+
+
+
+ iface.launch()
.ipynb_checkpoints/main-checkpoint.ipynb ADDED
@@ -0,0 +1,203 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 20,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Found 391 images belonging to 3 classes.\n",
+       "Found 96 images belonging to 3 classes.\n"
+      ]
+     }
+    ],
+    "source": [
+     "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+     "from sklearn.model_selection import train_test_split\n",
+     "import os\n",
+     "\n",
+     "base_dir = 'dataset'\n",
+     "\n",
+     "image_size = (224, 224)\n",
+     "batch_size = 32\n",
+     "\n",
+     "train_datagen = ImageDataGenerator(\n",
+     "    rescale=1./255,\n",
+     "    rotation_range=40,\n",
+     "    width_shift_range=0.2,\n",
+     "    height_shift_range=0.2,\n",
+     "    shear_range=0.2,\n",
+     "    zoom_range=0.2,\n",
+     "    horizontal_flip=True,\n",
+     "    fill_mode='nearest',\n",
+     "    validation_split=0.2\n",
+     ")\n",
+     "\n",
+     "train_generator = train_datagen.flow_from_directory(\n",
+     "    base_dir,\n",
+     "    target_size=image_size,\n",
+     "    batch_size=batch_size,\n",
+     "    class_mode='categorical',\n",
+     "    subset='training'\n",
+     ")\n",
+     "\n",
+     "validation_generator = train_datagen.flow_from_directory(\n",
+     "    base_dir,\n",
+     "    target_size=image_size,\n",
+     "    batch_size=batch_size,\n",
+     "    class_mode='categorical',\n",
+     "    subset='validation'\n",
+     ")\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 21,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from tensorflow.keras.models import Sequential\n",
+     "from tensorflow.keras.layers import Dense, GlobalAveragePooling2D\n",
+     "from tensorflow.keras.applications import ResNet50\n",
+     "from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n",
+     "import tensorflow as tf\n",
+     "\n",
+     "base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))\n",
+     "\n",
+     "base_model.trainable = False\n",
+     "\n",
+     "model = Sequential([\n",
+     "    base_model,\n",
+     "    GlobalAveragePooling2D(),\n",
+     "    Dense(512, activation='relu'),\n",
+     "    Dense(3, activation='softmax')\n",
+     "])\n",
+     "\n",
+     "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
+     "\n",
+     "early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
+     "model_checkpoint = ModelCheckpoint('best_model.keras', save_best_only=True)\n",
+     "reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=2, min_lr=1e-7)\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 22,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Epoch 1/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m27s\u001b[0m 2s/step - accuracy: 0.3927 - loss: 1.3381 - val_accuracy: 0.4688 - val_loss: 1.0825 - learning_rate: 0.0010\n",
+       "Epoch 2/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m22s\u001b[0m 1s/step - accuracy: 0.4262 - loss: 1.1153 - val_accuracy: 0.4792 - val_loss: 1.0168 - learning_rate: 0.0010\n",
+       "Epoch 3/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m21s\u001b[0m 1s/step - accuracy: 0.5207 - loss: 1.0038 - val_accuracy: 0.6146 - val_loss: 0.9397 - learning_rate: 0.0010\n",
+       "Epoch 4/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m21s\u001b[0m 1s/step - accuracy: 0.5667 - loss: 0.9722 - val_accuracy: 0.5521 - val_loss: 0.8991 - learning_rate: 0.0010\n",
+       "Epoch 5/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m24s\u001b[0m 2s/step - accuracy: 0.4955 - loss: 1.0044 - val_accuracy: 0.6562 - val_loss: 0.9241 - learning_rate: 0.0010\n",
+       "Epoch 6/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m30s\u001b[0m 2s/step - accuracy: 0.5938 - loss: 0.9319 - val_accuracy: 0.5938 - val_loss: 0.8967 - learning_rate: 0.0010\n",
+       "Epoch 7/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m28s\u001b[0m 2s/step - accuracy: 0.6017 - loss: 0.9330 - val_accuracy: 0.6354 - val_loss: 0.8814 - learning_rate: 0.0010\n",
+       "Epoch 8/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m27s\u001b[0m 2s/step - accuracy: 0.5250 - loss: 0.9443 - val_accuracy: 0.6458 - val_loss: 0.8834 - learning_rate: 0.0010\n",
+       "Epoch 9/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m27s\u001b[0m 2s/step - accuracy: 0.5166 - loss: 0.9913 - val_accuracy: 0.6562 - val_loss: 0.8957 - learning_rate: 0.0010\n",
+       "Epoch 10/10\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m27s\u001b[0m 2s/step - accuracy: 0.6796 - loss: 0.8320 - val_accuracy: 0.6875 - val_loss: 0.7982 - learning_rate: 2.0000e-04\n"
+      ]
+     }
+    ],
+    "source": [
+     "history = model.fit(\n",
+     "    train_generator,\n",
+     "    epochs=10,\n",
+     "    validation_data=validation_generator,\n",
+     "    callbacks=[early_stopping, model_checkpoint, reduce_lr]\n",
+     ")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 23,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Epoch 10/20\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m51s\u001b[0m 3s/step - accuracy: 0.4460 - loss: 1.3447 - val_accuracy: 0.5625 - val_loss: 0.8845 - learning_rate: 1.0000e-05\n",
+       "Epoch 11/20\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m46s\u001b[0m 3s/step - accuracy: 0.5523 - loss: 1.0105 - val_accuracy: 0.5000 - val_loss: 1.0040 - learning_rate: 1.0000e-05\n",
+       "Epoch 12/20\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m49s\u001b[0m 3s/step - accuracy: 0.6260 - loss: 0.8541 - val_accuracy: 0.4688 - val_loss: 1.1044 - learning_rate: 1.0000e-05\n",
+       "Epoch 13/20\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m48s\u001b[0m 3s/step - accuracy: 0.6385 - loss: 0.7894 - val_accuracy: 0.4583 - val_loss: 1.3327 - learning_rate: 2.0000e-06\n",
+       "Epoch 14/20\n",
+       "\u001b[1m13/13\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m50s\u001b[0m 3s/step - accuracy: 0.6777 - loss: 0.7459 - val_accuracy: 0.4792 - val_loss: 1.6466 - learning_rate: 2.0000e-06\n"
+      ]
+     }
+    ],
+    "source": [
+     "base_model.trainable = True\n",
+     "fine_tune_at = 100\n",
+     "\n",
+     "for layer in base_model.layers[:fine_tune_at]:\n",
+     "    layer.trainable = False\n",
+     "\n",
+     "model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),\n",
+     "              metrics=['accuracy'])\n",
+     "\n",
+     "fine_tune_epochs = 10\n",
+     "total_epochs = history.epoch[-1] + fine_tune_epochs + 1\n",
+     "\n",
+     "history_fine = model.fit(\n",
+     "    train_generator,\n",
+     "    epochs=total_epochs,\n",
+     "    initial_epoch=history.epoch[-1],\n",
+     "    validation_data=validation_generator,\n",
+     "    callbacks=[early_stopping, model_checkpoint, reduce_lr]\n",
+     ")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 25,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.save('pokemon_classifier_model.keras')\n",
+     "\n"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.12"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
app.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import tensorflow as tf
+ from PIL import Image
+ import numpy as np
+
+ model_path = "pokemon_classifier_model.keras"
+ model = tf.keras.models.load_model(model_path)
+
+
+ labels = ['Bulbasaur', 'Jigglypuff', 'Rhyhorn']
+
+ def predict_image(image):
+     # Preprocess image
+     image = Image.fromarray(image.astype('uint8'))  # Convert numpy array to PIL image
+     image = image.resize((224, 224))  # Resize the image to 224x224 pixels
+     image = np.array(image) / 255.0  # Convert to float and normalize
+
+     # Ensure the image has 3 color channels
+     if image.ndim == 2:  # If grayscale, convert to RGB
+         image = np.stack((image,) * 3, axis=-1)
+
+     prediction = model.predict(image[None, ...])  # Adding batch dimension
+     confidences = {labels[i]: float(prediction[0][i]) for i in range(len(labels))}
+     return confidences
+
+
+ input_image = gr.Image()
+ output_text = gr.Textbox(label="Predicted Value")
+
+
+ iface = gr.Interface(
+     fn=predict_image,
+     inputs=input_image,
+     outputs=gr.Label(),
+     title="Pokémon Classifier",
+     examples=["pokemon/Bulbasaur.png", "pokemon/Jigglypuff.png", "pokemon/Rhyhorn.png"],
+     description="Upload an image of Bulbasaur, Jigglypuff, or Rhyhorn and the classifier will predict which one it is."
+ )
+
+
+
+ iface.launch()
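
Since predict_image takes a plain NumPy array (the format gr.Image hands over by default), the pipeline can be smoke-tested without launching the Gradio interface. A minimal sketch, run after the definitions in app.py above; "sample.png" is a hypothetical local test image, not a file from this commit.

    import numpy as np
    from PIL import Image

    # Load a local test image as an RGB NumPy array, mimicking gr.Image's input.
    # "sample.png" is a hypothetical path; substitute any image on disk.
    array = np.array(Image.open("sample.png").convert("RGB"))

    confidences = predict_image(array)  # dict of label -> probability
    print(max(confidences, key=confidences.get), confidences)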
best_model.keras ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4d49cc8758bae0cadc0069d0bc1f76e31d37fbb7366db48916ee4276ac0e1ed
+ size 107570656
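
What Git stores for best_model.keras is just the three-line pointer above; the 107,570,656-byte weight file lives in LFS storage and is materialized by git lfs pull. As a minimal sketch of reading such a pointer, assuming the standard space-separated key/value layout shown here (it only applies while the file is still a pointer, before the real binary has been pulled):

    def parse_lfs_pointer(path):
        """Parse a Git LFS pointer file into its key/value fields."""
        fields = {}
        with open(path, "r", encoding="utf-8") as fh:
            for line in fh:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    # For the pointer above this would yield:
    # {'version': 'https://git-lfs.github.com/spec/v1',
    #  'oid': 'sha256:b4d49cc8...', 'size': '107570656'}
    print(parse_lfs_pointer("best_model.keras"))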
main.ipynb ADDED
@@ -0,0 +1,246 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Import successful\n"
+      ]
+     }
+    ],
+    "source": [
+     "from tensorflow.keras.models import Sequential\n",
+     "from tensorflow.keras.layers import Dense, GlobalAveragePooling2D\n",
+     "from tensorflow.keras.applications import ResNet50\n",
+     "from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\n",
+     "import tensorflow as tf\n",
+     "print(\"Import successful\")\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Found 361 images belonging to 3 classes.\n",
+       "Found 89 images belonging to 3 classes.\n",
+       "Current working directory: c:\\Users\\lukas\\Studium\\6. Semester\\KI Anwendungen\\PIcture\n",
+       "Directory contents: ['.history', '.ipynb_checkpoints', 'app.py', 'main.ipynb', 'pokemon']\n"
+      ]
+     }
+    ],
+    "source": [
+     "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
+     "from sklearn.model_selection import train_test_split\n",
+     "import os\n",
+     "\n",
+     "base_dir = 'pokemon'\n",
+     "\n",
+     "image_size = (224, 224)\n",
+     "batch_size = 32\n",
+     "\n",
+     "train_datagen = ImageDataGenerator(\n",
+     "    rescale=1./255,\n",
+     "    rotation_range=40,\n",
+     "    width_shift_range=0.2,\n",
+     "    height_shift_range=0.2,\n",
+     "    shear_range=0.2,\n",
+     "    zoom_range=0.2,\n",
+     "    horizontal_flip=True,\n",
+     "    fill_mode='nearest',\n",
+     "    validation_split=0.2\n",
+     ")\n",
+     "\n",
+     "train_generator = train_datagen.flow_from_directory(\n",
+     "    base_dir,\n",
+     "    target_size=image_size,\n",
+     "    batch_size=batch_size,\n",
+     "    class_mode='categorical',\n",
+     "    subset='training'\n",
+     ")\n",
+     "\n",
+     "validation_generator = train_datagen.flow_from_directory(\n",
+     "    base_dir,\n",
+     "    target_size=image_size,\n",
+     "    batch_size=batch_size,\n",
+     "    class_mode='categorical',\n",
+     "    subset='validation'\n",
+     ")\n",
+     "\n",
+     "# Print the current working directory\n",
+     "print(\"Current working directory:\", os.getcwd())\n",
+     "\n",
+     "# List the contents of the current directory\n",
+     "print(\"Directory contents:\", os.listdir())"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))\n",
+     "\n",
+     "base_model.trainable = False\n",
+     "\n",
+     "model = Sequential([\n",
+     "    base_model,\n",
+     "    GlobalAveragePooling2D(),\n",
+     "    Dense(512, activation='relu'),\n",
+     "    Dense(3, activation='softmax')\n",
+     "])\n",
+     "\n",
+     "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
+     "\n",
+     "early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
+     "model_checkpoint = ModelCheckpoint('best_model.keras', save_best_only=True)\n",
+     "reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=2, min_lr=1e-7)\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Epoch 1/10\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "c:\\Users\\lukas\\anaconda3\\envs\\kia\\lib\\site-packages\\keras\\src\\trainers\\data_adapters\\py_dataset_adapter.py:121: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored.\n",
+       "  self._warn_if_super_not_called()\n",
+       "c:\\Users\\lukas\\anaconda3\\envs\\kia\\lib\\site-packages\\PIL\\Image.py:1000: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images\n",
+       "  warnings.warn(\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m82s\u001b[0m 5s/step - accuracy: 0.3422 - loss: 1.4997 - val_accuracy: 0.3258 - val_loss: 1.1893 - learning_rate: 0.0010\n",
+       "Epoch 2/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m60s\u001b[0m 4s/step - accuracy: 0.3986 - loss: 1.1751 - val_accuracy: 0.4045 - val_loss: 1.1911 - learning_rate: 0.0010\n",
+       "Epoch 3/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m59s\u001b[0m 4s/step - accuracy: 0.4265 - loss: 1.1501 - val_accuracy: 0.5393 - val_loss: 1.0268 - learning_rate: 0.0010\n",
+       "Epoch 4/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m51s\u001b[0m 4s/step - accuracy: 0.5038 - loss: 1.0047 - val_accuracy: 0.6180 - val_loss: 0.9316 - learning_rate: 0.0010\n",
+       "Epoch 5/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m47s\u001b[0m 3s/step - accuracy: 0.4520 - loss: 1.0198 - val_accuracy: 0.4831 - val_loss: 0.9292 - learning_rate: 0.0010\n",
+       "Epoch 6/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m45s\u001b[0m 3s/step - accuracy: 0.5053 - loss: 0.9459 - val_accuracy: 0.6292 - val_loss: 0.9976 - learning_rate: 0.0010\n",
+       "Epoch 7/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m46s\u001b[0m 3s/step - accuracy: 0.5592 - loss: 0.9549 - val_accuracy: 0.5618 - val_loss: 0.8820 - learning_rate: 0.0010\n",
+       "Epoch 8/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m53s\u001b[0m 4s/step - accuracy: 0.4919 - loss: 0.9838 - val_accuracy: 0.5281 - val_loss: 0.8794 - learning_rate: 0.0010\n",
+       "Epoch 9/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m60s\u001b[0m 4s/step - accuracy: 0.5190 - loss: 0.9214 - val_accuracy: 0.6180 - val_loss: 0.8400 - learning_rate: 0.0010\n",
+       "Epoch 10/10\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m51s\u001b[0m 4s/step - accuracy: 0.5295 - loss: 0.9299 - val_accuracy: 0.5730 - val_loss: 0.8847 - learning_rate: 0.0010\n"
+      ]
+     }
+    ],
+    "source": [
+     "history = model.fit(\n",
+     "    train_generator,\n",
+     "    epochs=10,\n",
+     "    validation_data=validation_generator,\n",
+     "    callbacks=[early_stopping, model_checkpoint, reduce_lr]\n",
+     ")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Epoch 10/20\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m101s\u001b[0m 7s/step - accuracy: 0.4401 - loss: 3.5201 - val_accuracy: 0.4607 - val_loss: 0.9513 - learning_rate: 1.0000e-05\n",
+       "Epoch 11/20\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m78s\u001b[0m 6s/step - accuracy: 0.5555 - loss: 2.0987 - val_accuracy: 0.4157 - val_loss: 1.0550 - learning_rate: 1.0000e-05\n",
+       "Epoch 12/20\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m73s\u001b[0m 6s/step - accuracy: 0.4555 - loss: 1.7529 - val_accuracy: 0.4157 - val_loss: 1.2538 - learning_rate: 1.0000e-05\n",
+       "Epoch 13/20\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m72s\u001b[0m 6s/step - accuracy: 0.5554 - loss: 1.0633 - val_accuracy: 0.4157 - val_loss: 1.4237 - learning_rate: 2.0000e-06\n",
+       "Epoch 14/20\n",
+       "\u001b[1m12/12\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m75s\u001b[0m 6s/step - accuracy: 0.5212 - loss: 1.2389 - val_accuracy: 0.4157 - val_loss: 1.7295 - learning_rate: 2.0000e-06\n"
+      ]
+     }
+    ],
+    "source": [
+     "base_model.trainable = True\n",
+     "fine_tune_at = 100\n",
+     "\n",
+     "# Set the earlier layers to not be trainable\n",
+     "for layer in base_model.layers[:fine_tune_at]:\n",
+     "    layer.trainable = False\n",
+     "\n",
+     "# Ensure you include a loss function here\n",
+     "model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),\n",
+     "              loss='categorical_crossentropy',  # This should be the loss function you used initially\n",
+     "              metrics=['accuracy'])\n",
+     "\n",
+     "fine_tune_epochs = 10\n",
+     "total_epochs = history.epoch[-1] + fine_tune_epochs + 1\n",
+     "\n",
+     "history_fine = model.fit(\n",
+     "    train_generator,\n",
+     "    epochs=total_epochs,\n",
+     "    initial_epoch=history.epoch[-1],\n",
+     "    validation_data=validation_generator,\n",
+     "    callbacks=[early_stopping, model_checkpoint, reduce_lr]\n",
+     ")\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.save('pokemon_classifier_model.keras')\n",
+     "\n"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.19"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
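
Note that ModelCheckpoint('best_model.keras', save_best_only=True) was active in both training phases, so the checkpoint on disk holds the weights from the epoch with the lowest validation loss rather than the final fine-tuning state (whose validation loss was climbing). A minimal sketch of reloading that checkpoint and re-scoring the validation split; it assumes the notebook's validation_generator is still defined:

    import tensorflow as tf

    # Reload the best checkpoint written by ModelCheckpoint during training.
    best_model = tf.keras.models.load_model("best_model.keras")

    # Re-score it on the validation generator from the data-loading cell.
    val_loss, val_accuracy = best_model.evaluate(validation_generator)
    print(f"val_loss={val_loss:.4f}  val_accuracy={val_accuracy:.4f}")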
pokemon/Bulbasaur/00000000.png ADDED
pokemon/Bulbasaur/00000002.PNG ADDED
pokemon/Bulbasaur/00000003.png ADDED
pokemon/Bulbasaur/00000004.png ADDED
pokemon/Bulbasaur/00000005.png ADDED
pokemon/Bulbasaur/00000006.jpg ADDED
pokemon/Bulbasaur/00000006.png ADDED
pokemon/Bulbasaur/00000007.jpg ADDED
pokemon/Bulbasaur/00000007.png ADDED
pokemon/Bulbasaur/00000008.png ADDED
pokemon/Bulbasaur/00000009.png ADDED
pokemon/Bulbasaur/00000010.png ADDED
pokemon/Bulbasaur/00000011.png ADDED
pokemon/Bulbasaur/00000012.png ADDED
pokemon/Bulbasaur/00000013.png ADDED

Git LFS Details

  • SHA256: 4d2a1c30dab7a669b8de36b93351109d6882bdbd276ec6c5a150f4a30699b73f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.3 MB
pokemon/Bulbasaur/00000014.jpg ADDED
pokemon/Bulbasaur/00000014.png ADDED
pokemon/Bulbasaur/00000015.jpg ADDED
pokemon/Bulbasaur/00000015.png ADDED
pokemon/Bulbasaur/00000016.png ADDED
pokemon/Bulbasaur/00000017.png ADDED
pokemon/Bulbasaur/00000018.jpg ADDED
pokemon/Bulbasaur/00000019.jpg ADDED
pokemon/Bulbasaur/00000019.png ADDED
pokemon/Bulbasaur/00000020.png ADDED
pokemon/Bulbasaur/00000021.png ADDED
pokemon/Bulbasaur/00000023.jpg ADDED
pokemon/Bulbasaur/00000024.png ADDED
pokemon/Bulbasaur/00000025.jpg ADDED
pokemon/Bulbasaur/00000027.jpg ADDED
pokemon/Bulbasaur/00000027.png ADDED
pokemon/Bulbasaur/00000028.png ADDED
pokemon/Bulbasaur/00000029.jpg ADDED
pokemon/Bulbasaur/00000030.png ADDED
pokemon/Bulbasaur/00000031.jpg ADDED
pokemon/Bulbasaur/00000031.png ADDED
pokemon/Bulbasaur/00000032.jpg ADDED
pokemon/Bulbasaur/00000032.png ADDED
pokemon/Bulbasaur/00000034.png ADDED
pokemon/Bulbasaur/00000035.jpg ADDED