shiue2000 committed on
Commit
1db0189
·
verified ·
1 Parent(s): b15cbdc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -0
app.py CHANGED
@@ -5,21 +5,47 @@ from transformers import ViTForImageClassification, ViTModel, ViTImageProcessor
5
  from PIL import Image
6
  from sklearn.preprocessing import LabelEncoder
7
  import pandas as pd
 
8
 
9
# Greeting helper: builds a Chinese salutation for the given name.
def greet(name):
    """Return a Chinese greeting string addressed to *name*."""
    message = f"你好,{name}!!"
    return message
12
 
13
  # 圖像預處理
 
 
14
  def preprocess_image(image):
 
 
 
 
 
15
  if isinstance(image, np.ndarray):
16
  image = Image.fromarray(np.uint8(image))
17
  elif not isinstance(image, Image.Image):
18
  image = Image.open(image)
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  if image.mode != 'RGB':
21
  image = image.convert('RGB')
22
 
 
23
  inputs = feature_extractor(images=[image])
24
  image_tensor = torch.tensor(inputs['pixel_values'][0], dtype=torch.float32)
25
 
 
5
  from PIL import Image
6
  from sklearn.preprocessing import LabelEncoder
7
  import pandas as pd
8
+ from PIL import Image, ExifTags
9
 
10
# Greeting helper (Chinese).
def greet(name):
    """Greet *name* in Chinese and return the greeting text."""
    return f"你好,{name}!!"
13
 
14
  # 圖像預處理
15
+ from PIL import Image, ExifTags
16
+
17
  def preprocess_image(image):
18
+ """
19
+ 將輸入圖像轉換為模型可接受的 tensor
20
+ 支援 iPhone 圖片自動旋轉
21
+ """
22
+ # 轉成 PIL Image
23
  if isinstance(image, np.ndarray):
24
  image = Image.fromarray(np.uint8(image))
25
  elif not isinstance(image, Image.Image):
26
  image = Image.open(image)
27
 
28
+ # 修正 iPhone EXIF 方向
29
+ try:
30
+ for orientation in ExifTags.TAGS.keys():
31
+ if ExifTags.TAGS[orientation]=='Orientation':
32
+ break
33
+ exif=dict(image._getexif().items())
34
+ if exif[orientation] == 3:
35
+ image=image.rotate(180, expand=True)
36
+ elif exif[orientation] == 6:
37
+ image=image.rotate(270, expand=True)
38
+ elif exif[orientation] == 8:
39
+ image=image.rotate(90, expand=True)
40
+ except:
41
+ # 沒有 EXIF 資訊就直接跳過
42
+ pass
43
+
44
+ # 轉成 RGB
45
  if image.mode != 'RGB':
46
  image = image.convert('RGB')
47
 
48
+ # 使用 feature extractor
49
  inputs = feature_extractor(images=[image])
50
  image_tensor = torch.tensor(inputs['pixel_values'][0], dtype=torch.float32)
51