Atulit23 commited on
Commit
a52e9a5
·
verified ·
1 Parent(s): 7d31b02

Upload folder using huggingface_hub

Browse files
Files changed (10) hide show
  1. .github/workflows/update_space.yml +28 -0
  2. README.md +3 -8
  3. a.js +0 -0
  4. app.py +56 -0
  5. best (4).pt +3 -0
  6. best (5).pt +3 -0
  7. new_yolo.py +48 -0
  8. ocr_app.py +52 -0
  9. popup_yolo.ipynb +0 -0
  10. requirements.txt +17 -0
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Deploys the Gradio app to Hugging Face Spaces on every push to main.
name: Run Python script

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        # v2 runs on the deprecated Node 12 runner; v4 is the current release.
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'

      - name: Install Gradio
        run: python -m pip install gradio

      - name: Log in to Hugging Face
        # Pass the token through the environment rather than interpolating the
        # secret into the command line, so it never appears in the script text.
        env:
          HF_TOKEN: ${{ secrets.hf_token }}
        run: python -c 'import os, huggingface_hub; huggingface_hub.login(token=os.environ["HF_TOKEN"])'

      - name: Deploy to Spaces
        run: gradio deploy
README.md CHANGED
@@ -1,12 +1,7 @@
1
  ---
2
- title: Ui Deception
3
- emoji: 👀
4
- colorFrom: green
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 4.16.0
8
  app_file: app.py
9
- pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: ui-deception
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 3.44.4
6
  ---
7
 
 
a.js ADDED
File without changes
app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from ultralytics import YOLO
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import gradio as gr

# Pop-up / notification detector weights (shipped alongside this app via git-lfs).
model = YOLO('best (5).pt')


def index(img_url):
    """Download the image at *img_url*, run YOLO pop-up detection, and
    return the number of boxes detected at confidence >= 0.7.

    Parameters
    ----------
    img_url : str
        Publicly reachable URL of the image to analyse.

    Returns
    -------
    int
        Count of detected boxes; 0 when the payload is not a decodable image.
    """
    # Timeout so a dead URL cannot hang the Gradio worker indefinitely.
    response = requests.get(img_url, stream=True, timeout=30)
    img_array = np.asarray(bytearray(response.content), dtype=np.uint8)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    print(img_url)

    if img is None:
        # Non-image payload (e.g. an HTML error page) — report zero boxes
        # instead of crashing inside model.predict.
        return 0

    results = model.predict(source=img, conf=0.7)

    boxes = results[0].boxes.xyxy.tolist()
    classes = results[0].boxes.cls.tolist()
    names = results[0].names
    confidences = results[0].boxes.conf.tolist()

    # Debug output for the Space logs.
    print(boxes)
    print(classes)
    print(names)
    print(confidences)

    # NOTE(review): the UI labels the output "Result Dictionary", but the
    # original app returned only the box count; that contract is kept here.
    # (The previously-built result_dict was never used and has been removed.)
    return len(boxes)


inputs_image_url = [
    gr.Textbox(type="text", label="Image URL"),
]

outputs_result_dict = [
    gr.Textbox(type="text", label="Result Dictionary"),
]

interface_image_url = gr.Interface(
    fn=index,
    inputs=inputs_image_url,
    outputs=outputs_result_dict,
    title="Popup detection",
    cache_examples=False,
)

gr.TabbedInterface(
    [interface_image_url],
    tab_names=['Image inference']
).queue().launch()
best (4).pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05a39bfcd900571c5224acf52909d170f9924233a697c9226134625d437bd9e1
3
+ size 22554073
best (5).pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97ef1e395ae24b79b06e9539b6b9347e8a9f711185f1ee115dc6fff741bc6da0
3
+ size 22515161
new_yolo.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from ultralytics import YOLO
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# UI-element detector weights and a sample screenshot to run them on.
model = YOLO('api/all_elements.pt')
img = cv2.imread('api/Screenshot 2024-01-19 000410.png')

# Class-id -> human-readable label for the custom model.
classes_ = {0: 'Button', 1: 'Edit Text', 2: 'Header Bar', 3: 'Image Button',
            4: 'Image View', 5: 'Text Button', 6: 'Text View'}

results = model.predict(source=img, conf=0.5)

boxes = results[0].boxes.xyxy.tolist()
classes = results[0].boxes.cls.tolist()
names = results[0].names
confidences = results[0].boxes.conf.tolist()

print(boxes)
print(classes)

# NOTE(review): the original also looped over zip(boxes, classes, confidences)
# assigning locals that were never used; that dead loop has been removed.


def plot_img_bbox(img, target):
    """Draw *target* boxes ([x1, y1, x2, y2]) over *img* with class labels.

    Reads the module-level ``classes`` list for per-box labels, so *target*
    must be index-aligned with it.
    """
    fig, a = plt.subplots(1, 1)
    fig.set_size_inches(10, 10)
    a.imshow(img)
    for i, box in enumerate(target):
        x, y, width, height = box[0], box[1], box[2] - box[0], box[3] - box[1]
        rect = patches.Rectangle((x, y),
                                 width, height,
                                 linewidth=2,
                                 edgecolor='r',
                                 facecolor='none')
        # cls ids come back as floats from .tolist(); int() makes the
        # dict lookup explicit instead of relying on float/int hash equality.
        a.text(x, y - 20, classes_[int(classes[i])], color='b',
               verticalalignment='top')

        a.add_patch(rect)
    plt.show()


plot_img_bbox(img, boxes)
ocr_app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from paddleocr import PaddleOCR
import requests
import numpy as np
from PIL import Image
from io import BytesIO
import json
import gradio as gr
import paddleocr

ocr = PaddleOCR(use_angle_cls=True, lang='en')


def index(url):
    """Fetch the image at *url*, run PaddleOCR on it, and return the
    detected texts, boxes and scores as a JSON string.

    Parameters
    ----------
    url : str
        Publicly reachable URL of the image to OCR.

    Returns
    -------
    str
        JSON object with keys "texts", "boxes" and "scores".
    """
    # Timeout so a dead URL cannot hang the Gradio worker indefinitely.
    response = requests.get(url, timeout=30)
    img = Image.open(BytesIO(response.content))
    # (The original resized by a factor of 1, which is a no-op — removed.)

    img_array = np.array(img.convert('RGB'))

    result = ocr.ocr(img_array)

    # NOTE(review): recent PaddleOCR versions return one result list per
    # input image, so the detected lines live in result[0]; iterating
    # `result` itself (as the original did) yields the per-image wrapper
    # rather than individual lines. Confirm against the pinned paddleocr
    # version in requirements.
    lines = result[0] if result and result[0] else []

    boxes = [line[0] for line in lines]
    txts = [line[1][0] for line in lines]
    scores = [line[1][1] for line in lines]

    # Debug output for the Space logs.
    print(boxes)
    print(txts)

    output_dict = {"texts": txts, "boxes": boxes, "scores": scores}
    return json.dumps(output_dict)


inputs_image_url = [
    gr.Textbox(type="text", label="Image URL"),
]

outputs_result_json = [
    gr.Textbox(type="text", label="Result JSON"),
]

interface_image_url = gr.Interface(
    fn=index,
    inputs=inputs_image_url,
    outputs=outputs_result_json,
    title="Text Extraction",
    cache_examples=False,
).queue().launch()
popup_yolo.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ flask-mongoengine @ git+https://github.com/idoshr/flask-mongoengine.git@e244408acf440c4208f7ddcd6e5d819cb472e4da
2
+ flask
3
+ requests
4
+ datetime
5
+ pandas
6
+ numpy
7
+ gensim
8
+ requests  # duplicate of the entry on line 3; safe to drop
9
+ bs4
10
+ tensorflow
11
+ ultralytics
12
+ opencv-python
13
+ matplotlib
14
+ gunicorn
15
+ gevent
16
+ streamlit
17
+ gradio