sivan26 committed on
Commit
8ff9c6b
·
verified ·
1 Parent(s): 74e2cc2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -59
app.py CHANGED
@@ -2,17 +2,14 @@ import gradio as gr
2
  import requests
3
  import random
4
  import time
5
- from PIL import Image
6
- import io
7
 
8
- def get_random_placeholder_image():
9
-
10
- response = requests.get("https://picsum.photos/400")
11
- return Image.open(io.BytesIO(response.content))
12
 
13
  def get_wikipedia_facts(topic):
14
  if not topic.strip():
15
- return "Please enter a topic or use 'Surprise me!'", None
16
 
17
  headers = {
18
  "User-Agent": "RandomFactApp/3.0 (https://huggingface.co/spaces/yourname) Python requests"
@@ -28,7 +25,7 @@ def get_wikipedia_facts(topic):
28
  }
29
 
30
  try:
31
-
32
  search_response = requests.get(search_url, params=search_params, headers=headers)
33
  time.sleep(0.3)
34
  search_response.raise_for_status()
@@ -36,18 +33,20 @@ def get_wikipedia_facts(topic):
36
 
37
  search_hits = search_data.get("query", {}).get("search", [])
38
  if not search_hits:
39
- return f"Sorry, no information found for '{topic}'.", None
40
 
41
  best_title = search_hits[0]["title"]
42
 
43
-
44
  extract_params = {
45
  "action": "query",
46
  "format": "json",
47
- "prop": "extracts",
48
  "exintro": True,
49
  "explaintext": True,
50
- "titles": best_title
 
 
51
  }
52
 
53
  extract_response = requests.get(search_url, params=extract_params, headers=headers)
@@ -60,53 +59,7 @@ def get_wikipedia_facts(topic):
60
  extract_text = page.get("extract", "")
61
 
62
  if not extract_text:
63
- return f"Sorry, no extract found for '{topic}'.", None
64
 
65
 
66
- sentences = [s.strip() for s in extract_text.replace("\n", " ").split(". ") if s.strip()]
67
- if len(sentences) == 0:
68
- return f"Sorry, no facts available for '{topic}'.", None
69
-
70
-
71
- num_facts = min(3, len(sentences))
72
- facts = random.sample(sentences, num_facts)
73
-
74
-
75
- facts = [fact if fact.endswith(".") else fact + "." for fact in facts]
76
-
77
- facts_text = "\n\n".join(f"💡 {fact}" for fact in facts)
78
-
79
-
80
- image = get_random_placeholder_image()
81
-
82
- return facts_text, image
83
-
84
- except Exception as e:
85
- print("Error:", e)
86
- return "Oops! Something went wrong while fetching your facts.", None
87
-
88
-
89
- random_topics = [
90
- "cats", "space", "chocolate", "Egypt", "Leonardo da Vinci",
91
- "volcanoes", "Tokyo", "honeybees", "quantum physics", "orcas"
92
- ]
93
-
94
- def surprise_topic(_):
95
- topic = random.choice(random_topics)
96
- return get_wikipedia_facts(topic)
97
-
98
- with gr.Blocks() as demo:
99
- gr.Markdown("## 🤖 Topic-based Facts + Placeholder Image\nEnter a topic or click 'Surprise me!' for 3 distinct facts and a random image!")
100
-
101
- with gr.Row():
102
- topic_input = gr.Textbox(placeholder="Enter a topic like 'koalas' or 'Eiffel Tower'")
103
- surprise_button = gr.Button("🎲 Surprise me!")
104
-
105
- facts_output = gr.Textbox(label="3 Distinct Wikipedia Facts")
106
- image_output = gr.Image(label="Random Image")
107
-
108
- topic_input.submit(get_wikipedia_facts, inputs=topic_input, outputs=[facts_output, image_output])
109
- surprise_button.click(surprise_topic, inputs=None, outputs=[facts_output, image_output])
110
 
111
- if __name__ == "__main__":
112
- demo.launch()
 
2
  import requests
3
  import random
4
  import time
5
+ from transformers import pipeline
 
6
 
7
+
8
+ classifier = pipeline("zero-shot-classification")
 
 
9
 
10
  def get_wikipedia_facts(topic):
11
  if not topic.strip():
12
+ return "Please enter a topic or use 'Surprise me!'", None, None
13
 
14
  headers = {
15
  "User-Agent": "RandomFactApp/3.0 (https://huggingface.co/spaces/yourname) Python requests"
 
25
  }
26
 
27
  try:
28
+
29
  search_response = requests.get(search_url, params=search_params, headers=headers)
30
  time.sleep(0.3)
31
  search_response.raise_for_status()
 
33
 
34
  search_hits = search_data.get("query", {}).get("search", [])
35
  if not search_hits:
36
+ return f"Sorry, no information found for '{topic}'.", None, None
37
 
38
  best_title = search_hits[0]["title"]
39
 
40
+
41
  extract_params = {
42
  "action": "query",
43
  "format": "json",
44
+ "prop": "extracts|pageimages",
45
  "exintro": True,
46
  "explaintext": True,
47
+ "titles": best_title,
48
+ "piprop": "thumbnail",
49
+ "pithumbsize": 400
50
  }
51
 
52
  extract_response = requests.get(search_url, params=extract_params, headers=headers)
 
59
  extract_text = page.get("extract", "")
60
 
61
  if not extract_text:
62
+ return f"Sorry, no extract found for '{topic}'.", None, None
63
 
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65