q6 committed on
Commit
31a48a1
·
1 Parent(s): b6cf1cb
Files changed (2) hide show
  1. API/app.py +3 -0
  2. Client/Extract Pixiv/ai_search.py +6 -2
API/app.py CHANGED
@@ -27,6 +27,8 @@ env_path = os.path.dirname(os.path.realpath(__file__)) + "/../.env"
27
  if os.path.exists(env_path):
28
  load_dotenv(env_path)
29
  PHPSESSID = os.getenv("PHPSESSID")
 
 
30
  cookies = {"PHPSESSID": PHPSESSID}
31
 
32
  headers = {
@@ -64,6 +66,7 @@ async def search(raw, pages, ai_only=True, cookies=None, headers=None):
64
  responses = await asyncio.gather(*tasks)
65
  for data in responses:
66
  if ai_only:
 
67
  posts = [post for post in data['body']['illustManga']['data'] if post['aiType'] == 2]
68
  else:
69
  posts = data['body']['illustManga']['data']
 
27
# Load the Pixiv session cookie from the repo-level .env file, if present.
if os.path.exists(env_path):
    load_dotenv(env_path)
PHPSESSID = os.getenv("PHPSESSID")

# NOTE(review): do not print the raw PHPSESSID — it is a session secret and
# would leak into stdout/Space logs. Log only whether it was found.
print("PHPSESSID loaded" if PHPSESSID else "PHPSESSID missing")
cookies = {"PHPSESSID": PHPSESSID}
33
 
34
  headers = {
 
66
  responses = await asyncio.gather(*tasks)
67
  for data in responses:
68
  if ai_only:
69
+ print(data['body']['illustManga']['data'])
70
  posts = [post for post in data['body']['illustManga']['data'] if post['aiType'] == 2]
71
  else:
72
  posts = data['body']['illustManga']['data']
Client/Extract Pixiv/ai_search.py CHANGED
@@ -1,6 +1,11 @@
1
  import requests
2
  import os
3
 
 
 
 
 
 
4
  os.chdir(os.path.dirname(os.path.abspath(__file__)))
5
 
6
  input_url = input("Enter the URL: ")
@@ -13,9 +18,8 @@ params = {
13
  'ai_only': True,
14
  }
15
 
16
- response = requests.get('https://q6-p.hf.space/search', params=params)
17
 
18
  data = response.json()
19
-
20
  with open(f"../{data['filename']}.txt", "w") as f:
21
  f.write("\n".join(data['post_ids']))
 
1
import requests
import os

# Toggle between the local dev server and the hosted Space.
local = False
# No trailing slash here: the request URL is later built as f"{endpoint}/search",
# so a trailing slash would yield a double "//" in the path.
endpoint = "http://127.0.0.1:7860" if local else "https://q6-p.hf.space"

# Work relative to this script's directory so the "../<filename>.txt"
# output lands in a predictable place regardless of the caller's CWD.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

input_url = input("Enter the URL: ")
 
18
  'ai_only': True,
19
  }
20
 
21
# Query the search API; fail fast with a clear HTTP error instead of a
# cryptic JSON decode error on a non-2xx response.
response = requests.get(f'{endpoint}/search', params=params)
response.raise_for_status()

data = response.json()

# Write one post id per line, next to the project root.
with open(f"../{data['filename']}.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(data['post_ids']))