Corin1998 committed on
Commit
ab638b2
·
verified ·
1 Parent(s): c1cc164

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +115 -35
app.py CHANGED
@@ -1,20 +1,25 @@
1
- # --- 変更1: Files の type を "filepath" にし、ハンドラをファイルパス対応へ ---
2
- with gr.Row():
3
- in_files = gr.Files(
4
- label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
5
- file_count="multiple",
6
- file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
7
- type="filepath", # "file" から修正
8
- )
9
- candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
10
-
11
-
12
- # --- 変更2: out_skills は schema 周りの不具合回避のため JSON → Code に ---
13
- with gr.Tab("抽出スキル"):
14
- out_skills = gr.Code(label="スキル一覧(JSON)") # gr.JSON から修正
 
 
 
 
 
 
15
 
16
 
17
- # --- 変更3: ハンドラの files 取り扱いを「パス」前提に修正 ---
18
  def process_resumes(files, candidate_id: str, additional_notes: str = ""):
19
  if not files:
20
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
@@ -22,24 +27,24 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
22
  partial_records = []
23
  raw_texts = []
24
 
25
- for path in files: # ← UploadedFile ではなく filepath の配列
26
- with open(path, "rb") as fh:
27
- raw_bytes = fh.read()
28
- fname = os.path.basename(path)
29
 
30
- filetype = detect_filetype(fname, raw_bytes)
31
-
32
- # 1) テキスト抽出
33
  if filetype in {"pdf", "image"}:
34
- text = extract_text_with_openai(raw_bytes, filename=fname, filetype=filetype)
35
  else:
36
  base_text = load_doc_text(filetype, raw_bytes)
37
- text = extract_text_with_openai(base_text.encode("utf-8"), filename=fname, filetype="txt")
 
38
 
39
- raw_texts.append({"filename": fname, "text": text})
40
 
41
- # 2) 構造化
42
  structured = structure_with_openai(text)
 
43
  normalized = normalize_resume({
44
  "work_experience": structured.get("work_experience_raw", ""),
45
  "education": structured.get("education_raw", ""),
@@ -47,18 +52,38 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
47
  "skills": ", ".join(structured.get("skills_list", [])),
48
  })
49
  partial_records.append({
50
- "source": fname,
51
  "text": text,
52
  "structured": structured,
53
  "normalized": normalized,
54
  })
55
 
56
- # ...(中略:ロジックはそのまま)...
 
 
 
 
 
 
 
 
 
 
57
 
58
- # 8) まとめ
 
 
 
 
 
 
 
 
 
 
59
  result_json = {
60
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
61
- "files": [os.path.basename(p) for p in files], # ← fname 配列へ
62
  "merged": merged,
63
  "skills": skills,
64
  "quality_score": score,
@@ -67,12 +92,25 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
67
  "notes": additional_notes,
68
  }
69
 
70
- # ...(Datasets 保存処理はそのまま)...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
- # 返り値:out_skills を Code にしたので JSON 文字列で返す
73
  return (
74
  json.dumps(result_json, ensure_ascii=False, indent=2),
75
- json.dumps(skills, ensure_ascii=False, indent=2), # ← 文字列化
76
  json.dumps(score, ensure_ascii=False, indent=2),
77
  summaries["300chars"],
78
  summaries["100chars"],
@@ -82,6 +120,48 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
82
  )
83
 
84
 
85
- # --- 変更4: HF Spaces での起動を安定させるため launch の引数を明示 ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  if __name__ == "__main__":
87
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
1
+ import os
2
+ import io
3
+ import json
4
+ import hashlib
5
+ import gradio as gr
6
+
7
+ from pipelines.openai_ingest import (
8
+ extract_text_with_openai,
9
+ structure_with_openai,
10
+ summarize_with_openai,
11
+ )
12
+ from pipelines.parsing import normalize_resume
13
+ from pipelines.merge import merge_normalized_records
14
+ from pipelines.skills import extract_skills
15
+ from pipelines.anonymize import anonymize_text, render_anonymized_pdf
16
+ from pipelines.scoring import compute_quality_score
17
+ from pipelines.storage import persist_to_hf
18
+ from pipelines.utils import detect_filetype, load_doc_text
19
+
20
+ APP_TITLE = "候補者インテーク & レジュメ標準化(OpenAI版)"
21
 
22
 
 
23
  def process_resumes(files, candidate_id: str, additional_notes: str = ""):
24
  if not files:
25
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
 
27
  partial_records = []
28
  raw_texts = []
29
 
30
+ for f in files:
31
+ # gr.Files(type="binary") では .read() / .name が利用可能
32
+ raw_bytes = f.read()
33
+ filetype = detect_filetype(f.name, raw_bytes)
34
 
35
+ # 1) テキスト抽出:画像/PDFはOpenAI Vision OCR、docx/txtは生文面+OpenAI整形
 
 
36
  if filetype in {"pdf", "image"}:
37
+ text = extract_text_with_openai(raw_bytes, filename=f.name, filetype=filetype)
38
  else:
39
  base_text = load_doc_text(filetype, raw_bytes)
40
+ # 生テキストをそのままOpenAIへ渡し、軽く整形した全文を返す
41
+ text = extract_text_with_openai(base_text.encode("utf-8"), filename=f.name, filetype="txt")
42
 
43
+ raw_texts.append({"filename": f.name, "text": text})
44
 
45
+ # 2) OpenAIでセクション構造化
46
  structured = structure_with_openai(text)
47
+ # 念のためルールベース正規化も適用(期間抽出など補助)
48
  normalized = normalize_resume({
49
  "work_experience": structured.get("work_experience_raw", ""),
50
  "education": structured.get("education_raw", ""),
 
52
  "skills": ", ".join(structured.get("skills_list", [])),
53
  })
54
  partial_records.append({
55
+ "source": f.name,
56
  "text": text,
57
  "structured": structured,
58
  "normalized": normalized,
59
  })
60
 
61
+ # 3) 統合(複数ファイル→1候補者)
62
+ merged = merge_normalized_records([r["normalized"] for r in partial_records])
63
+
64
+ # 4) スキル抽出(辞書/正規表現)
65
+ merged_text = "\n\n".join([r["text"] for r in partial_records])
66
+ skills = extract_skills(merged_text, {
67
+ "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
68
+ "education": merged.get("raw_sections", {}).get("education", ""),
69
+ "certifications": merged.get("raw_sections", {}).get("certifications", ""),
70
+ "skills": ", ".join(merged.get("skills", [])),
71
+ })
72
 
73
+ # 5) 匿名化
74
+ anonymized_text, anon_map = anonymize_text(merged_text)
75
+ anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
76
+
77
+ # 6) 品質スコア
78
+ score = compute_quality_score(merged_text, merged)
79
+
80
+ # 7) 要約(300/100/1文)
81
+ summaries = summarize_with_openai(merged_text)
82
+
83
+ # 8) 構造化出力
84
  result_json = {
85
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
86
+ "files": [f.name for f in files],
87
  "merged": merged,
88
  "skills": skills,
89
  "quality_score": score,
 
92
  "notes": additional_notes,
93
  }
94
 
95
+ # 9) HF Datasets 保存
96
+ dataset_repo = os.environ.get("DATASET_REPO")
97
+ commit_info = None
98
+ if dataset_repo:
99
+ file_hash = result_json["candidate_id"]
100
+ commit_info = persist_to_hf(
101
+ dataset_repo=dataset_repo,
102
+ record=result_json,
103
+ anon_pdf_bytes=anon_pdf_bytes,
104
+ parquet_path=f"candidates/{file_hash}.parquet",
105
+ json_path=f"candidates/{file_hash}.json",
106
+ pdf_path=f"candidates/{file_hash}.anon.pdf",
107
+ )
108
+
109
+ anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
110
 
 
111
  return (
112
  json.dumps(result_json, ensure_ascii=False, indent=2),
113
+ skills,
114
  json.dumps(score, ensure_ascii=False, indent=2),
115
  summaries["300chars"],
116
  summaries["100chars"],
 
120
  )
121
 
122
 
123
# --- UI definition: upload resumes → run → tabbed outputs ---
with gr.Blocks(title=APP_TITLE) as demo:
    gr.Markdown(f"# {APP_TITLE}\n複数ファイルを統合→OpenAIで読み込み/構造化/要約→匿名化→Datasets保存")

    with gr.Row():
        in_files = gr.Files(
            label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
            file_count="multiple",
            file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
            # Gradio 4.44 series accepts 'binary' or 'filepath' here.
            # NOTE(review): with type="binary" Gradio 4.x delivers raw bytes per
            # file, but the handler reads `f.read()` / `f.name` — that usage
            # suggests type="filepath" (or a file-object wrapper) may be
            # intended; confirm against the running Gradio version.
            type="binary",
        )
        # NOTE(review): exact indentation was lost in the diff view — these two
        # textboxes are assumed to sit inside the Row next to the uploader;
        # confirm against the original file.
        candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
        notes = gr.Textbox(label="補足メモ(任意)", lines=3)

    run_btn = gr.Button("実行")

    # Output tabs — one component per element of process_resumes' return tuple.
    with gr.Tab("構造化JSON"):
        out_json = gr.Code(label="統合出力 (JSON)")

    with gr.Tab("抽出スキル"):
        out_skills = gr.JSON(label="スキル一覧")

    with gr.Tab("品質スコア"):
        out_score = gr.Code(label="品質評価")

    with gr.Tab("要約 (300/100/1文)"):
        out_sum_300 = gr.Textbox(label="300字要約")
        out_sum_100 = gr.Textbox(label="100字要約")
        out_sum_1 = gr.Textbox(label="1文要約")

    with gr.Tab("匿名PDF"):
        out_pdf = gr.File(label="匿名PDFダウンロード")

    with gr.Tab("Datasets 保存ログ"):
        out_commit = gr.Code(label="コミット情報")

    # Wire the button: `outputs` order must match the 8-tuple returned by
    # process_resumes (json, skills, score, 300-char, 100-char, 1-sentence,
    # anonymized PDF, commit info).
    run_btn.click(
        process_resumes,
        inputs=[in_files, candidate_id, notes],
        outputs=[out_json, out_skills, out_score, out_sum_300, out_sum_100, out_sum_1, out_pdf, out_commit],
    )
163
+
164
+
165
  if __name__ == "__main__":
166
+ # Hugging Face Spaces では share=True は不要/非推奨
167
+ demo.launch()