Corin1998 committed on
Commit
87d8093
·
verified ·
1 Parent(s): fcf00ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -32
app.py CHANGED
@@ -21,27 +21,38 @@ APP_TITLE = "候補者インテーク & レジュメ標準化(OpenAI版)"
21
 
22
 
23
  def process_resumes(files, candidate_id: str, additional_notes: str = ""):
 
 
 
 
24
  if not files:
25
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
26
 
27
  partial_records = []
28
  raw_texts = []
29
 
30
- for f in files:
31
- # gr.Files(type="binary"): .read() / .name が利用可能
32
- raw_bytes = f.read()
33
- filetype = detect_filetype(f.name, raw_bytes)
 
 
 
34
 
35
- # 1) テキスト抽出
 
 
 
36
  if filetype in {"pdf", "image"}:
37
- text = extract_text_with_openai(raw_bytes, filename=f.name, filetype=filetype)
38
  else:
39
  base_text = load_doc_text(filetype, raw_bytes)
40
- text = extract_text_with_openai(base_text.encode("utf-8"), filename=f.name, filetype="txt")
 
41
 
42
- raw_texts.append({"filename": f.name, "text": text})
43
 
44
- # 2) 構造化 -> 3) 正規化
45
  structured = structure_with_openai(text)
46
  normalized = normalize_resume({
47
  "work_experience": structured.get("work_experience_raw", ""),
@@ -50,16 +61,16 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
50
  "skills": ", ".join(structured.get("skills_list", [])),
51
  })
52
  partial_records.append({
53
- "source": f.name,
54
  "text": text,
55
  "structured": structured,
56
  "normalized": normalized,
57
  })
58
 
59
- # 4) 複数ファイル統合
60
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
61
 
62
- # 5) スキル抽出
63
  merged_text = "\n\n".join([r["text"] for r in partial_records])
64
  skills = extract_skills(merged_text, {
65
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
@@ -68,20 +79,20 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
68
  "skills": ", ".join(merged.get("skills", [])),
69
  })
70
 
71
- # 6) 匿名化
72
  anonymized_text, anon_map = anonymize_text(merged_text)
73
  anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
74
 
75
- # 7) 品質スコア
76
  score = compute_quality_score(merged_text, merged)
77
 
78
- # 8) 要約
79
  summaries = summarize_with_openai(merged_text)
80
 
81
- # 9) 構造化出力(最終JSON)
82
  result_json = {
83
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
84
- "files": [f.name for f in files],
85
  "merged": merged,
86
  "skills": skills,
87
  "quality_score": score,
@@ -90,7 +101,7 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
90
  "notes": additional_notes,
91
  }
92
 
93
- # 10) HF Datasets 保存(任意)
94
  dataset_repo = os.environ.get("DATASET_REPO")
95
  commit_info = None
96
  if dataset_repo:
@@ -104,24 +115,23 @@ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
104
  pdf_path=f"candidates/{file_hash}.anon.pdf",
105
  )
106
 
107
- # gr.File には (filename, bytes) を返す
108
  anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
109
 
110
- # ⚠️ gr.JSON は 4.44 で API スキーマ生成がコケる事があるため
111
- # 画面表示用はすべて「文字列」にして gr.Code へ渡す
112
  return (
113
- json.dumps(result_json, ensure_ascii=False, indent=2), # out_json -> Code
114
- json.dumps(skills, ensure_ascii=False, indent=2), # out_skills -> Code
115
- json.dumps(score, ensure_ascii=False, indent=2), # out_score -> Code
116
- summaries["300chars"],
117
- summaries["100chars"],
118
- summaries["onesent"],
119
  anon_pdf,
120
  json.dumps(commit_info or {"status": "skipped (DATASET_REPO not set)"}, ensure_ascii=False, indent=2),
121
  )
122
 
123
 
124
- with gr.Blocks(title=APP_TITLE, analytics_enabled=False) as demo:
125
  gr.Markdown(f"# {APP_TITLE}\n複数ファイルを統合→OpenAIで読み込み/構造化/要約→匿名化→Datasets保存")
126
 
127
  with gr.Row():
@@ -129,7 +139,7 @@ with gr.Blocks(title=APP_TITLE, analytics_enabled=False) as demo:
129
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
130
  file_count="multiple",
131
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
132
- type="binary", # 4.44系は 'binary' or 'filepath'
133
  )
134
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
135
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
@@ -140,10 +150,11 @@ with gr.Blocks(title=APP_TITLE, analytics_enabled=False) as demo:
140
  out_json = gr.Code(label="統合出力 (JSON)")
141
 
142
  with gr.Tab("抽出スキル"):
 
143
  out_skills = gr.Code(label="スキル一覧(JSON表示)")
144
 
145
  with gr.Tab("品質スコア"):
146
- out_score = gr.Code(label="品質評価(JSON表示)")
147
 
148
  with gr.Tab("要約 (300/100/1文)"):
149
  out_sum_300 = gr.Textbox(label="300字要約")
@@ -164,10 +175,11 @@ with gr.Blocks(title=APP_TITLE, analytics_enabled=False) as demo:
164
 
165
 
166
  if __name__ == "__main__":
167
- # Spaces 等のPaaSで localhost アクセス不可な環境に合わせて明示
168
  demo.launch(
169
  server_name="0.0.0.0",
170
  server_port=int(os.environ.get("PORT", "7860")),
171
- share=True, # 必要環境での起動失敗を回避
172
  show_error=True,
 
173
  )
 
21
 
22
 
23
  def process_resumes(files, candidate_id: str, additional_notes: str = ""):
24
+ """
25
+ files: gr.Files(type="filepath") から渡る「ファイルパスのリスト」
26
+ 返り値は Gradio の API スキーマ生成エラーを避けるため、**全て文字列 or ファイル**に統一する。
27
+ """
28
  if not files:
29
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
30
 
31
  partial_records = []
32
  raw_texts = []
33
 
34
+ # Files(type="filepath") files はパスのリスト
35
+ for path in files:
36
+ try:
37
+ with open(path, "rb") as rf:
38
+ raw_bytes = rf.read()
39
+ except Exception as e:
40
+ raise gr.Error(f"ファイル読み込みに失敗しました: {path}: {e}")
41
 
42
+ fname = os.path.basename(path)
43
+ filetype = detect_filetype(fname, raw_bytes)
44
+
45
+ # 1) テキスト抽出:画像/PDFはOpenAI Vision OCR、docx/txtは生文面+OpenAI整形
46
  if filetype in {"pdf", "image"}:
47
+ text = extract_text_with_openai(raw_bytes, filename=fname, filetype=filetype)
48
  else:
49
  base_text = load_doc_text(filetype, raw_bytes)
50
+ # 生テキストをそのままOpenAIへ渡し、軽く整形した全文を返す
51
+ text = extract_text_with_openai(base_text.encode("utf-8"), filename=fname, filetype="txt")
52
 
53
+ raw_texts.append({"filename": fname, "text": text})
54
 
55
+ # 2) OpenAIでセクション構造化 → ルールベース正規化
56
  structured = structure_with_openai(text)
57
  normalized = normalize_resume({
58
  "work_experience": structured.get("work_experience_raw", ""),
 
61
  "skills": ", ".join(structured.get("skills_list", [])),
62
  })
63
  partial_records.append({
64
+ "source": fname,
65
  "text": text,
66
  "structured": structured,
67
  "normalized": normalized,
68
  })
69
 
70
+ # 3) 統合(複数ファイル→1候補者)
71
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
72
 
73
+ # 4) スキル抽出(辞書/正規表現)
74
  merged_text = "\n\n".join([r["text"] for r in partial_records])
75
  skills = extract_skills(merged_text, {
76
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
 
79
  "skills": ", ".join(merged.get("skills", [])),
80
  })
81
 
82
+ # 5) 匿名化
83
  anonymized_text, anon_map = anonymize_text(merged_text)
84
  anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
85
 
86
+ # 6) 品質スコア
87
  score = compute_quality_score(merged_text, merged)
88
 
89
+ # 7) 要約(300/100/1文)
90
  summaries = summarize_with_openai(merged_text)
91
 
92
+ # 8) 構造化出力(文字列化して返す)
93
  result_json = {
94
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
95
+ "files": [os.path.basename(p) for p in files],
96
  "merged": merged,
97
  "skills": skills,
98
  "quality_score": score,
 
101
  "notes": additional_notes,
102
  }
103
 
104
+ # 9) HF Datasets 保存
105
  dataset_repo = os.environ.get("DATASET_REPO")
106
  commit_info = None
107
  if dataset_repo:
 
115
  pdf_path=f"candidates/{file_hash}.anon.pdf",
116
  )
117
 
118
+ # gr.File 用の (filename, bytes) タプル
119
  anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
120
 
121
+ # 返り値は**すべて文字列**(と1つのファイル)に統一
 
122
  return (
123
+ json.dumps(result_json, ensure_ascii=False, indent=2),
124
+ json.dumps(skills, ensure_ascii=False, indent=2),
125
+ json.dumps(score, ensure_ascii=False, indent=2),
126
+ summaries.get("300chars", ""),
127
+ summaries.get("100chars", ""),
128
+ summaries.get("onesent", ""),
129
  anon_pdf,
130
  json.dumps(commit_info or {"status": "skipped (DATASET_REPO not set)"}, ensure_ascii=False, indent=2),
131
  )
132
 
133
 
134
+ with gr.Blocks(title=APP_TITLE) as demo:
135
  gr.Markdown(f"# {APP_TITLE}\n複数ファイルを統合→OpenAIで読み込み/構造化/要約→匿名化→Datasets保存")
136
 
137
  with gr.Row():
 
139
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
140
  file_count="multiple",
141
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
142
+ type="filepath", # 重要: 'file' は無効。'filepath' か 'binary'
143
  )
144
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
145
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
 
150
  out_json = gr.Code(label="統合出力 (JSON)")
151
 
152
  with gr.Tab("抽出スキル"):
153
+ # gr.JSON は API スキーマ生成で例外が出るケースがあるため回避し、文字列(JSON)を表示
154
  out_skills = gr.Code(label="スキル一覧(JSON表示)")
155
 
156
  with gr.Tab("品質スコア"):
157
+ out_score = gr.Code(label="品質評価(JSON)")
158
 
159
  with gr.Tab("要約 (300/100/1文)"):
160
  out_sum_300 = gr.Textbox(label="300字要約")
 
175
 
176
 
177
  if __name__ == "__main__":
178
+ # Spaces 等で localhost 非公開環境を考慮
179
  demo.launch(
180
  server_name="0.0.0.0",
181
  server_port=int(os.environ.get("PORT", "7860")),
182
+ share=True,
183
  show_error=True,
184
+ analytics_enabled=False,
185
  )