Corin1998 committed on
Commit
a82717b
·
verified ·
1 Parent(s): 85287ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -26
app.py CHANGED
@@ -20,33 +20,30 @@ from pipelines.utils import detect_filetype, load_doc_text
20
  APP_TITLE = "候補者インテーク & レジュメ標準化(OpenAI版)"
21
 
22
 
23
- def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
24
- if not filepaths:
25
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
26
 
27
  partial_records = []
28
  raw_texts = []
29
 
30
- # ⚠️ gr.Files(type="filepath") のため、ここは「パス文字列」の配列
31
- for path in filepaths:
32
- filepath = str(path)
33
- filename = os.path.basename(filepath)
34
- with open(filepath, "rb") as fp:
35
- raw_bytes = fp.read()
36
 
37
- filetype = detect_filetype(filename, raw_bytes)
38
-
39
- # 1) テキスト抽出:画像/PDF→Vision OCR、docx/txt→生文面+整形
40
  if filetype in {"pdf", "image"}:
41
- text = extract_text_with_openai(raw_bytes, filename=filename, filetype=filetype)
42
  else:
43
  base_text = load_doc_text(filetype, raw_bytes)
44
- text = extract_text_with_openai(base_text.encode("utf-8"), filename=filename, filetype="txt")
 
45
 
46
- raw_texts.append({"filename": filename, "text": text})
47
 
48
- # 2) 構造化 → 正規化
49
  structured = structure_with_openai(text)
 
50
  normalized = normalize_resume({
51
  "work_experience": structured.get("work_experience_raw", ""),
52
  "education": structured.get("education_raw", ""),
@@ -54,7 +51,7 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
54
  "skills": ", ".join(structured.get("skills_list", [])),
55
  })
56
  partial_records.append({
57
- "source": filename,
58
  "text": text,
59
  "structured": structured,
60
  "normalized": normalized,
@@ -63,7 +60,7 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
63
  # 3) 統合(複数ファイル→1候補者)
64
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
65
 
66
- # 4) スキル抽出
67
  merged_text = "\n\n".join([r["text"] for r in partial_records])
68
  skills = extract_skills(merged_text, {
69
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
@@ -72,20 +69,20 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
72
  "skills": ", ".join(merged.get("skills", [])),
73
  })
74
 
75
- # 5) 匿名化 → PDF化
76
  anonymized_text, anon_map = anonymize_text(merged_text)
77
  anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
78
 
79
  # 6) 品質スコア
80
  score = compute_quality_score(merged_text, merged)
81
 
82
- # 7) 要約
83
  summaries = summarize_with_openai(merged_text)
84
 
85
  # 8) 構造化出力
86
  result_json = {
87
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
88
- "files": [os.path.basename(p) for p in filepaths],
89
  "merged": merged,
90
  "skills": skills,
91
  "quality_score": score,
@@ -94,7 +91,7 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
94
  "notes": additional_notes,
95
  }
96
 
97
- # 9) HF Datasets 保存(任意)
98
  dataset_repo = os.environ.get("DATASET_REPO")
99
  commit_info = None
100
  if dataset_repo:
@@ -110,10 +107,9 @@ def process_resumes(filepaths, candidate_id: str, additional_notes: str = ""):
110
 
111
  anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
112
 
113
- # ← GradioのAPIスキーマ生成バグ回避のため、JSONは文字列で返す(gr.Codeに表示)
114
  return (
115
  json.dumps(result_json, ensure_ascii=False, indent=2),
116
- json.dumps(skills, ensure_ascii=False, indent=2),
117
  json.dumps(score, ensure_ascii=False, indent=2),
118
  summaries["300chars"],
119
  summaries["100chars"],
@@ -131,7 +127,7 @@ with gr.Blocks(title=APP_TITLE) as demo:
131
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
132
  file_count="multiple",
133
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
134
- type="filepath", # 重要: 'file' ではなく 'filepath'
135
  )
136
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
137
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
@@ -142,7 +138,8 @@ with gr.Blocks(title=APP_TITLE) as demo:
142
  out_json = gr.Code(label="統合出力 (JSON)")
143
 
144
  with gr.Tab("抽出スキル"):
145
- out_skills = gr.Code(label="スキル一覧 (JSON)") # gr.JSON は使わない
 
146
 
147
  with gr.Tab("品質スコア"):
148
  out_score = gr.Code(label="品質評価")
@@ -166,5 +163,5 @@ with gr.Blocks(title=APP_TITLE) as demo:
166
 
167
 
168
  if __name__ == "__main__":
169
- # HF Spaces share=True は未対応。明示的に 0.0.0.0 を指定。
170
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
20
  APP_TITLE = "候補者インテーク & レジュメ標準化(OpenAI版)"
21
 
22
 
23
+ def process_resumes(files, candidate_id: str, additional_notes: str = ""):
24
+ if not files:
25
  raise gr.Error("少なくとも1ファイルをアップロードしてください。")
26
 
27
  partial_records = []
28
  raw_texts = []
29
 
30
+ for f in files:
31
+ raw_bytes = f.read()
32
+ filetype = detect_filetype(f.name, raw_bytes)
 
 
 
33
 
34
+ # 1) テキスト抽出:画像/PDFはOpenAI Vision OCR、docx/txtは生文面+OpenAI整形
 
 
35
  if filetype in {"pdf", "image"}:
36
+ text = extract_text_with_openai(raw_bytes, filename=f.name, filetype=filetype)
37
  else:
38
  base_text = load_doc_text(filetype, raw_bytes)
39
+ # 生テキストをそのままOpenAIへ渡し、軽く整形した全文を返す
40
+ text = extract_text_with_openai(base_text.encode("utf-8"), filename=f.name, filetype="txt")
41
 
42
+ raw_texts.append({"filename": f.name, "text": text})
43
 
44
+ # 2) OpenAIでセクション構造化
45
  structured = structure_with_openai(text)
46
+ # 念のためルールベース正規化も適用(期間抽出など補助)
47
  normalized = normalize_resume({
48
  "work_experience": structured.get("work_experience_raw", ""),
49
  "education": structured.get("education_raw", ""),
 
51
  "skills": ", ".join(structured.get("skills_list", [])),
52
  })
53
  partial_records.append({
54
+ "source": f.name,
55
  "text": text,
56
  "structured": structured,
57
  "normalized": normalized,
 
60
  # 3) 統合(複数ファイル→1候補者)
61
  merged = merge_normalized_records([r["normalized"] for r in partial_records])
62
 
63
+ # 4) スキル抽出(辞書/正規表現)
64
  merged_text = "\n\n".join([r["text"] for r in partial_records])
65
  skills = extract_skills(merged_text, {
66
  "work_experience": merged.get("raw_sections", {}).get("work_experience", ""),
 
69
  "skills": ", ".join(merged.get("skills", [])),
70
  })
71
 
72
+ # 5) 匿名化
73
  anonymized_text, anon_map = anonymize_text(merged_text)
74
  anon_pdf_bytes = render_anonymized_pdf(anonymized_text)
75
 
76
  # 6) 品質スコア
77
  score = compute_quality_score(merged_text, merged)
78
 
79
+ # 7) 要約(300/100/1文)
80
  summaries = summarize_with_openai(merged_text)
81
 
82
  # 8) 構造化出力
83
  result_json = {
84
  "candidate_id": candidate_id or hashlib.sha256(merged_text.encode("utf-8")).hexdigest()[:16],
85
+ "files": [f.name for f in files],
86
  "merged": merged,
87
  "skills": skills,
88
  "quality_score": score,
 
91
  "notes": additional_notes,
92
  }
93
 
94
+ # 9) HF Datasets 保存
95
  dataset_repo = os.environ.get("DATASET_REPO")
96
  commit_info = None
97
  if dataset_repo:
 
107
 
108
  anon_pdf = (result_json["candidate_id"] + ".anon.pdf", anon_pdf_bytes)
109
 
 
110
  return (
111
  json.dumps(result_json, ensure_ascii=False, indent=2),
112
+ skills,
113
  json.dumps(score, ensure_ascii=False, indent=2),
114
  summaries["300chars"],
115
  summaries["100chars"],
 
127
  label="レジュメ類 (PDF/画像/Word/テキスト) 複数可",
128
  file_count="multiple",
129
  file_types=[".pdf", ".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".docx", ".txt"],
130
+ type="file" # ※読み取りコードが .read() 前提のため file のまま
131
  )
132
  candidate_id = gr.Textbox(label="候補者ID(任意。未入力なら自動生成)")
133
  notes = gr.Textbox(label="補足メモ(任意)", lines=3)
 
138
  out_json = gr.Code(label="統合出力 (JSON)")
139
 
140
  with gr.Tab("抽出スキル"):
141
+ # gr.JSON gr.Code に変更(Gradio 4.44.0 の schema 解析バグ回避)
142
+ out_skills = gr.Code(label="スキル一覧(JSON表示)")
143
 
144
  with gr.Tab("品質スコア"):
145
  out_score = gr.Code(label="品質評価")
 
163
 
164
 
165
  if __name__ == "__main__":
166
+ # HF Spaces 上で localhost 非アクセス時の ValueError を回避
167
  demo.launch(server_name="0.0.0.0", server_port=7860)