maahi2412 committed on
Commit 1c64c13 · verified · 1 Parent(s): d725dbc

Update app.py

Files changed (1)
  1. app.py +12 -7
app.py CHANGED
@@ -5,7 +5,7 @@ import pytesseract
 import numpy as np
 from flask import Flask, request, jsonify
 from flask_cors import CORS
-import transformers  # Full import for logging
+import transformers
 from transformers import PegasusForConditionalGeneration, PegasusTokenizer, BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
 from datasets import load_dataset, concatenate_datasets
 import torch
@@ -20,7 +20,6 @@ BERT_MODEL_DIR = 'fine_tuned_bert'
 LEGALBERT_MODEL_DIR = 'fine_tuned_legalbert'
 MAX_FILE_SIZE = 100 * 1024 * 1024
 
-# Ensure upload folder exists
 if not os.path.exists(UPLOAD_FOLDER):
     os.makedirs(UPLOAD_FOLDER, exist_ok=True)
 
@@ -39,14 +38,20 @@ def load_or_finetune_pegasus():
     model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
 
     cnn_dm = load_dataset("cnn_dailymail", "3.0.0", split="train[:5000]")
-    xsum = load_dataset("xsum", split="train[:5000]", trust_remote_code=True)  # Added trust_remote_code=True
+    xsum = load_dataset("xsum", split="train[:5000]", trust_remote_code=True)
     combined_dataset = concatenate_datasets([cnn_dm, xsum])
 
     def preprocess_function(examples):
-        inputs = tokenizer(examples["article"] if "article" in examples else examples["document"],
-                           max_length=512, truncation=True, padding="max_length")
-        targets = tokenizer(examples["highlights"] if "highlights" in examples else examples["summary"],
-                            max_length=400, truncation=True, padding="max_length")
+        # Extract the correct text field (article or document) as a list of strings
+        texts = [examples["article"][i] if "article" in examples else examples["document"][i]
+                 for i in range(len(examples["article"] if "article" in examples else examples["document"]))]
+        inputs = tokenizer(texts, max_length=512, truncation=True, padding="max_length", return_tensors="pt")
+
+        # Extract the correct summary field (highlights or summary) as a list of strings
+        summaries = [examples["highlights"][i] if "highlights" in examples else examples["summary"][i]
+                     for i in range(len(examples["highlights"] if "highlights" in examples else examples["summary"]))]
+        targets = tokenizer(summaries, max_length=400, truncation=True, padding="max_length", return_tensors="pt")
+
         inputs["labels"] = targets["input_ids"]
         return inputs
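
The rewritten preprocess_function assumes batched processing, where each column of examples arrives as a list of strings. Below is a minimal sketch of how such a function is typically wired into datasets.map; the split sizes, variable names, and the column-alignment step are illustrative assumptions, not part of the commit (cnn_dailymail uses article/highlights while xsum uses document/summary, and concatenate_datasets requires matching features across datasets).

    from datasets import load_dataset, concatenate_datasets
    from transformers import PegasusTokenizer

    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

    cnn_dm = load_dataset("cnn_dailymail", "3.0.0", split="train[:100]")
    xsum = load_dataset("xsum", split="train[:100]", trust_remote_code=True)

    # Align schemas before concatenating (hypothetical step, not in the commit):
    # rename xsum's document/summary to match cnn_dailymail's article/highlights
    # and drop any extra columns so both datasets share identical features.
    xsum = xsum.rename_columns({"document": "article", "summary": "highlights"})
    keep = ["article", "highlights"]
    cnn_dm = cnn_dm.remove_columns([c for c in cnn_dm.column_names if c not in keep])
    xsum = xsum.remove_columns([c for c in xsum.column_names if c not in keep])
    combined_dataset = concatenate_datasets([cnn_dm, xsum])

    def preprocess_function(examples):
        # In batched mode, examples["article"] is already a list of strings,
        # so the column can be passed to the tokenizer directly.
        inputs = tokenizer(examples["article"], max_length=512, truncation=True,
                           padding="max_length")
        targets = tokenizer(examples["highlights"], max_length=400, truncation=True,
                            padding="max_length")
        inputs["labels"] = targets["input_ids"]
        return inputs

    tokenized = combined_dataset.map(preprocess_function, batched=True,
                                     remove_columns=combined_dataset.column_names)

Note that return_tensors="pt" is usually omitted inside a batched map: datasets stores the mapped output as Arrow-backed lists regardless, and Trainer's data collator converts them to tensors per batch.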