sanketshinde3001 committed
Commit 155070e · verified · 1 Parent(s): afab4f8

Update app.py

Files changed (1)
  1. app.py +12 -23
app.py CHANGED
@@ -36,9 +36,6 @@ humanize_pipe = None
 
 # Load NLP models
 try:
-    # Initialize with a flag to ensure loading only happens once
-    model_loaded = False
-
     # Load spaCy model
     nlp = spacy.load("en_core_web_sm")
 
@@ -55,7 +52,7 @@ except Exception as e:
 def get_humanize_pipeline():
     """
     Lazy-load the humanization pipeline on first use.
-    Ensures it runs on CPU with limited memory settings.
+    Uses standard settings that don't require accelerate.
     """
     global humanize_pipe
     if humanize_pipe is None:
@@ -65,36 +62,30 @@ def get_humanize_pipeline():
             # Force CPU usage
             device = torch.device("cpu")
 
-            # Set low memory footprint
-            model_kwargs = {
-                "low_cpu_mem_usage": True,
-                "device_map": "cpu"
-            }
-
-            # Load model with specific settings for resource-constrained environments
+            # Load model with basic settings (no accelerate needed)
             model = AutoModelForSeq2SeqLM.from_pretrained(
                 "danibor/flan-t5-base-humanizer",
-                **model_kwargs,
                 torch_dtype=torch.float32  # Use float32 instead of float16 for CPU
             )
             tokenizer = AutoTokenizer.from_pretrained("danibor/flan-t5-base-humanizer")
 
-            # Create pipeline with optimized settings
+            # Create pipeline with basic settings
             humanize_pipe = pipeline(
                 "text2text-generation",
                 model=model,
                 tokenizer=tokenizer,
-                device=device,  # Explicitly specify CPU
-                framework="pt"
+                device=device  # Explicitly specify CPU
             )
 
             print("Humanizer model loaded successfully!")
+            return humanize_pipe
         except Exception as e:
            print(f"Error loading humanizer model: {e}")
-            # Return a simple function that just returns the input as fallback
-            def fallback_humanize(text, **kwargs):
-                return [{"generated_text": f"FALLBACK: {text} (Model loading failed: {str(e)})"}]
-            humanize_pipe = fallback_humanize
+            # Create a simple pipeline-like function that just returns the input
+            def simple_pipeline(text, **kwargs):
+                return [{"generated_text": f"Could not process: {text} (Model failed to load)"}]
+            humanize_pipe = simple_pipeline
+            return humanize_pipe
 
     return humanize_pipe
 
@@ -127,13 +118,11 @@ async def humanize_text(request: TextRequest):
         # Get or initialize the pipeline
         pipeline = get_humanize_pipeline()
 
-        # Generate humanized text with memory-conscious settings
+        # Generate humanized text with basic settings
         result = pipeline(
             input_text,
             max_length=min(500, len(input_text) * 2),  # Limit max length
-            do_sample=True,
-            num_return_sequences=1,
-            batch_size=1  # Small batch size for memory constraints
+            do_sample=True
         )
 
         humanized_text = result[0]['generated_text']
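Pieced together from the + lines above, the lazy loader reads roughly as follows after this commit. This is a reconstruction, not the verbatim file: the imports, the module-level humanize_pipe = None cache, and the enclosing try: live in unchanged parts of app.py and are assumed here.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

humanize_pipe = None  # assumed module-level cache, as in the unchanged header of app.py


def get_humanize_pipeline():
    """
    Lazy-load the humanization pipeline on first use.
    Uses standard settings that don't require accelerate.
    """
    global humanize_pipe
    if humanize_pipe is None:
        try:
            # Force CPU usage
            device = torch.device("cpu")

            # Load model with basic settings (no accelerate needed)
            model = AutoModelForSeq2SeqLM.from_pretrained(
                "danibor/flan-t5-base-humanizer",
                torch_dtype=torch.float32,  # float32 instead of float16 for CPU
            )
            tokenizer = AutoTokenizer.from_pretrained("danibor/flan-t5-base-humanizer")

            # Create pipeline with basic settings
            humanize_pipe = pipeline(
                "text2text-generation",
                model=model,
                tokenizer=tokenizer,
                device=device,  # explicitly specify CPU
            )

            print("Humanizer model loaded successfully!")
            return humanize_pipe
        except Exception as e:
            print(f"Error loading humanizer model: {e}")
            # Fall back to a pipeline-like function that just echoes the input
            def simple_pipeline(text, **kwargs):
                return [{"generated_text": f"Could not process: {text} (Model failed to load)"}]
            humanize_pipe = simple_pipeline
            return humanize_pipe

    return humanize_pipe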
 
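A minimal usage sketch under the same assumptions (the import path app is hypothetical). Both the real pipeline and the fallback return a list of dicts with a generated_text key, which is why the humanize_text handler can index result[0]['generated_text'] unconditionally.

from app import get_humanize_pipeline  # hypothetical import path, for illustration only

input_text = "This essay was generated by a language model."

pipe = get_humanize_pipeline()  # loads the model on first call, reuses the cached pipeline afterwards
result = pipe(
    input_text,
    max_length=min(500, len(input_text) * 2),  # same cap the handler applies
    do_sample=True,
)

# Real pipeline and fallback share the same return shape
print(result[0]["generated_text"])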