codingcoolfun9ed commited on
Commit
84c1843
·
verified ·
1 Parent(s): 19782f6

AI-generated implementation did not work

Browse files

Reverted the AI-generated changes and restored the previous working implementation.

Files changed (1) hide show
  1. app.py +6 -11
app.py CHANGED
@@ -5,32 +5,27 @@ import sys
5
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
6
  from api.predict import predict_review, models_loaded
7
 
8
- def analyze_review(text):
9
- if not text or len(text.strip()) == 0:
10
  return "error: please enter some text"
11
 
12
  if not models_loaded:
13
  return "models are loading for the first time, this will take 20-30 minutes. please wait..."
14
 
15
  try:
16
- result = predict_review(text)
17
 
18
  if "error" in result and result["prediction"] == "error":
19
  return f"error: {result['error']}"
20
 
21
- prediction = result['prediction']
22
- confidence = result['confidence']
23
- is_fake = result['is_fake']
24
-
25
- status = "FAKE" if is_fake else "GENUINE"
26
-
27
- output = f"""prediction: {status}
28
- confidence: {confidence:.2%}
29
 
30
  fake probability: {result['fake_probability']:.2%}
31
  genuine probability: {result['genuine_probability']:.2%}
32
 
33
  model agreement: {result['model_agreement']:.1f}%
 
34
  length category: {result['length_category']}
35
  token count: {result['token_count']}"""
36
 
 
5
  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
6
  from api.predict import predict_review, models_loaded
7
 
8
+ def analyze_review(reviewText):
9
+ if not reviewText or len(reviewText.strip()) == 0:
10
  return "error: please enter some text"
11
 
12
  if not models_loaded:
13
  return "models are loading for the first time, this will take 20-30 minutes. please wait..."
14
 
15
  try:
16
+ result = predict_review(reviewText)
17
 
18
  if "error" in result and result["prediction"] == "error":
19
  return f"error: {result['error']}"
20
 
21
+ output = f"""prediction: {result['prediction']}
22
+ confidence: {result['confidence']:.2%}
 
 
 
 
 
 
23
 
24
  fake probability: {result['fake_probability']:.2%}
25
  genuine probability: {result['genuine_probability']:.2%}
26
 
27
  model agreement: {result['model_agreement']:.1f}%
28
+ is fake: {result['is_fake']}
29
  length category: {result['length_category']}
30
  token count: {result['token_count']}"""
31