frankai98 committed on
Commit
b6ea9b0
·
verified ·
1 Parent(s): dfd6b67

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -91,7 +91,7 @@ def build_prompt(query_input, sampled_docs):
91
  system_message = """You are an intelligent assistant. Read the Tweets with their sentiment score (Negative, Neutral, Positive) provided and produce a well-structured report that answers the query question.
92
  Your task:
93
  - Summarize both positive and negative aspects, highlighting any trends in user sentiment.
94
- - Include an introduction, key insights, and a conclusion, reaching about 1000 words.
95
  - DO NOT repeat these instructions or the user's query in the final report. Only provide the final text."""
96
 
97
  user_content = f"""**Tweets**:
@@ -274,7 +274,7 @@ def main():
274
  tokenizer=tokenizer,
275
  device=0 if torch.cuda.is_available() else -1,
276
  )
277
- result = pipe(prompt, max_new_tokens=1024, repetition_penalty=1.2, do_sample=True, temperature=0.7, return_full_text=False)
278
  return result, None
279
  except Exception as e:
280
  return None, str(e)
 
91
  system_message = """You are an intelligent assistant. Read the Tweets with their sentiment score (Negative, Neutral, Positive) provided and produce a well-structured report that answers the query question.
92
  Your task:
93
  - Summarize both positive and negative aspects, highlighting any trends in user sentiment.
94
+ - Include an introduction, key insights, and a conclusion, reaching about 500 words.
95
  - DO NOT repeat these instructions or the user's query in the final report. Only provide the final text."""
96
 
97
  user_content = f"""**Tweets**:
 
274
  tokenizer=tokenizer,
275
  device=0 if torch.cuda.is_available() else -1,
276
  )
277
+ result = pipe(prompt, max_new_tokens=512, repetition_penalty=1.2, do_sample=True, temperature=0.7, return_full_text=False)
278
  return result, None
279
  except Exception as e:
280
  return None, str(e)