sreepathi-ravikumar committed on
Commit
bd2d0eb
·
verified ·
1 Parent(s): d1cdf2e

Update text2generation.py

Browse files
Files changed (1) hide show
  1. text2generation.py +10 -26
text2generation.py CHANGED
@@ -1,45 +1,29 @@
1
- import os
2
  from transformers import pipeline, set_seed
 
3
 
4
- # Configure environment for optimal performance
5
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
6
- set_seed(42) # For reproducible outputs
7
 
8
  def generate_long_answer(question):
9
- """
10
- Generates detailed educational answers using Falcon-7B
11
- Returns: formatted markdown response with sections
12
- """
13
- # Initialize pipeline with performance optimizations
14
  generator = pipeline(
15
  "text-generation",
16
- model="tiiuae/falcon-7b-instruct",
17
  device_map="auto",
18
- torch_dtype="auto",
19
- model_kwargs={"load_in_8bit": True} # Reduces memory usage
20
  )
21
 
22
- # Carefully crafted prompt for educational responses
23
- prompt = f"""You are a university professor. Provide a comprehensive answer with:
24
- 1. Key Concepts
25
- 2. Real-world Examples
26
- 3. Common Misconceptions
27
- 4. Further Reading
28
-
29
  Question: {question}
30
- Answer:\n"""
31
 
32
- # Generate response with academic tone
33
  result = generator(
34
  prompt,
35
- max_length=2000,
36
- num_return_sequences=1,
37
  temperature=0.7,
38
  top_p=0.9,
39
- repetition_penalty=1.1,
40
  do_sample=True
41
  )
42
 
43
- # Post-process the output
44
- answer = result[0]['generated_text'].split("Answer:")[-1]
45
- return answer.strip()
 
 
# Standard library first, third-party second (PEP 8 import grouping).
import os

from transformers import pipeline, set_seed

# Silence the fork-safety warning emitted by HF fast tokenizers when the
# process later forks (e.g. under a web server / dataloader workers).
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Fix the sampling RNG so do_sample=True generations are reproducible.
set_seed(42)
def generate_long_answer(question):
    """Generate a detailed answer to an educational question with FLAN-T5.

    Args:
        question: The question text to be answered (plain string; it is
            interpolated into the prompt template below).

    Returns:
        The model's answer as a stripped string.

    NOTE(review): the pipeline is rebuilt (and the model reloaded) on every
    call — acceptable for a one-shot script, but hoist/cache it if this is
    called repeatedly.
    """
    # BUG FIX: google/flan-t5-large is a sequence-to-sequence (encoder-
    # decoder) model. The "text-generation" task is reserved for causal LMs
    # and raises at pipeline construction for T5 checkpoints; the correct
    # task is "text2text-generation".
    # (Comment fix: flan-t5-large is ~780M parameters, not 1.5B.)
    generator = pipeline(
        "text2text-generation",
        model="google/flan-t5-large",
        device_map="auto",   # place weights automatically (GPU if available)
        torch_dtype="auto",  # use the checkpoint's native dtype
    )

    prompt = f"""Answer this educational question in detail:

Question: {question}
Answer:"""

    result = generator(
        prompt,
        max_length=1000,      # hard cap on generated length (tokens)
        temperature=0.7,      # mild randomness for a natural tone
        top_p=0.9,            # nucleus sampling
        do_sample=True,
    )

    # text2text pipelines return only the generated answer (the prompt is
    # not echoed), so the split is a harmless safeguard: if "Answer:" is
    # absent, [-1] is the whole generated text.
    return result[0]['generated_text'].split("Answer:")[-1].strip()