Spaces:
Sleeping
Sleeping
namberino
committed on
Commit
·
48df675
1
Parent(s):
a8b213d
Update generator
Browse files- generator.py +3 -3
generator.py
CHANGED
|
@@ -207,7 +207,7 @@ class RAGMCQ:
|
|
| 207 |
|
| 208 |
# ask generator
|
| 209 |
try:
|
| 210 |
-
structured_context = structure_context_for_llm(context, model=self.generation_model, temperature=0.2, enable_fiddler=False
|
| 211 |
mcq_block = generate_mcqs_from_text(
|
| 212 |
source_text=chunk_text, n=to_gen, model=self.generation_model, temperature=temperature, enable_fiddler=enable_fiddler
|
| 213 |
)
|
|
@@ -262,7 +262,7 @@ class RAGMCQ:
|
|
| 262 |
# call generator for 1 question (or small batch) with the retrieved context
|
| 263 |
try:
|
| 264 |
# request 1 question at a time to keep diversity
|
| 265 |
-
structured_context = structure_context_for_llm(context, model=self.generation_model, temperature=0.2, enable_fiddler=False
|
| 266 |
mcq_block = new_generate_mcqs_from_text(structured_context, n=questions_per_page, model=self.generation_model, temperature=temperature, enable_fiddler=False, target_difficulty=target_difficulty)
|
| 267 |
except Exception as e:
|
| 268 |
print(f"Generator failed during RAG attempt {attempts}: {e}")
|
|
@@ -941,7 +941,7 @@ class RAGMCQ:
|
|
| 941 |
# q generation
|
| 942 |
try:
|
| 943 |
# Difficulty pipeline: easy, mid, difficult
|
| 944 |
-
structured_context = structure_context_for_llm(context, model=self.generation_model, temperature=0.2, enable_fiddler=False
|
| 945 |
mcq_block = new_generate_mcqs_from_text(structured_context, n=questions_per_chunk, model=self.generation_model, temperature=temperature, enable_fiddler=False, target_difficulty=target_difficulty)
|
| 946 |
except Exception as e:
|
| 947 |
print(f"Generator failed during RAG attempt {attempts}: {e}")
|
|
|
|
| 207 |
|
| 208 |
# ask generator
|
| 209 |
try:
|
| 210 |
+
structured_context = structure_context_for_llm(context, model=self.generation_model, temperature=0.2, enable_fiddler=False)
|
| 211 |
mcq_block = generate_mcqs_from_text(
|
| 212 |
source_text=chunk_text, n=to_gen, model=self.generation_model, temperature=temperature, enable_fiddler=enable_fiddler
|
| 213 |
)
|
|
|
|
| 262 |
# call generator for 1 question (or small batch) with the retrieved context
|
| 263 |
try:
|
| 264 |
# request 1 question at a time to keep diversity
|
| 265 |
+
structured_context = structure_context_for_llm(context, model=self.generation_model, temperature=0.2, enable_fiddler=False)
|
| 266 |
mcq_block = new_generate_mcqs_from_text(structured_context, n=questions_per_page, model=self.generation_model, temperature=temperature, enable_fiddler=False, target_difficulty=target_difficulty)
|
| 267 |
except Exception as e:
|
| 268 |
print(f"Generator failed during RAG attempt {attempts}: {e}")
|
|
|
|
| 941 |
# q generation
|
| 942 |
try:
|
| 943 |
# Difficulty pipeline: easy, mid, difficult
|
| 944 |
+
structured_context = structure_context_for_llm(context, model=self.generation_model, temperature=0.2, enable_fiddler=False)
|
| 945 |
mcq_block = new_generate_mcqs_from_text(structured_context, n=questions_per_chunk, model=self.generation_model, temperature=temperature, enable_fiddler=False, target_difficulty=target_difficulty)
|
| 946 |
except Exception as e:
|
| 947 |
print(f"Generator failed during RAG attempt {attempts}: {e}")
|