Create generate_dataset.py
generate_dataset.py (ADDED, +1603 -0)
@@ -0,0 +1,1603 @@
import os
import json
import pandas as pd
import numpy as np
from typing import List, Dict, Tuple, Optional, Any
import logging
import random
import time
from tqdm import tqdm
from openai import AzureOpenAI
from datetime import datetime
import concurrent.futures
import threading
from dataclasses import dataclass
import queue
import math
import re

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("sales_ai_system.log"),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)

# Hard-coded Azure OpenAI credentials (fill in before running)
AZURE_OPENAI_API_KEY = ""
AZURE_OPENAI_DEPLOYMENT_NAME = ""
AZURE_OPENAI_ENDPOINT = "https://resource_name.openai.azure.com/"
AZURE_EMBEDDING_DEPLOYMENT_NAME = "text-embedding-3-large"

# Rate limiting parameters
RATE_LIMIT_RPM = 2500  # Requests per minute limit
# Use only 60% of the limit to account for retry overhead and be conservative
SAFE_RATE_LIMIT_RPM = int(RATE_LIMIT_RPM * 0.6)
RATE_LIMIT_RPS = SAFE_RATE_LIMIT_RPM / 60  # Requests per second
MIN_REQUEST_INTERVAL = 1.0 / RATE_LIMIT_RPS  # Minimum interval between requests

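# Worked example: with RATE_LIMIT_RPM = 2500, SAFE_RATE_LIMIT_RPM is
# int(2500 * 0.6) = 1500 requests/minute, so RATE_LIMIT_RPS is 1500 / 60 = 25
# requests/second and MIN_REQUEST_INTERVAL is 1.0 / 25 = 0.04 seconds.
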
# Thread-local storage for clients
local = threading.local()

# More granular rate limiters for different endpoints
class FixedWindowRateLimiter:
    """
    Rate limiter that tracks request timestamps in a sliding window.
    More conservative than a token bucket for API rate limits.
    """
    def __init__(self, max_requests, window_size=60):
        self.max_requests = max_requests
        self.window_size = window_size  # in seconds
        self.request_timestamps = []
        self.lock = threading.Lock()

    def wait_if_needed(self):
        """Return the seconds the caller should wait, or 0 if the request
        was recorded and may proceed immediately."""
        with self.lock:
            now = time.time()

            # Remove timestamps outside the window
            self.request_timestamps = [ts for ts in self.request_timestamps
                                       if now - ts < self.window_size]

            # Check if we're at the limit
            if len(self.request_timestamps) >= self.max_requests:
                # Calculate wait time - oldest timestamp will be removed after window_size
                wait_time = self.window_size - (now - self.request_timestamps[0])

                # Add a small jitter to avoid all threads waking up at the same time
                wait_time = max(0.1, wait_time + random.uniform(0, 0.5))

                # The request is NOT recorded in this branch; the caller
                # should call record_request() after sleeping.
                return wait_time

            # No need to wait
            self.request_timestamps.append(now)
            return 0

    def record_request(self):
        """Record a request without waiting."""
        with self.lock:
            now = time.time()
            self.request_timestamps.append(now)

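# A minimal usage sketch for the limiter (illustrative values - at most
# 5 requests in any 1-second window):
#
#   limiter = FixedWindowRateLimiter(max_requests=5, window_size=1)
#   wait = limiter.wait_if_needed()
#   if wait > 0:
#       time.sleep(wait)
#       limiter.record_request()  # count the request once it proceeds
#   # ... make the API call ...
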
# Create rate limiters
chat_limiter = FixedWindowRateLimiter(SAFE_RATE_LIMIT_RPM)
embedding_limiter = FixedWindowRateLimiter(SAFE_RATE_LIMIT_RPM)

@dataclass
class SaaSProfile:
    """Profile of a SaaS product."""
    company_id: str
    company_name: str
    product_name: str
    product_description: str
    target_audience: List[str]
    key_features: List[str]
    value_propositions: List[str]
    pricing_structure: Dict[str, Any]
    common_objections: List[str]
    industry: str
    company_size: str  # small, medium, large, enterprise

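# An illustrative instance (all values are placeholders):
#
#   profile = SaaSProfile(
#       company_id="saas-0",
#       company_name="Acme Analytics",
#       product_name="InsightHub",
#       product_description="A business intelligence platform.",
#       target_audience=["Data teams"],
#       key_features=["Dashboards"],
#       value_propositions=["Faster reporting"],
#       pricing_structure={"model": "tiered", "tiers": []},
#       common_objections=["Price"],
#       industry="Technology",
#       company_size="medium",
#   )
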
class ResultWriter:
    """Thread-safe writer for results to CSV"""

    def __init__(self, output_path: str, columns: List[str]):
        self.output_path = output_path
        self.columns = columns
        self.lock = threading.Lock()

        # Create the file with headers if it doesn't exist
        if not os.path.exists(output_path):
            with open(output_path, 'w') as f:
                f.write(','.join(columns) + '\n')

        logger.info(f"Initialized ResultWriter for {output_path}")

    def write_rows(self, rows: List[Dict]):
        """Write rows directly to the CSV file"""
        if not rows:
            return

        try:
            with self.lock:
                with open(self.output_path, 'a') as f:
                    for row in rows:
                        # Convert values to strings; quoting every field below
                        # also makes embedded commas safe
                        csv_values = []
                        for col in self.columns:
                            val = str(row.get(col, ""))
                            # Escape double quotes by doubling them
                            val = val.replace('"', '""')
                            # Wrap in quotes
                            csv_values.append(f'"{val}"')
                        csv_line = ','.join(csv_values)
                        f.write(csv_line + '\n')

            logger.debug(f"Wrote {len(rows)} rows to {self.output_path}")
        except Exception as e:
            logger.error(f"Error writing to CSV: {str(e)}")

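# Usage sketch (illustrative file name and columns):
#
#   writer = ResultWriter("conversations.csv", ["conversation_id", "outcome"])
#   writer.write_rows([{"conversation_id": "c-1", "outcome": "converted"}])
#
# write_rows() serializes under a lock, so worker threads can share a single
# ResultWriter instance safely.
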
class EnhancedSaaSDatasetGenerator:
    """
    Generates diverse and realistic synthetic sales conversation datasets for SaaS companies
    using Azure OpenAI with multithreading support.
    """

    def __init__(self):
        """Initialize the dataset generator."""
        self.profile_lock = threading.Lock()
        self.profiles = None
        self.writer = None
        self.total_generated = 0
        self.total_counter_lock = threading.Lock()

        # Track API call statistics
        self.api_calls = 0
        self.api_call_lock = threading.Lock()
        self.retry_count = 0
        self.retry_lock = threading.Lock()

        # Track last retry timestamp to prevent thundering herd
        self.last_retry_time = 0
        self.last_retry_lock = threading.Lock()

        # Conversation style templates - define various styles to create diverse conversations
        self.conversation_styles = [
            "casual_friendly",
            "direct_professional",
            "technical_detailed",
            "consultative_advisory",
            "empathetic_supportive",
            "skeptical_challenging",
            "urgent_time_pressed",
            "confused_overwhelmed",
            "knowledgeable_assertive",
            "storytelling_narrative"
        ]

        # Communication channel templates with their unique characteristics
        self.communication_channels = {
            "email": {
                "formality": "high",
                "response_time": "delayed",
                "message_length": "medium to long",
                "format_elements": ["subject lines", "signatures", "quoted replies"]
            },
            "live_chat": {
                "formality": "low to medium",
                "response_time": "immediate",
                "message_length": "short to medium",
                "format_elements": ["quick responses", "emojis", "typing indicators"]
            },
            "phone_call": {
                "formality": "medium",
                "response_time": "immediate",
                "message_length": "conversational",
                "format_elements": ["verbal pauses", "interruptions", "voice tone indicators"]
            },
            "video_call": {
                "formality": "medium",
                "response_time": "immediate",
                "message_length": "medium",
                "format_elements": ["screen sharing references", "visual cues", "environment mentions"]
            },
            "in_person": {
                "formality": "varies",
                "response_time": "immediate",
                "message_length": "varies",
                "format_elements": ["environment references", "body language cues", "material handouts"]
            },
            "sms": {
                "formality": "low",
                "response_time": "varies",
                "message_length": "very short",
                "format_elements": ["abbreviations", "emojis", "brief statements"]
            },
            "social_media": {
                "formality": "low to medium",
                "response_time": "varies",
                "message_length": "short",
                "format_elements": ["hashtags", "mentions", "public/private context references"]
            }
        }

        # Customer persona templates with more diverse traits
        self.persona_templates = [
            {
                "name": "Time-Pressed Executive",
                "traits": ["direct", "value-focused", "impatient", "decisive"],
                "communication_style": "brief and to-the-point, may use truncated sentences and check messages between meetings",
                "typical_objections": ["too time-consuming", "prove ROI quickly", "competitor comparisons"]
            },
            {
                "name": "Technical Evaluator",
                "traits": ["detail-oriented", "skeptical", "analytical", "research-driven"],
                "communication_style": "asks specific technical questions, uses industry jargon, references research",
                "typical_objections": ["technical limitations", "integration concerns", "security issues"]
            },
            {
                "name": "Budget-Conscious Manager",
                "traits": ["price-sensitive", "cautious", "ROI-focused", "deliberate"],
                "communication_style": "frequently mentions costs, compares alternatives, asks about discounts",
                "typical_objections": ["too expensive", "budget constraints", "not worth the investment"]
            },
            {
                "name": "Relationship Builder",
                "traits": ["conversational", "personable", "story-driven", "trust-focused"],
                "communication_style": "shares personal anecdotes, asks about the sales rep, builds rapport before business",
                "typical_objections": ["need to build trust", "want references", "need team buy-in"]
            },
            {
                "name": "Innovation Seeker",
                "traits": ["trend-aware", "competitive", "growth-focused", "risk-tolerant"],
                "communication_style": "references industry trends, talks about growth goals, explores cutting-edge features",
                "typical_objections": ["not innovative enough", "will soon be outdated", "competitive advantage concerns"]
            },
            {
                "name": "Overwhelmed User",
                "traits": ["stressed", "confused", "seeking guidance", "time-constrained"],
                "communication_style": "asks many basic questions, may seem scattered, expresses feeling overwhelmed",
                "typical_objections": ["too complicated", "training requirements", "implementation time"]
            },
            {
                "name": "Delegated Researcher",
                "traits": ["information-gathering", "non-decision-maker", "thorough", "process-oriented"],
                "communication_style": "mentions reporting back to others, asks for materials, follows structured evaluation",
                "typical_objections": ["need to consult others", "gathering information only", "complex approval process"]
            },
            {
                "name": "Competitor User",
                "traits": ["comparative", "experienced", "specific needs", "solution-aware"],
                "communication_style": "frequently mentions current solution, asks about specific differences, uses competitor terminology",
                "typical_objections": ["switching costs", "feature parity", "disruption concerns"]
            },
            {
                "name": "Enthusiastic Champion",
                "traits": ["excited", "vision-aligned", "quick to connect", "internal seller"],
                "communication_style": "expresses excitement, talks about company vision, discusses internal advocacy",
                "typical_objections": ["need help convincing others", "implementation support", "proving value to team"]
            },
            {
                "name": "Resistant Stakeholder",
                "traits": ["change-averse", "skeptical", "security-focused", "process-oriented"],
                "communication_style": "questions necessity, raises potential problems, defensive about current solutions",
                "typical_objections": ["disruption to workflow", "employee resistance", "security concerns"]
            }
        ]

        # Conversation flow patterns to create non-linear, realistic conversations
        self.conversation_flows = [
            "standard_linear",           # Traditional linear sales conversation
            "multiple_objection_loops",  # Customer raises several objections that must be addressed
            "subject_switching",         # Conversation jumps between different topics
            "interrupted_followup",      # Conversation gets interrupted and resumes later
            "technical_deep_dive",       # Detailed exploration of technical aspects
            "competitive_comparison",    # Heavy focus on comparing with competitors
            "gradual_discovery",         # Slow revelation of needs throughout conversation
            "immediate_interest",        # Customer shows high interest from the beginning
            "initial_rejection",         # Starts negative but potentially turns around
            "stakeholder_expansion",     # Involves bringing in additional decision makers
            "pricing_negotiation",       # Extended discussion about pricing and terms
            "implementation_concerns",   # Focused on implementation challenges
            "value_justification",       # Customer needs convincing on ROI
            "relationship_building",     # Heavy on personal connection before business
            "multi_session",             # Simulates a conversation occurring across multiple contacts
            "demo_walkthrough"           # Simulates a product demonstration conversation
        ]

        # Speech patterns and quirks to make conversations more human-like
        self.speech_patterns = [
            # Hesitations and fillers
            {"pattern": "filler_words", "examples": ["um", "uh", "like", "you know", "actually", "basically", "I mean"]},

            # Grammatical quirks
            {"pattern": "run_on_sentences", "examples": ["and then we also", "plus we need to", "which also means"]},
            {"pattern": "self_corrections", "examples": ["I mean", "what I meant was", "actually, let me rephrase", "sorry, what I'm trying to say"]},

            # Typing variations (for written channels)
            {"pattern": "typos", "examples": ["teh", "adn", "waht", "thigns", "compnay", "prodcut", "featuers"]},
            {"pattern": "autocorrections", "examples": ["Our team is looking for skeletons (solutions)", "We need better coffin (conferencing)"]},

            # Regional expressions
            {"pattern": "regionalisms", "examples": ["y'all", "folks", "brilliant", "cheers", "no worries", "wicked", "proper"]},

            # Punctuation habits
            {"pattern": "over_punctuation", "examples": ["!!!", "???", "..."]},
            {"pattern": "under_punctuation", "examples": ["no periods", "run on thoughts", "missing question marks"]},

            # Message structure
            {"pattern": "fragmented_thoughts", "examples": ["Need to check on...", "Not sure if...", "Let me think...", "One more thing."]},
            {"pattern": "tangents", "examples": ["By the way", "Oh that reminds me", "Not related, but", "Random thought"]},

            # Emphasis patterns
            {"pattern": "emphasis_capital", "examples": ["REALLY", "VERY", "NEVER", "ALWAYS", "MUST"]},
            {"pattern": "emphasis_repetition", "examples": ["very very", "really really", "many many"]}
        ]

        # Customer needs categories with specific language patterns
        self.customer_needs = [
            {"type": "efficiency", "keywords": ["faster", "streamline", "automate", "time-consuming", "manual", "process", "workflow"]},
            {"type": "cost_reduction", "keywords": ["expenses", "budget", "save money", "affordable", "cost-effective", "ROI", "investment"]},
            {"type": "growth", "keywords": ["scale", "expand", "increase revenue", "market share", "competitive", "opportunity", "growth"]},
            {"type": "compliance", "keywords": ["regulations", "requirements", "standards", "audit", "legal", "compliance", "risk"]},
            {"type": "integration", "keywords": ["connect", "compatible", "ecosystem", "work with", "existing systems", "API", "integration"]},
            {"type": "usability", "keywords": ["easy to use", "intuitive", "learning curve", "training", "user-friendly", "simple", "interface"]},
            {"type": "reliability", "keywords": ["uptime", "stable", "dependable", "trust", "consistent", "failover", "backup"]},
            {"type": "security", "keywords": ["protect", "data security", "encryption", "sensitive information", "breach", "privacy", "secure"]},
            {"type": "support", "keywords": ["help", "customer service", "response time", "training", "documentation", "support team", "assistance"]},
            {"type": "analytics", "keywords": ["insights", "reporting", "dashboard", "metrics", "data", "visibility", "analytics"]}
        ]

        logger.info("Initialized EnhancedSaaSDatasetGenerator with diverse conversation templates")

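    # Each generated conversation samples one element from several of the
    # pools above, for instance:
    #
    #   style = random.choice(self.conversation_styles)    # e.g. "skeptical_challenging"
    #   flow = random.choice(self.conversation_flows)      # e.g. "pricing_negotiation"
    #   channel = random.choice(list(self.communication_channels))  # e.g. "email"
    #
    # 10 styles x 16 flows x 7 channels alone give 1,120 combinations before
    # personas and speech patterns are factored in.
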
    def get_openai_client(self):
        """Get thread-local Azure OpenAI client"""
        if not hasattr(local, 'client'):
            local.client = AzureOpenAI(
                api_key=AZURE_OPENAI_API_KEY,
                api_version="2023-05-15",
                azure_endpoint=AZURE_OPENAI_ENDPOINT
            )
        return local.client

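    # Since `local` is a threading.local(), each worker thread lazily creates
    # and then reuses its own AzureOpenAI client, so no client object is ever
    # shared between threads.
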
    def _wait_with_jitter(self, base_time):
        """Wait with jitter to avoid thundering herd problem"""
        jitter = random.uniform(0, 1)
        wait_time = base_time + jitter
        time.sleep(wait_time)

    def _handle_rate_limit(self, retry_after=None):
        """Handle rate limit exceptions with proper backoff"""
        with self.retry_lock:
            self.retry_count += 1

        # If retry_after is provided in header, use it; otherwise use default
        wait_time = retry_after if retry_after else 30

        # Add jitter to avoid thundering herd problem
        jitter = random.uniform(0, 5)
        wait_time += jitter

        # Update last retry time - all threads can see when the last retry happened
        with self.last_retry_lock:
            self.last_retry_time = time.time()

        # Sleep outside the locks so other threads are not blocked while waiting
        logger.warning(f"Rate limit exceeded. Waiting for {wait_time:.2f} seconds before retry.")
        time.sleep(wait_time)

def _get_embedding(self, text: str) -> List[float]:
|
| 395 |
+
"""Get embeddings using Azure OpenAI."""
|
| 396 |
+
max_retries = 5
|
| 397 |
+
|
| 398 |
+
for attempt in range(max_retries):
|
| 399 |
+
try:
|
| 400 |
+
# Check if we need to wait based on embedding rate limiter
|
| 401 |
+
wait_time = embedding_limiter.wait_if_needed()
|
| 402 |
+
if wait_time > 0:
|
| 403 |
+
time.sleep(wait_time)
|
| 404 |
+
|
| 405 |
+
# Check if any thread recently hit a rate limit
|
| 406 |
+
with self.last_retry_lock:
|
| 407 |
+
time_since_last_retry = time.time() - self.last_retry_time
|
| 408 |
+
|
| 409 |
+
# If a retry happened recently, stagger our requests
|
| 410 |
+
if time_since_last_retry < 10:
|
| 411 |
+
time.sleep(random.uniform(0.5, 3.0))
|
| 412 |
+
|
| 413 |
+
client = self.get_openai_client()
|
| 414 |
+
response = client.embeddings.create(
|
| 415 |
+
model=AZURE_EMBEDDING_DEPLOYMENT_NAME,
|
| 416 |
+
input=text
|
| 417 |
+
)
|
| 418 |
+
|
| 419 |
+
# Track successful API call
|
| 420 |
+
with self.api_call_lock:
|
| 421 |
+
self.api_calls += 1
|
| 422 |
+
|
| 423 |
+
return response.data[0].embedding
|
| 424 |
+
|
| 425 |
+
except Exception as e:
|
| 426 |
+
error_msg = str(e).lower()
|
| 427 |
+
|
| 428 |
+
# Handle rate limit errors
|
| 429 |
+
if "429" in error_msg or "too many requests" in error_msg:
|
| 430 |
+
self._handle_rate_limit()
|
| 431 |
+
continue
|
| 432 |
+
|
| 433 |
+
# Log other errors
|
| 434 |
+
logger.error(f"Error attempt {attempt+1}/{max_retries} getting embedding: {str(e)}")
|
| 435 |
+
|
| 436 |
+
if attempt < max_retries - 1:
|
| 437 |
+
# Exponential backoff with jitter
|
| 438 |
+
backoff_time = (2 ** attempt) + random.uniform(0, 1)
|
| 439 |
+
time.sleep(backoff_time)
|
| 440 |
+
else:
|
| 441 |
+
logger.error(f"Failed to get embedding after {max_retries} attempts")
|
| 442 |
+
return [0.0] * 3072 # Default dimension for embeddings
|
| 443 |
+
|
| 444 |
+
def _generate_completion(self, system_prompt: str, user_prompt: str, temperature: float = 0.7, retries: int = 5) -> str:
|
| 445 |
+
"""Generate text completion using Azure OpenAI with improved error handling."""
|
| 446 |
+
client = self.get_openai_client()
|
| 447 |
+
|
| 448 |
+
for attempt in range(retries):
|
| 449 |
+
try:
|
| 450 |
+
# Check if we need to wait based on chat rate limiter
|
| 451 |
+
wait_time = chat_limiter.wait_if_needed()
|
| 452 |
+
if wait_time > 0:
|
| 453 |
+
time.sleep(wait_time)
|
| 454 |
+
|
| 455 |
+
# Check if any thread recently hit a rate limit
|
| 456 |
+
with self.last_retry_lock:
|
| 457 |
+
time_since_last_retry = time.time() - self.last_retry_time
|
| 458 |
+
|
| 459 |
+
# If a retry happened recently, stagger our requests
|
| 460 |
+
if time_since_last_retry < 10:
|
| 461 |
+
time.sleep(random.uniform(0.5, 3.0))
|
| 462 |
+
|
| 463 |
+
# Add explicit JSON formatting request to the system prompt
|
| 464 |
+
enhanced_system_prompt = f"{system_prompt}\nImportant: Your response must be valid JSON only, with no explanations or additional text."
|
| 465 |
+
|
| 466 |
+
# Add explicit JSON formatting instructions to the user prompt
|
| 467 |
+
enhanced_user_prompt = f"{user_prompt}\n\nYour response should be formatted as valid JSON only. Do not include any text before or after the JSON."
|
| 468 |
+
|
| 469 |
+
response = client.chat.completions.create(
|
| 470 |
+
model=AZURE_OPENAI_DEPLOYMENT_NAME,
|
| 471 |
+
messages=[
|
| 472 |
+
{"role": "system", "content": enhanced_system_prompt},
|
| 473 |
+
{"role": "user", "content": enhanced_user_prompt}
|
| 474 |
+
],
|
| 475 |
+
temperature=temperature
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
# Track successful API call
|
| 479 |
+
with self.api_call_lock:
|
| 480 |
+
self.api_calls += 1
|
| 481 |
+
|
| 482 |
+
content = response.choices[0].message.content.strip()
|
| 483 |
+
|
| 484 |
+
# Remove any potential non-JSON content before and after the actual JSON
|
| 485 |
+
if content.startswith("```json"):
|
| 486 |
+
content = content.split("```json", 1)[1]
|
| 487 |
+
if content.endswith("```"):
|
| 488 |
+
content = content.rsplit("```", 1)[0]
|
| 489 |
+
|
| 490 |
+
# Further cleanup to ensure we have valid JSON
|
| 491 |
+
content = content.strip()
|
| 492 |
+
|
| 493 |
+
# Check if the first character is not '{' but contains '{' somewhere
|
| 494 |
+
if not content.startswith('{') and '{' in content:
|
| 495 |
+
content = content[content.find('{'):]
|
| 496 |
+
|
| 497 |
+
# Check if the last character is not '}' but contains '}' somewhere
|
| 498 |
+
if not content.endswith('}') and '}' in content:
|
| 499 |
+
content = content[:content.rfind('}')+1]
|
| 500 |
+
|
| 501 |
+
try:
|
| 502 |
+
# Parse and re-serialize to ensure valid JSON
|
| 503 |
+
parsed_json = json.loads(content)
|
| 504 |
+
return json.dumps(parsed_json)
|
| 505 |
+
except json.JSONDecodeError:
|
| 506 |
+
# If we can't parse the JSON, try to fix common issues
|
| 507 |
+
# Replace single quotes with double quotes
|
| 508 |
+
content = content.replace("'", '"')
|
| 509 |
+
# Fix unquoted keys
|
| 510 |
+
content = re.sub(r'(\s*?)(\w+)(\s*?):', r'\1"\2"\3:', content)
|
| 511 |
+
try:
|
| 512 |
+
parsed_json = json.loads(content)
|
| 513 |
+
return json.dumps(parsed_json)
|
| 514 |
+
except json.JSONDecodeError:
|
| 515 |
+
if attempt < retries - 1:
|
| 516 |
+
continue
|
| 517 |
+
else:
|
| 518 |
+
# Create minimal valid JSON as fallback
|
| 519 |
+
logger.warning("Returning fallback JSON due to parsing error")
|
| 520 |
+
return '{"error": "Could not generate valid JSON", "partial_content": "Content generation failed"}'
|
| 521 |
+
|
| 522 |
+
except Exception as e:
|
| 523 |
+
error_msg = str(e).lower()
|
| 524 |
+
|
| 525 |
+
# Handle rate limit errors
|
| 526 |
+
if "429" in error_msg or "too many requests" in error_msg:
|
| 527 |
+
self._handle_rate_limit()
|
| 528 |
+
continue
|
| 529 |
+
|
| 530 |
+
# Log other errors
|
| 531 |
+
logger.error(f"Error attempt {attempt+1}/{retries} generating completion: {str(e)}")
|
| 532 |
+
|
| 533 |
+
if attempt < retries - 1:
|
| 534 |
+
# Exponential backoff with jitter
|
| 535 |
+
backoff_time = (2 ** attempt) + random.uniform(0, 1)
|
| 536 |
+
time.sleep(backoff_time)
|
| 537 |
+
else:
|
| 538 |
+
logger.error(f"Failed to generate completion after {retries} attempts")
|
| 539 |
+
# Return a minimal valid JSON as fallback
|
| 540 |
+
return '{"error": "Failed to generate completion", "message": "Maximum retries exceeded"}'
|
| 541 |
+
|
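    # Illustration of the cleanup above: a model reply of
    #
    #   ```json
    #   Here you go: {"name": "Acme"}
    #   ```
    #
    # is trimmed to '{"name": "Acme"}' before json.loads(); the fallback pass
    # then repairs replies such as {'name': 'Acme'} (single quotes) or
    # {name: "Acme"} (an unquoted key).
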
    def generate_saas_profiles(self, num_profiles: int = 10) -> List[SaaSProfile]:
        """Generate a diverse set of SaaS company profiles with improved error handling."""

        profiles = []

        # Expanded SaaS categories with modern AI/ML and other specialized categories
        saas_categories = [
            # Core SaaS Categories
            "Project Management", "CRM", "Marketing Automation", "HR Software",
            "Customer Support", "Accounting", "Business Intelligence", "Collaboration",
            "DevOps", "Security", "E-commerce", "ERP", "Content Management",

            # AI/ML/LLM Specific
            "LLM Development Platform", "AI Orchestration", "Prompt Engineering Tools",
            "AI Agent Framework", "Machine Learning Operations", "Vector Database",
            "AI Content Generation", "Computer Vision Platform", "NLP Solutions",
            "AI Model Marketplace", "Semantic Search", "AI Development Environment",
            "Multimodal AI Platform", "AI Workflow Automation", "GenAI Enterprise Solutions",

            # Emerging Tech SaaS
            "Blockchain Services", "IoT Platform", "Augmented Reality", "Virtual Reality",
            "Edge Computing", "Quantum Computing Services", "Digital Twin Platform",

            # Industry-Specific SaaS
            "HealthTech", "FinTech", "EdTech", "LegalTech", "PropTech", "AgriTech",
            "InsurTech", "RegTech", "CleanTech", "BioTech", "FoodTech", "RetailTech",

            # Data-Focused SaaS
            "Data Integration", "ETL Platform", "Data Visualization", "Data Governance",
            "Big Data Analytics", "Predictive Analytics", "Data Labeling", "Data Quality",
            "Real-time Analytics", "Data Pipeline", "Time Series Database",

            # Specialized SaaS
            "API Management", "Workflow Automation", "Knowledge Management", "Network Monitoring",
            "Identity Management", "Email Marketing", "Video Conferencing", "Product Analytics",
            "Customer Data Platform", "Event Management", "Subscription Management",
            "Conversational AI", "Pricing Optimization", "Sales Enablement", "Revenue Operations",

            # Developer Tools
            "Code Repository", "CI/CD Pipeline", "Testing Automation", "Microservices Platform",
            "Serverless Computing", "API Development", "Low-Code Platform", "No-Code Platform",
            "Database Management", "Container Orchestration", "Application Monitoring",

            # Security SaaS
            "Endpoint Protection", "Cloud Security", "Identity Access Management",
            "Vulnerability Management", "Threat Intelligence", "Data Loss Prevention",
            "Security Information Management", "Privileged Access Management", "Zero Trust Security",

            # Remote Work SaaS
            "Virtual Desktop", "Remote Team Collaboration", "Digital Workplace", "Employee Monitoring",
            "Virtual Onboarding", "Distributed Team Management", "Workforce Analytics"
        ]

        # Expanded industries list to match the diverse SaaS categories
        industries = [
            # Traditional Industries
            "Technology", "Healthcare", "Finance", "Education", "Retail",
            "Manufacturing", "Media", "Real Estate", "Legal", "Non-profit",

            # Expanded Technology Sectors
            "Software Development", "Cloud Services", "Data Science", "Artificial Intelligence",
            "Cybersecurity", "Telecommunications", "Gaming", "Digital Marketing",

            # Specific Verticals
            "E-commerce", "Banking", "Insurance", "Pharmaceuticals", "Entertainment",
            "Hospitality", "Transportation", "Logistics", "Construction", "Energy",
            "Agriculture", "Automotive", "Aerospace", "Public Sector", "Professional Services",

            # Emerging Industries
            "Renewable Energy", "Biotechnology", "Nanotechnology", "Space Technology",
            "Smart Cities", "Sustainable Development", "Circular Economy",

            # Service Sectors
            "Consulting", "Staffing", "Training & Development", "Research", "Marketing Services"
        ]

        company_sizes = ["small", "medium", "large", "enterprise"]

        # Calculate effective thread count - be conservative for profile generation
        effective_threads = min(num_profiles, 15)

        # Use ThreadPoolExecutor for parallel profile generation
        with concurrent.futures.ThreadPoolExecutor(max_workers=effective_threads) as executor:
            futures = []

            # Submit each profile generation task
            for i in range(num_profiles):
                category = random.choice(saas_categories)
                industry = random.choice(industries)
                size = random.choice(company_sizes)

                futures.append(executor.submit(
                    self._generate_single_profile, i, category, industry, size
                ))

            # Collect results as they complete
            for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures), desc="Generating SaaS profiles"):
                try:
                    profile = future.result()
                    if profile:
                        profiles.append(profile)
                except Exception as e:
                    logger.error(f"Error in profile generation thread: {str(e)}")

        logger.info(f"Generated {len(profiles)} SaaS profiles")
        return profiles

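    # Usage sketch (illustrative):
    #
    #   generator = EnhancedSaaSDatasetGenerator()
    #   profiles = generator.generate_saas_profiles(num_profiles=20)
    #   # -> up to 20 SaaSProfile objects, built on at most 15 worker threads
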
    def _generate_single_profile(self, i: int, category: str, industry: str, size: str) -> Optional[SaaSProfile]:
        """Generate a single SaaS profile."""
        max_retries = 3
        for attempt in range(max_retries):
            try:
                # Use a simpler prompt focused on just creating valid JSON
                system_prompt = "You are a SaaS industry expert. Generate a JSON object according to the specification."

                user_prompt = f"""
                Create a SaaS company profile for a {category} software company targeting {size} businesses in the {industry} industry.

                Return ONLY a JSON object with this exact structure:
                {{
                    "company_id": "saas-{i}",
                    "company_name": "Company name",
                    "product_name": "Product name",
                    "product_description": "A 2-3 sentence description",
                    "target_audience": ["Target 1", "Target 2", "Target 3"],
                    "key_features": ["Feature 1", "Feature 2", "Feature 3", "Feature 4"],
                    "value_propositions": ["Value prop 1", "Value prop 2", "Value prop 3"],
                    "pricing_structure": {{
                        "model": "Pricing model type",
                        "tiers": [
                            {{
                                "name": "Tier name",
                                "price": "Price",
                                "features": ["Feature 1", "Feature 2"]
                            }}
                        ]
                    }},
                    "common_objections": ["Objection 1", "Objection 2", "Objection 3", "Objection 4"],
                    "industry": "{industry}",
                    "company_size": "{size}"
                }}

                Ensure your response is ONLY valid JSON with no additional text, markdown code blocks, or explanations.
                """

                content = self._generate_completion(system_prompt, user_prompt)
                profile_json = json.loads(content)

                profile = SaaSProfile(
                    company_id=profile_json.get("company_id", f"saas-{i}"),
                    company_name=profile_json["company_name"],
                    product_name=profile_json["product_name"],
                    product_description=profile_json["product_description"],
                    target_audience=profile_json["target_audience"],
                    key_features=profile_json["key_features"],
                    value_propositions=profile_json["value_propositions"],
                    pricing_structure=profile_json["pricing_structure"],
                    common_objections=profile_json["common_objections"],
                    industry=profile_json["industry"],
                    company_size=profile_json["company_size"]
                )

                logger.info(f"Successfully generated profile {i}: {profile.company_name}")
                return profile

            except Exception as e:
                logger.error(f"Error attempt {attempt+1}/{max_retries} generating profile {i}: {str(e)}")
                if attempt < max_retries - 1:
                    # Exponential backoff
                    backoff_time = (2 ** attempt) + random.uniform(0, 1)
                    time.sleep(backoff_time)

        # All attempts failed - create a fallback profile
        logger.warning(f"Using fallback profile for {i}")
        return SaaSProfile(
            company_id=f"saas-{i}",
            company_name=f"{category} Solutions Inc",
            product_name=f"{category} Pro",
            product_description=f"A {category} solution for {size} businesses in the {industry} industry.",
            target_audience=[f"{industry} professionals", f"{size} businesses", "Department managers"],
            key_features=["Easy setup", "Intuitive interface", "Advanced reporting", "Team collaboration"],
            value_propositions=["Increase productivity", "Reduce costs", "Improve visibility"],
            pricing_structure={
                "model": "tiered",
                "tiers": [
                    {
                        "name": "Basic",
                        "price": "$10/user/month",
                        "features": ["Core features", "Email support"]
                    }
                ]
            },
            common_objections=["Too expensive", "Complex implementation", "Training required", "Integration concerns"],
            industry=industry,
            company_size=size
        )

    def _generate_customer_persona(self, profile: SaaSProfile) -> Dict:
        """Generate a realistic customer persona based on the SaaS profile with improved diversity."""
        max_retries = 3
        for attempt in range(max_retries):
            try:
                # Select a random persona template for more diversity
                persona_template = random.choice(self.persona_templates)

                system_prompt = """You are an expert in creating realistic customer personas.
                Generate a JSON object according to the specification. Make the persona feel real, with specific challenges and needs."""

                user_prompt = f"""
                Generate a realistic customer persona for a potential {profile.product_name} customer based on:

                Company: {profile.company_name}
                Product: {profile.product_name}
                Description: {profile.product_description}
                Target Audience: {", ".join(profile.target_audience)}
                Industry: {profile.industry}

                Please base this persona loosely on the following template:
                Template name: {persona_template["name"]}
                Traits: {", ".join(persona_template["traits"])}
                Communication style: {persona_template["communication_style"]}
                Typical objections: {", ".join(persona_template["typical_objections"])}

                Return ONLY a JSON object with this exact structure:
                {{
                    "name": "Full customer name (realistic)",
                    "company": "Customer's company name (specific, not generic)",
                    "role": "Customer's specific job title",
                    "company_size": "{profile.company_size}",
                    "industry": "{profile.industry}",
                    "pain_points": ["Specific pain point 1", "Specific pain point 2", "Specific pain point 3"],
                    "needs": ["Specific need 1", "Specific need 2", "Specific need 3"],
                    "communication_preferences": ["One or more channels such as Email, Phone, or Chat"],
                    "technical_expertise": "low/medium/high",
                    "budget_sensitivity": "low/medium/high",
                    "decision_making_authority": "none/influence/decide",
                    "personality_traits": ["Trait 1", "Trait 2", "Trait 3"],
                    "background": "Brief background of the customer (2-3 sentences)",
                    "objection_style": "How they typically raise objections (direct, passive, etc.)"
                }}

                Ensure your response is ONLY valid JSON with no additional text or markdown.
                """

                content = self._generate_completion(system_prompt, user_prompt, temperature=0.8)
                persona_json = json.loads(content)

                # Add more details to each persona to enhance realism
                persona_json["preferred_speech_patterns"] = random.sample([p["pattern"] for p in self.speech_patterns], 2)
                persona_json["primary_need_type"] = random.choice([n["type"] for n in self.customer_needs])

                return persona_json

            except Exception as e:
                logger.error(f"Error attempt {attempt+1}/{max_retries} generating persona: {str(e)}")
                if attempt < max_retries - 1:
                    # Exponential backoff
                    backoff_time = (2 ** attempt) + random.uniform(0, 1)
                    time.sleep(backoff_time)
                else:
                    # Return a default persona as fallback but with more variation
                    logger.warning(f"Using fallback persona for {profile.company_name}")
                    return {
                        "name": f"{random.choice(['Alex', 'Jordan', 'Taylor', 'Sam', 'Morgan', 'Jamie'])} {random.choice(['Smith', 'Johnson', 'Wong', 'Garcia', 'Patel', 'Kim'])}",
                        "company": f"{random.choice(['Innovative', 'Global', 'Modern', 'Premier', 'Advanced'])} {random.choice(['Systems', 'Solutions', 'Technologies', 'Enterprises', 'Group'])}",
                        "role": random.choice(["Department Manager", "IT Director", "Operations Lead", "VP of Technology", "CFO", "CEO", "CTO"]),
                        "company_size": profile.company_size,
                        "industry": profile.industry,
                        "pain_points": ["Efficiency issues", "Cost management", "Integration challenges"],
                        "needs": ["Better reporting", "Team collaboration", "Process automation"],
                        "communication_preferences": random.sample(["Email", "Phone", "Chat", "In-person", "Video call"], 2),
                        "technical_expertise": random.choice(["low", "medium", "high"]),
                        "budget_sensitivity": random.choice(["low", "medium", "high"]),
                        "decision_making_authority": random.choice(["none", "influence", "decide"]),
                        "personality_traits": random.sample(["analytical", "direct", "cautious", "friendly", "detail-oriented", "big-picture", "skeptical"], 3),
                        "background": "Has been with the company for 5 years. Previously worked at a competitor.",
                        "objection_style": random.choice(["direct", "passive-aggressive", "analytical", "price-focused"]),
                        "preferred_speech_patterns": random.sample([p["pattern"] for p in self.speech_patterns], 2),
                        "primary_need_type": random.choice([n["type"] for n in self.customer_needs])
                    }

    def _generate_conversation_scenario(self, profile: SaaSProfile, customer_persona: Dict) -> Dict:
        """Generate a diverse conversation scenario with improved complexity."""
        max_retries = 3
        for attempt in range(max_retries):
            try:
                # Select random conversation style and flow pattern for diversity
                conversation_style = random.choice(self.conversation_styles)
                conversation_flow = random.choice(self.conversation_flows)
                communication_channel = random.choice(list(self.communication_channels.keys()))

                # Randomize expected outcome to ensure dataset balance
                expected_outcome = random.choice([True, False])

                # Generate a conversion probability that matches the expected outcome
                if expected_outcome:
                    # For positive outcomes, higher probability
                    expected_probability = random.uniform(0.6, 0.95)
                else:
                    # For negative outcomes, lower probability
                    expected_probability = random.uniform(0.05, 0.4)

                system_prompt = """You are an expert in creating realistic sales scenarios.
                Generate a JSON object according to the specification. Focus on making the scenario detailed and context-rich."""

                user_prompt = f"""
                Create a detailed sales scenario between a {profile.company_name} representative and this customer:

                {json.dumps(customer_persona, indent=2)}

                Company & Product Details:
                - Product: {profile.product_name}
                - Description: {profile.product_description}
                - Key Features: {", ".join(profile.key_features)}
                - Common Objections: {", ".join(profile.common_objections)}

                Use the following parameters to shape the scenario:
                - Conversation style: {conversation_style}
                - Conversation flow pattern: {conversation_flow}
                - Communication channel: {communication_channel} ({self.communication_channels[communication_channel]["formality"]} formality, {self.communication_channels[communication_channel]["response_time"]} response time)
                - Expected outcome: {"successful conversion" if expected_outcome else "no conversion"}

                Return ONLY a JSON object with this exact structure:
                {{
                    "customer_persona": {json.dumps(customer_persona)},
                    "sales_channel": "{communication_channel}",
                    "conversation_context": "Detailed background context for the conversation",
                    "customer_intent": "Specific reason why the customer initiated contact",
                    "customer_knowledge_level": "How much the customer already knows about the product or solution space",
                    "objection_focus": ["Specific objection 1", "Specific objection 2"],
                    "complexity": "simple/moderate/complex",
                    "expected_outcome": {"true" if expected_outcome else "false"},
                    "expected_conversion_probability": {expected_probability},
                    "conversation_style": "{conversation_style}",
                    "conversation_flow": "{conversation_flow}",
                    "critical_moment": "The turning point in the conversation where the outcome might be decided",
                    "time_pressure": "Whether there's urgency for a decision (none/some/high)",
                    "external_factors": ["Factor 1", "Factor 2"]
                }}

                Ensure your response is ONLY valid JSON with no additional text or markdown.
                """

                content = self._generate_completion(system_prompt, user_prompt, temperature=0.8)
                scenario_json = json.loads(content)

                # Ensure expected_outcome is a boolean
                if isinstance(scenario_json["expected_outcome"], str):
                    scenario_json["expected_outcome"] = scenario_json["expected_outcome"].lower() == "true"

                # Ensure expected_conversion_probability is a float
                if isinstance(scenario_json["expected_conversion_probability"], str):
                    scenario_json["expected_conversion_probability"] = float(scenario_json["expected_conversion_probability"])

                return scenario_json

            except Exception as e:
                logger.error(f"Error attempt {attempt+1}/{max_retries} generating scenario: {str(e)}")
                if attempt < max_retries - 1:
                    # Exponential backoff
                    backoff_time = (2 ** attempt) + random.uniform(0, 1)
                    time.sleep(backoff_time)
                else:
                    # Return a default scenario as fallback but with more variation
                    logger.warning(f"Using fallback scenario for {profile.company_name}")

                    # Randomize expected outcome
                    expected_outcome = random.choice([True, False])
                    expected_probability = 0.7 if expected_outcome else 0.3

                    return {
                        "customer_persona": customer_persona,
                        "sales_channel": random.choice(list(self.communication_channels.keys())),
                        "conversation_context": random.choice(["Initial inquiry", "Follow-up call", "Demo meeting", "Pricing discussion", "Implementation planning"]),
                        "customer_intent": random.choice(["Exploring options", "Comparing vendors", "Addressing urgent need", "Planning future implementation", "Evaluating potential ROI"]),
                        "customer_knowledge_level": random.choice(["minimal", "moderate", "extensive"]),
                        "objection_focus": profile.common_objections[:2] if len(profile.common_objections) >= 2 else ["Price", "Implementation"],
                        "complexity": random.choice(["simple", "moderate", "complex"]),
                        "expected_outcome": expected_outcome,
                        "expected_conversion_probability": expected_probability,
                        "conversation_style": random.choice(self.conversation_styles),
                        "conversation_flow": random.choice(self.conversation_flows),
                        "critical_moment": "Discussion of pricing and ROI",
                        "time_pressure": random.choice(["none", "some", "high"]),
                        "external_factors": ["Budget cycle", "Competitor evaluation"]
                    }

| 929 |
+
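    # A successfully parsed scenario looks roughly like this (values are hypothetical):
    #   {"customer_persona": {...}, "sales_channel": "email", "complexity": "moderate",
    #    "expected_outcome": True, "expected_conversion_probability": 0.7, ...}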
    def _apply_speech_patterns(self, text: str, patterns: List[str], probability: float = 0.3) -> str:
        """Apply realistic speech patterns to make text more human."""
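        # Illustrative effect (actual changes are random and pattern-dependent): with
        # patterns=["typos"], "pricing details" might come back as "pricign details".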
        if not text or not patterns or random.random() > probability:
            return text

        modified_text = text

        for pattern_name in patterns:
            # Find the pattern details
            pattern_details = next((p for p in self.speech_patterns if p["pattern"] == pattern_name), None)
            if not pattern_details or random.random() > 0.4:  # Only apply some patterns
                continue

            examples = pattern_details["examples"]

            if pattern_name == "filler_words":
                # Insert filler words
                words = modified_text.split()
                for i in range(len(words) - 1):
                    if random.random() < 0.1:  # 10% chance per position
                        filler = random.choice(examples)
                        words.insert(i + 1, filler)
                modified_text = " ".join(words)

            elif pattern_name == "typos":
                # Replace some words with typos
                words = modified_text.split()
                for i, word in enumerate(words):
                    if len(word) > 3 and random.random() < 0.05:  # 5% chance per word
                        # Simple typo simulation - swap two adjacent characters
                        char_list = list(word)
                        j = random.randint(0, len(char_list) - 2)
                        char_list[j], char_list[j + 1] = char_list[j + 1], char_list[j]
                        words[i] = "".join(char_list)
                modified_text = " ".join(words)

            elif pattern_name == "over_punctuation":
                # Add excessive punctuation
                for punct in ["!", "?", "..."]:
                    modified_text = modified_text.replace(punct, random.choice([punct, punct * 2, punct * 3]))

            elif pattern_name == "self_corrections":
                # Add self-corrections
                sentences = modified_text.split('. ')
                if len(sentences) > 1:
                    i = random.randint(0, len(sentences) - 1)
                    correction = random.choice(examples)
                    sentences[i] = f"{sentences[i]}... {correction}, {sentences[i]}"
                modified_text = '. '.join(sentences)

            elif pattern_name == "emphasis_capital":
                # Capitalize some words for emphasis
                words = modified_text.split()
                for i, word in enumerate(words):
                    if len(word) > 3 and word.isalpha() and random.random() < 0.05:
                        words[i] = word.upper()
                modified_text = " ".join(words)

        return modified_text

    def _apply_channel_formatting(self, message: str, channel: str) -> str:
        """Apply formatting specific to different communication channels."""
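        # Illustrative effect for channel="sms" (only messages over 100 characters are
        # eligible): "thanks" -> "thx", "meeting" -> "mtg", "tomorrow" -> "tmrw".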
        if channel not in self.communication_channels:
            return message

        channel_format = self.communication_channels[channel]
        modified_message = message

        # Apply channel-specific formatting
        if channel == "email":
            # Add email elements like signatures or formal greetings randomly
            if random.random() < 0.3 and "sales_rep" in message:
                signature = "\n\nBest regards,\n[Name]\n[Company]\n[Contact Info]"
                modified_message += signature

        elif channel == "live_chat":
            # Add chat elements like quick responses or emojis
            if random.random() < 0.4:
                emojis = ["👍", "😊", "👋", "👏", "🙌", "💯", "🤔", "📊", "📈"]
                if random.random() < 0.5:
                    modified_message += f" {random.choice(emojis)}"
                else:
                    modified_message = f"{random.choice(emojis)} {modified_message}"

        elif channel == "phone_call" or channel == "video_call":
            # Add verbal pause indicators
            verbal_pauses = ["*pauses*", "*brief silence*", "*thinking*"]
            if random.random() < 0.2:
                sentences = modified_message.split('. ')
                if len(sentences) > 1:
                    i = random.randint(0, len(sentences) - 1)
                    sentences[i] = f"{sentences[i]}. {random.choice(verbal_pauses)} "
                    modified_message = '. '.join(sentences)

        elif channel == "sms":
            # Shorten message and add abbreviations
            if len(modified_message) > 100 and random.random() < 0.5:
                # Replace some common words with abbreviations
                abbr = {"with": "w/", "without": "w/o", "thanks": "thx", "please": "pls",
                        "about": "abt", "meeting": "mtg", "tomorrow": "tmrw"}
                for word, abbr_word in abbr.items():
                    if random.random() < 0.5:
                        modified_message = re.sub(r'\b' + word + r'\b', abbr_word, modified_message, flags=re.IGNORECASE)

        return modified_message

    def _generate_conversation(self, profile: SaaSProfile, scenario: Dict) -> Dict:
        """Generate a complete human-like sales conversation with non-linear flows and realistic patterns."""
        max_retries = 3
        for attempt in range(max_retries):
            try:
                # Extract key scenario parameters for controlling the conversation
                channel = scenario.get("sales_channel", "email")
                conversation_style = scenario.get("conversation_style", "direct_professional")
                conversation_flow = scenario.get("conversation_flow", "standard_linear")
                expected_outcome = scenario.get("expected_outcome", True)

                # Prepare persona information
                persona = scenario.get("customer_persona", {})
                persona_name = persona.get("name", "Customer")
                speech_patterns = persona.get("preferred_speech_patterns",
                                              random.sample([p["pattern"] for p in self.speech_patterns], 2))

                # Prepare template parameters based on scenario
                min_messages = 8   # Minimum messages for a meaningful conversation
                max_messages = 18  # Maximum to keep conversations reasonably sized

                # Adjust message count based on complexity
                if scenario.get("complexity") == "simple":
                    target_messages = random.randint(8, 12)
                elif scenario.get("complexity") == "complex":
                    target_messages = random.randint(14, 18)
                else:  # moderate
                    target_messages = random.randint(10, 14)

                # Further customize conversation parameters based on flow type
                flow_params = {}
                if conversation_flow == "multiple_objection_loops":
                    flow_params["objections_to_raise"] = min(len(profile.common_objections), 3)
                    flow_params["objection_resolution_probability"] = 0.7 if expected_outcome else 0.4
                elif conversation_flow == "subject_switching":
                    flow_params["topic_switches"] = random.randint(2, 4)
                    flow_params["switch_probability"] = 0.3
                elif conversation_flow == "interrupted_followup":
                    flow_params["interruption_point"] = random.randint(3, 5)
                    flow_params["followup_delay"] = "a few hours" if channel in ["email", "live_chat"] else "a week"
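                # Illustrative flow_params for "interrupted_followup" (values are randomized
                # above): {"interruption_point": 4, "followup_delay": "a few hours"}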

                # Create a conversation prompt that allows for natural, human-like dialogue
                system_prompt = f"""You are an expert in creating realistic sales conversations.
Generate a JSON object with a natural, imperfect conversation between a sales rep and customer.

Important guidelines:
- The conversation should feel HUMAN and NATURAL, not scripted or perfect
- Include human elements: hesitations, typos, interruptions, tangents, repetition, in a millennial style
- Do NOT open with cheesy greetings like "Hi X, this is Y from Z company" unless natural for the context
- The customer should sometimes be unclear, ambiguous, or send incomplete thoughts
- People sometimes send multiple messages in a row
- Sometimes include pauses or pacing in the conversation
- Not every objection needs to be perfectly addressed
- Use natural language that matches the {channel} communication channel"""

                user_prompt = f"""
Generate a realistic {channel} sales conversation between a {profile.company_name} representative and a customer with:

CUSTOMER DETAILS:
Name: {persona_name}
Company: {persona.get('company', 'Company')}
Role: {persona.get('role', 'Role')}
Industry: {persona.get('industry', profile.industry)}
Current pain points: {', '.join(persona.get('pain_points', ['Unspecified']))}
Communication style: {persona.get('objection_style', 'direct')}
Tech expertise: {persona.get('technical_expertise', 'medium')}

CONVERSATION CONTEXT:
Channel: {channel}
Context: {scenario.get('conversation_context', 'Initial inquiry')}
Customer intent: {scenario.get('customer_intent', 'Exploring options')}
Flow pattern: {conversation_flow}
Style: {conversation_style}
Expected outcome: {"conversion" if expected_outcome else "no conversion"}
Primary objections: {', '.join(scenario.get('objection_focus', profile.common_objections[:2]))}

PRODUCT DETAILS:
Product: {profile.product_name}
Description: {profile.product_description}
Key features: {', '.join(profile.key_features)}
Value props: {', '.join(profile.value_propositions)}
Pricing: {profile.pricing_structure.get('model', 'tiered')} - {profile.pricing_structure.get('tiers', [{}])[0].get('price', '$X/month')}

Return ONLY a JSON object with this exact structure:
{{
"messages": [
{{"speaker": "customer/sales_rep", "message": "message text"}}
],
"outcome": {"true" if expected_outcome else "false"},
"key_objections": ["Specific objection 1", "Specific objection 2"],
"key_value_props_mentioned": ["Value prop 1", "Value prop 2"],
"customer_engagement_level": 0.X,
"sales_rep_effectiveness": 0.X,
"conversation_length": X,
"conversion_probability_at_turn": {{"0": 0.X, "1": 0.X}}
}}

Include {target_messages} messages in the conversation. The conversion_probability_at_turn should show
how the probability changes throughout the conversation.

IMPORTANT FORMATTING:
- Make messages sound like real humans typing/talking with millennial style - include occasional typos, hesitations, filler words
- Format the conversation appropriately for a {channel} conversation
- Messages should vary in length - some short, some longer
- Avoid perfect grammar and complete sentences when natural
- Use contractions, abbreviations, and casual language where appropriate
- For the customer, occasionally use multiple messages in a row
- Use natural greetings appropriate to the channel, not formulaic ones
"""

                content = self._generate_completion(system_prompt, user_prompt, temperature=0.9)
                conversation_json = json.loads(content)

                # Apply additional human-like formatting and speech patterns to each message
                for i, message in enumerate(conversation_json["messages"]):
                    speaker = message.get("speaker", "")
                    message_text = message.get("message", "")

                    # Apply appropriate speech patterns based on speaker
                    if speaker == "customer":
                        # Apply customer persona speech patterns
                        message_text = self._apply_speech_patterns(message_text, speech_patterns, 0.5)
                    else:
                        # Apply general speech patterns for sales rep
                        rep_patterns = random.sample([p["pattern"] for p in self.speech_patterns], 2)
                        message_text = self._apply_speech_patterns(message_text, rep_patterns, 0.3)

                    # Apply channel-specific formatting
                    message_text = self._apply_channel_formatting(message_text, channel)

                    # Apply non-linearity based on conversation flow
                    if conversation_flow == "subject_switching" and random.random() < flow_params.get("switch_probability", 0):
                        # Add topic switch indicators
                        switches = ["By the way", "Actually, I also wanted to ask", "On another note",
                                    "While we're talking", "That reminds me", "Before I forget"]
                        message_text += f" {random.choice(switches)}, {random.choice(profile.key_features)}?"

                    # Update the message
                    conversation_json["messages"][i]["message"] = message_text

                # Add markers for interrupted conversations if applicable
                if conversation_flow == "interrupted_followup":
                    interrupt_point = flow_params.get("interruption_point", 4)
                    if len(conversation_json["messages"]) > interrupt_point + 2:
                        # Add interruption marker
                        interrupt_idx = min(interrupt_point, len(conversation_json["messages"]) - 3)

                        # Add time passage marker
                        time_marker = {"speaker": "system", "message": f"--- {flow_params.get('followup_delay', 'some time')} later ---"}
                        conversation_json["messages"].insert(interrupt_idx + 1, time_marker)

                        # Add follow-up message from sales rep
                        followup = {"speaker": "sales_rep", "message": self._apply_speech_patterns(
                            f"Hi {persona_name}, I wanted to follow up on our previous conversation about {profile.product_name}. Have you had a chance to think more about it?",
                            random.sample([p["pattern"] for p in self.speech_patterns], 2)
                        )}
                        conversation_json["messages"].insert(interrupt_idx + 2, followup)

                # Ensure outcome is a boolean
                if isinstance(conversation_json["outcome"], str):
                    conversation_json["outcome"] = conversation_json["outcome"].lower() == "true"

                # Ensure numeric values are floats/ints
                if isinstance(conversation_json["customer_engagement_level"], str):
                    conversation_json["customer_engagement_level"] = float(conversation_json["customer_engagement_level"])

                if isinstance(conversation_json["sales_rep_effectiveness"], str):
                    conversation_json["sales_rep_effectiveness"] = float(conversation_json["sales_rep_effectiveness"])

                if isinstance(conversation_json["conversation_length"], str):
                    conversation_json["conversation_length"] = int(conversation_json["conversation_length"])

                # Ensure probability trajectory has numeric keys and values
                probability_trajectory = {}
                for k, v in conversation_json["conversion_probability_at_turn"].items():
                    if isinstance(v, str):
                        v = float(v)
                    probability_trajectory[int(k)] = float(v)
                conversation_json["conversion_probability_at_turn"] = probability_trajectory
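                # After normalization the trajectory maps turn index to probability,
                # e.g. (hypothetical values): {0: 0.5, 1: 0.55, 2: 0.62}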

                # Add scenario data to the conversation for reference
                conversation_json["scenario"] = {
                    "channel": channel,
                    "conversation_style": conversation_style,
                    "conversation_flow": conversation_flow
                }

                return conversation_json

            except Exception as e:
                logger.error(f"Error attempt {attempt+1}/{max_retries} generating conversation: {str(e)}")
                if attempt < max_retries - 1:
                    # Exponential backoff
                    backoff_time = (2 ** attempt) + random.uniform(0, 1)
                    time.sleep(backoff_time)
                else:
                    # Return a minimal conversation as fallback
                    logger.warning(f"Using fallback conversation for {profile.company_name}")
                    expected_outcome = scenario.get("expected_outcome", True)

                    # Create more varied fallback conversations
                    starters = [
                        "Hey, I've been looking around for a solution like yours.",
                        "I need some help with my current processes.",
                        "Been hearing about your product, got some questions.",
                        "Our team needs something better than what we're using.",
                        "Quick question about your pricing."
                    ]

                    responses = [
                        "Thanks for reaching out! What specific challenges are you facing?",
                        "Happy to help! What's your current setup like?",
                        "I'd be glad to answer questions. What would you like to know?",
                        "Sure thing. What's not working with your current solution?",
                        "Of course, I can walk you through our pricing. What's your use case?"
                    ]

                    # Generate a basic conversation with some variation
                    messages = [
                        {"speaker": "customer", "message": random.choice(starters)},
                        {"speaker": "sales_rep", "message": random.choice(responses)},
                        {"speaker": "customer", "message": f"We're looking for a {profile.product_name} solution. What makes your product different?"},
                        {"speaker": "sales_rep", "message": f"Our {profile.product_name} stands out because of {profile.value_propositions[0] if profile.value_propositions else 'its features'}. Many of our customers appreciate this."}
                    ]

                    # Add a bit more variation based on expected outcome
                    if expected_outcome:
                        messages.append({"speaker": "customer", "message": "That sounds interesting. Can you tell me more about your pricing?"})
                        messages.append({"speaker": "sales_rep", "message": f"Our pricing starts at {profile.pricing_structure.get('tiers', [{}])[0].get('price', '$X/month')}. Would you like to schedule a demo?"})
                        messages.append({"speaker": "customer", "message": "Yes, that would be helpful. Let's set something up for next week."})
                    else:
                        messages.append({"speaker": "customer", "message": "That's interesting, but I'm not sure it fits our needs right now. Let me think about it."})
                        messages.append({"speaker": "sales_rep", "message": f"I understand. Would it be helpful if I sent you some more information about our {profile.product_name}?"})
                        messages.append({"speaker": "customer", "message": "Maybe later. We're still evaluating other options at the moment."})

                    # Create probability trajectory
                    probability_trajectory = {}
                    for i in range(len(messages)):
                        if expected_outcome:
                            prob = min(0.5 + i * 0.07, 0.9)
                        else:
                            prob = max(0.5 - i * 0.07, 0.1)
                        probability_trajectory[i] = prob

                    return {
                        "messages": messages,
                        "outcome": expected_outcome,
                        "key_objections": scenario.get("objection_focus", ["Price", "Implementation"]),
                        "key_value_props_mentioned": profile.value_propositions[:2] if len(profile.value_propositions) >= 2 else ["Value", "Efficiency"],
                        "customer_engagement_level": 0.7 if expected_outcome else 0.4,
                        "sales_rep_effectiveness": 0.6 if expected_outcome else 0.5,
                        "conversation_length": len(messages),
                        "conversion_probability_at_turn": probability_trajectory,
                        "scenario": {
                            "channel": scenario.get("sales_channel", "email"),
                            "conversation_style": scenario.get("conversation_style", "direct_professional"),
                            "conversation_flow": scenario.get("conversation_flow", "standard_linear")
                        }
                    }

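    # Note: the fallback above returns the same keys as the LLM-generated conversation,
    # so downstream row construction can treat both paths identically.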
    def _generate_full_conversation(self, profile_idx: int, conversation_idx: int) -> Dict:
        """Generate a complete conversation from profile to final conversation data."""
        with self.profile_lock:
            profile = self.profiles[profile_idx]

        try:
            # Generate customer persona with more richness and diversity
            persona = self._generate_customer_persona(profile)

            # Generate conversation scenario with more variables
            scenario = self._generate_conversation_scenario(profile, persona)

            # Generate complete conversation with more natural and varied flow
            conversation_data = self._generate_conversation(profile, scenario)

            # Get conversation embeddings
            full_text = " ".join([msg["message"] for msg in conversation_data["messages"] if msg.get("speaker") != "system"])
            embeddings = self._get_embedding(full_text)

            # Create row data
            row_data = {
                'company_id': profile.company_id,
                'company_name': profile.company_name,
                'product_name': profile.product_name,
                'product_type': profile.industry,
                'conversation_id': f"{profile.company_id}-conv-{conversation_idx}",
                'scenario': json.dumps(scenario),
                'conversation': json.dumps(conversation_data["messages"]),
                'full_text': full_text,
                'outcome': 1 if conversation_data["outcome"] else 0,
                'conversation_length': conversation_data["conversation_length"],
                'customer_engagement': conversation_data["customer_engagement_level"],
                'sales_effectiveness': conversation_data["sales_rep_effectiveness"],
                'probability_trajectory': json.dumps(conversation_data["conversion_probability_at_turn"]),
                'conversation_style': scenario.get("conversation_style", "direct_professional"),
                'conversation_flow': scenario.get("conversation_flow", "standard_linear"),
                'communication_channel': scenario.get("sales_channel", "email")
            }

            # Add embeddings
            for j, embed_value in enumerate(embeddings):
                row_data[f'embedding_{j}'] = embed_value
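            # Each embedding dimension becomes its own wide-format column
            # (embedding_0, embedding_1, ...), matching the columns declared in generate_dataset().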

            # Update counter
            with self.total_counter_lock:
                self.total_generated += 1
                total = self.total_generated

            if total % 100 == 0:
                logger.info(f"Generated {total} conversations so far")

            return row_data

        except Exception as e:
            logger.error(f"Error generating conversation {conversation_idx} for profile {profile_idx}: {str(e)}")
            return None

    def _worker(self, task_queue, result_batch_size=5):
        """Worker function that processes tasks from the queue with immediate saving."""
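        # Rows are buffered locally and flushed in batches of result_batch_size, so at
        # most one small batch is lost if this worker dies unexpectedly.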
        batch = []

        while True:
            try:
                # Get task from queue with timeout
                task = task_queue.get(timeout=5)

                # Check for termination signal
                if task is None:
                    # Submit any remaining items in batch
                    if batch:
                        self.writer.write_rows(batch)
                    task_queue.task_done()
                    break

                # Add a random delay to stagger requests
                time.sleep(random.uniform(0.1, 0.5))

                # Process the task
                profile_idx, conv_idx = task
                row_data = self._generate_full_conversation(profile_idx, conv_idx)

                if row_data:
                    batch.append(row_data)

                # Write batch when it reaches the threshold - use smaller batch size (5 instead of 10)
                if len(batch) >= result_batch_size:
                    self.writer.write_rows(batch)
                    batch = []  # Clear batch after writing

                # Mark task as done
                task_queue.task_done()

            except queue.Empty:
                # Check if there are any items in the batch to write
                if batch:
                    self.writer.write_rows(batch)
                    batch = []
                continue
            except Exception as e:
                logger.error(f"Error in worker thread: {str(e)}")
                try:
                    # Write any collected data before potentially crashing
                    if batch:
                        self.writer.write_rows(batch)
                        batch = []
                    task_queue.task_done()
                except Exception:
                    pass

    def generate_dataset(
        self,
        num_conversations: int = 100000,
        num_profiles: int = 20,
        output_path: str = "enhanced_saas_sales_conversations.csv",
        num_threads: int = 25,
        result_batch_size: int = 5
    ) -> str:
        """
        Generate a complete dataset of diverse sales conversations using multithreading.

        Args:
            num_conversations: Total number of conversations to generate
            num_profiles: Number of unique SaaS profiles to use
            output_path: Path to save the CSV dataset
            num_threads: Number of worker threads to use
            result_batch_size: Number of items to batch before writing to CSV

        Returns:
            Path to the generated dataset
        """
        logger.info(f"Starting enhanced dataset generation: {num_conversations} conversations using {num_profiles} profiles with {num_threads} threads")

        start_time = time.time()

        # Generate SaaS profiles first (this is done in parallel already)
        self.profiles = self.generate_saas_profiles(num_profiles)

        # Initialize counters
        self.total_generated = 0
        self.api_calls = 0
        self.retry_count = 0
        self.last_retry_time = 0

        # Define columns for the dataset
        columns = ['company_id', 'company_name', 'product_name', 'product_type',
                   'conversation_id', 'scenario', 'conversation', 'full_text',
                   'outcome', 'conversation_length', 'customer_engagement',
                   'sales_effectiveness', 'probability_trajectory',
                   'conversation_style', 'conversation_flow', 'communication_channel']

        # Add embedding columns
        for i in range(3072):
            columns.append(f'embedding_{i}')

        # Initialize the writer
        self.writer = ResultWriter(output_path, columns)

        # Create task queue and populate it
        task_queue = queue.Queue()

        # Calculate conversations per profile
        conversations_per_profile = num_conversations // len(self.profiles)
        remaining = num_conversations % len(self.profiles)

        # Create tasks (profile_idx, conversation_idx) with more even distribution
        for profile_idx in range(len(self.profiles)):
            profile_conversations = conversations_per_profile
            if profile_idx < remaining:
                profile_conversations += 1

            for conv_idx in range(profile_conversations):
                task_queue.put((profile_idx, conv_idx))

        # Calculate effective thread count based on rate limits.
        # Each conversation makes about 4 API calls (persona, scenario, conversation, embedding),
        # and we only use about 60% of the rate limit (SAFE_RATE_LIMIT_RPM) for better stability.
        requests_per_conversation = 4
        requests_per_minute_per_thread = 10  # Conservative estimate
        target_threads = SAFE_RATE_LIMIT_RPM / requests_per_minute_per_thread

        # Determine effective thread count
        effective_threads = min(
            num_threads,          # User-requested threads
            int(target_threads),  # Rate-limit-based threads
            30,                   # Hard limit for stability
            task_queue.qsize()    # No more threads than tasks
        )
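        # Worked example with the CLI defaults: rate_limit=2500 gives SAFE_RATE_LIMIT_RPM=1500,
        # so target_threads = 1500 / 10 = 150 and effective_threads = min(25, 150, 30, tasks) = 25.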

        logger.info(f"Using {effective_threads} threads based on rate limit of {RATE_LIMIT_RPM} RPM")

        # Start worker threads
        workers = []
        for _ in range(effective_threads):
            worker = threading.Thread(target=self._worker, args=(task_queue, result_batch_size))
            worker.daemon = True
            worker.start()
            workers.append(worker)

        # Progress indicator and statistics tracker
        last_count = 0
        last_stats_time = time.time()
        with tqdm(total=num_conversations, desc="Generating conversations") as pbar:
            while self.total_generated < num_conversations:
                time.sleep(1)  # Update progress every second

                with self.total_counter_lock:
                    current_count = self.total_generated

                # Update progress bar
                delta = current_count - last_count
                if delta > 0:
                    pbar.update(delta)
                    last_count = current_count

                # Print statistics every 60 seconds
                now = time.time()
                if now - last_stats_time > 60:
                    # Calculate API call rate
                    with self.api_call_lock:
                        api_calls = self.api_calls

                    with self.retry_lock:
                        retries = self.retry_count

                    elapsed = now - start_time
                    calls_per_minute = (api_calls / elapsed) * 60

                    logger.info(f"Statistics - Generated: {current_count}, API calls: {api_calls} " +
                                f"({calls_per_minute:.1f}/min), Retries: {retries}")

                    last_stats_time = now

                # Check if we've reached our target or if the queue is empty
                if current_count >= num_conversations or task_queue.empty():
                    break

        # Signal workers to terminate
        for _ in range(effective_threads):
            task_queue.put(None)

        # Wait for all workers to finish (with timeout)
        for worker in workers:
            worker.join(timeout=30)

        end_time = time.time()
        duration = end_time - start_time

        # Calculate final statistics
        with self.api_call_lock:
            total_api_calls = self.api_calls

        with self.retry_lock:
            total_retries = self.retry_count

        logger.info("Dataset generation complete. Statistics:")
        logger.info(f"- Total conversations: {self.total_generated}")
        logger.info(f"- Total API calls: {total_api_calls}")
        logger.info(f"- Total retries: {total_retries}")
        logger.info(f"- Generation took {duration:.2f} seconds ({self.total_generated / duration:.2f} conversations/second)")
        logger.info(f"- API call rate: {(total_api_calls / duration) * 60:.1f} calls/minute")

        return output_path

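# Programmatic usage sketch (illustrative parameter values):
#   generator = EnhancedSaaSDatasetGenerator()
#   path = generator.generate_dataset(num_conversations=100, num_profiles=5,
#                                     output_path="sample.csv", num_threads=4)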
def main():
    """Main function to run the enhanced dataset generator."""
    import argparse

    parser = argparse.ArgumentParser(description="Enhanced SaaS Sales Dataset Generator")
    parser.add_argument("--num_conversations", type=int, default=100000,
                        help="Number of conversations to generate")
    parser.add_argument("--num_profiles", type=int, default=20,
                        help="Number of SaaS profiles to generate")
    parser.add_argument("--output_path", type=str, default="enhanced_saas_sales_conversations.csv",
                        help="Path to save the generated dataset")
    parser.add_argument("--num_threads", type=int, default=25,
                        help="Number of worker threads to use (will be rate-limited)")
    parser.add_argument("--rate_limit", type=int, default=2500,
                        help="API rate limit in requests per minute")
    parser.add_argument("--batch_size", type=int, default=5,
                        help="Number of conversations to batch before writing to CSV")

    args = parser.parse_args()

    # Update rate limit if provided
    global RATE_LIMIT_RPM, SAFE_RATE_LIMIT_RPM, RATE_LIMIT_RPS, MIN_REQUEST_INTERVAL
    if args.rate_limit:
        RATE_LIMIT_RPM = args.rate_limit
        SAFE_RATE_LIMIT_RPM = int(RATE_LIMIT_RPM * 0.6)
        RATE_LIMIT_RPS = SAFE_RATE_LIMIT_RPM / 60
        MIN_REQUEST_INTERVAL = 1.0 / RATE_LIMIT_RPS
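        # e.g. --rate_limit 2500 -> SAFE_RATE_LIMIT_RPM = 1500, RATE_LIMIT_RPS = 25.0,
        # MIN_REQUEST_INTERVAL = 0.04 seconds between requests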

    # Initialize generator
    generator = EnhancedSaaSDatasetGenerator()

    # Generate dataset
    output_path = generator.generate_dataset(
        num_conversations=args.num_conversations,
        num_profiles=args.num_profiles,
        output_path=args.output_path,
        num_threads=args.num_threads,
        result_batch_size=args.batch_size
    )

    print("\nEnhanced dataset generation complete!")
    print(f"Dataset saved to: {output_path}")

if __name__ == "__main__":
    main()

# Example invocation:
# python enhanced_saas_generator.py --num_conversations 100000 --num_profiles 20 --output_path custom_dataset.csv --num_threads 15 --rate_limit 2000 --batch_size 10