github-actions committed on
Commit
1152c12
·
1 Parent(s): 0f6cd86

Sync from GitHub

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. hf_space/data/loader.py +1 -1
  2. hf_space/hf_space/hf_space/hf_space/app.py +2 -19
  3. hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +61 -89
  4. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +89 -59
  5. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +29 -42
  6. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +15 -19
  7. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +13 -36
  8. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +47 -12
  9. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +11 -18
  10. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +74 -23
  11. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +72 -33
  12. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +25 -66
  13. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +65 -15
  14. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +36 -77
  15. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +5 -6
  16. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +6 -8
  17. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +9 -5
  18. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +32 -28
  19. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +16 -15
  20. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +32 -38
  21. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +33 -30
  22. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +41 -25
  23. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +59 -69
  24. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +4 -4
  25. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +19 -7
  26. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +25 -32
  27. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +20 -45
  28. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +65 -38
  29. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +38 -27
  30. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +45 -38
  31. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +8 -9
  32. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +55 -32
  33. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +72 -6
  34. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +14 -40
  35. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +51 -22
  36. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +22 -34
  37. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +36 -26
  38. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +14 -36
  39. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +30 -18
  40. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +26 -24
  41. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +22 -98
  42. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +49 -23
  43. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +56 -45
  44. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +75 -26
  45. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py +40 -0
  46. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/engine/trend_engine.py +36 -0
  47. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py +33 -48
  48. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py +23 -47
  49. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md +9 -13
  50. hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/Dockerfile +21 -16
hf_space/data/loader.py CHANGED
@@ -7,7 +7,7 @@ import streamlit as st
7
 
8
  # --- GLOBAL CONSTANTS ---
9
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
- FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
  REPO_ID = "P2SAMAPA/etf_trend_data"
12
  FILENAME = "market_data.csv"
13
 
 
7
 
8
  # --- GLOBAL CONSTANTS ---
9
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
+ FI_TICKERS = ["TLT", "IEF", "TIP", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
  REPO_ID = "P2SAMAPA/etf_trend_data"
12
  FILENAME = "market_data.csv"
13
 
hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -6,27 +6,22 @@ from engine.trend_engine import run_trend_module
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
9
- # Initial Data Load
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
13
  with st.sidebar:
14
  st.header("πŸ—‚οΈ Configuration")
15
-
16
  if st.session_state.master_data is None:
17
  if st.button("πŸš€ Seed Database"):
18
  st.session_state.master_data = seed_dataset_from_scratch()
19
  st.rerun()
20
  else:
21
- # Show database status
22
  st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
23
 
24
- # Sync Action
25
  if st.button("πŸ”„ Sync New Data"):
26
  updated_df, status_code = sync_incremental_data(st.session_state.master_data)
27
  st.session_state.master_data = updated_df
28
 
29
- # Map logical codes to UI messages
30
  messages = {
31
  "success": "βœ… Data refreshed",
32
  "already_current": "ℹ️ Already up-to-date",
@@ -34,33 +29,26 @@ with st.sidebar:
34
  "api_failure": "❌ Connection/API issue",
35
  "error": "❌ Critical Error"
36
  }
37
- # Save message to session state so it survives the rerun
38
  st.session_state.sync_status = messages.get(status_code, "❓ Unknown Status")
39
  st.rerun()
40
 
41
- # Persistent status display
42
  if 'sync_status' in st.session_state:
43
  st.sidebar.info(st.session_state.sync_status)
44
 
45
  st.divider()
46
-
47
- # Strategy Parameters
48
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
49
  sub_option = st.selectbox("Conviction Strategy",
50
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
51
  start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
52
  vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
53
-
54
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
55
 
56
  if st.session_state.master_data is not None:
57
  if run_btn:
58
- # Configuration mapping
59
  is_fi = "Option A" in option
60
  univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
61
  bench = "AGG" if is_fi else "SPY"
62
 
63
- # Compute Results
64
  results = run_trend_module(st.session_state.master_data[univ],
65
  st.session_state.master_data[bench],
66
  st.session_state.master_data['SOFR_ANNUAL'],
@@ -68,23 +56,20 @@ if st.session_state.master_data is not None:
68
 
69
  st.title(f"πŸ“Š {option}: {sub_option}")
70
 
71
- # Display Metrics
72
  m1, m2, m3, m4 = st.columns(4)
73
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
74
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
75
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
76
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
77
 
78
- # Performance Visualization
79
  fig = go.Figure()
80
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
81
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
82
- fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark", hovermode="x unified")
83
  st.plotly_chart(fig, use_container_width=True)
84
 
85
  st.divider()
86
  col_l, col_r = st.columns([1, 1.5])
87
-
88
  with col_l:
89
  st.subheader(f"🎯 Target Allocation: {results['next_day']}")
90
  weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
@@ -95,6 +80,4 @@ if st.session_state.master_data is not None:
95
  st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
96
  st.markdown("Strategy uses 50/200 SMA filters, conviction ranking, and 60-day volatility targeting.")
97
  else:
98
- st.info("πŸ’‘ Adjust your parameters and click 'Run Analysis'.")
99
- else:
100
- st.warning("⚠️ Data source missing. Please Seed or check HF Token.")
 
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
 
9
  if 'master_data' not in st.session_state:
10
  st.session_state.master_data = load_from_hf()
11
 
12
  with st.sidebar:
13
  st.header("πŸ—‚οΈ Configuration")
 
14
  if st.session_state.master_data is None:
15
  if st.button("πŸš€ Seed Database"):
16
  st.session_state.master_data = seed_dataset_from_scratch()
17
  st.rerun()
18
  else:
 
19
  st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
20
 
 
21
  if st.button("πŸ”„ Sync New Data"):
22
  updated_df, status_code = sync_incremental_data(st.session_state.master_data)
23
  st.session_state.master_data = updated_df
24
 
 
25
  messages = {
26
  "success": "βœ… Data refreshed",
27
  "already_current": "ℹ️ Already up-to-date",
 
29
  "api_failure": "❌ Connection/API issue",
30
  "error": "❌ Critical Error"
31
  }
 
32
  st.session_state.sync_status = messages.get(status_code, "❓ Unknown Status")
33
  st.rerun()
34
 
 
35
  if 'sync_status' in st.session_state:
36
  st.sidebar.info(st.session_state.sync_status)
37
 
38
  st.divider()
 
 
39
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
40
  sub_option = st.selectbox("Conviction Strategy",
41
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
42
  start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
43
  vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
 
44
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
45
 
46
  if st.session_state.master_data is not None:
47
  if run_btn:
 
48
  is_fi = "Option A" in option
49
  univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
50
  bench = "AGG" if is_fi else "SPY"
51
 
 
52
  results = run_trend_module(st.session_state.master_data[univ],
53
  st.session_state.master_data[bench],
54
  st.session_state.master_data['SOFR_ANNUAL'],
 
56
 
57
  st.title(f"πŸ“Š {option}: {sub_option}")
58
 
 
59
  m1, m2, m3, m4 = st.columns(4)
60
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
61
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
62
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
63
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
64
 
 
65
  fig = go.Figure()
66
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
67
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
68
+ fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark")
69
  st.plotly_chart(fig, use_container_width=True)
70
 
71
  st.divider()
72
  col_l, col_r = st.columns([1, 1.5])
 
73
  with col_l:
74
  st.subheader(f"🎯 Target Allocation: {results['next_day']}")
75
  weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
 
80
  st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
81
  st.markdown("Strategy uses 50/200 SMA filters, conviction ranking, and 60-day volatility targeting.")
82
  else:
83
+ st.info("πŸ’‘ Adjust settings and click 'Run Analysis'.")
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,100 +1,72 @@
1
- import streamlit as st
2
  import pandas as pd
3
- import plotly.graph_objects as go
4
- from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
5
- from engine.trend_engine import run_trend_module
 
 
6
 
7
- st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
 
 
 
 
8
 
9
- # Initial Data Load
10
- if 'master_data' not in st.session_state:
11
- st.session_state.master_data = load_from_hf()
12
 
13
- with st.sidebar:
14
- st.header("πŸ—‚οΈ Configuration")
15
-
16
- if st.session_state.master_data is None:
17
- if st.button("πŸš€ Seed Database"):
18
- st.session_state.master_data = seed_dataset_from_scratch()
19
- st.rerun()
20
- else:
21
- # Show database status
22
- st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
23
-
24
- # Sync Action
25
- if st.button("πŸ”„ Sync New Data"):
26
- updated_df, status_code = sync_incremental_data(st.session_state.master_data)
27
- st.session_state.master_data = updated_df
28
-
29
- # Map logical codes to UI messages
30
- messages = {
31
- "success": "βœ… Data refreshed",
32
- "already_current": "ℹ️ Already up-to-date",
33
- "no_new_data_yet": "⏳ Market not yet closed",
34
- "api_failure": "❌ Connection/API issue",
35
- "error": "❌ Critical Error"
36
- }
37
- # Save message to session state so it survives the rerun
38
- st.session_state.sync_status = messages.get(status_code, "❓ Unknown Status")
39
- st.rerun()
40
 
41
- # Persistent status display
42
- if 'sync_status' in st.session_state:
43
- st.sidebar.info(st.session_state.sync_status)
 
 
 
 
 
 
 
 
 
 
44
 
45
- st.divider()
46
-
47
- # Strategy Parameters
48
- option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
49
- sub_option = st.selectbox("Conviction Strategy",
50
- ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
51
- start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
52
- vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
53
 
54
- run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
55
-
56
- if st.session_state.master_data is not None:
57
- if run_btn:
58
- # Configuration mapping
59
- is_fi = "Option A" in option
60
- univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
61
- bench = "AGG" if is_fi else "SPY"
62
-
63
- # Compute Results
64
- results = run_trend_module(st.session_state.master_data[univ],
65
- st.session_state.master_data[bench],
66
- st.session_state.master_data['SOFR_ANNUAL'],
67
- vol_target, start_yr, sub_option)
68
-
69
- st.title(f"πŸ“Š {option}: {sub_option}")
70
-
71
- # Display Metrics
72
- m1, m2, m3, m4 = st.columns(4)
73
- m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
74
- m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
75
- m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
76
- m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
77
 
78
- # Performance Visualization
79
- fig = go.Figure()
80
- fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
81
- fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
82
- fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark", hovermode="x unified")
83
- st.plotly_chart(fig, use_container_width=True)
84
 
85
- st.divider()
86
- col_l, col_r = st.columns([1, 1.5])
 
87
 
88
- with col_l:
89
- st.subheader(f"🎯 Target Allocation: {results['next_day']}")
90
- weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
91
- weights['CASH (SOFR)'] = results['cash_weight']
92
- st.table(pd.DataFrame.from_dict(weights, orient='index', columns=['Weight']).style.format("{:.2%}"))
93
 
94
- with col_r:
95
- st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
96
- st.markdown("Strategy uses 50/200 SMA filters, conviction ranking, and 60-day volatility targeting.")
97
- else:
98
- st.info("πŸ’‘ Adjust your parameters and click 'Run Analysis'.")
99
- else:
100
- st.warning("⚠️ Data source missing. Please Seed or check HF Token.")
 
 
1
  import pandas as pd
2
+ import pandas_datareader.data as web
3
+ import yfinance as yf
4
+ from huggingface_hub import hf_hub_download, HfApi
5
+ import os
6
+ import streamlit as st
7
 
8
# --- GLOBAL CONSTANTS ---
# SPDR sector/industry equity ETF universe ("Option B - Equity Trend").
X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
# Fixed-income / rate-sensitive ETF universe ("Option A - FI Trend").
# NOTE(review): includes TBT here, but another hunk of this same commit removes
# TBT from FI_TICKERS — confirm which list is intended.
FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
# Hugging Face dataset repo used as the persistent price store.
REPO_ID = "P2SAMAPA/etf_trend_data"
FILENAME = "market_data.csv"
13
 
14
def get_safe_token():
    """Return the Hugging Face auth token, or None if not configured.

    Looks in Streamlit secrets first, then falls back to the HF_TOKEN
    environment variable.
    """
    try:
        return st.secrets["HF_TOKEN"]
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt/
    # SystemExit): st.secrets may raise when no secrets file exists or the
    # key is missing, in which case we fall back to the environment.
    except Exception:
        return os.getenv("HF_TOKEN")
17
 
18
def load_from_hf():
    """Fetch the cached market-data CSV from the HF dataset repo.

    Returns a date-indexed, forward-filled DataFrame, or None when no token
    is configured or the download/parse fails (caller treats None as
    "database needs seeding").
    """
    token = get_safe_token()
    if not token:
        return None
    try:
        path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
        df = pd.read_csv(path, index_col=0, parse_dates=True)
        return df.ffill()
    # Narrowed from a bare `except:`; load is deliberately best-effort.
    except Exception:
        return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
def seed_dataset_from_scratch():
    """Download the full price history from 2008, persist it, and return it.

    Builds the combined ETF universe plus benchmarks, downloads adjusted
    closes via yfinance, attaches the SOFR rate (as an annual fraction) from
    FRED, writes the result to FILENAME and uploads it to the HF dataset repo.
    """
    tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
    data = yf.download(tickers, start="2008-01-01", progress=False)
    # yfinance omits 'Adj Close' when auto-adjusting; fall back to 'Close'.
    master_df = data['Adj Close'] if 'Adj Close' in data.columns else data['Close']
    try:
        sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
        # DataReader returns a one-column DataFrame; squeeze it to a Series so
        # the column assignment aligns on the date index instead of relying on
        # DataFrame-into-column assignment (which can fail or misalign).
        master_df['SOFR_ANNUAL'] = sofr.squeeze("columns") / 100
    # Narrowed from a bare `except:`: FRED being unreachable should not abort
    # seeding — fall back to a flat 4.5% annual rate.
    except Exception:
        master_df['SOFR_ANNUAL'] = 0.045
    master_df = master_df.sort_index().ffill()
    master_df.to_csv(FILENAME)
    upload_to_hf(FILENAME)
    return master_df
40
 
41
def sync_incremental_data(df):
    """Append trading days newer than df's last row and persist the result.

    Parameters
    ----------
    df : pd.DataFrame or None
        The existing date-indexed master dataset.

    Returns
    -------
    (pd.DataFrame or None, str)
        The (possibly updated) dataset and a status code: "error",
        "already_current", "no_new_data_yet", "success", or "api_failure".
    """
    if df is None:
        return None, "error"
    last_date = pd.to_datetime(df.index.max()).date()
    today = pd.Timestamp.now().date()

    if last_date >= today:
        return df, "already_current"

    sync_start = last_date + pd.Timedelta(days=1)
    tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
    try:
        new_data_raw = yf.download(tickers, start=sync_start, progress=False)
        if new_data_raw is None or new_data_raw.empty:
            return df, "no_new_data_yet"

        # yfinance omits 'Adj Close' when auto-adjusting; fall back to 'Close'.
        new_data = new_data_raw['Adj Close'] if 'Adj Close' in new_data_raw.columns else new_data_raw['Close']
        combined = pd.concat([df, new_data]).sort_index()
        # Prefer the freshly downloaded rows wherever dates overlap.
        combined = combined[~combined.index.duplicated(keep='last')].ffill()

        combined.to_csv(FILENAME)
        upload_to_hf(FILENAME)
        return combined, "success"
    # Narrowed from a bare `except:`: a network/API failure leaves the
    # caller's existing data untouched and is reported, not raised.
    except Exception:
        return df, "api_failure"
65
 
66
def upload_to_hf(path):
    """Best-effort upload of *path* to the HF dataset repo.

    Silently does nothing when no token is configured or the upload fails:
    persistence must never break the interactive app.
    """
    token = get_safe_token()
    if not token:
        return
    api = HfApi()
    try:
        api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
    # Narrowed from a bare `except:`; swallowing is deliberate (best-effort).
    except Exception:
        pass
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,70 +1,100 @@
1
- import pandas as pd
2
- import pandas_datareader.data as web
3
- import yfinance as yf
4
- from huggingface_hub import hf_hub_download, HfApi
5
- import os
6
  import streamlit as st
 
 
 
 
7
 
8
- # --- GLOBAL CONSTANTS ---
9
- X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
- FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
-
12
- REPO_ID = "P2SAMAPA/etf_trend_data"
13
- FILENAME = "market_data.csv"
14
 
15
- def get_safe_token():
16
- try: return st.secrets["HF_TOKEN"]
17
- except: return os.getenv("HF_TOKEN")
18
 
19
- def load_from_hf():
20
- token = get_safe_token()
21
- if not token: return None
22
- try:
23
- path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
24
- df = pd.read_csv(path, index_col=0, parse_dates=True)
25
- return df.ffill()
26
- except: return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- def seed_dataset_from_scratch():
29
- tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
30
- data = yf.download(tickers, start="2008-01-01", progress=False)
31
- master_df = data['Adj Close'] if 'Adj Close' in data.columns else data['Close']
32
- try:
33
- sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
34
- master_df['SOFR_ANNUAL'] = sofr / 100
35
- except:
36
- master_df['SOFR_ANNUAL'] = 0.045
37
- master_df = master_df.sort_index().ffill()
38
- master_df.to_csv(FILENAME)
39
- upload_to_hf(FILENAME)
40
- return master_df
41
 
42
- def sync_incremental_data(df):
43
- """Syncs data and returns (df, success_bool)"""
44
- if df is None: return None, False
45
- last_date = pd.to_datetime(df.index.max())
46
- sync_start = last_date + pd.Timedelta(days=1)
47
 
48
- if sync_start > pd.Timestamp.now().normalize():
49
- return df, False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
- tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
52
- try:
53
- new_data_raw = yf.download(tickers, start=sync_start, progress=False)
54
- if new_data_raw.empty: return df, False
 
 
55
 
56
- new_data = new_data_raw['Adj Close'] if 'Adj Close' in new_data_raw.columns else new_data_raw['Close']
57
- combined = pd.concat([df, new_data]).sort_index()
58
- combined = combined[~combined.index.duplicated(keep='last')].ffill()
59
 
60
- combined.to_csv(FILENAME)
61
- upload_to_hf(FILENAME)
62
- return combined, True
63
- except:
64
- return df, False
65
 
66
- def upload_to_hf(path):
67
- token = get_safe_token()
68
- if token:
69
- api = HfApi()
70
- api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import pandas as pd
3
+ import plotly.graph_objects as go
4
+ from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
5
+ from engine.trend_engine import run_trend_module
6
 
7
+ st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
 
 
 
 
 
8
 
9
+ # Initial Data Load
10
+ if 'master_data' not in st.session_state:
11
+ st.session_state.master_data = load_from_hf()
12
 
13
+ with st.sidebar:
14
+ st.header("πŸ—‚οΈ Configuration")
15
+
16
+ if st.session_state.master_data is None:
17
+ if st.button("πŸš€ Seed Database"):
18
+ st.session_state.master_data = seed_dataset_from_scratch()
19
+ st.rerun()
20
+ else:
21
+ # Show database status
22
+ st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
23
+
24
+ # Sync Action
25
+ if st.button("πŸ”„ Sync New Data"):
26
+ updated_df, status_code = sync_incremental_data(st.session_state.master_data)
27
+ st.session_state.master_data = updated_df
28
+
29
+ # Map logical codes to UI messages
30
+ messages = {
31
+ "success": "βœ… Data refreshed",
32
+ "already_current": "ℹ️ Already up-to-date",
33
+ "no_new_data_yet": "⏳ Market not yet closed",
34
+ "api_failure": "❌ Connection/API issue",
35
+ "error": "❌ Critical Error"
36
+ }
37
+ # Save message to session state so it survives the rerun
38
+ st.session_state.sync_status = messages.get(status_code, "❓ Unknown Status")
39
+ st.rerun()
40
 
41
+ # Persistent status display
42
+ if 'sync_status' in st.session_state:
43
+ st.sidebar.info(st.session_state.sync_status)
 
 
 
 
 
 
 
 
 
 
44
 
45
+ st.divider()
 
 
 
 
46
 
47
+ # Strategy Parameters
48
+ option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
49
+ sub_option = st.selectbox("Conviction Strategy",
50
+ ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
51
+ start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
52
+ vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
53
+
54
+ run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
55
+
56
+ if st.session_state.master_data is not None:
57
+ if run_btn:
58
+ # Configuration mapping
59
+ is_fi = "Option A" in option
60
+ univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
61
+ bench = "AGG" if is_fi else "SPY"
62
+
63
+ # Compute Results
64
+ results = run_trend_module(st.session_state.master_data[univ],
65
+ st.session_state.master_data[bench],
66
+ st.session_state.master_data['SOFR_ANNUAL'],
67
+ vol_target, start_yr, sub_option)
68
+
69
+ st.title(f"πŸ“Š {option}: {sub_option}")
70
+
71
+ # Display Metrics
72
+ m1, m2, m3, m4 = st.columns(4)
73
+ m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
74
+ m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
75
+ m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
76
+ m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
77
 
78
+ # Performance Visualization
79
+ fig = go.Figure()
80
+ fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
81
+ fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
82
+ fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark", hovermode="x unified")
83
+ st.plotly_chart(fig, use_container_width=True)
84
 
85
+ st.divider()
86
+ col_l, col_r = st.columns([1, 1.5])
 
87
 
88
+ with col_l:
89
+ st.subheader(f"🎯 Target Allocation: {results['next_day']}")
90
+ weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
91
+ weights['CASH (SOFR)'] = results['cash_weight']
92
+ st.table(pd.DataFrame.from_dict(weights, orient='index', columns=['Weight']).style.format("{:.2%}"))
93
 
94
+ with col_r:
95
+ st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
96
+ st.markdown("Strategy uses 50/200 SMA filters, conviction ranking, and 60-day volatility targeting.")
97
+ else:
98
+ st.info("πŸ’‘ Adjust your parameters and click 'Run Analysis'.")
99
+ else:
100
+ st.warning("⚠️ Data source missing. Please Seed or check HF Token.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -6,7 +6,7 @@ from engine.trend_engine import run_trend_module
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
9
- # Initialize Session State safely
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
@@ -18,28 +18,33 @@ with st.sidebar:
18
  st.session_state.master_data = seed_dataset_from_scratch()
19
  st.rerun()
20
  else:
 
21
  st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
22
 
23
- # Sync Button with Persistent UI Feedback
24
  if st.button("πŸ”„ Sync New Data"):
25
- updated_df, success = sync_incremental_data(st.session_state.master_data)
26
  st.session_state.master_data = updated_df
27
 
28
- # Store the result so it survives the rerun
29
- if success:
30
- st.session_state.sync_status = "βœ… Data refreshed"
31
- else:
32
- st.session_state.sync_status = "❌ Data refresh failed"
33
-
 
 
 
 
34
  st.rerun()
35
 
36
- # Display the persisted status message if it exists in state
37
  if 'sync_status' in st.session_state:
38
- st.sidebar.write(st.session_state.sync_status)
39
 
40
  st.divider()
41
 
42
- # Strategy Inputs
43
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
44
  sub_option = st.selectbox("Conviction Strategy",
45
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
@@ -50,12 +55,12 @@ with st.sidebar:
50
 
51
  if st.session_state.master_data is not None:
52
  if run_btn:
53
- # Determine Universe and Benchmark
54
  is_fi = "Option A" in option
55
  univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
56
  bench = "AGG" if is_fi else "SPY"
57
 
58
- # Run the Quantitative Engine
59
  results = run_trend_module(st.session_state.master_data[univ],
60
  st.session_state.master_data[bench],
61
  st.session_state.master_data['SOFR_ANNUAL'],
@@ -63,51 +68,33 @@ if st.session_state.master_data is not None:
63
 
64
  st.title(f"πŸ“Š {option}: {sub_option}")
65
 
66
- # Row 1: Key Performance Metrics
67
  m1, m2, m3, m4 = st.columns(4)
68
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
69
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
70
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
71
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
72
 
73
- # Row 2: Performance Chart
74
  fig = go.Figure()
75
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
76
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
77
- fig.update_layout(
78
- title="Out-of-Sample Cumulative Performance",
79
- template="plotly_dark",
80
- xaxis_title="Timeline",
81
- yaxis_title="Growth of $1.00",
82
- hovermode="x unified"
83
- )
84
  st.plotly_chart(fig, use_container_width=True)
85
 
86
- # Row 3: Methodology & Next-Day Allocations
87
  st.divider()
88
- col_left, col_right = st.columns([1, 1.5])
89
 
90
- with col_left:
91
  st.subheader(f"🎯 Target Allocation: {results['next_day']}")
92
- # Filter out zero weights for the display table
93
  weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
94
  weights['CASH (SOFR)'] = results['cash_weight']
95
-
96
- df_weights = pd.DataFrame.from_dict(weights, orient='index', columns=['Weight'])
97
- st.table(df_weights.style.format("{:.2%}"))
98
 
99
- with col_right:
100
  st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
101
- st.markdown(f"""
102
- This strategy implements the **2025 Charles H. Dow Award** winning framework by **Andrea Zarattini** and **Michael Antonacci**.
103
-
104
- 1. **Regime Identification**: A dual 50/200-day SMA filter determines asset eligibility.
105
- 2. **Conviction Ranking**: Assets are ranked by their distance from the 200-day SMA (Trend Strength).
106
- 3. **Concentrated Sizing**: In **{sub_option}** mode, the risk budget is focused only on top leaders.
107
- 4. **Volatility Targeting**: Allocations are sized inversely to 60-day volatility to maintain a stable **{vol_target:.0%}** risk profile.
108
- 5. **Cash Buffer**: Remaining budget earns the live SOFR rate (Federal Reserve Bank of New York).
109
- """)
110
  else:
111
- st.info("πŸ’‘ Adjust your risk parameters in the sidebar and click 'Run Analysis' to see predicted allocations.")
112
  else:
113
- st.warning("⚠️ No data found. Please check your HF_TOKEN or click 'Seed Database'.")
 
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
9
+ # Initial Data Load
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
 
18
  st.session_state.master_data = seed_dataset_from_scratch()
19
  st.rerun()
20
  else:
21
+ # Show database status
22
  st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
23
 
24
+ # Sync Action
25
  if st.button("πŸ”„ Sync New Data"):
26
+ updated_df, status_code = sync_incremental_data(st.session_state.master_data)
27
  st.session_state.master_data = updated_df
28
 
29
+ # Map logical codes to UI messages
30
+ messages = {
31
+ "success": "βœ… Data refreshed",
32
+ "already_current": "ℹ️ Already up-to-date",
33
+ "no_new_data_yet": "⏳ Market not yet closed",
34
+ "api_failure": "❌ Connection/API issue",
35
+ "error": "❌ Critical Error"
36
+ }
37
+ # Save message to session state so it survives the rerun
38
+ st.session_state.sync_status = messages.get(status_code, "❓ Unknown Status")
39
  st.rerun()
40
 
41
+ # Persistent status display
42
  if 'sync_status' in st.session_state:
43
+ st.sidebar.info(st.session_state.sync_status)
44
 
45
  st.divider()
46
 
47
+ # Strategy Parameters
48
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
49
  sub_option = st.selectbox("Conviction Strategy",
50
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
 
55
 
56
  if st.session_state.master_data is not None:
57
  if run_btn:
58
+ # Configuration mapping
59
  is_fi = "Option A" in option
60
  univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
61
  bench = "AGG" if is_fi else "SPY"
62
 
63
+ # Compute Results
64
  results = run_trend_module(st.session_state.master_data[univ],
65
  st.session_state.master_data[bench],
66
  st.session_state.master_data['SOFR_ANNUAL'],
 
68
 
69
  st.title(f"πŸ“Š {option}: {sub_option}")
70
 
71
+ # Display Metrics
72
  m1, m2, m3, m4 = st.columns(4)
73
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
74
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
75
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
76
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
77
 
78
+ # Performance Visualization
79
  fig = go.Figure()
80
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
81
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
82
+ fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark", hovermode="x unified")
 
 
 
 
 
 
83
  st.plotly_chart(fig, use_container_width=True)
84
 
 
85
  st.divider()
86
+ col_l, col_r = st.columns([1, 1.5])
87
 
88
+ with col_l:
89
  st.subheader(f"🎯 Target Allocation: {results['next_day']}")
 
90
  weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
91
  weights['CASH (SOFR)'] = results['cash_weight']
92
+ st.table(pd.DataFrame.from_dict(weights, orient='index', columns=['Weight']).style.format("{:.2%}"))
 
 
93
 
94
+ with col_r:
95
  st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
96
+ st.markdown("Strategy uses 50/200 SMA filters, conviction ranking, and 60-day volatility targeting.")
 
 
 
 
 
 
 
 
97
  else:
98
+ st.info("πŸ’‘ Adjust your parameters and click 'Run Analysis'.")
99
  else:
100
+ st.warning("⚠️ Data source missing. Please Seed or check HF Token.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -13,34 +13,30 @@ if 'master_data' not in st.session_state:
13
  with st.sidebar:
14
  st.header("πŸ—‚οΈ Configuration")
15
 
16
- # Database Initialization / Status
17
  if st.session_state.master_data is None:
18
  if st.button("πŸš€ Seed Database"):
19
  st.session_state.master_data = seed_dataset_from_scratch()
20
  st.rerun()
21
  else:
22
- # Display the last available date in the dataset
23
- last_db_date = st.session_state.master_data.index.max().date()
24
- st.success(f"DB Last Entry: {last_db_date}")
25
 
26
- # Sync Button with UI Feedback
27
  if st.button("πŸ”„ Sync New Data"):
28
- with st.spinner("Checking Stooq & yFinance..."):
29
- # Captures the updated dataframe and the status message from the loader
30
- updated_df, status_msg = sync_incremental_data(st.session_state.master_data)
31
- st.session_state.master_data = updated_df
32
-
33
- # Visual Feedback Logic
34
- if "Refreshed" in status_msg and "Not" not in status_msg:
35
- st.sidebar.success(status_msg)
36
- st.toast(status_msg, icon="βœ…")
37
- else:
38
- st.sidebar.warning(status_msg)
39
- st.toast(status_msg, icon="⚠️")
40
 
41
- # Force UI to refresh to show new dates/data
42
  st.rerun()
43
 
 
 
 
 
44
  st.divider()
45
 
46
  # Strategy Inputs
@@ -114,4 +110,4 @@ if st.session_state.master_data is not None:
114
  else:
115
  st.info("πŸ’‘ Adjust your risk parameters in the sidebar and click 'Run Analysis' to see predicted allocations.")
116
  else:
117
- st.warning("⚠️ No data found. Please provide a Hugging Face Token in Secrets or click 'Seed Database'.")
 
13
  with st.sidebar:
14
  st.header("πŸ—‚οΈ Configuration")
15
 
 
16
  if st.session_state.master_data is None:
17
  if st.button("πŸš€ Seed Database"):
18
  st.session_state.master_data = seed_dataset_from_scratch()
19
  st.rerun()
20
  else:
21
+ st.success(f"DB Last Entry: {st.session_state.master_data.index.max().date()}")
 
 
22
 
23
+ # Sync Button with Persistent UI Feedback
24
  if st.button("πŸ”„ Sync New Data"):
25
+ updated_df, success = sync_incremental_data(st.session_state.master_data)
26
+ st.session_state.master_data = updated_df
27
+
28
+ # Store the result so it survives the rerun
29
+ if success:
30
+ st.session_state.sync_status = "βœ… Data refreshed"
31
+ else:
32
+ st.session_state.sync_status = "❌ Data refresh failed"
 
 
 
 
33
 
 
34
  st.rerun()
35
 
36
+ # Display the persisted status message if it exists in state
37
+ if 'sync_status' in st.session_state:
38
+ st.sidebar.write(st.session_state.sync_status)
39
+
40
  st.divider()
41
 
42
  # Strategy Inputs
 
110
  else:
111
  st.info("πŸ’‘ Adjust your risk parameters in the sidebar and click 'Run Analysis' to see predicted allocations.")
112
  else:
113
+ st.warning("⚠️ No data found. Please check your HF_TOKEN or click 'Seed Database'.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import hf_hub_download, HfApi
5
  import os
6
  import streamlit as st
7
 
8
- # --- GLOBAL CONSTANTS (Required for app.py imports) ---
9
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
 
@@ -17,74 +17,51 @@ def get_safe_token():
17
  except: return os.getenv("HF_TOKEN")
18
 
19
  def load_from_hf():
20
- """Initial load function called by app.py"""
21
  token = get_safe_token()
22
- if not token:
23
- return None
24
  try:
25
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
26
  df = pd.read_csv(path, index_col=0, parse_dates=True)
27
- return df.ffill()
28
- except Exception as e:
29
- st.error(f"HF Load Error: {e}")
30
- return None
31
 
32
  def seed_dataset_from_scratch():
33
- """Initializes the CSV with full history"""
34
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
35
  data = yf.download(tickers, start="2008-01-01", progress=False)
36
-
37
- if 'Adj Close' in data.columns:
38
- master_df = data['Adj Close']
39
- else:
40
- master_df = data['Close']
41
-
42
  try:
43
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
44
  master_df['SOFR_ANNUAL'] = sofr / 100
45
  except:
46
  master_df['SOFR_ANNUAL'] = 0.045
47
-
48
  master_df = master_df.sort_index().ffill()
49
  master_df.to_csv(FILENAME)
50
  upload_to_hf(FILENAME)
51
  return master_df
52
 
53
  def sync_incremental_data(df):
54
- """Attempt to sync data and return (dataframe, status_message)"""
55
- if df is None:
56
- return None, "Error: No initial data to sync."
57
-
58
  last_date = pd.to_datetime(df.index.max())
59
  sync_start = last_date + pd.Timedelta(days=1)
60
 
61
- # Check if we are already current
62
  if sync_start > pd.Timestamp.now().normalize():
63
- return df, "Already Up-to-Date"
64
 
65
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
66
-
67
  try:
68
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
69
-
70
- if new_data_raw is None or new_data_raw.empty:
71
- return df, "Not Refreshed (No new data available yet)"
72
-
73
- if 'Adj Close' in new_data_raw.columns:
74
- new_data = new_data_raw['Adj Close']
75
- else:
76
- new_data = new_data_raw['Close']
77
 
 
78
  combined = pd.concat([df, new_data]).sort_index()
79
  combined = combined[~combined.index.duplicated(keep='last')].ffill()
80
 
81
  combined.to_csv(FILENAME)
82
  upload_to_hf(FILENAME)
83
-
84
- return combined, f"Data Refreshed (Updated to {combined.index.max().date()})"
85
-
86
- except Exception as e:
87
- return df, f"Not Refreshed (Error: {str(e)})"
88
 
89
  def upload_to_hf(path):
90
  token = get_safe_token()
 
5
  import os
6
  import streamlit as st
7
 
8
+ # --- GLOBAL CONSTANTS ---
9
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
 
 
17
  except: return os.getenv("HF_TOKEN")
18
 
19
  def load_from_hf():
 
20
  token = get_safe_token()
21
+ if not token: return None
 
22
  try:
23
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
24
  df = pd.read_csv(path, index_col=0, parse_dates=True)
25
+ return df.ffill()
26
+ except: return None
 
 
27
 
28
  def seed_dataset_from_scratch():
 
29
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
30
  data = yf.download(tickers, start="2008-01-01", progress=False)
31
+ master_df = data['Adj Close'] if 'Adj Close' in data.columns else data['Close']
 
 
 
 
 
32
  try:
33
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
34
  master_df['SOFR_ANNUAL'] = sofr / 100
35
  except:
36
  master_df['SOFR_ANNUAL'] = 0.045
 
37
  master_df = master_df.sort_index().ffill()
38
  master_df.to_csv(FILENAME)
39
  upload_to_hf(FILENAME)
40
  return master_df
41
 
42
  def sync_incremental_data(df):
43
+ """Syncs data and returns (df, success_bool)"""
44
+ if df is None: return None, False
 
 
45
  last_date = pd.to_datetime(df.index.max())
46
  sync_start = last_date + pd.Timedelta(days=1)
47
 
 
48
  if sync_start > pd.Timestamp.now().normalize():
49
+ return df, False
50
 
51
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
 
52
  try:
53
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
54
+ if new_data_raw.empty: return df, False
 
 
 
 
 
 
 
55
 
56
+ new_data = new_data_raw['Adj Close'] if 'Adj Close' in new_data_raw.columns else new_data_raw['Close']
57
  combined = pd.concat([df, new_data]).sort_index()
58
  combined = combined[~combined.index.duplicated(keep='last')].ffill()
59
 
60
  combined.to_csv(FILENAME)
61
  upload_to_hf(FILENAME)
62
+ return combined, True
63
+ except:
64
+ return df, False
 
 
65
 
66
  def upload_to_hf(path):
67
  token = get_safe_token()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -6,35 +6,60 @@ from engine.trend_engine import run_trend_module
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
 
9
  if 'master_data' not in st.session_state:
10
  st.session_state.master_data = load_from_hf()
11
 
12
  with st.sidebar:
13
  st.header("πŸ—‚οΈ Configuration")
 
 
14
  if st.session_state.master_data is None:
15
  if st.button("πŸš€ Seed Database"):
16
  st.session_state.master_data = seed_dataset_from_scratch()
17
  st.rerun()
18
  else:
19
- st.success(f"Sync: {st.session_state.master_data.index.max().date()}")
 
 
 
 
20
  if st.button("πŸ”„ Sync New Data"):
21
- st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  st.rerun()
23
 
24
  st.divider()
 
 
25
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
26
  sub_option = st.selectbox("Conviction Strategy",
27
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
28
  start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
29
  vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
 
30
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
31
 
32
  if st.session_state.master_data is not None:
33
  if run_btn:
 
34
  is_fi = "Option A" in option
35
  univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
36
  bench = "AGG" if is_fi else "SPY"
37
 
 
38
  results = run_trend_module(st.session_state.master_data[univ],
39
  st.session_state.master_data[bench],
40
  st.session_state.master_data['SOFR_ANNUAL'],
@@ -42,30 +67,38 @@ if st.session_state.master_data is not None:
42
 
43
  st.title(f"πŸ“Š {option}: {sub_option}")
44
 
45
- # Row 1: Metrics (Annual Return First)
46
  m1, m2, m3, m4 = st.columns(4)
47
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
48
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
49
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
50
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
51
 
52
- # Row 2: Performance Chart (Interactive Years)
53
  fig = go.Figure()
54
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
55
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
56
- fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark", xaxis_title="Year")
 
 
 
 
 
 
57
  st.plotly_chart(fig, use_container_width=True)
58
 
59
- # Row 3: Methodology & Allocations
60
  st.divider()
61
  col_left, col_right = st.columns([1, 1.5])
62
 
63
  with col_left:
64
- st.subheader(f"🎯 Allocation for {results['next_day']}")
65
- w = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
66
- w['CASH (SOFR)'] = results['cash_weight']
67
- df_w = pd.DataFrame.from_dict(w, orient='index', columns=['Weight'])
68
- st.table(df_w.style.format("{:.2%}"))
 
 
69
 
70
  with col_right:
71
  st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
@@ -79,4 +112,6 @@ if st.session_state.master_data is not None:
79
  5. **Cash Buffer**: Remaining budget earns the live SOFR rate (Federal Reserve Bank of New York).
80
  """)
81
  else:
82
- st.info("πŸ’‘ Adjust settings and click 'Run Analysis'.")
 
 
 
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
9
+ # Initialize Session State safely
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
13
  with st.sidebar:
14
  st.header("πŸ—‚οΈ Configuration")
15
+
16
+ # Database Initialization / Status
17
  if st.session_state.master_data is None:
18
  if st.button("πŸš€ Seed Database"):
19
  st.session_state.master_data = seed_dataset_from_scratch()
20
  st.rerun()
21
  else:
22
+ # Display the last available date in the dataset
23
+ last_db_date = st.session_state.master_data.index.max().date()
24
+ st.success(f"DB Last Entry: {last_db_date}")
25
+
26
+ # Sync Button with UI Feedback
27
  if st.button("πŸ”„ Sync New Data"):
28
+ with st.spinner("Checking Stooq & yFinance..."):
29
+ # Captures the updated dataframe and the status message from the loader
30
+ updated_df, status_msg = sync_incremental_data(st.session_state.master_data)
31
+ st.session_state.master_data = updated_df
32
+
33
+ # Visual Feedback Logic
34
+ if "Refreshed" in status_msg and "Not" not in status_msg:
35
+ st.sidebar.success(status_msg)
36
+ st.toast(status_msg, icon="βœ…")
37
+ else:
38
+ st.sidebar.warning(status_msg)
39
+ st.toast(status_msg, icon="⚠️")
40
+
41
+ # Force UI to refresh to show new dates/data
42
  st.rerun()
43
 
44
  st.divider()
45
+
46
+ # Strategy Inputs
47
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
48
  sub_option = st.selectbox("Conviction Strategy",
49
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
50
  start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
51
  vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
52
+
53
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
54
 
55
  if st.session_state.master_data is not None:
56
  if run_btn:
57
+ # Determine Universe and Benchmark
58
  is_fi = "Option A" in option
59
  univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
60
  bench = "AGG" if is_fi else "SPY"
61
 
62
+ # Run the Quantitative Engine
63
  results = run_trend_module(st.session_state.master_data[univ],
64
  st.session_state.master_data[bench],
65
  st.session_state.master_data['SOFR_ANNUAL'],
 
67
 
68
  st.title(f"πŸ“Š {option}: {sub_option}")
69
 
70
+ # Row 1: Key Performance Metrics
71
  m1, m2, m3, m4 = st.columns(4)
72
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
73
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
74
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
75
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
76
 
77
+ # Row 2: Performance Chart
78
  fig = go.Figure()
79
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
80
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
81
+ fig.update_layout(
82
+ title="Out-of-Sample Cumulative Performance",
83
+ template="plotly_dark",
84
+ xaxis_title="Timeline",
85
+ yaxis_title="Growth of $1.00",
86
+ hovermode="x unified"
87
+ )
88
  st.plotly_chart(fig, use_container_width=True)
89
 
90
+ # Row 3: Methodology & Next-Day Allocations
91
  st.divider()
92
  col_left, col_right = st.columns([1, 1.5])
93
 
94
  with col_left:
95
+ st.subheader(f"🎯 Target Allocation: {results['next_day']}")
96
+ # Filter out zero weights for the display table
97
+ weights = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
98
+ weights['CASH (SOFR)'] = results['cash_weight']
99
+
100
+ df_weights = pd.DataFrame.from_dict(weights, orient='index', columns=['Weight'])
101
+ st.table(df_weights.style.format("{:.2%}"))
102
 
103
  with col_right:
104
  st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
 
112
  5. **Cash Buffer**: Remaining budget earns the live SOFR rate (Federal Reserve Bank of New York).
113
  """)
114
  else:
115
+ st.info("πŸ’‘ Adjust your risk parameters in the sidebar and click 'Run Analysis' to see predicted allocations.")
116
+ else:
117
+ st.warning("⚠️ No data found. Please provide a Hugging Face Token in Secrets or click 'Seed Database'.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import hf_hub_download, HfApi
5
  import os
6
  import streamlit as st
7
 
8
- # Explicitly define the lists first so they are available for export
9
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
 
@@ -24,7 +24,7 @@ def load_from_hf():
24
  try:
25
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
26
  df = pd.read_csv(path, index_col=0, parse_dates=True)
27
- return df.ffill() # Handle internal NaNs immediately
28
  except Exception as e:
29
  st.error(f"HF Load Error: {e}")
30
  return None
@@ -34,7 +34,6 @@ def seed_dataset_from_scratch():
34
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
35
  data = yf.download(tickers, start="2008-01-01", progress=False)
36
 
37
- # Robustly handle Column Multi-Index
38
  if 'Adj Close' in data.columns:
39
  master_df = data['Adj Close']
40
  else:
@@ -52,46 +51,40 @@ def seed_dataset_from_scratch():
52
  return master_df
53
 
54
  def sync_incremental_data(df):
55
- """The function triggered by the Sync Button"""
56
  if df is None:
57
- return seed_dataset_from_scratch()
58
 
59
  last_date = pd.to_datetime(df.index.max())
60
  sync_start = last_date + pd.Timedelta(days=1)
61
 
62
- # Check if data is already current to today
63
  if sync_start > pd.Timestamp.now().normalize():
64
- st.toast("Data is already up to date!", icon="βœ…")
65
- return df
66
 
67
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
68
 
69
  try:
70
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
71
 
72
- if new_data_raw.empty:
73
- st.toast("No new market sessions found.", icon="ℹ️")
74
- return df
75
 
76
- # Column selection
77
  if 'Adj Close' in new_data_raw.columns:
78
  new_data = new_data_raw['Adj Close']
79
  else:
80
  new_data = new_data_raw['Close']
81
 
82
- # Merge and clean
83
  combined = pd.concat([df, new_data]).sort_index()
84
  combined = combined[~combined.index.duplicated(keep='last')].ffill()
85
 
86
- # Save locally and push to cloud
87
  combined.to_csv(FILENAME)
88
  upload_to_hf(FILENAME)
89
 
90
- st.toast(f"Sync complete: {combined.index.max().date()}", icon="πŸš€")
91
- return combined
92
  except Exception as e:
93
- st.error(f"Sync failed: {e}")
94
- return df
95
 
96
  def upload_to_hf(path):
97
  token = get_safe_token()
 
5
  import os
6
  import streamlit as st
7
 
8
+ # --- GLOBAL CONSTANTS (Required for app.py imports) ---
9
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
 
 
24
  try:
25
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
26
  df = pd.read_csv(path, index_col=0, parse_dates=True)
27
+ return df.ffill()
28
  except Exception as e:
29
  st.error(f"HF Load Error: {e}")
30
  return None
 
34
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
35
  data = yf.download(tickers, start="2008-01-01", progress=False)
36
 
 
37
  if 'Adj Close' in data.columns:
38
  master_df = data['Adj Close']
39
  else:
 
51
  return master_df
52
 
53
  def sync_incremental_data(df):
54
+ """Attempt to sync data and return (dataframe, status_message)"""
55
  if df is None:
56
+ return None, "Error: No initial data to sync."
57
 
58
  last_date = pd.to_datetime(df.index.max())
59
  sync_start = last_date + pd.Timedelta(days=1)
60
 
61
+ # Check if we are already current
62
  if sync_start > pd.Timestamp.now().normalize():
63
+ return df, "Already Up-to-Date"
 
64
 
65
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
66
 
67
  try:
68
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
69
 
70
+ if new_data_raw is None or new_data_raw.empty:
71
+ return df, "Not Refreshed (No new data available yet)"
 
72
 
 
73
  if 'Adj Close' in new_data_raw.columns:
74
  new_data = new_data_raw['Adj Close']
75
  else:
76
  new_data = new_data_raw['Close']
77
 
 
78
  combined = pd.concat([df, new_data]).sort_index()
79
  combined = combined[~combined.index.duplicated(keep='last')].ffill()
80
 
 
81
  combined.to_csv(FILENAME)
82
  upload_to_hf(FILENAME)
83
 
84
+ return combined, f"Data Refreshed (Updated to {combined.index.max().date()})"
85
+
86
  except Exception as e:
87
+ return df, f"Not Refreshed (Error: {str(e)})"
 
88
 
89
  def upload_to_hf(path):
90
  token = get_safe_token()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,49 +1,100 @@
1
- def sync_incremental_data(df):
2
- # Ensure index is datetime
3
- df.index = pd.to_datetime(df.index)
4
- last_date = df.index.max()
5
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
 
7
 
8
- # Calculate sync start (day after last record)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  sync_start = last_date + pd.Timedelta(days=1)
10
 
11
- # If sync_start is in the future, nothing to do
12
  if sync_start > pd.Timestamp.now().normalize():
13
- st.info("Data is already up to date.")
14
  return df
15
 
 
 
16
  try:
17
- # Download new data
18
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
19
 
20
  if new_data_raw.empty:
21
- st.warning("No new market data found to sync.")
22
  return df
23
 
24
- # Handle columns
25
  if 'Adj Close' in new_data_raw.columns:
26
  new_data = new_data_raw['Adj Close']
27
  else:
28
  new_data = new_data_raw['Close']
29
 
30
- # Clean NaNs before merging
31
- new_data = new_data.dropna(how='all')
32
-
33
- # Combine, sort, and deduplicate
34
  combined = pd.concat([df, new_data]).sort_index()
35
- combined = combined[~combined.index.duplicated(keep='last')]
36
 
37
- # Forward fill any holes in the middle, but don't fill the end
38
- combined = combined.ffill()
39
-
40
- # Save and Push
41
  combined.to_csv(FILENAME)
42
  upload_to_hf(FILENAME)
43
 
44
- st.success(f"Synced successfully up to {combined.index.max().date()}")
45
  return combined
46
-
47
  except Exception as e:
48
- st.error(f"Sync failed error: {e}")
49
  return df
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import pandas_datareader.data as web
3
+ import yfinance as yf
4
+ from huggingface_hub import hf_hub_download, HfApi
5
+ import os
6
+ import streamlit as st
7
+
8
+ # Explicitly define the lists first so they are available for export
9
+ X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
+ FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
+
12
+ REPO_ID = "P2SAMAPA/etf_trend_data"
13
+ FILENAME = "market_data.csv"
14
+
15
+ def get_safe_token():
16
+ try: return st.secrets["HF_TOKEN"]
17
+ except: return os.getenv("HF_TOKEN")
18
+
19
+ def load_from_hf():
20
+ """Initial load function called by app.py"""
21
+ token = get_safe_token()
22
+ if not token:
23
+ return None
24
+ try:
25
+ path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
26
+ df = pd.read_csv(path, index_col=0, parse_dates=True)
27
+ return df.ffill() # Handle internal NaNs immediately
28
+ except Exception as e:
29
+ st.error(f"HF Load Error: {e}")
30
+ return None
31
+
32
+ def seed_dataset_from_scratch():
33
+ """Initializes the CSV with full history"""
34
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
35
+ data = yf.download(tickers, start="2008-01-01", progress=False)
36
 
37
+ # Robustly handle Column Multi-Index
38
+ if 'Adj Close' in data.columns:
39
+ master_df = data['Adj Close']
40
+ else:
41
+ master_df = data['Close']
42
+
43
+ try:
44
+ sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
45
+ master_df['SOFR_ANNUAL'] = sofr / 100
46
+ except:
47
+ master_df['SOFR_ANNUAL'] = 0.045
48
+
49
+ master_df = master_df.sort_index().ffill()
50
+ master_df.to_csv(FILENAME)
51
+ upload_to_hf(FILENAME)
52
+ return master_df
53
+
54
+ def sync_incremental_data(df):
55
+ """The function triggered by the Sync Button"""
56
+ if df is None:
57
+ return seed_dataset_from_scratch()
58
+
59
+ last_date = pd.to_datetime(df.index.max())
60
  sync_start = last_date + pd.Timedelta(days=1)
61
 
62
+ # Check if data is already current to today
63
  if sync_start > pd.Timestamp.now().normalize():
64
+ st.toast("Data is already up to date!", icon="βœ…")
65
  return df
66
 
67
+ tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
68
+
69
  try:
 
70
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
71
 
72
  if new_data_raw.empty:
73
+ st.toast("No new market sessions found.", icon="ℹ️")
74
  return df
75
 
76
+ # Column selection
77
  if 'Adj Close' in new_data_raw.columns:
78
  new_data = new_data_raw['Adj Close']
79
  else:
80
  new_data = new_data_raw['Close']
81
 
82
+ # Merge and clean
 
 
 
83
  combined = pd.concat([df, new_data]).sort_index()
84
+ combined = combined[~combined.index.duplicated(keep='last')].ffill()
85
 
86
+ # Save locally and push to cloud
 
 
 
87
  combined.to_csv(FILENAME)
88
  upload_to_hf(FILENAME)
89
 
90
+ st.toast(f"Sync complete: {combined.index.max().date()}", icon="πŸš€")
91
  return combined
 
92
  except Exception as e:
93
+ st.error(f"Sync failed: {e}")
94
  return df
95
+
96
+ def upload_to_hf(path):
97
+ token = get_safe_token()
98
+ if token:
99
+ api = HfApi()
100
+ api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -3,71 +3,110 @@ import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr, sub_option):
6
- # 1. Trend & Conviction Logic
 
 
 
 
 
 
 
 
 
 
 
7
  sma_200 = price_df.rolling(200).mean()
8
  sma_50 = price_df.rolling(50).mean()
9
 
10
- # Conviction = Percentage distance above the 200 SMA
11
  conviction_score = (price_df / sma_200) - 1
12
- signals = (sma_50 > sma_200).astype(int)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
- # 2. Risk Metrics
15
  returns = price_df.pct_change()
 
16
  asset_vol = returns.rolling(60).std() * np.sqrt(252)
17
 
18
- # 3. Apply Sub-Option Concentration
19
- if sub_option == "3 Highest Conviction":
20
- ranks = conviction_score.rank(axis=1, ascending=False)
21
- signals = ((ranks <= 3) & (signals == 1)).astype(int)
22
- elif sub_option == "1 Highest Conviction":
23
- ranks = conviction_score.rank(axis=1, ascending=False)
24
- signals = ((ranks <= 1) & (signals == 1)).astype(int)
25
 
26
- # 4. Volatility Target Weighting
27
- active_counts = signals.sum(axis=1)
28
- # Target Vol / Asset Vol, distributed across active signals
29
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
30
- final_weights = raw_weights * signals
31
 
32
- # 5. Leverage Cap (1.5x)
 
 
 
33
  total_exposure = final_weights.sum(axis=1)
34
- scale_factor = total_exposure.apply(lambda x: 1.5/x if x > 1.5 else 1.0)
 
 
35
  final_weights = final_weights.multiply(scale_factor, axis=0)
36
 
37
- # 6. Cash (SOFR) Allocation
38
- cash_weight = 1.0 - final_weights.sum(axis=1)
 
 
39
 
40
- # 7. Portfolio Returns
 
 
41
  portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
42
  portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)
43
 
44
- # 8. Out-of-Sample Slicing
45
  oos_mask = portfolio_ret.index.year >= start_yr
46
- equity_curve = (1 + portfolio_ret[oos_mask]).cumprod()
47
- bench_curve = (1 + bench_series.pct_change().fillna(0)[oos_mask]).cumprod()
 
 
 
 
 
 
48
 
49
  # Stats
50
- ann_ret = portfolio_ret[oos_mask].mean() * 252
51
- ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
52
- dd = (equity_curve / equity_curve.cummax()) - 1
 
 
 
53
 
54
- # --- NEXT DAY TRADING LOGIC ---
55
  nyse = mcal.get_calendar('NYSE')
56
- # Use real-world today to anchor the search for the NEXT session
57
  today_dt = pd.Timestamp.now().normalize()
58
  search_start = today_dt + pd.Timedelta(days=1)
59
  sched = nyse.schedule(start_date=search_start, end_date=search_start + pd.Timedelta(days=10))
60
- next_day = sched.index[0]
61
 
62
  return {
63
  'equity_curve': equity_curve,
64
  'bench_curve': bench_curve,
65
  'ann_ret': ann_ret,
66
- 'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
67
- 'max_dd': dd.min(),
68
- 'avg_daily_dd': dd.mean(),
69
  'next_day': next_day.date(),
70
  'current_weights': final_weights.iloc[-1],
71
  'cash_weight': cash_weight.iloc[-1],
72
- 'current_sofr': sofr_series.iloc[-1]
73
  }
 
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr, sub_option):
6
+ """
7
+ Quantitative Engine based on Zarattini & Antonacci (2025).
8
+ Implements Volatility Targeting and Conviction-based ETF Allocation.
9
+ """
10
+
11
+ # --- 1. DATA CLEANING & PREPARATION ---
12
+ # Forward fill holes and drop assets with no data in the current window
13
+ price_df = price_df.ffill()
14
+ # Ensure benchmarks and SOFR are aligned
15
+ sofr_series = sofr_series.ffill()
16
+
17
+ # --- 2. TREND & CONVICTION SIGNALS ---
18
  sma_200 = price_df.rolling(200).mean()
19
  sma_50 = price_df.rolling(50).mean()
20
 
21
+ # Conviction = % distance above the 200 SMA (momentum strength)
22
  conviction_score = (price_df / sma_200) - 1
23
+ # Basic Signal: 50 SMA > 200 SMA
24
+ base_signals = (sma_50 > sma_200).astype(int)
25
+
26
+ # --- 3. CONVICTION FILTERING (Sub-Options) ---
27
+ if sub_option == "3 Highest Conviction":
28
+ # Rank assets daily; 1 is highest conviction.
29
+ # Only assets in a base trend (base_signals == 1) are eligible for ranking.
30
+ ranked_conviction = conviction_score.where(base_signals == 1)
31
+ ranks = ranked_conviction.rank(axis=1, ascending=False)
32
+ final_signals = ((ranks <= 3)).astype(int)
33
+ elif sub_option == "1 Highest Conviction":
34
+ ranked_conviction = conviction_score.where(base_signals == 1)
35
+ ranks = ranked_conviction.rank(axis=1, ascending=False)
36
+ final_signals = ((ranks <= 1)).astype(int)
37
+ else:
38
+ # "All Trending ETFs"
39
+ final_signals = base_signals
40
 
41
+ # --- 4. VOLATILITY TARGETING (RISK BUDGETING) ---
42
  returns = price_df.pct_change()
43
+ # 60-day Annualized Realized Volatility
44
  asset_vol = returns.rolling(60).std() * np.sqrt(252)
45
 
46
+ # Safety: If vol is NaN or 0, set to a very high number to prevent infinite weights
47
+ asset_vol = asset_vol.replace(0, np.nan).fillna(9.99)
 
 
 
 
 
48
 
49
+ # Methodology: Target Vol / Asset Vol, distributed across active signals
50
+ active_counts = final_signals.sum(axis=1)
51
+ # Avoid division by zero if no assets are in trend
52
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
 
53
 
54
+ # Multiply by signals to zero out non-trending assets
55
+ final_weights = raw_weights * final_signals
56
+
57
+ # --- 5. EXPOSURE & LEVERAGE MANAGEMENT ---
58
  total_exposure = final_weights.sum(axis=1)
59
+ # Cap total gross leverage at 1.5x (150%)
60
+ leverage_cap = 1.5
61
+ scale_factor = total_exposure.apply(lambda x: leverage_cap/x if x > leverage_cap else 1.0)
62
  final_weights = final_weights.multiply(scale_factor, axis=0)
63
 
64
+ # --- 6. CASH (SOFR) ALLOCATION ---
65
+ # Remainder of the 100% capital not used in the risk budget goes to SOFR
66
+ final_exposure = final_weights.sum(axis=1)
67
+ cash_weight = 1.0 - final_exposure
68
 
69
+ # --- 7. PERFORMANCE CALCULATION ---
70
+ # Strategy Return = (Weights * Asset Returns) + (Cash Weight * SOFR)
71
+ # We shift weights by 1 to prevent look-ahead bias (trading at today's close for tomorrow)
72
  portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
73
  portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)
74
 
75
+ # --- 8. OUT-OF-SAMPLE (OOS) METRICS ---
76
  oos_mask = portfolio_ret.index.year >= start_yr
77
+ oos_returns = portfolio_ret[oos_mask]
78
+
79
+ equity_curve = (1 + oos_returns).cumprod()
80
+ bench_returns = bench_series.pct_change().fillna(0)[oos_mask]
81
+ bench_curve = (1 + bench_returns).cumprod()
82
+
83
+ # Drawdowns
84
+ dd_series = (equity_curve / equity_curve.cummax()) - 1
85
 
86
  # Stats
87
+ ann_ret = oos_returns.mean() * 252
88
+ ann_vol = oos_returns.std() * np.sqrt(252)
89
+ current_sofr = sofr_series.ffill().iloc[-1]
90
+
91
+ # Sharpe Ratio: (Return - RiskFree) / Vol
92
+ sharpe = (ann_ret - current_sofr) / ann_vol if ann_vol > 0 else 0
93
 
94
+ # --- 9. NEXT TRADING DAY CALENDAR ---
95
  nyse = mcal.get_calendar('NYSE')
96
+ # Anchor to system clock to ensure we always look FORWARD
97
  today_dt = pd.Timestamp.now().normalize()
98
  search_start = today_dt + pd.Timedelta(days=1)
99
  sched = nyse.schedule(start_date=search_start, end_date=search_start + pd.Timedelta(days=10))
100
+ next_day = sched.index[0]
101
 
102
  return {
103
  'equity_curve': equity_curve,
104
  'bench_curve': bench_curve,
105
  'ann_ret': ann_ret,
106
+ 'sharpe': sharpe,
107
+ 'max_dd': dd_series.min(),
 
108
  'next_day': next_day.date(),
109
  'current_weights': final_weights.iloc[-1],
110
  'cash_weight': cash_weight.iloc[-1],
111
+ 'current_sofr': current_sofr
112
  }
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,90 +1,49 @@
1
- import pandas as pd
2
- import pandas_datareader.data as web
3
- import yfinance as yf
4
- from huggingface_hub import hf_hub_download, HfApi
5
- import os
6
- import streamlit as st
7
-
8
- # 1. Define the Ticker Lists
9
- X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
- FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
-
12
- REPO_ID = "P2SAMAPA/etf_trend_data"
13
- FILENAME = "market_data.csv"
14
-
15
- def get_safe_token():
16
- try: return st.secrets["HF_TOKEN"]
17
- except: return os.getenv("HF_TOKEN")
18
-
19
- # 2. Define load_from_hf
20
- def load_from_hf():
21
- token = get_safe_token()
22
- if not token:
23
- return None
24
- try:
25
- path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
26
- return pd.read_csv(path, index_col=0, parse_dates=True)
27
- except Exception as e:
28
- st.warning(f"Could not load from HuggingFace: {e}")
29
- return None
30
-
31
- # 3. Define seed_dataset_from_scratch
32
- def seed_dataset_from_scratch():
33
- tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
34
- data = yf.download(tickers, start="2008-01-01", progress=False)
35
-
36
- # Handle the 'Adj Close' multi-index issue
37
- if 'Adj Close' in data.columns:
38
- master_df = data['Adj Close']
39
- else:
40
- master_df = data['Close']
41
-
42
- # Add SOFR from FRED
43
- try:
44
- sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
45
- master_df['SOFR_ANNUAL'] = sofr / 100
46
- except:
47
- master_df['SOFR_ANNUAL'] = 0.045 # Default fallback
48
-
49
- master_df = master_df.sort_index().ffill()
50
- master_df.to_csv(FILENAME)
51
- upload_to_hf(FILENAME)
52
- return master_df
53
-
54
- # 4. Define sync_incremental_data
55
  def sync_incremental_data(df):
56
- last_date = pd.to_datetime(df.index.max())
 
 
 
57
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
 
 
58
  sync_start = last_date + pd.Timedelta(days=1)
59
 
60
- if sync_start > pd.Timestamp.now():
 
 
61
  return df
62
 
63
  try:
 
64
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
 
65
  if new_data_raw.empty:
 
66
  return df
67
 
 
68
  if 'Adj Close' in new_data_raw.columns:
69
  new_data = new_data_raw['Adj Close']
70
  else:
71
  new_data = new_data_raw['Close']
72
 
 
 
 
 
73
  combined = pd.concat([df, new_data]).sort_index()
74
  combined = combined[~combined.index.duplicated(keep='last')]
75
 
 
 
 
 
76
  combined.to_csv(FILENAME)
77
  upload_to_hf(FILENAME)
 
 
78
  return combined
 
79
  except Exception as e:
80
- st.error(f"Sync failed: {e}")
81
  return df
82
-
83
- def upload_to_hf(path):
84
- token = get_safe_token()
85
- if token:
86
- api = HfApi()
87
- try:
88
- api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
89
- except Exception as e:
90
- st.error(f"HF Upload failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  def sync_incremental_data(df):
2
+ # Ensure index is datetime
3
+ df.index = pd.to_datetime(df.index)
4
+ last_date = df.index.max()
5
+
6
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
7
+
8
+ # Calculate sync start (day after last record)
9
  sync_start = last_date + pd.Timedelta(days=1)
10
 
11
+ # If sync_start is in the future, nothing to do
12
+ if sync_start > pd.Timestamp.now().normalize():
13
+ st.info("Data is already up to date.")
14
  return df
15
 
16
  try:
17
+ # Download new data
18
  new_data_raw = yf.download(tickers, start=sync_start, progress=False)
19
+
20
  if new_data_raw.empty:
21
+ st.warning("No new market data found to sync.")
22
  return df
23
 
24
+ # Handle columns
25
  if 'Adj Close' in new_data_raw.columns:
26
  new_data = new_data_raw['Adj Close']
27
  else:
28
  new_data = new_data_raw['Close']
29
 
30
+ # Clean NaNs before merging
31
+ new_data = new_data.dropna(how='all')
32
+
33
+ # Combine, sort, and deduplicate
34
  combined = pd.concat([df, new_data]).sort_index()
35
  combined = combined[~combined.index.duplicated(keep='last')]
36
 
37
+ # Forward fill any holes in the middle, but don't fill the end
38
+ combined = combined.ffill()
39
+
40
+ # Save and Push
41
  combined.to_csv(FILENAME)
42
  upload_to_hf(FILENAME)
43
+
44
+ st.success(f"Synced successfully up to {combined.index.max().date()}")
45
  return combined
46
+
47
  except Exception as e:
48
+ st.error(f"Sync failed error: {e}")
49
  return df
 
 
 
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,40 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  def sync_incremental_data(df):
2
  last_date = pd.to_datetime(df.index.max())
3
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
4
  sync_start = last_date + pd.Timedelta(days=1)
5
 
6
- # Check if we even need to sync (avoiding weekend/pre-market errors)
7
  if sync_start > pd.Timestamp.now():
8
  return df
9
 
10
  try:
11
- # Download from yfinance with specific configuration to avoid Multi-Index issues
12
- new_data_raw = yf.download(tickers, start=sync_start, progress=False, group_by='column')
13
-
14
  if new_data_raw.empty:
15
  return df
16
 
17
- # Logic to handle different yfinance return structures
18
  if 'Adj Close' in new_data_raw.columns:
19
  new_data = new_data_raw['Adj Close']
20
- elif 'Close' in new_data_raw.columns:
21
- new_data = new_data_raw['Close']
22
  else:
23
- # If it's a single ticker or flattened
24
- new_data = new_data_raw
25
 
26
- # Standardize: Ensure we only have the tickers we want and no empty columns
27
- new_data = new_data[new_data.columns.intersection(tickers)]
28
-
29
- # Combine with master dataframe
30
  combined = pd.concat([df, new_data]).sort_index()
31
- # Keep the most recent data point if duplicates occur
32
  combined = combined[~combined.index.duplicated(keep='last')]
33
 
34
  combined.to_csv(FILENAME)
35
  upload_to_hf(FILENAME)
36
  return combined
37
-
38
  except Exception as e:
39
  st.error(f"Sync failed: {e}")
40
  return df
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import pandas_datareader.data as web
3
+ import yfinance as yf
4
+ from huggingface_hub import hf_hub_download, HfApi
5
+ import os
6
+ import streamlit as st
7
+
8
+ # 1. Define the Ticker Lists
9
+ X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
10
+ FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
11
+
12
+ REPO_ID = "P2SAMAPA/etf_trend_data"
13
+ FILENAME = "market_data.csv"
14
+
15
+ def get_safe_token():
16
+ try: return st.secrets["HF_TOKEN"]
17
+ except: return os.getenv("HF_TOKEN")
18
+
19
+ # 2. Define load_from_hf
20
+ def load_from_hf():
21
+ token = get_safe_token()
22
+ if not token:
23
+ return None
24
+ try:
25
+ path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
26
+ return pd.read_csv(path, index_col=0, parse_dates=True)
27
+ except Exception as e:
28
+ st.warning(f"Could not load from HuggingFace: {e}")
29
+ return None
30
+
31
+ # 3. Define seed_dataset_from_scratch
32
+ def seed_dataset_from_scratch():
33
+ tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
34
+ data = yf.download(tickers, start="2008-01-01", progress=False)
35
+
36
+ # Handle the 'Adj Close' multi-index issue
37
+ if 'Adj Close' in data.columns:
38
+ master_df = data['Adj Close']
39
+ else:
40
+ master_df = data['Close']
41
+
42
+ # Add SOFR from FRED
43
+ try:
44
+ sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
45
+ master_df['SOFR_ANNUAL'] = sofr / 100
46
+ except:
47
+ master_df['SOFR_ANNUAL'] = 0.045 # Default fallback
48
+
49
+ master_df = master_df.sort_index().ffill()
50
+ master_df.to_csv(FILENAME)
51
+ upload_to_hf(FILENAME)
52
+ return master_df
53
+
54
+ # 4. Define sync_incremental_data
55
  def sync_incremental_data(df):
56
  last_date = pd.to_datetime(df.index.max())
57
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
58
  sync_start = last_date + pd.Timedelta(days=1)
59
 
 
60
  if sync_start > pd.Timestamp.now():
61
  return df
62
 
63
  try:
64
+ new_data_raw = yf.download(tickers, start=sync_start, progress=False)
 
 
65
  if new_data_raw.empty:
66
  return df
67
 
 
68
  if 'Adj Close' in new_data_raw.columns:
69
  new_data = new_data_raw['Adj Close']
 
 
70
  else:
71
+ new_data = new_data_raw['Close']
 
72
 
 
 
 
 
73
  combined = pd.concat([df, new_data]).sort_index()
 
74
  combined = combined[~combined.index.duplicated(keep='last')]
75
 
76
  combined.to_csv(FILENAME)
77
  upload_to_hf(FILENAME)
78
  return combined
 
79
  except Exception as e:
80
  st.error(f"Sync failed: {e}")
81
  return df
82
+
83
+ def upload_to_hf(path):
84
+ token = get_safe_token()
85
+ if token:
86
+ api = HfApi()
87
+ try:
88
+ api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
89
+ except Exception as e:
90
+ st.error(f"HF Upload failed: {e}")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,81 +1,40 @@
1
- import pandas as pd
2
- import pandas_datareader.data as web
3
- import yfinance as yf
4
- import time
5
- from huggingface_hub import hf_hub_download, HfApi
6
- import os
7
- import streamlit as st
8
-
9
- REPO_ID = "P2SAMAPA/etf_trend_data"
10
- FILENAME = "market_data.csv"
11
-
12
- X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
13
- FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
14
-
15
- def get_safe_token():
16
- """Bypasses Streamlit's hard-fail on missing secrets.toml by using environment fallback."""
17
- try:
18
- # Try Streamlit secrets first
19
- return st.secrets["HF_TOKEN"]
20
- except Exception:
21
- # Standard environment variable fallback (How HF actually stores them)
22
- return os.getenv("HF_TOKEN")
23
-
24
- def load_from_hf():
25
- token = get_safe_token()
26
- if not token: return None
27
- try:
28
- path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
29
- return pd.read_csv(path, index_col=0, parse_dates=True)
30
- except:
31
- return None
32
-
33
- def seed_dataset_from_scratch():
34
- # Include benchmarks for comparison logic
35
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
36
- master_df = pd.DataFrame()
37
- status = st.empty()
38
- progress = st.progress(0)
39
 
40
- for i, t in enumerate(tickers):
41
- status.text(f"πŸ›°οΈ Fetching {t} from Stooq...")
42
- try:
43
- data = web.DataReader(f"{t}.US", 'stooq', start='2008-01-01')
44
- if not data.empty:
45
- master_df[t] = data['Close'].sort_index()
46
- time.sleep(0.7) # Polite delay
47
- except:
48
- try:
49
- master_df[t] = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
50
- except: pass
51
- progress.progress((i + 1) / len(tickers))
52
 
53
- # Add SOFR Rate (Cash Interest)
54
  try:
55
- sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
56
- master_df['SOFR_ANNUAL'] = sofr / 100
57
- except:
58
- master_df['SOFR_ANNUAL'] = 0.045
59
-
60
- master_df = master_df.sort_index().ffill()
61
- master_df.to_csv(FILENAME)
62
- upload_to_hf(FILENAME)
63
- return master_df
64
-
65
- def sync_incremental_data(df):
66
- last_date = df.index.max()
67
- tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
68
- new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
69
- combined = pd.concat([df, new_data]).sort_index()
70
- combined = combined[~combined.index.duplicated(keep='last')]
71
- combined.to_csv(FILENAME)
72
- upload_to_hf(FILENAME)
73
- return combined
74
-
75
- def upload_to_hf(path):
76
- token = get_safe_token()
77
- if not token:
78
- st.error("❌ Cannot upload: HF_TOKEN is missing from Space Secrets.")
79
- return
80
- api = HfApi()
81
- api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
 
 
 
 
1
+ def sync_incremental_data(df):
2
+ last_date = pd.to_datetime(df.index.max())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
4
+ sync_start = last_date + pd.Timedelta(days=1)
 
 
5
 
6
+ # Check if we even need to sync (avoiding weekend/pre-market errors)
7
+ if sync_start > pd.Timestamp.now():
8
+ return df
 
 
 
 
 
 
 
 
 
9
 
 
10
  try:
11
+ # Download from yfinance with specific configuration to avoid Multi-Index issues
12
+ new_data_raw = yf.download(tickers, start=sync_start, progress=False, group_by='column')
13
+
14
+ if new_data_raw.empty:
15
+ return df
16
+
17
+ # Logic to handle different yfinance return structures
18
+ if 'Adj Close' in new_data_raw.columns:
19
+ new_data = new_data_raw['Adj Close']
20
+ elif 'Close' in new_data_raw.columns:
21
+ new_data = new_data_raw['Close']
22
+ else:
23
+ # If it's a single ticker or flattened
24
+ new_data = new_data_raw
25
+
26
+ # Standardize: Ensure we only have the tickers we want and no empty columns
27
+ new_data = new_data[new_data.columns.intersection(tickers)]
28
+
29
+ # Combine with master dataframe
30
+ combined = pd.concat([df, new_data]).sort_index()
31
+ # Keep the most recent data point if duplicates occur
32
+ combined = combined[~combined.index.duplicated(keep='last')]
33
+
34
+ combined.to_csv(FILENAME)
35
+ upload_to_hf(FILENAME)
36
+ return combined
37
+
38
+ except Exception as e:
39
+ st.error(f"Sync failed: {e}")
40
+ return df
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -6,7 +6,6 @@ from engine.trend_engine import run_trend_module
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
9
- # Initialize Session State safely
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
@@ -43,14 +42,14 @@ if st.session_state.master_data is not None:
43
 
44
  st.title(f"πŸ“Š {option}: {sub_option}")
45
 
46
- # Row 1: Metrics
47
  m1, m2, m3, m4 = st.columns(4)
48
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
49
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
50
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
51
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
52
 
53
- # Row 2: Performance Chart
54
  fig = go.Figure()
55
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
56
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
@@ -74,10 +73,10 @@ if st.session_state.master_data is not None:
74
  This strategy implements the **2025 Charles H. Dow Award** winning framework by **Andrea Zarattini** and **Michael Antonacci**.
75
 
76
  1. **Regime Identification**: A dual 50/200-day SMA filter determines asset eligibility.
77
- 2. **Conviction Ranking**: Assets are ranked by their distance from the 200-day SMA.
78
- 3. **Concentrated Sizing**: Under the **{sub_option}** setting, the system focuses the risk budget only on the top leaders.
79
  4. **Volatility Targeting**: Allocations are sized inversely to 60-day volatility to maintain a stable **{vol_target:.0%}** risk profile.
80
- 5. **Cash Buffer**: Remaining budget earns the live SOFR rate.
81
  """)
82
  else:
83
  st.info("πŸ’‘ Adjust settings and click 'Run Analysis'.")
 
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
 
9
  if 'master_data' not in st.session_state:
10
  st.session_state.master_data = load_from_hf()
11
 
 
42
 
43
  st.title(f"πŸ“Š {option}: {sub_option}")
44
 
45
+ # Row 1: Metrics (Annual Return First)
46
  m1, m2, m3, m4 = st.columns(4)
47
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
48
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
49
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
50
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
51
 
52
+ # Row 2: Performance Chart (Interactive Years)
53
  fig = go.Figure()
54
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
55
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
 
73
  This strategy implements the **2025 Charles H. Dow Award** winning framework by **Andrea Zarattini** and **Michael Antonacci**.
74
 
75
  1. **Regime Identification**: A dual 50/200-day SMA filter determines asset eligibility.
76
+ 2. **Conviction Ranking**: Assets are ranked by their distance from the 200-day SMA (Trend Strength).
77
+ 3. **Concentrated Sizing**: In **{sub_option}** mode, the risk budget is focused only on top leaders.
78
  4. **Volatility Targeting**: Allocations are sized inversely to 60-day volatility to maintain a stable **{vol_target:.0%}** risk profile.
79
+ 5. **Cash Buffer**: Remaining budget earns the live SOFR rate (Federal Reserve Bank of New York).
80
  """)
81
  else:
82
  st.info("πŸ’‘ Adjust settings and click 'Run Analysis'.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -25,6 +25,7 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr,
25
 
26
  # 4. Volatility Target Weighting
27
  active_counts = signals.sum(axis=1)
 
28
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
29
  final_weights = raw_weights * signals
30
 
@@ -50,17 +51,13 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr,
50
  ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
51
  dd = (equity_curve / equity_curve.cummax()) - 1
52
 
53
- # --- FIXED NEXT DAY LOGIC ---
54
  nyse = mcal.get_calendar('NYSE')
55
- last_dt = price_df.index[-1]
56
-
57
- # Generate schedule starting from the day AFTER last_dt to ensure we find the future open
58
- search_start = last_dt + pd.Timedelta(days=1)
59
  sched = nyse.schedule(start_date=search_start, end_date=search_start + pd.Timedelta(days=10))
60
-
61
- # Take the first valid trading day from the future schedule
62
  next_day = sched.index[0]
63
- # ----------------------------
64
 
65
  return {
66
  'equity_curve': equity_curve,
@@ -68,6 +65,7 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr,
68
  'ann_ret': ann_ret,
69
  'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
70
  'max_dd': dd.min(),
 
71
  'next_day': next_day.date(),
72
  'current_weights': final_weights.iloc[-1],
73
  'cash_weight': cash_weight.iloc[-1],
 
25
 
26
  # 4. Volatility Target Weighting
27
  active_counts = signals.sum(axis=1)
28
+ # Target Vol / Asset Vol, distributed across active signals
29
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
30
  final_weights = raw_weights * signals
31
 
 
51
  ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
52
  dd = (equity_curve / equity_curve.cummax()) - 1
53
 
54
+ # --- NEXT DAY TRADING LOGIC ---
55
  nyse = mcal.get_calendar('NYSE')
56
+ # Use real-world today to anchor the search for the NEXT session
57
+ today_dt = pd.Timestamp.now().normalize()
58
+ search_start = today_dt + pd.Timedelta(days=1)
 
59
  sched = nyse.schedule(start_date=search_start, end_date=search_start + pd.Timedelta(days=10))
 
 
60
  next_day = sched.index[0]
 
61
 
62
  return {
63
  'equity_curve': equity_curve,
 
65
  'ann_ret': ann_ret,
66
  'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
67
  'max_dd': dd.min(),
68
+ 'avg_daily_dd': dd.mean(),
69
  'next_day': next_day.date(),
70
  'current_weights': final_weights.iloc[-1],
71
  'cash_weight': cash_weight.iloc[-1],
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -17,16 +17,13 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr,
17
 
18
  # 3. Apply Sub-Option Concentration
19
  if sub_option == "3 Highest Conviction":
20
- # Rank daily: 1 is highest conviction
21
  ranks = conviction_score.rank(axis=1, ascending=False)
22
  signals = ((ranks <= 3) & (signals == 1)).astype(int)
23
  elif sub_option == "1 Highest Conviction":
24
  ranks = conviction_score.rank(axis=1, ascending=False)
25
  signals = ((ranks <= 1) & (signals == 1)).astype(int)
26
- # Else: "All Trending ETFs" uses the base signals
27
 
28
  # 4. Volatility Target Weighting
29
- # Methodology: Target Vol / Asset Vol, distributed across active signals
30
  active_counts = signals.sum(axis=1)
31
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
32
  final_weights = raw_weights * signals
@@ -53,10 +50,17 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr,
53
  ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
54
  dd = (equity_curve / equity_curve.cummax()) - 1
55
 
56
- # NYSE Calendar
57
  nyse = mcal.get_calendar('NYSE')
58
  last_dt = price_df.index[-1]
59
- next_day = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10)).index[1]
 
 
 
 
 
 
 
60
 
61
  return {
62
  'equity_curve': equity_curve,
 
17
 
18
  # 3. Apply Sub-Option Concentration
19
  if sub_option == "3 Highest Conviction":
 
20
  ranks = conviction_score.rank(axis=1, ascending=False)
21
  signals = ((ranks <= 3) & (signals == 1)).astype(int)
22
  elif sub_option == "1 Highest Conviction":
23
  ranks = conviction_score.rank(axis=1, ascending=False)
24
  signals = ((ranks <= 1) & (signals == 1)).astype(int)
 
25
 
26
  # 4. Volatility Target Weighting
 
27
  active_counts = signals.sum(axis=1)
28
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
29
  final_weights = raw_weights * signals
 
50
  ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
51
  dd = (equity_curve / equity_curve.cummax()) - 1
52
 
53
+ # --- FIXED NEXT DAY LOGIC ---
54
  nyse = mcal.get_calendar('NYSE')
55
  last_dt = price_df.index[-1]
56
+
57
+ # Generate schedule starting from the day AFTER last_dt to ensure we find the future open
58
+ search_start = last_dt + pd.Timedelta(days=1)
59
+ sched = nyse.schedule(start_date=search_start, end_date=search_start + pd.Timedelta(days=10))
60
+
61
+ # Take the first valid trading day from the future schedule
62
+ next_day = sched.index[0]
63
+ # ----------------------------
64
 
65
  return {
66
  'equity_curve': equity_curve,
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -6,19 +6,28 @@ from engine.trend_engine import run_trend_module
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
 
9
  if 'master_data' not in st.session_state:
10
  st.session_state.master_data = load_from_hf()
11
 
12
  with st.sidebar:
13
  st.header("πŸ—‚οΈ Configuration")
 
 
 
 
 
 
 
 
 
 
 
14
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
15
-
16
- # NEW SUB-OPTIONS
17
- sub_option = st.selectbox("Conviction Level",
18
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
19
-
20
- start_yr = st.slider("OOS Start", 2008, 2026, 2018)
21
- vol_target = st.slider("Volatility Target (%)", 5, 20, 12) / 100
22
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
23
 
24
  if st.session_state.master_data is not None:
@@ -32,48 +41,43 @@ if st.session_state.master_data is not None:
32
  st.session_state.master_data['SOFR_ANNUAL'],
33
  vol_target, start_yr, sub_option)
34
 
35
- st.title(f"πŸ“Š {option} - {sub_option}")
36
 
37
- # Metrics
38
  m1, m2, m3, m4 = st.columns(4)
39
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
40
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
41
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
42
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
43
 
44
- # Chart
45
  fig = go.Figure()
46
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
47
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
48
- fig.update_layout(title="OOS Performance", template="plotly_dark")
49
  st.plotly_chart(fig, use_container_width=True)
50
 
51
- # Methodology & Target
52
  st.divider()
53
  col_left, col_right = st.columns([1, 1.5])
54
 
55
  with col_left:
56
- st.subheader(f"🎯 Target Allocation: {results['next_day']}")
57
  w = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
58
  w['CASH (SOFR)'] = results['cash_weight']
59
- st.table(pd.DataFrame.from_dict(w, orient='index', columns=['Weight']).style.format("{:.2%}"))
 
60
 
61
  with col_right:
62
- st.subheader("πŸ“š Methodology: Zarattini & Antonacci (2025)")
63
  st.markdown(f"""
64
- This strategy implements the **2025 Charles H. Dow Award** framework authored by **Andrea Zarattini** and **Michael Antonacci**.
65
 
66
- * **Trend Detection**: Uses a 50/200 SMA dual-filter.
67
- * **Conviction Scoring**: Assets are ranked based on their relative distance from the 200-day trend line.
68
- * **Concentration**: Under **{sub_option}**, the engine filters the universe to only the top-tier trending assets.
69
- * **Risk Sizing**: Allocation is inversely proportional to 60-day volatility. If the selected ETFs cannot safely fill the **{vol_target:.0%}** risk budget, the remainder is held in **CASH (SOFR)**.
 
70
  """)
71
-
72
-
73
-
74
- ### Why this is powerful:
75
- * **The "3 Highest Conviction" sub-option** creates a "Best of the Best" portfolio. Instead of diluting your risk budget across 20 ETFs that are barely in trend, it puts the full 12% risk budget into the 3 strongest leaders.
76
- * **The "1 Highest Conviction" sub-option** is the ultimate momentum play, concentrating all allowed risk into the single strongest trend.
77
- * **Authorship**: Zarattini and Antonacci's names are now front-and-center in the methodology section.
78
-
79
- **Would you like me to add a "Drawdown Overlay" chart so you can compare the risk spikes between the Concentrated (1-ETF) and Broad (All ETFs) sub-options?**
 
6
 
7
  st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
8
 
9
+ # Initialize Session State safely
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
13
  with st.sidebar:
14
  st.header("πŸ—‚οΈ Configuration")
15
+ if st.session_state.master_data is None:
16
+ if st.button("πŸš€ Seed Database"):
17
+ st.session_state.master_data = seed_dataset_from_scratch()
18
+ st.rerun()
19
+ else:
20
+ st.success(f"Sync: {st.session_state.master_data.index.max().date()}")
21
+ if st.button("πŸ”„ Sync New Data"):
22
+ st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
23
+ st.rerun()
24
+
25
+ st.divider()
26
  option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
27
+ sub_option = st.selectbox("Conviction Strategy",
 
 
28
  ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
29
+ start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
30
+ vol_target = st.slider("Risk Target (%)", 5, 20, 12) / 100
 
31
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
32
 
33
  if st.session_state.master_data is not None:
 
41
  st.session_state.master_data['SOFR_ANNUAL'],
42
  vol_target, start_yr, sub_option)
43
 
44
+ st.title(f"πŸ“Š {option}: {sub_option}")
45
 
46
+ # Row 1: Metrics
47
  m1, m2, m3, m4 = st.columns(4)
48
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
49
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
50
  m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
51
  m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
52
 
53
+ # Row 2: Performance Chart
54
  fig = go.Figure()
55
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
56
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
57
+ fig.update_layout(title="Out-of-Sample Performance", template="plotly_dark", xaxis_title="Year")
58
  st.plotly_chart(fig, use_container_width=True)
59
 
60
+ # Row 3: Methodology & Allocations
61
  st.divider()
62
  col_left, col_right = st.columns([1, 1.5])
63
 
64
  with col_left:
65
+ st.subheader(f"🎯 Allocation for {results['next_day']}")
66
  w = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
67
  w['CASH (SOFR)'] = results['cash_weight']
68
+ df_w = pd.DataFrame.from_dict(w, orient='index', columns=['Weight'])
69
+ st.table(df_w.style.format("{:.2%}"))
70
 
71
  with col_right:
72
+ st.subheader("πŸ“š Methodology: Zarattini & Antonacci")
73
  st.markdown(f"""
74
+ This strategy implements the **2025 Charles H. Dow Award** winning framework by **Andrea Zarattini** and **Michael Antonacci**.
75
 
76
+ 1. **Regime Identification**: A dual 50/200-day SMA filter determines asset eligibility.
77
+ 2. **Conviction Ranking**: Assets are ranked by their distance from the 200-day SMA.
78
+ 3. **Concentrated Sizing**: Under the **{sub_option}** setting, the system focuses the risk budget only on the top leaders.
79
+ 4. **Volatility Targeting**: Allocations are sized inversely to 60-day volatility to maintain a stable **{vol_target:.0%}** risk profile.
80
+ 5. **Cash Buffer**: Remaining budget earns the live SOFR rate.
81
  """)
82
+ else:
83
+ st.info("πŸ’‘ Adjust settings and click 'Run Analysis'.")
 
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -3,54 +3,55 @@ import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr, sub_option):
6
- # 1. Trend Signals & Conviction Scoring
7
  sma_200 = price_df.rolling(200).mean()
8
  sma_50 = price_df.rolling(50).mean()
9
 
10
- # Conviction Score = How far is the price above the 200 SMA?
11
  conviction_score = (price_df / sma_200) - 1
12
  signals = (sma_50 > sma_200).astype(int)
13
 
14
- # 2. Individual Asset Volatility
15
  returns = price_df.pct_change()
16
  asset_vol = returns.rolling(60).std() * np.sqrt(252)
17
 
18
- # 3. CONVICTION FILTERING (Sub-Options)
19
  if sub_option == "3 Highest Conviction":
20
- # Rank assets by score, keep only top 3 that are ALSO in trend
21
  ranks = conviction_score.rank(axis=1, ascending=False)
22
  signals = ((ranks <= 3) & (signals == 1)).astype(int)
23
  elif sub_option == "1 Highest Conviction":
24
- # Rank assets, keep only the top 1 that is ALSO in trend
25
  ranks = conviction_score.rank(axis=1, ascending=False)
26
  signals = ((ranks <= 1) & (signals == 1)).astype(int)
27
- # "All Trending" remains as is
28
 
29
- # 4. Volatility Scaling
 
30
  active_counts = signals.sum(axis=1)
31
- # Inverse vol weight per asset
32
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
33
  final_weights = raw_weights * signals
34
 
35
- # 5. Leverage Cap & Cash
36
  total_exposure = final_weights.sum(axis=1)
37
  scale_factor = total_exposure.apply(lambda x: 1.5/x if x > 1.5 else 1.0)
38
  final_weights = final_weights.multiply(scale_factor, axis=0)
39
 
 
40
  cash_weight = 1.0 - final_weights.sum(axis=1)
41
 
42
- # 6. Performance Calculation
43
  portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
44
  portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)
45
 
46
- # 7. OOS Filtering
47
  oos_mask = portfolio_ret.index.year >= start_yr
48
  equity_curve = (1 + portfolio_ret[oos_mask]).cumprod()
49
  bench_curve = (1 + bench_series.pct_change().fillna(0)[oos_mask]).cumprod()
50
 
51
- # 8. Stats
52
- dd = (equity_curve / equity_curve.cummax()) - 1
53
  ann_ret = portfolio_ret[oos_mask].mean() * 252
 
 
54
 
55
  # NYSE Calendar
56
  nyse = mcal.get_calendar('NYSE')
@@ -61,7 +62,7 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr,
61
  'equity_curve': equity_curve,
62
  'bench_curve': bench_curve,
63
  'ann_ret': ann_ret,
64
- 'sharpe': (ann_ret - sofr_series.iloc[-1]) / (portfolio_ret[oos_mask].std() * np.sqrt(252)),
65
  'max_dd': dd.min(),
66
  'next_day': next_day.date(),
67
  'current_weights': final_weights.iloc[-1],
 
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr, sub_option):
6
+ # 1. Trend & Conviction Logic
7
  sma_200 = price_df.rolling(200).mean()
8
  sma_50 = price_df.rolling(50).mean()
9
 
10
+ # Conviction = Percentage distance above the 200 SMA
11
  conviction_score = (price_df / sma_200) - 1
12
  signals = (sma_50 > sma_200).astype(int)
13
 
14
+ # 2. Risk Metrics
15
  returns = price_df.pct_change()
16
  asset_vol = returns.rolling(60).std() * np.sqrt(252)
17
 
18
+ # 3. Apply Sub-Option Concentration
19
  if sub_option == "3 Highest Conviction":
20
+ # Rank daily: 1 is highest conviction
21
  ranks = conviction_score.rank(axis=1, ascending=False)
22
  signals = ((ranks <= 3) & (signals == 1)).astype(int)
23
  elif sub_option == "1 Highest Conviction":
 
24
  ranks = conviction_score.rank(axis=1, ascending=False)
25
  signals = ((ranks <= 1) & (signals == 1)).astype(int)
26
+ # Else: "All Trending ETFs" uses the base signals
27
 
28
+ # 4. Volatility Target Weighting
29
+ # Methodology: Target Vol / Asset Vol, distributed across active signals
30
  active_counts = signals.sum(axis=1)
 
31
  raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
32
  final_weights = raw_weights * signals
33
 
34
+ # 5. Leverage Cap (1.5x)
35
  total_exposure = final_weights.sum(axis=1)
36
  scale_factor = total_exposure.apply(lambda x: 1.5/x if x > 1.5 else 1.0)
37
  final_weights = final_weights.multiply(scale_factor, axis=0)
38
 
39
+ # 6. Cash (SOFR) Allocation
40
  cash_weight = 1.0 - final_weights.sum(axis=1)
41
 
42
+ # 7. Portfolio Returns
43
  portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
44
  portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)
45
 
46
+ # 8. Out-of-Sample Slicing
47
  oos_mask = portfolio_ret.index.year >= start_yr
48
  equity_curve = (1 + portfolio_ret[oos_mask]).cumprod()
49
  bench_curve = (1 + bench_series.pct_change().fillna(0)[oos_mask]).cumprod()
50
 
51
+ # Stats
 
52
  ann_ret = portfolio_ret[oos_mask].mean() * 252
53
+ ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
54
+ dd = (equity_curve / equity_curve.cummax()) - 1
55
 
56
  # NYSE Calendar
57
  nyse = mcal.get_calendar('NYSE')
 
62
  'equity_curve': equity_curve,
63
  'bench_curve': bench_curve,
64
  'ann_ret': ann_ret,
65
+ 'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
66
  'max_dd': dd.min(),
67
  'next_day': next_day.date(),
68
  'current_weights': final_weights.iloc[-1],
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -10,20 +10,14 @@ if 'master_data' not in st.session_state:
10
  st.session_state.master_data = load_from_hf()
11
 
12
  with st.sidebar:
13
- st.header("πŸ—‚οΈ Data Controls")
14
- if st.session_state.master_data is None:
15
- if st.button("πŸš€ Seed Database (FRED/Stooq)"):
16
- st.session_state.master_data = seed_dataset_from_scratch()
17
- st.rerun()
18
- else:
19
- st.success(f"DB Last Updated: {st.session_state.master_data.index.max().date()}")
20
- if st.button("πŸ”„ Sync Daily Data"):
21
- st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
22
- st.rerun()
23
 
24
- st.divider()
25
- option = st.radio("Asset Universe", ("Option A - FI Trend", "Option B - Equity Trend"))
26
- start_yr = st.slider("Out-of-Sample Start", 2008, 2026, 2018)
 
 
27
  vol_target = st.slider("Volatility Target (%)", 5, 20, 12) / 100
28
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
29
 
@@ -36,50 +30,50 @@ if st.session_state.master_data is not None:
36
  results = run_trend_module(st.session_state.master_data[univ],
37
  st.session_state.master_data[bench],
38
  st.session_state.master_data['SOFR_ANNUAL'],
39
- vol_target, start_yr)
40
 
41
- st.title(f"πŸ“ˆ {option} Performance Report")
42
 
43
- # Row 1: Key Metrics (Reordered)
44
  m1, m2, m3, m4 = st.columns(4)
45
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
46
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
47
- m3.metric("Max Drawdown", f"{results['max_dd_peak']:.1%}")
48
- m4.metric("Current SOFR (Live)", f"{results['current_sofr']:.2%}")
49
 
50
- # Row 2: Interactive Plotly Chart (Visible Years)
51
  fig = go.Figure()
52
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
53
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
54
- fig.update_layout(title="Growth of $1.00 (OOS)", template="plotly_dark", xaxis_title="Timeline")
55
  st.plotly_chart(fig, use_container_width=True)
56
 
57
- # Row 3: Methodology & Allocations
58
  st.divider()
59
  col_left, col_right = st.columns([1, 1.5])
60
 
61
  with col_left:
62
  st.subheader(f"🎯 Target Allocation: {results['next_day']}")
63
- weights_df = results['current_weights'][results['current_weights'] > 0.001].to_dict()
64
- weights_df['CASH (SOFR)'] = results['cash_weight']
65
-
66
- # Clean Table View
67
- final_df = pd.DataFrame.from_dict(weights_df, orient='index', columns=['Weight'])
68
- final_df['Weight'] = final_df['Weight'].apply(lambda x: f"{x:.2%}")
69
- st.table(final_df)
70
 
71
  with col_right:
72
- st.subheader("πŸ“š Strategy Methodology")
73
  st.markdown(f"""
74
- This engine implements the **'Century of Profitable Trends'** framework (2025 Dow Award):
75
 
76
- 1. **Regime Identification**: A dual 50/200-day Simple Moving Average (SMA) filter determines eligibility. Only assets in an uptrend are held.
77
- 2. **Inverse-Volatility Sizing**: Unlike equal weighting, each asset is sized based on its 60-day realized volatility. Lower volatility assets receive higher capital allocations.
78
- 3. **Portfolio Risk Targeting**: The system calculates a total portfolio weight to meet your **{vol_target:.0%} Volatility Target**.
79
- 4. **Cash Scaling (SOFR)**: If the combined risk of the trending assets exceeds the target, or if assets fall out of trend, capital is diverted to **CASH**, earning the live SOFR rate.
80
- 5. **Leverage Management**: Gross exposure is dynamically managed and capped at 1.5x to prevent excessive drawdown during regime shifts.
81
  """)
82
-
83
 
84
- else:
85
- st.info("πŸ’‘ Adjust your risk parameters and click 'Run Analysis' to see the predicted allocations.")
 
 
 
 
 
 
 
10
  st.session_state.master_data = load_from_hf()
11
 
12
  with st.sidebar:
13
+ st.header("πŸ—‚οΈ Configuration")
14
+ option = st.selectbox("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
 
 
 
 
 
 
 
 
15
 
16
+ # NEW SUB-OPTIONS
17
+ sub_option = st.selectbox("Conviction Level",
18
+ ("All Trending ETFs", "3 Highest Conviction", "1 Highest Conviction"))
19
+
20
+ start_yr = st.slider("OOS Start", 2008, 2026, 2018)
21
  vol_target = st.slider("Volatility Target (%)", 5, 20, 12) / 100
22
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
23
 
 
30
  results = run_trend_module(st.session_state.master_data[univ],
31
  st.session_state.master_data[bench],
32
  st.session_state.master_data['SOFR_ANNUAL'],
33
+ vol_target, start_yr, sub_option)
34
 
35
+ st.title(f"πŸ“Š {option} - {sub_option}")
36
 
37
+ # Metrics
38
  m1, m2, m3, m4 = st.columns(4)
39
  m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
40
  m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
41
+ m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
42
+ m4.metric("Current SOFR", f"{results['current_sofr']:.2%}")
43
 
44
+ # Chart
45
  fig = go.Figure()
46
  fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
47
  fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
48
+ fig.update_layout(title="OOS Performance", template="plotly_dark")
49
  st.plotly_chart(fig, use_container_width=True)
50
 
51
+ # Methodology & Target
52
  st.divider()
53
  col_left, col_right = st.columns([1, 1.5])
54
 
55
  with col_left:
56
  st.subheader(f"🎯 Target Allocation: {results['next_day']}")
57
+ w = results['current_weights'][results['current_weights'] > 0.0001].to_dict()
58
+ w['CASH (SOFR)'] = results['cash_weight']
59
+ st.table(pd.DataFrame.from_dict(w, orient='index', columns=['Weight']).style.format("{:.2%}"))
 
 
 
 
60
 
61
  with col_right:
62
+ st.subheader("πŸ“š Methodology: Zarattini & Antonacci (2025)")
63
  st.markdown(f"""
64
+ This strategy implements the **2025 Charles H. Dow Award** framework authored by **Andrea Zarattini** and **Michael Antonacci**.
65
 
66
+ * **Trend Detection**: Uses a 50/200 SMA dual-filter.
67
+ * **Conviction Scoring**: Assets are ranked based on their relative distance from the 200-day trend line.
68
+ * **Concentration**: Under **{sub_option}**, the engine filters the universe to only the top-tier trending assets.
69
+ * **Risk Sizing**: Allocation is inversely proportional to 60-day volatility. If the selected ETFs cannot safely fill the **{vol_target:.0%}** risk budget, the remainder is held in **CASH (SOFR)**.
 
70
  """)
 
71
 
72
+
73
+
74
+ ### Why this is powerful:
75
+ * **The "3 Highest Conviction" sub-option** creates a "Best of the Best" portfolio. Instead of diluting your risk budget across 20 ETFs that are barely in trend, it puts the full 12% risk budget into the 3 strongest leaders.
76
+ * **The "1 Highest Conviction" sub-option** is the ultimate momentum play, concentrating all allowed risk into the single strongest trend.
77
+ * **Authorship**: Zarattini and Antonacci's names are now front-and-center in the methodology section.
78
+
79
+ **Would you like me to add a "Drawdown Overlay" chart so you can compare the risk spikes between the Concentrated (1-ETF) and Broad (All ETFs) sub-options?**
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -2,53 +2,57 @@ import pandas as pd
2
  import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
- def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
6
- # 1. Signal Logic: Dual SMA Crossover
7
- sma_fast = price_df.rolling(50).mean()
8
- sma_slow = price_df.rolling(200).mean()
9
- signals = (sma_fast > sma_slow).astype(int)
10
 
11
- # 2. Volatility Logic: 60-Day Realized Standard Deviation
 
 
 
 
12
  returns = price_df.pct_change()
13
  asset_vol = returns.rolling(60).std() * np.sqrt(252)
14
 
15
- # 3. Risk-Budgeted Weighting
16
- # Methodology: Allocation = (Target Vol / Asset Vol) / Number of Active Assets
17
- # This ensures that each trending asset contributes a fixed 'slice' of risk.
18
- active_counts = signals.sum(axis=1)
 
 
 
 
 
 
19
 
20
- # Weight per asset: Target Vol divided by Asset Vol, then distributed among active trends
21
- raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).fillna(0)
 
 
22
  final_weights = raw_weights * signals
23
 
24
- # 4. Leverage Cap & Cash Logic
25
- # We cap total gross exposure at 1.5x (150%) to prevent extreme tail risk
26
  total_exposure = final_weights.sum(axis=1)
27
  scale_factor = total_exposure.apply(lambda x: 1.5/x if x > 1.5 else 1.0)
28
  final_weights = final_weights.multiply(scale_factor, axis=0)
29
 
30
- # Recalculate exposure after capping
31
- final_exposure = final_weights.sum(axis=1)
32
- cash_weight = 1.0 - final_exposure
33
 
34
- # 5. Returns: Asset Performance + Cash (SOFR) Interest
35
- # If exposure is < 100%, the remainder earns SOFR interest
36
  portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
37
  portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)
38
 
39
- bench_returns = bench_series.pct_change().fillna(0)
40
-
41
- # 6. OOS Performance Slicing
42
  oos_mask = portfolio_ret.index.year >= start_yr
43
  equity_curve = (1 + portfolio_ret[oos_mask]).cumprod()
44
- bench_curve = (1 + bench_returns[oos_mask]).cumprod()
45
 
46
- # 7. Drawdown & Stats
47
- dd_series = (equity_curve / equity_curve.cummax()) - 1
48
  ann_ret = portfolio_ret[oos_mask].mean() * 252
49
- ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)
50
 
51
- # NYSE Calendar for Next Session
52
  nyse = mcal.get_calendar('NYSE')
53
  last_dt = price_df.index[-1]
54
  next_day = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10)).index[1]
@@ -57,9 +61,8 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
57
  'equity_curve': equity_curve,
58
  'bench_curve': bench_curve,
59
  'ann_ret': ann_ret,
60
- 'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
61
- 'max_dd_peak': dd_series.min(),
62
- 'avg_daily_dd': dd_series.mean(),
63
  'next_day': next_day.date(),
64
  'current_weights': final_weights.iloc[-1],
65
  'cash_weight': cash_weight.iloc[-1],
 
2
  import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
+ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr, sub_option):
6
+ # 1. Trend Signals & Conviction Scoring
7
+ sma_200 = price_df.rolling(200).mean()
8
+ sma_50 = price_df.rolling(50).mean()
 
9
 
10
+ # Conviction Score = How far is the price above the 200 SMA?
11
+ conviction_score = (price_df / sma_200) - 1
12
+ signals = (sma_50 > sma_200).astype(int)
13
+
14
+ # 2. Individual Asset Volatility
15
  returns = price_df.pct_change()
16
  asset_vol = returns.rolling(60).std() * np.sqrt(252)
17
 
18
+ # 3. CONVICTION FILTERING (Sub-Options)
19
+ if sub_option == "3 Highest Conviction":
20
+ # Rank assets by score, keep only top 3 that are ALSO in trend
21
+ ranks = conviction_score.rank(axis=1, ascending=False)
22
+ signals = ((ranks <= 3) & (signals == 1)).astype(int)
23
+ elif sub_option == "1 Highest Conviction":
24
+ # Rank assets, keep only the top 1 that is ALSO in trend
25
+ ranks = conviction_score.rank(axis=1, ascending=False)
26
+ signals = ((ranks <= 1) & (signals == 1)).astype(int)
27
+ # "All Trending" remains as is
28
 
29
+ # 4. Volatility Scaling
30
+ active_counts = signals.sum(axis=1)
31
+ # Inverse vol weight per asset
32
+ raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
33
  final_weights = raw_weights * signals
34
 
35
+ # 5. Leverage Cap & Cash
 
36
  total_exposure = final_weights.sum(axis=1)
37
  scale_factor = total_exposure.apply(lambda x: 1.5/x if x > 1.5 else 1.0)
38
  final_weights = final_weights.multiply(scale_factor, axis=0)
39
 
40
+ cash_weight = 1.0 - final_weights.sum(axis=1)
 
 
41
 
42
+ # 6. Performance Calculation
 
43
  portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
44
  portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)
45
 
46
+ # 7. OOS Filtering
 
 
47
  oos_mask = portfolio_ret.index.year >= start_yr
48
  equity_curve = (1 + portfolio_ret[oos_mask]).cumprod()
49
+ bench_curve = (1 + bench_series.pct_change().fillna(0)[oos_mask]).cumprod()
50
 
51
+ # 8. Stats
52
+ dd = (equity_curve / equity_curve.cummax()) - 1
53
  ann_ret = portfolio_ret[oos_mask].mean() * 252
 
54
 
55
+ # NYSE Calendar
56
  nyse = mcal.get_calendar('NYSE')
57
  last_dt = price_df.index[-1]
58
  next_day = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10)).index[1]
 
61
  'equity_curve': equity_curve,
62
  'bench_curve': bench_curve,
63
  'ann_ret': ann_ret,
64
+ 'sharpe': (ann_ret - sofr_series.iloc[-1]) / (portfolio_ret[oos_mask].std() * np.sqrt(252)),
65
+ 'max_dd': dd.min(),
 
66
  'next_day': next_day.date(),
67
  'current_weights': final_weights.iloc[-1],
68
  'cash_weight': cash_weight.iloc[-1],
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -3,49 +3,65 @@ import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
6
- # 1. Full-period math for indicators
7
  sma_fast = price_df.rolling(50).mean()
8
  sma_slow = price_df.rolling(200).mean()
9
  signals = (sma_fast > sma_slow).astype(int)
10
 
 
11
  returns = price_df.pct_change()
12
- realized_vol = returns.rolling(60).std() * np.sqrt(252)
13
- weights = (target_vol / realized_vol).fillna(0).clip(upper=1.5)
14
 
15
- # 2. Strategy Returns
16
- asset_ret = (signals.shift(1) * weights.shift(1) * returns).mean(axis=1)
17
- cash_pct = 1 - signals.mean(axis=1)
18
- strat_returns = asset_ret + (cash_pct.shift(1) * (sofr_series.shift(1) / 252))
19
- bench_returns = bench_series.pct_change().fillna(0)
 
 
 
20
 
21
- # 3. Slice for OOS Period
22
- oos_mask = strat_returns.index.year >= start_yr
23
- oos_strat = strat_returns[oos_mask]
24
- oos_bench = bench_returns[oos_mask]
 
25
 
26
- equity_curve = (1 + oos_strat).cumprod()
27
- bench_curve = (1 + oos_bench).cumprod()
 
28
 
29
- # 4. Drawdowns
30
- hwm = equity_curve.cummax()
31
- dd_series = (equity_curve / hwm) - 1
 
 
 
32
 
33
- # 5. Next Day Trading Date
 
 
 
 
 
 
 
 
 
 
34
  nyse = mcal.get_calendar('NYSE')
35
  last_dt = price_df.index[-1]
36
- sched = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10))
37
- next_day = sched.index[1] if len(sched) > 1 else sched.index[0]
38
-
39
- ann_ret = oos_strat.mean() * 252
40
- ann_vol = oos_strat.std() * np.sqrt(252)
41
 
42
  return {
43
  'equity_curve': equity_curve,
44
  'bench_curve': bench_curve,
45
- 'sharpe': (ann_ret - 0.03) / ann_vol if ann_vol > 0 else 0,
46
  'ann_ret': ann_ret,
 
47
  'max_dd_peak': dd_series.min(),
48
  'avg_daily_dd': dd_series.mean(),
49
  'next_day': next_day.date(),
50
- 'current_signals': signals.iloc[-1]
 
 
51
  }
 
3
  import pandas_market_calendars as mcal
4
 
5
def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
    """Risk-budgeted SMA trend strategy with SOFR interest on idle cash.

    Parameters
    ----------
    price_df : DataFrame of asset closes; rows before ``start_yr`` are indicator burn-in.
    bench_series : benchmark close series (e.g. SPY or AGG).
    sofr_series : annualized SOFR rate expressed as a decimal.
    target_vol : annualized portfolio volatility target (decimal).
    start_yr : first calendar year of the out-of-sample window.

    Returns a dict with OOS equity/benchmark curves, summary stats,
    the next NYSE session date and the latest target weights.
    """
    # 1. Signal Logic: Dual SMA Crossover
    sma_fast = price_df.rolling(50).mean()
    sma_slow = price_df.rolling(200).mean()
    signals = (sma_fast > sma_slow).astype(int)

    # 2. Volatility Logic: 60-Day Realized Standard Deviation (annualized)
    returns = price_df.pct_change()
    asset_vol = returns.rolling(60).std() * np.sqrt(252)

    # 3. Risk-Budgeted Weighting:
    #    Allocation = (Target Vol / Asset Vol) / Number of Active Assets,
    #    so each trending asset contributes a fixed slice of risk.
    active_counts = signals.sum(axis=1)
    # FIX: division by a zero active count (or zero realized vol) yields inf,
    # which fillna(0) does NOT remove -- scrub infinities before filling.
    raw_weights = (target_vol / asset_vol).divide(active_counts, axis=0).replace([np.inf, -np.inf], 0).fillna(0)
    final_weights = raw_weights * signals

    # 4. Leverage Cap & Cash Logic: cap total gross exposure at 1.5x
    total_exposure = final_weights.sum(axis=1)
    scale_factor = total_exposure.apply(lambda x: 1.5/x if x > 1.5 else 1.0)
    final_weights = final_weights.multiply(scale_factor, axis=0)

    # Whatever is not invested sits in cash
    final_exposure = final_weights.sum(axis=1)
    cash_weight = 1.0 - final_exposure

    # 5. Returns: prior-day weights applied to today's returns,
    #    plus SOFR interest on the uninvested remainder.
    portfolio_ret = (final_weights.shift(1) * returns).sum(axis=1)
    portfolio_ret += cash_weight.shift(1) * (sofr_series.shift(1) / 252)

    bench_returns = bench_series.pct_change().fillna(0)

    # 6. OOS Performance Slicing
    oos_mask = portfolio_ret.index.year >= start_yr
    equity_curve = (1 + portfolio_ret[oos_mask]).cumprod()
    bench_curve = (1 + bench_returns[oos_mask]).cumprod()

    # 7. Drawdown & Stats
    dd_series = (equity_curve / equity_curve.cummax()) - 1
    ann_ret = portfolio_ret[oos_mask].mean() * 252
    ann_vol = portfolio_ret[oos_mask].std() * np.sqrt(252)

    # NYSE Calendar for the next session.
    # FIX: the schedule can contain a single row; index[1] would then raise.
    nyse = mcal.get_calendar('NYSE')
    last_dt = price_df.index[-1]
    sched = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10))
    next_day = sched.index[1] if len(sched) > 1 else sched.index[0]

    return {
        'equity_curve': equity_curve,
        'bench_curve': bench_curve,
        'ann_ret': ann_ret,
        'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
        'max_dd_peak': dd_series.min(),
        'avg_daily_dd': dd_series.mean(),
        'next_day': next_day.date(),
        'current_weights': final_weights.iloc[-1],
        'cash_weight': cash_weight.iloc[-1],
        'current_sofr': sofr_series.iloc[-1]
    }
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,95 +1,85 @@
1
  import streamlit as st
2
  import pandas as pd
3
- import numpy as np
4
  from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
5
  from engine.trend_engine import run_trend_module
6
 
7
- st.set_page_config(layout="wide", page_title="P2 Strategy Suite | 2025 Dow Award Edition")
8
 
9
- # --- SAFE SESSION INITIALIZATION ---
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
13
- # --- SIDEBAR UI ---
14
  with st.sidebar:
15
- st.header("πŸ—‚οΈ Data Management")
16
  if st.session_state.master_data is None:
17
- st.error("Dataset not found.")
18
- if st.button("πŸš€ Seed Database (2008-2026)", use_container_width=True):
19
  st.session_state.master_data = seed_dataset_from_scratch()
20
  st.rerun()
21
  else:
22
- last_dt = pd.to_datetime(st.session_state.master_data.index).max()
23
- st.success(f"Database Active: {last_dt.date()}")
24
- if st.button("πŸ”„ Sync Daily Data", use_container_width=True):
25
  st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
26
  st.rerun()
27
 
28
  st.divider()
29
- st.header("βš™οΈ Strategy Settings")
30
- option = st.radio("Strategy Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
31
- start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
32
- vol_target = st.slider("Ann. Vol Target (%)", 5, 25, 12) / 100
33
  run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
34
 
35
- # --- MAIN OUTPUT UI ---
36
  if st.session_state.master_data is not None:
37
  if run_btn:
38
- with st.spinner("Analyzing Market Regimes..."):
39
- # 1. Setup Universe and Benchmark
40
- is_fi = "Option A" in option
41
- univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
42
- bench_ticker = "AGG" if is_fi else "SPY"
43
-
44
- # 2. Filter Data (Using Start Year as OOS boundary)
45
- # The engine uses data prior to start_yr for signal lookback (Training/Buffer)
46
- df = st.session_state.master_data
47
-
48
- # 3. Execute Engine
49
- results = run_trend_module(df[univ], df[bench_ticker], df['SOFR_ANNUAL'], vol_target, start_yr)
50
-
51
- # 4. KPI Header
52
- st.title(f"πŸ“ˆ {option} Performance vs {bench_ticker}")
53
- m1, m2, m3, m4 = st.columns(4)
54
- m1.metric("OOS Sharpe", f"{results['sharpe']:.2f}")
55
- m2.metric("Ann. Return", f"{results['ann_ret']:.1%}")
56
- m3.metric("Peak-to-Trough DD", f"{results['max_dd_peak']:.1%}")
57
- m4.metric("Avg Daily DD", f"{results['avg_daily_dd']:.2%}")
58
 
59
- # 5. Equity Curve Chart
60
- chart_df = pd.DataFrame({
61
- "Strategy Portfolio": results['equity_curve'],
62
- f"Benchmark ({bench_ticker})": results['bench_curve']
63
- })
64
- st.subheader("Cumulative Growth of $1.00 (Out-of-Sample)")
65
- st.line_chart(chart_df)
66
 
67
- # 6. Actionable Allocation (Next Trading Day)
68
- st.divider()
69
- c1, c2 = st.columns([1, 2])
70
- with c1:
71
- st.subheader("πŸ“… Next Trading Session")
72
- st.info(f"**NYSE Market Date:** {results['next_day']}\n\n**Action:** Execute at Open")
73
- with c2:
74
- st.subheader("🎯 Required Allocation")
75
- active = results['current_signals'][results['current_signals'] > 0].index.tolist()
76
- if active:
77
- st.success(f"**Long Positions:** {', '.join(active)}")
78
- else:
79
- st.warning("βš–οΈ **Position:** 100% CASH (Market Neutral)")
80
 
81
- # 7. Methodology Footer
82
- st.divider()
83
- with st.expander("πŸ“š Methodology & 2025 Dow Award Reference"):
84
- st.markdown("""
85
- ### A Century of Profitable Trends (Zarattini & Antonacci, 2025)
86
- This model implements the framework from the 2025 Charles H. Dow Award winning paper:
87
- * **Regime Filter:** Dual SMA logic (50/200 crossover) proxying for Keltner/Donchian channels.
88
- * **Volatility Targeting:** Positions sized by $Weight = \sigma_{target} / \sigma_{realized}$, capped at 1.5x.
89
- * **Benchmarking:** Equity trends are compared to SPY; Fixed Income to AGG.
90
- * **OOS Testing:** The analysis shown above represents the **Out-of-Sample** period. Data prior to the start year is used solely for initial indicator 'burn-in'.
91
- """)
 
 
92
  else:
93
- st.info("πŸ’‘ Adjust your parameters in the sidebar and click **'Run Analysis'**.")
94
- else:
95
- st.warning("πŸ‘ˆ Please click 'Seed Database' to initialize historical data.")
 
1
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
from engine.trend_engine import run_trend_module

st.set_page_config(layout="wide", page_title="P2 Strategy Suite")

# Load the persisted dataset once per session.
if 'master_data' not in st.session_state:
    st.session_state.master_data = load_from_hf()

with st.sidebar:
    st.header("πŸ—‚οΈ Data Controls")
    if st.session_state.master_data is None:
        # No dataset on HF yet: offer a full historical seed.
        if st.button("πŸš€ Seed Database (FRED/Stooq)"):
            st.session_state.master_data = seed_dataset_from_scratch()
            st.rerun()
    else:
        st.success(f"DB Last Updated: {st.session_state.master_data.index.max().date()}")
        if st.button("πŸ”„ Sync Daily Data"):
            st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
            st.rerun()

    st.divider()
    option = st.radio("Asset Universe", ("Option A - FI Trend", "Option B - Equity Trend"))
    start_yr = st.slider("Out-of-Sample Start", 2008, 2026, 2018)
    vol_target = st.slider("Volatility Target (%)", 5, 20, 12) / 100
    run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")

if st.session_state.master_data is not None:
    if run_btn:
        # Map the radio choice to a ticker universe and its benchmark.
        is_fi = "Option A" in option
        univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
        bench = "AGG" if is_fi else "SPY"

        results = run_trend_module(st.session_state.master_data[univ],
                                   st.session_state.master_data[bench],
                                   st.session_state.master_data['SOFR_ANNUAL'],
                                   vol_target, start_yr)

        st.title(f"πŸ“ˆ {option} Performance Report")

        # Row 1: Key Metrics
        m1, m2, m3, m4 = st.columns(4)
        m1.metric("Annual Return", f"{results['ann_ret']:.1%}")
        m2.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
        m3.metric("Max Drawdown", f"{results['max_dd_peak']:.1%}")
        m4.metric("Current SOFR (Live)", f"{results['current_sofr']:.2%}")

        # Row 2: Interactive equity-curve chart (strategy vs benchmark)
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=results['equity_curve'].index, y=results['equity_curve'], name='Strategy'))
        fig.add_trace(go.Scatter(x=results['bench_curve'].index, y=results['bench_curve'], name=f'Benchmark ({bench})'))
        fig.update_layout(title="Growth of $1.00 (OOS)", template="plotly_dark", xaxis_title="Timeline")
        st.plotly_chart(fig, use_container_width=True)

        # Row 3: Next-session allocation table and methodology notes
        st.divider()
        col_left, col_right = st.columns([1, 1.5])

        with col_left:
            st.subheader(f"🎯 Target Allocation: {results['next_day']}")
            # Drop dust positions (<0.1%) and append the cash sleeve.
            weights_map = results['current_weights'][results['current_weights'] > 0.001].to_dict()
            weights_map['CASH (SOFR)'] = results['cash_weight']

            # Clean table view with percent formatting
            final_df = pd.DataFrame.from_dict(weights_map, orient='index', columns=['Weight'])
            final_df['Weight'] = final_df['Weight'].apply(lambda x: f"{x:.2%}")
            st.table(final_df)

        with col_right:
            st.subheader("πŸ“š Strategy Methodology")
            st.markdown(f"""
            This engine implements the **'Century of Profitable Trends'** framework (2025 Dow Award):

            1. **Regime Identification**: A dual 50/200-day Simple Moving Average (SMA) filter determines eligibility. Only assets in an uptrend are held.
            2. **Inverse-Volatility Sizing**: Unlike equal weighting, each asset is sized based on its 60-day realized volatility. Lower volatility assets receive higher capital allocations.
            3. **Portfolio Risk Targeting**: The system calculates a total portfolio weight to meet your **{vol_target:.0%} Volatility Target**.
            4. **Cash Scaling (SOFR)**: If the combined risk of the trending assets exceeds the target, or if assets fall out of trend, capital is diverted to **CASH**, earning the live SOFR rate.
            5. **Leverage Management**: Gross exposure is dynamically managed and capped at 1.5x to prevent excessive drawdown during regime shifts.
            """)

    else:
        st.info("πŸ’‘ Adjust your risk parameters and click 'Run Analysis' to see the predicted allocations.")
else:
    # FIX: without this branch the main pane is blank before the first seed.
    st.warning("πŸ‘ˆ Please click 'Seed Database' to initialize historical data.")
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -3,7 +3,7 @@ import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
6
- # 1. Full period calculations for signals (Training + OOS)
7
  sma_fast = price_df.rolling(50).mean()
8
  sma_slow = price_df.rolling(200).mean()
9
  signals = (sma_fast > sma_slow).astype(int)
@@ -18,7 +18,7 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
18
  strat_returns = asset_ret + (cash_pct.shift(1) * (sofr_series.shift(1) / 252))
19
  bench_returns = bench_series.pct_change().fillna(0)
20
 
21
- # 3. Filter for Out-of-Sample (OOS) Period
22
  oos_mask = strat_returns.index.year >= start_yr
23
  oos_strat = strat_returns[oos_mask]
24
  oos_bench = bench_returns[oos_mask]
@@ -26,11 +26,11 @@ def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
26
  equity_curve = (1 + oos_strat).cumprod()
27
  bench_curve = (1 + oos_bench).cumprod()
28
 
29
- # 4. Drawdown Stats
30
  hwm = equity_curve.cummax()
31
  dd_series = (equity_curve / hwm) - 1
32
 
33
- # 5. NYSE Next Day
34
  nyse = mcal.get_calendar('NYSE')
35
  last_dt = price_df.index[-1]
36
  sched = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10))
 
3
  import pandas_market_calendars as mcal
4
 
5
  def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
6
+ # 1. Full-period math for indicators
7
  sma_fast = price_df.rolling(50).mean()
8
  sma_slow = price_df.rolling(200).mean()
9
  signals = (sma_fast > sma_slow).astype(int)
 
18
  strat_returns = asset_ret + (cash_pct.shift(1) * (sofr_series.shift(1) / 252))
19
  bench_returns = bench_series.pct_change().fillna(0)
20
 
21
+ # 3. Slice for OOS Period
22
  oos_mask = strat_returns.index.year >= start_yr
23
  oos_strat = strat_returns[oos_mask]
24
  oos_bench = bench_returns[oos_mask]
 
26
  equity_curve = (1 + oos_strat).cumprod()
27
  bench_curve = (1 + oos_bench).cumprod()
28
 
29
+ # 4. Drawdowns
30
  hwm = equity_curve.cummax()
31
  dd_series = (equity_curve / hwm) - 1
32
 
33
+ # 5. Next Day Trading Date
34
  nyse = mcal.get_calendar('NYSE')
35
  last_dt = price_df.index[-1]
36
  sched = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10))
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -12,36 +12,45 @@ FILENAME = "market_data.csv"
12
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
13
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
14
 
 
 
 
 
 
 
 
 
 
15
  def load_from_hf():
 
 
16
  try:
17
- token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
18
- if not token: return None
19
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
20
  return pd.read_csv(path, index_col=0, parse_dates=True)
21
  except:
22
  return None
23
 
24
  def seed_dataset_from_scratch():
25
- # Include benchmarks SPY and AGG
26
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
27
  master_df = pd.DataFrame()
28
  status = st.empty()
29
  progress = st.progress(0)
30
 
31
  for i, t in enumerate(tickers):
32
- status.text(f"Fetching {t} from Stooq...")
33
  try:
34
  data = web.DataReader(f"{t}.US", 'stooq', start='2008-01-01')
35
  if not data.empty:
36
  master_df[t] = data['Close'].sort_index()
37
- time.sleep(0.6)
38
  except:
39
  try:
40
  master_df[t] = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
41
  except: pass
42
  progress.progress((i + 1) / len(tickers))
43
 
44
- # Add SOFR Rate
45
  try:
46
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
47
  master_df['SOFR_ANNUAL'] = sofr / 100
@@ -64,6 +73,9 @@ def sync_incremental_data(df):
64
  return combined
65
 
66
  def upload_to_hf(path):
 
 
 
 
67
  api = HfApi()
68
- token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
69
  api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
 
12
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
13
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
14
 
15
def get_safe_token():
    """Return the HF token, tolerating a missing secrets.toml.

    Streamlit raises when secrets are accessed but no secrets file exists,
    so any failure falls back to the HF_TOKEN environment variable (the way
    Hugging Face Spaces actually expose secrets).
    """
    try:
        token = st.secrets["HF_TOKEN"]
    except Exception:
        token = os.getenv("HF_TOKEN")
    return token
23
+
24
def load_from_hf():
    """Download the cached market-data CSV from the HF dataset repo.

    Returns a date-indexed DataFrame, or None when no token is configured
    or the download/parse fails (the caller then offers to seed from scratch).
    """
    token = get_safe_token()
    if not token: return None
    try:
        path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
        return pd.read_csv(path, index_col=0, parse_dates=True)
    except Exception:
        # Missing repo/file or unreadable CSV: treat as "no dataset yet".
        return None
32
 
33
  def seed_dataset_from_scratch():
34
+ # Include benchmarks for comparison logic
35
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
36
  master_df = pd.DataFrame()
37
  status = st.empty()
38
  progress = st.progress(0)
39
 
40
  for i, t in enumerate(tickers):
41
+ status.text(f"πŸ›°οΈ Fetching {t} from Stooq...")
42
  try:
43
  data = web.DataReader(f"{t}.US", 'stooq', start='2008-01-01')
44
  if not data.empty:
45
  master_df[t] = data['Close'].sort_index()
46
+ time.sleep(0.7) # Polite delay
47
  except:
48
  try:
49
  master_df[t] = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
50
  except: pass
51
  progress.progress((i + 1) / len(tickers))
52
 
53
+ # Add SOFR Rate (Cash Interest)
54
  try:
55
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
56
  master_df['SOFR_ANNUAL'] = sofr / 100
 
73
  return combined
74
 
75
  def upload_to_hf(path):
76
+ token = get_safe_token()
77
+ if not token:
78
+ st.error("❌ Cannot upload: HF_TOKEN is missing from Space Secrets.")
79
+ return
80
  api = HfApi()
 
81
  api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -2,12 +2,8 @@ import pandas as pd
2
  import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
- def run_trend_module(price_df, benchmark_df, sofr_series, target_vol=0.12):
6
- """
7
- Enhanced Engine for 2025 Dow Award Logic.
8
- Includes Dual Drawdowns and Benchmark Comparison.
9
- """
10
- # 1. Signals & Weights
11
  sma_fast = price_df.rolling(50).mean()
12
  sma_slow = price_df.rolling(200).mean()
13
  signals = (sma_fast > sma_slow).astype(int)
@@ -16,43 +12,40 @@ def run_trend_module(price_df, benchmark_df, sofr_series, target_vol=0.12):
16
  realized_vol = returns.rolling(60).std() * np.sqrt(252)
17
  weights = (target_vol / realized_vol).fillna(0).clip(upper=1.5)
18
 
19
- # 2. Returns Calculation
20
- # Strategy
21
  asset_ret = (signals.shift(1) * weights.shift(1) * returns).mean(axis=1)
22
  cash_pct = 1 - signals.mean(axis=1)
23
  strat_returns = asset_ret + (cash_pct.shift(1) * (sofr_series.shift(1) / 252))
 
24
 
25
- # Benchmark (Buy & Hold)
26
- bench_returns = benchmark_df.pct_change().fillna(0)
 
 
27
 
28
- # Equity Curves
29
- equity_curve = (1 + strat_returns).cumprod()
30
- bench_curve = (1 + bench_returns).cumprod()
31
 
32
- # 3. Drawdown Calculations
33
- def get_dd_stats(curve):
34
- hwm = curve.cummax()
35
- dd = (curve / hwm) - 1
36
- return dd.min(), dd # Max DD and the full DD series
37
 
38
- max_dd_peak, dd_series = get_dd_stats(equity_curve)
39
-
40
- # 4. Next Trading Day & Allocations (NYSE Calendar)
41
  nyse = mcal.get_calendar('NYSE')
42
- last_date = price_df.index[-1]
43
- next_day = nyse.valid_days(start_date=last_date + pd.Timedelta(days=1), end_date=last_date + pd.Timedelta(days=10))[0]
 
44
 
45
- # Current Allocations (Based on most recent signals)
46
- current_signals = signals.iloc[-1]
47
- active_assets = current_signals[current_signals > 0].index.tolist()
48
 
49
  return {
50
  'equity_curve': equity_curve,
51
  'bench_curve': bench_curve,
52
- 'strat_ret_series': strat_returns,
53
- 'max_dd_peak': max_dd_peak,
54
- 'dd_series': dd_series,
55
- 'next_trading_day': next_day.date(),
56
- 'active_assets': active_assets,
57
- 'signals': current_signals
58
  }
 
2
  import numpy as np
3
  import pandas_market_calendars as mcal
4
 
5
def run_trend_module(price_df, bench_series, sofr_series, target_vol, start_yr):
    """Equal-weight SMA trend module: full-history signals, OOS-sliced stats.

    price_df : asset closes; rows before ``start_yr`` serve as indicator burn-in.
    bench_series : benchmark closes.  sofr_series : annualized SOFR (decimal).
    target_vol : per-asset annualized vol target.  start_yr : first OOS year.
    Returns a dict of OOS curves, stats, next NYSE date and latest signals.
    """
    # 1. Full period calculations for signals (Training + OOS)
    sma_fast = price_df.rolling(50).mean()
    sma_slow = price_df.rolling(200).mean()
    signals = (sma_fast > sma_slow).astype(int)

    returns = price_df.pct_change()
    realized_vol = returns.rolling(60).std() * np.sqrt(252)
    # Inverse-vol sizing, hard-capped at 1.5x per asset
    weights = (target_vol / realized_vol).fillna(0).clip(upper=1.5)

    # 2. Strategy Returns: prior-day signals/weights; idle fraction earns SOFR
    asset_ret = (signals.shift(1) * weights.shift(1) * returns).mean(axis=1)
    cash_pct = 1 - signals.mean(axis=1)
    strat_returns = asset_ret + (cash_pct.shift(1) * (sofr_series.shift(1) / 252))
    bench_returns = bench_series.pct_change().fillna(0)

    # 3. Filter for Out-of-Sample (OOS) Period
    oos_mask = strat_returns.index.year >= start_yr
    oos_strat = strat_returns[oos_mask]
    oos_bench = bench_returns[oos_mask]

    equity_curve = (1 + oos_strat).cumprod()
    bench_curve = (1 + oos_bench).cumprod()

    # 4. Drawdown Stats
    hwm = equity_curve.cummax()
    dd_series = (equity_curve / hwm) - 1

    # 5. NYSE Next Day (guarded: the 10-day window may hold a single session)
    nyse = mcal.get_calendar('NYSE')
    last_dt = price_df.index[-1]
    sched = nyse.schedule(start_date=last_dt, end_date=last_dt + pd.Timedelta(days=10))
    next_day = sched.index[1] if len(sched) > 1 else sched.index[0]

    ann_ret = oos_strat.mean() * 252
    ann_vol = oos_strat.std() * np.sqrt(252)

    return {
        'equity_curve': equity_curve,
        'bench_curve': bench_curve,
        # FIX: use the live SOFR rate (already passed in) as the risk-free
        # leg instead of a hard-coded 3%, matching the sibling engine versions.
        'sharpe': (ann_ret - sofr_series.iloc[-1]) / ann_vol if ann_vol > 0 else 0,
        'ann_ret': ann_ret,
        'max_dd_peak': dd_series.min(),
        'avg_daily_dd': dd_series.mean(),
        'next_day': next_day.date(),
        'current_signals': signals.iloc[-1]
    }
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -9,86 +9,61 @@ import streamlit as st
9
  REPO_ID = "P2SAMAPA/etf_trend_data"
10
  FILENAME = "market_data.csv"
11
 
12
- # The 27 Equity X-ETFs and 15 FI ETFs from the 2025 Paper
13
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
14
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
15
 
16
- def get_hf_token():
17
- """Safely retrieves the token from secrets or environment."""
18
- try:
19
- return st.secrets["HF_TOKEN"]
20
- except:
21
- return os.getenv("HF_TOKEN")
22
-
23
  def load_from_hf():
24
- """Reads dataset from Hugging Face if it exists."""
25
- token = get_hf_token()
26
- if not token:
27
- return None
28
  try:
 
 
29
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
30
  return pd.read_csv(path, index_col=0, parse_dates=True)
31
  except:
32
  return None
33
 
34
  def seed_dataset_from_scratch():
35
- """Initial download of 18 years of data using Stooq primarily."""
36
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
37
  master_df = pd.DataFrame()
38
-
39
  status = st.empty()
40
- progress_bar = st.progress(0)
41
 
42
- for i, ticker in enumerate(tickers):
43
- status.text(f"Fetching {ticker} from Stooq...")
44
  try:
45
- # Stooq primary (requires .US suffix for ETFs)
46
- data = web.DataReader(f"{ticker}.US", 'stooq', start='2008-01-01')
47
  if not data.empty:
48
- master_df[ticker] = data['Close'].sort_index()
49
- time.sleep(0.6)
50
  except:
51
- # YFinance fallback if Stooq fails for a ticker
52
  try:
53
- yf_data = yf.download(ticker, start="2008-01-01", progress=False)['Adj Close']
54
- master_df[ticker] = yf_data
55
- except:
56
- pass
57
- progress_bar.progress((i + 1) / len(tickers))
58
 
59
- # Add SOFR Rate (Cash interest)
60
  try:
61
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
62
  master_df['SOFR_ANNUAL'] = sofr / 100
63
  except:
64
- master_df['SOFR_ANNUAL'] = 0.045 # Conservative proxy
65
 
66
  master_df = master_df.sort_index().ffill()
67
  master_df.to_csv(FILENAME)
68
-
69
  upload_to_hf(FILENAME)
70
  return master_df
71
 
72
- def sync_incremental_data(df_existing):
73
- """Updates only new data since last index date using YFinance for speed."""
74
- last_date = pd.to_datetime(df_existing.index).max()
75
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
76
-
77
  new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
78
- combined = pd.concat([df_existing, new_data])
79
- combined = combined[~combined.index.duplicated(keep='last')].sort_index()
80
-
81
  combined.to_csv(FILENAME)
82
  upload_to_hf(FILENAME)
83
  return combined
84
 
85
  def upload_to_hf(path):
86
  api = HfApi()
87
- token = get_hf_token()
88
- api.upload_file(
89
- path_or_fileobj=path,
90
- path_in_repo=FILENAME,
91
- repo_id=REPO_ID,
92
- repo_type="dataset",
93
- token=token
94
- )
 
9
  REPO_ID = "P2SAMAPA/etf_trend_data"
10
  FILENAME = "market_data.csv"
11
 
 
12
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
13
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
14
 
 
 
 
 
 
 
 
15
def load_from_hf():
    """Fetch the persisted market-data CSV from the HF dataset repo.

    Returns a date-indexed DataFrame, or None when the token is missing or
    any step (secrets access, download, parse) fails.
    """
    try:
        # Token may live in st.secrets (local dev) or the environment (HF Space);
        # st.secrets access itself can raise when no secrets.toml exists.
        token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
        if not token: return None
        path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
        return pd.read_csv(path, index_col=0, parse_dates=True)
    except Exception:
        return None
23
 
24
def seed_dataset_from_scratch():
    """Build the full 2008-present close-price history and publish it to HF.

    Stooq (ticker + '.US') is the primary source; yfinance is the per-ticker
    fallback, and a ticker is silently skipped only if both fail. SOFR from
    FRED supplies the cash-interest column, with a flat 4.5% proxy when FRED
    is unreachable. Returns the assembled DataFrame.
    """
    # Include benchmarks SPY and AGG alongside the trading universes
    tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
    master_df = pd.DataFrame()
    status = st.empty()
    progress = st.progress(0)

    for i, t in enumerate(tickers):
        status.text(f"Fetching {t} from Stooq...")
        try:
            data = web.DataReader(f"{t}.US", 'stooq', start='2008-01-01')
            if not data.empty:
                master_df[t] = data['Close'].sort_index()
            time.sleep(0.6)  # polite throttle for Stooq
        except Exception:
            # Stooq failed for this ticker: best-effort yfinance fallback.
            try:
                master_df[t] = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
            except Exception: pass
        progress.progress((i + 1) / len(tickers))

    # Add SOFR Rate (annualized decimal)
    try:
        sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
        master_df['SOFR_ANNUAL'] = sofr / 100
    except Exception:
        master_df['SOFR_ANNUAL'] = 0.045  # conservative proxy when FRED is down

    master_df = master_df.sort_index().ffill()
    master_df.to_csv(FILENAME)
    upload_to_hf(FILENAME)
    return master_df
55
 
56
def sync_incremental_data(df):
    """Append price rows newer than the stored history and re-publish the CSV."""
    last_date = df.index.max()
    universe = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
    fresh = yf.download(universe, start=last_date, progress=False)['Adj Close']
    # Merge, keeping the freshly downloaded row wherever dates overlap.
    merged = pd.concat([df, fresh]).sort_index()
    merged = merged[~merged.index.duplicated(keep='last')]
    merged.to_csv(FILENAME)
    upload_to_hf(FILENAME)
    return merged
65
 
66
def upload_to_hf(path):
    """Publish the CSV at *path* to the HF dataset repo.

    st.secrets hard-fails when no secrets.toml exists (the normal case on a
    Space, where secrets arrive as environment variables), so fall back to
    the environment, and abort with a UI error when no token is available.
    """
    try:
        token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
    except Exception:
        token = os.getenv("HF_TOKEN")
    if not token:
        st.error("❌ Cannot upload: HF_TOKEN is missing from Space Secrets.")
        return
    api = HfApi()
    api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=token)
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,68 +1,95 @@
1
  import streamlit as st
 
 
2
  from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
3
  from engine.trend_engine import run_trend_module
4
 
5
- st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
6
 
7
- # --- INITIALIZATION ---
8
  if 'master_data' not in st.session_state:
9
  st.session_state.master_data = load_from_hf()
10
 
11
- # --- SIDEBAR: DATA CONTROLS ---
12
  with st.sidebar:
13
  st.header("πŸ—‚οΈ Data Management")
14
  if st.session_state.master_data is None:
15
- st.error("No dataset detected.")
16
  if st.button("πŸš€ Seed Database (2008-2026)", use_container_width=True):
17
  st.session_state.master_data = seed_dataset_from_scratch()
18
  st.rerun()
19
  else:
20
- last_dt = st.session_state.master_data.index.max()
21
  st.success(f"Database Active: {last_dt.date()}")
22
- if st.button("πŸ”„ Sync New Data", use_container_width=True):
23
  st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
24
  st.rerun()
25
 
26
  st.divider()
27
  st.header("βš™οΈ Strategy Settings")
28
- option = st.radio("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
29
- start_yr = st.slider("Backtest Start Year", 2008, 2026, 2015)
30
- vol_target = st.slider("Target Vol (%)", 5, 20, 12) / 100
31
-
32
- st.divider()
33
- run_btn = st.button("πŸš€ Run Strategy Analysis", use_container_width=True, type="primary")
34
 
35
- # --- MAIN PAGE: DISPLAY ---
36
  if st.session_state.master_data is not None:
37
  if run_btn:
38
- with st.spinner("Crunching data..."):
39
- # Universe Selection
40
- univ = FI_TICKERS if "Option A" in option else X_EQUITY_TICKERS
41
- # Slice by date
42
- df = st.session_state.master_data[st.session_state.master_data.index.year >= start_yr]
43
 
44
- # Execute Engine
45
- results = run_trend_module(df[univ], df['SOFR_ANNUAL'], vol_target)
 
46
 
47
- # Show Metrics
48
- st.title(f"πŸ“Š {option} Performance Report")
49
- m1, m2, m3 = st.columns(3)
50
- m1.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
51
- m2.metric("Annual Return", f"{results['ann_ret']:.1%}")
52
- m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
53
 
54
- # Equity Curve
55
- st.subheader("Cumulative Growth (vs Cash)")
56
- st.line_chart(results['equity_curve'])
57
-
58
- # Allocation Check
 
 
 
 
 
 
 
 
 
 
 
 
59
  st.divider()
60
- st.subheader("Current Market Status")
61
- active_assets = results['current_signals'][results['current_signals'] > 0].index.tolist()
62
- st.write(f"**In-Trend Assets:** {', '.join(active_assets) if active_assets else 'All Cash'}")
63
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  else:
65
- st.title("Welcome to the 2025 Trend Suite")
66
- st.info("πŸ‘ˆ Use the sidebar to manage your data and click 'Run Strategy Analysis' to begin.")
67
  else:
68
- st.warning("Please initialize the database using the 'Seed' button in the sidebar.")
 
1
  import streamlit as st
2
+ import pandas as pd
3
+ import numpy as np
4
  from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
5
  from engine.trend_engine import run_trend_module
6
 
7
+ st.set_page_config(layout="wide", page_title="P2 Strategy Suite | 2025 Dow Award Edition")
8
 
9
+ # --- SAFE SESSION INITIALIZATION ---
10
  if 'master_data' not in st.session_state:
11
  st.session_state.master_data = load_from_hf()
12
 
13
+ # --- SIDEBAR UI ---
14
  with st.sidebar:
15
  st.header("πŸ—‚οΈ Data Management")
16
  if st.session_state.master_data is None:
17
+ st.error("Dataset not found.")
18
  if st.button("πŸš€ Seed Database (2008-2026)", use_container_width=True):
19
  st.session_state.master_data = seed_dataset_from_scratch()
20
  st.rerun()
21
  else:
22
+ last_dt = pd.to_datetime(st.session_state.master_data.index).max()
23
  st.success(f"Database Active: {last_dt.date()}")
24
+ if st.button("πŸ”„ Sync Daily Data", use_container_width=True):
25
  st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
26
  st.rerun()
27
 
28
  st.divider()
29
  st.header("βš™οΈ Strategy Settings")
30
+ option = st.radio("Strategy Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
31
+ start_yr = st.slider("OOS Start Year", 2008, 2026, 2018)
32
+ vol_target = st.slider("Ann. Vol Target (%)", 5, 25, 12) / 100
33
+ run_btn = st.button("πŸš€ Run Analysis", use_container_width=True, type="primary")
 
 
34
 
35
+ # --- MAIN OUTPUT UI ---
36
  if st.session_state.master_data is not None:
37
  if run_btn:
38
+ with st.spinner("Analyzing Market Regimes..."):
39
+ # 1. Setup Universe and Benchmark
40
+ is_fi = "Option A" in option
41
+ univ = FI_TICKERS if is_fi else X_EQUITY_TICKERS
42
+ bench_ticker = "AGG" if is_fi else "SPY"
43
 
44
+ # 2. Filter Data (Using Start Year as OOS boundary)
45
+ # The engine uses data prior to start_yr for signal lookback (Training/Buffer)
46
+ df = st.session_state.master_data
47
 
48
+ # 3. Execute Engine
49
+ results = run_trend_module(df[univ], df[bench_ticker], df['SOFR_ANNUAL'], vol_target, start_yr)
 
 
 
 
50
 
51
+ # 4. KPI Header
52
+ st.title(f"πŸ“ˆ {option} Performance vs {bench_ticker}")
53
+ m1, m2, m3, m4 = st.columns(4)
54
+ m1.metric("OOS Sharpe", f"{results['sharpe']:.2f}")
55
+ m2.metric("Ann. Return", f"{results['ann_ret']:.1%}")
56
+ m3.metric("Peak-to-Trough DD", f"{results['max_dd_peak']:.1%}")
57
+ m4.metric("Avg Daily DD", f"{results['avg_daily_dd']:.2%}")
58
+
59
+ # 5. Equity Curve Chart
60
+ chart_df = pd.DataFrame({
61
+ "Strategy Portfolio": results['equity_curve'],
62
+ f"Benchmark ({bench_ticker})": results['bench_curve']
63
+ })
64
+ st.subheader("Cumulative Growth of $1.00 (Out-of-Sample)")
65
+ st.line_chart(chart_df)
66
+
67
+ # 6. Actionable Allocation (Next Trading Day)
68
  st.divider()
69
+ c1, c2 = st.columns([1, 2])
70
+ with c1:
71
+ st.subheader("πŸ“… Next Trading Session")
72
+ st.info(f"**NYSE Market Date:** {results['next_day']}\n\n**Action:** Execute at Open")
73
+ with c2:
74
+ st.subheader("🎯 Required Allocation")
75
+ active = results['current_signals'][results['current_signals'] > 0].index.tolist()
76
+ if active:
77
+ st.success(f"**Long Positions:** {', '.join(active)}")
78
+ else:
79
+ st.warning("βš–οΈ **Position:** 100% CASH (Market Neutral)")
80
+
81
+ # 7. Methodology Footer
82
+ st.divider()
83
+ with st.expander("πŸ“š Methodology & 2025 Dow Award Reference"):
84
+ st.markdown("""
85
+ ### A Century of Profitable Trends (Zarattini & Antonacci, 2025)
86
+ This model implements the framework from the 2025 Charles H. Dow Award winning paper:
87
+ * **Regime Filter:** Dual SMA logic (50/200 crossover) proxying for Keltner/Donchian channels.
88
+ * **Volatility Targeting:** Positions sized by $Weight = \sigma_{target} / \sigma_{realized}$, capped at 1.5x.
89
+ * **Benchmarking:** Equity trends are compared to SPY; Fixed Income to AGG.
90
+ * **OOS Testing:** The analysis shown above represents the **Out-of-Sample** period. Data prior to the start year is used solely for initial indicator 'burn-in'.
91
+ """)
92
  else:
93
+ st.info("πŸ’‘ Adjust your parameters in the sidebar and click **'Run Analysis'**.")
 
94
  else:
95
+ st.warning("πŸ‘ˆ Please click 'Seed Database' to initialize historical data.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -1,47 +1,58 @@
1
  import pandas as pd
2
  import numpy as np
 
3
 
4
- def run_trend_module(price_df, sofr_series, target_vol=0.12):
5
  """
6
- Implements 2025 Dow Award Logic.
 
7
  """
8
- # 1. Dual-Trend Signal (Fast vs Slow SMA)
9
  sma_fast = price_df.rolling(50).mean()
10
  sma_slow = price_df.rolling(200).mean()
11
- # Signal is 1 if in trend, 0 if cash
12
  signals = (sma_fast > sma_slow).astype(int)
13
 
14
- # 2. Volatility Targeting (Inverse Vol Sizing)
15
  returns = price_df.pct_change()
16
  realized_vol = returns.rolling(60).std() * np.sqrt(252)
17
- # Weights = Target Vol / Realized Vol
18
- weights = (target_vol / realized_vol).fillna(0)
19
- weights = weights.clip(upper=1.5) # Cap leverage at 150%
20
 
21
- # 3. Portfolio Returns
22
- # Position = Signal * Weight
23
- asset_returns = (signals.shift(1) * weights.shift(1) * returns).mean(axis=1)
 
 
24
 
25
- # 4. Interest on Cash (SOFR)
26
- # If signals are 0 (in cash), we earn SOFR
27
- cash_percentage = 1 - signals.mean(axis=1)
28
- interest_returns = (cash_percentage.shift(1) * (sofr_series.shift(1) / 252))
29
 
30
- total_returns = asset_returns + interest_returns
31
- equity_curve = (1 + total_returns).fillna(0).cumprod()
 
32
 
33
- # 5. Metrics
34
- ann_ret = total_returns.mean() * 252
35
- ann_vol = total_returns.std() * np.sqrt(252)
36
- sharpe = (ann_ret - 0.035) / ann_vol if ann_vol > 0 else 0
 
37
 
38
- dd = equity_curve / equity_curve.cummax() - 1
39
- max_dd = dd.min()
 
 
 
 
 
 
 
 
40
 
41
  return {
42
  'equity_curve': equity_curve,
43
- 'sharpe': sharpe,
44
- 'ann_ret': ann_ret,
45
- 'max_dd': max_dd,
46
- 'current_signals': signals.iloc[-1]
 
 
 
47
  }
 
1
  import pandas as pd
2
  import numpy as np
3
+ import pandas_market_calendars as mcal
4
 
5
+ def run_trend_module(price_df, benchmark_df, sofr_series, target_vol=0.12):
6
  """
7
+ Enhanced Engine for 2025 Dow Award Logic.
8
+ Includes Dual Drawdowns and Benchmark Comparison.
9
  """
10
+ # 1. Signals & Weights
11
  sma_fast = price_df.rolling(50).mean()
12
  sma_slow = price_df.rolling(200).mean()
 
13
  signals = (sma_fast > sma_slow).astype(int)
14
 
 
15
  returns = price_df.pct_change()
16
  realized_vol = returns.rolling(60).std() * np.sqrt(252)
17
+ weights = (target_vol / realized_vol).fillna(0).clip(upper=1.5)
 
 
18
 
19
+ # 2. Returns Calculation
20
+ # Strategy
21
+ asset_ret = (signals.shift(1) * weights.shift(1) * returns).mean(axis=1)
22
+ cash_pct = 1 - signals.mean(axis=1)
23
+ strat_returns = asset_ret + (cash_pct.shift(1) * (sofr_series.shift(1) / 252))
24
 
25
+ # Benchmark (Buy & Hold)
26
+ bench_returns = benchmark_df.pct_change().fillna(0)
 
 
27
 
28
+ # Equity Curves
29
+ equity_curve = (1 + strat_returns).cumprod()
30
+ bench_curve = (1 + bench_returns).cumprod()
31
 
32
+ # 3. Drawdown Calculations
33
+ def get_dd_stats(curve):
34
+ hwm = curve.cummax()
35
+ dd = (curve / hwm) - 1
36
+ return dd.min(), dd # Max DD and the full DD series
37
 
38
+ max_dd_peak, dd_series = get_dd_stats(equity_curve)
39
+
40
+ # 4. Next Trading Day & Allocations (NYSE Calendar)
41
+ nyse = mcal.get_calendar('NYSE')
42
+ last_date = price_df.index[-1]
43
+ next_day = nyse.valid_days(start_date=last_date + pd.Timedelta(days=1), end_date=last_date + pd.Timedelta(days=10))[0]
44
+
45
+ # Current Allocations (Based on most recent signals)
46
+ current_signals = signals.iloc[-1]
47
+ active_assets = current_signals[current_signals > 0].index.tolist()
48
 
49
  return {
50
  'equity_curve': equity_curve,
51
+ 'bench_curve': bench_curve,
52
+ 'strat_ret_series': strat_returns,
53
+ 'max_dd_peak': max_dd_peak,
54
+ 'dd_series': dd_series,
55
+ 'next_trading_day': next_day.date(),
56
+ 'active_assets': active_assets,
57
+ 'signals': current_signals
58
  }
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py CHANGED
@@ -1,40 +1,47 @@
1
- import numpy as np
2
  import pandas as pd
 
3
 
4
- def run_trend_module(prices, daily_sofr, vol_target):
5
- # 1. Dual-Trend Signal
6
- d_high = prices.rolling(20).max()
7
- sma = prices.rolling(20).mean()
8
- atr = (prices.rolling(20).max() - prices.rolling(20).min()) / 2
9
- k_upper = sma + (2 * atr)
10
-
11
- entry_band = np.minimum(d_high, k_upper)
12
- signals = (prices > entry_band.shift(1)).astype(int)
13
-
14
- # 2. Risk Parity Position Sizing
15
- returns = prices.pct_change()
16
- realized_vol = returns.rolling(21).std() * np.sqrt(252)
17
-
18
- n = len(prices.columns)
19
- # Target weight = (Target Vol / Total Assets) / Individual Asset Vol
20
- target_weights = (vol_target / n) / realized_vol.shift(1)
21
-
22
- # 3. Strategy Returns (Positions + SOFR on Cash)
23
- pos_rets = (signals.shift(1) * target_weights.shift(1) * returns).sum(axis=1)
24
- weight_used = (signals.shift(1) * target_weights.shift(1)).sum(axis=1)
25
- cash_rets = (1 - weight_used).clip(0, 1) * (daily_sofr / 252)
26
-
27
- strat_rets = pos_rets + cash_rets
28
- equity_curve = (1 + strat_rets).fillna(0).cumprod()
29
-
30
- # 4. Target Allocation for Tomorrow
31
- tomorrow_sig = (prices.iloc[-1] > entry_band.iloc[-1]).astype(int)
32
- tomorrow_w = (vol_target / n) / realized_vol.iloc[-1]
33
-
34
- alloc = pd.DataFrame({
35
- "Ticker": prices.columns,
36
- "Signal": ["LONG" if s == 1 else "CASH" for s in tomorrow_sig],
37
- "Weight (%)": (tomorrow_sig * tomorrow_w * 100).round(2)
38
- })
39
-
40
- return {"curve": equity_curve, "alloc": alloc}
 
 
 
 
 
 
 
 
 
1
  import pandas as pd
2
+ import numpy as np
3
 
4
+ def run_trend_module(price_df, sofr_series, target_vol=0.12):
5
+ """
6
+ Implements 2025 Dow Award Logic.
7
+ """
8
+ # 1. Dual-Trend Signal (Fast vs Slow SMA)
9
+ sma_fast = price_df.rolling(50).mean()
10
+ sma_slow = price_df.rolling(200).mean()
11
+ # Signal is 1 if in trend, 0 if cash
12
+ signals = (sma_fast > sma_slow).astype(int)
13
+
14
+ # 2. Volatility Targeting (Inverse Vol Sizing)
15
+ returns = price_df.pct_change()
16
+ realized_vol = returns.rolling(60).std() * np.sqrt(252)
17
+ # Weights = Target Vol / Realized Vol
18
+ weights = (target_vol / realized_vol).fillna(0)
19
+ weights = weights.clip(upper=1.5) # Cap leverage at 150%
20
+
21
+ # 3. Portfolio Returns
22
+ # Position = Signal * Weight
23
+ asset_returns = (signals.shift(1) * weights.shift(1) * returns).mean(axis=1)
24
+
25
+ # 4. Interest on Cash (SOFR)
26
+ # If signals are 0 (in cash), we earn SOFR
27
+ cash_percentage = 1 - signals.mean(axis=1)
28
+ interest_returns = (cash_percentage.shift(1) * (sofr_series.shift(1) / 252))
29
+
30
+ total_returns = asset_returns + interest_returns
31
+ equity_curve = (1 + total_returns).fillna(0).cumprod()
32
+
33
+ # 5. Metrics
34
+ ann_ret = total_returns.mean() * 252
35
+ ann_vol = total_returns.std() * np.sqrt(252)
36
+ sharpe = (ann_ret - 0.035) / ann_vol if ann_vol > 0 else 0
37
+
38
+ dd = equity_curve / equity_curve.cummax() - 1
39
+ max_dd = dd.min()
40
+
41
+ return {
42
+ 'equity_curve': equity_curve,
43
+ 'sharpe': sharpe,
44
+ 'ann_ret': ann_ret,
45
+ 'max_dd': max_dd,
46
+ 'current_signals': signals.iloc[-1]
47
+ }
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -14,7 +14,7 @@ X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XL
14
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
15
 
16
  def get_hf_token():
17
- """Safely retrieves the token without triggering a SecretNotFoundError crash."""
18
  try:
19
  return st.secrets["HF_TOKEN"]
20
  except:
@@ -32,7 +32,7 @@ def load_from_hf():
32
  return None
33
 
34
  def seed_dataset_from_scratch():
35
- """Downloads 2008-Present data from STOOQ."""
36
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
37
  master_df = pd.DataFrame()
38
 
@@ -42,13 +42,13 @@ def seed_dataset_from_scratch():
42
  for i, ticker in enumerate(tickers):
43
  status.text(f"Fetching {ticker} from Stooq...")
44
  try:
45
- # Stooq primary
46
  data = web.DataReader(f"{ticker}.US", 'stooq', start='2008-01-01')
47
  if not data.empty:
48
  master_df[ticker] = data['Close'].sort_index()
49
- time.sleep(0.6) # Anti-rate limit
50
  except:
51
- # YFinance fallback
52
  try:
53
  yf_data = yf.download(ticker, start="2008-01-01", progress=False)['Adj Close']
54
  master_df[ticker] = yf_data
@@ -56,12 +56,12 @@ def seed_dataset_from_scratch():
56
  pass
57
  progress_bar.progress((i + 1) / len(tickers))
58
 
59
- # Add SOFR Rate
60
  try:
61
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
62
  master_df['SOFR_ANNUAL'] = sofr / 100
63
  except:
64
- master_df['SOFR_ANNUAL'] = 0.05
65
 
66
  master_df = master_df.sort_index().ffill()
67
  master_df.to_csv(FILENAME)
@@ -70,11 +70,10 @@ def seed_dataset_from_scratch():
70
  return master_df
71
 
72
  def sync_incremental_data(df_existing):
73
- """Updates only new data since last index date."""
74
  last_date = pd.to_datetime(df_existing.index).max()
75
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
76
 
77
- # Simple incremental fetch
78
  new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
79
  combined = pd.concat([df_existing, new_data])
80
  combined = combined[~combined.index.duplicated(keep='last')].sort_index()
 
14
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
15
 
16
  def get_hf_token():
17
+ """Safely retrieves the token from secrets or environment."""
18
  try:
19
  return st.secrets["HF_TOKEN"]
20
  except:
 
32
  return None
33
 
34
  def seed_dataset_from_scratch():
35
+ """Initial download of 18 years of data using Stooq primarily."""
36
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
37
  master_df = pd.DataFrame()
38
 
 
42
  for i, ticker in enumerate(tickers):
43
  status.text(f"Fetching {ticker} from Stooq...")
44
  try:
45
+ # Stooq primary (requires .US suffix for ETFs)
46
  data = web.DataReader(f"{ticker}.US", 'stooq', start='2008-01-01')
47
  if not data.empty:
48
  master_df[ticker] = data['Close'].sort_index()
49
+ time.sleep(0.6)
50
  except:
51
+ # YFinance fallback if Stooq fails for a ticker
52
  try:
53
  yf_data = yf.download(ticker, start="2008-01-01", progress=False)['Adj Close']
54
  master_df[ticker] = yf_data
 
56
  pass
57
  progress_bar.progress((i + 1) / len(tickers))
58
 
59
+ # Add SOFR Rate (Cash interest)
60
  try:
61
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
62
  master_df['SOFR_ANNUAL'] = sofr / 100
63
  except:
64
+ master_df['SOFR_ANNUAL'] = 0.045 # Conservative proxy
65
 
66
  master_df = master_df.sort_index().ffill()
67
  master_df.to_csv(FILENAME)
 
70
  return master_df
71
 
72
  def sync_incremental_data(df_existing):
73
+ """Updates only new data since last index date using YFinance for speed."""
74
  last_date = pd.to_datetime(df_existing.index).max()
75
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
76
 
 
77
  new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
78
  combined = pd.concat([df_existing, new_data])
79
  combined = combined[~combined.index.duplicated(keep='last')].sort_index()
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,45 +1,68 @@
1
  import streamlit as st
2
- import pandas as pd
3
- from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data
4
 
5
- st.set_page_config(layout="wide", page_title="P2 Trend Suite")
6
 
7
- # --- SIDEBAR: DATA MANAGEMENT ---
8
- st.sidebar.title("πŸ—‚οΈ Data Management")
9
-
10
- # Initialize Session State
11
  if 'master_data' not in st.session_state:
12
  st.session_state.master_data = load_from_hf()
13
 
14
- # LOGIC: If no data, show SEED. If data exists, show SYNC.
15
- if st.session_state.master_data is None:
16
- st.sidebar.warning("Database not found.")
17
- if st.sidebar.button("πŸš€ Step 1: Seed Database (2008-2026)"):
18
- with st.spinner("Downloading full history..."):
 
19
  st.session_state.master_data = seed_dataset_from_scratch()
20
- st.sidebar.success("Database Seeded!")
21
  st.rerun()
22
- else:
23
- st.sidebar.success(f"Database Active: {st.session_state.master_data.index.max()}")
24
-
25
- # SYNC BUTTON for daily incremental updates
26
- if st.sidebar.button("πŸ”„ Step 2: Sync Daily Data"):
27
- with st.spinner("Pinging Stooq/FRED for new data..."):
28
  st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
29
- st.sidebar.success("Incremental Sync Complete!")
30
  st.rerun()
 
 
 
 
 
 
 
 
 
31
 
32
- # --- SIDEBAR: STRATEGY CONTROLS ---
33
- st.sidebar.divider()
34
- st.sidebar.title("βš™οΈ Strategy Settings")
35
- option = st.sidebar.radio("Select Module", ("Option A - FI Trend", "Option B - Equity Trend"))
36
- start_year = st.sidebar.slider("Start Year", 2008, 2026, 2015)
37
- vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
38
-
39
- # --- MAIN UI: ANALYSIS ---
40
  if st.session_state.master_data is not None:
41
- # Your strategy execution code here...
42
- st.title(f"πŸ“Š {option}")
43
- # ...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  else:
45
- st.info("Please use the sidebar to Seed the database first.")
 
1
  import streamlit as st
2
+ from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data, X_EQUITY_TICKERS, FI_TICKERS
3
+ from engine.trend_engine import run_trend_module
4
 
5
+ st.set_page_config(layout="wide", page_title="P2 Strategy Suite")
6
 
7
+ # --- INITIALIZATION ---
 
 
 
8
  if 'master_data' not in st.session_state:
9
  st.session_state.master_data = load_from_hf()
10
 
11
+ # --- SIDEBAR: DATA CONTROLS ---
12
+ with st.sidebar:
13
+ st.header("πŸ—‚οΈ Data Management")
14
+ if st.session_state.master_data is None:
15
+ st.error("No dataset detected.")
16
+ if st.button("πŸš€ Seed Database (2008-2026)", use_container_width=True):
17
  st.session_state.master_data = seed_dataset_from_scratch()
 
18
  st.rerun()
19
+ else:
20
+ last_dt = st.session_state.master_data.index.max()
21
+ st.success(f"Database Active: {last_dt.date()}")
22
+ if st.button("πŸ”„ Sync New Data", use_container_width=True):
 
 
23
  st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
 
24
  st.rerun()
25
+
26
+ st.divider()
27
+ st.header("βš™οΈ Strategy Settings")
28
+ option = st.radio("Universe Selection", ("Option A - FI Trend", "Option B - Equity Trend"))
29
+ start_yr = st.slider("Backtest Start Year", 2008, 2026, 2015)
30
+ vol_target = st.slider("Target Vol (%)", 5, 20, 12) / 100
31
+
32
+ st.divider()
33
+ run_btn = st.button("πŸš€ Run Strategy Analysis", use_container_width=True, type="primary")
34
 
35
+ # --- MAIN PAGE: DISPLAY ---
 
 
 
 
 
 
 
36
  if st.session_state.master_data is not None:
37
+ if run_btn:
38
+ with st.spinner("Crunching data..."):
39
+ # Universe Selection
40
+ univ = FI_TICKERS if "Option A" in option else X_EQUITY_TICKERS
41
+ # Slice by date
42
+ df = st.session_state.master_data[st.session_state.master_data.index.year >= start_yr]
43
+
44
+ # Execute Engine
45
+ results = run_trend_module(df[univ], df['SOFR_ANNUAL'], vol_target)
46
+
47
+ # Show Metrics
48
+ st.title(f"πŸ“Š {option} Performance Report")
49
+ m1, m2, m3 = st.columns(3)
50
+ m1.metric("Sharpe Ratio", f"{results['sharpe']:.2f}")
51
+ m2.metric("Annual Return", f"{results['ann_ret']:.1%}")
52
+ m3.metric("Max Drawdown", f"{results['max_dd']:.1%}")
53
+
54
+ # Equity Curve
55
+ st.subheader("Cumulative Growth (vs Cash)")
56
+ st.line_chart(results['equity_curve'])
57
+
58
+ # Allocation Check
59
+ st.divider()
60
+ st.subheader("Current Market Status")
61
+ active_assets = results['current_signals'][results['current_signals'] > 0].index.tolist()
62
+ st.write(f"**In-Trend Assets:** {', '.join(active_assets) if active_assets else 'All Cash'}")
63
+
64
+ else:
65
+ st.title("Welcome to the 2025 Trend Suite")
66
+ st.info("πŸ‘ˆ Use the sidebar to manage your data and click 'Run Strategy Analysis' to begin.")
67
  else:
68
+ st.warning("Please initialize the database using the 'Seed' button in the sidebar.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -9,10 +9,22 @@ import streamlit as st
9
  REPO_ID = "P2SAMAPA/etf_trend_data"
10
  FILENAME = "market_data.csv"
11
 
12
- # Make sure these match exactly what app.py expects
 
 
 
 
 
 
 
 
 
 
13
  def load_from_hf():
14
- token = st.secrets.get("HF_TOKEN")
15
- if not token: return None
 
 
16
  try:
17
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
18
  return pd.read_csv(path, index_col=0, parse_dates=True)
@@ -20,10 +32,64 @@ def load_from_hf():
20
  return None
21
 
22
  def seed_dataset_from_scratch():
23
- # ... (Your Stooq download logic here)
24
- # Ensure this function name matches the import in app.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  return master_df
26
 
27
  def sync_incremental_data(df_existing):
28
- # ... (Your incremental update logic here)
 
 
 
 
 
 
 
 
 
 
29
  return combined
 
 
 
 
 
 
 
 
 
 
 
 
9
  REPO_ID = "P2SAMAPA/etf_trend_data"
10
  FILENAME = "market_data.csv"
11
 
12
+ # The 27 Equity X-ETFs and 15 FI ETFs from the 2025 Paper
13
+ X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
14
+ FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
15
+
16
+ def get_hf_token():
17
+ """Safely retrieves the token without triggering a SecretNotFoundError crash."""
18
+ try:
19
+ return st.secrets["HF_TOKEN"]
20
+ except:
21
+ return os.getenv("HF_TOKEN")
22
+
23
  def load_from_hf():
24
+ """Reads dataset from Hugging Face if it exists."""
25
+ token = get_hf_token()
26
+ if not token:
27
+ return None
28
  try:
29
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
30
  return pd.read_csv(path, index_col=0, parse_dates=True)
 
32
  return None
33
 
34
  def seed_dataset_from_scratch():
35
+ """Downloads 2008-Present data from STOOQ."""
36
+ tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
37
+ master_df = pd.DataFrame()
38
+
39
+ status = st.empty()
40
+ progress_bar = st.progress(0)
41
+
42
+ for i, ticker in enumerate(tickers):
43
+ status.text(f"Fetching {ticker} from Stooq...")
44
+ try:
45
+ # Stooq primary
46
+ data = web.DataReader(f"{ticker}.US", 'stooq', start='2008-01-01')
47
+ if not data.empty:
48
+ master_df[ticker] = data['Close'].sort_index()
49
+ time.sleep(0.6) # Anti-rate limit
50
+ except:
51
+ # YFinance fallback
52
+ try:
53
+ yf_data = yf.download(ticker, start="2008-01-01", progress=False)['Adj Close']
54
+ master_df[ticker] = yf_data
55
+ except:
56
+ pass
57
+ progress_bar.progress((i + 1) / len(tickers))
58
+
59
+ # Add SOFR Rate
60
+ try:
61
+ sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
62
+ master_df['SOFR_ANNUAL'] = sofr / 100
63
+ except:
64
+ master_df['SOFR_ANNUAL'] = 0.05
65
+
66
+ master_df = master_df.sort_index().ffill()
67
+ master_df.to_csv(FILENAME)
68
+
69
+ upload_to_hf(FILENAME)
70
  return master_df
71
 
72
  def sync_incremental_data(df_existing):
73
+ """Updates only new data since last index date."""
74
+ last_date = pd.to_datetime(df_existing.index).max()
75
+ tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
76
+
77
+ # Simple incremental fetch
78
+ new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
79
+ combined = pd.concat([df_existing, new_data])
80
+ combined = combined[~combined.index.duplicated(keep='last')].sort_index()
81
+
82
+ combined.to_csv(FILENAME)
83
+ upload_to_hf(FILENAME)
84
  return combined
85
+
86
+ def upload_to_hf(path):
87
+ api = HfApi()
88
+ token = get_hf_token()
89
+ api.upload_file(
90
+ path_or_fileobj=path,
91
+ path_in_repo=FILENAME,
92
+ repo_id=REPO_ID,
93
+ repo_type="dataset",
94
+ token=token
95
+ )
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -9,47 +9,21 @@ import streamlit as st
9
  REPO_ID = "P2SAMAPA/etf_trend_data"
10
  FILENAME = "market_data.csv"
11
 
12
- def seed_dataset():
13
- tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
14
- master_df = pd.DataFrame()
15
-
16
- st.info("πŸ›°οΈ Initializing Stooq Data Fetch (2008-Present)...")
17
- progress_bar = st.progress(0)
18
-
19
- for i, ticker in enumerate(tickers):
20
- # Stooq ticker format is usually 'TICKER.US'
21
- stooq_symbol = f"{ticker}.US"
22
- try:
23
- # PRIMARY: STOOQ
24
- data = web.DataReader(stooq_symbol, 'stooq', start='2008-01-01')
25
- if not data.empty:
26
- # Stooq returns data in reverse chronological order; we sort it.
27
- master_df[ticker] = data['Close'].sort_index()
28
-
29
- # Anti-Rate Limit: 0.8s delay between requests
30
- time.sleep(0.8)
31
-
32
- except Exception as e:
33
- st.warning(f"⚠️ Stooq failed for {ticker}. Attempting YFinance fallback...")
34
- try:
35
- # BACKUP: YFinance
36
- yf_data = yf.download(ticker, start="2008-01-01", progress=False)['Adj Close']
37
- master_df[ticker] = yf_data
38
- except:
39
- st.error(f"❌ Failed to fetch {ticker} from all sources.")
40
-
41
- progress_bar.progress((i + 1) / len(tickers))
42
-
43
- # Add SOFR (Cash Rate) from FRED
44
  try:
45
- sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
46
- master_df['SOFR_ANNUAL'] = sofr / 100
47
  except:
48
- master_df['SOFR_ANNUAL'] = 0.05 # Conservative fallback
49
 
50
- master_df = master_df.sort_index().ffill()
51
-
52
- # Save & Upload
53
- master_df.to_csv(FILENAME)
54
- upload_to_hf(FILENAME)
55
  return master_df
 
 
 
 
 
9
  REPO_ID = "P2SAMAPA/etf_trend_data"
10
  FILENAME = "market_data.csv"
11
 
12
+ # Make sure these match exactly what app.py expects
13
+ def load_from_hf():
14
+ token = st.secrets.get("HF_TOKEN")
15
+ if not token: return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  try:
17
+ path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
18
+ return pd.read_csv(path, index_col=0, parse_dates=True)
19
  except:
20
+ return None
21
 
22
+ def seed_dataset_from_scratch():
23
+ # ... (Your Stooq download logic here)
24
+ # Ensure this function name matches the import in app.py
 
 
25
  return master_df
26
+
27
+ def sync_incremental_data(df_existing):
28
+ # ... (Your incremental update logic here)
29
+ return combined
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,26 +1,55 @@
1
- def sync_incremental_data(df_existing):
2
- """Downloads only missing data since last update and saves to HF."""
3
- import yfinance as yf
4
-
5
- # Identify last date in the CSV
6
- last_date = pd.to_datetime(df_existing.index).max()
 
 
 
 
 
 
7
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
 
 
 
 
8
 
9
- # Fetch new bars from yfinance or stooq
10
- new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- # Combine (Drop duplicates to avoid double-counting the last day)
13
- combined = pd.concat([df_existing, new_data])
14
- combined = combined[~combined.index.duplicated(keep='last')].sort_index()
 
 
 
 
 
15
 
16
- # Save & Push
17
- combined.to_csv(FILENAME)
18
- api = HfApi()
19
- api.upload_file(
20
- path_or_fileobj=FILENAME,
21
- path_in_repo=FILENAME,
22
- repo_id=REPO_ID,
23
- repo_type="dataset",
24
- token=st.secrets["HF_TOKEN"]
25
- )
26
- return combined
 
1
+ import pandas as pd
2
+ import pandas_datareader.data as web
3
+ import yfinance as yf
4
+ import time
5
+ from huggingface_hub import hf_hub_download, HfApi
6
+ import os
7
+ import streamlit as st
8
+
9
+ REPO_ID = "P2SAMAPA/etf_trend_data"
10
+ FILENAME = "market_data.csv"
11
+
12
+ def seed_dataset():
13
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
14
+ master_df = pd.DataFrame()
15
+
16
+ st.info("πŸ›°οΈ Initializing Stooq Data Fetch (2008-Present)...")
17
+ progress_bar = st.progress(0)
18
 
19
+ for i, ticker in enumerate(tickers):
20
+ # Stooq ticker format is usually 'TICKER.US'
21
+ stooq_symbol = f"{ticker}.US"
22
+ try:
23
+ # PRIMARY: STOOQ
24
+ data = web.DataReader(stooq_symbol, 'stooq', start='2008-01-01')
25
+ if not data.empty:
26
+ # Stooq returns data in reverse chronological order; we sort it.
27
+ master_df[ticker] = data['Close'].sort_index()
28
+
29
+ # Anti-Rate Limit: 0.8s delay between requests
30
+ time.sleep(0.8)
31
+
32
+ except Exception as e:
33
+ st.warning(f"⚠️ Stooq failed for {ticker}. Attempting YFinance fallback...")
34
+ try:
35
+ # BACKUP: YFinance
36
+ yf_data = yf.download(ticker, start="2008-01-01", progress=False)['Adj Close']
37
+ master_df[ticker] = yf_data
38
+ except:
39
+ st.error(f"❌ Failed to fetch {ticker} from all sources.")
40
+
41
+ progress_bar.progress((i + 1) / len(tickers))
42
 
43
+ # Add SOFR (Cash Rate) from FRED
44
+ try:
45
+ sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
46
+ master_df['SOFR_ANNUAL'] = sofr / 100
47
+ except:
48
+ master_df['SOFR_ANNUAL'] = 0.05 # Conservative fallback
49
+
50
+ master_df = master_df.sort_index().ffill()
51
 
52
+ # Save & Upload
53
+ master_df.to_csv(FILENAME)
54
+ upload_to_hf(FILENAME)
55
+ return master_df
 
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,38 +1,26 @@
1
- import pandas as pd
2
- import yfinance as yf
3
- import pandas_datareader.data as web
4
- from huggingface_hub import hf_hub_download, HfApi
5
- import os
6
- import streamlit as st
7
-
8
- REPO_ID = "P2SAMAPA/etf_trend_data"
9
- FILENAME = "market_data.csv"
10
-
11
- X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
12
- FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
13
-
14
- def load_from_hf():
15
- try:
16
- token = st.secrets["HF_TOKEN"]
17
- path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
18
- return pd.read_csv(path, index_col=0, parse_dates=True)
19
- except:
20
- return None
21
-
22
- def seed_dataset():
23
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
24
- # Download Wide Format
25
- df = yf.download(tickers, start="2008-01-01")['Adj Close']
26
 
27
- # Add SOFR
28
- sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
29
- df['SOFR_ANNUAL'] = sofr / 100
30
- df = df.sort_index().ffill()
 
 
31
 
32
- df.to_csv(FILENAME)
33
- upload_to_hf(FILENAME)
34
- return df
35
-
36
- def upload_to_hf(path):
37
  api = HfApi()
38
- api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=st.secrets["HF_TOKEN"])
 
 
 
 
 
 
 
 
1
+ def sync_incremental_data(df_existing):
2
+ """Downloads only missing data since last update and saves to HF."""
3
+ import yfinance as yf
4
+
5
+ # Identify last date in the CSV
6
+ last_date = pd.to_datetime(df_existing.index).max()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
 
 
8
 
9
+ # Fetch new bars from yfinance or stooq
10
+ new_data = yf.download(tickers, start=last_date, progress=False)['Adj Close']
11
+
12
+ # Combine (Drop duplicates to avoid double-counting the last day)
13
+ combined = pd.concat([df_existing, new_data])
14
+ combined = combined[~combined.index.duplicated(keep='last')].sort_index()
15
 
16
+ # Save & Push
17
+ combined.to_csv(FILENAME)
 
 
 
18
  api = HfApi()
19
+ api.upload_file(
20
+ path_or_fileobj=FILENAME,
21
+ path_in_repo=FILENAME,
22
+ repo_id=REPO_ID,
23
+ repo_type="dataset",
24
+ token=st.secrets["HF_TOKEN"]
25
+ )
26
+ return combined
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,35 +1,45 @@
1
  import streamlit as st
2
  import pandas as pd
3
- import pandas_market_calendars as mcal
4
- from datetime import datetime
5
- from data.loader import load_from_hf, seed_dataset, X_EQUITY_TICKERS, FI_TICKERS
6
- from engine.trend_engine import run_trend_module
7
 
8
  st.set_page_config(layout="wide", page_title="P2 Trend Suite")
9
 
10
- # Sidebar Logic
11
- st.sidebar.title("Configuration")
12
- option = st.sidebar.radio("Select Strategy", ("Option A - FI Trend Follower", "Option B - Equity Trend Follower"))
13
- start_year = st.sidebar.slider("Start Year", 2008, 2026, 2015)
14
- vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.12)
15
 
16
- # Data Initialization
17
- if 'data' not in st.session_state:
18
- st.session_state.data = load_from_hf()
19
 
20
- if st.session_state.data is None:
21
- if st.button("πŸš€ First Time Setup: Seed 2008-2026 Data"):
22
- st.session_state.data = seed_dataset()
23
- st.rerun()
 
 
 
 
24
  else:
25
- # RUN STRATEGY
26
- universe = FI_TICKERS if "Option A" in option else X_EQUITY_TICKERS
27
- bench = "AGG" if "Option A" in option else "SPY"
28
-
29
- # Filter by Year
30
- d = st.session_state.data[st.session_state.data.index.year >= start_year]
31
- results = run_trend_module(d[universe], d['SOFR_ANNUAL'], vol_target)
32
 
33
- # UI OUTPUTS (Sharpe, Max DD, etc.)
34
- st.title(f"πŸ“ˆ {option} Performance")
35
- # ... (Insert Metric & Chart code here)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  import pandas as pd
3
+ from data.loader import load_from_hf, seed_dataset_from_scratch, sync_incremental_data
 
 
 
4
 
5
  st.set_page_config(layout="wide", page_title="P2 Trend Suite")
6
 
7
+ # --- SIDEBAR: DATA MANAGEMENT ---
8
+ st.sidebar.title("πŸ—‚οΈ Data Management")
 
 
 
9
 
10
+ # Initialize Session State
11
+ if 'master_data' not in st.session_state:
12
+ st.session_state.master_data = load_from_hf()
13
 
14
+ # LOGIC: If no data, show SEED. If data exists, show SYNC.
15
+ if st.session_state.master_data is None:
16
+ st.sidebar.warning("Database not found.")
17
+ if st.sidebar.button("πŸš€ Step 1: Seed Database (2008-2026)"):
18
+ with st.spinner("Downloading full history..."):
19
+ st.session_state.master_data = seed_dataset_from_scratch()
20
+ st.sidebar.success("Database Seeded!")
21
+ st.rerun()
22
  else:
23
+ st.sidebar.success(f"Database Active: {st.session_state.master_data.index.max()}")
 
 
 
 
 
 
24
 
25
+ # SYNC BUTTON for daily incremental updates
26
+ if st.sidebar.button("πŸ”„ Step 2: Sync Daily Data"):
27
+ with st.spinner("Pinging Stooq/FRED for new data..."):
28
+ st.session_state.master_data = sync_incremental_data(st.session_state.master_data)
29
+ st.sidebar.success("Incremental Sync Complete!")
30
+ st.rerun()
31
+
32
+ # --- SIDEBAR: STRATEGY CONTROLS ---
33
+ st.sidebar.divider()
34
+ st.sidebar.title("βš™οΈ Strategy Settings")
35
+ option = st.sidebar.radio("Select Module", ("Option A - FI Trend", "Option B - Equity Trend"))
36
+ start_year = st.sidebar.slider("Start Year", 2008, 2026, 2015)
37
+ vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
38
+
39
+ # --- MAIN UI: ANALYSIS ---
40
+ if st.session_state.master_data is not None:
41
+ # Your strategy execution code here...
42
+ st.title(f"πŸ“Š {option}")
43
+ # ...
44
+ else:
45
+ st.info("Please use the sidebar to Seed the database first.")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,6 +1,6 @@
1
  import pandas as pd
2
- import pandas_datareader.data as web
3
  import yfinance as yf
 
4
  from huggingface_hub import hf_hub_download, HfApi
5
  import os
6
  import streamlit as st
@@ -12,49 +12,27 @@ X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XL
12
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
13
 
14
  def load_from_hf():
15
- """Reads the dataset from Hugging Face."""
16
  try:
17
- # Note: Use st.secrets if token is not in env
18
- token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
19
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
20
  return pd.read_csv(path, index_col=0, parse_dates=True)
21
- except Exception as e:
22
- print(f"Dataset load failed: {e}")
23
  return None
24
 
25
- def seed_dataset_from_scratch():
26
- """Download full history from 2008 and upload to HF."""
27
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
28
- master_df = pd.DataFrame()
29
-
30
- status = st.empty()
31
- progress_bar = st.progress(0)
32
-
33
- for i, t in enumerate(tickers):
34
- status.text(f"Seeding {t}...")
35
- try:
36
- # Fetching from 2008 for initial dataset
37
- data = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
38
- master_df[t] = data
39
- except:
40
- continue
41
- progress_bar.progress((i + 1) / len(tickers))
42
 
43
  # Add SOFR
44
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
45
- master_df['SOFR_ANNUAL'] = sofr / 100
46
- master_df = master_df.sort_index().ffill()
47
-
48
- master_df.to_csv(FILENAME)
49
 
50
- # Upload
 
 
 
 
51
  api = HfApi()
52
- token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
53
- api.upload_file(
54
- path_or_fileobj=FILENAME,
55
- path_in_repo=FILENAME,
56
- repo_id=REPO_ID,
57
- repo_type="dataset",
58
- token=token
59
- )
60
- return master_df
 
1
  import pandas as pd
 
2
  import yfinance as yf
3
+ import pandas_datareader.data as web
4
  from huggingface_hub import hf_hub_download, HfApi
5
  import os
6
  import streamlit as st
 
12
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
13
 
14
  def load_from_hf():
 
15
  try:
16
+ token = st.secrets["HF_TOKEN"]
 
17
  path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
18
  return pd.read_csv(path, index_col=0, parse_dates=True)
19
+ except:
 
20
  return None
21
 
22
+ def seed_dataset():
 
23
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
24
+ # Download Wide Format
25
+ df = yf.download(tickers, start="2008-01-01")['Adj Close']
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  # Add SOFR
28
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
29
+ df['SOFR_ANNUAL'] = sofr / 100
30
+ df = df.sort_index().ffill()
 
 
31
 
32
+ df.to_csv(FILENAME)
33
+ upload_to_hf(FILENAME)
34
+ return df
35
+
36
+ def upload_to_hf(path):
37
  api = HfApi()
38
+ api.upload_file(path_or_fileobj=path, path_in_repo=FILENAME, repo_id=REPO_ID, repo_type="dataset", token=st.secrets["HF_TOKEN"])
 
 
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,23 +1,35 @@
1
  import streamlit as st
2
- from data.loader import load_from_hf, seed_dataset_from_scratch, X_EQUITY_TICKERS, FI_TICKERS
3
- # ... other imports
 
 
 
4
 
5
- st.sidebar.title("Data Management")
6
 
7
- # Check if data exists
8
- if 'master_data' not in st.session_state:
9
- st.session_state.master_data = load_from_hf()
 
 
10
 
11
- if st.session_state.master_data is None:
12
- st.warning("Dataset not found on Hugging Face. Please Seed the Database.")
13
- if st.sidebar.button("πŸš€ Step 1: Seed Database (2008-Present)"):
14
- with st.spinner("Downloading 18 years of data... this takes a few minutes."):
15
- st.session_state.master_data = seed_dataset_from_scratch()
16
- st.success("Database seeded and uploaded to HF!")
17
- else:
18
- if st.sidebar.button("πŸ”„ Step 2: Daily Incremental Sync"):
19
- # (Existing incremental sync logic here)
20
- st.sidebar.write("Last Data Point:", st.session_state.master_data.index.max())
21
 
22
- # --- REST OF THE UI ---
23
- # Run Option A/B logic using st.session_state.master_data
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import pandas as pd
3
+ import pandas_market_calendars as mcal
4
+ from datetime import datetime
5
+ from data.loader import load_from_hf, seed_dataset, X_EQUITY_TICKERS, FI_TICKERS
6
+ from engine.trend_engine import run_trend_module
7
 
8
+ st.set_page_config(layout="wide", page_title="P2 Trend Suite")
9
 
10
+ # Sidebar Logic
11
+ st.sidebar.title("Configuration")
12
+ option = st.sidebar.radio("Select Strategy", ("Option A - FI Trend Follower", "Option B - Equity Trend Follower"))
13
+ start_year = st.sidebar.slider("Start Year", 2008, 2026, 2015)
14
+ vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.12)
15
 
16
+ # Data Initialization
17
+ if 'data' not in st.session_state:
18
+ st.session_state.data = load_from_hf()
 
 
 
 
 
 
 
19
 
20
+ if st.session_state.data is None:
21
+ if st.button("πŸš€ First Time Setup: Seed 2008-2026 Data"):
22
+ st.session_state.data = seed_dataset()
23
+ st.rerun()
24
+ else:
25
+ # RUN STRATEGY
26
+ universe = FI_TICKERS if "Option A" in option else X_EQUITY_TICKERS
27
+ bench = "AGG" if "Option A" in option else "SPY"
28
+
29
+ # Filter by Year
30
+ d = st.session_state.data[st.session_state.data.index.year >= start_year]
31
+ results = run_trend_module(d[universe], d['SOFR_ANNUAL'], vol_target)
32
+
33
+ # UI OUTPUTS (Sharpe, Max DD, etc.)
34
+ st.title(f"πŸ“ˆ {option} Performance")
35
+ # ... (Insert Metric & Chart code here)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -8,51 +8,53 @@ import streamlit as st
8
  REPO_ID = "P2SAMAPA/etf_trend_data"
9
  FILENAME = "market_data.csv"
10
 
11
- # Universes
12
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
13
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
14
 
 
 
 
 
 
 
 
 
 
 
 
15
  def seed_dataset_from_scratch():
16
- """Download full history from 2008 for all 42+ tickers and upload to HF."""
17
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
18
  master_df = pd.DataFrame()
19
 
 
20
  progress_bar = st.progress(0)
 
21
  for i, t in enumerate(tickers):
 
22
  try:
23
- # We use yfinance for the heavy initial lift as it handles long historical ranges reliably
24
  data = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
25
  master_df[t] = data
26
- except Exception as e:
27
- st.warning(f"Failed to fetch {t}: {e}")
28
  progress_bar.progress((i + 1) / len(tickers))
29
 
30
- # Add SOFR (Cash Interest)
31
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
32
  master_df['SOFR_ANNUAL'] = sofr / 100
 
33
 
34
- master_df = master_df.sort_index().ffill().dropna(how='all')
35
-
36
- # Save and Upload
37
  master_df.to_csv(FILENAME)
38
- upload_to_hf(FILENAME)
39
- return master_df
40
-
41
- def upload_to_hf(local_path):
42
- """Pushes the local CSV to your Hugging Face Dataset repo."""
43
  api = HfApi()
 
44
  api.upload_file(
45
- path_or_fileobj=local_path,
46
  path_in_repo=FILENAME,
47
  repo_id=REPO_ID,
48
  repo_type="dataset",
49
- token=st.secrets["HF_TOKEN"]
50
  )
51
-
52
- def load_from_hf():
53
- """Reads the dataset from Hugging Face."""
54
- try:
55
- path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=st.secrets["HF_TOKEN"])
56
- return pd.read_csv(path, index_col=0, parse_dates=True)
57
- except:
58
- return None
 
8
  REPO_ID = "P2SAMAPA/etf_trend_data"
9
  FILENAME = "market_data.csv"
10
 
 
11
  X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
12
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
13
 
14
+ def load_from_hf():
15
+ """Reads the dataset from Hugging Face."""
16
+ try:
17
+ # Note: Use st.secrets if token is not in env
18
+ token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
19
+ path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=token)
20
+ return pd.read_csv(path, index_col=0, parse_dates=True)
21
+ except Exception as e:
22
+ print(f"Dataset load failed: {e}")
23
+ return None
24
+
25
  def seed_dataset_from_scratch():
26
+ """Download full history from 2008 and upload to HF."""
27
  tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
28
  master_df = pd.DataFrame()
29
 
30
+ status = st.empty()
31
  progress_bar = st.progress(0)
32
+
33
  for i, t in enumerate(tickers):
34
+ status.text(f"Seeding {t}...")
35
  try:
36
+ # Fetching from 2008 for initial dataset
37
  data = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
38
  master_df[t] = data
39
+ except:
40
+ continue
41
  progress_bar.progress((i + 1) / len(tickers))
42
 
43
+ # Add SOFR
44
  sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
45
  master_df['SOFR_ANNUAL'] = sofr / 100
46
+ master_df = master_df.sort_index().ffill()
47
 
 
 
 
48
  master_df.to_csv(FILENAME)
49
+
50
+ # Upload
 
 
 
51
  api = HfApi()
52
+ token = st.secrets.get("HF_TOKEN") or os.getenv("HF_TOKEN")
53
  api.upload_file(
54
+ path_or_fileobj=FILENAME,
55
  path_in_repo=FILENAME,
56
  repo_id=REPO_ID,
57
  repo_type="dataset",
58
+ token=token
59
  )
60
+ return master_df
 
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,99 +1,23 @@
1
  import streamlit as st
2
- import pandas as pd
3
- import numpy as np
4
- import pandas_market_calendars as mcal
5
- from datetime import datetime
6
- from data.loader import refresh_market_data, X_EQUITY_TICKERS, FI_TICKERS
7
- from engine.trend_engine import run_trend_module
8
-
9
- st.set_page_config(layout="wide", page_title="P2 Trend Suite")
10
-
11
- # --- SIDEBAR UI ---
12
- st.sidebar.title("Strategy Controls")
13
-
14
- # 1. Module Toggle
15
- option = st.sidebar.radio("Select Module",
16
- ("Option A - FI Trend Follower", "Option B - Equity Trend Follower"))
17
-
18
- # 2. Year Slider
19
- start_year = st.sidebar.slider("Start Year", 2008, 2026, 2015)
20
-
21
- # 3. Parameters
22
- vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
23
-
24
- if st.sidebar.button("πŸ”„ Sync Market Data"):
25
- with st.spinner("Fetching Data..."):
26
- refresh_market_data()
27
- st.sidebar.success("Data Synced!")
28
-
29
- # --- DATA PROCESSING ---
30
- try:
31
- data = pd.read_csv("market_data.csv", index_col=0, parse_dates=True)
32
-
33
- # Filter by Year
34
- data = data[data.index.year >= start_year]
35
-
36
- # Assign Universe & Benchmark
37
- if "Option B" in option:
38
- universe = X_EQUITY_TICKERS
39
- benchmark_ticker = "SPY"
40
- else:
41
- universe = FI_TICKERS
42
- benchmark_ticker = "AGG"
43
-
44
- # Run Analysis
45
- results = run_trend_module(data[universe], data['SOFR_ANNUAL'], vol_target)
46
-
47
- # --- CALCULATE METRICS ---
48
- curve = results['curve']
49
- rets = results['returns']
50
-
51
- # Sharpe (Excess over 0)
52
- sharpe = (rets.mean() * 252) / (rets.std() * np.sqrt(252))
53
-
54
- # Annualized Return
55
- total_days = (curve.index[-1] - curve.index[0]).days
56
- ann_return = (curve.iloc[-1]**(365/total_days) - 1)
57
-
58
- # Drawdowns
59
- rolling_max = curve.cummax()
60
- drawdown = (curve - rolling_max) / rolling_max
61
- max_dd_peak = drawdown.min()
62
- max_dd_daily = rets.min()
63
-
64
- # NYSE Calendar for Next Day
65
- nyse = mcal.get_calendar('NYSE')
66
- schedule = nyse.schedule(start_date=datetime.now(), end_date='2026-12-31')
67
- next_day = schedule.index[0].strftime('%Y-%m-%d')
68
-
69
- # --- OUTPUT UI ---
70
- st.title(f"πŸ“Š {option}")
71
-
72
- # Stats Row
73
- c1, c2, c3, c4, c5 = st.columns(5)
74
- c1.metric("Sharpe Ratio", f"{sharpe:.2f}")
75
- c2.metric("Annual Return", f"{ann_return:.2%}")
76
- c3.metric("Max DD (P-to-T)", f"{max_dd_peak:.2%}")
77
- c4.metric("Max DD (Daily)", f"{max_dd_daily:.2%}")
78
- c5.metric("Next Trade Date", next_day)
79
-
80
- # Allocation Table
81
- st.subheader(f"πŸ“ Target Allocation for {next_day}")
82
- alloc = results['alloc']
83
- st.dataframe(alloc[alloc['Weight (%)'] > 0].sort_values("Weight (%)", ascending=False), use_container_width=True)
84
-
85
- # Performance Chart
86
- st.subheader(f"Cumulative Return vs {benchmark_ticker}")
87
- bench_curve = (1 + data[benchmark_ticker].pct_change().fillna(0)).cumprod()
88
- # Normalize benchmark to start at 1.0 at start_year
89
- bench_curve = bench_curve / bench_curve.iloc[0]
90
-
91
- chart_df = pd.DataFrame({
92
- "Strategy": curve,
93
- f"Benchmark ({benchmark_ticker})": bench_curve
94
- })
95
- st.line_chart(chart_df)
96
-
97
- except Exception as e:
98
- st.info("Please Click 'Sync Market Data' in the sidebar to initialize the engine.")
99
- st.error(f"Waiting for data... (Technical details: {e})")
 
1
  import streamlit as st
2
+ from data.loader import load_from_hf, seed_dataset_from_scratch, X_EQUITY_TICKERS, FI_TICKERS
3
+ # ... other imports
4
+
5
+ st.sidebar.title("Data Management")
6
+
7
+ # Check if data exists
8
+ if 'master_data' not in st.session_state:
9
+ st.session_state.master_data = load_from_hf()
10
+
11
+ if st.session_state.master_data is None:
12
+ st.warning("Dataset not found on Hugging Face. Please Seed the Database.")
13
+ if st.sidebar.button("πŸš€ Step 1: Seed Database (2008-Present)"):
14
+ with st.spinner("Downloading 18 years of data... this takes a few minutes."):
15
+ st.session_state.master_data = seed_dataset_from_scratch()
16
+ st.success("Database seeded and uploaded to HF!")
17
+ else:
18
+ if st.sidebar.button("πŸ”„ Step 2: Daily Incremental Sync"):
19
+ # (Existing incremental sync logic here)
20
+ st.sidebar.write("Last Data Point:", st.session_state.master_data.index.max())
21
+
22
+ # --- REST OF THE UI ---
23
+ # Run Option A/B logic using st.session_state.master_data
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,32 +1,58 @@
 
1
  import pandas_datareader.data as web
2
  import yfinance as yf
3
- import pandas as pd
 
4
  import streamlit as st
5
 
6
- # 27 "X-" EQUITY ETFS
7
- X_EQUITY_TICKERS = [
8
- "XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF",
9
- "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE",
10
- "XSW", "XTN", "XTL", "XNTK", "XITK"
11
- ]
12
 
13
- # 15 FIXED INCOME / COMPARISON
 
14
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
15
 
16
- def refresh_market_data():
17
- """Syncs Stooq/FRED data to local CSV and HF."""
18
- all_prices = {}
19
- # Download all groups + SPY Benchmark
20
- for t in list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY"])):
 
 
21
  try:
22
- all_prices[t] = web.DataReader(f"{t}.US", "stooq")['Close']
23
- except:
24
- all_prices[t] = yf.download(t, progress=False)['Adj Close']
25
-
26
- # Fetch SOFR (Cash Yield) from FRED
27
- sofr = web.DataReader('SOFR', 'fred').ffill()
 
 
 
 
28
 
29
- df = pd.DataFrame(all_prices).sort_index().ffill()
30
- df['SOFR_ANNUAL'] = sofr / 100
31
- df.to_csv("market_data.csv")
32
- return df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
  import pandas_datareader.data as web
3
  import yfinance as yf
4
+ from huggingface_hub import hf_hub_download, HfApi
5
+ import os
6
  import streamlit as st
7
 
8
+ REPO_ID = "P2SAMAPA/etf_trend_data"
9
+ FILENAME = "market_data.csv"
 
 
 
 
10
 
11
+ # Universes
12
+ X_EQUITY_TICKERS = ["XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF", "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE", "XSW", "XTN", "XTL", "XNTK", "XITK"]
13
  FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
14
 
15
+ def seed_dataset_from_scratch():
16
+ """Download full history from 2008 for all 42+ tickers and upload to HF."""
17
+ tickers = list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY", "AGG"]))
18
+ master_df = pd.DataFrame()
19
+
20
+ progress_bar = st.progress(0)
21
+ for i, t in enumerate(tickers):
22
  try:
23
+ # We use yfinance for the heavy initial lift as it handles long historical ranges reliably
24
+ data = yf.download(t, start="2008-01-01", progress=False)['Adj Close']
25
+ master_df[t] = data
26
+ except Exception as e:
27
+ st.warning(f"Failed to fetch {t}: {e}")
28
+ progress_bar.progress((i + 1) / len(tickers))
29
+
30
+ # Add SOFR (Cash Interest)
31
+ sofr = web.DataReader('SOFR', 'fred', start="2008-01-01").ffill()
32
+ master_df['SOFR_ANNUAL'] = sofr / 100
33
 
34
+ master_df = master_df.sort_index().ffill().dropna(how='all')
35
+
36
+ # Save and Upload
37
+ master_df.to_csv(FILENAME)
38
+ upload_to_hf(FILENAME)
39
+ return master_df
40
+
41
+ def upload_to_hf(local_path):
42
+ """Pushes the local CSV to your Hugging Face Dataset repo."""
43
+ api = HfApi()
44
+ api.upload_file(
45
+ path_or_fileobj=local_path,
46
+ path_in_repo=FILENAME,
47
+ repo_id=REPO_ID,
48
+ repo_type="dataset",
49
+ token=st.secrets["HF_TOKEN"]
50
+ )
51
+
52
+ def load_from_hf():
53
+ """Reads the dataset from Hugging Face."""
54
+ try:
55
+ path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset", token=st.secrets["HF_TOKEN"])
56
+ return pd.read_csv(path, index_col=0, parse_dates=True)
57
+ except:
58
+ return None
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -11,78 +11,89 @@ st.set_page_config(layout="wide", page_title="P2 Trend Suite")
11
  # --- SIDEBAR UI ---
12
  st.sidebar.title("Strategy Controls")
13
 
14
- # 1. Option Selection
15
- option = st.sidebar.radio("Select Strategy Module",
16
  ("Option A - FI Trend Follower", "Option B - Equity Trend Follower"))
17
 
18
  # 2. Year Slider
19
- start_year = st.sidebar.slider("Start Year (OOS Period)", 2008, 2025, 2015)
20
 
21
- # 3. Vol Target & Sync
22
  vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
 
23
  if st.sidebar.button("πŸ”„ Sync Market Data"):
24
- refresh_market_data()
 
25
  st.sidebar.success("Data Synced!")
26
 
27
- # --- CALENDAR LOGIC ---
28
- nyse = mcal.get_calendar('NYSE')
29
- today = datetime.now().strftime('%Y-%m-%d')
30
- schedule = nyse.schedule(start_date=today, end_date='2026-12-31')
31
- next_trading_day = schedule.index[0].strftime('%A, %b %d, %Y')
32
-
33
- # --- EXECUTION ---
34
- if st.button("β–Ά Run Analysis"):
35
  data = pd.read_csv("market_data.csv", index_col=0, parse_dates=True)
36
 
37
- # Filter by Start Year
38
  data = data[data.index.year >= start_year]
39
-
40
- # Select Universe & Benchmark
41
  if "Option B" in option:
42
  universe = X_EQUITY_TICKERS
43
  benchmark_ticker = "SPY"
44
- module_name = "Equity"
45
  else:
46
  universe = FI_TICKERS
47
  benchmark_ticker = "AGG"
48
- module_name = "Fixed Income"
49
 
50
- # Run Engine
51
  results = run_trend_module(data[universe], data['SOFR_ANNUAL'], vol_target)
52
 
53
- # Metrics Calculation
54
- returns = results['returns']
55
- cum_returns = results['curve']
56
- bench_returns = data[benchmark_ticker].pct_change().fillna(0)
57
- bench_curve = (1 + bench_returns).cumprod()
58
 
59
- # Stats
60
- ann_return = (cum_returns.iloc[-1]**(252/len(returns)) - 1)
61
- sharpe = (returns.mean() * 252) / (returns.std() * np.sqrt(252))
62
 
63
- rolling_max = cum_returns.cummax()
64
- drawdown = (cum_returns - rolling_max) / rolling_max
65
- max_dd_peak = drawdown.min()
66
 
 
 
 
 
 
 
 
 
 
 
 
67
  # --- OUTPUT UI ---
68
- st.header(f"πŸ“Š {option} Results")
69
 
70
- # Target Allocation Section
71
- st.subheader(f"πŸ“… Next Day Target Allocation: {next_trading_day}")
72
- alloc_df = results['alloc']
73
- st.table(alloc_df[alloc_df['Weight (%)'] > 0].sort_values("Weight (%)", ascending=False))
 
 
 
74
 
75
- # Metrics Row
76
- m1, m2, m3, m4 = st.columns(4)
77
- m1.metric("Annualized Return", f"{ann_return:.2%}")
78
- m2.metric("Sharpe Ratio", f"{sharpe:.2f}")
79
- m3.metric("Max DD (Peak-to-Trough)", f"{max_dd_peak:.2%}")
80
- m4.metric("Last Daily Return", f"{returns.iloc[-1]:.2%}")
81
 
82
- # Chart
83
  st.subheader(f"Cumulative Return vs {benchmark_ticker}")
84
- chart_data = pd.DataFrame({
85
- "Strategy": cum_returns,
 
 
 
 
86
  f"Benchmark ({benchmark_ticker})": bench_curve
87
  })
88
- st.line_chart(chart_data)
 
 
 
 
 
11
  # --- SIDEBAR UI ---
12
  st.sidebar.title("Strategy Controls")
13
 
14
+ # 1. Module Toggle
15
+ option = st.sidebar.radio("Select Module",
16
  ("Option A - FI Trend Follower", "Option B - Equity Trend Follower"))
17
 
18
  # 2. Year Slider
19
+ start_year = st.sidebar.slider("Start Year", 2008, 2026, 2015)
20
 
21
+ # 3. Parameters
22
  vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
23
+
24
  if st.sidebar.button("πŸ”„ Sync Market Data"):
25
+ with st.spinner("Fetching Data..."):
26
+ refresh_market_data()
27
  st.sidebar.success("Data Synced!")
28
 
29
+ # --- DATA PROCESSING ---
30
+ try:
 
 
 
 
 
 
31
  data = pd.read_csv("market_data.csv", index_col=0, parse_dates=True)
32
 
33
+ # Filter by Year
34
  data = data[data.index.year >= start_year]
35
+
36
+ # Assign Universe & Benchmark
37
  if "Option B" in option:
38
  universe = X_EQUITY_TICKERS
39
  benchmark_ticker = "SPY"
 
40
  else:
41
  universe = FI_TICKERS
42
  benchmark_ticker = "AGG"
 
43
 
44
+ # Run Analysis
45
  results = run_trend_module(data[universe], data['SOFR_ANNUAL'], vol_target)
46
 
47
+ # --- CALCULATE METRICS ---
48
+ curve = results['curve']
49
+ rets = results['returns']
 
 
50
 
51
+ # Sharpe (Excess over 0)
52
+ sharpe = (rets.mean() * 252) / (rets.std() * np.sqrt(252))
 
53
 
54
+ # Annualized Return
55
+ total_days = (curve.index[-1] - curve.index[0]).days
56
+ ann_return = (curve.iloc[-1]**(365/total_days) - 1)
57
 
58
+ # Drawdowns
59
+ rolling_max = curve.cummax()
60
+ drawdown = (curve - rolling_max) / rolling_max
61
+ max_dd_peak = drawdown.min()
62
+ max_dd_daily = rets.min()
63
+
64
+ # NYSE Calendar for Next Day
65
+ nyse = mcal.get_calendar('NYSE')
66
+ schedule = nyse.schedule(start_date=datetime.now(), end_date='2026-12-31')
67
+ next_day = schedule.index[0].strftime('%Y-%m-%d')
68
+
69
  # --- OUTPUT UI ---
70
+ st.title(f"πŸ“Š {option}")
71
 
72
+ # Stats Row
73
+ c1, c2, c3, c4, c5 = st.columns(5)
74
+ c1.metric("Sharpe Ratio", f"{sharpe:.2f}")
75
+ c2.metric("Annual Return", f"{ann_return:.2%}")
76
+ c3.metric("Max DD (P-to-T)", f"{max_dd_peak:.2%}")
77
+ c4.metric("Max DD (Daily)", f"{max_dd_daily:.2%}")
78
+ c5.metric("Next Trade Date", next_day)
79
 
80
+ # Allocation Table
81
+ st.subheader(f"πŸ“ Target Allocation for {next_day}")
82
+ alloc = results['alloc']
83
+ st.dataframe(alloc[alloc['Weight (%)'] > 0].sort_values("Weight (%)", ascending=False), use_container_width=True)
 
 
84
 
85
+ # Performance Chart
86
  st.subheader(f"Cumulative Return vs {benchmark_ticker}")
87
+ bench_curve = (1 + data[benchmark_ticker].pct_change().fillna(0)).cumprod()
88
+ # Normalize benchmark to start at 1.0 at start_year
89
+ bench_curve = bench_curve / bench_curve.iloc[0]
90
+
91
+ chart_df = pd.DataFrame({
92
+ "Strategy": curve,
93
  f"Benchmark ({benchmark_ticker})": bench_curve
94
  })
95
+ st.line_chart(chart_df)
96
+
97
+ except Exception as e:
98
+ st.info("Please Click 'Sync Market Data' in the sidebar to initialize the engine.")
99
+ st.error(f"Waiting for data... (Technical details: {e})")
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,39 +1,88 @@
1
  import streamlit as st
2
  import pandas as pd
 
 
 
3
  from data.loader import refresh_market_data, X_EQUITY_TICKERS, FI_TICKERS
4
  from engine.trend_engine import run_trend_module
5
 
6
- st.set_page_config(layout="wide", page_title="P2 ETF Trend Suite")
7
 
8
- st.sidebar.title("Settings")
9
- vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
 
 
 
 
 
 
 
10
 
11
- if st.sidebar.button("πŸ”„ Refresh Market Data"):
 
 
12
  refresh_market_data()
13
- st.sidebar.success("Data Updated from Stooq/SOFR!")
 
 
 
 
 
 
14
 
15
- if st.button("β–Ά Run All Modules"):
 
16
  data = pd.read_csv("market_data.csv", index_col=0, parse_dates=True)
17
 
18
- # Run Modules
19
- eq_res = run_trend_module(data[X_EQUITY_TICKERS], data['SOFR_ANNUAL'], vol_target)
20
- fi_res = run_trend_module(data[FI_TICKERS], data['SOFR_ANNUAL'], vol_target)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- # Performance Comparison
23
- spy_curve = (1 + data['SPY'].pct_change()).cumprod()
24
- comparison = pd.DataFrame({
25
- "X-ETF Strategy": eq_res['curve'],
26
- "SPY Benchmark": spy_curve
27
- }).dropna()
28
-
29
- st.header("πŸ“ˆ Performance: Equity Strategy vs. SPY")
30
- st.line_chart(comparison)
31
 
32
- # Target Allocations
33
- col1, col2 = st.columns(2)
34
- with col1:
35
- st.subheader("πŸ›‘οΈ Equity Allocation (Next Day)")
36
- st.dataframe(eq_res['alloc'][eq_res['alloc']['Weight (%)'] > 0])
37
- with col2:
38
- st.subheader("🏦 FI Comparison Allocation")
39
- st.dataframe(fi_res['alloc'][fi_res['alloc']['Weight (%)'] > 0])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  import pandas as pd
3
+ import numpy as np
4
+ import pandas_market_calendars as mcal
5
+ from datetime import datetime
6
  from data.loader import refresh_market_data, X_EQUITY_TICKERS, FI_TICKERS
7
  from engine.trend_engine import run_trend_module
8
 
9
+ st.set_page_config(layout="wide", page_title="P2 Trend Suite")
10
 
11
+ # --- SIDEBAR UI ---
12
+ st.sidebar.title("Strategy Controls")
13
+
14
+ # 1. Option Selection
15
+ option = st.sidebar.radio("Select Strategy Module",
16
+ ("Option A - FI Trend Follower", "Option B - Equity Trend Follower"))
17
+
18
+ # 2. Year Slider
19
+ start_year = st.sidebar.slider("Start Year (OOS Period)", 2008, 2025, 2015)
20
 
21
+ # 3. Vol Target & Sync
22
+ vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
23
+ if st.sidebar.button("πŸ”„ Sync Market Data"):
24
  refresh_market_data()
25
+ st.sidebar.success("Data Synced!")
26
+
27
+ # --- CALENDAR LOGIC ---
28
+ nyse = mcal.get_calendar('NYSE')
29
+ today = datetime.now().strftime('%Y-%m-%d')
30
+ schedule = nyse.schedule(start_date=today, end_date='2026-12-31')
31
+ next_trading_day = schedule.index[0].strftime('%A, %b %d, %Y')
32
 
33
+ # --- EXECUTION ---
34
+ if st.button("β–Ά Run Analysis"):
35
  data = pd.read_csv("market_data.csv", index_col=0, parse_dates=True)
36
 
37
+ # Filter by Start Year
38
+ data = data[data.index.year >= start_year]
39
+
40
+ # Select Universe & Benchmark
41
+ if "Option B" in option:
42
+ universe = X_EQUITY_TICKERS
43
+ benchmark_ticker = "SPY"
44
+ module_name = "Equity"
45
+ else:
46
+ universe = FI_TICKERS
47
+ benchmark_ticker = "AGG"
48
+ module_name = "Fixed Income"
49
+
50
+ # Run Engine
51
+ results = run_trend_module(data[universe], data['SOFR_ANNUAL'], vol_target)
52
+
53
+ # Metrics Calculation
54
+ returns = results['returns']
55
+ cum_returns = results['curve']
56
+ bench_returns = data[benchmark_ticker].pct_change().fillna(0)
57
+ bench_curve = (1 + bench_returns).cumprod()
58
+
59
+ # Stats
60
+ ann_return = (cum_returns.iloc[-1]**(252/len(returns)) - 1)
61
+ sharpe = (returns.mean() * 252) / (returns.std() * np.sqrt(252))
62
 
63
+ rolling_max = cum_returns.cummax()
64
+ drawdown = (cum_returns - rolling_max) / rolling_max
65
+ max_dd_peak = drawdown.min()
 
 
 
 
 
 
66
 
67
+ # --- OUTPUT UI ---
68
+ st.header(f"πŸ“Š {option} Results")
69
+
70
+ # Target Allocation Section
71
+ st.subheader(f"πŸ“… Next Day Target Allocation: {next_trading_day}")
72
+ alloc_df = results['alloc']
73
+ st.table(alloc_df[alloc_df['Weight (%)'] > 0].sort_values("Weight (%)", ascending=False))
74
+
75
+ # Metrics Row
76
+ m1, m2, m3, m4 = st.columns(4)
77
+ m1.metric("Annualized Return", f"{ann_return:.2%}")
78
+ m2.metric("Sharpe Ratio", f"{sharpe:.2f}")
79
+ m3.metric("Max DD (Peak-to-Trough)", f"{max_dd_peak:.2%}")
80
+ m4.metric("Last Daily Return", f"{returns.iloc[-1]:.2%}")
81
+
82
+ # Chart
83
+ st.subheader(f"Cumulative Return vs {benchmark_ticker}")
84
+ chart_data = pd.DataFrame({
85
+ "Strategy": cum_returns,
86
+ f"Benchmark ({benchmark_ticker})": bench_curve
87
+ })
88
+ st.line_chart(chart_data)
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/trend_engine.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
def run_trend_module(prices, daily_sofr, vol_target):
    """Dual-trend (Donchian/Keltner) long-only module with risk-parity sizing.

    Args:
        prices: DataFrame of daily close prices, one column per ticker.
        daily_sofr: Series of annualized SOFR rates as decimals (e.g. 0.05).
        vol_target: annualized portfolio volatility target as a decimal.

    Returns:
        dict with:
            'curve'   -- compounded equity curve (starts near 1.0),
            'returns' -- daily strategy return series (NaN days filled with 0),
            'alloc'   -- next-day target allocation table.
    """
    # 1. Dual-Trend Signal: enter when price clears the tighter of the
    # 20-day Donchian high and a Keltner-style upper band.
    d_high = prices.rolling(20).max()
    sma = prices.rolling(20).mean()
    # NOTE(review): this "ATR" is half the 20-day high-low range, not a true
    # Average True Range -- confirm the proxy is intentional.
    atr = (prices.rolling(20).max() - prices.rolling(20).min()) / 2
    k_upper = sma + (2 * atr)

    entry_band = np.minimum(d_high, k_upper)
    signals = (prices > entry_band.shift(1)).astype(int)

    # 2. Risk Parity Position Sizing
    returns = prices.pct_change()
    realized_vol = returns.rolling(21).std() * np.sqrt(252)

    n = len(prices.columns)
    # Target weight = (Target Vol / Total Assets) / Individual Asset Vol
    target_weights = (vol_target / n) / realized_vol.shift(1)

    # 3. Strategy Returns (Positions + SOFR on Cash)
    pos_rets = (signals.shift(1) * target_weights.shift(1) * returns).sum(axis=1)
    weight_used = (signals.shift(1) * target_weights.shift(1)).sum(axis=1)
    cash_rets = (1 - weight_used).clip(0, 1) * (daily_sofr / 252)

    # BUG FIX: the original computed (1 + strat_rets).fillna(0).cumprod(),
    # which maps any NaN day (e.g. dates before SOFR history begins) to a
    # growth factor of 0 and permanently zeroes the equity curve. Fill the
    # *returns* with 0 instead so NaN days are simply flat.
    strat_rets = (pos_rets + cash_rets).fillna(0)
    equity_curve = (1 + strat_rets).cumprod()

    # 4. Target Allocation for Tomorrow
    tomorrow_sig = (prices.iloc[-1] > entry_band.iloc[-1]).astype(int)
    tomorrow_w = (vol_target / n) / realized_vol.iloc[-1]

    alloc = pd.DataFrame({
        "Ticker": prices.columns,
        "Signal": ["LONG" if s == 1 else "CASH" for s in tomorrow_sig],
        "Weight (%)": (tomorrow_sig * tomorrow_w * 100).round(2),
    })

    # 'returns' is exposed because the Streamlit app in this commit derives
    # Sharpe and drawdown statistics from the daily return series.
    return {"curve": equity_curve, "returns": strat_rets, "alloc": alloc}
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/engine/engine/trend_engine.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+
4
def run_trend_module(prices, daily_sofr, vol_target):
    """20-day Keltner/Donchian trend module with risk-parity weighting.

    Args:
        prices: DataFrame of daily close prices, one column per ticker.
        daily_sofr: Series of annualized SOFR rates as decimals (e.g. 0.05).
        vol_target: annualized portfolio volatility target as a decimal.

    Returns:
        dict with 'curve' (compounded equity curve), 'returns' (daily
        strategy returns, NaN days filled with 0) and 'alloc' (next-day
        target allocation table).
    """
    # 1. Signals (20-day Keltner/Donchian)
    d_high = prices.rolling(20).max()
    k_sma = prices.rolling(20).mean()
    # NOTE(review): "atr" here is half the 20-day range, not a true ATR --
    # confirm the proxy is intentional.
    atr = (prices.rolling(20).max() - prices.rolling(20).min()) / 2
    k_upper = k_sma + (2 * atr)

    entry_band = np.minimum(d_high, k_upper)
    signals = (prices > entry_band.shift(1)).astype(int)

    # 2. Risk Parity Weighting
    rets = prices.pct_change()
    real_vol = rets.rolling(21).std() * np.sqrt(252)

    n = len(prices.columns)
    weights = (vol_target / n) / real_vol.shift(1)

    # 3. Strategy Returns (Positions + Cash Interest)
    strat_rets = (signals.shift(1) * weights.shift(1) * rets).sum(axis=1)
    unused_cap = 1 - (signals.shift(1) * weights.shift(1)).sum(axis=1)
    strat_rets += unused_cap.clip(0, 1) * (daily_sofr / 252)

    # FIX: fill NaN days (warm-up window / missing SOFR history) with a flat
    # return so the compounded curve is not poisoned by NaN growth factors.
    strat_rets = strat_rets.fillna(0)
    equity_curve = (1 + strat_rets).cumprod()

    # Next Day Allocation
    tomorrow_sig = (prices.iloc[-1] > entry_band.iloc[-1]).astype(int)
    tomorrow_w = (vol_target / n) / real_vol.iloc[-1]
    alloc = pd.DataFrame({
        "Ticker": prices.columns,
        "Weight (%)": (tomorrow_sig * tomorrow_w * 100).round(2)
    })

    # FIX: also expose 'returns'; the UI layer in this commit reads
    # results['returns'] for Sharpe/drawdown stats, and the sibling engine
    # already returns this key (backward-compatible addition).
    return {"curve": equity_curve, "returns": strat_rets, "alloc": alloc}
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/app.py CHANGED
@@ -1,54 +1,39 @@
1
  import streamlit as st
 
 
 
2
 
3
- st.set_page_config(page_title="P2 ETF Trend Suite", layout="wide")
4
 
5
- st.title("πŸ“Š P2 ETF Trend Suite")
6
- st.markdown("Stooq-Primary Data Engine + HF Integration")
7
 
8
- # Sidebar Controls
9
- st.sidebar.header("Parameters")
10
- initial_capital = st.sidebar.number_input("Initial Capital", value=100000)
11
- vol_target = st.sidebar.slider("Target Volatility", 0.05, 0.30, 0.15)
12
- lookback = st.sidebar.slider("Lookback (Days)", 50, 300, 200)
13
 
14
- st.sidebar.markdown("---")
15
- st.sidebar.header("Hugging Face Sync")
16
- hf_repo = st.sidebar.text_input("Repo ID", placeholder="user/dataset-name")
17
- hf_token = st.sidebar.text_input("HF Token", type="password")
18
-
19
- run_button = st.sidebar.button("β–Ά Run Full Process")
20
-
21
- if run_button:
22
- from data.loader import load_data, push_to_hf
23
- from engine.backtest import run_backtest
24
- from analytics.metrics import compute_metrics
25
-
26
- # Phase 1: Data Fetching
27
- with st.spinner("Fetching data from Stooq..."):
28
- df = load_data()
29
 
30
- if not df.empty:
31
- st.subheader("πŸ“ˆ Market Data Preview")
32
- st.dataframe(df.tail(5), use_container_width=True)
33
-
34
- # Phase 2: Backtesting
35
- with st.spinner("Calculating Trend Strategy..."):
36
- results = run_backtest(df, initial_capital, vol_target, lookback)
37
- metrics = compute_metrics(results["returns"])
38
-
39
- # Display Results
40
- st.success("Analysis Complete")
41
- c1, c2, c3 = st.columns(3)
42
- c1.metric("CAGR", f"{metrics['cagr']:.2%}")
43
- c2.metric("Sharpe", f"{metrics['sharpe']:.2f}")
44
- c3.metric("Max Drawdown", f"{metrics['max_dd']:.2%}")
45
-
46
- st.line_chart(results["equity_curve"])
47
-
48
- # Phase 3: HF Sync
49
- if hf_repo and hf_token:
50
- with st.spinner("Syncing to Hugging Face..."):
51
- push_to_hf(df, hf_repo, hf_token)
52
- st.sidebar.success("βœ… Dataset Synced!")
53
- else:
54
- st.error("Data fetch failed. Verify ticker symbols.")
 
1
  import streamlit as st
2
+ import pandas as pd
3
+ from data.loader import refresh_market_data, X_EQUITY_TICKERS, FI_TICKERS
4
+ from engine.trend_engine import run_trend_module
5
 
6
+ st.set_page_config(layout="wide", page_title="P2 ETF Trend Suite")
7
 
8
+ st.sidebar.title("Settings")
9
+ vol_target = st.sidebar.slider("Annual Vol Target", 0.05, 0.25, 0.126)
10
 
11
+ if st.sidebar.button("πŸ”„ Refresh Market Data"):
12
+ refresh_market_data()
13
+ st.sidebar.success("Data Updated from Stooq/SOFR!")
 
 
14
 
15
+ if st.button("β–Ά Run All Modules"):
16
+ data = pd.read_csv("market_data.csv", index_col=0, parse_dates=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
+ # Run Modules
19
+ eq_res = run_trend_module(data[X_EQUITY_TICKERS], data['SOFR_ANNUAL'], vol_target)
20
+ fi_res = run_trend_module(data[FI_TICKERS], data['SOFR_ANNUAL'], vol_target)
21
+
22
+ # Performance Comparison
23
+ spy_curve = (1 + data['SPY'].pct_change()).cumprod()
24
+ comparison = pd.DataFrame({
25
+ "X-ETF Strategy": eq_res['curve'],
26
+ "SPY Benchmark": spy_curve
27
+ }).dropna()
28
+
29
+ st.header("πŸ“ˆ Performance: Equity Strategy vs. SPY")
30
+ st.line_chart(comparison)
31
+
32
+ # Target Allocations
33
+ col1, col2 = st.columns(2)
34
+ with col1:
35
+ st.subheader("πŸ›‘οΈ Equity Allocation (Next Day)")
36
+ st.dataframe(eq_res['alloc'][eq_res['alloc']['Weight (%)'] > 0])
37
+ with col2:
38
+ st.subheader("🏦 FI Comparison Allocation")
39
+ st.dataframe(fi_res['alloc'][fi_res['alloc']['Weight (%)'] > 0])
 
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/data/loader.py CHANGED
@@ -1,56 +1,32 @@
1
- import pandas as pd
2
  import pandas_datareader.data as web
3
  import yfinance as yf
4
- from datasets import Dataset
5
  import streamlit as st
6
- from datetime import datetime
7
 
8
- # Combined Universe (All will attempt Stooq first)
9
- TICKERS = ["SPY", "QQQ", "IWM", "TLT", "IEF", "SHY", "GLD"]
 
 
 
 
10
 
11
- def load_data(tickers=TICKERS):
12
- """Fetches data from Stooq with yfinance fallback."""
13
- all_series = {}
14
 
15
- for ticker in tickers:
16
- success = False
17
- # 1. Primary: Stooq
 
 
18
  try:
19
- # Stooq format: TICKER.US (e.g., TLT.US)
20
- stooq_symbol = f"{ticker}.US"
21
- df_stooq = web.DataReader(stooq_symbol, 'stooq')
22
 
23
- if not df_stooq.empty:
24
- # Stooq returns newest data first; sort to ascending for backtests
25
- all_series[ticker] = df_stooq['Close'].sort_index()
26
- st.toast(f"βœ… {ticker} loaded from Stooq")
27
- success = True
28
- except Exception as e:
29
- print(f"Stooq failed for {ticker}: {e}")
30
-
31
- # 2. Fallback: yfinance
32
- if not success:
33
- try:
34
- yf_df = yf.download(ticker, period="max", progress=False)
35
- if not yf_df.empty:
36
- # Use Adj Close to account for dividends/splits
37
- all_series[ticker] = yf_df['Adj Close']
38
- st.toast(f"⚠️ {ticker} loaded from yfinance (Fallback)")
39
- success = True
40
- except Exception as e:
41
- st.error(f"❌ Critical: Could not load {ticker} from any source.")
42
-
43
- if all_series:
44
- # Align all tickers on the same dates and drop missing values
45
- return pd.concat(all_series, axis=1).dropna()
46
- return pd.DataFrame()
47
-
48
- def push_to_hf(df, repo_id, token):
49
- """Pushes the current dataframe to Hugging Face Hub."""
50
- # Ensure Date is a column, not an index, for HF compatibility
51
- hf_export = df.reset_index()
52
- hf_export.columns = [str(col) for col in hf_export.columns] # Ensure string columns
53
 
54
- dataset = Dataset.from_pandas(hf_export)
55
- dataset.push_to_hub(repo_id, token=token)
56
- return True
 
 
 
1
  import pandas_datareader.data as web
2
  import yfinance as yf
3
+ import pandas as pd
4
  import streamlit as st
 
5
 
6
+ # 27 "X-" EQUITY ETFS
7
+ X_EQUITY_TICKERS = [
8
+ "XLK", "XLY", "XLP", "XLE", "XLV", "XLI", "XLB", "XLRE", "XLU", "XLC", "XLF",
9
+ "XBI", "XME", "XOP", "XHB", "XSD", "XRT", "XPH", "XES", "XAR", "XHS", "XHE",
10
+ "XSW", "XTN", "XTL", "XNTK", "XITK"
11
+ ]
12
 
13
+ # 15 FIXED INCOME / COMPARISON
14
+ FI_TICKERS = ["TLT", "IEF", "TIP", "TBT", "GLD", "SLV", "VGIT", "VCLT", "VCIT", "HYG", "PFF", "MBB", "VNQ", "LQD", "AGG"]
 
15
 
16
+ def refresh_market_data():
17
+ """Syncs Stooq/FRED data to local CSV and HF."""
18
+ all_prices = {}
19
+ # Download all groups + SPY Benchmark
20
+ for t in list(set(X_EQUITY_TICKERS + FI_TICKERS + ["SPY"])):
21
  try:
22
+ all_prices[t] = web.DataReader(f"{t}.US", "stooq")['Close']
23
+ except:
24
+ all_prices[t] = yf.download(t, progress=False)['Adj Close']
25
 
26
+ # Fetch SOFR (Cash Yield) from FRED
27
+ sofr = web.DataReader('SOFR', 'fred').ffill()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
+ df = pd.DataFrame(all_prices).sort_index().ffill()
30
+ df['SOFR_ANNUAL'] = sofr / 100
31
+ df.to_csv("market_data.csv")
32
+ return df
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/README.md CHANGED
@@ -1,19 +1,15 @@
1
  ---
2
- title: P2 ETF TREND SUITE
3
- emoji: πŸš€
4
- colorFrom: red
5
- colorTo: red
6
  sdk: docker
7
- app_port: 8501
8
- tags:
9
- - streamlit
10
  pinned: false
11
- short_description: Streamlit template space
12
  ---
13
 
14
- # Welcome to Streamlit!
 
15
 
16
- Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
17
-
18
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
19
- forums](https://discuss.streamlit.io).
 
1
  ---
2
+ title: P2 ETF Trend Suite
3
+ emoji: πŸ“Š
4
+ colorFrom: blue
5
+ colorTo: indigo
6
  sdk: docker
7
+ app_port: 7860
 
 
8
  pinned: false
 
9
  ---
10
 
11
+ # πŸ“Š P2 ETF Trend Suite
12
+ Institutional ETF Trend + Volatility Targeting Engine.
13
 
14
+ ### πŸš€ Setup Info
15
+ This Space runs a Dockerized Streamlit app. It uses **Stooq** for market data with **yfinance** as a fallback.
 
 
hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/hf_space/Dockerfile CHANGED
@@ -1,26 +1,31 @@
1
- FROM python:3.10
 
 
 
 
 
 
2
 
3
  WORKDIR /app
4
 
5
- # Copy requirements first (better layer caching)
 
 
 
 
 
 
6
  COPY requirements.txt .
7
 
8
- RUN pip install --upgrade pip
9
- RUN pip install --no-cache-dir -r requirements.txt
 
10
 
11
- # Copy full project
12
  COPY . .
13
 
 
14
  EXPOSE 7860
15
 
16
- ENV STREAMLIT_SERVER_PORT=7860
17
- ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0
18
- ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
19
-
20
- # Diagnostic startup command
21
- CMD ["bash", "-c", "echo '===== CONTAINER BOOTING ====='; \
22
- echo 'Python Version:'; python -V; \
23
- echo 'Current Directory:'; pwd; \
24
- echo 'Directory Listing:'; ls -la; \
25
- echo 'Starting Streamlit...'; \
26
- python -m streamlit run app.py --server.headless=true"]
 
1
+ # Use a lightweight but stable Python base
2
+ FROM python:3.10-slim
3
+
4
+ # Set environment variables for speed and logging
5
+ ENV PYTHONUNBUFFERED=1 \
6
+ PYTHONDONTWRITEBYTECODE=1 \
7
+ PIP_NO_CACHE_DIR=1
8
 
9
  WORKDIR /app
10
 
11
+ # Install system dependencies needed for pandas/datareader
12
+ RUN apt-get update && apt-get install -y \
13
+ build-essential \
14
+ curl \
15
+ && rm -rf /var/lib/apt/lists/*
16
+
17
+ # Copy only requirements first to leverage Docker cache
18
  COPY requirements.txt .
19
 
20
+ # Install dependencies (use --no-cache-dir to keep image small)
21
+ RUN pip install --upgrade pip && \
22
+ pip install -r requirements.txt
23
 
24
+ # Copy the rest of the application
25
  COPY . .
26
 
27
+ # Ensure the app runs on the port HF expects (7860 for Docker)
28
  EXPOSE 7860
29
 
30
+ # Correct entrypoint for Streamlit in a container
31
+ ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]