Monketoo commited on
Commit
67848e0
·
verified ·
1 Parent(s): 9f60462

Add files using upload-large-folder tool

Browse files
Files changed (49) hide show
  1. samples/pdfs/2909063.pdf +0 -0
  2. samples/pdfs/4523932.pdf +0 -0
  3. samples/pdfs/4808858.pdf +0 -0
  4. samples/pdfs/7334540.pdf +0 -0
  5. samples/pdfs/904681.pdf +0 -0
  6. samples/sample_metadata.jsonl +99 -99
  7. samples/texts_merged/1168240.md +345 -0
  8. samples/texts_merged/1223200.md +300 -0
  9. samples/texts_merged/1259736.md +0 -0
  10. samples/texts_merged/1772599.md +1063 -0
  11. samples/texts_merged/1808935.md +409 -0
  12. samples/texts_merged/1836869.md +606 -0
  13. samples/texts_merged/1885128.md +507 -0
  14. samples/texts_merged/1973835.md +0 -0
  15. samples/texts_merged/199837.md +284 -0
  16. samples/texts_merged/2092097.md +346 -0
  17. samples/texts_merged/230879.md +885 -0
  18. samples/texts_merged/2634535.md +447 -0
  19. samples/texts_merged/2865847.md +129 -0
  20. samples/texts_merged/2918349.md +208 -0
  21. samples/texts_merged/3148538.md +141 -0
  22. samples/texts_merged/3193892.md +136 -0
  23. samples/texts_merged/3224121.md +735 -0
  24. samples/texts_merged/3327355.md +0 -0
  25. samples/texts_merged/339686.md +125 -0
  26. samples/texts_merged/3495399.md +382 -0
  27. samples/texts_merged/3603622.md +245 -0
  28. samples/texts_merged/3764397.md +278 -0
  29. samples/texts_merged/3884483.md +0 -0
  30. samples/texts_merged/393503.md +393 -0
  31. samples/texts_merged/4174805.md +578 -0
  32. samples/texts_merged/4239587.md +872 -0
  33. samples/texts_merged/4523932.md +952 -0
  34. samples/texts_merged/4579765.md +623 -0
  35. samples/texts_merged/4753802.md +646 -0
  36. samples/texts_merged/4971236.md +562 -0
  37. samples/texts_merged/500594.md +443 -0
  38. samples/texts_merged/6016935.md +0 -0
  39. samples/texts_merged/6218816.md +379 -0
  40. samples/texts_merged/6293016.md +525 -0
  41. samples/texts_merged/6426180.md +121 -0
  42. samples/texts_merged/6697438.md +416 -0
  43. samples/texts_merged/6813453.md +826 -0
  44. samples/texts_merged/7089754.md +0 -0
  45. samples/texts_merged/7334540.md +56 -0
  46. samples/texts_merged/7342615.md +309 -0
  47. samples/texts_merged/7563909.md +0 -0
  48. samples/texts_merged/7569662.md +198 -0
  49. samples/texts_merged/904681.md +228 -0
samples/pdfs/2909063.pdf ADDED
Binary file (60.3 kB). View file
 
samples/pdfs/4523932.pdf ADDED
Binary file (78.3 kB). View file
 
samples/pdfs/4808858.pdf ADDED
Binary file (55.4 kB). View file
 
samples/pdfs/7334540.pdf ADDED
Binary file (79.7 kB). View file
 
samples/pdfs/904681.pdf ADDED
Binary file (41.6 kB). View file
 
samples/sample_metadata.jsonl CHANGED
@@ -1,100 +1,100 @@
1
- {"doc_id": "6545431", "mean_proba": 0.8814646013908916, "num_pages": 36}
2
- {"doc_id": "3955960", "mean_proba": 0.9913606899125236, "num_pages": 56}
3
- {"doc_id": "213962", "mean_proba": 0.9412138696227754, "num_pages": 42}
4
- {"doc_id": "7332466", "mean_proba": 0.9958903292814892, "num_pages": 12}
5
- {"doc_id": "6729477", "mean_proba": 0.8684389293193817, "num_pages": 16}
6
- {"doc_id": "1666824", "mean_proba": 0.9380706186805452, "num_pages": 28}
7
- {"doc_id": "6189594", "mean_proba": 0.9947413866009032, "num_pages": 14}
8
- {"doc_id": "6361280", "mean_proba": 0.9950645359662862, "num_pages": 13}
9
- {"doc_id": "1693876", "mean_proba": 0.9907086342573166, "num_pages": 4}
10
- {"doc_id": "7156187", "mean_proba": 0.9797802279735434, "num_pages": 29}
11
- {"doc_id": "7755458", "mean_proba": 0.987359182192729, "num_pages": 13}
12
- {"doc_id": "4055151", "mean_proba": 0.912624683517676, "num_pages": 26}
13
- {"doc_id": "1172375", "mean_proba": 0.953002940524708, "num_pages": 11}
14
- {"doc_id": "1789294", "mean_proba": 0.8442278280854225, "num_pages": 4}
15
- {"doc_id": "2126836", "mean_proba": 0.8163998981609064, "num_pages": 272}
16
- {"doc_id": "324098", "mean_proba": 0.989006942510605, "num_pages": 5}
17
- {"doc_id": "5137227", "mean_proba": 0.989109086804092, "num_pages": 32}
18
- {"doc_id": "5658873", "mean_proba": 0.9915072917938232, "num_pages": 11}
19
- {"doc_id": "2932683", "mean_proba": 0.9361854828894138, "num_pages": 16}
20
- {"doc_id": "5999157", "mean_proba": 0.922621601819992, "num_pages": 15}
21
- {"doc_id": "2487380", "mean_proba": 0.9654558925401596, "num_pages": 21}
22
- {"doc_id": "6152053", "mean_proba": 0.8964151733595392, "num_pages": 46}
23
- {"doc_id": "3246292", "mean_proba": 0.9561361407532412, "num_pages": 17}
24
- {"doc_id": "647655", "mean_proba": 0.9763310998678209, "num_pages": 32}
25
- {"doc_id": "3336595", "mean_proba": 0.9988973836104076, "num_pages": 6}
26
- {"doc_id": "1188587", "mean_proba": 0.8714917524386261, "num_pages": 59}
27
- {"doc_id": "1378706", "mean_proba": 0.999192284213172, "num_pages": 9}
28
- {"doc_id": "7878336", "mean_proba": 0.9919242039322852, "num_pages": 12}
29
- {"doc_id": "668834", "mean_proba": 0.9523984690507252, "num_pages": 3}
30
- {"doc_id": "2665585", "mean_proba": 0.9995250713825226, "num_pages": 25}
31
- {"doc_id": "1378764", "mean_proba": 0.9841879036496668, "num_pages": 34}
32
- {"doc_id": "582263", "mean_proba": 0.9408483675548008, "num_pages": 7}
33
- {"doc_id": "2889479", "mean_proba": 0.978348558319026, "num_pages": 29}
34
- {"doc_id": "7173360", "mean_proba": 0.9976005894797187, "num_pages": 21}
35
- {"doc_id": "2947864", "mean_proba": 0.980615821149614, "num_pages": 9}
36
- {"doc_id": "2384710", "mean_proba": 0.9535730459860392, "num_pages": 14}
37
- {"doc_id": "841018", "mean_proba": 0.9902740854483384, "num_pages": 13}
38
- {"doc_id": "3880484", "mean_proba": 0.8303749095648527, "num_pages": 16}
39
- {"doc_id": "6159994", "mean_proba": 0.9970970955159928, "num_pages": 45}
40
  {"doc_id": "565481", "mean_proba": 0.9996201992034912, "num_pages": 4}
41
- {"doc_id": "5725464", "mean_proba": 0.9706398715314112, "num_pages": 76}
42
- {"doc_id": "2660370", "mean_proba": 0.9425864967153124, "num_pages": 368}
43
- {"doc_id": "4283718", "mean_proba": 0.8407226204872131, "num_pages": 2}
44
- {"doc_id": "4961582", "mean_proba": 0.991776500429426, "num_pages": 21}
45
- {"doc_id": "6038087", "mean_proba": 0.9867547584904564, "num_pages": 27}
46
- {"doc_id": "1640880", "mean_proba": 0.8685442879796028, "num_pages": 4}
47
- {"doc_id": "47713", "mean_proba": 0.9748652529331944, "num_pages": 31}
48
- {"doc_id": "218831", "mean_proba": 0.8872458606194227, "num_pages": 39}
49
- {"doc_id": "2710881", "mean_proba": 0.9833595033954172, "num_pages": 34}
50
- {"doc_id": "4742797", "mean_proba": 0.9487975366064348, "num_pages": 512}
51
- {"doc_id": "4054627", "mean_proba": 0.8568866426746051, "num_pages": 24}
52
- {"doc_id": "3863109", "mean_proba": 0.9898869842290878, "num_pages": 14}
53
- {"doc_id": "4767451", "mean_proba": 0.966353714466095, "num_pages": 2}
54
- {"doc_id": "6284605", "mean_proba": 0.9863201938569546, "num_pages": 24}
55
- {"doc_id": "1546286", "mean_proba": 0.9824597297645196, "num_pages": 41}
56
- {"doc_id": "5963949", "mean_proba": 0.8951789796352386, "num_pages": 10}
57
- {"doc_id": "3975828", "mean_proba": 0.9917463935338534, "num_pages": 13}
58
- {"doc_id": "4729919", "mean_proba": 0.9977401705349194, "num_pages": 17}
59
- {"doc_id": "7336068", "mean_proba": 0.9684559280673662, "num_pages": 12}
60
- {"doc_id": "1834803", "mean_proba": 0.9965955689549446, "num_pages": 4}
61
- {"doc_id": "6759244", "mean_proba": 0.924437294403712, "num_pages": 30}
62
- {"doc_id": "2753278", "mean_proba": 0.9987588660283522, "num_pages": 11}
63
- {"doc_id": "3441871", "mean_proba": 0.9961503624916076, "num_pages": 10}
64
- {"doc_id": "1768104", "mean_proba": 0.8594257831573486, "num_pages": 16}
65
- {"doc_id": "2251660", "mean_proba": 0.9966943013040644, "num_pages": 19}
66
- {"doc_id": "3395999", "mean_proba": 0.9834167063236235, "num_pages": 5}
67
- {"doc_id": "5577417", "mean_proba": 0.967733658850193, "num_pages": 4}
68
- {"doc_id": "5640834", "mean_proba": 0.9983150362968444, "num_pages": 2}
69
- {"doc_id": "6708780", "mean_proba": 0.9998646552364032, "num_pages": 12}
70
- {"doc_id": "7113096", "mean_proba": 0.9582548439502716, "num_pages": 1}
71
- {"doc_id": "2565362", "mean_proba": 0.9888773594911282, "num_pages": 26}
72
- {"doc_id": "4385907", "mean_proba": 0.8863706297495149, "num_pages": 176}
73
- {"doc_id": "1623821", "mean_proba": 1.0000049471855164, "num_pages": 1}
74
- {"doc_id": "7346654", "mean_proba": 0.8726378764425005, "num_pages": 7}
75
- {"doc_id": "93120", "mean_proba": 0.9986219868063926, "num_pages": 20}
76
- {"doc_id": "2234121", "mean_proba": 0.993724638223648, "num_pages": 10}
77
- {"doc_id": "7621530", "mean_proba": 0.9448085086686272, "num_pages": 14}
78
- {"doc_id": "4150074", "mean_proba": 0.9930534839630129, "num_pages": 10}
79
- {"doc_id": "6274397", "mean_proba": 0.8951933681964874, "num_pages": 1}
80
- {"doc_id": "5687555", "mean_proba": 0.8110953032970428, "num_pages": 5}
81
- {"doc_id": "7856253", "mean_proba": 0.8499215410815345, "num_pages": 27}
82
- {"doc_id": "7548747", "mean_proba": 0.9723348537006892, "num_pages": 37}
83
- {"doc_id": "1096954", "mean_proba": 0.9979800879955292, "num_pages": 12}
84
- {"doc_id": "4515563", "mean_proba": 0.9912579745054244, "num_pages": 10}
85
- {"doc_id": "1230197", "mean_proba": 0.948458981513977, "num_pages": 5}
86
- {"doc_id": "203609", "mean_proba": 0.9800784200429916, "num_pages": 40}
87
- {"doc_id": "1096347", "mean_proba": 0.992400233944257, "num_pages": 24}
88
- {"doc_id": "7693403", "mean_proba": 0.9032960954834433, "num_pages": 17}
89
- {"doc_id": "3611010", "mean_proba": 0.978197129182918, "num_pages": 93}
90
- {"doc_id": "2531237", "mean_proba": 0.9984967932105064, "num_pages": 16}
91
- {"doc_id": "4694300", "mean_proba": 0.9998620549837748, "num_pages": 3}
92
- {"doc_id": "6422547", "mean_proba": 0.9989109501379344, "num_pages": 109}
93
- {"doc_id": "2177428", "mean_proba": 0.905204855969974, "num_pages": 14}
94
- {"doc_id": "1922832", "mean_proba": 0.9945877194404602, "num_pages": 6}
95
- {"doc_id": "5573174", "mean_proba": 0.9889477075714814, "num_pages": 38}
96
- {"doc_id": "901380", "mean_proba": 0.955151192843914, "num_pages": 4}
97
- {"doc_id": "3863943", "mean_proba": 0.9524745146433512, "num_pages": 18}
98
- {"doc_id": "2796137", "mean_proba": 0.9830208400885264, "num_pages": 15}
99
- {"doc_id": "1323410", "mean_proba": 0.971997876962026, "num_pages": 6}
100
- {"doc_id": "7421586", "mean_proba": 0.9990615844726562, "num_pages": 4}
 
 
 
 
1
+ {"doc_id": "7569662", "mean_proba": 0.997152994076411, "num_pages": 6}
2
+ {"doc_id": "3327355", "mean_proba": 0.971603728334109, "num_pages": 60}
3
+ {"doc_id": "4971236", "mean_proba": 0.9744334369897842, "num_pages": 14}
4
+ {"doc_id": "904681", "mean_proba": 0.8895234366257985, "num_pages": 3}
5
+ {"doc_id": "1836869", "mean_proba": 0.8915040567517281, "num_pages": 8}
6
+ {"doc_id": "3884483", "mean_proba": 0.9886121082873572, "num_pages": 42}
7
+ {"doc_id": "7334540", "mean_proba": 0.9987283796072006, "num_pages": 2}
8
+ {"doc_id": "199837", "mean_proba": 0.972013454545628, "num_pages": 11}
9
+ {"doc_id": "1168240", "mean_proba": 0.997676532715559, "num_pages": 8}
10
+ {"doc_id": "6016935", "mean_proba": 0.9086370897643706, "num_pages": 34}
11
+ {"doc_id": "4523932", "mean_proba": 0.8853498250246048, "num_pages": 12}
12
+ {"doc_id": "1885128", "mean_proba": 0.8906278218093672, "num_pages": 19}
13
+ {"doc_id": "393503", "mean_proba": 0.9894506980975468, "num_pages": 6}
14
+ {"doc_id": "3193892", "mean_proba": 0.994115799665451, "num_pages": 6}
15
+ {"doc_id": "6813453", "mean_proba": 0.9944966547191144, "num_pages": 8}
16
+ {"doc_id": "6426180", "mean_proba": 0.9054293377058846, "num_pages": 7}
17
+ {"doc_id": "500594", "mean_proba": 0.8765622690320015, "num_pages": 20}
18
+ {"doc_id": "3495399", "mean_proba": 0.9484576561621256, "num_pages": 14}
19
+ {"doc_id": "6218816", "mean_proba": 0.9807607705394428, "num_pages": 12}
20
+ {"doc_id": "4239587", "mean_proba": 0.9929047502004182, "num_pages": 26}
21
+ {"doc_id": "7089754", "mean_proba": 0.9980307880676154, "num_pages": 33}
22
+ {"doc_id": "230879", "mean_proba": 0.9979761976462144, "num_pages": 13}
23
+ {"doc_id": "3148538", "mean_proba": 0.8522171427806219, "num_pages": 6}
24
+ {"doc_id": "2865847", "mean_proba": 0.9540428519248962, "num_pages": 2}
25
+ {"doc_id": "1772599", "mean_proba": 0.967163262458948, "num_pages": 26}
26
+ {"doc_id": "4579765", "mean_proba": 0.9975770957329694, "num_pages": 17}
27
+ {"doc_id": "7342615", "mean_proba": 0.9989697635173798, "num_pages": 13}
28
+ {"doc_id": "3224121", "mean_proba": 0.9953744477695888, "num_pages": 9}
29
+ {"doc_id": "2634535", "mean_proba": 0.8585290673531984, "num_pages": 19}
30
+ {"doc_id": "1259736", "mean_proba": 0.9411229211212004, "num_pages": 149}
31
+ {"doc_id": "4753802", "mean_proba": 0.9529886152595282, "num_pages": 16}
32
+ {"doc_id": "2092097", "mean_proba": 0.9999509155750276, "num_pages": 10}
33
+ {"doc_id": "7563909", "mean_proba": 0.9961217548166004, "num_pages": 35}
34
+ {"doc_id": "1973835", "mean_proba": 0.9965763115569164, "num_pages": 38}
35
+ {"doc_id": "3764397", "mean_proba": 0.9639263451099396, "num_pages": 12}
36
+ {"doc_id": "4174805", "mean_proba": 0.961553082746618, "num_pages": 17}
 
 
 
37
  {"doc_id": "565481", "mean_proba": 0.9996201992034912, "num_pages": 4}
38
+ {"doc_id": "339686", "mean_proba": 0.99940624833107, "num_pages": 4}
39
+ {"doc_id": "3603622", "mean_proba": 0.986870400607586, "num_pages": 12}
40
+ {"doc_id": "1223200", "mean_proba": 0.921684911617866, "num_pages": 13}
41
+ {"doc_id": "6697438", "mean_proba": 0.8831472884524952, "num_pages": 22}
42
+ {"doc_id": "6293016", "mean_proba": 0.9937160038031064, "num_pages": 13}
43
+ {"doc_id": "2918349", "mean_proba": 0.9964490483204524, "num_pages": 6}
44
+ {"doc_id": "1808935", "mean_proba": 0.8179429352283478, "num_pages": 15}
45
+ {"doc_id": "3295535", "mean_proba": 0.9766881407962904, "num_pages": 36}
46
+ {"doc_id": "3723390", "mean_proba": 0.9646476159493128, "num_pages": 6}
47
+ {"doc_id": "3438890", "mean_proba": 0.9317811191082, "num_pages": 10}
48
+ {"doc_id": "3251599", "mean_proba": 0.9982280433177948, "num_pages": 7}
49
+ {"doc_id": "276850", "mean_proba": 0.998798830942674, "num_pages": 11}
50
+ {"doc_id": "4994833", "mean_proba": 0.998662695288658, "num_pages": 8}
51
+ {"doc_id": "6743834", "mean_proba": 0.9462647065520288, "num_pages": 4}
52
+ {"doc_id": "825446", "mean_proba": 0.911532184252372, "num_pages": 13}
53
+ {"doc_id": "6838080", "mean_proba": 0.9977947799488902, "num_pages": 32}
54
+ {"doc_id": "7604074", "mean_proba": 0.993289651779028, "num_pages": 13}
55
+ {"doc_id": "5647681", "mean_proba": 0.8188157608875861, "num_pages": 13}
56
+ {"doc_id": "6724971", "mean_proba": 0.9973200474466596, "num_pages": 21}
57
+ {"doc_id": "822209", "mean_proba": 0.9606187572846046, "num_pages": 26}
58
+ {"doc_id": "6470527", "mean_proba": 0.9523119360208512, "num_pages": 10}
59
+ {"doc_id": "305525", "mean_proba": 0.9593745129449028, "num_pages": 14}
60
+ {"doc_id": "4808858", "mean_proba": 0.9973472654819489, "num_pages": 1}
61
+ {"doc_id": "879988", "mean_proba": 0.9999016573031744, "num_pages": 6}
62
+ {"doc_id": "5893423", "mean_proba": 0.8851124197244644, "num_pages": 8}
63
+ {"doc_id": "2515306", "mean_proba": 0.9936049990355968, "num_pages": 8}
64
+ {"doc_id": "3450399", "mean_proba": 0.9164572358131408, "num_pages": 3}
65
+ {"doc_id": "503850", "mean_proba": 0.850948595574924, "num_pages": 14}
66
+ {"doc_id": "7618174", "mean_proba": 0.968082971572876, "num_pages": 25}
67
+ {"doc_id": "6332297", "mean_proba": 0.9995016634464264, "num_pages": 10}
68
+ {"doc_id": "1131204", "mean_proba": 0.9986574649810792, "num_pages": 9}
69
+ {"doc_id": "7774888", "mean_proba": 0.9932470734302814, "num_pages": 26}
70
+ {"doc_id": "3461249", "mean_proba": 0.965369338169694, "num_pages": 16}
71
+ {"doc_id": "6772016", "mean_proba": 0.9998548328876496, "num_pages": 6}
72
+ {"doc_id": "3147359", "mean_proba": 0.935382536866448, "num_pages": 22}
73
+ {"doc_id": "692782", "mean_proba": 0.9983158349990844, "num_pages": 5}
74
+ {"doc_id": "213815", "mean_proba": 0.9887832254171371, "num_pages": 6}
75
+ {"doc_id": "598288", "mean_proba": 0.9456826879366024, "num_pages": 370}
76
+ {"doc_id": "2763593", "mean_proba": 0.9346814155578612, "num_pages": 6}
77
+ {"doc_id": "7642017", "mean_proba": 0.9068743400275708, "num_pages": 16}
78
+ {"doc_id": "2909063", "mean_proba": 0.9436505238215128, "num_pages": 3}
79
+ {"doc_id": "2590883", "mean_proba": 0.9913453095489078, "num_pages": 18}
80
+ {"doc_id": "5718759", "mean_proba": 0.9894875958561896, "num_pages": 8}
81
+ {"doc_id": "250922", "mean_proba": 0.9653690908406232, "num_pages": 37}
82
+ {"doc_id": "6859646", "mean_proba": 0.8590865818893207, "num_pages": 34}
83
+ {"doc_id": "4872902", "mean_proba": 0.9977191276848316, "num_pages": 8}
84
+ {"doc_id": "6535016", "mean_proba": 0.8693619471675944, "num_pages": 53}
85
+ {"doc_id": "6080891", "mean_proba": 0.9882900759577752, "num_pages": 20}
86
+ {"doc_id": "1117773", "mean_proba": 0.8673347405024937, "num_pages": 7}
87
+ {"doc_id": "4409661", "mean_proba": 0.9995375604465092, "num_pages": 58}
88
+ {"doc_id": "450057", "mean_proba": 0.9242128431797028, "num_pages": 34}
89
+ {"doc_id": "6026555", "mean_proba": 0.9770366474986076, "num_pages": 4}
90
+ {"doc_id": "7081601", "mean_proba": 0.9810841436739322, "num_pages": 27}
91
+ {"doc_id": "6376231", "mean_proba": 0.9136003098554082, "num_pages": 36}
92
+ {"doc_id": "4364106", "mean_proba": 0.939790777862072, "num_pages": 16}
93
+ {"doc_id": "5396754", "mean_proba": 0.9896406365765466, "num_pages": 9}
94
+ {"doc_id": "3226827", "mean_proba": 0.991751770178477, "num_pages": 6}
95
+ {"doc_id": "2779026", "mean_proba": 0.9987509816884994, "num_pages": 20}
96
+ {"doc_id": "174916", "mean_proba": 0.9992432685998768, "num_pages": 13}
97
+ {"doc_id": "88513", "mean_proba": 0.9059492826461792, "num_pages": 5}
98
+ {"doc_id": "7100604", "mean_proba": 0.9811755003113496, "num_pages": 38}
99
+ {"doc_id": "6324184", "mean_proba": 0.9863000105727804, "num_pages": 11}
100
+ {"doc_id": "3594993", "mean_proba": 0.974035307765007, "num_pages": 8}
samples/texts_merged/1168240.md ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Approximating quadratic programming with bound constraints
5
+
6
+ Yinyu Ye*
7
+
8
+ Department of Management Sciences
9
+ The University of Iowa
10
+ Iowa City, Iowa 52242, U.S.A.
11
+
12
+ March 31, 1997
13
+
14
+ ## Abstract
15
+
16
+ We consider the problem of approximating the global maximum of a quadratic program (QP) with $n$ variables subject to bound constraints. Based on the results of Goemans and Williamson [4] and Nesterov [6], we show that a $4/7$ approximate solution can be obtained in polynomial time.
17
+
18
+ **Key words.** Quadratic programming, global maximizer, approximation algorithm
19
+
20
+ *This author is supported in part by NSF grant DMI-9522507.
21
+ ---PAGE_BREAK---
22
+
23
+ # 1 Introduction
24
+
25
+ Consider the quadratic programming (QP) problem
26
+
27
+ $$
28
+ \begin{array}{ll}
29
+ \text{(QP)} & q(Q) := \text{Maximize} \quad q(x) := x^T Q x \\
30
+ & \text{Subject to} \quad -e \leq x \leq e,
31
+ \end{array}
32
+ $$
33
+
34
+ where $Q \in \mathbb{R}^{n \times n}$ is given and $e \in \mathbb{R}^n$ is the vector of all ones. Let $x = x(Q)$ be a maximizer of the problem. In this paper, without loss of generality, we assume that $x \neq 0$.
35
+
36
+ Normally, there is a linear term in the objective function: $q(x) = x^T Q x + c^T x$. However, the problem can be homogenized as
37
+
38
+ $$
39
+ \begin{array}{ll}
40
+ \text{Maximize} & q(x) := x^T Q x + tc^T x \\
41
+ \text{Subject to} & -e \leq x \leq e, \quad -1 \leq t \leq 1
42
+ \end{array}
43
+ $$
44
+
45
+ by adding a scalar variable $t$. There always is an optimal solution $(x, t)$ for this problem in which $t=1$ or $t=-1$. If $t=1$, then $x$ is also optimal for the non-homogeneous problem; if $t=-1$, then $-x$ is optimal for the non-homogeneous problem. Thus, without loss of generality, we can let $q(x) = x^T Q x$ throughout this paper.
46
+
47
+ The function $q(x)$ has a minimizer and a maximizer over the bounded feasible set $-e \leq x \leq e$. Let $\underline{q} := -q(-Q)$ and $q := q(Q)$ denote their minimal and maximal objective values, respectively. An $\epsilon$-maximal solution or $\epsilon$-maximizer, $\epsilon \in [0, 1]$, for (QP) is defined as an $-e \leq x \leq e$ such that
48
+
49
+ $$ \frac{q - q(x)}{q - \underline{q}} \leq \epsilon. $$
50
+
51
+ Note that according to this definition any feasible solution $x$ is a 1-maximizer.
52
+
53
+ Recently, there were several significant results on approximating specific quadratic problems. Goemans and Williamson [4] proved an approximation result for the Maxcut problem where $\epsilon \le 1 - 0.878$. Nesterov [6] generalized their result to approximating a boolean QP problem
54
+
55
+ $$
56
+ \begin{array}{ll}
57
+ \text{Maximize} & q(x) = x^T Q x \\
58
+ \text{Subject to} & |x_j| = 1, \ j = 1, \dots, n.
59
+ \end{array}
60
+ $$
61
+
62
+ where $\epsilon \le 4/7$. Some negative results were given by Bellare and Rogaway [1].
63
+
64
+ There are also several approximation algorithms developed for approximating (QP) when the feasible set is a convex polytope. Pardalos and Rosen [8] developed a partitioning and linear programming based algorithm with an approximation bound $\epsilon = \epsilon(Q)$, where $\epsilon(Q)$, a function of the QP data, is less than 1. Vavasis [10] and Ye [11] developed a polynomial-time algorithm, based on solving a ball-constrained quadratic problem, to compute an $(1 - \frac{1}{n^2})$-maximal solution. When
65
+ ---PAGE_BREAK---
66
+
67
+ the polytope is $\{x: -e \le x \le e\}$, Fu, Luo and Ye [2] further proved a $(1-\frac{1}{n})$ polynomial-time algorithm.
68
+
69
+ In this note, we extend Goemans and Williamson and Nesterov's result to approximating (QP). We establish the same 4/7 result for approximating this problem. This result is based on a modification of Goemans and Williamson's algorithm and a generalization of Nesterov's proving technique.
70
+
71
+ ## 2 Positive Semi-Definite Relaxation
72
+
73
+ The approximation algorithm for (QP) is to solve a positive semi-definite programming (SDP) relaxation problem
74
+
75
+ $$
76
+ \begin{array}{l@{\quad}c@{\quad}l}
77
+ \text{(SDP)} & s(Q) := & \underset{X}{\text{Maximize}} \quad \langle Q, X \rangle \\
78
+ & & \text{Subject to} \quad d(X) \le e, \ X \succeq 0.
79
+ \end{array}
80
+ \tag{1}
81
+ $$
82
+
83
+ Here, $X \in \Re^{n \times n}$ is a symmetric matrix, $\langle \cdot, \cdot \rangle$ is the matrix inner product $\langle Q, X \rangle = \operatorname{trace}(QX)$, $d(X)$ is a vector containing the diagonal components of $X$, and $X \succeq Z$ means that $X - Z$ is positive semi-definite.
84
+
85
+ The dual of the problem is
86
+
87
+ $$
88
+ \begin{array}{l@{\quad}c@{\quad}l}
89
+ s(Q) = & \text{Minimize} & e^T y \\
90
+ & \text{Subject to} & D(y) \succeq Q, \ y \ge 0,
91
+ \end{array}
92
+ \tag{2}
93
+ $$
94
+
95
+ where $D(y)$ is the diagonal matrix such that $d(D(y)) = y \in \Re^n$. Denote by $X(Q)$ and $y(Q)$ an optimal solution pair for the primal (1) and dual (2).
96
+
97
+ The positive semi-definite relaxation was first proposed by Lovász and Schrijver [5], also see recent papers by Fujie and Kojima [3] and Poljak, Rendl and Wolkowicz [9]. This relaxation problem can be solved in polynomial time, e.g., see Nesterov and Nemirovskii [7].
98
+
99
+ We have the following relations between (QP) and (SDP).
100
+
101
+ **Proposition 1** Let $q = q(Q), \underline{q} = -q(-Q), s = s(Q), \underline{s} = -s(-Q), \text{ and } \underline{y} = -y(-Q)$. Then,
102
+
103
+ 1. $\underline{q}$ is the minimal objective value of $x^T Q x$ in the feasible set of (QP);
104
+
105
+ 2. $\underline{s} = e^T \underline{y}$ and it is the minimal objective value of $\langle Q, X \rangle$ in the feasible set of (SDP);
106
+
107
+ 3.
108
+
109
+ $$ \underline{s} = -s(-Q) \le \underline{q} = -q(-Q) \le q(Q) = q \le s(Q) = s. $$
110
+ ---PAGE_BREAK---
111
+
112
+ **Proof.** The first and second statements are straightforward to verify. Let $X = x(Q)x(Q)^T \in \mathbb{R}^{n \times n}$.
113
+ Then $X \succeq 0$, $d(X) \le e$ and $\langle Q, X \rangle = q(x(Q)) = q(Q)$. Thus, we have $q(Q) = \langle Q, X \rangle \le s(Q)$.
114
+ Similarly, we can prove $q(-Q) \le s(-Q)$, or $-s(-Q) \le -q(-Q)$. ■
115
+
116
+ In what follows, we also let $x = x(Q)$, $X = X(Q)$. Since $X$ is positive semi-definite, there is a factorization matrix $V = (v_1, \dots, v_n) \in \mathbb{R}^{n \times n}$, i.e., $v_j$ is the $j$th column of $V$, such that $X = V^T V$.
117
+ The algorithm, similar to Goemans and Williamson [4], generates a random vector $u$ uniformly distributed on an $n$-dimensional unit ball and then assigns
118
+
119
+ $$ \hat{x} = D\sigma(V^Tu), \quad (3) $$
120
+
121
+ where
122
+
123
+ $$ D = \operatorname{diag}(\|v_1\|, \dots, \|v_n\|) = \operatorname{diag}(\sqrt{x_{11}}, \dots, \sqrt{x_{nn}}), $$
124
+
125
+ and for any $x \in \mathbb{R}^n$, $\sigma(x)$ is the vector whose components are $\operatorname{sign}(x_j)$, $j = 1, \dots, n$, that is,
126
+ $\operatorname{sign}(x_j) = 1$ if $x_j \ge 0$ and $\operatorname{sign}(x_j) = -1$ otherwise.
127
+
128
+ It is easily seen that $\hat{x}$ is a feasible point for (QP) and we will show later that the expected objective value, $E_u q(\hat{x})$, satisfies
129
+
130
+ $$ \frac{q - E_u q(\hat{x})}{q - \underline{q}} \le \frac{\pi}{2} - 1 \le \frac{4}{7}. $$
131
+
132
+ # 3 Approximation Analysis
133
+
134
+ The following two lemmas are analogues to Lemmas 1 and 2 of Nesterov [6].
135
+
136
+ **Lemma 1**
137
+
138
+ $$
139
+ \begin{array}{l@{\quad}l}
140
+ \text{Maximize} & \sigma(V^T u)^T D Q D \sigma(V^T u) \\
141
+ \text{Subject to} & \|v_j\| \le 1, \quad j = 1, \dots, n, \quad \|u\| = 1, \\
142
+ \text{where} & D = \operatorname{diag}(\|v_1\|, \dots, \|v_n\|).
143
+ \end{array}
144
+ $$
145
+
146
+ **Proof.** Since $D\sigma(V^Tu)$ is a feasible point for (QP) for any feasible $V$ and $u$, we have
147
+
148
+ $$ q(Q) \geq \sigma(V^T u)^T D Q D \sigma(V^T u). $$
149
+
150
+ On the other hand, for any fixed $u$ with $\|u\| = 1$, we let $v_j = x_j u$, $j = 1, \dots, n$. Then $D\sigma(V^Tu) = x$.
151
+ Thus, for a particular feasible $V$ and $u$ we have
152
+
153
+ $$ q(Q) = q(x) \leq \sigma(V^T u)^T D Q D \sigma(V^T u). $$
154
+
155
+ These two give the desired result. ■
156
+ ---PAGE_BREAK---
157
+
158
+ **Lemma 2**
159
+
160
+ $$
161
+ \begin{array}{ll}
162
+ q(Q) = & \text{Maximize} \quad \mathbb{E}_u(\sigma(V^T u)^T D Q D \sigma(V^T u)) \\
163
+ & \text{Subject to} \quad \|v_j\| \le 1, j = 1, \dots, n, \\
164
+ \text{where} & \\
165
+ & D = \text{diag}(\|v_1\|, \dots, \|v_n\|).
166
+ \end{array}
167
+ $$
168
+
169
+ **Proof.** Again, since $D\sigma(V^T u)$ is a feasible point for (QP), we have for any feasible $V$
170
+
171
+ $$
172
+ q(Q) \geq \mathbb{E}_u (\sigma(V^T u)^T D Q D \sigma(V^T u)).
173
+ $$
174
+
175
+ On the other hand, for any fixed $u$ with $\|u\| = 1$, we have
176
+
177
+ $$
178
+ \mathbb{E}_u (\sigma(V^T u)^T D Q D \sigma(V^T u)) = \sum_{i=1}^{n} \sum_{j=1}^{n} q_{ij} \|v_i\| \|v_j\| \mathbb{E}_u (\sigma(v_i^T u) \sigma(v_j^T u)). \quad (4)
179
+ $$
180
+
181
+ Let us choose $v_i = \frac{x_i}{\|x\|} x$, $i = 1, \dots, n$. Then
182
+
183
+ $$
184
+ \mathbb{E}_u(\sigma(v_i^T u)\sigma(v_j^T u)) = \begin{cases} 1 & \text{if } \sigma(x_i) = \sigma(x_j) \\ -1 & \text{otherwise.} \end{cases}
185
+ $$
186
+
187
+ Thus,
188
+
189
+ $$
190
+ \|v_i\| \|v_j\| \mathbb{E}_u (\sigma(v_i^T u) \sigma(v_j^T u)) = x_i x_j
191
+ $$
192
+
193
+ which implies that for a particular feasible V
194
+
195
+ $$
196
+ q(Q) = q(x) \leq \mathbb{E}_u (\sigma(V^T u)^T D Q D \sigma(V^T u)).
197
+ $$
198
+
199
+ These two give the desired result. ■
200
+
201
+ For any function of one variable $f(t)$ and $X \in \mathbb{R}^{n \times n}$, let $f[X] \in \mathbb{R}^{n \times n}$ be the matrix with the components $f(x_{ij})$. For example, $[X]^p$ denotes a matrix with the components $x_{ij}^p$. Nesterov [6] has also proved the next technical lemma.
202
+
203
+ **Lemma 3** Let $X \succeq 0$ and $d(X) \le e$. Then $\arcsin[X] \succeq X$. ■
204
+
205
+ Now we are ready to prove the following theorem.
206
+
207
+ **Theorem 1**
208
+
209
+ $$
210
+ \begin{array}{ll}
211
+ q(Q) = & \text{Supremum} \quad \frac{2}{\pi} \langle Q, D \arcsin[D^{-1} X D^{-1}] D \rangle \\
212
+ & \text{Subject to} \quad d(X) \le e, \ X \succ 0,
213
+ \end{array}
214
+ $$
215
+
216
+ where
217
+
218
+ $$
219
+ D = \operatorname{diag}(\sqrt{x_{11}}, \ldots, \sqrt{x_{nn}}).
220
+ $$
221
+ ---PAGE_BREAK---
222
+
223
+ **Proof.** For any $X = V^T V > 0$, $d(X) \le e$, we have
224
+
225
+ $$E_u(\sigma(v_i^T u)\sigma(v_j^T u)) = 1 - 2\text{Pr}\{\sigma(v_i^T u) \neq \sigma(v_j^T u)\} = 1 - 2\text{Pr}\{\sigma(\frac{v_i^T u}{\|v_i\|}) \neq \sigma(\frac{v_j^T u}{\|v_j\|})\}.$$
226
+
227
+ From Lemma 1.2 of Goemans and Williamson [4], we have
228
+
229
+ $$\mathrm{Pr}\{\sigma(\frac{v_i^T u}{\|v_i\|}) \neq \sigma(\frac{v_j^T u}{\|v_j\|})\} = \frac{1}{\pi} \arccos(\frac{v_i^T v_j}{\|v_i\|\|v_j\|}).$$
230
+
231
+ Using the above lemma and equality (4) and noting $\arcsin(t)+\arccos(t) = \frac{\pi}{2}$ give the desired result.
232
+
233
+ Theorem 1 leads us to
234
+
235
+ **Theorem 2** We have
236
+
237
+ 1.
238
+
239
+ $$q - \underline{s} \geq \frac{2}{\pi}(s - \underline{s}).$$
240
+
241
+ 2.
242
+
243
+ $$s - \underline{q} \geq \frac{2}{\pi}(s - \underline{s}).$$
244
+
245
+ 3.
246
+
247
+ $$s - \underline{s} \geq q - \underline{q} \geq \frac{4 - \pi}{\pi}(s - \underline{s}).$$
248
+
249
+ **Proof.** Recall $\underline{y} = -y(-Q) \le 0$, $\underline{s} = -s(-Q) = e^T \underline{y}$, and $Q - D(\underline{y}) \succeq 0$. Thus, for any $X \succ 0$, $d(X) \le e$ and $D = \operatorname{diag}(\sqrt{x_{11}}, \dots, \sqrt{x_{nn}})$, we have from Theorem 1
250
+
251
+ $$
252
+ \begin{align*}
253
+ q = q(Q) &\ge \frac{2}{\pi} \langle Q, D \arcsin[D^{-1} X D^{-1}] D \rangle \\
254
+ &= \frac{2}{\pi} \langle Q - D(\underline{y}) + D(\underline{y}), D \arcsin[D^{-1} X D^{-1}] D \rangle \\
255
+ &= \frac{2}{\pi} \left( \langle Q - D(\underline{y}), D \arcsin[D^{-1} X D^{-1}] D \rangle + \langle D(\underline{y}), D \arcsin[D^{-1} X D^{-1}] D \rangle \right) \\
256
+ &\ge \frac{2}{\pi} \left( \langle Q - D(\underline{y}), D (D^{-1} X D^{-1}) D \rangle + \langle D(\underline{y}), D \arcsin[D^{-1} X D^{-1}] D \rangle \right) \\
257
+ &\quad (\text{since } Q - D(\underline{y}) \succeq 0 \text{ and } \arcsin[D^{-1} X D^{-1}] \succeq D^{-1} X D^{-1}) \\
258
+ &= \frac{2}{\pi} \left( \langle Q - D(\underline{y}), X \rangle + \langle D(\underline{y}), D \arcsin[D^{-1} X D^{-1}] D \rangle \right) \\
259
+ &= \frac{2}{\pi} \left( \langle Q, X \rangle - \langle D(\underline{y}), X \rangle + \langle D(\underline{y}), D \arcsin[D^{-1} X D^{-1}] D \rangle \right) \\
260
+ &= \frac{2}{\pi} \left( \langle Q, X \rangle - \underline{y}^T d(X) + \underline{y}^T d(D \arcsin[D^{-1} X D^{-1}] D) \right)
261
+ \end{align*}
262
+ $$
263
+ ---PAGE_BREAK---
264
+
265
+ $$
266
+ \begin{align*}
267
+ &= \frac{2}{\pi} \left( \langle Q, X \rangle - \underline{y}^T d(X) + \underline{y}^T \left( \frac{\pi}{2} d(X) \right) \right) \\
268
+ &= \frac{2}{\pi} \left( \langle Q, X \rangle + \left(\frac{\pi}{2} - 1\right) \underline{y}^T d(X) \right) \\
269
+ &\geq \frac{2}{\pi} \left( \langle Q, X \rangle + \left(\frac{\pi}{2} - 1\right) \underline{y}^T e \right) \\
270
+ &\quad (\text{since } 0 \le d(X) \le e \text{ and } \underline{y} \le 0) \\
271
+ &= \frac{2}{\pi} \left( \langle Q, X \rangle + \left(\frac{\pi}{2} - 1\right) \underline{s} \right).
272
+ \end{align*}
273
+ $$
274
+
275
+ Let $X$ converge to $\overline{X}$; then $\langle Q, X \rangle \to \overline{s}$ and we have the desired first inequality.
276
+
277
+ Replacing $Q$ with $-Q$ proves the second inequality in the theorem.
278
+
279
+ Adding the first two inequalities gives the third statement in the theorem. ■
280
+
281
+ The result indicates that the positive semi-definite relaxation gap $\overline{s} - \underline{s}$ is a constant-factor approximation of $\overline{q} - \underline{q}$.
282
+
283
+ The following corollary can be derived from the proof of the above theorem.
284
+
285
+ **Corollary 1** Let $X = V^T V \succ 0$, $d(X) \le e$, $D = \operatorname{diag}(\sqrt{x_{11}}, \dots, \sqrt{x_{nn}})$, and $\hat{x} = D\sigma(V^T u)$ where $u$ with $\|u\| = 1$ is a random vector uniformly distributed on the unit ball. Moreover, let $X \to \overline{X}$. Then,
286
+
287
+ $$
288
+ \lim_{X \to \overline{X}} E_u(q(\hat{x})) = \lim_{X \to \overline{X}} \frac{2}{\pi} \langle Q, D \arcsin[D^{-1}XD^{-1}]D \rangle \geq \frac{2}{\pi}\overline{s} + (1 - \frac{2}{\pi})\underline{s}.
289
+ $$
290
+
291
+ Finally, we have
292
+
293
+ **Theorem 3** Let $\hat{x}$ be generated as above with $X \to \overline{X}$. Then
294
+
295
+ $$
296
+ \frac{\overline{q} - E_u q(\hat{x})}{\overline{q} - \underline{q}} \leq \frac{\pi}{2} - 1.
297
+ $$
298
+
299
+ **Proof.** Noting that
300
+
301
+ $$
302
+ \overline{s} \ge \overline{q} \ge \frac{2}{\pi}\overline{s} + (1-\frac{2}{\pi})\underline{s} \ge (1-\frac{2}{\pi})\overline{s} + \frac{2}{\pi}\underline{s} \ge \underline{q} \ge \underline{s}
303
+ $$
304
+
305
+ we have
306
+
307
+ $$
308
+ \begin{align*}
309
+ \frac{\overline{q} - E_u q(\hat{x})}{\overline{q} - \underline{q}} &\le \frac{\overline{q} - \frac{2}{\pi}\overline{s} - (1 - \frac{2}{\pi})\underline{s}}{\overline{q} - \underline{q}} \\
310
+ &\le \frac{\overline{q} - \frac{2}{\pi}\overline{s} - (1 - \frac{2}{\pi})\underline{s}}{\overline{q} - (1 - \frac{2}{\pi})\overline{s} - \frac{2}{\pi}\underline{s}} \\
311
+ &\le \frac{\overline{s} - \frac{2}{\pi}\overline{s} - (1 - \frac{2}{\pi})\underline{s}}{\overline{s} - (1 - \frac{2}{\pi})\overline{s} - \frac{2}{\pi}\underline{s}}
312
+ \end{align*}
313
+ $$
314
+ ---PAGE_BREAK---
315
+
316
+ $$
317
+ \begin{aligned}
318
+ &= \frac{(1 - \frac{2}{\pi})(s - s)}{\frac{2}{\pi}(s - s)} \\
319
+ &= \frac{(1 - \frac{2}{\pi})}{\frac{2}{\pi}} = \frac{\pi}{2} - 1.
320
+ \end{aligned}
321
+ $$
322
+
323
+ References
324
+
325
+ [1] M. Bellare and P. Rogaway, "The complexity of approximating a nonlinear program," *Mathematical Programming* 69 (1995) 429-442.
326
+
327
+ [2] M. Fu, Z.-Q. Luo and Y. Ye, "Approximation algorithms for quadratic programming," manuscript, Department of Electrical and Computer Engineering, McMaster University, Hamilton, Ontario, CANADA L8S 4K1, 1996.
328
+
329
+ [3] T. Fujie and M. Kojima, "Semidefinite programming relaxation for nonconvex quadratic programs," Research Report B-298, Dept. of Mathematical and Computing Sciences, Tokyo Institute of Technology, Meguro, Tokyo 152, May 1995. To appear in *Journal of Global Optimization*.
330
+
331
+ [4] M. X. Goemans and D. P. Williamson, "Improved approximation algorithms for Maximum Cut and Satisfiability problems using semidefinite programming," *Journal of ACM* 42 (1995) 1115-1145.
332
+
333
+ [5] L. Lovász and A. Schrijver, "Cones of matrices and set-functions, and 0-1 optimization," *SIAM Journal on Optimization* 1 (1990) 166-190.
334
+
335
+ [6] Yu. E. Nesterov, "Quality of semidefinite relaxation for nonconvex quadratic optimization," CORE Discussion Paper, #9719, Belgium, March 1997.
336
+
337
+ [7] Yu. E. Nesterov and A. S. Nemirovskii, *Interior Point Polynomial Methods in Convex Programming: Theory and Algorithms* (SIAM Publications, SIAM, Philadelphia, 1993).
338
+
339
+ [8] P. M. Pardalos and J. B. Rosen, *Constrained Global Optimization: Algorithms and Applications* (Springer-Verlag, Lecture Notes in Computer Sciences 268, 1987).
340
+
341
+ [9] S. Poljak, F. Rendl and H. Wolkowicz, "A recipe for semidefinite relaxation for 0-1 quadratic programming," *Journal of Global Optimization* 7 (1995) 51-73.
342
+
343
+ [10] S. A. Vavasis, *Nonlinear Optimization: Complexity Issues* (Oxford Science, New York, 1991).
344
+
345
+ [11] Y. Ye, "On affine scaling algorithms for nonconvex quadratic programming," *Mathematical Programming* 56 (1992) 285-300.
samples/texts_merged/1223200.md ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Slotted Aloha as a game with partial information
5
+
6
+ Eitan Altman<sup>a</sup>, Rachid El Azouzi<sup>a,b,*</sup>, Tania Jiménez<sup>c</sup>
7
+
8
+ <sup>a</sup> INRIA, 2004 Route des Lucioles, Projet Mistral, 06902 Sophia Antipolis Cote d'Azur Cedex, France
9
+
10
+ <sup>b</sup> LIA/CERI, Université d'Avignon, Agroparc, BP 1228, 84911 Avignon, France
11
+
12
+ <sup>c</sup> CESIMO, Facultad de Ingeniería, Universidad de Los Andes, Mérida, Venezuela
13
+
14
+ Received 13 March 2003; received in revised form 19 February 2004; accepted 25 February 2004
15
+ Available online 7 April 2004
16
+
17
+ Responsible Editor: E.K.P. Chong
18
+
19
+ ## Abstract
20
+
21
+ This paper studies distributed choice of retransmission probabilities in slotted ALOHA. Both the cooperative team problem as well as the noncooperative game problem are considered. Unlike some previous work, we assume that mobiles do not know the number of backlogged packets at other nodes. A Markov chain analysis is used to obtain optimal and equilibrium retransmission probabilities and throughput. We then investigate the impact of adding re-transmission costs (which may represent the disutility for power consumption) on the equilibrium and show how this pricing can be used to make the equilibrium throughput coincide with the optimal team throughput.
22
+
23
+ © 2004 Elsevier B.V. All rights reserved.
24
+
25
+ **Keywords:** Slotted Aloha; Nash equilibrium; Markov chain; Pricing
26
+
27
+ ## 1. Introduction
28
+
29
+ Aloha [4] and slotted Aloha [14] have long been used as random distributed medium access protocols for radio channels. They are in use in both satellite as well as cellular telephone networks for the sporadic transfer of data packets. In these protocols, packets are transmitted sporadically by various users. If packets are sent simultaneously by more than one user then they collide. After the end of the transmission of a packet, the transmitter receives the information on whether there has been a collision (and retransmission is needed) or whether it was well received. All packets involved in a collision are assumed to be corrupted and are retransmitted after some random time. We focus in this paper on the slotted Aloha (which is known to have a better achievable throughput than the unslotted version, [5]) in which time is
30
+
31
+ * Corresponding author. Address: INRIA, 2004 Route des Lucioles, Projet Mistral, 06902 Sophia Antipolis Cote d'Azur Cedex, France. Tel.: +33-492387628; fax: +33-492387971.
32
+ * E-mail address: rachid.elazouzi@sophia.inria.fr (R. El Azouzi).
33
+ ---PAGE_BREAK---
34
+
35
+ divided into units. At each time unit a packet may be transmitted, and at the end of the time interval, the sources get the feedback on whether there was zero, one or more transmissions (collision) during the time slot. A packet that arrives at a source is immediately transmitted. Packets that are involved in a collision are backlogged and are scheduled for retransmission after a random time.
36
+
37
+ The determination of the above random time can be considered as a stochastic control problem. The information structure, however, is not a classical one: sources do not have full state information as they do not know how many packets are backlogged. Nor do they know how many packets have been involved in a collision.
38
+
39
+ We study this control problem in two different frameworks:
40
+
41
+ 1. As a team problem, i.e. where there is a common goal to all nodes in the network (such as maximizing the system throughput).
42
+
43
+ 2. As a problem in a noncooperative framework: each node wishes to maximize its own throughput. This gives rise to a game theoretical formulation.
44
+
45
+ Our main finding is that as the workload increases (i.e. as the packet arrival rate increases), sources become more aggressive at equilibrium in the game setting (in comparison with the team problem) and this results in a dramatic decrease in the total system's throughput. To avoid this collapse of system's throughput, we study the effect of adding a cost for transmissions and retransmissions (which can, in particular, represent the battery power cost). We show that this additional cost improves the system's performance and that an appropriate pricing can be chosen that yields an equilibrium performance that coincides with the team one.
46
+
47
+ Previous game formulations of the slotted ALOHA have been proposed in [10–12]. In the two last references, a full information game is considered, in which each user knows how many backlogged packets there are in all the network. Moreover, it is assumed in [11,12] that a packet that is to be transmitted for the first time waits for a random time in the same way as a backlogged packet. Our goal is to study the slotted Aloha avoiding these two assumptions; relaxing the assumptions allows to model more accurately the original versions of Aloha, and in particular, relaxing the first assumption allows for more distributed implementations of Aloha. In [10] it is assumed that nodes have always packets to send. Thus there is only one trivial state in the system (all nodes are backlogged) which is known to all users.
48
+
49
+ For more background on the use of stochastic control and of game theory in communication networks, see [1–3]. We note that the game formulation of our problem is similar to game formulation of retrial queues, in which customers retry to make a call after some random time if they find the line busy [7,9]. The difference is, however, that in retrial queues there are no collisions.
50
+
51
+ The structure of the paper is as follows. We begin by introducing in Section 2 the general model and formulate the team and the game problems. We provide a Markov analysis for both the team and the game problem. This analysis is used in Section 3 to numerically study and compare the properties of the team and the game solutions. The model with pricing is then introduced in Section 4 and is investigated numerically in Section 5. We end with a concluding section.
52
+
53
+ ## 2. Model and problem formulation
54
+
55
+ We use a Markovian model based on [5]. We assume that there are a finite number of sources without buffers. The arrival flow of packets to source *i* follows a Bernoulli process with parameter $q_a$ (i.e. at each time slot, there is a probability $q_a$ of a new arrival at a source, and all arrivals are independent). As long as there is a packet at a source (i.e. as long as it is not successfully transmitted) new packets to that
56
+ ---PAGE_BREAK---
57
+
58
+ source are blocked and lost.¹ The arrival processes to different sources are independent. A backlogged packet at source *i* is retransmitted with probability $q_r^i$. We shall restrict in our control and game problems to simple policies in which $q_r^i$ does not change in time. Since sources are symmetric, we shall further restrict to finding a symmetric optimal solution, that is retransmission probabilities $q_r^i$ that do not depend on *i*. We assume that if more than one source attempt transmission in a time slot, all packets are lost.
59
+
60
+ **Remark 1.** Other models for ALOHA have also been studied in the literature. A commonly used model is one with infinitely many sources [5] with no buffers, in which the process of total number of (non-blocked) arrivals at a time slot is Poisson with parameter $\lambda$ and the process of combined transmissions and retransmissions attempts forms a Poisson process with parameter *G*. Analysis of this model shows that it has two quasi-stable operation modes (as long as $\lambda < \exp(-1)$), one corresponding to a congested system (in which there are many backlogged packets and many retransmissions) and one corresponding to an uncongested system (with a small number of backlogged packets). In this model, both operation points turn out to have the same throughput. Our model with finitely many sources also has two quasi-stable operation modes, but the throughput during congestion periods is lower than in the non-congested periods [5]. We also note that in the case of infinitely many nodes, retransmissions with a fixed positive probability render the system unstable [8]. Finally, we should mention that there are also models in which not all packets involved in a collision are corrupted and lost, see [15] and references therein.
61
+
62
+ **Remark 2.** Quite frequently one uses the ALOHA protocol for sporadic transmissions of signaling packets such as packets for making reservation for a dedicated channel for other transmissions (that do not use ALOHA), see e.g. the description of the SPADE on demand transmission protocol for satellite communications in [16]. In the context of signaling, it is natural to assume that a source does not start generating a new signaling packet (e.g. a new reservation) as long as the current signaling packet is not transmitted. In that case, the process of attempts to retransmit a new packet from a source after the previous packet has been successfully transmitted coincides with our no buffer model.
63
+
64
+ We shall use as the state of the system the number of backlogged nodes (or equivalently, of backlogged packets) at the beginning of a slot, and denote it frequently with *n*. For any choice of values $q_r^j \in (0, 1]$, the state process is a Markov chain that contains a single ergodic chain (and possibly transient states as well). Define $q_r$ to be the vector of retransmission probabilities for all users (whose $j$th entry is $q_r^j$). Let $\pi(q_r)$ be the corresponding vector of steady state probabilities where its $n$th entry, $\pi_n(q_r)$, denotes the probability of $n$ backlogged nodes. When all entries of $q_r$ are the same, say $q$, we shall write (with some abuse of notation) $\pi(q)$ instead of $\pi(q_r)$.
65
+
66
+ We introduce further notation. Assume that there are $n$ backlogged packets, and all use the same value $q_r$ as retransmission probability. Let $Q_r(i,n)$ be the probability that $i$ out of the $n$ backlogged packets retransmit at the slot. Then
67
+
68
+ $$ Q_r(i, n) = \binom{n}{i} (1 - q_r)^{n-i} [q_r]^i. \quad (1) $$
69
+
70
+ ¹ In considering the number of packets in the system, this assumption is equivalent to saying that a source does not generate new packets as long as a previous packet is not successfully transmitted.
71
+ ---PAGE_BREAK---
72
+
73
+ Assume that *m* is the number of nodes and let $Q_a(i, n)$ be the probability that *i* unbacklogged nodes transmit packets in a given slot (i.e. that *i* arrivals occurred at nodes without backlogged packets). Then
74
+
75
+ $$
76
+ Q_a(i,n) = \binom{m-n}{i} (1-q_a)^{m-n-i} [q_a]^i. \tag{2}
77
+ $$
78
+
79
+ Let $Q_r(1,0) = 0$ and $Q_a(1,m) = 0$.
80
+
81
+ In case all nodes use the same value *q* for *q*<sub>*r*</sub>, the transition probabilities of the Markov chain are given by
82
+ [5]:
83
+
84
+ $$
85
+ P_{n,n+i}(q) =
86
+ \begin{cases}
87
+ Q_a(i,n), & 2 \le i \le m-n, \\
88
+ Q_a(1,n)[1-Q_r(0,n)], & i=1, \\
89
+ Q_a(1,n)Q_r(0,n) + Q_a(0,n)[1-Q_r(1,n)], & i=0, \\
90
+ Q_a(0,n)Q_r(1,n), & i=-1.
91
+ \end{cases}
92
+ $$
93
+
94
+ The system throughput (defined as the sample average of the number of packets that are successfully transmitted) is given almost surely by the constant
95
+
96
+ $$
97
+ \mathrm{thp}(q) = \sum_{n=1}^{m} \pi_n(q) [P_{n,n-1}(q) + Q_a(1,n)Q_r(0,n)] + \pi_0(q) Q_a(1,0) = q_a \sum_{n=0}^{m} \pi_n(q)(m-n).
98
+ $$
99
+
100
+ Note: the first equality follows from the fact that if the state at the beginning of the slot is *n* > 0 then there is a departure of a backlogged packet during that slot with probability *P*<sub>*n*,n−1</sub>(*q*), and of a new arriving packet with probability *Q*<sub>*a*</sub>(1,*n*)*Q*<sub>*r*</sub>(0,*n*); Moreover, if the state is 0 then there is a departure with probability *Q*<sub>*a*</sub>(1,0). The second equality simply expresses the expected number of arrivals at a time slot (which actually enter the system), which should equal to the expected number of departures (and thus the throughput) at stationary regime.
101
+
102
+ The team problem is therefore given as the solution of the optimization problem:
103
+
104
+ $$
105
+
106
+ \max_q \quad \text{thp}(q) \quad \text{s.t.} \quad
107
+ \left\{
108
+ \begin{array}{l@{\quad}l@{\quad}l}
109
+ \pi(q) &= \pi(q)P(q), & \\
110
+ \pi_n(q) &\ge 0, & n = 0, \dots, m, \\
111
+ \displaystyle\sum_{n=0}^m \pi_n(q) &= 1.
112
+ \end{array}
113
+ \right.
114
+
115
+ $$
116
+
117
+ A solution to the team problem can be obtained by computing recursively the steady state probabilities, as in Problem 4.1 in [5], and thus obtain an explicit expression for $thp(q)$ as a function of $q$.
118
+
119
+ *Singularity at q = 0.* The only point where *P* does not have a single stationary distribution is at *q* = 0, where it has two absorbing states: *n* = *m* and *n* = *m* − 1. All other states are transient (for any *q*<sub>*a*</sub> > 0), and the probability to end at one of the absorbing states depend on the initial distribution of the Markov chain. We note that if the state *m* − 1 is reached then the throughput is *q*<sub>*a*</sub> w.p.1, where as if the state *m* is reached then the throughput equals 0. It is thus a deadlock state. For *q*<sub>*a*</sub> > 0 and *q*<sub>*r*</sub> = 0, the deadlock state is reached with positive probability from any initial state other than *m* − 1. We shall therefore exclude *q*<sub>*r*</sub> = 0 and optimize only on the range ε ≤ *q*<sub>*r*</sub> ≤ 1. We choose throughout the paper ε = 10⁻⁴.
120
+
121
+ *Existence of a solution.* The steady state probabilities π(q) are continuous over 0 < q ≤ 1. Since this is not a closed interval, a solution need not exist. However, as we restrict to the closed interval q ∈ [ε, 1] where ε > 0, an optimal solution indeed exists. Note also that the limit lim<sub>q→0</sub> π(q) exists since π(q) is a rational function of q in the neighborhood of zero. Therefore for any δ > 0, there exists some q > 0 which is δ-optimal. ($q^* > 0$ is said to be δ-optimal if it satisfies thp($q^*$) ≥ thp(q) − δ for all q ∈ (0, 1].)
122
+
123
+ Next, we formulate the game problem. For a given policy vector $\mathbf{q}_r$ of retransmission probabilities for all users (whose $j$th entry is $q_r^j$), define $([\mathbf{q}_r]^{-i}, \hat{q}_r^i)$ to be a retransmission policy where user $j$ retransmits at a slot with probability $q_r^j$ for all $j \neq i$ and where user $i$ retransmits with probability $\hat{q}_r^i$. Each user $i$ seeks to
124
+ ---PAGE_BREAK---
125
+
126
+ maximize his own throughput $thp_i$. The problem we are interested in is then to find a symmetric equilibrium policy $\mathbf{q}_r^* = (q_r, q_r, \dots, q_r)$ such that for any user $i$ and any retransmission probability $q_r^i$ for that user,
127
+
128
+ $$ \mathrm{thp}_i(\mathbf{q}_r^*) \ge \mathrm{thp}_i([\mathbf{q}_r^*]^{-i}, q_r^i). \quad (3) $$
129
+
130
+ Since we restrict to symmetric $\mathbf{q}_r^*$, we shall also identify it (with some abuse of notation) with the actual transmission probability (which is the same for all users). Next we show how to obtain an equilibrium policy. We first note that due to symmetry, to see whether $\mathbf{q}_r^*$ is an equilibrium it suffices to check (3) for a single player. We shall thus assume that there are $m+1$ users all together, and that the first $m$ users retransmit with a given probability $\mathbf{q}_r^{-(m+1)} = (q^o, \dots, q^o)$ and user $m+1$ retransmits with probability $\mathbf{q}_r^{(m+1)}$. Define the set
131
+
132
+ $$ \mathcal{Q}^{m+1}(\mathbf{q}_r^o) = \underset{q_r^{(m+1)} \in [\epsilon,1]}{\operatorname{argmax}} \, \operatorname{thp}_{m+1}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)}), $$
133
+
134
+ where $\mathbf{q}_r^o$ denotes (with some abuse of notation) the policy where all users retransmit with probability $q_r^o$, and where the maximization is taken with respect to $q_r^{(m+1)}$. Then $q_r^*$ is a symmetric equilibrium if
135
+
136
+ $$ q_r^* \in \mathcal{Q}_r^{m+1}(q_r^*). $$
137
+
138
+ To compute $\mathrm{thp}_{m+1}([\mathbf{q}_r^o]^{-i}, q_r^i)$, we introduce again a Markov chain with a two dimensional state. The first state component corresponds to the number of backlogged packets among the users 1, ..., m, and the second component is the number of backlogged packets (either 1 or 0) of user m + 1. The transition probabilities are given by
139
+
140
+ $$ P_{(n,i),(n+k,j)}(\mathbf{q}_r^o, \mathbf{q}_r^{(m+1)}) = \left\{ \begin{array}{ll} \begin{array}{l} Q_a(k,n), \\ Q_a(k,n)(1-q_a), \\ Q_a(k,n)q_a, \end{array} & i=j=1 \\[2ex] \begin{array}{l} Q_a(1,n)[1-Q_r(0,n)(1-q_r^{(m+1)})], \\ Q_a(1,n)[1-Q_r(0,n)](1-q_a), \\ Q_a(1,n)q_a, \end{array} & i=0,j=1 \\[2ex] \begin{array}{ll} (1-q_r^{(m+1)})Z + q_r(1-Q_r(0,n))Q_a(0,n), & i=j=1 \\ (1-q_a)Z + q_aQ_a(0,n)Q_r(0,n), & i=j=0 \\ q_aQ_a(0,n)[1-Q_r(0,n)], & i=0,j=1 \\ q_r^{(m+1)}Q_a(0,n)Q_r(0,n), & i=1,j=0 \\[2ex] Q_a(0,n)Q_r(1,n)(1-q_r^{(m+1)}), & i=j=1 \\ Q_a(0,n)Q_r(1,n)(1-q_a), & i=j=0 \end{array} & k=1, \\[2ex] 0 & k=-1, \end{array} \right. $$
141
+
142
+ where $Z = (Q_a(1,n)Q_r(0,n) + Q_a(0,n))[1 - Q_r(1,n)]$ and where $Q_a$ and $Q_r$ are given in (1) and (2), respectively (with $q_r^o$ replacing $q_r$).
143
+
144
+ The throughput of user $m+1$ is given by
145
+
146
+ $$ \mathrm{thp}_{m+1}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)}) = q_a \sum_{n=0}^{m} \pi_{n,0}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)}). \quad (4) $$
147
+
148
+ ### 3. Numerical investigation
149
+
150
+ In this section we shall obtain the retransmission probabilities which solve the team and the game problem. We investigate their dependence and the dependence of the throughput that they imply on the arrival probabilities $q_a$ and on the number of mobiles.
151
+ ---PAGE_BREAK---
152
+
153
+ Figs. 1 and 2 provide the total throughput and optimal retransmission probabilities $q_r$ for $m = 2, m = 10$ and $m = 50$ for the team problem, as a function of the arrival probability $q_a$. We see that in heavy traffic, the throughput decreases when the number of mobiles increases. Also, we observe that the optimal retransmission probability becomes smaller and smaller as the arrival probability increases or the number of mobiles increases: as the system becomes more congested (larger arrival probability or large number of mobiles) the transmission probability decreases so as to counter expected collisions. But for light traffic, we observe that the slotted Aloha is very efficient when the number of mobiles is large: in that regime, the optimal throughput achieved increases as the number of mobiles increases.
154
+
155
+ The intuitive reason that the team optimal retransmission probabilities are close to 0 when arrival probabilities are close to one is that if a mobile finds all other mobiles backlogged then it can transmit for very long time all its packets at a rate of almost one packet per slot, without fearing collisions. Since its arrival probabilities are close to one, then throughput is not wasted during such periods. (Note however that a throughput close to 1 cannot be achieved since with some nonnegligible probability, all mobiles will be backlogged during long periods when retransmission probabilities are very low.) The behavior we see could remind of CDMA systems in which best performance is sometime achieved by “time-sharing” the access between users in order to decrease interference [13].
156
+
157
+ Next, we show in Figs. 3 and 4 the total optimal throughput versus the number of mobiles for some fixed arrival probabilities ($q_a = 0.7, 0.8, 0.9$). In Fig. 3 we observe that the optimal throughput converges to some value when the number of mobiles goes to infinity, and convergence is faster when the arrival probability $q_a$ is larger. In fact, for heavy traffic with large number of mobiles, the optimal retransmission probability is seen to be $\epsilon$. Thus, the steady state probabilities $\pi$ are then close to $\pi_m = 1/2, \pi_{m-1} = 1/2$ and $\pi_n = 0$ $\forall n < m-1$. Hence the total throughput becomes $q_a/2$. If we look at the value of the throughput on the y-axis of Fig. 3 we observe that the throughput indeed converges to 0.35 (resp. 0.4, 0.45) for $q_a = 0.7$ (resp. $q_a = 0.8, q_a = 0.9$).
158
+
159
+ Now, we present the results we obtain when we use the game problem. Figs. 5 and 6 show total throughput at equilibrium (obtained by multiplying the expression in Eq. (4) by the number of mobiles) and the retransmission probability at equilibrium as a function of the arrival probability for the game scenario. We see that for game problem, in contrast to team problem, the equilibrium retransmission becomes more and more aggressive as the arrival probability increases or the number of mobiles increases which explains the dramatic decrease in the system's throughput. Moreover, the equilibrium retransmission quickly increases to 1 when the number of mobiles increases. In particular *the throughput is zero when $m > 5$ for each arrival probability*. In conclusion, the game solution is very inefficient for heavy traffic, and even for light traffic it becomes inefficient when the number of mobiles is larger than five.
160
+
161
+ We note that a similar aggressive behavior at equilibrium has been observed in [6] in the context of flow control by several competing users that share a common drop tail buffer. However in that context, the most aggressive behavior (of transmission at maximum rate) is the “equilibrium” solution for *any arrival rate*, and not just at high rates as in our case. We may thus wonder why retransmission probabilities of 1 are not an equilibrium in our slotted Aloha problem (in the case of light traffic). An intuitive reason could be that if a mobile deviates and retransmits with probability one, (while other continue to retransmit with the equilibrium probability $q^* < 1$) the total congestion in the system (i.e. the number of backlogged mobiles) increases; this provokes more retransmissions from other mobiles which then causes sufficiently more collisions of packets from the deviating mobile so as to cause a decrease in its throughput.
162
+
163
+ ## 4. Adding costs for retransmissions
164
+
165
+ In this section we consider the problem where there is an extra cost $\theta$ per each transmission and retransmission. This can represent the disutility for the consumption of battery energy, which is a scarce
166
+ ---PAGE_BREAK---
167
+
168
+ Fig. 1. Optimal throughput for the team case as a function of the arrival probabilities $q_a$ for $m = 2, 10, 50$.
169
+
170
+ Fig. 2. The optimal retransmission probabilities in the team case as a function of the arrival probabilities $q_a$ for $m = 2, 10, 50$.
171
+
172
+ Fig. 3. Optimal throughput for the team case as a function of the number of mobiles $m$ for $q_a = 0.7, 0.8, 0.9$.
173
+
174
+ Fig. 4. The optimal retransmission probabilities in the team case as a function of the number of mobiles $m$ for $q_a = 0.7, 0.8, 0.9$.
175
+
176
+ Fig. 5. Optimal throughput for the game case as a function of the arrival probabilities $q_a$ for $m = 2, 4, 6$.
177
+
178
+ Fig. 6. The optimal retransmission probabilities in the game case as a function of the arrival probabilities $q_a$ for $m = 2, 4, 6$.
179
+ ---PAGE_BREAK---
180
+
181
+ resource. For a given symmetric $q$ for all users, the steady-state retransmission cost is $\theta q \sum_{n=0}^{m} \pi_n(q)n$, where as the transmission cost of arriving packets (i.e. packets that enter the system and are not rejected) is $\theta \text{thp}(q)$. (This is because the expected number of arrival packets equals to the expected number of departing packets at steady-state, and each time a packet arrives at the system it is immediately transmitted.)
182
+
183
+ Thus the new team problem is
184
+
185
+ $$ \max_q \left\{ \text{thp}(q)(1-\theta) - \theta q \sum_{n=0}^{m} \pi_n(q)n \right\}. $$
186
+
187
+ For the noncooperative problem, the retransmission cost for a symmetric retransmission policy $q_r^o$ of users 1, ..., m and a retransmission probability $q_r^{(m+1)}$ of user $m+1$ is
188
+
189
+ $$ \theta q_r^{(m+1)} \sum_{n=0}^{m} \pi_{n,1}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)}). $$
190
+
191
+ User $m+1$ is thus faced with the problem:
192
+
193
+ $$ \max_{q_r^{m+1}} J_{m+1}(q_r^o, q_r^{(m+1)}) $$
194
+
195
+ where
196
+
197
+ $$ J_{m+1}(q_r^o, q_r^{(m+1)}) = \text{thp}_{m+1}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)})(1-\theta) - \theta q_r^{(m+1)} \sum_{n=0}^{m} \pi_{n,1}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)}). $$
198
+
199
+ Define as we did before
200
+
201
+ $$ \bar{D}_r^{m+1}(q_r^o) = \underset{q_r^{(m+1)} \in [\epsilon, 1]}{\operatorname{argmax}} \left( J_{m+1}([\mathbf{q}_r^o]^{-(m+1)}, q_r^{(m+1)}) \right). $$
202
+
203
+ Then we seek for the value $q_r^*$ of retransmission probability that satisfies
204
+
205
+ $$ q_r^* \in \bar{D}_r^{m+1}(q_r^*), $$
206
+
207
+ which is the Nash equilibrium for the game problem.
208
+
209
+ # 5. Numerical investigation
210
+
211
+ In this section we obtain the retransmission probabilities which solve the team and the game problems with the extra transmission costs. We shall investigate the dependence of the solution on the value $\theta$.
212
+
213
+ In Figs. 7–12 we depict the throughput obtained at the optimal solution and the optimal retransmission probabilities, respectively, as a function of the arrival probability, for the team problem with $m = 2, 10, 50$, for various values of $\theta$. We see that both the throughput as well as the retransmission probabilities are monotone decreasing in the cost. This can be expected since retransmissions become more costly with increasing $\theta$. An interesting feature is that for any fixed $\theta \neq 0$, the retransmission probabilities first increase in the arrival probability and then decrease. For $\theta = 0$, in contrast, the optimal retransmission probability decreases in the arrival probability (which is natural since congestion in the system increases as $q_a$ increases).
214
+
215
+ Next we consider the game problem with $m = 2, 10, 50$ mobiles.
216
+
217
+ Figs. 13–18 show the impact of $\theta$ on the total throughput and equilibrium retransmission probability $q_r$, as a function of the arrival $q_a$. We see that increasing the cost $\theta$ results in decreasing the retransmission probabilities. Furthermore, with the extra cost, the equilibrium retransmission probability becomes smaller and smaller as the cost $\theta$ increases. We see also that indeed the throughput is improved considerably by adding a cost on
218
+ ---PAGE_BREAK---
219
+
220
+ Fig. 7. Throughput at optimal $q_r$ for the team case as a function of $q_a$ for $m = 2$ and $\theta = 0, 0.4, 0.7, 0.9$.
221
+
222
+ Fig. 8. The optimal retransmission probabilities in the team case as a function of $q_a$ for $m = 2$ and $\theta = 0, 0.4, 0.7, 0.9$.
223
+
224
+ Fig. 9. Throughput at optimal $q_r$ for the team case as a function of $q_a$ for $m = 10$ and $\theta = 0, 0.4, 0.7, 0.9$.
225
+
226
+ Fig. 10. The optimal retransmission probabilities in the team case as a function of $q_a$ for $m = 10$ and $\theta = 0, 0.4, 0.7, 0.9$.
227
+
228
+ Fig. 11. Throughput at optimal $q_r$ for the team case as a function of $q_a$ for $m = 50$ and $\theta = 0, 0.4, 0.7, 0.9$.
229
+
230
+ Fig. 12. The optimal retransmission probabilities in the team case as a function of $q_a$ for $m = 50$ and $\theta = 0, 0.4, 0.7, 0.9$.
231
+ ---PAGE_BREAK---
232
+
233
+ Fig. 13. Total throughput for the game case as a function of the arrival probabilities $q_a$ for $m = 2$ (number of mobiles) and $\theta = 0, 0.4, 0.7, 0.9$.
234
+
235
+ Fig. 14. The equilibrium retransmission probabilities in the game case as function of the arrival probabilities $q_a$ for $m = 2$ (number of mobiles) and $\theta = 0, 0.4, 0.7, 0.9$.
236
+
237
+ Fig. 15. Total throughput for the game case as a function of the arrival probabilities $q_a$ for $m = 10$ (number of mobiles) and $\theta = 0, 0.4, 0.7, 0.9$.
238
+
239
+ Fig. 16. The equilibrium retransmission probabilities in the game case as function of the arrival probabilities $q_a$ for $m = 10$ (number of mobiles) and $\theta = 0, 0.4, 0.7, 0.9$.
240
+
241
+ Fig. 17. Total throughput for the game case as a function of the arrival probabilities $q_a$ for $m = 50$ (number of mobiles) and $\theta = 0, 0.4, 0.7, 0.9$.
242
+
243
+ Fig. 18. The equilibrium retransmission probabilities in the game case as function of the arrival probabilities $q_a$ for $m = 50$ (number of mobiles) and $\theta = 0, 0.4, 0.7, 0.9$.
244
+ ---PAGE_BREAK---
245
+
246
+ Fig. 19. The retransmission cost $\theta$ such that the optimal retransmission in the game coincides with that of the original team problem, as function of the arrival probabilities $q_a$ for $m = 2, 10, 50$.
247
+
248
+ retransmission, especially for large arrival probabilities or for $m > 3$. However, we believe that this poor performance of the game problem can potentially be remedied by using this extra cost, even with a large number of mobiles. We also observe that for different values of $q_a$, different costs $\theta$ give the best throughput. For example, in Fig. 13, for $q_a = 0.4$, $\theta = 0.4$ gives the best throughput and for $q_a = 0.9$, $\theta = 0.9$ gives the best throughput. We then compute the cost $\theta$ that is necessary for the equilibrium retransmission probabilities to coincide with those obtained for the team problem. This is the value of $\theta$ that will yield the optimal system throughput. The results are presented in Fig. 19 for $m = 2, 10$ and $50$.
249
+
250
+ From Fig. 19, we see that when the number of mobiles is large ($\ge 10$), the value of $\theta$ that gives the team solution depends less and less on the number of mobiles. This is an appealing property since it suggests that for a large number of mobiles ($m \ge 10$), we may have a pricing choice $\theta$ that can be chosen in a robust way, and may perform close to the team case even if the number of mobiles changes.
251
+
252
+ ## 6. Concluding remarks
253
+
254
+ We have studied three approaches for choosing retransmission probabilities in a slotted Aloha system. First, we studied the team problem, then the noncooperative game problem. The objective was initially to maximize the throughput. We saw that as the arrival probabilities increased, the behavior of mobiles became more and more aggressive (as compared to the team problem) which resulted in a global deterioration of the system throughput. This is in contrast to the team problem in which throughput increased with the arrival probabilities. We also considered additional costs on transmissions and showed numerically that pricing could be used to enforce an equilibrium whose throughput corresponds to the team optimal solution.
255
+
256
+ ## Acknowledgements
257
+
258
+ This work was partially supported by the EURO NGI network of excellence as well as by the PRIXNET ARC Inria collaboration grant.
259
+ ---PAGE_BREAK---
260
+
261
+ ## References
262
+
263
+ [1] E. Altman, Applications of Markov decision processes in communication networks: a survey, in: E. Feinberg, A. Shwartz (Eds.), Markov Decision Processes, Models, Methods, Directions, and Open Problems, Kluwer, Dordrecht, 2001, pp. 488–536.
264
+
265
+ [2] E. Altman, T. Boulogne, R. El Azouzi, T. Jiménez, L. Wynter, A survey on networking games in telecommunications, Computers and Operations Research, in press. doi:10.1016/j.cor.2004.06.005.
266
+
267
+ [3] E. Altman, L. Wynter, Equilibrium, games, and pricing in transportation and telecommunication networks, Crossovers between Transportation Planning and Telecommunications 4 (1) (2004) 7–21.
268
+
269
+ [4] N. Abramson, The Aloha system—another alternative for computer communications, AFIPS Conference Proceedings 36 (1970) 295–298.
270
+
271
+ [5] D. Bertsekas, R. Gallager, Data Networks, Prentice Hall, Englewood Cliffs, NJ, 1987.
272
+
273
+ [6] D. Dutta, A. Goel, J. Heidemann, Oblivious AQM and Nash equilibria, IEEE Infocom, 2003.
274
+
275
+ [7] A. Elcan, Optimal customer return rate for an M/M/1 queueing system with retrials, Probability in the Engineering and Informational Sciences 8 (1994) 521–539.
276
+
277
+ [8] G. Fayolle, E. Gelenbe, J. Labetoulle, Stability and optimal control of the packet switching broadcast channel, Journal of the Association for Computing Machinery 24 (3) (1977) 375–386.
278
+
279
+ [9] R. Hassin, M. Haviv, On optimal and equilibrium retrial rates in a busy system, Probability in the Engineering and Informational Sciences 10 (1996) 223–227.
280
+
281
+ [10] Y. Jin, G. Kesidis, Equilibria of a noncooperative game for heterogeneous users of an ALOHA network, IEEE Communication Letters 6 (7) (2002) 282–284.
282
+
283
+ [11] A.B. MacKenzie, S.B. Wicker, Selfish users in Aloha: a game theoretic approach, in: Proceedings of the Fall 2001 IEEE Vehicular Technology Conference, 2001.
284
+
285
+ [12] A.B. MacKenzie, S.B. Wicker, Stability of multipacket slotted Aloha with selfish users and perfect information, in: Proceedings of IEEE Infocom, San Francisco, 2003.
286
+
287
+ [13] S. Ramakrishna, J.M. Holtzman, A scheme for throughput maximization in a dual-class CDMA system, IEEE Journal Selected Areas in Communication 16 (1998) 830–844.
288
+
289
+ [14] L.G. Roberts, Aloha packet system with and without slots and capture, Tech. Rep. Ass Note 8, Stanford Research Institute, Advance Research Projects Agency, Network Information Center, 1972.
290
+
291
+ [15] J.H. Sarker, M. Hassan, S.J. Halme, Power level selection schemes to improve throughput and stability of slotted ALOHA under heavy load, Computer Communications 25 (2002) 1719–1726.
292
+
293
+ [16] M. Schwartz, Information, Transmission, Modulation and Noise, third ed., McGraw-Hill, New York, 1980.
294
+
295
+ **Eitan Altman** received the B.Sc. degree in Electrical Engineering (1984), the B.A. degree in Physics (1984) and the Ph.D. degree in Electrical Engineering (1990), all from the Technion-Israel Institute, Haifa. In 1990 he further received his B.Mus. degree in Music Composition in Tel-Aviv university. Since 1990, he has been with INRIA (National research institute in informatics and control) in Sophia-Antipolis, France. His current research interests include performance evaluation and control of telecommunication networks, stochastic control and dynamic games. In recent years, he has applied control theoretical techniques in several joint projects with the French telecommunications company—France Télécom. Since 2000, he has also been with CESIMO, Facultad de Ingeniería, Univesidad de Los Andes, Mérida, Venezuela.
296
+
297
+ **Rachid El Azouzi** received the Ph.D. degree in Applied Mathematics from the Mohammed V University, Rabat, Morocco (2000). He joined INRIA (National research institute in informatics and control) Sophia-Antipolis for post-doctoral and Research Engineer positions. Since 2003, he has been a researcher at the University of Avignon, France. His research interests are mobile networks, performance evaluation, the TCP protocol, error control in wireless networks, resource allocation, networking games and pricing.
298
+ ---PAGE_BREAK---
299
+
300
+ Tania Jiménez received her DEA (equivalent to M.Sc.) in 1997, and Ph.D. in 2000, both from the University of Nice Sophia-Antipolis, in Networks and Distributed Systems. Her research interests include simulation as well as optimization and control of telecommunication networks. She has been a teaching and research assistant at the University of Nice, teaching computer science courses. She is now a lecturer at CESIMO, Facultad de Ingenieria, Universidad de Los Andes, Merida, Venezuela.
samples/texts_merged/1259736.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/1772599.md ADDED
@@ -0,0 +1,1063 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ Stability Properties of Linear File-Sharing Networks
5
+
6
+ L. Leskelä, Philippe Robert, Florian Simatos
7
+
8
+ ► To cite this version:
9
+
10
+ L. Leskelä, Philippe Robert, Florian Simatos. Stability Properties of Linear File-Sharing Networks.
11
+ 2009. inria-00401104
12
+
13
+ HAL Id: inria-00401104
14
+
15
+ https://hal.inria.fr/inria-00401104
16
+
17
+ Preprint submitted on 2 Jul 2009
18
+
19
+ **HAL** is a multi-disciplinary open access
20
+ archive for the deposit and dissemination of sci-
21
+ entific research documents, whether they are pub-
22
+ lished or not. The documents may come from
23
+ teaching and research institutions in France or
24
+ abroad, or from public or private research centers.
25
+
26
+ L'archive ouverte pluridisciplinaire **HAL**, est
27
+ destinée au dépôt et à la diffusion de documents
28
+ scientifiques de niveau recherche, publiés ou non,
29
+ émanant des établissements d'enseignement et de
30
+ recherche français ou étrangers, des laboratoires
31
+ publics ou privés.
32
+ ---PAGE_BREAK---
33
+
34
+ # STABILITY PROPERTIES OF LINEAR FILE-SHARING NETWORKS
35
+
36
+ LASSE LESKELÄ, PHILIPPE ROBERT, AND FLORIAN SIMATOS
37
+
38
+ **ABSTRACT.** File-sharing networks are distributed systems used to disseminate files among a subset of the nodes of the Internet. A file is split into several pieces called chunks, the general simple principle is that once a node of the system has retrieved a chunk, it may become a server for this chunk. A stochastic model is considered for arrival times and durations of time to download chunks. One investigates the maximal arrival rate that such a network can accommodate, i.e., the conditions under which the Markov process describing this network is ergodic. Technical estimates related to the survival of interacting branching processes are key ingredients to establish the stability of these systems. Several cases are considered: networks with one and two chunks where a complete classification is obtained and several cases of a network with *n* chunks.
39
+
40
+ ## CONTENTS
41
+
42
+ 1. Introduction 1
43
+
44
+ 2. Analysis of the Single-Chunk Network 4
45
+
46
+ 3. Yule Processes with Deletions 9
47
+
48
+ 4. Analysis of the Multi-Chunk Network 15
49
+
50
+ Appendix A. Proof of Proposition 3.3 21
51
+
52
+ References 24
53
+
54
+ ## 1. INTRODUCTION
55
+
56
+ File-sharing networks are distributed systems used to disseminate information among a subset of the nodes of the Internet (overlay network). The general simple principle is the following: once a node of the system has retrieved a file it becomes a server for this file. The advantage of this scheme is that it disseminates information in a very efficient way as long as the number of servers is growing rapidly. The growth of the number of servers is not necessarily without bounds since a node having this file may stop being a server after some time. These schemes have been used for some time now in peer-to-peer systems such as BitTorrent or Emule, for example to distribute large files over the Internet.
57
+
58
+ An improved version of this principle consists in splitting the original file into several pieces (called “chunks”) so that a given node can retrieve simultaneously several chunks of the same file from different servers. In this case, the rate to get a given file may thus increase significantly. At the same time, the global capacity of
59
+
60
+ *Date:* July 2, 2009.
61
+
62
+ *Key words and phrases.* Peer-to-Peer Algorithms; Killed Branching Processes;
63
+
64
+ Work partially supported by SCALP Project funded by EEC Network of Excellence Euro-FGI, and the Academy of Finland.
65
+ ---PAGE_BREAK---
66
+
67
+ the file-sharing system is also increased since a node becomes a server of a chunk as soon as it has retrieved it and not only when it has the whole file. This improvement has interesting algorithmic implications since each node has to establish a matching between chunks and servers. Strategies to maximize the global efficiency of the file sharing systems have to be devised. See for instance Massoulié and Vojnović [12], Bonald et al. [4] and Massoulié and Twigg [11].
68
+
69
+ The efficiency of these systems can be considered from different points of view.
70
+
71
+ **Transient behavior:** A new file is owned by one node; given that there are potentially *N* other nodes interested in it, how long does it take until a given node retrieves it? How long until a significant fraction $\alpha \in (0, 1]$ of the *N* nodes retrieves it? See Yang and de Veciana [26] and Simatos et al. [22]. See also Robert and Simatos [19].
72
+
73
+ **Stationary behavior:** A constant flow of requests enters, is the capacity of the file-sharing system sufficient to cope with this flow ?
74
+
75
+ In this paper, the stationary behavior is investigated in a stochastic context: arrival times are random as well as chunk transmission times. In this setting mathematical studies are quite scarce, see Qiu and Srikant [17], Simatos et al. [22], Susitaival et al. [24] and references therein. A simple strategy to disseminate chunks is considered: chunks are retrieved sequentially and a given node can be the server of only the last chunk it got. See Massoulié and Vojnović [12] and Parvez et al. [16] for a detailed motivation of this situation.
76
+
77
+ In this paper, the sequential scheme for disseminating a file that is divided into
78
+ n chunks is analyzed. New requests arrive according to a Poisson process at rate $\lambda$,
79
+ and become downloaders of chunk 1. Users who have obtained chunks 1,...,k act
80
+ simultaneously as uploaders of chunk k and downloaders of chunk k + 1, and the
81
+ users who have all the chunks leave the network at rate $\nu$. The transmission rate
82
+ of chunk k is denoted by $\mu_k$, and $x_k$ is the number of users having obtained chunks
83
+ 1,...,k. In this way, the total transmission rate of chunk k in the network is $\mu_k x_k$.
84
+ The flow of users can be modeled as the linear network depicted in Figure 1.
85
+
86
+ FIGURE 1. Transition rates of the linear network outside boundaries.
87
+
88
+ The main problem analyzed in the paper is the determination of a constant $\lambda^*$ such that if $\lambda < \lambda^*$ [resp. $\lambda > \lambda^*$], then the associated Markov process is ergodic [resp. transient]. As it will be seen, the constant $\lambda^*$ may be infinite in some cases so that the file-sharing network is always stable independently of the value of $\lambda$. The main technical difficulty to prove stability/instability results for this class of stochastic networks is that, except for the input, the Markov process has unbounded jump rates, in fact proportional to one of the coordinates of the current state. Note that loss networks have also this characteristic but in this case, the stability problem is trivial since the state space is finite. See Kelly [8].
89
+
90
+ **Fluid Limits for File-Sharing Networks.** Classically, to analyze the stability properties of stochastic networks, one can use the limits of a scaling of the Markov
91
+ ---PAGE_BREAK---
92
+
93
+ process, the so-called fluid limits. The scaling consists in speeding up time by
94
+ the norm $\|x\|$ of the initial state $x$, by scaling the state vector by $1/\|x\|$ and by
95
+ letting $\|x\|$ go to infinity. See Bramson [5], Chen and Yao [6] and Robert [18] for
96
+ example. This scaling is, however, better suited to "locally additive" processes, that
97
+ is, Markov processes that behave locally as random walks. Since the transition rates
98
+ are unbounded, it may occur that the corresponding fluid limits have discontinuities;
99
+ this complicates a lot the analysis of a possible limiting dynamical system. Roughly
100
+ speaking, this is due to the fact that, because of the unbounded transition rates,
101
+ events occur on the time scale $t \mapsto t \log \|x\|$ instead of $t \mapsto \|x\|t$. See the case of
102
+ the $M/M/\infty$ queue in Chapter 9 of Robert [18], and Simatos and Tibi [23] for a
103
+ discussion of this phenomenon in a related context.
104
+
105
+ A "fluid scaling" is nevertheless available for file-sharing networks. A possible description for a possible candidate $(x_i(t))$ for this limiting picture would satisfy the following differential equations,
106
+
107
+ $$ (1) \qquad \begin{cases} \dot{x}_0(t) = \lambda - \mu_1 x_1(t), \\ \dot{x}_i(t) = \mu_i x_i(t) - \mu_{i+1} x_{i+1}(t), & 1 \le i \le n-1, \\ \dot{x}_n(t) = \mu_n x_n(t) - \nu x_n(t). \end{cases} $$
108
+
109
+ For the sake of simplicity the behavior at the boundaries {$x : x_i = 0$}, $i \ge 1$ is
110
+ ignored in the above equations. This has been, up to now, one of the main tools to
111
+ investigate mathematical models of file-sharing networks. See Qiu and Srikant [17],
112
+ Núñez-Queija and Prabhu [15] for example. In the context of loss networks, an
113
+ analogous limiting picture can be rigorously justified when the input rates and
114
+ buffer sizes are scaled by some $N$ and the state variable by $1/N$. This scaling is not
115
+ useful here, since the problem is precisely of determining the values of $\lambda$ for which
116
+ the associated Markov is ergodic whereas in the above scaling $\lambda$ is scaled. From
117
+ this point of view Equations (1) are therefore quite informal. They can nevertheless
118
+ give some insight into the qualitative behavior of these networks but they cannot
119
+ apparently be used to prove stability results. Their interpretation near boundaries
120
+ is in particular not clear.
121
+
122
+ **Interacting Branching Processes.** Since scaling techniques do not apply here, one needs to resort to different techniques to study stability: coupling the linear file-sharing network with interacting branching processes is a key idea. For $i \ge 1$, without the departures the process $(X_i(t))$ would be a branching process where individuals give birth to one child at rate $\mu_i$. This description of such a file-sharing system as a branching process is quite natural. It has been used to analyze the transient behavior of these systems. See Yang and de Veciana [26], Dang *et al.* [7] and Simatos *et al.* [22]. A departure for $(X_i(t))$ can be seen as a death of an individual of class *i* and at the same time as a birth of an individual of class *i*+1. The file-sharing network can thus be described as a system of interacting branching processes with a constant input rate $\lambda$.
123
+
124
+ To tackle the general problem of stability, several key ingredients are used in
125
+ this paper: Lyapunov functions, coupling arguments and precise estimations of
126
+ the growth of a branching process killed by another branching process. As it will
127
+ be seen, several results used come from the branching process formulation of the
128
+ stochastic model. In particular Section 3 is devoted to the derivation of results
129
+ concerning killed branching processes. The stability properties of networks with
130
+ ---PAGE_BREAK---
131
+
132
+ a single-chunk file are analyzed in detail in Section 2. In Section 4, file-sharing networks with $n$ chunks are studied and the case $n = 2$ is investigated thoroughly.
133
+
134
+ **Acknowledgements.**
135
+
136
+ This paper has benefited from various interesting discussions with S. Borst, I. Norros, R. Núñez-Queija, B.J. Prabhu, and H. Reittu.
137
+
138
+ ## 2. ANALYSIS OF THE SINGLE-CHUNK NETWORK
139
+
140
+ This section is devoted to the study of a class of two-dimensional Markov jump processes $(X_0(t), X_1(t))$, the corresponding Q-matrix $\Omega_r$ is given, for $x = (x_0, x_1) \in \mathbb{N}^2$, by
141
+
142
+ $$ (2) \quad \begin{cases} \Omega_r[(x_0, x_1), (x_0 + 1, x_1)] = \lambda, \\ \Omega_r[(x_0, x_1), (x_0 - 1, x_1 + 1)] = \mu r(x_0, x_1) (x_1 \lor 1) \mathbf{1}_{\{x_0>0\}}, \\ \Omega_r[(x_0, x_1), (x_0, x_1 - 1)] = \nu x_1, \end{cases} $$
143
+
144
+ where $x \mapsto r(x)$, referred to as the *rate function*, is some fixed function on $\mathbb{N}^2$ with values in $[0, 1]$ and $n \lor m$ denotes $\max(n, m)$ for $n, m \in \mathbb{N}^2$. This corresponds to a more general model than the linear file-sharing network of Figure 1 in the case $n=1$, where for the sake of simplicity $\mu_1$ is noted $\mu$ in this section.
145
+
146
+ From a modeling perspective, this Markov process describes the following system. Requests for a single file arrive with rate $\lambda$, the first component $X_0(t)$ is the number of requests which did not get the file, whereas the second component is the number of requests having the file and acting as servers until they leave the file-sharing network. The constant $\mu$ can be viewed as the file transmission rate, and $\nu$ as the rate at which servers having all chunks leave. The term $r(x_0, x_1)$ describes the interaction of downloaders and uploaders in the system. The term $x_1 \lor 1$ can be interpreted so that there is one permanent server in the network, which is contacted if there are no other uploader nodes in the system. A related system where there is always one permanent server for the file can be modeled by replacing the term $x_1 \lor 1$ by $x_1 + 1$. See the remark at the end of this section.
147
+
148
+ Several related examples of this class of models have been recently investigated. The case
149
+
150
+ $$ r(x_0, x_1) = \frac{x_0}{x_0 + x_1} $$
151
+
152
+ is considered in Núñez-Queija and Prabhu [15] and Massoulié and Vojnović [12]; in this case the downloading time of the file is neglected. Susitaival et al. [24] analyzes the rate function $r(x)$
153
+
154
+ $$ r(x_0, x_1) = 1 \wedge \left( \alpha \frac{x_0}{x_1} \right) $$
155
+
156
+ with $\alpha > 0$, where $a \land b$ denotes $\min(a, b)$ for $a, b \in \mathbb{R}$. This model allows one to take into account the fact that a request cannot be served by more than one server. See also Qiu and Srikant [17].
157
+
158
+ With a slight abuse of notation, for $0 < \delta \le 1$, the matrix $\Omega_\delta$ will refer to the case when the function $r$ is identically equal to $\delta$. Note that the boundary condition $x_1 \lor 1$ for departures from the first queue prevents the second coordinate from ending up in the absorbing state 0. Other possibilities are discussed at the end of this section. In the following $(X^r(t)) = (X_0^r(t), X_1^r(t))$ [resp. $(X^\delta(t))$] will denote a Markov process with Q-matrix $\Omega_r$ [resp. $\Omega_\delta$].
159
+ ---PAGE_BREAK---
160
+
161
+ **Free Process.** For $\delta > 0$, $Q_\delta$ denotes the following $Q$-matrix
162
+
163
+ $$ (3) \qquad \begin{cases} Q_\delta[(y_0, y_1), (y_0 + 1, y_1)] = \lambda, \\ Q_\delta[(y_0, y_1), (y_0 - 1, y_1 + 1)] = \mu\delta(y_1 \vee 1), \\ Q_\delta[(y_0, y_1), (y_0, y_1 - 1)] = \nu y_1. \end{cases} $$
164
+
165
+ The process $(Y^\delta(t)) = (Y_0^\delta(t), Y_1^\delta(t))$, referred to as the free process, will denote a Markov process with $Q$-matrix $Q_\delta$. Note that the first coordinate $Y_0^\delta$ may become negative. The second coordinate $(Y_1^\delta(t))$ of the free process is a classical birth-and-death process. It is easily checked that if $\rho_\delta$ defined as $\delta\mu/\nu$ is such that $\rho_\delta < 1$, then $(Y_1^\delta(t))$ is an ergodic Markov process converging in distribution to $Y_1^\delta(\infty)$ and that
166
+
167
+ $$ (4) \quad \lambda^*(\delta) \stackrel{\text{def.}}{=} \nu \mathbb{E}(Y_1^\delta(\infty)) = \mu \mathbb{E}(Y_1^\delta(\infty) \vee 1) = \frac{\delta \mu}{(1 - \rho_\delta)(1 - \log(1 - \rho_\delta))}. $$
168
+
169
+ When $\rho_\delta > 1$, then the process $(Y^\delta(t))$ converges almost surely to infinity. In the sequel $\lambda^*(1)$ is simply denoted $\lambda^*$.
170
+
171
+ In the following it will be assumed, Condition (C) below, that the rate function $r$ converges to 1 as the first coordinate goes to infinity; as will be seen, the special case $r \equiv 1$ then plays a special role, and so before analyzing the stability properties of $(X^r(t))$, one begins with an informal discussion when the rate function $r$ is identically equal to 1. Since the departure rate from the system is proportional to the number of requests/servers in the second queue, a large number of servers in the second queue gives a high departure rate, irrespectively of the state of the first queue. The input rate of new requests being constant, the real bottleneck with respect to stability is therefore when the first queue is large. The interaction of the two processes $(X_0^1(t))$ and $(X_1^1(t))$ is expressed through the indicator function of the set $\{X_0^1(t) > 0\}$. The second queue $(X_1^1(t))$ locally behaves like the birth-and-death process $(Y_1^1(t))$ as long as $(X_0^1(t))$ is away from 0. The two cases $\rho_1 > 1$ and $\rho_1 < 1$ are considered.
172
+
173
+ If $\rho_1 > 1$, i.e., $\mu > \nu$, the process $(X_1^1(t))$ is a transient process as long as the first coordinate is non-zero. Consequently, departures from the second queue occur faster and faster. Since, on the other hand, arrivals occur at a steady rate, departures eventually outpace arrivals. The fact that the second queue grows when $(X_0(t))$ is away from 0 stabilizes the system independently of the value of $\lambda$, and so the system should be stable for any $\lambda > 0$.
174
+
175
+ If $\rho_1 < 1$, and as long as $(X_0(t))$ is away from 0, the coordinate $(X_1^1(t))$ locally behaves like the ergodic Markov process $(Y_1^1(t))$. Hence if $(X_0^1(t))$ is non-zero for long enough, the requests in the first queue see in average $\mathbb{E}(Y_1^1(\infty) \vee 1)$ servers which work at rate $\mu$. Therefore, the stability condition for the first queue should be
176
+
177
+ $$ \lambda < \mu \mathbb{E}(Y_1^1(\infty) \vee 1) = \lambda^* $$
178
+
179
+ where $\lambda^* = \lambda^*(1)$ is defined by Equation (4). Otherwise if $\lambda > \lambda^*$, the system should be unstable.
180
+
181
+ **Markovian Notations.** In the following, one will use the following convention, if $(U(t))$ is a Markov process, the index $u$ of $\mathbb{P}_u((U(t)) \in \cdot)$ will refer to the initial condition of this Markov process.
182
+ ---PAGE_BREAK---
183
+
184
+ **Transience and Recurrence Criteria for $(X^r(t))$.**
185
+
186
+ **Proposition 2.1 (Coupling).** If $X^r(0) = Y^1(0) \in \mathbb{N}^2$, there exists a coupling of the processes $(X^r(t))$ and $(Y^1(t))$ such that the relation
187
+
188
+ $$ (5) \qquad X_0^r(t) \ge Y_0^1(t) \text{ and } X_1^r(t) \le Y_1^1(t), $$
189
+
190
+ holds for all $t \ge 0$ and for any sample path.
191
+
192
+ For any $0 \le \delta \le 1$, if
193
+
194
+ $$ \tau_{\delta} = \inf\{t \ge 0 : r(X^r(t)) \le \delta\} \text{ and } \sigma = \inf\{t \ge 0 : X_0^r(t) = 0\}, $$
195
+
196
+ and if $X^1(0) = Y^\delta(0) \in \mathbb{N}^2$ then there exists a coupling of the processes $(X^r(t))$ and $(Y^\delta(t))$ such that, for any sample path, the relation
197
+
198
+ $$ (6) \qquad X_0^r(t) \le Y_0^\delta(t) \text{ and } X_1^r(t) \ge Y_1^\delta(t) $$
199
+
200
+ holds for all $t \le \tau_\delta \wedge \sigma$.
201
+
202
+ *Proof.* Let $X^r(0) = (x_0, x_1)$ and $Y^1(0) = (y_0, y_1)$ be such that $x_0 \ge y_0$ and $x_1 \le y_1$, one has to prove that the processes $(X^r(t))$ and $(Y^1(t))$ can be constructed such that Relation (5) holds at the time of the next jump of one of them. See Leskelä [10] for the existence of couplings using analytical, nonconstructive techniques.
203
+
204
+ The arrival rates in the first queue are the same for both processes. If $x_1 < y_1$, a departure from the second queue for $(Y^1(t))$ or $(X^r(t))$ preserves the order relation (5) and if $x_1 = y_1$, this departure occurs at the same rate for both processes and thus the corresponding instant can be chosen at the same (exponential) time. For the departures from the first to the second queue, the departure rate for $(X^r(t))$ is $\mu r(x_0, x_1)(x_1 \vee 1)\mathbb{I}_{\{x_0>0\}} \le \mu(y_1 \vee 1)$ which is the departure rate for $(Y^1(t))$, hence the corresponding departure instants can be taken in the reverse order so that Relation (5) also holds at the next jump instant. The first part of the proposition is proved.
205
+
206
+ The rest of the proof is done in a similar way: The initial states $X^r(0) = (x_0, x_1)$ and $Y^\delta(0) = (y_0, y_1)$ are such that $x_0 \le y_0$ and $x_1 \ge y_1$. With the killing of the processes at time $\tau_\delta \wedge \sigma$ one can assume additionally that $x_0 \neq 0$ and that the relation $r(x_0, x_1) \ge \delta$ holds; Under these assumptions one can check by inspecting the next transition that (6) holds. The proposition is proved. $\square$
207
+
208
+ **Proposition 2.2.** *Under the condition $\mu < \nu$, the relation*
209
+
210
+ $$ \liminf_{t \to +\infty} \frac{X_0^r(t)}{t} \geq \lambda - \lambda^* $$
211
+
212
+ holds almost surely. In particular, if $\mu < \nu$ and $\lambda > \lambda^*$, then the process $(X^r(t))$ is transient.
213
+
214
+ *Proof.* By Proposition 2.1, one can assume that there exists a version of $(Y^1(t))$ such that $X_0^r(0) = Y_0^1(0)$ and the relation $X_0^r(t) \ge Y_0^1(t)$ holds for any $t \ge 0$. From Definition (3) of the Q-matrix of $(Y^1(t))$, one has, for $t \ge 0$,
215
+
216
+ $$ Y_{0}^{1}(t) = Y_{0}^{1}(0) + N_{\lambda}(t) - A(t), $$
217
+
218
+ where $(N_\lambda(t))$ is a Poisson process with parameter $\lambda$ and $(A(t))$ is the number of arrivals (jumps of size 1) for the second coordinate $(Y_1^1(t))$: in particular
219
+
220
+ $$ \mathbb{E}(A(t)) = \mu \mathbb{E} \left( \int_{0}^{t} Y_{1}^{1}(s) \vee 1 ds \right). $$
221
+ ---PAGE_BREAK---
222
+
223
+ Since $(Y_1^1(t))$ is an ergodic Markov process under the condition $\mu < \nu$, the ergodic theorem in this setting gives that
224
+
225
+ $$ \lim_{t \to +\infty} \frac{1}{t} A(t) = \lim_{t \to +\infty} \frac{1}{t} \mathbb{E}(A(t)) = \mu \mathbb{E} (Y_1^1(\infty) \lor 1) = \lambda^*, $$
226
+
227
+ by Equation (4), hence $(Y_0^1(t)/t)$ converges almost surely to $\lambda - \lambda^*$. The proposition is proved. $\square$
228
+
229
+ The next result establishes the main ergodicity property of this section.
230
+
231
+ **Proposition 2.3.** If the rate function $r$ is such that, for any $x_1 \in \mathbb{N}$,
232
+
233
+ (C)
234
+
235
+ $$ \lim_{x_0 \to +\infty} r(x_0, x_1) = 1, $$
236
+
237
+ and if $\mu \ge \nu$, or if $\mu < \nu$ and $\lambda < \lambda^*$ with
238
+
239
+ $$ \lambda^* = \frac{\mu}{(1-\rho)(1-\log(1-\rho))}, $$
240
+
241
+ and $\rho = \mu/\nu$, then $(X^r(t))$ is an ergodic Markov process.
242
+
243
+ Note that Condition (C) is satisfied for the functions $r$ of the models considered by Núñez-Queija and Prabhu [15] and in Susitaival et al. [24]. See above.
244
+
245
+ *Proof.* If $x = (x_0, x_1) \in \mathbb{R}^2$, $|x|$ denotes the norm of $x$, $|x| = |x_0| + |x_1|$. The proof uses Foster's criterion as stated in Robert [18, Theorem 9.7]. If there exist constants $K_0, K_1, t_0, t_1$ and $\eta > 0$ such that, for $x = (x_0, x_1) \in \mathbb{N}^2$,
246
+
247
+ (8)
248
+
249
+ $$ \mathbb{E}_{(x_0,x_1)}(|X^r(t_1)| - |x|) \leq -t_1, \text{ if } x_1 \geq K_1, $$
250
+
251
+ (9)
252
+
253
+ $$ \mathbb{E}_{(x_0,x_1)}(|X^r(t_0)| - |x|) \leq -\eta t_0, \text{ if } x_0 \geq K_0 \text{ and } x_1 < K_1, $$
254
+
255
+ then the Markov process $(X^r(t))$ is ergodic.
256
+
257
+ Relation (8) is straightforward to establish: if $x_1 \ge K_1$, one gets, by considering only $K_1$ of the $x_1$ initial servers in the second queue and the Poisson arrivals, that
258
+
259
+ $$ \mathbb{E}_{(x_0,x_1)}(|X^r(1)| - |x|) \leq \lambda - K_1(1 - e^{-\nu}), $$
260
+
261
+ hence it is enough to take $t_1 = 1$ and $K_1 = (\lambda+1)/(1-e^{-\nu})$ to have Relation (8).
262
+
263
+ One has therefore to establish Inequality (9). Let $\tau_\delta$ and $\sigma$ be the stopping times introduced in Proposition 2.1, one first proves an intermediate result: for any $t > 0$ and any $x_1 \in \mathbb{N}$,
264
+
265
+ $$ (10) \quad \lim_{x_0 \to +\infty} \mathbb{P}_{(x_0,x_1)}(\sigma \wedge \tau_\delta \le t) = 0. $$
266
+
267
+ Fix $x_1 \in \mathbb{N}$ and $t \ge 0$: for $\varepsilon > 0$, there exists $D_1$ such that
268
+
269
+ $$ \mathbb{P}_{x_1} \left( \sup_{0 \le s \le t} Y_1^1(s) \ge D_1 \right) \le \varepsilon, $$
270
+
271
+ from Proposition 2.1, this gives the relation valid for all $x_0 \ge 0$,
272
+
273
+ $$ \mathbb{P}_{(x_0,x_1)} \left( \sup_{0 \le s \le t} X_1^r(s) \ge D_1 \right) \le \varepsilon. $$
274
+
275
+ By Condition (C), there exists $\gamma \ge 0$ (that depends on $x_1$) such that $r(x_0, x_1) \ge \delta$ when $x_0 \ge \gamma$. As long as $(X^r(t))$ stays in the subset $\{(y_0, y_1) : y_1 \le D_1\}$, the transition rates of the first component $(X_0^r(t))$ are uniformly bounded. Consequently,
276
+ ---PAGE_BREAK---
277
+
278
+ there exists $K$ such that, for $x_0 \ge K$,
279
+
280
+ $$ \mathbb{P}_{(x_0,x_1)} \left[ \inf_{s \le t} X_0^r(s) \le \gamma, \sup_{s \le t} X_1^r(s) \le D_1 \right] \le \varepsilon. $$
281
+
282
+ Relation (10) follows from the last two inequalities and the identity
283
+
284
+ $$ \mathbb{P}_{(x_0,x_1)}(\sigma \wedge \tau_\delta \le t) \le \mathbb{P}_{(x_0,x_1)}\left(\inf_{s \le t} X_0^r(s) \le \gamma\right). $$
285
+
286
+ One returns to the proof of Inequality (9). By definition of the Q-matrix of the process $(X^r(t))$,
287
+
288
+ $$ \mathbb{E}_{(x_0,x_1)}(|X^r(t)| - |x|) = \lambda t - \nu \int_0^t \mathbb{E}_{(x_0,x_1)}(X_1^r(u)) du, x \in \mathbb{N}^2, t \ge 0. $$
289
+
290
+ For any $x \in \mathbb{N}^2$, there exists a version of $(Y^\delta(t))$ with initial condition $Y^\delta(0) = X^r(0) = x$, and such that Relation (6) holds for $t < \tau_\delta \wedge \sigma$, in particular
291
+
292
+ $$ \begin{aligned} \mathbb{E}_x(X_1^r(t)) &\geq \mathbb{E}_x(X_1^r(t); t < \tau_\delta \wedge \sigma) \\ &\geq \mathbb{E}_x(Y_1^\delta(t); t < \tau_\delta \wedge \sigma) = \mathbb{E}_x(Y_1^\delta(t)) - \mathbb{E}_x(Y_1^\delta(t); t \geq \tau_\delta \wedge \sigma). \end{aligned} $$
293
+
294
+ Cauchy-Schwarz inequality shows that for any $t \ge 0$ and $x \in \mathbb{N}^2$
295
+
296
+ $$ \begin{aligned} \int_0^t \mathbb{E}_x(Y_1^\delta(u); \tau_\delta \wedge \sigma \le u) du &\le \int_0^t \sqrt{\mathbb{E}_x\left[(Y_1^\delta(u))^2\right]} \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le u)} du \\ &\le \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le t)} \int_0^t \sqrt{\mathbb{E}_x\left[(Y_1^\delta(u))^2\right]} du, \end{aligned} $$
297
+
298
+ by gathering these inequalities, and by using the fact that the process $(Y_1^\delta(t))$ depends only on $x_1$ and not $x_0$, one finally gets the relation
299
+
300
+ $$ (11) \quad \frac{1}{t} \mathbb{E}_x(|X(t)| - |x|) \leq \lambda - \frac{\nu}{t} \int_0^t \mathbb{E}_{x_1}(Y_1^\delta(u)) du + c(x_1, t) \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le t)} $$
301
+
302
+ with
303
+
304
+ $$ c(x_1, t) = \frac{\nu}{t} \int_{0}^{t} \sqrt{\mathbb{E}_{x_1} [ (Y_1^{\delta}(u))^2 ]} du. $$
305
+
306
+ Two cases are considered.
307
+
308
+ (1) If $\mu > \nu$ and $\delta < 1$ is such that $\delta\mu > \nu$, then the process $(Y_1^\delta(t))$ is transient, so that
309
+
310
+ $$ \lim_{t \to +\infty} \frac{1}{t} \int_0^t \mathbb{E}_{x_1}(Y_1^\delta(u)) du = +\infty, $$
311
+
312
+ for each $x_1 \ge 0$.
313
+
314
+ (2) If $\mu < \nu$, one takes $\delta = 1$, or if $\mu = \nu$, one takes $\delta < 1$ close enough to 1 so that $\lambda < \lambda^*(\delta)$. In both cases, $\lambda < \lambda^*(\delta)$ and the process $(Y_1^\delta(t))$ converges in distribution, hence
315
+
316
+ $$ \lim_{t \to +\infty} \frac{\nu}{t} \int_0^t \mathbb{E}_{x_1}(Y_1^\delta(u)) du = \nu \mathbb{E}(Y_1^\delta(\infty)) = \lambda^*(\delta) > \lambda $$
317
+
318
+ for each $x_1 \ge 0$.
319
+ ---PAGE_BREAK---
320
+
321
+ Consequently in both cases, there exist constants $\eta > 0$, $\delta < 1$ and $t_0 > 0$ such that for any $x_1 \le K_1$,
322
+
323
+ $$ (12) \qquad \lambda - \nu \frac{1}{t_0} \int_0^{t_0} \mathbb{E}_{x_1}(Y_1^\delta(u)) du \le -\eta, $$
324
+
325
+ with Relation (11), one gets that if $x_1 \le K_1$ then
326
+
327
+ $$ \frac{1}{t_0} \mathbb{E}_x(|X(t_0)| - |x|) \le -\eta + c^* \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le t_0)}, $$
328
+
329
+ where $c^* = \max(c(n, t_0), 0 \le n \le K_1)$. By Identity (10), there exists $K_0$ such that, for all $x_0 \ge K_0$ and $x_1 \le K_1$, the relation
330
+
331
+ $$ c^* \sqrt{\mathbb{P}_{(x_0,x_1)}(\tau_{\delta} \wedge \sigma \le t_0)} \le \frac{\eta}{2} $$
332
+
333
+ holds. This relation and the inequalities (12) and (11) give Inequality (9). The proposition is proved. $\square$
334
+
335
+ **Another Boundary Condition.** The boundary condition $x_1 \lor 1$ in the transition rates of $(X(t))$, Equation (2), prevents the second coordinate from ending up in the absorbing state 0. It amounts to assuming that a permanent server gets activated when no node may offer the file. Another way to avoid this absorbing state is to suppose that a permanent node is always active, which gives transition rates with $x_1+1$ instead. This choice was for instance made in Núñez-Queija and Prabhu [15]. All our results apply for this other boundary condition: the only difference is that, when $\nu > \mu$, the value of the threshold $\lambda^*$ of Equation (4) is given by the quantity $\lambda^* = \mu\nu/(\nu - \mu)$.
336
+
337
+ ### 3. YULE PROCESSES WITH DELETIONS
338
+
339
+ This section introduces the tools which are necessary in order to generalize the results of the previous section to the multi-chunk case $n \ge 2$. A Yule process $(Y(t))$ with rate $\mu > 0$ is a Markovian branching process with Q-matrix
340
+
341
+ $$ (13) \qquad q_Y(x, x+1) = \mu x, \quad \forall x \ge 0. $$
342
+
343
+ An individual gives birth to a child, or equivalently splits into two particles, with rate $\mu$. Let $(\sigma_n)$ be the split times of a Yule process started with one particle, it is not difficult to check that, for $n \ge 1$,
344
+
345
+ $$ \sigma_n \stackrel{\text{dist.}}{=} \sum_{\ell=1}^{n} \frac{E_{\ell}^{\mu}}{\ell} \stackrel{\text{dist.}}{=} \max(E_1^{\mu}, \dots, E_n^{\mu}), $$
346
+
347
+ where $(E_{\ell}^{\mu})$ are i.i.d. exponential random variables with parameter $\mu$. If $\lambda > \mu$ then, by using Fubini's Theorem,
348
+
349
+ $$ (14) \qquad
350
+ \begin{aligned}
351
+ \mathbb{E}\left(\sum_{\ell=1}^{+\infty} e^{-\lambda\sigma_\ell}\right) &= \mathbb{E}\left(\sum_{\ell=1}^{+\infty} \int_0^{+\infty} \lambda e^{-\lambda x} 1_{\{\sigma_\ell \le x\}} dx\right) = \int_0^{+\infty} \lambda e^{-\lambda x} \sum_{\ell=1}^{+\infty} \mathbb{P}(\sigma_\ell \le x) dx \\
352
+ &= \int_0^{+\infty} \lambda e^{-\lambda x} \frac{1-e^{-\mu x}}{e^{-\mu x}} dx = \frac{\mu}{\lambda-\mu} < +\infty.
353
+ \end{aligned}
354
+ $$
355
+
356
+ In this section one considers some specific results on variants of this stochastic model when some individuals are killed. In terms of branching processes, this amounts to prune the tree, i.e., to cut some edges of the tree, and the subtree attached to
357
+ ---PAGE_BREAK---
358
+
359
+ it. This procedure is fairly common for branching processes, in the Crump-Mode-
360
+ Jagers model for example, see Kingman [9]. See also Neveu [14] or Aldous and
361
+ Pitman [1]. Two situations are considered: the first one when the deletions are
362
+ part of the internal dynamics, so that each individual dies out after an exponential
363
+ time, and the other when killings are given by an exogenous process and occur at
364
+ fixed (random or deterministic) epochs.
365
+
366
+ **Constant Death Rate and Regeneration.** Let $(Z(t))$ be the birth-and-death process whose $Q$-matrix $Q_Z$ is given by, for $\mu_Z > 0$ and $\nu > 0$,
367
+
368
+ $$
369
+ (15) \qquad q_Z(z, z+1) = \mu_Z(z \lor 1) \text{ and } q_Z(z, z-1) = \nu z.
370
+ $$
371
+
372
+ The lifetime of an individual is exponentially distributed with parameter $\nu$, and the
373
+ process restarts with one individual after some time when it hits 0. This process
374
+ can be described equivalently as a time-changed $M/M/1$ queue or as a sequence
375
+ of independent branching processes. As it will be seen these two viewpoints are
376
+ complementary.
377
+
378
+ In the rest of this part, $\mu_Z$ and $\nu$ are fixed, $(Z(t))$ is the Markov process with $Q$-matrix $Q_Z$, $(\sigma_n)$ is the sequence of times of its positive jumps, the birth instants, and $(B_\sigma(t))$ is the corresponding counting process of $(\sigma_n)$, for $t \ge 0$,
379
+
380
+ $$
381
+ B_{\sigma}(t) = \sum_{i \ge 1} 1_{\{\sigma_i \le t\}}.
382
+ $$
383
+
384
+ **Proposition 3.1 (Queueing Representation).** If $Z(0) = z \in \mathbb{N}$, then
385
+
386
+ $$
387
+ (16) \qquad (Z(t), t \ge 0) \stackrel{\text{dist.}}{=} (L(C(t)), t \ge 0),
388
+ $$
389
+
390
+ where $(L(t))$ is the process of the number of jobs of an $M/M/1$ queue with input
391
+ rate $\mu_Z$ and service rate $\nu$ and with $L(0) = z$ and $C(t) = \inf\{s > 0 : A(s) > t\}$,
392
+ where
393
+
394
+ $$
395
+ A(t) = \int_{0}^{t} \frac{1}{1 \vee L(u)} du.
396
+ $$
397
+
398
+ *Proof.* It is not difficult to check that the process $(M(t)) \stackrel{\text{def.}}{=} (L(C(t)))$ has the Markov property. Let $Q_M$ be its $Q$-matrix. For $z \ge 0$,
399
+
400
+ $$
401
+ \mathbb{P}(L(C(h)) = z + 1 | L(0) = z) = \mu_Z \mathbb{E}(C(h)) + o(h) = \mu_Z (z \vee 1)h + o(h),
402
+ $$
403
+
404
+ hence $q_M(z, z + 1) = \mu_Z(z \vee 1)$. Similarly $q_M(z, z - 1) = \nu z$. The proposition is proved. $\square$
405
+
406
+ **Corollary 3.1.** For any $\gamma > (\mu_Z - \nu) \lor 0$ and $z = Z(0) \in \mathbb{N}$,
407
+
408
+ $$
409
+ (17) \qquad \mathbb{E}_z \left( \sum_{n=1}^{+\infty} e^{-\gamma \sigma_n} \right) < +\infty.
410
+ $$
411
+
412
+ *Proof.* Proposition 3.1 shows that, in particular, the sequences of positive jumps of $(Z(t))$ and of $(L(C(t)))$ have the same distribution. Hence, if $N_{\mu_Z} = (t_n)$ is the arrival process of the $M/M/1$ queue, a Poisson process with parameter $\mu_Z$, then, with the notations of the above proposition, the relation
413
+
414
+ $$
415
+ (\sigma_n) \stackrel{\text{dist.}}{=} (A(t_n))
416
+ $$
417
+ ---PAGE_BREAK---
418
+
419
+ holds. By using standard martingale properties of stochastic integrals with respect to Poisson processes, see Rogers and Williams [20], one gets for $t \ge 0$,
420
+
421
+ $$ (18) \qquad \begin{aligned} \mathbb{E}_z \left( \sum_{n \ge 1} e^{-\gamma A(t_n)} \right) &= \mathbb{E}_z \left( \int_0^\infty e^{-\gamma A(s)} N_{\mu_Z}(ds) \right) = \mu_Z \mathbb{E}_z \left( \int_0^\infty e^{-\gamma A(s)} ds \right) \\ &= \mu_Z \int_0^\infty e^{-\gamma u} \mathbb{E}_z (Z(u) \vee 1) du, \end{aligned} $$
422
+
423
+ where Relation (16) has been used for the last equality. Kolmogorov's equation for the process $(Z(t))$ gives that
424
+
425
+ $$ \begin{aligned} \phi(t) &\stackrel{\text{def.}}{=} \mathbb{E}_z(Z(t)) = \mu_Z \int_0^t \mathbb{E}_z(Z(u) \vee 1) du - \nu \int_0^t \mathbb{E}_z(Z(u)) du \\ &\le (\mu_Z - \nu) \int_0^t \phi(u) du + \mu_Z t, \end{aligned} $$
426
+
427
+ therefore, by Gronwall's Lemma,
428
+
429
+ $$ \phi(t) \le \phi(0) + \mu_Z \int_0^t ue^{(\mu_Z - \nu)u} du \le z + \frac{\mu_Z}{\mu_Z - \nu} te^{(\mu_Z - \nu)t}. $$
430
+
431
+ From Equation (18), one concludes that
432
+
433
+ $$ \mathbb{E}_z \left( \sum_n e^{-\gamma \sigma_n} \right) = \mathbb{E}_z \left( \sum_n e^{-\gamma A(t_n)} \right) < +\infty. $$
434
+
435
+ The proposition is proved. $\square$
436
+
437
+ **A Branching Process.** Before hitting 0, the Markov process $(Z(t))$ whose Q-matrix is given by Relation (15) can be seen as a Bellman-Harris branching process. Its Malthusian parameter is given by $\alpha = \mu_Z - \nu$. See Athreya and Ney [3]. In this setting, it describes the evolution of a population of independent particles: at rate $\lambda \stackrel{\text{def.}}{=} \mu_Z + \nu$, each of these particles either splits into two particles with probability $p \stackrel{\text{def.}}{=} \mu_Z / (\mu_Z + \nu)$ or dies. These processes will be referred to as $(p, \lambda)$-branching processes in the sequel.
438
+
439
+ A $(p, \lambda)$-branching process survives with positive probability only when $p > 1/2$, in which case the probability of extinction $q$ is equal to $q = (1-p)/p = \nu/\mu_Z$. The main (and only) difference with a branching process is that $Z$ regenerates after hitting 0. When it regenerates, it again behaves as a $(p, \lambda)$-branching process (started with one particle), until it hits 0 again.
440
+
441
+ **Proposition 3.2 (Branching Representation).** If $Z(0) = z \in \mathbb{N}$ and $(\tilde{Z}(t))$ is a $(p, \lambda)$-branching process started with $z \in \mathbb{N}$ particles and $\tilde{T}$ its extinction time, then
442
+
443
+ $$ (Z(t), 0 \le t \le T) \stackrel{\text{dist.}}{=} (\tilde{Z}(t), 0 \le t \le \tilde{T}), $$
444
+
445
+ where $T = \inf\{t \ge 0 : Z(t) = 0\}$ is the hitting time of 0 by $(Z(t))$.
446
+
447
+ **Corollary 3.2.** Suppose that $\mu_Z > \nu$. Then $\mathbb{P}_z$-almost surely for any $z \ge 0$, there exists a finite random variable $Z(\infty)$ such that,
448
+
449
+ $$ \lim_{t \to +\infty} e^{-(\mu_Z - \nu)t} Z(t) = Z(\infty) \quad \text{and} \quad Z(\infty) > 0. $$
450
+ ---PAGE_BREAK---
451
+
452
+ *Proof.* When $\mu_Z > \nu$, the process $(Z(t))$ couples in finite time with a supercritical $(p, \lambda)$-branching process $(\tilde{Z}(t))$ conditioned on non-extinction; this follows readily from Proposition 3.2 (or see the Appendix for details). Since for any supercritical $(p, \lambda)$-branching process, $(\exp(-(\mu_Z - \nu)t)\tilde{Z}(t))$ converges almost surely to a finite random variable $\tilde{Z}(\infty)$, positive on the event of non-extinction (see Nerman [13]), one gets the desired result. $\square$
453
+
454
+ Due to its technicality, the proof of the following result is postponed to the Appendix; this result is used in the proof of Proposition 3.5.
455
+
456
+ **Proposition 3.3.** Suppose that $\mu_Z > \nu$, if
457
+
458
+ $$ (19) \qquad \eta^*(x) = \frac{2 - x - \sqrt{x(4-3x)}}{2(1-x)}, \quad 0 < x < 1, $$
459
+
460
+ then for any $0 < \eta < \eta^*(\nu/\mu_Z)$,
461
+
462
+ $$ \sup_{z \ge 0} \left[ \mathbb{E}_z \left( \sup_{t \ge \sigma_1} \left( e^{\eta(\mu_Z - \nu)t} B_\sigma(t)^{-\eta} \right) \right) \right] < +\infty. $$
463
+
464
+ **A Yule Process Killed at Fixed Instants.** In this part, it is assumed that, provided that it is non-empty, at epochs $\sigma_n$, $n \ge 1$, an individual is removed from the population of an ordinary Yule process $(Y(t))$ with rate $\mu_W$ starting with $Y(0) = w \in \mathbb{N}$ individuals. It is assumed that $(\sigma_n)$ is some fixed non-decreasing sequence. It will be shown that the process $(W(t))$ obtained by killing one individual of $(Y(t))$ at each of the successive instants $(\sigma_n)$ survives with positive probability when the series with general term $(\exp(-\mu_W\sigma_n))$ converges.
465
+
466
+ In the following, a related result will be considered in the case where the sequence $(\sigma_n)$ is given by the sequence of birth times of the process $(Z(t))$ introduced above. See Alsmeyer [2] and the references therein for related models.
467
+
468
+ One denotes
469
+
470
+ $$ \kappa = \inf\{n \ge 1 : W(\sigma_n) = 0\}. $$
471
+
472
+ The process $(W(t))$ can be represented in the following way:
473
+
474
+ $$ (20) \qquad W(t) = Y(t) - \sum_{i=1}^{\kappa} X_i(t) 1_{\{\sigma_i \le t\}}, $$
475
+
476
+ where, for $1 \le i \le \kappa$ and $t \ge \sigma_i$, $X_i(t)$ is the total number of children at time $t$ in the original Yule process of the $i$th individual killed at time $\sigma_i$. In terms of trees, $(W(t))$ can be seen as a subtree of $(Y(t))$: for $1 \le i \le \kappa$, $(X_i(t))$ is the subtree of $(Y(t))$ associated with the $i$th particle killed at time $\sigma_i$.
477
+
478
+ It is easily checked that $(X_i(t - \sigma_i), t \ge \sigma_i)$ is a Yule process starting with one individual and, since a killed individual cannot have one of his descendants killed, that the processes
479
+
480
+ $$ (\tilde{X}_i(t)) = (X_i(t + \sigma_i), t \ge 0), \quad 1 \le i \le \kappa, $$
481
+
482
+ are independent Yule processes.
483
+
484
+ For any process $(U(t))$, one denotes:
485
+
486
+ $$ (21) \qquad (M_U(t)) \stackrel{\text{def.}}{=} (e^{-\mu_W t} U(t)). $$
487
+ ---PAGE_BREAK---
488
+
489
+ If $(\tilde{X}(t))$ is a Yule process with rate $\mu_W$, the martingale $(M_{\tilde{X}}(t))$ converges almost surely and in $L_2$ to a random variable $M_{\tilde{X}}(\infty)$ with an exponential distribution with mean $\tilde{X}(0)$, and by Doob's Inequality
490
+
491
+ $$ \mathbb{E}\left(\sup_{t \ge 0} M_{\tilde{X}}(t)^2\right) \le 2 \sup_{t \ge 0} \mathbb{E}\left(M_{\tilde{X}}(t)^2\right) < +\infty. $$
492
+
493
+ See Athreya and Ney [3]. Consequently
494
+
495
+ $$ e^{-\mu_W t} W(t) = M_Y(t) - \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} M_{\tilde{X}_i}(t - \sigma_i) 1_{\{\sigma_i \le t\}}, $$
496
+
497
+ and for any $t \ge 0$,
498
+
499
+ $$ \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} M_{\tilde{X}_i}(t-\sigma_i) 1_{\{\sigma_i \le t\}} \le \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} \sup_{s \ge 0} M_{\tilde{X}_i}(s). $$
500
+
501
+ Assume now that $\sum_{i \ge 1} e^{-\mu_W \sigma_i} < +\infty$: then the last expression is integrable, and Lebesgue's Theorem implies that $(M_W(t)) = (\exp(-\mu_W t)W(t))$ converges almost surely and in $L_2$ to
502
+
503
+ $$ M_W(\infty) = M_Y(\infty) - \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} M_{\tilde{X}_i}(\infty). $$
504
+
505
+ Clearly, for some $w^*$ large enough and then for any $w \ge w^*$, one has
506
+
507
+ $$ \mathbb{E}_w(M_W(\infty)) \ge w - \sum_{i=1}^{+\infty} e^{-\mu_W \sigma_i} > 0, $$
508
+
509
+ in particular $\mathbb{P}_w(M_W(\infty) > 0) > 0$ and $\mathbb{P}_w(W(t) \ge 1, \forall t \ge 0) > 0$. If $Y(0) = w < w^*$ and $\sigma_1 > 0$, then $\mathbb{P}_w(Y(\sigma_1) \ge w^* + 1) > 0$ and therefore, by translation at time $\sigma_1$, the same conclusion holds when the sequence $(\exp(-\mu_W \sigma_i))$ has a finite sum. The following proposition has thus been proved.
510
+
511
+ **Proposition 3.4.** Let $(W(t))$ be a process growing as a Yule process with rate $\mu_W$ and for which individuals are killed at non-decreasing instants $(\sigma_n)$ with $\sigma_1 > 0$. If
512
+
513
+ $$ \sum_{i=1}^{+\infty} e^{-\mu_W \sigma_i} < +\infty, $$
514
+
515
+ then as $t$ gets large, and for any $w \ge 1$, the variable $(\exp(-\mu_W t)W(t))$ converges $\mathbb{P}_w$-almost surely and in $L_2$ to a finite random variable $M_W(\infty)$ such that $\mathbb{P}_w(M_W(\infty) > 0) > 0$.
516
+
517
+ The previous proposition establishes the minimal results needed in Section 4. However, Kolmogorov's Three-Series Theorem, see Williams [25], can be used in conjunction with Fatou's Lemma to show that $(W(t))$ dies out almost surely when the series with general term $(\exp(-\mu_W \sigma_n))$ diverges.
518
+
519
+ **A Yule Process Killed at the Birth Instants of a Bellman-Harris Process.**
520
+
521
+ In this subsection, one considers a Yule process $(Y(t))$ with parameter $\mu_W$ with Q-matrix defined by Relation (13) and an independent Markov process $(Z(t))$ with Q-matrix defined by Relation (15). In particular $\mu_Z - \nu$ is the Malthusian parameter of $(Z(t))$. A process $(W(t))$ is defined by killing one individual of $(Y(t))$ at each of
522
+ ---PAGE_BREAK---
523
+
524
+ the birth instants $(\sigma_n)$ of $(Z(t))$. As before $(B_\sigma(t))$ denotes the counting process associated with the non-decreasing sequence $(\sigma_n)$,
525
+
526
+ $$B_{\sigma}(t) = \sum_{i \ge 1} 1_{\{\sigma_i \le t\}}.$$
527
+
528
+ **Proposition 3.5.** Assume that $\mu_Z - \nu > \mu_W$, and let $H_0$ be the extinction time of $(W(t))$, i.e.,
529
+
530
+ $$H_0 = \inf\{t \ge 0 : W(t) = 0\},$$
531
+
532
+ then the random variable $H_0$ is almost surely finite and:
533
+
534
+ (i) $Z(H_0) - Z(0) \le e^{\mu_W H_0} M_Y^*$ where
535
+ $$M_Y^* = \sup_{t \ge 0} e^{-\mu_W t} Y(t).$$
536
+
537
+ (ii) There exists a finite constant $C$ such that for any $z \ge 0$ and $w \ge 1$,
538
+
539
+ $$ (22) \qquad \mathbb{E}_{(w,z)}(H_0) \le C (\log(w) + 1). $$
540
+
541
+ Note that the subscript $(w, z)$ refers to the initial state of the Markov process $(W(t), Z(t))$.
542
+
543
+ *Proof.* Define $\alpha = \mu_Z - \nu$. Concerning the almost sure finiteness of $H_0$, note that Equation (20) entails that $W(t) \le Y(t) - B_\sigma(t)$ for all $t \ge 0$ on the event $\{H_0 = +\infty\}$. As $t$ goes to infinity, both $\exp(-\mu_W t)Y(t)$ and $\exp(-\alpha t)B_\sigma(t)$ converge almost surely to positive and finite random variables (see Nerman [13]), which implies, when $\alpha = \mu_Z - \nu > \mu_W$, that $W(t)$ converges to $-\infty$ on $\{H_0 = +\infty\}$, and so this event is necessarily of probability zero.
544
+
545
+ The first point (i) of the proposition comes from Identity (20) at $t = H_0$:
546
+
547
+ $$ (23) \qquad Z(H_0) - Z(0) \le B_\sigma(H_0) \le Y(H_0) \le e^{\mu_W H_0} M_Y^*. $$
548
+
549
+ By using the relation $\exp(x) \ge x$, Equation (22) follows from the following bound: for any $\eta < \eta^*(\nu/\mu_Z)$ (recall that $\eta^*$ is given by Equation (19)),
550
+
551
+ $$ (24) \qquad \sup_{w \ge 1, z \ge 0} \left[ w^{-\eta} \mathbb{E}_{(w,z)} \left( e^{\eta(\alpha - \mu_W)H_0} \right) \right] < +\infty. $$
552
+
553
+ So all is left to prove is this bound. Under $\mathbb{P}_{(w,z)}$, $(Y(t))$ can be represented as the sum of $w$ i.i.d. Yule processes, and so $M_Y^* \le M_{Y,1}^* + \cdots + M_{Y,w}^*$ with $(M_{Y,i}^*)$ i.i.d. distributed like $M_Y^*$ under $\mathbb{P}_{(1,z)}$; Inequality (23) then entails that
554
+
555
+ $$ e^{(\alpha - \mu_W)H_0} \le \left( \sum_{i=1}^{w} M_{Y,i}^{*} \right) \times \sup_{t \ge \sigma_1} \left( e^{\alpha t} / B_{\sigma}(t) \right). $$
556
+
557
+ By independence of $(M_{Y,i}^*)$ and $(B_\sigma(t))$, Jensen's inequality gives for any $\eta < 1$:
558
+
559
+ $$ \mathbb{E}_{(w,z)} (e^{\eta(\alpha - \mu_W)H_0}) \le w^\eta (\mathbb{E}(M_{Y,1}^*))^\eta \mathbb{E}_z \left( \sup_{t \ge \sigma_1} (e^{\eta\alpha t} B_\sigma(t)^{-\eta}) \right), $$
560
+
561
+ hence the bound (24) follows from Proposition 3.3. $\square$
562
+
563
+ One concludes this section with a Markov chain which will be used in Section 4. Define recursively the sequence $(V_n)$ by, $V_0 = v$ and
564
+
565
+ $$ (25) \qquad V_{n+1} = \sum_{k=1}^{A_n(V_n)} I_k, n \ge 0, $$
566
+ ---PAGE_BREAK---
567
+
568
+ where $(I_k)$ are identically distributed integer valued random variables independent of $V_n$ and $A_n(V_n)$, and such that $\mathbb{E}(I_1) = p$ for some $p \in (0, 1)$. For $v > 0$, $A_n(v)$ is an independent random variable with the same distribution as $Z(H_0)$ under $\mathbb{P}_{(1,v)}$, i.e., with the initial condition $(W(0), Z(0)) = (1, v)$.
569
+
570
+ The above equation (25) can be interpreted as a branching process with immi-
571
+ gration, see Seneta [21], or also as an autoregressive model.
572
+
573
+ **Proposition 3.6.** Under the condition $\mu_Z - \nu > \mu_W$, if $(V_n)$ is the Markov chain defined by Equation (25) and, for $K \ge 0$,
574
+
575
+ $$N_K = \inf\{n \ge 0 : V_n \le K\},$$
576
+
577
+ then there exist $\gamma > 0$ and $K \in \mathbb{N}$ such that
578
+
579
+ $$
580
+ (26) \quad \mathbb{E}(N_K|V_0 = v) \le \frac{1}{\gamma} \log(1+v), \quad \forall v \ge 0.
581
+ $$
582
+
583
+ *The Markov chain $(V_n)$ is, in particular, positive recurrent.*
584
+
585
+ *Proof.* For $V_0 = v \in \mathbb{N}$, Jensen's Inequality and Definition (25) give the relation
586
+
587
+ $$
588
+ (27) \quad \mathbb{E}_v \log \left( \frac{1+V_1}{1+v} \right) \le \mathbb{E}_{(1,v)} \log \left[ \frac{1+pZ(H_0)}{1+v} \right].
589
+ $$
590
+
591
+ From Proposition 3.5 and by using the same notations, one gets that, under $\mathbb{P}_{(1,v)}$,
592
+
593
+ $$
594
+ Z(H_0) \leq v + e^{\mu_W H_0} M_Y^*,
595
+ $$
596
+
597
+ where $(Y(t))$ is a Yule process starting with one individual. By looking at the birth instants of $(Z(t))$, it is easily checked that the random variable $H_0$ under $\mathbb{P}_{(1,v)}$ is stochastically bounded by $H_0$ under $\mathbb{P}_{(1,0)}$. The integrability of $H_0$ under $\mathbb{P}_{(1,0)}$ (proved in Proposition 3.5) and of $M_Y^*$ give that the expression
598
+
599
+ $$
600
+ \log \left( \frac{1 + p(v + e^{\mu_W H_0} M_Y^*)}{1 + v} \right)
601
+ $$
602
+
603
+ bounding the right hand side of Relation (27) is also an integrable random variable
604
+ under $\mathbb{P}_{(1,0)}$. Lebesgue's Theorem gives therefore that
605
+
606
+ $$
607
+ \limsup_{v \to +\infty} \left[ \mathbb{E}_v \log \left( \frac{1+V_1}{1+v} \right) \right] \leq \log p < 0.
608
+ $$
609
+
610
+ Consequently, one concludes that $v \mapsto \log(1+v)$ is a Lyapunov function for the Markov chain $(V_n)$, i.e., if $\gamma = -(\log p)/2$, there exists $K$ such that for $v \ge K$,
611
+
612
+ $$
613
+ \mathbb{E}_v \log (1 + V_1) - \log (1 + v) \le -\gamma.
614
+ $$
615
+
616
+ Foster's criterion, see Theorem 8.6 of Robert [18], implies that $(V_n)$ is indeed ergodic
617
+ and that Relation (26) holds. $\square$
618
+
619
+ 4. ANALYSIS OF THE MULTI-CHUNK NETWORK
620
+
621
+ In this section it is assumed that a file of *n* chunks is distributed by the file-sharing network within the following framework, corresponding to Figure 1. Chunks are delivered in sequential order, and, for *k* ≥ 1, requests with chunks 1, ..., *k* provide service for requests with one less chunk.
622
+
623
+ For $0 \le k < n$ and $t \ge 0$, the variable $X_k(t)$ denotes the number of requests downloading the $(k+1)$st chunk; for $k = n$, $X_n(t)$ is the number of requests having all the chunks. When taking into account the boundaries in the transition rates
624
+ ---PAGE_BREAK---
625
+
626
+ described in Figure 1, one gets the following $Q$-matrix for the $(n+1)$-dimensional
627
+ Markov process $(X_k(t), 0 \le k \le n)$:
628
+
629
+ $$
630
+ \begin{equation}
631
+ \begin{aligned}
632
+ Q(f)(x) ={}& \lambda[f(x+e_0)-f(x)] + \sum_{k=1}^{n} \mu_k(x_k \lor 1)[f(x+e_k-e_{k-1})-f(x)]1_{\{x_{k-1}>0\}} \\
633
+ & + \nu x_n[f(x-e_n)-f(x)],
634
+ \end{aligned}
635
+ \end{equation}
636
+ $$
637
+
638
+ where $x \in \mathbb{N}^{n+1}$, $f: \mathbb{N}^{n+1} \to \mathbb{R}_+$ is a function and for, $0 \le k \le n$, $e_k \in \mathbb{N}^{n+1}$ is the $k$th unit vector. Note that, as before, to avoid absorbing states, it is assumed that there is a server for the $k$th chunk when $x_k = 0$. The first section corresponds to the case $n = 2$ in a more general setting.
639
+
640
+ It is first shown in Proposition 4.1 that the network is stable for sufficiently small input rate $\lambda$. Proposition 4.2 studies the analog of the two-dimensional case with $\mu > \nu$, i.e., when $\mu_1 > \cdots > \mu_{n-1} > \mu_n - \nu > 0$, it is proved that the network is stable for any input rate $\lambda$. When this condition fails, it is shown that for $n = 2$ the network can only accommodate a finite input rate.
641
+
642
+ **Proposition 4.1.** *Under the condition*
643
+
644
+ $$
645
+ (28) \qquad \sum_{k=1}^{n} \frac{\lambda}{\mu_k} < 1,
646
+ $$
647
+
648
+ the Markov process (X(t)) is ergodic for any $\nu > 0$.
649
+
650
+ Condition (28) is obviously not sharp as can be seen in the case $n=1$ analyzed
651
+ in Section 2. But the proposition shows that there is always a positive threshold
652
+ $\lambda^*$ such that the system is stable when $\lambda < \lambda^*$.
653
+
654
+ *Proof.* For $x \in \mathbb{N}^{n+1}$ and $(\alpha_k) \in \mathbb{R}^{n+1}$, define $f(x) = \alpha_0 x_0 + \dots + \alpha_n x_n$, then
655
+
656
+ $$
657
+ Q(f)(x) = \lambda\alpha_0 - \sum_{k=1}^{n} (\alpha_{k-1} - \alpha_k)\mu_k(x_k \vee 1)1_{\{x_{k-1}>0\}} - \nu x_n \alpha_n.
658
+ $$
659
+
660
+ For $\varepsilon > 0$, one can choose $(\alpha_k)$ so that $\alpha_0 = 1$ and
661
+
662
+ $$
663
+ \alpha_{k-1} - \alpha_k = \frac{\lambda}{\mu_k} + \varepsilon, \quad 1 \le k \le n,
664
+ $$
665
+
666
+ hence
667
+
668
+ $$
669
+ \alpha_n = 1 - \left( n\varepsilon + \sum_{k=1}^{n} \frac{\lambda}{\mu_k} \right),
670
+ $$
671
+
672
+ so that, for $\varepsilon$ small enough, the $\alpha_k$'s, $0 \le k \le n$ are decreasing and positive under
673
+ the condition of the proposition; in particular the set $\{x : f(x) \le K\}$ is finite for
674
+ any $K \ge 0$.
675
+
676
+ Take $K = (1+\lambda)/\nu$, then if $x \in \mathbb{N}^{n+1}$ is such that $f(x) \ge K$, either $x_k > 0$ for some $0 \le k \le n-1$ and in this case
677
+
678
+ $$
679
+ Q(f)(x) \leq \lambda - \mu_{k+1}(\alpha_k - \alpha_{k+1}) = -\varepsilon\mu_{k+1} < 0,
680
+ $$
681
+
682
+ or $x_n \ge K$ so that
683
+
684
+ $$
685
+ Q(f)(x) \leq \lambda - \nu K = -1 < 0.
686
+ $$
687
+
688
+ A Lyapunov function criterion for Markov processes shows that this implies that
689
+ the Markov process $(X(t))$ is ergodic. See Proposition 8.14 of Robert [18] for
690
+ example. □
691
+ ---PAGE_BREAK---
692
+
693
+ **Decreasing Service Rates.** The analog of the “good” case $\mu > \nu$ is proved in the next proposition.
694
+
695
+ **Proposition 4.2.** *Under the condition $\mu_1 > \mu_2 > \cdots > \mu_{n-1} > \mu_n - \nu > 0$, the Markov process $(X(t)) = (X_k(t), 0 \le k \le n)$ describing the linear file-sharing network is ergodic for any $\lambda \ge 0$.*
696
+
697
+ *Proof.* The proof proceeds in two steps: first coupling arguments with Yule processes allow to prove (30); then one can use the same technique as in the proof of Proposition 2.3, see Robert [18, Theorem 9.7].
698
+
699
+ *Step 1 (coupling).* Let $(W_n(t))$ be the process with $Q$-matrix defined by Relation (15) with $\mu_Z = \mu_n$ and starting at $W_n(0) = w_n \ge 1$. Since $\mu_n > \nu$, the process $(\exp(-(\mu_n-\nu)t)W_n(t))$ converges almost surely to a finite and positive random variable $M_{W_n}(\infty)$ by Corollary 3.2. Moreover, since $\mu_{n-1} > \mu_n - \nu > 0$, Corollary 3.1 entails that the birth instants $(\sigma_\ell^n)$ of this process are such that
700
+
701
+ $$ \sum_{\ell \ge 1} e^{-\mu_{n-1} \sigma_\ell^n} < +\infty, \text{ almost surely.} $$
702
+
703
+ Let $(Y_{n-1}(t))$ be an independent Yule process with parameter $\mu_{n-1}$ with initial condition $Y_{n-1}(0) = w_{n-1} \ge 1$ and $(W_{n-1}(t))$ the resulting process when its individuals are killed at the instants $(\sigma_\ell^n)$ of births of $(W_n(t))$: the previous equation and Proposition 3.4 show that $(W_{n-1}(t))$ can survive forever with a positive probability.
704
+
705
+ Let $(Y_{n-2}(t))$ be an independent Yule process starting from $w_{n-2} \ge 1$ with parameter $\mu_{n-2}$. Define $(W_{n-2}(t))$ the resulting process when the individuals of $(Y_{n-2}(t))$ are killed at the birth instants $(\tilde{\sigma}_\ell^{n-1})$ of $(W_{n-1}(t))$. Since $\mu_{n-2} > \mu_{n-1}$, the birth instants $(\tilde{\sigma}_\ell^{n-1})$ of $(Y_{n-1}(t))$ satisfy
706
+
707
+ $$ \sum_{\ell=1}^{+\infty} e^{-\mu_{n-2}\tilde{\sigma}_{\ell}^{n-1}} < +\infty $$
708
+
709
+ almost surely by Equation (14) (which still holds for a Yule process starting with more than one particle). Since the birth instants $(\sigma_\ell^{n-1})$ of $(W_{n-1}(t))$ are a subsequence of $(\tilde{\sigma}_\ell^{n-1})$, the same relationship holds for $(\sigma_\ell^{n-1})$, and therefore, with a positive probability, the three processes $(e^{-(\mu_n-\nu)t}W_n(t))$, $(e^{-\mu_{n-1}t}W_{n-1}(t))$ and $(e^{-\mu_{n-2}t}W_{n-2}(t))$ converge simultaneously to positive and finite random variables $M_{W_n}(\infty)$, $M_{W_{n-1}}(\infty)$ and $M_{W_{n-2}}(\infty)$, respectively. This construction can be repeated inductively to give the existence of $n$ processes $(W_k(t), k = 1, \dots, n)$ such that $(\sigma_\ell^k)$ is the sequence of birth times of $W_k$, $W_n$ is the birth-and-death process with $Q$-matrix (15), $W_k$ for $1 \le k \le n-1$ is a Yule process with parameter $\mu_k$ killed at $(\sigma_\ell^{k+1})$, and the event $\mathcal{E} = \{M_{W_1}(\infty) > 0, \dots, M_{W_n}(\infty) > 0\}$ has a positive probability. On this event, $W_k(t) \ge 1$ for all $t \ge 0$ and $1 \le k \le n-1$, and
710
+
711
+ $$ \lim_{t \to +\infty} W_n(t) = +\infty. $$
712
+ ---PAGE_BREAK---
713
+
714
+ For $0 \le k \le n-1$, one defines $(X_k^S(t)) = (X_{k,n-k}^S(t), \dots, X_{k,n}^S(t))$, the $k$th saturated system, as the $(k+1)$-dimensional Markov process with generator
715
+
716
+ $$
717
+ \begin{equation}
718
+ \begin{split}
719
+ (29) \qquad Q_k^S(f)(x) = \mu_{n-k}(x_{n-k} \lor 1)[f(x + e_{n-k}) - f(x)] \\
720
+ \qquad & + \sum_{\ell=1}^k \mu_{n-k+\ell}(x_{n-k+\ell} \lor 1)[f(x + e_{n-k+\ell} - e_{n-k+\ell-1}) - f(x)] \mathbf{1}_{\{x_{n-k+\ell-1} > 0\}} \\
721
+ \qquad & + \nu x_n[f(x - e_n) - f(x)],
722
+ \end{split}
723
+ \end{equation}
724
+ $$
725
+
726
+ where $x \in \mathbb{N}^{k+1}$ and $f : \mathbb{N}^{k+1} \to \mathbb{R}_+$ is an arbitrary function. Compared with the process $(X_\ell(t), 1 \le \ell \le n)$ with generator $Q$, it amounts to looking at the $k+1$ last queues $(X_{n-k}(t), \dots, X_n(t))$ under the assumption that the queue $n-k-1$ is saturated, i.e., $X_{n-k-1}(t) \equiv +\infty$ for all $t \ge 0$.
727
+
728
+ Note that for any $0 \le k \le n-1$, the transition rates of the Markov processes $(W_{n-\ell}(t), 0 \le \ell \le k)$ and $(X_{k,n-\ell}^S(t), 0 \le \ell \le k)$ are identical as long as no coordinate hits 0; one thus concludes that, with positive probability, the relation
729
+
730
+ $$
731
+ \lim_{t \to +\infty} X_{k,n}^{S}(t) = +\infty
732
+ $$
733
+
734
+ holds when $X_{k,n-\ell}^S(0) \ge 1$, $\ell=0,\dots,k$. Consequently, since the set $(\mathbb{N}-\{0\})^{k+1}$ can be reached with positive probability from any initial state in $\mathbb{N}^{k+1}$ by $(X_k^S(t))$, then
735
+
736
+ $$
737
+ (30) \qquad \lim_{t \to +\infty} \mathbb{E}(X_{k,n}^S(t)) = +\infty.
738
+ $$
739
+
740
+ *Step 2 (Foster's criterion).* We use Foster's criterion as stated in Theorem 9.7 of Robert [18]. First we inspect the case when $X_n(0)$ is large, then the case when $X_n(0)$ is bounded and $X_{n-1}(0)$ is large, etc... The key idea is that when $X_{n-k-1}(0)$ is large, then the process $(X_{n-k}(t), \dots, X_n(t))$ essentially behaves as the process $(X_k^S(t))$, for which Relation (30) ensures that the output rate is arbitrarily large.
741
+
742
+ Let $X(0) = x = (x_k) \in \mathbb{N}^{n+1}$, since the last queue serves at rate $\nu$ each request, for $t \ge 0$,
743
+
744
+ $$
745
+ \mathbb{E}(\|X(t)\|) \le \|x\| + \lambda t - x_n (1 - e^{-\nu t}),
746
+ $$
747
+
748
+ where $\|x\| = x_0 + \dots + x_n$ for $x = (x_0, \dots, x_n) \in \mathbb{N}^{n+1}$. Define $t_n = 1$ and let $K_n$ be such that $\lambda t_n - K_n(1 - \exp(-\nu)) \le -1$, so that the relation
749
+
750
+ $$
751
+ \mathbb{E}_x(\|X(t_n)\|) - \|x\| \le -1,
752
+ $$
753
+
754
+ holds when $x_n \ge K_n$.
755
+
756
+ From Equation (30) with $k=0$, one gets that there exists some $t_{n-1}$ such that for any $x_n \le K_n$,
757
+
758
+ $$
759
+ \nu \int_0^{t_{n-1}} \mathbb{E}_{x_n} (X_{0,n}^S(u)) du \geq \lambda t_{n-1} + 2.
760
+ $$
761
+
762
+ The two processes $(X_0^S(t))$ and $(X(t))$ can be built on the same probability space such that if they start from the same initial state, then the two processes $(X_{0,n}^S(t))$ and $(X_n(t))$ are identical as long as $X_{n-1}(t)$ stays positive. Since moreover the hitting time $\inf\{t \ge 0 : X_{n-1}(t) = 0\}$ goes to infinity as $x_{n-1}$ goes to infinity
763
+ ---PAGE_BREAK---
764
+
765
+ for any $x_n \le K_n$, one gets that there exists $K_{n-1}$ such that if $x_{n-1} \ge K_{n-1}$ and $x_n < K_n$, then the relation
766
+
767
+ $$
768
+ \begin{align*}
769
+ \mathbb{E}_x(\|X(t_{n-1})\|) - \|x\| &= \lambda t_{n-1} - \nu \int_0^{t_{n-1}} \mathbb{E}_x(X_n(u)) du \\
770
+ &\le \lambda t_{n-1} - \left( \nu \int_0^{t_{n-1}} \mathbb{E}_{x_n}(X_{0,n}^S(u)) du - 1 \right) \le -1
771
+ \end{align*}
772
+ $$
773
+
774
+ holds.
775
+
776
+ By induction, one gets in a similar way that there exist constants $t_n, \dots, t_0$ and $K_n, \dots, K_0$ such that for any $0 \le l \le n$, if $x_n \le K_n$, $x_{n-1} \le K_{n-1}$, $\dots$, $x_{n-l+1} \le K_{n-l+1}$ and $x_{n-l} > K_{n-l}$, then
777
+
778
+ $$
779
+ \mathbb{E}_x (\|X(t_{n-l})\|) - \|x\| \le -1.
780
+ $$
781
+
782
+ Theorem 8.13 of Robert [18] shows that $(X(t))$ is an ergodic Markov process. The proposition is proved. $\square$
783
+
784
+ **Analysis of the Two-Chunk Network.** In this subsection, one investigates the case when the monotonicity condition $\mu_1 > \cdots > \mu_{n-1} > \mu_n - \nu > 0$ fails. In general we conjecture the existence of bottlenecks which implies that the network can only accommodate a finite input rate. For instance, when $\mu_n - \nu < 0$, then it is easily seen that the network is unstable for $\lambda > \lambda^*$ where $\lambda^*$ is defined in Equation (32) below.
785
+
786
+ The first non-trivial case occurs for $n=2$, for which the monotonicity condition breaks in two situations, either when $\mu_2 - \nu > \mu_1$ or when $\mu_2 < \nu$. The latter case can in fact be dealt with using the exact same arguments as before. See Proposition 4.4.
787
+
788
+ The actual difficulty is when $\mu_2 - \nu > \mu_1$: then the stationary behavior of $(X_2(t))$ is linked to the stationary behavior of the first saturated model $(X_1^S(t))$ defined through its Q-matrix (29). The difficulty in this case is that one needs to compare two processes which grow exponentially fast.
789
+
790
+ **Proposition 4.3.** Assume that $\mu_2 - \nu > \mu_1$, then the first saturated process $(X_1^S(t))$ with Q-matrix defined by Equation (29) is ergodic.
791
+
792
+ **Corollary 4.1.** If $\mu_2 - \nu > \mu_1$ and if
793
+
794
+ $$
795
+ \lambda_2^* \stackrel{\text{def.}}{=} \nu \mathbb{E}_{\pi^S} (X_{1,2}^S(0)),
796
+ $$
797
+
798
+ where $\pi^S$ is the invariant distribution of the Markov process $(X_1^S(t))$, then the process $(X(t)) = (X_k(t), k = 0, 1, 2)$ describing the linear file-sharing network with parameters $\lambda, \mu_1, \mu_2$ and $\nu$ is ergodic for $\lambda < \lambda_2^*$ and transient for $\lambda > \lambda_2^*$.
799
+
800
+ *Sketch of Proof.* The proof of the transience when $\lambda > \lambda_2^*$ follows similarly as in Section 2: when $X_0(0)$ is large, the process $(X_1(t), X_2(t))$ can be coupled for some time with the first saturated system $(X_1^S(t))$. Since the output rate $\lambda_2^*$ of this system is smaller than the input rate $\lambda$, this implies that $(X_0(t))$ builds up, and it can indeed be shown that $X_0(t)/t$ converges almost surely to $\lambda - \lambda_2^*$.
801
+
802
+ The ergodicity when $\lambda < \lambda_2^*$ is slightly more complicated, but it involves the same arguments as the ones employed in the proof of Proposition 4.2. The details are omitted. $\square$
803
+ ---PAGE_BREAK---
804
+
805
+ *Proof of Proposition 4.3.* Denote $(X_1^S(t)) = (X_{1,1}^S(t), X_{1,2}^S(t))$, then as long as the first coordinate $X_{1,1}^S$ is positive, the process $(X_1^S(t))$ has the same distribution as $(W(t), Z(t))$ introduced in Section 3: $(Z(t))$ is a Bellman-Harris process with Malthusian parameter $\mu_2 - \nu$ and $(W(t))$ is a Yule process with parameter $\mu_1$ killed at times of births of $(Z(t))$.
806
+
807
+ By Proposition 3.5 and since $\mu_2 - \nu > \mu_1$, one has that $(X_{1,1}^S(t))$ returns infinitely often to 0. When $(X_{1,1}^S(t))$ is at 0 it jumps to 1 after an exponential time with parameter $\mu_1$, one denotes by $(E_{\mu_1,n})$ the corresponding i.i.d. sequence of successive residence times at 0. One defines the sequence $(S_n)$ by induction, $S_0 = 0$ and then
808
+
809
+ $$S_{n+1} = \inf\{t > S_n : X_{1,1}^S(t) = 0\} + E_{\mu_1, n+1}, \quad n \ge 0.$$
810
+
811
+ For $n \ge 1$, $X_{1,1}^S(S_n) = 1$ and for $n \ge 0$, define $M_n \stackrel{\text{def.}}{=} X_{1,2}^S(S_n)$. With the notations of Proposition 3.5, $(X_{1,1}^S(t))$ hits 0 after a duration of $H_{0,n}$ and at that time $(X_{1,2}^S(t))$ is at $Z(H_{0,n})$ with the initial condition $Z(0) = M_n$; while $X_{1,1}^S$ is still at 0, the dynamics of $X_{1,2}^S$ is simple, since it just empties. Finally, at time $S_{n+1} = S_n + H_{0,n} + E_{\mu_1,n+1}$, $(X_{1,1}^S(t))$ returns to 1 and at this instant the location of $(X_{1,2}^S(t))$ is given by
812
+
813
+ $$X_{1,2}^{S}(S_{n+1}) = M_{n+1} = \sum_{i=1}^{Z(H_{0,n})} 1_{\{E_{\nu,i}>E_{\mu_1,n+1}\}},$$
814
+
815
+ where $(E_{\nu,i})$ are i.i.d. exponential random variables with parameter $\nu$, the ith variable being the residence time of the ith request in node 2. Consequently, $(M_n, n \ge 1)$ is a Markov chain whose transitions are defined by Relation (25) with $p = \nu / (\nu + \mu_1)$; note that $(M_n, n \ge 0)$ has the same dynamics only when $X_{1,1}^S(0) = 1$.
816
+
817
+ Define for any $K > 0$ the stopping time $T_K$
818
+
819
+ $$T_K = \inf\{t \ge 0 : X_{1,2}^S(t) \le K, X_{1,1}^S(t) = 1\}.$$
820
+
821
+ The ergodicity of $(X_1^S(t))$ will follow from the finiteness of $\mathbb{E}_{(x_1,x_2)}(T_K)$ for some $K$ large enough and for arbitrary $x = (x_1, x_2) \in \mathbb{N}^2$. The strong Markov property of $(X_1^S(t))$ applied at time $S_1$ gives
822
+
823
+ $$\mathbb{E}_{(x_1,x_2)}(T_K) \le 2\mathbb{E}_{(x_1,x_2)}(S_1) + \mathbb{E}_{(x_1,x_2)}\left[\mathbb{E}_{(1,X_{1,2}^S(S_1))}(T_K)\right],$$
824
+
825
+ and so one only needs to study $T_K$ conditioned on $\{X_{1,1}^S(0) = 1\}$ since $\mathbb{E}_{(x_1,x_2)}(S_1)$ is finite in view of Proposition 3.5.
826
+
827
+ Then, on this event and with $N_K$ defined in Proposition 3.6, the identity
828
+
829
+ $$ (31) \qquad T_K = \sum_{i=0}^{N_K} (H_{0,i} + E_{\mu_1,i}) $$
830
+
831
+ holds. For $i \ge 0$, the Markov property of $(M_n, n \ge 0)$ gives
832
+
833
+ $$ \mathbb{E}_{(x_1,x_2)}(H_{0,i} 1_{\{i \le N_K\}}) = \mathbb{E}_{(x_1,x_2)}(\mathbb{E}_{(1,M_i)}(H_0) 1_{\{i \le N_K\}}) $$
834
+
835
+ With the same argument as in the proof of Proposition 3.6, one has
836
+
837
+ $$ \mathbb{E}_{(1,M_i)}(H_0) \le \mathbb{E}_{(1,0)}(H_0) < +\infty, $$
838
+ ---PAGE_BREAK---
839
+
840
+ with Equations (31) and (26) of Proposition 3.6, one gets that for some $\gamma > 0$ and some $K > 0$,
841
+
842
+ $$ \mathbb{E}_{(x_1,x_2)}(T_K) \leq 2\mathbb{E}_{(x_1,x_2)}(S_1) + C \left(1 + \mathbb{E}_{(x_1,x_2)}\left[\log\left(1 + X_{1,2}^S(S_1)\right)\right]\right) $$
843
+
844
+ with the constant $C = (\mathbb{E}_{(1,0)}(H_0) + 1/\mu_2)/\gamma$. This last term is finite for any $(x_1, x_2)$ in view of Proposition 3.5, which proves the proposition. $\square$
845
+
846
+ **Proposition 4.4.** If $\nu > \mu_2$ and
847
+
848
+ $$ (32) \qquad \lambda^* \stackrel{\text{def.}}{=} \frac{\mu_2}{(1 - \mu_2/\nu)(1 - \log(1 - \mu_2/\nu))}, $$
849
+
850
+ then the Markov process $(X(t)) = (X_k(t), k = 0, 1, 2)$ is transient if $\lambda > \lambda^*$ and ergodic if $\lambda < \lambda^*$.
851
+
852
+ *Sketch of Proof.* The result for transience comes directly from the fact that the last coordinate is stochastically dominated by the birth-and-death process $(Y_1^1(t))$ of Section 2.
853
+
854
+ As before, the arguments employed in the proof of Proposition 4.2 to prove ergodicity can also be used, for this reason they are only sketched. One has in fact to consider the following situations.
855
+
856
+ — If there are many customers in the last queue, then the total number of customers instantaneously decreases.
857
+
858
+ — If there are many customers in the second queue, then the last queue has time to get close to stationarity, the input rate is $\lambda$ and the output rate is $\lambda^*$.
859
+
860
+ — Finally, if there are many customers in the first queue, then it is easily seen that the second queue builds up, since it grows like a Yule process killed at times $(\sigma_n)$ where the sequence $(\sigma_n)$ essentially grows linearly since the last queue is stable. Hence the second queue reaches high values and the last queue offers an output rate of $\lambda^*$.
861
+
862
+ Hence when $\lambda < \lambda^*$, the Markov process $(X(t))$ is ergodic. $\square$
863
+
864
+ ## APPENDIX A. PROOF OF PROPOSITION 3.3
865
+
866
+ In this appendix the notations of Section 3 are used. Since the random variable $(B_\sigma(t) | Z(0) = 0)$ is stochastically smaller than $(B_\sigma(t) | Z(0) = z)$ for any $z \in \mathbb{N}$, it is enough to show that for $\eta < \eta^*(\nu/\mu_Z)$
867
+
868
+ $$ \mathbb{E}_0 \left[ \sup_{t \ge \sigma_1} (e^{\eta \alpha t} B_\sigma(t)^{-\eta}) \right] < +\infty, $$
869
+
870
+ where $\alpha = \mu_Z - \nu > 0$.
871
+
872
+ Note that the process $(B_\sigma(t+\sigma_1), t \ge 0)$ under $\mathbb{P}_0$ has the same distribution as $(B_\sigma(t)+1, t \ge 0)$ under $\mathbb{P}_1$, and by independence of $\sigma_1$, an exponential random variable with parameter $\mu_Z$, and $(B_\sigma(t+\sigma_1), t \ge 0)$, one gets
873
+
874
+ $$ \mathbb{E}_0 \left[ \sup_{t \ge \sigma_1} (e^{\eta \alpha t} B_\sigma(t)^{-\eta}) \right] = \mathbb{E}_0 (e^{\eta \alpha \sigma_1}) \mathbb{E}_1 \left[ \sup_{t \ge 0} (e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta}) \right]. $$
875
+
876
+ Since $\alpha < \mu_Z$ and $\eta^*(\nu/\mu_Z) < 1$, then $\mathbb{E}_0(\exp(\eta\alpha\sigma_1))$ is finite, and all one needs to prove is that the second term is finite as well.
877
+
878
+ Define $\tau$ as the last time $Z(t) = 0$:
879
+
880
+ $$ \tau = \sup\{t \ge 0 : Z(t) = 0\}, $$
881
+ ---PAGE_BREAK---
882
+
883
+ with the convention that $\tau = +\infty$ if $(Z(t))$ never returns to 0. Recall that, because of the assumption $\mu_Z > \nu$, with probability 1, the process $(Z(t))$ returns to 0 a finite number of times.
884
+
885
+ Conditioned on the event $\{\tau = +\infty\}$, the process $(Z(t))$ is a $(p, \lambda)$-branching process conditioned on survival, with $\lambda = \mu_Z + \nu$ and $p = \mu_Z/\lambda$. Such a branching process conditioned on survival can be decomposed as $Z = Z_{(1)} + Y$, where $(Y(t))$ is a Yule process with parameter $\alpha$. See Athreya and Ney [3]. Consequently, for any $0 < \eta < 1$,
886
+
887
+ $$ \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta} \right) | \tau = +\infty \right] \le \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} Y(t)^{-\eta} \right) \right]. $$
888
+
889
+ Since the nth split time $t_n$ of $(Y(t))$ is distributed like the maximum of n i.i.d. exponential random variables, $Y(t)$ for $t \ge 0$ is geometrically distributed with parameter $1 - e^{-\alpha t}$, hence,
890
+
891
+ $$
892
+ \begin{aligned}
893
+ \sup_{t \ge 0} \left[ e^{\eta \alpha t} \mathbb{E}_1 \left( \frac{1}{Y(t)^{\eta}} \right) \right] &= \sup_{t \ge 0} \left[ e^{-(1-\eta)\alpha t} \sum_{k \ge 1} \frac{(1-e^{-\alpha t})^{k-1}}{k^{\eta}} \right] \\
894
+ &\le \sup_{0 \le u \le 1} \left[ (1-u)^{1-\eta} \sum_{k \ge 1} \frac{u^{k-1}}{k^{\eta}} \right].
895
+ \end{aligned}
896
+ $$
897
+
898
+ For $0 < u < 1$, the relation
899
+
900
+ $$
901
+ \begin{aligned}
902
+ (1-u)^{1-\eta} \sum_{k \ge 1} \frac{u^{k-1}}{k^\eta} &\le (1-u)^{1-\eta} \int_0^\infty \frac{u^x}{(1+x)^\eta} dx, \\
903
+ &= \left(\frac{1-u}{-\log u}\right)^{1-\eta} \int_0^\infty \frac{e^{-x}}{(x-\log u)^\eta} dx,
904
+ \end{aligned}
905
+ $$
906
+
907
+ holds, hence
908
+
909
+ $$ \sup_{t \ge 0} \left[ e^{\eta \alpha t} \mathbb{E}_1 \left( \frac{1}{Y(t)^{\eta}} \right) \right] < +\infty. $$
910
+
911
+ The process $(e^{-\alpha t}Y(t))$ being a martingale, by convexity the process $(e^{\eta\alpha t}Y(t)^{-\eta})$
912
+ is a non-negative sub-martingale. For any $\eta \in (0, 1)$ Doob's $L_p$ inequality gives the
913
+ existence of a finite $q(\eta) > 0$ such that
914
+
915
+ $$ \mathbb{E}_1 \left[ \sup_{t \ge 0} (e^{\eta \alpha t} Y(t)^{-\eta}) \right] \le q(\eta) \sup_{t \ge 0} \left[ e^{\eta \alpha t} \mathbb{E}_1 \left( \frac{1}{Y(t)^{\eta}} \right) \right] < +\infty. $$
916
+
917
+ The following result has therefore been proved.
918
+
919
+ **Lemma A.1.** For any $0 < \eta < 1$,
920
+
921
+ $$ \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta} \right) | \tau = +\infty \right] < +\infty. $$
922
+ ---PAGE_BREAK---
923
+
924
+ On the event $\{\tau < +\infty\}$, $(Z(t))$ hits a geometric number of times 0 and then couples with a $(p, \lambda)$-branching process conditioned on survival. On this event,
925
+
926
+ $$
927
+ \begin{align*}
928
+ & \sup_{t \ge 0} (e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta}) \\
929
+ &= \max \left( \sup_{0 \le t \le \tau} (e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta}), \sup_{t \ge \tau} (e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta}) \right) \\
930
+ &\le e^{\eta \alpha \tau} \left( 1 + \sup_{t \ge 0} (e^{\eta \alpha t} (B'_{\sigma}(t) + 1)^{-\eta}) \right)
931
+ \end{align*}
932
+ $$
933
+
934
+ where $B'_\sigma(t)$ for $t \ge \tau$ is the number of births in $(\tau, t]$ of a $(p, \lambda)$-branching process conditioned on survival and independent of the variable $\tau$, consequently
935
+
936
+ $$
937
+ \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta} \right) \middle| \tau < +\infty \right] \le \mathbb{E}_1 (e^{\eta \alpha \tau} | \tau < +\infty) \\
938
+ \times \left( 1 + \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta} \right) \middle| \tau = +\infty \right] \right).
939
+ $$
940
+
941
+ In view of Lemma A.1, the proof of Proposition 3.3 will be finished if one can prove that
942
+
943
+ $$
944
+ \mathbb{E}_1 (e^{\eta \alpha \tau} |\tau < +\infty) < +\infty,
945
+ $$
946
+
947
+ which actually comes from the following decomposition: under $\mathbb{P}_1(\cdot | \tau < +\infty)$, the
948
+ random variable $\tau$ can be written as
949
+
950
+ $$
951
+ \tau = \sum_{k=1}^{1+G} (T_k + E_{\mu_Z,k})
952
+ $$
953
+
954
+ where $G$ is a geometric random variable with parameter $q = \nu/\mu_Z$, $(T_k)$ is an i.i.d.
955
+ sequence with the same distribution as the extinction time of a $(p, \lambda)$-branching
956
+ process starting with one particle and conditioned on extinction and $(E_{\mu_Z,k})$ are
957
+ i.i.d. exponential random variables with parameter $\mu_Z$.
958
+
959
+ Since $q$ is the probability of extinction of a $(p, \lambda)$-branching process started with
960
+ one particle, $G + 1$ represents the number of times $(Z(t))$ hits 0 before going to
961
+ infinity. This representation entails
962
+
963
+ $$
964
+ \mathbb{E}_1 (e^{\eta \alpha \tau} | \tau < +\infty) = \mathbb{E} (\gamma(\eta)^{G+1}) \quad \text{where} \quad \gamma(\eta) = \mathbb{E} (e^{\eta \alpha (T_1 + E_{\mu_Z,1})}).
965
+ $$
966
+
967
+ A (p, λ)-branching process conditioned on extinction is actually a (1 − p, λ)-branching process. See again Athreya and Ney [3]. Thus T₁ satisfies the following recursive distributional equation:
968
+
969
+ $$
970
+ T_1 \stackrel{\text{dist.}}{=} E_{\lambda} + 1_{\{\xi=2\}}(T_1 \lor T_2),
971
+ $$
972
+
973
+ where $\mathbb{P}(\xi = 2) = 1-p$ and $E_\lambda$ is an exponential random variable with parameter $\lambda$. This equation yields
974
+
975
+ $$
976
+ \mathbb{P}(T_1 \ge t) \le e^{-\lambda t} + 2\lambda(1-p) \int_0^t \mathbb{P}(T_1 \ge t-u) e^{-\lambda u} du,
977
+ $$
978
+
979
+ and Gronwall's Lemma applied to the function $t \mapsto \exp(\lambda t)\mathbb{P}(T_1 \ge t)$ gives that
980
+
981
+ $$
982
+ \mathbb{P}(T_1 \ge t) \le e^{(\lambda - 2\lambda p)t} = e^{(\nu - \mu_Z)t}
983
+ $$
984
+ ---PAGE_BREAK---
985
+
986
+ hence for any $0 < \eta < 1$,
987
+
988
+ $$\mathbb{E}_1(e^{\eta\alpha T_1}) \le \frac{1}{1-\eta}.$$
989
+
990
+ Since $G$ is a geometric random variable with parameter $q$, $\mathbb{E}(\gamma(\eta)^G)$ is finite if and only if $\gamma(\eta) < 1/q$. Since finally
991
+
992
+ $$\gamma(\eta) = \frac{\mu_Z}{\mu_Z - \eta\alpha} \mathbb{E} (e^{\eta\alpha T_1}) \le \frac{\mu_Z}{(1-\eta)(\mu_Z - \eta\alpha)}$$
993
+
994
+ one can easily check that $\gamma(\eta) < 1/q$ for $\eta < \eta^*(\nu/\mu_Z)$ as defined by Equation (19), which concludes the proof of Proposition 3.3.
995
+
996
+ REFERENCES
997
+
998
+ [1] David Aldous and Jim Pitman, *Tree-valued Markov chains derived from Galton-Watson processes*, Annales de l'Institut Henri Poincaré. Probabilités et Statistiques **34** (1998), no. 5, 637-686.
999
+
1000
+ [2] Gerold Alsmeyer, *On the Galton-Watson Predator-Prey Process*, Annals of Applied Probability **3** (1993), no. 1, 198-211.
1001
+
1002
+ [3] K. B. Athreya and P. E. Ney, *Branching processes*, Springer, 1972.
1003
+
1004
+ [4] Thomas Bonald, Laurent Massoulié, Fabien Mathieu, Diego Perino, and Andrew Twigg, *Epidemic live streaming: optimal performance trade-offs*, Proceedings of SIGMETRICS'08 (New York, NY, USA), ACM, 2008, pp. 325-336.
1005
+
1006
+ [5] Maury Bramson, *Stability of queueing networks*, Lecture Notes in Mathematics, vol. 1950, Springer, Berlin, 2008, Lectures from the 36th Probability Summer School held in Saint-Flour, July 2-15, 2006.
1007
+
1008
+ [6] Hong Chen and David D. Yao, *Fundamentals of queueing networks*, Springer-Verlag, New York, 2001, Performance, asymptotics, and optimization, Stochastic Modelling and Applied Probability.
1009
+
1010
+ [7] T. D. Dang, R. Pereczes, and S. Molnár, *Modeling the population of file-sharing peer-to-peer networks with branching processes*, IEEE Symposium on Computers and Communications (ISCC'07) (Aveiro, Portugal), July 2007.
1011
+
1012
+ [8] F.P. Kelly, *Loss networks*, Annals of Applied Probability **1** (1991), no. 3, 319-378.
1013
+
1014
+ [9] J. F. C. Kingman, *The first birth problem for an age-dependent branching process.*, Annals of Probability **3** (1975), no. 5, 790-801.
1015
+
1016
+ [10] L. Leskelä, *Stochastic relations of random variables and processes*, J. Theor. Probab. (2009), To appear.
1017
+
1018
+ [11] Laurent Massoulié and Andrew Twigg, *Rate-optimal schemes for Peer-to-Peer live streaming*, Performance Evaluations **65** (2008), no. 11-12, 804-822.
1019
+
1020
+ [12] Laurent Massoulié and Milan Vojnović, *Coupon replication systems*, Proceedings of SIGMETRICS'05 (Banff, Alberta, Canada), no. 1, June 2005, pp. 2-13.
1021
+
1022
+ [13] Olle Nerman, *On the convergence of supercritical general (C-M-J) branching processes*, Z. Wahrscheinlichkeitstheorie verw. Gebiete **57** (1981), 365-395.
1023
+
1024
+ [14] J. Neveu, *Erasing a branching tree*, Advances in Applied Probability (1986), no. suppl., 101-108.
1025
+
1026
+ [15] R. Núñez-Queija and B. J. Prabhu, *Scaling laws for file dissemination in P2P networks with random contacts*, Proceedings of IWQoS, 2008.
1027
+
1028
+ [16] Nadim Parvez, Carey Williamson, Anirban Mahanti, and Niklas Carlsson, *Analysis of bittorrent-like protocols for on-demand stored media streaming*, SIGMETRICS '08: Proceedings of the 2008 ACM SIGMETRICS international conference on Measurement and modeling of computer systems (New York, NY, USA), ACM, 2008, pp. 301-312.
1029
+
1030
+ [17] Dongyu Qiu and R. Srikant, *Modeling and performance analysis of bittorrent-like peer-to-peer networks*, SIGCOMM '04: Proceedings of the 2004 conference on Applications, technologies, architectures, and protocols for computer communications (New York, NY, USA), ACM, 2004, pp. 367-378.
1031
+
1032
+ [18] Philippe Robert, *Stochastic networks and queues*, Stochastic Modelling and Applied Probability Series, vol. 52, Springer, New-York, June 2003.
1033
+ ---PAGE_BREAK---
1034
+
1035
+ [19] Philippe Robert and Florian Simatos, *Occupancy schemes associated to Yule processes*, Advances in Applied Probability **41** (2009), no. 2, To Appear.
1036
+
1037
+ [20] L. C. G. Rogers and David Williams, *Diffusions, Markov processes, and martingales. Vol. 2: Itô calculus*, John Wiley & Sons Inc., New York, 1987.
1038
+
1039
+ [21] E. Seneta, *On the supercritical branching process with immigration*, Mathematical Biosciences **7** (1970), 9-14.
1040
+
1041
+ [22] Florian Simatos, Philippe Robert, and Fabrice Guillemin, *A queueing system for modeling a file sharing principle*, Proceedings of SIGMETRICS'08 (New York, NY, USA), ACM, 2008, pp. 181-192.
1042
+
1043
+ [23] Florian Simatos and Danielle Tibi, *Spatial homogenization in a stochastic network with mobility*, Annals of Applied Probability (2009), To Appear.
1044
+
1045
+ [24] Riikka Susitaival, Samuli Aalto, and Jorma Virtamo, *Analyzing the dynamics and resource usage of P2P file sharing by a spatio-temporal model*, International Workshop on P2P for High Performance Computation Sciences, 2006.
1046
+
1047
+ [25] David Williams, *Probability with martingales*, Cambridge University Press, 1991.
1048
+
1049
+ [26] Xiangying Yang and Gustavo de Veciana, *Service capacity of peer to peer networks*, Proceedings of IEEE Infocom'04, ACM, 2004, pp. 2242-2252.
1050
+
1051
+ (L. Leskelä) HELSINKI UNIVERSITY OF TECHNOLOGY, DEPARTMENT OF MATHEMATICS AND SYSTEMS ANALYSIS, PO BOX 1100, 02015 TKK, FINLAND
1052
+
1053
+ *E-mail address:* lasse.leskela@iki.fi
1054
+
1055
+ *URL:* http://www.iki.fi/lsl
1056
+
1057
+ (Ph. Robert, F. Simatos) INRIA PARIS — ROCQUENCOURT, DOMAINE DE VOLUCEAU, BP 105, 78153 LE CHESNAY, FRANCE.
1058
+
1059
+ *E-mail address:* Philippe.Robert@inria.fr
1060
+
1061
+ *E-mail address:* Florian.Simatos@inria.fr
1062
+
1063
+ *URL:* http://www-rocq.inria.fr/~robert
samples/texts_merged/1808935.md ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Ostensive Automatic Schema Mapping for Taxonomy-based Peer-to-Peer Systems
5
+
6
+ Yannis Tzitzikas¹ and Carlo Meghini
7
+
8
+ Istituto di Scienza e Tecnologie dell' Informazione [ISTI]
9
+ Consiglio Nazionale delle Ricerche [CNR], Pisa, Italy
10
+ Email: {tzitzik|meghini}@iei.pi.cnr.it
11
+
12
+ **Abstract** This paper considers Peer-to-Peer systems in which peers employ taxonomies for describing the contents of their objects and for formulating semantic-based queries to the other peers of the system. As each peer can use its own taxonomy, peers are equipped with inter-taxonomy mappings in order to carry out the required translation tasks. As these systems are ad-hoc, the peers should be able to create or revise these mappings on demand and at run-time. For this reason, we introduce an ostensive data-driven method for automatic mapping and specialize it for the case of taxonomies.
13
+
14
+ ## 1 Introduction
15
+
16
+ There is a growing research interest on peer-to-peer systems like Napster, Gnutella, FreeNet and many others. A peer-to-peer (P2P) system is a distributed system in which participants (the peers) rely on one another for service, rather than solely relying on dedicated and often centralized servers. Many examples of P2P systems have emerged recently, most of which are wide-area, large-scale systems that provide content sharing [4], storage services [19], or distributed "grid" computation [2, 1]. Smaller-scale P2P systems also exist, such as federated, server-less file systems [10, 7] and collaborative workgroup tools [3].
17
+
18
+ Existing peer-to-peer (P2P) systems have focused on specific application domains (e.g. music file sharing) or on providing file-system-like capabilities. These systems do not yet provide semantic-based retrieval services. In most of the cases, the name of the object (e.g. the title of a music file) is the only means for describing the contents of the object. Semantic-based retrieval in P2P systems is a great challenge. In general, the language that can be used for indexing the objects of the domain and for formulating semantic-based queries, can be *free* (e.g natural language) or *controlled*, i.e. object descriptions and queries may have to conform to a specific vocabulary and syntax. The first case, resembles distributed Information Retrieval (IR) systems and this approach is applicable in the case where the objects of the domain have a textual content (e.g. see
19
+
20
+ ¹ Work done during the postdoctoral studies of the author at CNR-ISTI as an ERCIM fellow.
21
+ ---PAGE_BREAK---
22
+
23
+ [23]). In this paper we focus on the second case where the objects of a peer are indexed according to a specific conceptual model represented in a data model (e.g. relational, object-oriented, logic-based, etc), and content searches are formulated using a specific query language. This approach, which can be called "database approach", starts to receive noteworthy attention by the researchers, as is believed that the database and knowledge base research has much to contribute to the P2P grand challenge through its wealth of techniques for sophisticated semantics-based data models and query processing techniques (e.g. see [14, 9, 18, 15, 32]). A P2P system might impose a single conceptual model on all participants to enforce uniform, global access, but this will be too restrictive. Alternatively, a limited number of conceptual models may be allowed, so that traditional information mediation and integration techniques will likely apply (with the restriction that there is no central authority). The case of fully heterogeneous conceptual models makes uniform global access extremely challenging and this is the case that we are interested in.
24
+
25
+ The first and basic question that we have to investigate is which conceptual modeling approach is appropriate for the P2P paradigm. We would like a scalable conceptual modeling approach which also allows bridging the various kinds of heterogeneity in a systematic and easy manner. As there are no central servers or mediators, each participating source must have (or be able to create) *mappings*, or articulations, between its conceptual model and the conceptual models of its neighbors in order to be able to translate the received queries to queries that can be understood (and thus answered) by the recipient sources. These mappings could be established manually (as in the case of the Semantic Web [8]) but the more appropriate approach for a P2P network, and the more challenging, is the *automatic mapping*. For all these reasons, a simple, conceptually clear, and application-independent conceptual modeling approach seems to be advantageous.
26
+
27
+ In this paper we consider the case where peers employ *taxonomies*. Note that it is quite easy to create a taxonomy for a source or a mediator. Even ordinary Web users can design this kind of conceptual model. Taxonomies can be constructed either from scratch, or by extracting them from existing taxonomies (e.g. from the taxonomy of Yahoo! or ODP) using special-purpose languages and tools (e.g. see [30]). Furthermore, the design of taxonomies can be done more systematically if done following a faceted approach (e.g. see [27, 26]). In addition, thanks to techniques that have emerged recently [31], taxonomies of compound terms can be also defined in a flexible and systematic manner. However, the more important for P2P systems, advantage of taxonomies is that their simplicity and modeling uniformity allows integrating the contents of several sources without having to tackle complex structural differences. Indeed, as it is shown in [32], inter-taxonomy mappings offer a *uniform* method for bridging *naming, contextual and granularity* heterogeneities between the taxonomies of the sources. Given this conceptual modeling approach, a mediator does not have to tackle complex structural differences between the sources, as it happens with relational mediators (e.g. see [22, 21]) and Description Logics-based medi-
28
+ ---PAGE_BREAK---
29
+
30
+ ators (e.g. see [17, 11]). Moreover, it allows the integration of *schema* and *data* in a uniform manner. Another advantage of this conceptual modeling approach is that query evaluation in taxonomy-based sources and mediators can be done efficiently (polynomial time).
31
+
32
+ In this paper we introduce a data-driven method for automatic taxonomy articulation. We call this method *ostensive* because the meaning of each term is explained by ostension, i.e. by pointing to something (here, to a set of objects) to which the term applies. For example, the word "rose" can be defined ostensively by pointing to a rose and saying "that is a rose". Instead, the verbal methods of term definition (e.g. the synonyms or the analytic method) presuppose that the learner already knows some other terms and, thus, they are useless to someone who does not know these terms; e.g. verbal word definitions are useless to a small child who has not learnt any words at all.
33
+
34
+ Specifically, in this paper we describe an ostensive articulation method that can be used for articulating both single terms and queries, and it can be implemented efficiently by a communication protocol. However, ostensive articulation is possible in a P2P system only if the domain of the peers is not disjoint. If it is disjoint then we cannot derive any articulation. This problem can be tackled by employing *reference collections*. For instance, each peer can have its own taxonomy, but before joining the network it must first index the objects of a small reference object set. Consequently, peers can build automatically the desired articulations by running the articulation protocol on this reference collection.
35
+
36
+ The rest of this paper is organized as follows: Section 2 introduces a general formal framework for ostensive articulation. Section 3 specializes and describes ostensive articulation for taxonomy-based sources. Section 4 discusses the application of ostensive articulation in P2P systems of taxonomy-based sources, and finally, Section 5 concludes the paper.
37
+
38
+ ## 2 Ostensive Articulation
39
+
40
+ Let us first introduce the general framework. We view a source $S$ as a function $S: Q \to \mathcal{A}$ where $Q$ is the set of all queries that $S$ can answer, and $\mathcal{A}$ is the set of all answers, i.e. $\mathcal{A}=\{S(q) | q \in Q\}$. As we focus on retrieval queries, we assume that $\mathcal{A}$ is a subset of $\mathcal{P}(Obj)$ where `Obj` is the set of all objects stored at the source.
41
+
42
+ The ostensive articulation technique that we shall introduce requires a "naming service", i.e. a method for computing one (or maybe more) names (e.g. queries) for each set of objects $R \subseteq Obj$. Let $Q_N$ denote the set of all names. In general, $Q_N = Q$, however we introduce $Q_N$ because we may want names to be queries of a specific form. For supporting the naming service we would like a function $n: \mathcal{P}(Obj) \to Q_N$ such that for each $R \subseteq Obj$, $S(n(R)) = R$. Having such a function, we would say that $n(R)$ is an exact name for $R$. Note that if $S$ is an onto function and $Q_N = Q$, then the naming function $n$ coincides with the inverse relation of $S$, i.e. with the relation $S^{-1}: \mathcal{P}(Obj) \to Q$. However, this
43
+ ---PAGE_BREAK---
44
+
45
+ is not always the case, as more often than not, *S* is not an onto function, i.e. *A* ⊂ *P*(Obj). For this reason we shall introduce two naming functions, a lower naming function *n*⁻ and an upper naming function *n*⁺. To define these functions, we first need to define an ordering over queries. Given two queries, *q* and *q'* in *Q*, we write *q* ≤ *q'* if *S(q) ⊆ S(q')*, and we write *q* ∼ *q'*, if both *q* ≤ *q'* and *q'* ≤ *q* hold. Note that ∼ is an equivalence relation over *Q*, and let *Q~* denote the set of equivalence classes induced by ∼ over *Q*. Note that ≤ is a partial order over *Q~*.
46
+
47
+ Now we can define the function $n^-$ and $n^+$ as follows:
48
+
49
+ $$
50
+ \begin{align*}
51
+ n^{-}(R) &= \text{lub}\{\, q \in Q_{N} \mid S(q) \subseteq R \} \\
52
+ n^{+}(R) &= \text{glb}\{\, q \in Q_{N} \mid S(q) \supseteq R \}
53
+ \end{align*}
54
+ $$
55
+
56
+ where $R$ is any subset of $Obj$. Now let $R$ be a subset of $Obj$ for which both $n^{-}(R)$ and $n^{+}(R)$ are defined (i.e. the above lub and glb exist). It is clear that in this case it holds:
57
+
58
+ $$
59
+ S(n^{-}(R)) \subseteq R \subseteq S(n^{+}(R))
60
+ $$
61
+
62
+ and that $n^-(R)$ and $n^+(R)$ are the best "approximations" of the exact name of $R$. Note that if $S(n^-(R)) = S(n^+(R))$ then both $n^-(R)$ and $n^+(R)$ are exact names of $R$.
63
+
64
+ If $Q_N$ is a query language that (a) supports disjunction ($\vee$) and conjunction ($\wedge$) and is closed with respect to these, and (b) has a top ($\top$) and a bottom ($\bot$) element such that $S(\top) = Obj$ and $S(\bot) = \emptyset$, then the functions $n^-$ and $n^+$ are defined for every subset $R$ of $Obj$. Specifically, in this case $(Q_\sim, \le)$ is a complete lattice, thus these functions are defined as:
65
+
66
+ $$
67
+ \begin{align*}
68
+ n^{-}(R) &= \bigvee \{ q \in Q_{N} \mid S(q) \subseteq R \} \\
69
+ n^{+}(R) &= \bigwedge \{ q \in Q_{N} \mid S(q) \supseteq R \}
70
+ \end{align*}
71
+ $$
72
+
73
+ As $Q_N$ is usually an infinite language, $n^-(R)$ and $n^+(R)$ are queries of infinite length. This means that in practice we also need a method for computing a query of finite length that is equivalent to $n^-(R)$ and another one that is equivalent to $n^+(R)$.
74
+
75
+ If however $Q_N$ does not satisfy the above ((a) and (b)) conditions, then $n^-(R)$ and $n^+(R)$ may not exist. For example, this happens if we want to establish relationships between single terms of two taxonomy-based sources, or between atomic concepts of two Description Logics-based sources. For such cases, we can define $n^-$ and $n^+$ as follows:
76
+
77
+ $$
78
+ \begin{align*}
79
+ n^{-}(R) &= \max\{ q \in Q_{N} \mid S(q) \subseteq R \} \\
80
+ n^{+}(R) &= \min\{ q \in Q_{N} \mid S(q) \supseteq R \}
81
+ \end{align*}
82
+ $$
83
+
84
+ where max returns the maximal element(s), and min the minimal(s). Clearly, in this case we may have several lower and upper names for a given R.
85
+
86
+ We can now proceed and describe the ostensive articulation. Consider two sources $S_i : Q_i \to P(Obj_i)$, and $S_j : Q_j \to P(Obj_j)$. Ostensive articulation is
87
+ ---PAGE_BREAK---
88
+
89
+ possible only if their domains are not disjoint, i.e. if $Obj_i \cap Obj_j \neq \emptyset$. Let $C$ denote their common domain, i.e. $C = Obj_i \cap Obj_j$. The method that we shall describe yields relationships that are extensionally valid in $C$.
90
+
91
+ Suppose that $S_i$ wants to establish an articulation $a_{i,j}$ to a source $S_j$. An articulation $a_{i,j}$ can contain relationships of the form:
92
+
93
+ (i) $q_i \geq q_j$,
94
+
95
+ (ii) $q_i \leq q_j$
96
+
97
+ where $q_i \in Q_i$, $q_j \in Q_j$. These relationships have the following meaning:
98
+
99
+ (i) $q_i \geq q_j$ means that $S_i(q_i) \cap C \supseteq S_j(q_j) \cap C$
100
+
101
+ (ii) $q_i \leq q_j$ means that $S_i(q_i) \cap C \subseteq S_j(q_j) \cap C$
102
+
103
+ Before describing ostensive articulation let us make a couple of remarks. The first is that the form (i or ii) of the relationships of an articulation depends on the internal structure and functioning of the source that uses the articulation. For instance, suppose that $S_i$ acts as a mediator over $S_j$. If $S_i$ wants to compute complete (with respect to $C$) answers, then it should use only relationships of type (i) during query translation. On the other hand, if $S_i$ wants to compute sound (with respect to $C$) answers then it should use relationships of type (ii) (e.g. see [21]).
104
+
105
+ Another interesting remark is that if $S_i$ is a mediator that adopts a global-as-view modeling approach, then all $q_i$ that appear in $a_{i,j}$ are primitive concepts. On the other hand, if $S_i$ adopts a local-as-view approach then all $q_j$ that appear in $a_{i,j}$ are primitive concepts of $S_j$.
106
+
107
+ Below we describe ostensive articulation for the more general case where $S_i$ is interested in relationships of both, (i) and (ii), types, and where $q_i, q_j$ can be arbitrary queries. Let $n_j^-$ and $n_j^+$ be the naming functions of $S_j$ as defined earlier. Also let $S_i^c(q) = S_i(q) \cap C$ and $S_j^c(q) = S_j(q) \cap C$. Now suppose that $S_i$ wants to articulate a query $q_i \in Q_i$. The query $q_i$ should be articulated as follows:
108
+
109
+ $$
110
+ \begin{aligned}
111
q_i &\ge n_j^-(S_i^c(q_i)) && \text{if } S_i^c(q_i) \supseteq S_j^c(n_j^-(S_i^c(q_i))) \\
112
q_i &\le n_j^-(S_i^c(q_i)) && \text{if } S_i^c(q_i) \subseteq S_j^c(n_j^-(S_i^c(q_i))) \\
113
q_i &\ge n_j^+(S_i^c(q_i)) && \text{if } S_i^c(q_i) \supseteq S_j^c(n_j^+(S_i^c(q_i))) \\
114
q_i &\le n_j^+(S_i^c(q_i)) && \text{if } S_i^c(q_i) \subseteq S_j^c(n_j^+(S_i^c(q_i)))
115
+ \end{aligned}
116
+ $$
117
+
118
+ Observe the role of the naming functions. Instead of checking all queries in $Q_j$, $S_j$ just uses its naming functions in order to compute the lower and the upper name of the set $S_i(q_i) \cap C$. Recall that the naming functions (by definition) return the most precise (semantically closest) mapping for $q_i$, thus this is all that we need.
119
+
120
+ Furthermore, as we shall see below, the above relationships can be obtained without extensive communication. In fact, they can be obtained by a quite simple and efficient (in terms of exchanged messages) distributed protocol. The protocol
121
+ ---PAGE_BREAK---
122
+
123
+ $$S_i: \begin{array}{l} (1)\ A := S_i(q_i); \\ (2)\ \text{send } A \text{ to } S_j; \end{array}$$
124
+
125
+ $$S_j: \begin{array}{l} (3)\ F := A \setminus Obj_j; \\ (4)\ A := A \cap Obj_j; \\ (5)\ down := n_j^-(A); \; Bdown := S_j(down); \\ (6)\ up := n_j^+(A); \; Bup := S_j(up); \\ (7)\ \text{send } (F, (down, Bdown), (up, Bup)) \text{ to } S_i; \end{array}$$
126
+
127
+ $$S_i: \begin{array}{l} (8) \text{If } (A \setminus F) \supseteq (Bdown \cap Obj_i) \text{ then set } q_i \geq \text{down}; \\ (9) \text{If } (A \setminus F) \subseteq (Bdown \cap Obj_i) \text{ then set } q_i \leq \text{down}; \\ (10) \text{If } (A \setminus F) \supseteq (Bup \cap Obj_i) \text{ then set } q_i \geq \text{up}; \\ (11) \text{If } (A \setminus F) \subseteq (Bup \cap Obj_i) \text{ then set } q_i \leq \text{up} \end{array}$$
128
+
129
+ Fig. 1. The ostensive articulation protocol
130
+
131
+ is sketched in Figure 1. Note that only two messages have to be exchanged between $S_i$ and $S_j$ for articulating the query $q_i$.
132
+
133
+ Another interesting point is that $S_i$ and $S_j$ do not have to know (or compute) their common domain $C$ a priori, as $C$ is "discovered" during the run of the protocol (this is the reason why $S_j$ stores in $F$ and sends back to $S_i$ those objects that do not belong to $Obj_j$).
134
+
135
+ In the case where $Q_N \subset Q$, the only difference is that the message that $S_j$ sends to $S_i$ may contain more than one *up* and *down* queries.
136
+
137
+ A source can run the above protocol in order to articulate one, several or all of its terms (or queries).
138
+
139
+ ## 3 Ostensive Articulation for Taxonomy-based Sources
140
+
141
+ Here we shall specialize ostensive articulation for the case of taxonomy-based sources. Examples of this kind of sources include Web Catalogs (like Yahoo!, Open Directory) and Classification Schemes used in Library and Information Science
142
+
143
+ We view a taxonomy-based source $S$ as a quadruple $S = (T, \preceq, I, Q)$ where:
144
+
145
+ - $T$ is a finite set of names called *terms*, e.g. **Canaries**, **Birds**.
146
+
147
+ - $\preceq$ is a reflexive and transitive binary relation over $T$ called *subsumption*, e.g. **Canaries** $\preceq$ **Birds**.
148
+
149
+ - $I$ is a function $I: T \to P(Obj)$ called *interpretation* where *Obj* is a finite set of objects. For example *Obj* = {1, ..., 100} and $I(\mathbf{Canaries}) = \{1, 3, 4\}$.
150
+
151
+ - $Q$ is the set of all queries defined by the grammar $q ::= t \mid q \wedge q' \mid q \vee q' \mid \neg q \mid (q)$ where $t$ is a term in $T$.
152
+
153
+ Figure 2 shows an example of a source consisting of 8 terms and 3 objects².
154
+
155
+ We assume that every terminology $T$ also contains two special terms, the *top term*, denoted by $\top$, and the *bottom term*, denoted by $\bot$. The top term subsumes
156
+
157
+ ² We illustrate only the Hasse diagram of the subsumption relation.
158
+ ---PAGE_BREAK---
159
+
160
+ **Fig. 2.** Graphical representation of a source
161
+
162
+ every other term *t*, i.e. $t \preceq \top$. The bottom term is strictly subsumed by every
163
+ other term *t* different from top and bottom, i.e. $\bot \preceq \bot$, $\bot \preceq \top$, and $\bot \prec t$,
164
+ for every *t* such that $t \neq \top$ and $t \neq \bot$. We also assume that $I(\bot) = \emptyset$ in every
165
+ interpretation *I*.
166
+
167
+ The answer $S(q)$ of a query $q$ is defined as follows (for more see [33]):
168
+
169
+ $$
170
+ \begin{align*}
171
+ S(t) &= \bigcup \{ I(t') \mid t' \preceq t \} \\
172
+ S(q \land q') &= S(q) \cap S(q') \\
173
+ S(q \lor q') &= S(q) \cup S(q') \\
174
+ S(\neg q) &= \mathit{Obj} \setminus S(q)
175
+ \end{align*}
176
+ $$
177
+
178
+ For example, in Figure 2 we have $S(\text{DB}) = \{\text{1,2}\}$, as $S(\text{DB}) = I(\text{DB}) \cup I(\text{Databases}) \cup I(\text{RDB}) = \{\text{1,2}\}$, and $S(\text{DB} \land \text{JournalArticle}) = \{\text{1}\}$. We define the *index* of an object *o* with respect to an interpretation *I*, denoted by $D_I(o)$, as follows: $D_I(o) = \bigwedge \{t \in T \mid o \in I(t)\}$. For example, in the source of Figure 2 we have $D_I(3) = \text{JournalArticle}$ and $D_I(1) = \text{RDB} \land \text{JournalArticle}$.
179
+
180
+ Let us now define the naming functions for this kind of sources. We define the
181
+ set of names $Q_N$ as follows: $Q_N = \{ q \in Q \mid q \text{ does not contain negation "¬"} \}$.
182
+ We exclude queries with negation because, as showed in [32], if such queries
183
+ appear in articulations then we may get systems which do not have a unique
184
+ minimal model and this makes query evaluation more complicated and less effi-
185
+ cient.
186
+
187
+ The lower and upper name of a set $R \subseteq Obj$ are defined as in the general
188
+ framework and clearly ($Q_N, \leq$) is a complete lattice. What remains is to find
189
+ finite length queries that are equivalent to $n^-(R)$ and $n^+(R)$.
190
+
191
+ **Theorem 1.**
192
+
193
+ $$
194
+ \begin{align*}
195
+ n^{-}(R) &\sim \bigvee \{ D_{I}(o) \mid o \in R, S(D_{I}(o)) \subseteq R \} \\
196
+ n^{+}(R) &\sim \bigvee \{ D_{I}(o) \mid o \in R \}
197
+ \end{align*}
198
+ $$
199
+
200
+ The proof is given in [34]. It is clear that the above queries have finite length,
201
+ hence they are the queries that we are looking for. For this purpose, hereafter
202
+ we shall use $n^-(R)$ and $n^+(R)$ to denote the above queries. Note that if the
203
+ set $\{o \in R \mid S(D_I(o)) \subseteq R\}$ is empty then we consider that $n^-(R) = \perp$. Some
204
+ examples from the source shown in Figure 3 follow:
205
+ ---PAGE_BREAK---
206
+
207
+ Fig. 3. Example of a source
208
+
209
+ $$
210
+ \begin{align*}
211
n^+(\{1,3\}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) \\
212
n^-(\{1,3\}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) \\
213
n^+(\{1,3,5\}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) \lor (\text{apples} \land \text{red}) \\
214
n^-(\{1,3,5\}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green})
215
+ \end{align*}
216
+ $$
217
+
218
+ Let us now demonstrate the articulation protocol for taxonomy-based sources.
219
+ Consider the sources shown in Figure 4 and suppose that $S_1$ wants to articulate
220
+ its terms with queries of $S_2$. In the following examples we omit the set $F$ (from
221
+ the message of line (7) of Figure 1) as it is always empty.
222
+
223
+ Fig. 4. An example of two sources S₁ and S₂
224
+
225
+ The steps for articulating the term **cabbages** follow:
226
+
227
+ $$
228
+ \begin{array}{l}
229
+ S_1 \rightarrow S_2 : \{\text{1}\} \\
230
+ S_2 \rightarrow S_1 : (\bot, \emptyset), (\mathbf{green}, \{1,5,6\}) \\
231
+ S_1 : \mathbf{cabbages} \preceq \mathbf{green}
232
+ \end{array}
233
+ $$
234
+
235
+ The steps for articulating the term apples follow:
236
+
237
+ $$
238
+ \begin{array}{l}
239
+ S_1 \rightarrow S_2 : \{\mathbf{4}, \mathbf{5}\} \\
240
+ S_2 \rightarrow S_1 : (\bot, \emptyset), (\mathbf{red} \lor \mathbf{green}, \{\mathbf{1}, \mathbf{2}, \mathbf{3}, \mathbf{4}, \mathbf{5}, \mathbf{6}\}) \\
241
+ S_1 : \mathbf{apples} \preceq \mathbf{red} \lor \mathbf{green}
242
+ \end{array}
243
+ $$
244
+
245
+ The steps for articulating the term foods follow:
246
+ ---PAGE_BREAK---
247
+
248
+ $$
249
+ \begin{array}{l@{\quad}l}
250
+ S_1 \to S_2 & : \{1,2,3,4,5,6,7\} \\
251
+ S_2 \to S_1 & : (\text{red} \lor \text{green}, \{1,2,3,4,5,6\}), \\
252
+ & (\text{red} \lor \text{green} \lor \text{yellow}, \{1,2,3,4,5,6,7,8\}) \\
253
+ S_1 & : \text{foods} \succeq \text{red} \lor \text{green}, \\
254
+ & \qquad \text{foods} \sim \text{red} \lor \text{green} \lor \text{yellow}
255
+ \end{array}
256
+ $$
257
+
258
+ If $S_1$ runs the protocol for each term of its taxonomy, it will infer the following relationships:
259
+
260
+ cabbages $\preceq$ green
261
+ tomatoes $\preceq$ red
262
+ apples $\preceq$ red $\vee$ green
263
+ bananas $\preceq$ green $\vee$ yellow
264
+ vegetables $\preceq$ green $\vee$ red
265
+ fruits $\preceq$ red $\vee$ green $\vee$ yellow
266
+ foods $\succeq$ red $\vee$ green
267
+ foods $\sim$ red $\vee$ green $\vee$ yellow
268
+
269
+ If $S_2$ runs this protocol for each term of its taxonomy, it will infer the following relationships:
270
+
271
+ red $\succeq$ tomatoes
272
+ red $\preceq$ tomatoes $\vee$ apples
273
+ green $\succeq$ cabbages
274
+ green $\preceq$ cabbages $\vee$ apples $\vee$ bananas
275
+ yellow $\preceq$ bananas
276
+ color $\sim$ cabbages $\vee$ tomatoes $\vee$ apples $\vee$ bananas
277
+
278
+ The protocol can be used not only for articulating single terms to queries, but also for articulating queries to queries. For example, the steps for articulating the query apples $\lor$ bananas follow:
279
+
280
+ $$
281
+ \begin{array}{l}
282
+ S_1 \to S_2 : \{4, 5, 6, 7\} \\
283
+ S_2 \to S_1 : (\text{red} \lor \text{green} \lor \text{yellow}, \{1,2,3,4,5,6,7,8\}) \\
284
+ S_1 : \text{apples} \lor \text{bananas} \preceq \text{red} \lor \text{green} \lor \text{yellow}
285
+ \end{array}
286
+ $$
287
+
288
+ Now consider the case where we do not want to articulate terms with queries, but terms with *single terms* only, i.e. consider the case where $Q_N = T$. Note that now $lub\{t \in T | S(t) \subseteq R\}$ and $glb\{t \in T | S(t) \supseteq R\}$ do not always exist. For example, consider the source shown in Figure 5.(a). Note that $n^+(\{1\}) = glb\{t, t'\}$ which does not exist. For the source shown in Figure 5.(b) note that $n^-(\{1,2\}) = lub\{t,t'\}$ which does not exist. Therefore, we can define the upper and lower names of a set $R$ as follows: $n^-(R) = max(\{t \in T | S(t) \subseteq R\})$ and $n^+(R) = min(\{t \in T | S(t) \supseteq R\})$. Consider for example the source shown in Figure 5.(c). Here we have:
289
+
290
+ $$ n^{-}(\{1, 2, 3\}) = max(\{c, d, e, b\}) = \{b\} $$
291
+
292
+ $$ n^{+}(\{1, 2, 3\}) = min(\{b, a\}) = \{b\} $$
293
+ ---PAGE_BREAK---
294
+
295
+ Fig. 5. An example of three sources
296
+
297
+ Certainly, the relationships obtained by the term-to-term articulation are less expressive than the relationships obtained by the term-to-queries articulation. For instance, suppose that we want to articulate the terms of the source $S_1$ in each one of the three examples that are shown in Figure 6. Table 1 shows the articulation $a_{1,2}$ that is derived by the *term-to-term* articulation and the *term-to-queries* articulation in each of these three examples.
298
+
299
+ Fig. 6. Three examples
300
+
301
+ <table><thead><tr><th>Example</th><th colspan="2">$a_{1,2}$</th></tr><tr><th></th><th>term-to-term art.</th><th>term-to-query art.</th></tr></thead><tbody><tr><td>Figure 6.(a)</td><td>$a \supseteq b$<br>$a \supseteq b'$</td><td>$a \sim b \lor b'$</td></tr><tr><td>Figure 6.(b)</td><td>$a \preceq b$<br>$a \preceq b'$</td><td>$a \sim b \land b'$<br>$a' \preceq b \lor b'$</td></tr><tr><td>Figure 6.(c)</td><td></td><td>$a \preceq b \lor b'$<br>$a' \preceq b \lor b'$</td></tr></tbody></table>
302
+
303
+ **Table 1.** Term-to-term vs term-to-query articulation
304
+
305
+ ## 4 Ostensive Articulation in Taxonomy-based P2P Systems
306
+
307
+ We demonstrated how ostensive articulation can be applied on taxonomy-based
308
+ sources for constructing inter-taxonomy articulations. Ostensive articulation is
309
+ ---PAGE_BREAK---
310
+
311
+ possible in a P2P system only if the domain of the peers is not disjoint. We also assume that every object of *Obj* has the same identity (e.g. object identifier, URI) in all sources. For domains where no accepted identity/naming standards exist, mapping tables such as the ones proposed in [18] can be employed to tackle this problem. Also techniques from the area of information fusion (that aim at recognizing different objects that represent the same reality) could be also employed for the same purpose. If however the domain of the peers is disjoint then we cannot derive any articulation. One method to tackle this problem is to employ reference collections. For instance, each peer can have its own taxonomy, but before joining the network it must first index the objects of a small object set. Consequently, peers can build automatically the desired articulations by running the articulation protocol on this reference collection. Running the protocol on the reference collection *C* means that the sources $S_1$ and $S_2$ instead of using $S_1(q_1)$ and $S_2(q_2)$, they use $S_1(q_1) \cap C$ and $S_2(q_2) \cap C$ respectively. Also note that the employment of reference collections can: (a) enhance the accuracy of the resulting articulation, and/or (b) enhance efficiency. For instance, if *C* corresponds to a well known, thus well-indexed set of objects then it can improve the quality of the obtained articulations. For example in the case where $S_1$ and $S_2$ are bibliographic sources, *C* can be a set of 100 famous papers in computer science. A reference collection can also enhance the efficiency of the protocol since a smaller number of objects go back and forth. This is very important, especially in P2P where involved sources are distant.
312
+
313
+ In a P2P system of taxonomy-based sources, a source apart from object queries now accepts content-based queries, i.e. queries (e.g. boolean expressions) expressed in terms of its taxonomy. For answering a query a source may have to query the neighbor sources. The role of articulations during query evaluation has been described in [33] (for the mediator paradigm) and in [32] (for the P2P paradigm). Roughly, a source in a P2P network can serve any or all of the following roles: primary source, mediator, and query initiator. As a *primary* source it provides original content to the system and is the authoritative source of that data. Specifically, it consists of a taxonomy (i.e. a pair (*T*, $\le$)) plus an object base (i.e. an interpretation *I*) that describes a set of objects (*Obj*) in terms of the taxonomy. As a *mediator* it has a taxonomy but does not store or provide any content: its role is to provide a uniform query interface to other sources, i.e. it forwards the received queries after first selecting the sources to be queried and formulating the query to be sent to each one of them. These tasks are determined by the articulations of the mediator. As a *query initiator* it acts as client in the system and poses new queries. Figure 7 sketches graphically the architecture of a network consisting of four peers $S_1, ..., S_4$; two primary sources ($S_3$ and $S_4$), one mediator ($S_2$) and one source that is both primary and mediator ($S_1$). Triangles denote taxonomies, cylinders object bases, and circles inter-taxonomy mappings. $S_2$ is a mediator over $S_1, S_3$ and $S_4$, while $S_1$ is a mediator over $S_2$ and $S_3$. For more about this architecture and the associated semantics and query evaluation methods please refer to [32].
314
+ ---PAGE_BREAK---
315
+
316
+ Fig. 7. A P2P network based on taxonomies and inter-taxonomy mappings
317
+
318
+ ## 5 Conclusion
319
+
320
+ The contribution of this paper is a formal framework for ostensive data-driven articulation. Roughly, the approaches for linking two conceptual models or taxonomies can be broadly classified as either *model*-driven or *data*-driven.
321
+
322
+ The model-driven approach starts with a (theoretical) model of how the two taxonomies are constructed and how they are used. Subsequently, the mapping approaches have to address the compatibility, structural and semantic differences and heterogeneities that exist. This is done using software tools (that usually rely on lexical resources) that assist the designer during the articulation process (e.g. see [25, 29, 5, 24]).
323
+
324
+ On the other hand, in the *data-driven* approach the mappings are *discovered* by examining how terms are used in indexing the objects. The advantage of such an approach is that it does not make any assumptions on how the two taxonomies are constructed, or how they are used. All it requires is the presence of two databases that contain several objects in common. However, the data-driven approach does have inherent difficulties. First, unless one has a large collection of objects that have been indexed using *both* taxonomies, spurious correlation can result in inappropriate linking. Second, if a term is not assigned to any of the common objects, one cannot establish a link for that term. Third, rarely occurring terms can result in statistically insignificant links. Finally, the validation of data-driven approaches can only be statistical in nature. In spite of these inherent difficulties, data-driven approaches can be formalized and automated. However, most of the data-driven approaches that can be found in the literature are applicable only if the domain is a set of documents (texts) (e.g. [6, 16, 12, 20, 28]), and they cannot establish mappings between queries.
325
+
326
+ The technique described in this paper is quite general and expressive as it can be used for articulating not only single terms but also queries. Furthermore, it can be used for articulating the desired set of terms or queries (it is not obligatory to articulate the entire taxonomies). Another distinctive feature of this technique is that it can be implemented efficiently by a communication protocol, thus the involved sources do not have to reside on the same machine. Therefore it seems appropriate for automatic articulation in P2P systems which is probably the more challenging issue in P2P computing [9].
327
+
328
+ We also demonstrated how it can be applied to taxonomy-based sources. An interesting remark is that the proposed method can be applied not only to manually constructed taxonomies but also to taxonomies derived automatically on the basis of an inference service. For instance, it can be applied on sources
329
+ ---PAGE_BREAK---
330
+
331
+ indexed using taxonomies of compound terms which are defined algebraically [31]. Furthermore it can be applied on concept lattices formed using Description Logics (DL) [13].
332
+
333
+ One issue for further research, is to investigate how a source that wants to articulate a set $F \subseteq Q$ must use the described protocol in order to obtain the desired articulation with the minimal number of exchanged messages and the less network throughput. Another issue for further research is to investigate ostensive articulation for other kinds of sources.
334
+
335
+ ## Acknowledgements
336
+
337
+ The first author wants to thank his wife Tonia for being an endless source of happiness and inspiration.
338
+
339
+ ## References
340
+
341
+ 1. "About LEGION - The Grid OS" (www.appliedmeta.com/legion/about.html), 2000.
342
+
343
+ 2. "How Entropia Works" (www.entropia.com/how.asp), 2000.
344
+
345
+ 3. "Groove" (www.groove.net), 2001.
346
+
347
+ 4. "Napster" (www.napster.com), 2001.
348
+
349
+ 5. Bernd Amann and Irini Fundulaki. "Integrating Ontologies and Thesauri to Build RDF Schemas". In *Proceedings of the Third European Conference for Digital Libraries ECDL '99*, Paris, France, 1999.
350
+
351
+ 6. S. Amba. "Automatic Linking of Thesauri". In *Proceeding of SIGIR '96*, Zurich, Switzerland, 1996. ACM Press.
352
+
353
+ 7. T.E. Anderson, M. Dahlin, J. M. Neefe, D. A. Patterson, D. S. Roselli, and R. Wang. "Serverless Network File Systems". *SOSP*, 29(5), 1995.
354
+
355
+ 8. Tim Berners-Lee, James Hendler, and Ora Lassila. "The Semantic Web". *Scientific American*, May 2001.
356
+
357
+ 9. Philip A. Bernstein, F. Giunchiglia, A. Kementsietsidis, J. Mylopoulos, L. Serafini, and I. Zaihrayeu. "Data Management for Peer-to-Peer Computing: A Vision". In *Proceedings of WebDB02*, Madison, Wisconsin, June 2002.
358
+
359
+ 10. W. J. Bolosky, J. R. Douceur, D. Ely, and M. Theimer. "Feasibility of a Serverless Distributed File System Deployed on an Existing Set of Desktop PCs". In *Proceedings of Measurement and Modeling of Computer Systems*, June 2000.
360
+
361
+ 11. Diego Calvanese, Giuseppe De Giacomo, and Maurizio Lenzerini. A framework for ontology integration. In *Proc. of the 2001 Int. Semantic Web Working Symposium (SWWS 2001)*, pages 303-316, 2001.
362
+
363
+ 12. A. Doan, J. Madhavan, P. Domingos, and A. Halevy. "Learning to Map between Ontologies on the Semantic Web". In *Proceedings of the World-Wide Web Conference (WWW-2002)*, 2002.
364
+
365
+ 13. F.M. Donini, M. Lenzerini, D. Nardi, and A. Schaerf. "Reasoning in Description Logics", chapter 1. CSLI Publications, 1997.
366
+
367
+ 14. Steven Gribble, Alon Halevy, Zachary Ives, Maya Rodrig, and Dan Suciu. "What can Databases do for Peer-to-Peer?". In *Proceedings of WebDB01*, Santa Barbara, CA, 2001.
368
+ ---PAGE_BREAK---
369
+
370
+ 15. Alon Halevy, Zachary Ives, Peter Mork, and Igor Tatarinov. "Piazza: Data Management Infrastructure for Semantic Web Applications". In *Proceedings of WWW'2003*, May 2003.
371
+
372
+ 16. Heiko Hellweg, Jurgen Krause, Thomas Mandl, Jutta Marx, Matthias Muller, Peter Mutschke, and Robert Strötgen. "Treatment of Semantic Heterogeneity in Information Retrieval". Technical Report 23, Social Science Information Centre, May 2001. (http://www.gesis.org/en/publications/reports/iz working papers/).
373
+
374
+ 17. Vipul Kashyap and Amit Sheth. "Semantic Heterogeneity in Global Information Systems: the Role of Metadata, Context and Ontologies". In *Cooperative Information Systems: Trends and Directions*. Academic Press, 1998.
375
+
376
+ 18. A. Kementsietsidis, Marcelo Arenas, and Rene J. Miller. "Mapping Data in Peer-to-Peer Systems: Semantics and Algorithmic Issues". In *Int. Conf. on Management of Data, SIGMOD'2003*, San Diego, California, June 2003.
377
+
378
+ 19. J. Kubiatowicz, D. Bindel, Y. Chen, S. Czerwinski, P. Eaton, D. Geels, R. Gummadi, S. Rhea, H. Weatherspoon, W. Weimer, C. Wells, and B. Zhao. "Oceanstore: An Architecture for Global-Scale Persistent Storage". In *ASPLOS*, November 2000.
379
+
380
+ 20. M. Lacher and G. Groh. "Facilitating the Exchange of Explicit Knowledge Through Ontology Mappings". In *Proceedings of the 14th Int. FLAIRS Conference*, 2001.
381
+
382
+ 21. Maurizio Lenzerini. Data integration: A theoretical perspective. In *Proc. ACM PODS 2002*, pages 233–246, Madison, Wisconsin, USA, June 2002.
383
+
384
+ 22. Alon Y. Levy. "Answering Queries Using Views: A Survey". *VLDB Journal*, 2001.
385
+
386
+ 23. Bo Ling, Zhiguo Lu, Wee Siong Ng, BengChin Ooi, Kian-Lee Tan, and Aoying Zhou. "A Content-Based Resource Location Mechanism in PeerIS". In *Proceedings of the 3rd International Conference on Web Information Systems Engineering, WISE 2002*, Singapore, December 2002.
387
+
388
+ 24. Bernardo Magnini, Luciano Serafini, and Manuela Speranza. "Making Explicit the Hidden Semantics of Hierarchical Classification". In *Atti dell'Ottavo Congresso Nazionale dell'Associazione Italiana per l'Intelligenza Artificiale, LNCS. Springer Verlag*, 2003.
389
+
390
+ 25. P. Mitra, G. Wiederhold, and J. Jannink. "Semi-automatic Integration of Knowledge sources". In *Proc. of the 2nd Int. Conf. On Information FUSION*, 1999.
391
+
392
+ 26. Ruben Prieto-Diaz. "Implementing Faceted Classification for Software Reuse". *Communications of the ACM*, 34(5), 1991.
393
+
394
+ 27. S. R. Ranganathan. "The Colon Classification". In Susan Artandi, editor, *Vol IV of the Rutgers Series on Systems for the Intellectual Organization of Information*. New Brunswick, NJ: Graduate School of Library Science, Rutgers University, 1965.
395
+
396
+ 28. I. Ryutaro, T. Hideaki, and H. Shinichi. "Rule Induction for Concept Hierarchy Alignment". In *Proceedings of the 2nd Workshop on Ontology Learning at the 17th Int. Conf. on AI (IJCAI)*, 2001.
397
+
398
+ 29. Marios Sintichakis and Panos Constantopoulos. "A Method for Monolingual Thesauri Merging". In *Proceedings of 20th International Conference on Research and Development in Information Retrieval, ACM SIGIR'97*, Philadelphia, PA, USA, July 1997.
399
+
400
+ 30. Nicolas Spyratos, Yannis Tzitzikas, and Vassilis Christophides. "On Personalizing the Catalogs of Web Portals". In *15th International FLAIRS Conference, FLAIRS'02*, Pensacola, Florida, May 2002.
401
+
402
+ 31. Yannis Tzitzikas, Anastasia Analyti, Nicolas Spyratos, and Panos Constantopoulos. "An Algebraic Approach for Specifying Compound Terms in Faceted Taxonomies". In *13th European-Japanese Conference on Information Modelling and Knowledge Bases*, Kitakyushu, Japan, June 2003.
403
+ ---PAGE_BREAK---
404
+
405
+ 32. Yannis Tzitzikas, Carlo Meghini, and Nicolas Spyratos. "Taxonomy-based Conceptual Modeling for Peer-to-Peer Networks". In *Proceedings of 22nd Int. Conf. on Conceptual Modeling, ER'2003*, Chicago, Illinois, October 2003.
406
+
407
+ 33. Yannis Tzitzikas, Nicolas Spyratos, and Panos Constantopoulos. "Mediators over Ontology-based Information Sources". In *Second International Conference on Web Information Systems Engineering, WISE 2001*, Kyoto, Japan, December 2001.
408
+
409
+ 34. Yannis T. Tzitzikas. "*Collaborative Ontology-based Information Indexing and Retrieval*". PhD thesis, Department of Computer Science - University of Crete, September 2002.
samples/texts_merged/1836869.md ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Exact and Efficient Inference for Collective Flow
5
+ Diffusion Model via Minimum Convex Cost Flow Algorithm
6
+
7
+ Yasunori Akagi,¹ Takuya Nishimura,¹ Yusuke Tanaka,¹ Takeshi Kurashima,¹ Hiroyuki Toda¹
8
+
9
+ ¹NTT Service Evolution Laboratories, NTT Corporation,
10
+ 1-1 Hikari-no-oka, Yokosuka-Shi, Kanagawa, 239-0847, Japan
11
+ {yasunori.akagi.cu, takuya.nishimura.fk, yusuke.tanaka.rh, takeshi.kurashima.uf, hiroyuki.toda.xb}@hco.ntt.co.jp
12
+
13
+ ## Abstract
14
+
15
+ Collective Flow Diffusion Model (CFDM) is a general framework to find the hidden movements underlying aggregated population data. The key procedure in CFDM analysis is MAP inference of hidden variables. Unfortunately, existing approaches fail to offer exact MAP inferences, only approximate versions, and take a lot of computation time when applied to large scale problems. In this paper, we propose an exact and efficient method for MAP inference in CFDM. Our key idea is formulating the MAP inference problem as a combinatorial optimization problem called Minimum Convex Cost Flow Problem (C-MCFP) with no approximation or continuous relaxation. On the basis of this formulation, we propose an efficient inference method that employs the C-MCFP algorithm as a subroutine. Our experiments on synthetic and real datasets show that the proposed method is effective both in single MAP inference and people flow estimation with EM algorithm.
16
+
17
+ ## 1. Introduction
18
+
19
+ With recent advances in GPS, Wi-Fi, and various sensors, the importance of location information has grown and is being utilized in various fields. However, it is often difficult to obtain data about individual movements because of privacy concerns or the difficulty of tracking individuals over time. Instead, aggregated count data is relatively easy to obtain as it does not include individual movement information. For example, mobile spatial statistics (Terada, Nagata, and Kobayashi 2013), which is the hourly population data of fixed size square grids calculated from mobile network operational data, are available for purchase in Japan. As another example, traffic data is often obtained not in the form of tracking data of individual cars, but in the form of count data acquired by cameras or sensors installed on road networks (Yang and Zhou 1998; Morimura, Osogami, and Idé 2013).
20
+
21
+ Although there are various uses for these aggregated count data, their applicability is limited because they do not contain explicit information about people movements. In order to utilize such data, Collective Graphical Model
22
+
23
+ (CGM)(Sheldon and Dietterich 2011), which enables us to conduct learning and inference with aggregated count data, was proposed. In particular, Collective Flow Diffusion Model (CFDM) (Kumar, Sheldon, and Srivastava 2013), which is a special case of CGM, has been proposed to infer people flows between the areas by modeling individual movements via a Markov chain approach; it has been applied to the analysis of the hidden movements behind observed count data in a traffic network (Kumar, Sheldon, and Srivastava 2013), urban space (Iwata et al. 2017; Akagi et al. 2018; Iwata and Shimizu 2019), amusement park (Du, Kumar, and Varakantham 2014) and exhibition halls (Tanaka et al. 2018).
24
+
25
+ An important function in CFDM analysis is MAP (maximum a posteriori) inference of the number of moving people from observed population data and parameters of the probabilistic model. This process is mainly used in two ways: (i) As a method for recovering people flow given observed population data and a human mobility model. Even if we can design a probabilistic model of human mobility using domain knowledge or estimate the model using another small set of movement (trajectory) data, we have to conduct MAP inference in order to know the number of people moving between areas. (ii) As a method for conducting E-step in the EM (Expectation Maximization) algorithm to estimate people flow and parameters of the probabilistic model simultaneously. Although E-step was implemented by the well-designed MCMC (Sheldon and Dietterich 2011) in the original CFDM proposal, its scalability was problematic. In order to address this issue, a method that uses MAP inference as an alternative to the regular expectation operation was widely used in subsequent research (Iwata et al. 2017; Akagi et al. 2018; Tanaka et al. 2018).
26
+
27
+ Although methods for realizing MAP inference for CFDM are very important, previous proposals have several crucial drawbacks. (i) They do not provide exact MAP inference because they use continuous relaxation and Stirling's approximation. (ii) Each optimum solution element is non-integer because of continuous relaxation. As a result, the optimum solutions are dense with many non-zero elements and each solution occupies a lot of memory. (iii) When we deal with large scale problems, a lot of computation time is still
28
+
29
+ Copyright © 2020, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.
30
+ ---PAGE_BREAK---
31
+
32
+ needed to solve the optimization problem.
33
+
34
+ In this paper, we propose a novel method for MAP inference in CFDM that overcomes the shortcomings of previous approaches. Our key idea is formulating the MAP inference problem in CFDM as a combinatorial optimization problem called (non-linear) Minimum Cost Flow Problem (MCFP). Moreover, we prove that all cost functions of the MCFP are "discrete convex functions", discrete analogues of the continuous convex function. This fact indicates that the formulated MCFP is a Minimum Convex Cost Flow Problem (C-MCFP) variant, which is an efficiently solvable subclass of MCFP. On the basis of this formulation, we propose an efficient inference method that employs the C-MCFP algorithm as a subroutine. The proposal has the following advantages:
35
+
36
+ 1. It offers exact MAP inference as no approximation is used.
37
+
38
+ 2. Optimum solution elements are integers, which is consistent with the number of moving people. Moreover, the solution tends to be sparse and we can hold it with less memory by use of the sparse matrix data structure.
39
+
40
+ 3. By utilizing efficient algorithms for C-MCFP, fast estimation is possible. In addition, it is easy to use in practice because it is not necessary to set hyperparameters, and the calculation time is relatively insensitive to the probabilistic models and the optimum solutions.
41
+
42
+ Our results are significant in that they bridge two distinct research topics, graph algorithms and CFDM inference. This work is the first to regard CFDM inference as a discrete optimization problem on a graph (all efficient existing methods transform the inference problem into a continuous optimization problem via approximation). Our non-trivial finding of the discrete convexity of the cost function is an important key in revealing the hidden relationship between graph algorithms and inference in collective flow diffusion.
43
+
44
+ Experiments on synthetic and real datasets show that the proposed method is effective for MAP inference in terms of both running time and solution quality such as sparsity. Of particular note, running time is accelerated 10 times or more and sparsity of optimum solution is dramatically increased in most cases. Moreover, we use the proposal to conduct people flow estimation via the EM algorithm and confirm its effectiveness.
45
+
46
+ ## 2. Problem Setting
47
+
48
+ For positive integer $k$, we denote $[k] := \{1, \dots, k\}$. Suppose that the target space is discretized into $n$ distinct areas. The people who were in area $i \in [n]$ at timestep $t$ will stay in $i$ or move to another area to be observed in area $j \in \Gamma_i$ at timestep $t+1$, where $\Gamma_i \subseteq [n]$ is the set of areas that can be moved to from area $i$. This process will be repeated for each $t \in [T-1]$, where $T$ is the total number of timesteps.
49
+
50
+ The problem we address in this paper is formulated as follows. Suppose we are given the population of area $i$ at timestep $t$, $\dot{N}_{t,i}$ ($i \in [n], t \in [T]$). Our goal is to estimate the number of people who leave area $i$ at time $t$ and whose next area is $j$ at time $t+1$, $M_{tij}$ ($i \in [n], j \in [n], t \in [T-1]$). Figure 1 shows an example of this problem setting.
51
+
52
+ Figure 1: An example of the problem setting where the number of areas $n = 3$ and the number of total timesteps $T = 3$.
53
+
54
+ # 3. Background
55
+
56
+ ## 3.1 Collective Flow Diffusion Model (CFDM)
57
+
58
+ Let $\theta_i = \{\theta_{ij}\}_{j \in \Gamma_i} (\sum_{j \in \Gamma_i} \theta_{ij} = 1)$ be the transition probability from area $i$ to other areas (including $i$ itself). We here assume $\theta_i$ does not depend on timestep $t$. Given population $N_{t,i}$ and transition probability $\theta_i$, the transition population $M_{ti} = \{M_{tij}\}_{j \in \Gamma_i} (t \in [T-1], i \in [n])$ is assumed to be decided by the following multinomial distribution: $M_{ti} \sim \text{Multi}(N_{t,i}, \theta_i)$. Given $\mathcal{N} = \{N_{t,i} | t \in [T], i \in [n]\}$ and $\mathcal{M} = \{M_{ti} | t \in [T-1], i \in [n]\}$, the likelihood function of $\theta = \{\theta_i | i \in [n]\}$ is given by
59
+
60
+ $$ P(\mathcal{M} | \mathcal{N}, \theta) \propto \prod_{t=1}^{T-1} \prod_{i \in [n]} \left( \frac{N_{t,i}!}{\prod_{j \in \Gamma_i} M_{tij}!} \prod_{j \in \Gamma_i} \theta_{ij}^{M_{tij}} \right). \quad (1) $$
61
+
62
+ In addition, the population in each area, $N_{t,i}$, and the transition population between areas, $M_{ti}$, satisfy the following two relationships $N_{t,i} = \sum_{j \in \Gamma_i} M_{tij}$, $N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji}$ ($t \in [T-1], i \in [n]$), which represent the law of conservation in the number of people.
63
+
64
+ Our purpose is to estimate the true number of people moving between areas. We consider two problems: (i) estimation of $\mathcal{M}$ given $\mathcal{N}$ and $\theta$, and (ii) estimation of $\mathcal{M}$ and $\theta$ given only $\mathcal{N}$. The first problem, includes, for example, the case where it is possible to design a human movement model (i.e. $\theta$) in the target space based on domain knowledge, geographical information, or other data related to people movement such as a small amount of trajectory data. The second problem corresponds to the case that there is no clue as to $\theta$ and it is necessary to estimate everything from $\mathcal{N}$.
65
+
66
+ In any case, an important subroutine in achieving our pur-
67
+ ---PAGE_BREAK---
68
+
69
+ pose is solving the following MAP inference problem:
70
+
71
+ $$
72
+ \begin{align}
73
+ \max_{M} \quad & P(M | N, \theta) \nonumber \\
74
+ \text{s.t.} \quad & N_{t,i} = \sum_{j \in \Gamma_i} M_{tij} \quad (t \in [T-1], i \in [n]), \tag{2} \\
75
+ & N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji} \quad (t \in [T-1], i \in [n]), \nonumber \\
76
+ & M_{tij} \in \mathbb{Z}_{\ge 0} \quad (t \in [T-1], i \in [n], j \in \Gamma_i). \nonumber
77
+ \end{align}
78
+ $$
79
+
80
+ In the first problem, the optimum solution of (2) is the de-
81
+ sired answer. A common approach to solving the second
82
+ problem is to estimate, alternatively, *M* and *θ* by the EM
83
+ algorithm considering *M* as a hidden variable and *θ* as pa-
84
+ rameter of a probabilistic model. Since high computational
85
+ cost is incurred in calculating the expected value of hidden
86
+ variable *M* by MCMC, a method to replace the expected
87
+ value with the solution of the MAP inference problem has
88
+ already been proposed (Sheldon et al. 2013) and is being
89
+ widely used to conduct E-step. This approach solves the opt-
90
+ imization problem (2) iteratively.
91
+
92
+ **3.2 Minimum Cost Flow Problems**
93
+
94
+ (Non-linear) Minimum Cost Flow Problem (MCFP) is a
95
+ combinatorial optimization problem defined as follows. Let
96
+ $G = (V, E)$ be a directed graph, where each node $i \in V$ has
97
+ supply value $b_i \in \mathbb{Z}$, and each edge $(i, j) \in E$ has capac-
98
+ ity $l_{ij} \in \mathbb{Z}_{\ge 0}$ and cost function $c_{ij}: \mathbb{Z}_{\ge 0} \rightarrow \mathbb{R}$. If $b_i > 0$
99
+ we call node $i$ to be source, and if $b_i < 0$ we call sink.
100
+ MCFP is the problem of finding a minimum cost flow on $G$
101
+ that satisfies the supply constraints at all nodes and capacity
102
+ constraints at all edges. MCFP can be described as follows:
103
+
104
+ $$
105
+ \begin{align}
106
+ \min_{x \in \mathbb{Z}^{|E|}} \quad & \sum_{(i,j) \in E} c_{ij}(x_{ij}) \notag \\
107
+ \text{s.t.} \quad & \sum_{j:(i,j) \in E} x_{ij} - \sum_{j:(j,i) \in E} x_{ji} = b_i \quad (i \in V), \tag{3} \\
108
+ & 0 \le x_{ij} \le l_{ij} \quad ((i,j) \in E). \notag
109
+ \end{align}
110
+ $$
111
+
112
+ Note that this paper considers the problems that restrict fea-
113
+ sible **x** to integer values i.e. **x** ∈ Z^{|E|}. Generally, MCFP (3)
114
+ is NP-hard and difficult to solve efficiently. However, spe-
115
+ cial cost functions make it possible to derive efficient opti-
116
+ mization algorithms. For example, MCFP with linear cost
117
+ functions, which is the most famous special case of MCFP,
118
+ is polynomial-time solvable and many efficient algorithms
119
+ have been developed (Kiraly and Kovacs 2012). Moreover,
120
+ Minimum Convex Cost Flow Problem (C-MCFP), in which
121
+ every cost function $c_{ij}$ satisfies “discrete convexity” $c_{ij}(x + 1) + c_{ij}(x - 1) \ge 2 \cdot c_{ij}(x)$ ($x = 1, 2, ...$), is known to be
122
+ an efficiently solvable subclass of MCFP (Ahuja, Magnanti,
123
+ and Orlin 1993).
124
+
125
+ 4. Proposed Method
126
+
127
+ 4.1 Formulation as C-MCFP
128
+
129
+ We show that the optimization problem (2) can be
130
+ formulated as C-MCFP. After taking the logarithm of
131
+
132
+ Figure 2: An example of MCFP formulation when the num-
133
+ ber of areas *n* = 3. *o* is the source and *d* is the sink of the
134
+ flow network. The capacity of edge $(o, u_i)$ equals $N_{t,i}$ and
135
+ the capacity of edge $(v_i, d)$ equals $N_{t+1,i}$.
136
+
137
+ the objective function (1) and omitting terms that
138
+ do not depend on M, the objective function equals
139
+ ∑<sub>t∈[T-1]</sub> ∑<sub>i∈[n]</sub> ∑<sub>j∈Γ<sub>i</sub></sub> (-log M<sub>tij</sub>! + M<sub>tij</sub> log θ<sub>ij</sub>). Since
140
+ we can split (2) into independently solvable T - 1 subprob-
141
+ lems by t, all we have to do is solve the minimization prob-
142
+ lems described as follows for each t ∈ [T - 1]:
143
+
144
+ $$
145
+ \begin{equation}
146
+ \begin{array}{ll}
147
+ \min_{M_t} & \displaystyle \sum_{i \in [n]} \sum_{j \in \Gamma_i} (\log M_{tij}! - M_{tij} \log \theta_{ij}) \\
148
+ \text{s.t.} & N_{t,i} = \sum_{j \in \Gamma_i} M_{tij} \quad (i \in [n]), \\
149
+ & N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji} \quad (i \in [n]), \\
150
+ & M_{tij} \in \mathbb{Z}_{\ge 0} \quad (i \in [n], j \in \Gamma_i).
151
+ \end{array}
152
+ \tag{4}
153
+ \end{equation}
154
+ $$
155
+
156
+ In order to formulate the problem (4) as MCFP, we con-
157
+ struct an instance by the procedure described below (an ex-
158
+ ample is shown in Figure 2):
159
+
160
+ 1. Let $V = \{u_i\}_{i \in [n]} \cup \{v_i\}_{i \in [n]} \cup \{o, d\}$. $u_i$ and $v_i$ correspond to area $i$ at timestep $t$ and timestep $t+1$, respectively. $o$ is the source node and $d$ is the sink node of the flow network.
161
+
162
+ 2. For $i \in [n]$, add edge $(o, u_i)$ with cost function $0$ (constant function) and capacity $N_{t,i}$.
163
+
164
+ 3. For $i \in [n]$, add edge $(v_i, d)$ with cost function $0$ and capacity $N_{t+1,i}$.
165
+
166
+ 4. For $i \in [n]$ and $j \in \Gamma_i$, add edge $(u_i, v_j)$ with cost function $f_{ij}(x) := \log x! - x \cdot \log \theta_{ij}$ and capacity $+\infty$.
167
+
168
+ 5. Set $b_o = \sum_{i \in [n]} N_{t,i}$, $b_d = -b_o = -\sum_{i \in [n]} N_{t,i}$ and $b_{u_i} = b_{v_i} = 0$ ($i \in [n]$).
169
+
170
+ For the MCFP instance constructed above, the following
171
+ holds.
172
+
173
+ **Proposition 1.** For $M_t^*$ defined by $M_{tij}^* = x_{u_i v_j}^* (i \in [n], j \in \Gamma_i)$ where $\boldsymbol{x}^*$ is the optimum solution of the MCFP
174
+ ---PAGE_BREAK---
175
+
176
+ instance constructed above, $M_t^*$ is an optimum solution of
177
+ the optimization problem (4).
178
+
179
+ **Proof of Proposition 1.** Let $\boldsymbol{x}$ be a feasible solution of the constructed MCFP. From the non-negativity of $x_{ij}$ and flow conservation constraints at node $o$ and $d$, $x_{o u_i} = N_{t,i}$ and $x_{v_i d} = N_{t+1,i}$ ($\forall i \in [n]$) must be satisfied. From these facts and flow conservation constraints at node $u_i$ and $v_i$, $N_{t,i} = \sum_{j \in \Gamma_i} x_{u_i v_j}$ and $N_{t+1,i} = \sum_{j \in \Gamma_i} x_{u_j v_i}$ ($\forall i \in [n]$) hold. Since we restrict $\boldsymbol{x}$ to integer values and total MCFP cost is $\sum_{i \in [n]} \sum_{j \in \Gamma_i} (\log x_{u_i v_j}! - x_{u_i v_j} \log \theta_{ij})$, the constructed MCFP is equivalent to the optimization problem (4), so the proposition holds. $\square$
180
+
181
+ **Proposition 2.** For the MCFP instance constructed above, all cost functions satisfy discrete convexity, i.e. $c_{ij}(x+1)+c_{ij}(x-1) \ge 2 \cdot c_{ij}(x)$ ($x=1,2,\dots$).
182
+
183
+ *Proof of Proposition 2.* It is clear that a constant function satisfies discrete convexity, so it is sufficient to check for $f_{ij}$. We have $f_{ij}(x+1) + f_{ij}(x-1) - 2 \cdot f_{ij}(x) = \log(x+1)! + \log(x-1)! - 2 \cdot \log x! = \log(x+1) - \log x \ge 0$. This confirms the discrete convexity of $f_{ij}$. $\square$
184
+
185
+ Proposition 1 says that by solving MCFP we can get an
186
+ optimum solution of problem (4). Proposition 2 shows that
187
+ the constructed MCFP instance belongs to C-MCFP. Since
188
+ C-MCFP is an efficiently solvable subclass of MCFP as de-
189
+ scribed in 3.2, we can design efficient algorithms to tackle
190
+ the original MAP inference problem (4).
191
+
192
+ Note that problem (4) may not have any feasible solution if $\sum_{i \in [n]} N_{t,i} \neq \sum_{i \in [n]} N_{t+1,i}$ holds or $|\Gamma_i|$ ($i \in [n]$) is small. Such cases occur frequently when dealing with noisy real data. Even in such cases, our method with slight modification can output reasonable solutions. We describe this modification in Section 4.3.
193
+
194
+ ## 4.2 Algorithm
195
+
196
+ We describe here an algorithm that can find exact optimum
197
+ solutions of C-MCFP, called Capacity Scaling algorithm
198
+ (CS) (Minoux 1986). CS is an algorithm that successively
199
+ augments flow along the shortest path from source to sink
200
+ in a residual graph, which is an auxiliary graph calculated
201
+ from the current flow. By maintaining a scalar value, called
202
+ potential, on each node and modifying edge costs to ensure
203
+ that they are non-negative, we can utilize Dijkstra's algo-
204
+ rithm (Dijkstra 1959), which is a fast algorithm for shortest
205
+ path search in graphs with non-negative edge costs. In or-
206
+ der to reduce the number of shortest path searches, CS is
207
+ designed to carry sufficiently large number of flows in each
208
+ path augmentation. The algorithm utilized in our work is the
209
+ one described in Chapter 14.5 of (Ahuja, Magnanti, and Or-
210
+ lin 1993). Although this algorithm is based on the idea of
211
+ (Minoux 1986), some changes have been made, so its com-
212
+ putation complexity differs from that of (Minoux 1986).
213
+
214
+ Given a C-MCFP instance with graph $G = (V, E)$, The-
215
+ orem 14.1 of (Ahuja, Magnanti, and Orlin 1993) claims that
216
+ CS runs in $O(|E| \cdot \log U \cdot S)$, where $U := \max_{i \in V} |b_i|$ is the
217
+ maximum absolute value of flow demand and $S$ is the time
218
+ complexity for solving a shortest path problem in graph $G$
219
+
220
+ **Algorithm 1** Algorithm for solving MAP inference problem (2) via capacity scaling algorithm
221
+
222
+ **Require:** Population of each area and time *N*, transition matrix $\theta$
223
+ **for all** *t* ∈ [*T* − 1] **do**
224
+ Construct C-MCFP instance based on *N*<sub>*t*</sub>, *N*<sub>*t*+1</sub>, θ by the procedure described in Section 4.1
225
+ Get optimum solution *x*<sup>*</sup> of constructed C-MCFP by capacity scaling algorithm
226
+ **for all** *i* ∈ [*n*] **do**
227
+ **for all** *j* ∈ Γ<sub>*i*</sub> **do**
228
+ $M_{tij}^* \leftarrow x_{u_i v_j}^*$
229
+ **end for**
230
+ **end for**
231
+ **end for**
232
+ **return** *M*<sup>*</sup>
233
+
234
+ with non-negative edge costs. According to Dijkstra's algo-
235
+ rithm with binary heap, *S* is bounded by *O*(*|E|* · log *|V|*), so
236
+ the total time complexity is *O*(*|E|*<sup>2</sup> · log *|V|* · log *U*). When
237
+ this algorithm is used to solve problem (4), its time complex-
238
+ ity is *O*(*m*<sup>2</sup> · log *n* · log *F*), where *n* is the number of areas,
239
+ *m* is the number of edges of the adjacency graph between
240
+ the areas determined by Γ<sub>*i*</sub> (*i* ∈ [*n*]) and *F* := ∑<sub>*i*∈[*n*]</sub> *N*<sub>*t*,i*</sub>
241
+ is the total population of targeted areas. Note that, the to-
242
+ tal complexity does not depend on the maximum value of
243
+ edge capacity, and it is guaranteed that the algorithm runs
244
+ efficiently even if the graph contains an edge with infinite
245
+ capacity.
246
+
247
+ CS is a suitable algorithm for solving our problem in
248
+ the following sense: When dealing with real-world datasets,
249
+ sometimes *F* is extremely large (for example, in mobile spa-
250
+ tial statistics in the Greater Tokyo Area, which consists of
251
+ population distribution data by time and area, *F* is about
252
+ 10<sup>6</sup>–10<sup>7</sup>). Therefore, the algorithm used to solve the formu-
253
+ lated C-MCFP should have sub-linear time complexity with
254
+ respect to *F*. Accordingly, CS is appropriate since its time
255
+ complexity is proportional to log *F*.
256
+
257
+ The overall algorithm for solving the original MAP infer-
258
+ ence problem (2) is summarized in Algorithm 1.
259
+
260
+ ## 4.3 Handling with Infeasible Cases
261
+
262
+ As mentioned in Section 4.1, when dealing with real-world
263
+ data, there may not be feasible solution to problem (4). To
264
+ address this problem and output a reasonable solution, we
265
+ add a few more steps in the instance construction procedure
266
+ described in Section 4.1.
267
+
268
+ First, we add edge (o,d) with linear cost function Cx,
269
+ where C is a sufficiently large constant, and capacity +∞.
270
+ Next, we set b_o = S, b_d = -S, b_{u_i} = b_{v_i} = 0 (i ∈ [n]),
271
+ where S := max(∑_{i∈[n]} N_{t,i}, ∑_{i∈[n]} N_{t+1,i}). This newly
272
+ formulated MCFP always has a feasible solution and still
273
+ belongs to C-MCFP, so we can solve this by CS.
274
+
275
+ In this case, $M_t^*$ calculated from the optimum solu-
276
+ tion of the MCFP does not necessarily satisfy the pop-
277
+ ulation conservation law $N_{t,i} = \sum_{j \in \Gamma_i} M_{tij}^*, N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji}^*(i \in [n])$, which are the constraints of the origi-
278
+ ---PAGE_BREAK---
279
+
280
+ nal problem (4). We can interpret these discrepancies as fol-
281
+ lows: $N_{t,i} - \sum_{j \in \Gamma_i} M_{tij}^*$ is outflow from area $i$ to some-
282
+ where outside the targeted areas, and $N_{t+1,i} - \sum_{j \in \Gamma_i} M_{tji}^*$ is inflow from somewhere outside the targeted areas to area
283
+ $i$ between timesteps $t$ and $t + 1$.
284
+
285
+ 5. Experimental results
286
+
287
+ Here, we use numerical experiments to demonstrate the practical utility of the proposed method. All experiments are conducted on a 64-bit CentOS 7.3 machine with Xeon(R) Gold 6126 CPU(2.60GHz)x2 and 512 GB memory. The capacity scaling algorithm is implemented in C++ (g++ 4.8.5 with the -O3 option); other codes were written in python 2.7.12 with SciPy (Jones et al. 2001).
288
+
289
+ 5.1 Compared methods
290
+
291
+ We compare the proposed method with commonly used ones used in CFDM inference (Iwata et al. 2017; Akagi et al. 2018; Tanaka et al. 2018). In this method, we solve an optimization problem that has the following objective function $f(M_t) + \frac{\lambda}{2} \cdot g(M_t)$ under constraints $M_{tij} \in \mathbb{R}_{\ge 0}$, where
292
+
293
+ $$
294
+ f(\mathbf{M}_t) = \sum_{i \in [n], j \in \Gamma_i} (M_{tij} \log M_{tij} - M_{tij}(1 + \log \theta_{ij})),
295
+ $$
296
+
297
+ $$
298
+ g(\mathbf{M}_t) = \sum_{i \in [n]} \left[ (N_{t,i} - \sum_{j \in \Gamma_i} M_{tij})^2 + (N_{t+1,i} - \sum_{j \in \Gamma_i} M_{tji})^2 \right]
299
+ $$
300
+
301
+ and $\lambda$ is a hyperparameter. This problem is derived by applying Stirling’s approximation and continuous relaxation to the objective function of (4), and adding constraints of people conservation law to objective function as penalty terms. $\lambda$ controls the strength of penalty terms. This optimization problem has a convex objective function and bound constraints, so we can get the global optimum by L-BFGS-B method (Byrd et al. 1995), which is implemented in scipy\.optimize. Our experiments explored three methods with $\lambda$ values of \{1, 10, 100\}.
302
+
303
+ 5.2 MAP inference: Synthetic data
304
+
305
+ First, we compare running times and characteristics of the
306
+ optimum solutions of MAP inference problem (2) obtained
307
+ by each method using synthetic data. We randomly generate
308
+ synthetic instances of the MAP inference problem (2). We
309
+ consider an L × L grid space, where each cell corresponds
310
+ to one area. Γᵢ is set to be [n] for ∀i ∈ [n] (i.e. we consider
311
+ the “fully connected” situation). We set T = 2 and Nₜ,ᵢ ~
312
+ Multi(F, p_t) (t = 1, 2), where F is the total population in
313
+ the grid space and p₁, p₂ ~ Dirichlet(1). θ is generated in
314
+ two ways as follows.
315
+
316
+ 1. $\theta_i \sim \text{Dirichlet}(1)$ for each $i \in [n]$ independently. We call this generation procedure "Dirichlet".
317
+
318
+ 2. $\theta_{ij} = \exp(-\text{dist}(i, j)) / \sum_{j \in \Gamma_i} \exp(-\text{dist}(i, j))$, where $\text{dist}(i, j)$ is the Euclidean distance between cell $i$ and $j$. We call this procedure "Exponential decay". This procedure reflects a typical characteristic of human movement: people are more likely to move over short distances than over long ones.
319
+
320
+ To clarify the dependence of computation time on the num-
321
+ ber of areas, L², and total population, F, we solve the MAP
322
+ inference problem for L = 10, 20, 30 fixing F to 10⁴, and
323
+ for F = 10⁴, 10⁵, 10⁶ fixing L to 20. We generate 10 ran-
324
+ dom instances for each evaluation.
325
+
326
+ The average running times (seconds) for 10 instances by
327
+ each algorithm are summarized in Table 1. Each experiment
328
+ is executed with the time limit of 1000 seconds. If run-
329
+ ning time exceeds the time limit, running time of the trial
330
+ is recorded as 1000 seconds. In such a case, the averaged
331
+ value is underestimated. To clarify this, we tag average run-
332
+ ning time in the table with "> " if the time limit is exceeded
333
+ in even one instance. In the parentheses, standard deviation
334
+ of running times are shown if all 10 trials are completed in
335
+ the time limit. L-BFGS-B methods have longer running time
336
+ than the proposed method and vary with parameter settings
337
+ and instances. This unstable behavior will be problematic in
338
+ practical usage. The proposed method outperforms all other
339
+ methods in all settings. In particular, it offers the advantage
340
+ that it can solve problems with small computational time and
341
+ work stably even when L and F are large.
342
+
343
+ In order to compare the characteristics of optimum solu-
344
+ tions output by the proposed method and L-BFGS-B ($\lambda$ =
345
+ 1), we solve two examples with $L = 5$, $F = 10^2$, “Expo-
346
+ nential decay” and $L = 5$, $F = 10^3$, “Exponential decay”
347
+ instances and checked the solutions in detail. The results are
348
+ shown in Figure 3. In this figure, the $L^2 \times L^2$ optimum so-
349
+ lution matrix obtained by each method are presented as a
350
+ heatmap. To investigate the sparsity structure of the solution,
351
+ the maximum value of heatmap is set to 1 and minimum
352
+ value to 0. While the solution obtained by L-BFGS-B is
353
+ blurred and contains a lot of small but non-zero elements (el-
354
+ ements with light colors) because of continuous relaxation,
355
+ the proposed method is able to produce sparse solutions. We cal-
356
+ culated the sparseness of each solution by (# of near-zero
357
+ (< 10⁻⁴) elements)/(# of whole elements); the yielded val-
358
+ ues are 90% and 67% with the proposed method, and 0% and 0% with
359
+ L-BFGS-B. This implies that the memory needed to hold the
360
+ solution can be reduced significantly by using sparse matrix
361
+ structure. Although we can get sparse solutions by rounding
362
+ the solutions of existing methods, this operation violates the
363
+ constraint of population conservation and degrades solution
364
+ quality.
365
+
366
+ **5.3 MAP inference: Real data**
367
+
368
+ We evaluate running times and characteristics of the opti-
369
+ mum solutions using real-world spatio-temporal population
370
+ data. We use mobile spatial statistics (Terada, Nagata, and
371
+ Kobayashi 2013), which is the hourly population data for
372
+ fixed size square grids calculated from mobile network op-
373
+ erational data. We use Tokyo and Kanagawa prefecture data,
374
+ which is the main part of the capital region of Japan, on
375
+ April 1st, 2015 (weekday) and April 5th, 2015 (holiday).
376
+ *N*<sub>*t*</sub> is the population of each area at the clock time of *t*-
377
+ hour for *t* ∈ {0, 1, ..., 22} on each day. In order to com-
378
+ pare the performances of the methods at different cell width,
379
+ we aggregate population data of each cell and made datasets
380
+ with cell sizes of 5km × 5km, 2km × 2km, and 1km ×
381
+ 1km. The resulting datasets contain 200, 1017, 3711 cells,
382
+ ---PAGE_BREAK---
383
+
384
+ Table 1: The average running time (seconds) of 10 synthetic instances when *F* is fixed to 10⁴ (above) and when *L* is fixed to 20 (below). The best running time is highlighted for each problem size. Values with "> " are underestimates due to the time limit. Standard deviation is shown in parentheses if all 10 trials are completed in the time limit.
385
+
386
+ <table><thead><tr><th rowspan="2">type of θ<br>L</th><th colspan="3">Dirichlet</th><th colspan="3">Exponential decay</th></tr><tr><th>10</th><th>20</th><th>30</th><th>10</th><th>20</th><th>30</th></tr></thead><tbody><tr><td>Proposed</td><td><b>0.05 (0.00)</b></td><td><b>0.61 (0.01)</b></td><td><b>4.54 (0.16)</b></td><td><b>0.03 (0.00)</b></td><td><b>0.46 (0.03)</b></td><td><b>6.29 (2.60)</b></td></tr><tr><td>L-BFGS-B (λ = 1)</td><td>6.51 (0.91)</td><td>132.86 (15.46)</td><td>357.32 (39.76)</td><td>13.51 (2.00)</td><td>273.25 (18.86)</td><td>&gt;911.22 (-)</td></tr><tr><td>L-BFGS-B (λ = 10)</td><td>7.40 (1.27)</td><td>143.14 (13.25)</td><td>387.09 (56.31)</td><td>13.87 (1.69)</td><td>281.40 (19.18)</td><td>&gt;936.14 (-)</td></tr><tr><td>L-BFGS-B (λ = 100)</td><td>9.65 (2.01)</td><td>169.83 (17.19)</td><td>440.77 (69.87)</td><td>15.79 (1.36)</td><td>297.40 (20.42)</td><td>&gt;975.64 (-)</td></tr></tbody></table>
387
+
388
+ <table><thead><tr><th rowspan="2">type of θ<br>F</th><th colspan="3">Dirichlet</th><th colspan="3">Exponential decay</th></tr><tr><th>10<sup>4</sup></th><th>10<sup>5</sup></th><th>10<sup>6</sup></th><th>10<sup>4</sup></th><th>10<sup>5</sup></th><th>10<sup>6</sup></th></tr></thead><tbody><tr><td>Proposed</td><td><strong>0.71 (0.09)</strong></td><td><strong>4.19 (0.85)</strong></td><td><strong>14.25 (1.56)</strong></td><td><strong>0.68 (0.22)</strong></td><td><strong>2.44 (0.58)</strong></td><td><strong>4.93 (0.94)</strong></td></tr><tr><td>L-BFGS-B (λ = 1)</td><td>140.16 (15.34)</td><td>434.25 (114.80)</td><td>&gt;804.52 (-)</td><td>323.87 (30.86)</td><td>&gt;1000.00 (-)</td><td>&gt;1000.00 (-)</td></tr><tr><td>L-BFGS-B (λ = 10)</td><td>149.29 (14.35)</td><td>503.72 (117.16)</td><td>&gt;880.68 (-)</td><td>340.96 (41.54)</td><td>&gt;1000.00 (-)</td><td>&gt;1000.00 (-)</td></tr><tr><td>L-BFGS-B (λ = 100)</td><td>175.65 (18.26)</td><td>793.54 (146.68)</td><td>&gt;899.83 (-)</td><td>356.24 (48.56)</td><td>&gt;1000.00 (-)</td><td>&gt;887.22 (-)</td></tr></tbody></table>
389
+
390
+ Table 2: The average running time (seconds) for real data. The best running time is highlighted for each cell width. Values with "> " are underestimates due to the time limit. Standard deviation is shown in parentheses if all 10 trials are completed in the time limit.
391
+
392
+ <table>
393
+ <thead>
394
+ <tr>
395
+ <th rowspan="2">dataset<br/>cell width</th>
396
+ <th colspan="3">April 1st, 2015</th>
397
+ <th colspan="3">April 5th, 2015</th>
398
+ </tr>
399
+ <tr>
400
+ <th>5km</th>
401
+ <th>2km</th>
402
+ <th>1km</th>
403
+ <th>5km</th>
404
+ <th>2km</th>
405
+ <th>1km</th>
406
+ </tr>
407
+ </thead>
408
+ <tbody>
409
+ <tr>
410
+ <th scope="row">Proposed</th>
411
+ <td><strong>0.84 (0.16)</strong></td>
412
+ <td><strong>9.16 (1.49)</strong></td>
413
+ <td><strong>59.40 (22.38)</strong></td>
414
+ <td><strong>0.41 (0.01)</strong></td>
415
+ <td><strong>6.52 (1.15)</strong></td>
416
+ <td><strong>54.00 (10.70)</strong></td>
417
+ </tr>
418
+ <tr>
419
+ <th scope="row">L-BFGS-B (&lambda; = 1)</th>
420
+ <td>196.46 (139.61)</td>
421
+ <td>&gt;1000.00 (-)</td>
422
+ <td>&gt;1000.00 (-)</td>
423
+ <td>68.76 (25.43)</td>
424
+ <td>&gt;940.84 (-)</td>
425
+ <td>&gt;1000.00 (-)</td>
426
+ </tr>
427
+ <tr>
428
+ <th scope="row">L-BFGS-B (&lambda; = 10)</th>
429
+ <td>14.96 (34.63)</td>
430
+ <td>&gt;1000.00 (-)</td>
431
+ <td>&gt;1000.00 (-)</td>
432
+ <td>10.90 (19.85)</td>
433
+ <td>&gt;1000.00 (-)</td>
434
+ <td>&gt;1000.00 (-)</td>
435
+ </tr>
436
+ <tr>
437
+ <th scope="row">L-BFGS-B (&lambda; = 100)</th>
438
+ <td>2.04 (0.73)</td>
439
+ <td>&gt;811.94 (-)</td>
440
+ <td>&gt;1000.00 (-)</td>
441
+ <td>0.99 (0.89)</td>
442
+ <td>&gt;697.78 (-)</td>
443
+ <td>&gt;1000.00 (-)</td>
444
+ </tr>
445
+ </tbody>
446
+ </table>
447
+
448
+ respectively.
449
+
450
+ We construct $\theta$ by the same procedure as “Exponen-
451
+ tial decay” in the synthetic data experiment and set
452
+ $\Gamma_i = \{j \mid j \in [n], \text{dist}(i, j) \le 5\}$, where $\text{dist}(i, j)$ is Eu-
453
+ clidean distance between cell $i$ and cell $j$ in the grid space.
454
+
455
+ The results are summarized in Table 2. The time limit is set to 1000 seconds, and the average running time and standard deviation are calculated in the same way as in the experiment on synthetic data.
456
+
457
+ As shown, the proposed method is able to solve
458
+ all instances in about 60 seconds.
459
+
460
+ On the other hand, com-
461
+ pared methods fail to process 2km × 2km and 1km × 1km
462
+ datasets regardless of the value of λ.
463
+
464
+ This shows the effec-
465
+ tiveness of the proposed method.
466
+
467
+ **5.4 EM algorithm: Synthetic data**
468
+
469
+ As mentioned, MAP inference is used for conducting E-step of EM algorithm to estimate the number of moving people and probabilistic model parameters.
470
+
471
+ Here, we compare EM algorithm performance achieved with the proposed method and with the existing method using simulation data.
472
+
473
+ We consider people movement in an *L* × *L* sized grid space (*L* = 10, 12). We construct transition matrix $\theta^{\text{true}}$ by $\theta_{ij} \propto s_j \cdot \exp(-\beta \cdot \text{dist}(i, j))$, where $s_j > 0$ ($j \in [n]$) is a parameter that represents how likely people are to gather at area *j*, and $\beta$ is a parameter that controls the decay of transition probability with increasing distance between *i* and *j*. This transition matrix is a variant of the one used in (Akagi et al. 2018).
474
+
475
+ We set $\beta^{\text{true}} = 0.5$ and $s_i^{\text{true}}$ as follows: first, we randomly select 3 areas from $[n]$ and set $s_i^{\text{true}} = 10$ for them. For the other areas, we set $s_i^{\text{true}} = 1$. We generate the population
476
+
477
+ of each area, *N*, and number of moving people between areas, *M*, by simulating people movement following the procedure written in Section 3.1 until timestep *T* = 10 using transition matrix $\theta^{\text{true}}$. We set initial population $N_{1,i}$ to $10^4$ ($i \in [n]$).
478
+
479
+ Our task is to estimate the number of moving peo-
480
+ ple, M, from observed population N by the EM algo-
481
+ rithm. For details of the EM algorithm, please see (Ak-
482
+ agi et al. 2018). In the algorithms, Γᵢ is set to be [n] for
483
+ ∀i ∈ [n]. We evaluate algorithm performance by Normal-
484
+ ized Absolute Error (NAE) of M, which is calculated by
485
+ ∑<sub>t,i,j</sub> |M<sub>tij</sub><sup>true</sup> - M<sub>tij</sub><sup>estimated</sup>| / ∑<sub>t,i,j</sub> M<sub>tij</sub><sup>true</sup>. EM algorithm
486
+ is iterated 200 times for each method.
487
+
488
+ Figure 4 plots NAE versus the elapsed time for the EM algorithm with proposed method and previous method.
489
+
490
+ It can be seen that the proposed method yields better NAE values more quickly than the previous method, especially at large *L*. For example, in the case of *L* = 12, it took the L-BFGS-B method about 9657 seconds to reach 1.15 for NAE (the dashed line in Figure 4). The proposed method, on the other hand, took only 24 seconds or so, which is about 400 times faster.
491
+
492
+ **6. Related Work**
493
+
494
+ Several methods have been proposed to realize MAP in-
495
+ ference efficiently in CGM, which is a general framework
496
+ including CFDM, (Sheldon et al. 2013; Sun, Sheldon, and
497
+ Kumar 2015; Nguyen et al. 2016; Vilnis et al. 2015). Note
498
+ that existing methods provide non-exact MAP inference and
499
+ output non-integer solutions.
500
+
501
+ In (Akagi et al. 2018), an ef-
502
+ ---PAGE_BREAK---
503
+
504
+ Figure 3: Comparison of optimum solution matrix in an $L \times L$ grid space obtained by proposed method and L-BFGS-B ($\lambda = 1$) with $\theta$ type of “Exponential decay”. The left is when $(L, F) = (5, 10^2)$ and the right is when $(L, F) = (5, 10^3)$, where F is the total population of the targeted areas. Sparsity pattern of obtained $L^2 \times L^2$ solution matrix is presented as a heatmap. $(i, j)$-element of solution matrix represents the number of moving people from area *i* to area *j*. In order to investigate sparsity structure of solutions, maximum value of color map is set to be 1 and minimum value is 0. The output of L-BFGS-B method is blurred and contains a lot of small but non-zero elements. In contrast, solution by proposed method is noticeably sparse.
505
+
506
+ ficient optimization method for CFDM is proposed, but it
507
+ can be used only under a specially factorized probabilistic
508
+ model, which is designed to model human movements in ur-
509
+ ban spaces. In contrast, the proposal of this paper is widely
510
+ available and poses no excessive constraints on the underly-
511
+ ing transition model structure.
512
+
513
+ There is a lot of work on people flow estimation via CFDM. For example, (Iwata et al. 2017; Akagi et al. 2018; Iwata and Shimizu 2019) deal with the estimation of people flows in urban spaces by utilizing variational inference, a factorized probabilistic model, or neural networks. In (Kumar, Sheldon, and Srivastava 2013) and (Tanaka et al. 2018), the inflow and outflow of each area at each timestep are assumed to be available, while (Tanaka et al. 2018) considers a time delay between before and after movement. Thus, there are many variations in terms of the observation model and the probabilistic model underlying movement. The method proposed herein can be used as a subroutine in any of these approaches by appropriately constructing instances of MCFP to suit the problem.
514
+
515
+ Attempts to estimate human movement from aggregated
516
+
517
+ Figure 4: NAE (Normalized Absolute Error) as a function of elapsed time for EM algorithm with each MAP inference method.
518
+
519
+ count data have received a lot of attention. As a particularly
520
+ relevant study, Xu et al. proposed an algorithm for recov-
521
+ ering personal trajectories from aggregated count data for
522
+ the purpose of evaluating privacy risk for publishing such
523
+ data (Xu et al. 2017). Sheldon et al. proposed a method
524
+ to reconstruct sample paths of a Markov chain from par-
525
+ tial observations for the purpose of analyzing bird migra-
526
+ tion patterns (Sheldon, Elmohamed, and Kozen 2008). Al-
527
+ though those methods are similar to our method in the sense
528
+ of solving combinatorial assignment problems to recover
529
+ movement from aggregated data, there are two distinct dif-
530
+ ferences: (i) Those methods focus on recovering each indi-
531
+ vidual trajectory, not the collective movement of targets. (ii)
532
+ Those methods do not have a mechanism to estimate the pa-
533
+ rameters of movement models.
534
+
535
+ Many studies on another direction, predicting population
536
+ or people flow in cities, have been published (Konishi et al.
537
+ 2016; Zhang et al. 2019; Jiang et al. 2019). Their approach is
538
+ to forecast future city dynamics at each area from past data
539
+ or other features in a supervised way, using classical regres-
540
+ sion models or deep learning architecture, etc. Our purpose
541
+ is estimating people flows between areas from only popu-
542
+ lation snapshots at incremental timesteps in an unsupervised
543
+ way, which is a totally different task from future prediction.
544
+
545
+ **7. Conclusion**
546
+
547
+ In this paper, we proposed a novel method for MAP infer-
548
+ ence in collective flow diffusion model. First, we showed
549
+ that the MAP inference problem can be formulated as a min-
550
+ imum convex cost flow problem. Based on this formulation,
551
+ we proposed an efficient algorithm for MAP inference prob-
552
+ ---PAGE_BREAK---
553
+
554
+ lem using capacity scaling algorithm. Extensive evaluations on both real and synthetic datasets showed that our algorithm outperforms previous alternatives in terms of running time and optimum solution quality.
555
+
556
+ ## References
557
+
558
+ Ahuja, R. K.; Magnanti, T. L.; and Orlin, J. B. 1993. *Network Flows: Theory, Algorithms, and Applications*. Prentice-Hall, Inc.
559
+
560
+ Akagi, Y.; Nishimura, T.; Kurashima, T.; and Toda, H. 2018. A fast and accurate method for estimating people flow from spatiotemporal population data. In *IJCAI*, 3293–3300.
561
+
562
+ Byrd, R. H.; Lu, P.; Nocedal, J.; and Zhu, C. 1995. A limited memory algorithm for bound constrained optimization. *SIAM Journal on Scientific Computing* 16(5):1190–1208.
563
+
564
+ Dijkstra, E. W. 1959. A note on two problems in connexion with graphs. *Numerische mathematik* 1(1):269–271.
565
+
566
+ Du, J.; Kumar, A.; and Varakantham, P. 2014. On understanding diffusion dynamics of patrons at a theme park. In *AAMAS*, 1501–1502.
567
+
568
+ Iwata, T., and Shimizu, H. 2019. Neural collective graphical models for estimating spatio-temporal population flow from aggregated data. In *AAAI*, 3935–3942.
569
+
570
+ Iwata, T.; Shimizu, H.; Naya, F.; and Ueda, N. 2017. Estimating people flow from spatiotemporal population data via collective graphical mixture models. *ACM Transactions on Spatial Algorithms and Systems* 3(1):1–18.
571
+
572
+ Jiang, R.; Song, X.; Huang, D.; Song, X.; Xia, T.; Cai, Z.; Wang, Z.; Kim, K.-S.; and Shibasaki, R. 2019. Deepurban-event: A system for predicting citywide crowd dynamics at big events. In *KDD*, 2114–2122. ACM.
573
+
574
+ Jones, E.; Oliphant, T.; Peterson, P.; et al. 2001–. SciPy: Open source scientific tools for Python.
575
+
576
+ Kiraly, Z., and Kovacs, P. 2012. Efficient implementations of minimum-cost flow algorithms. *Acta Univ. Sapientiae* 4(1):67–118.
577
+
578
+ Konishi, T.; Maruyama, M.; Tsubouchi, K.; and Shimosaka, M. 2016. Cityprophet: City-scale irregularity prediction using transit app logs. In *Ubicomp*, 752–757. ACM.
579
+
580
+ Kumar, A.; Sheldon, D.; and Srivastava, B. 2013. Collective diffusion over networks: Models and inference. In *UAI*.
581
+
582
+ Minoux, M. 1986. Solving integer minimum cost flows with separable convex cost objective polynomially. In *Netflow at Pisa*. Springer. 237–239.
583
+
584
+ Morimura, T.; Osogami, T.; and Idé, T. 2013. Solving inverse problem of Markov chain with partial observations. In *NIPS*, 1655–1663.
585
+
586
+ Nguyen, T.; Kumar, A.; Lau, H. C.; and Sheldon, D. 2016. Approximate inference using DC programming for collective graphical models. In *AISTATS*, 685–693.
587
+
588
+ Sheldon, D. R., and Dietterich, T. G. 2011. Collective graphical models. In *NIPS*, 1161–1169.
589
+
590
+ Sheldon, D.; Sun, T.; Kumar, A.; and Dietterich, T. 2013. Approximate inference in collective graphical models. In *ICML*, 1004–1012.
591
+
592
+ Sheldon, D.; Elmohamed, M.; and Kozen, D. 2008. Collective inference on markov models for modeling bird migration. In *NIPS*, 1321–1328.
593
+
594
+ Sun, T.; Sheldon, D.; and Kumar, A. 2015. Message passing for collective graphical models. In *ICML*, 853–861.
595
+
596
+ Tanaka, Y.; Iwata, T.; Kurashima, T.; Toda, H.; and Ueda, N. 2018. Estimating latent people flow without tracking individuals. In *IJCAI*, 3556–3563.
597
+
598
+ Terada, M.; Nagata, T.; and Kobayashi, M. 2013. Population estimation technology for mobile spatial statistics. *NTT DOCOMO Technical Journal* 14(3):10–15.
599
+
600
+ Vilnis, L.; Belanger, D.; Sheldon, D.; and McCallum, A. 2015. Bethe projections for non-local inference. In *UAI*, 892–901.
601
+
602
+ Xu, F.; Tu, Z.; Li, Y.; Zhang, P.; Fu, X.; and Jin, D. 2017. Trajectory recovery from ash: User privacy is not preserved in aggregated mobility data. In *WWW*, 1241–1250.
603
+
604
+ Yang, H., and Zhou, J. 1998. Optimal traffic counting locations for origin-destination matrix estimation. *Transportation Research Part B: Methodological* 32(2):109–126.
605
+
606
+ Zhang, J.; Zheng, Y.; Sun, J.; and Qi, D. 2019. Flow prediction in spatio-temporal networks based on multitask deep learning. *IEEE Transactions on Knowledge and Data Engineering*.
samples/texts_merged/1885128.md ADDED
@@ -0,0 +1,507 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ We are IntechOpen,
5
+ the world's leading publisher of
6
+ Open Access books
7
+ Built by scientists, for scientists
8
+
9
+ 5,300
10
+ Open access books available
11
+
12
+ 131,000
13
+ International authors and editors
14
+
15
+ 160M
16
+ Downloads
17
+
18
+ Our authors are among the
19
+ TOP 1%
20
+ most cited scientists
21
+
22
+ 154
23
+ Countries delivered to
24
+
25
+ 12.2%
26
+ Contributors from top 500 universities
27
+
28
+ WEB OF SCIENCE™
29
+
30
+ Selection of our books indexed in the Book Citation Index
31
+ in Web of Science™ Core Collection (BKCI)
32
+
33
+ Interested in publishing with us?
34
+ Contact book department@intechopen.com
35
+
36
+ Numbers displayed above are based on latest data collected.
37
+ For more information visit www.intechopen.com
38
+ ---PAGE_BREAK---
39
+
40
+ Low Sampling Rate Time Acquisition Schemes
41
+ and Channel Estimation Algorithms of
42
+ Ultra-Wideband Signals
43
+
44
+ Wei Xu and Jiaxiang Zhao
45
+
46
+ Nankai University
47
+ China
48
+
49
+ # 1. Introduction
50
+
51
+ Ultra-wideband (UWB) communication is a viable technology to provide high data rates over broadband wireless channels for applications, including wireless multimedia, wireless Internet access, and future-generation mobile communication systems (Karaoguz, 2001; Stoica et al., 2005). Two of the most critical challenges in the implementation of UWB systems are the timing acquisition and channel estimation. The difficulty in them arises from UWB signals being the ultra short low-duty-cycle pulses operating at very low power density. The Rake receiver (Turin, 1980) as a prevalent receiver structure for UWB systems utilizes the high diversity in order to effectively capture signal energy spread over multiple paths and boost the received signal-to-noise ratio (SNR). However, to perform maximal ratio combining (MRC), the Rake receiver needs the timing information of the received signal and the knowledge of the channel parameters, namely, gains and tap delays. Timing errors as small as fractions of a nanosecond could seriously degrade the system performance (Lovelace & Townsend, 2002; Tian & Giannakis, 2005). Thus, accurate timing acquisition and channel estimation is very essentially for UWB systems.
52
+
53
+ Many research efforts have been devoted to the timing acquisition and channel estimation of UWB signals. However, most reported methods suffer from the restrictive assumptions, such as, demanding a high sampling rates, a set of high precision time-delay systems or invoking a line search, which severally limits their usages. In this chapter, we are focusing on the low sampling rate time acquisition schemes and channel estimation algorithms of UWB signals. First, we develop a novel optimum data-aided (DA) timing offset estimator that utilizes only symbol-rate samples to achieve the channel delay spread scale timing acquisition. For this purpose, we exploit the statistical properties of the power delay profile of the received signals to design a set of the templates to ensure the effective multipath energy capture at any time. Second, we propose a novel optimum data-aided channel estimation scheme that only relies on frame-level sampling rate data to derive channel parameter estimates from the received waveform. The simulations are provided to demonstrate the effectiveness of the proposed approach.
54
+ ---PAGE_BREAK---
55
+
56
+ ## 2. The channel model
57
+
58
+ From the channel model described in (Foerster, 2003), the impulse response of the channel is
59
+
60
+ $$h(t) = X \sum_{n=1}^{N} \sum_{k=1}^{K(n)} \alpha_{nk} \delta(t - T_n - \tau_{nk}) \quad (1)$$
61
+
62
+ where $X$ is the log-normal shadowing effect. $N$ and $K(n)$ represent the total number of the clusters, and the number of the rays in the $n$th cluster, respectively. $T_n$ is the time delay of the $n$th cluster relative to a reference at the receiver, and $\tau_{nk}$ is the delay of the $k$th multipath component in the $n$th cluster relative to $T_n$. From (Foerster, 2003), the multipath channel coefficient $\alpha_{nk}$ can be expressed as $\alpha_{nk} = p_{nk}\beta_{nk}$ where $p_{nk}$ assumes either +1 or -1 with equal probability, and $\beta_{nk} > 0$ has log-normal distribution.
63
+
64
+ The power delay profile (the mean square values of $\{\beta_{nk}^2\}$) is exponential decay with respect to $\{T_n\}$ and $\{\tau_{nk}\}$, i.e.,
65
+
66
+ $$\langle \beta_{nk}^2 \rangle = \langle \beta_{00}^2 \rangle \exp(-\frac{T_n}{\Gamma}) \exp(-\frac{\tau_{nk}}{\gamma}) \quad (2)$$
67
+
68
+ where $\langle \beta_{00}^2 \rangle$ is the average power gain of the first multipath in the first cluster. $\Gamma$ and $\gamma$ are power-delay time constants for the clusters and the rays, respectively.
69
+
70
+ The model (1) is employed to generate the impulse responses of the propagation channels in our simulation. For simplicity, an equivalent representation of (1) is
71
+
72
+ $$h(t) = \sum_{l=0}^{L-1} \alpha_l \delta(t - \tau_l) \quad (3)$$
73
+
74
+ where $L$ represents the total number of the multipaths, $\alpha_l$ includes log-normal shadowing and multipath channel coefficients, and $\tau_l$ denotes the delay of the $l$th multipath relative to the reference at the receiver. Without loss of generality, we assume $\tau_0 < \tau_1 < \dots < \tau_{L-1}$. Moreover, the channel only allows to change from burst to burst but remains invariant (i.e., $\{\alpha_l, \tau_l\}_{l=0}^{L-1}$ are constants) over one transmission burst.
75
+
76
+ ## 3. Low sampling rate time acquisition schemes
77
+
78
+ One of the most acute challenges in realizing the potentials of the UWB systems is to develop the time acquisition scheme which relies only on symbol-rate samples. Such a low sampling rate time acquisition scheme can greatly lower the implementation complexity. In addition, the difficulty in UWB synchronization also arises from UWB signals being the ultrashort low-duty-cycle pulses operating at very low power density. Timing errors as small as fractions of a nanosecond could seriously degrade the system performance (Lovelace & Townsend, 2002; Tian & Giannakis, 2005).
79
+
80
+ A number of timing algorithms are reported for UWB systems recently. Some of the timing algorithms(Tian & Giannakis, 2005; Yang & Giannakis, 2005; Carbonelli & Mengali, 2006; He & Tepedelenlioglui, 2008) involve the sliding correlation that usually used in traditional narrowband systems. However, these approaches inevitably require a searching procedure and are inherently time-consuming. Too long synchronization time will affect
81
+ ---PAGE_BREAK---
82
+
83
+ symbol detection. Furthermore, implementation of such techniques demands very fast
84
+ and expensive A/D converters and therefore will result in high power consumption.
85
+ Another approach (Carbonelli & Mengali, 2005; Furusawa et al., 2008; Cheng & Guan, 2008;
86
+ Sasaki et al., 2010) is to synchronize UWB signals through the energy detector. The merits
87
+ of using energy detectors are that the design of timing acquisition scheme could benefit
88
+ from the statistical properties of the power delay profile of the received signals. Unlike
89
+ the received UWB waveforms which is unknown to receivers due to the pulse distortions,
90
+ the statistical properties of the power delay profile are invariant. Furthermore, as shown
91
+ in (Carbonelli & Mengali, 2005), an energy collection based receiver can produce a low
92
+ complexity, low cost and low power consumption solution at the cost of reduced channel
93
+ spectral efficiency.
94
+
95
+ In this section, a novel optimum data-aided timing offset estimator that only relies on
96
+ symbol-rate samples for frame-level timing acquisition is derived. For this purpose, we
97
+ exploit the statistical properties of the power delay profile of the received signals to design
98
+ a set of the templates to ensure the effective multipath energy capture at any time. We show
99
+ that the frame-level timing offset acquisition can be transformed into an equivalent amplitude
100
+ estimation problem. Thus, utilizing the symbol-rate samples extracted by our templates and
101
+ the ML principle, we obtain channel-dependent amplitude estimates and optimum timing
102
+ offset estimates.
103
+
104
+ **3.1 The signal model**
105
+
106
+ During the acquisition stage, a training sequence is transmitted. Each UWB symbol is transmitted over a time-interval of $T_s$ seconds that is subdivided into $N_f$ equal size frame-intervals of length $T_f$. A single frame contains exactly one data modulated ultrashort pulse $p(t)$ of duration $T_p$. And the transmitted waveform during the acquisition has the form as
107
+
108
+ $$s(t) = \sqrt{E_f} \sum_{j=0}^{NN_f-1} d_{[j]_{N_{ds}}} p(t - jT_f - a_{\lfloor \frac{j}{N_f} \rfloor}) \quad (4)$$
109
+
110
+ where {$d_l$}$_{l=0}^{{N_{ds}}-1}$ with $d_l \in \{\pm 1\}$ is the DS sequence. The time shift $\Delta$ is chosen to be $T_h/2$ with $T_h$ being the delay spread of the channel. The assumption that there is no inter-frame interference suggests $T_h \le T_f$. For the simplicity, we assume $T_h = T_f$ and derive the acquisition algorithm. Our scheme can easily be extended to the case where $T_f \ge T_h$. The training sequence {$a_n$}$_{n=0}^{N-1}$ is designed as
111
+
112
+ $$\underbrace{\{0, 0, 0, \dots, 0}_{n=0,1,\dots,N_0-1}, \underbrace{1, 0, 1, 0, \dots, 1, 0}_{n=N_0,N_0+1,\dots,N-1}}$$
113
+
114
+ (5)
115
+
116
+ i.e., the first $N_0$ consecutive symbols are chosen to be 0, and the rest symbols alternately switch between 1 and 0.
117
+
118
+ The transmitted signal propagates through an L-path fading channel as shown in (3). Using the first arriving time $\tau_0$, we define the relative time delay of each multipath as $\tau_{l,0} = \tau_l - \tau_0$
119
+ ---PAGE_BREAK---
120
+
121
+ Fig. 1. The block diagram of acquisition approach.
122
+
123
+ for $1 \le l \le L - 1$. Thus the received signal is
124
+
125
+ $$r(t) = \sqrt{E_f} \sum_{j=0}^{NN_f-1} d_{[j]_{N_{ds}}} p_R(t-jT_f - a_{\lfloor \frac{j}{N_f} \rfloor} \Delta - \tau_0) + n(t) \quad (6)$$
126
+
127
+ where $n(t)$ is the zero-mean additive white Gaussian noise (AWGN) with double-side power spectral density $\sigma_n^2/2$ and $p_R(t) = \sum_{l=0}^{L-1} \alpha_l p(t - \tau_{l,0})$ represents the convolution of the channel impulse response (3) with the transmitted pulse $p(t)$.
128
+
129
+ The timing information of the received signal is contained in the delay $\tau_0$ which can be decomposed as
130
+
131
+ $$\tau_0 = n_s T_s + n_f T_f + \zeta \quad (7)$$
132
+
133
+ with $n_s = \lfloor \frac{\tau_0}{T_s} \rfloor$, $n_f = \lfloor \frac{\tau_0 - n_s T_s}{T_f} \rfloor$ and $\zeta \in [0, T_f)$.
134
+
135
+ In the next section, we present a DA timing acquisition scheme based on the following assumptions: 1) There is no interframe interference, i.e., $\tau_{L-1,0} \le T_f$. 2) The channel is assumed to be quasi-static, i.e., the channel is constant over a block duration. 3) Since the symbol-level timing offset $n_s$ can be estimated from the symbol-rate samples through the traditional estimation approach, we assume $n_s = 0$. In this chapter, we focus on acquiring timing with frame-level resolution, which relies on only symbol-rate samples.
136
+
137
+ ## 3.2 Analysis of symbol-rate sampled data $Y_0[n]$
138
+
139
+ As shown in Fig. 1, the received signal (6) first passes through a square-law detector. Then, the resultant output is separately correlated with the pre-devised templates $W_0(t)$, $W_1(t)$ and $W_2(t)$, and sampled at $nT_s$ which yields $\{Y_0[n]\}_{n=1}^{N-1}$, $\{Y_1[n]\}_{n=1}^{N-1}$ and $\{Y_2[n]\}_{n=1}^{N-1}$. Utilizing these samples, we derive an optimal timing offset estimator $\hat{n}_f$.
140
+
141
+ In view of (6), the output of the square-law detector is
142
+
143
+ $$ \begin{aligned} R(t) &= r^2(t) = (r_s(t) + n(t))^2 = r_s^2(t) + m(t) \\ &= E_f \sum_{j=0}^{NN_f-1} p_R^2(t - jT_f - a_{\lfloor \frac{j}{N_f} \rfloor} \Delta - \tau_0) + m(t) \end{aligned} \quad (8) $$
144
+ ---PAGE_BREAK---
145
+
146
+ where $m(t) = 2r_s(t)n(t) + n^2(t)$. When the template $W(t)$ is employed, the symbol rate sampled data $Y[n]$ is
147
+
148
+ $$ Y[n] = \int_{0}^{T_s} R(t+nT_s)W(t)dt. \quad (9) $$
149
+
150
+ Now we derive the decomposition of $Y_0[n]$, i.e., the symbol-rate samples when the template $W_0(t)$ defined as
151
+
152
+ $$ W_0(t) = \sum_{k=0}^{N_f-1} w(t-kT_f), \quad w(t) = \begin{cases} 1, & 0 \le t < \frac{T_f}{2} \\ -1, & \frac{T_f}{2} \le t < T_f \\ 0, & \text{others} \end{cases} \quad (10) $$
153
+
154
+ is employed. Substituting $W_0(t)$ for $W(t)$ in (9), we obtain symbol-rate sampled data $Y_0[n]$. Recalling (5), we can derive the following proposition of $Y_0[n]$.
155
+
156
+ **Proposition 1:** 1) For $1 \le n < N_0$, $Y_0[n]$ can be expressed as
157
+
158
+ $$ Y_0[n] = N_f I_{\xi,0} + M_0[n], \quad (11) $$
159
+
160
+ 2) For $N_0 \le n \le N-1$, $Y_0[n]$ can be represented as
161
+
162
+ $$ Y_0[n] = \begin{cases} (2\Psi - N_f)I_{\xi,a_{n-1}} + M_0[n], & \zeta \in [0, T_\eta) \\ (2\Psi - N_f + 1)I_{\xi,a_{n-1}} + M_0[n], & \zeta \in [T_\eta, T_\eta + \frac{T_f}{2}) \\ (2\Psi - N_f + 2)I_{\xi,a_{n-1}} + M_0[n], & \zeta \in [T_\eta + \frac{T_f}{2}, T_f) \end{cases} \quad (12) $$
163
+
164
+ where $\Psi \triangleq n_f - \frac{1}{2}\epsilon$, $\epsilon \in [-\frac{1}{2}, \frac{1}{2}]$ and $T_\eta \in [\frac{T_f}{4}, \frac{T_f}{2}]$. $M_0[n]$ is the sampled noise, and $I_{\xi,a_n}$ is defined as
165
+
166
+ $$ I_{\xi,a_n} \triangleq E_f \int_0^{T_f} \sum_{m=0}^2 p_R^2(t+mT_f-a_n\Delta-\xi)w(t)dt. \quad (13) $$
167
+
168
+ We prove the Proposition 1 and the fact that the sampled noise $M_0[n]$ can be approximated by a zero mean Gaussian variable in (Xu et al., 2009) in Appendix A and Appendix B respectively. There are some remarks on the Proposition 1:
169
+
170
+ 1) The fact of $a_{n-1} \in \{0, 1\}$ suggests that $I_{\xi,a_{n-1}}$ in (12) is equal to either $I_{\xi,0}$ or $I_{\xi,1}$. Furthermore, $I_{\xi,0}$ and $I_{\xi,1}$ satisfy $I_{\xi,1} = -I_{\xi,0}$ whose proof is contained in *Fact 1* of Appendix A.
171
+
172
+ 2) Equation (12) suggests that the decomposition of $Y_0[n]$ varies when $\zeta$ falls in different subintervals, so correctly estimating $n_f$ requires determining the subinterval to which $\zeta$ belongs.
173
+
174
+ 3) *Fact 2* of Appendix A which states
175
+
176
+ $$ \left\{ \begin{array}{ll} I_{\xi,0} > 0, & \zeta \in [0, T_{\eta}) \cup [T_{\eta} + \frac{T_f}{2}, T_f] \\ I_{\xi,0} < 0, & \zeta \in [T_{\eta}, T_{\eta} + \frac{T_f}{2}) \end{array} \right. \quad (14) $$
177
+
178
+ suggests that it is possible to utilize the sign of $I_{\xi,0}$ to determine to which subinterval $\zeta$ belongs. However, when $I_{\xi,0} > 0$, $\zeta$ could belong to either $[0, T_{\eta})$ or $[T_{\eta} + \frac{T_f}{2}, T_f)$. To resolve this difficulty, we introduce the second template $W_1(t)$ in the next section.
179
+ ---PAGE_BREAK---
180
+
181
+ ### 3.3 Analysis of symbol-rate sampled data $Y_1[n]$
182
+
183
+ The symbol-rate sampled data $Y_1[n]$ is obtained when the template $W_1(t)$ is employed. $W_1(t)$ is a delayed version of $W_0(t)$ with the delay time $T_d$ where $T_d \in [0, \frac{T_f}{2}]$. Our simulations show that similar performance is obtained for different choices of $T_d$. For simplicity, we choose $T_d = \frac{T_f}{4}$ for the derivation. Thus, we have
184
+
185
+ $$
186
+ \begin{aligned}
187
+ Y_1[n] &= \int_{\frac{T_f}{4}}^{T_s+\frac{T_f}{4}} R(t+nT_s)W_0\left(t-\frac{T_f}{4}\right)dt \\
188
+ &= \int_0^{T_s} R(t+nT_s+\frac{T_f}{4})W_0(t)dt.
189
+ \end{aligned}
190
+ \quad (15) $$
191
+
192
+ Then we can derive the following proposition of $Y_1[n]$.
193
+
194
+ **Proposition 2:** 1) For $1 \le n < N_0$, $Y_1[n]$ can be expressed as
195
+
196
+ $$ Y_1[n] = N_f J_{\zeta,0} + M_1[n]. \quad (16) $$
197
+
198
+ 2) For $N_0 \le n \le N-1$, $Y_1[n]$ can be decomposed as
199
+
200
+ $$ Y_1[n] = \begin{cases} (2\Psi - N_f - 1)J_{\zeta, a_{n-1}} + M_1[n], & \zeta \in [0, T_\eta - \frac{T_f}{4}) \\ (2\Psi - N_f)J_{\zeta, a_{n-1}} + M_1[n], & \zeta \in [T_\eta - \frac{T_f}{4}, T_\eta + \frac{T_f}{4}) \\ (2\Psi - N_f + 1)J_{\zeta, a_{n-1}} + M_1[n], & \zeta \in [T_\eta + \frac{T_f}{4}, T_f) \end{cases} \quad (17) $$
201
+
202
+ where $J_{\zeta,0}$ satisfies
203
+
204
+ $$ \left\{
205
+ \begin{array}{ll}
206
+ J_{\zeta,0} < 0, & \zeta \in [0, T_{\eta} - \frac{T_f}{4}) \cup [T_{\eta} + \frac{T_f}{4}, T_f) \\
207
+ J_{\zeta,0} > 0, & \zeta \in [T_{\eta} - \frac{T_f}{4}, T_{\eta} + \frac{T_f}{4}).
208
+ \end{array}
209
+ \right.
210
+ \quad (18) $$
211
+
212
+ Equations (14) and (18) suggest that the signs of $I_{\zeta,0}$ and $J_{\zeta,0}$ can be utilized jointly to determine the range of $\zeta$, which is summarized as follows:
213
+
214
+ **Proposition 3:** $\zeta \in [0, T_f)$ defined in (7) satisfies
215
+
216
+ 1. If $I_{\zeta,0} > 0$ and $J_{\zeta,0} > 0$, then $\zeta \in (T_{\eta} - \frac{T_f}{4}, T_{\eta})$.
217
+
218
+ 2. If $I_{\zeta,0} < 0$ and $J_{\zeta,0} > 0$, then $\zeta \in (T_{\eta}, T_{\eta} + \frac{T_f}{4})$.
219
+
220
+ 3. If $I_{\zeta,0} < 0$ and $J_{\zeta,0} < 0$, then $\zeta \in (T_{\eta} + \frac{T_f}{4}, T_{\eta} + \frac{T_f}{2})$.
221
+
222
+ 4. If $I_{\zeta,0} > 0$ and $J_{\zeta,0} < 0$, then $\zeta \in (0, T_{\eta} - \frac{T_f}{4}) \cup (T_{\eta} + \frac{T_f}{2}, T_f)$.
223
+
224
+ The last case of Proposition 3 suggests that using the signs of $I_{\zeta,0}$ and $J_{\zeta,0}$ is not enough to determine whether we have $\zeta \in (0, T_{\eta} - \frac{T_f}{4})$ or $\zeta \in (T_{\eta} + \frac{T_f}{2}, T_f)$. To resolve this difficulty, the third template $W_2(t)$ is introduced. $W_2(t)$ is an auxiliary template and is defined as
225
+
226
+ $$ W_2(t) = \sum_{k=0}^{N_f-1} v(t-kT_f), \quad v(t) = \begin{cases} 1, & T_f - 2T_v \le t < T_f - T_v \\ -1, & T_f - T_v \le t < T_f \\ 0, & \text{others} \end{cases} \quad (19) $$
227
+
228
+ where $T_v \in (0, T_f/10]$. Similar to the proof of (14), we can prove that in this case, either $K_{\zeta,0} > 0$ for $0 < \zeta < T_{\eta} - \frac{T_f}{4}$ or $K_{\zeta,0} < 0$ for $T_{\eta} + \frac{T_f}{2} < \zeta < T_f$ is valid, which yields the information to determine which region $\zeta$ belongs to.
229
+ ---PAGE_BREAK---
230
+
231
+ ### 3.4 The computation of the optimal timing offset estimator $\hat{n}_f$
232
+
233
+ To seek the estimate of $n_f$, we first compute the optimal estimates of $I_{\xi,0}$ and $J_{\xi,0}$ using (11) and (16). Then, we use the estimate $\hat{I}_{\xi,0}, \hat{J}_{\xi,0}$ and Proposition 3 to determine the region to which $\xi$ belongs. The estimate $\hat{\Psi}$ therefore can be derived using the proper decompositions of (12) and (17). Finally, recalling the definition in (12) $\Psi = n_f - \frac{\epsilon}{2}$ with $\epsilon \in [-\frac{1}{2}, \frac{1}{2}]$, we obtain $\hat{n}_f = [\hat{\Psi}]$, where $[\cdot]$ stands for the round operation.
234
+
235
+ According to the signs of $\hat{I}_{\xi,0}$ and $\hat{J}_{\xi,0}$, we summarize the ML estimate $\hat{\Psi}$ as follows:
236
+
237
+ **Proposition 4:**
238
+
239
+ * When $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} > 0$, $\hat{\Psi} = \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + N_f(I_{\xi,0}^2 + J_{\xi,0}^2)]$.
240
+
241
+ * When $\hat{I}_{\xi,0} < 0$ and $\hat{J}_{\xi,0} > 0$, $\hat{\Psi} = \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + (N_f - 1)I_{\xi,0}^2 + N_f J_{\xi,0}^2]$.
242
+
243
+ * When $\hat{I}_{\xi,0} < 0$ and $\hat{J}_{\xi,0} < 0$, $\hat{\Psi} = \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + (N_f - 1)(I_{\xi,0}^2 + J_{\xi,0}^2)]$.
244
+
245
+ * When $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} < 0$, $\hat{\Psi} = \begin{cases} \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + N_f I_{\xi,0}^2 + (N_f + 1) J_{\xi,0}^2] & , \hat{K}_{\xi,0} > 0 \\ \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + (N_f - 2) I_{\xi,0}^2 + (N_f - 1) J_{\xi,0}^2] & , \hat{K}_{\xi,0} < 0 \end{cases}$
246
+
247
+ where $A \triangleq 2(N - N_0)(I_{\xi,0}^2 + J_{\xi,0}^2)$ and $Z_n \triangleq Y_0[n]I_{\xi,a_{n-1}} + Y_1[n]J_{\xi,a_{n-1}}$. The procedures of computing the optimal ML estimate $\hat{\Psi}$ in Proposition 4 are identical. Therefore, we only present the computation steps when $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} > 0$.
248
+
249
+ 1. Utilizing (11) and (16), we obtain the ML estimates
250
+
251
+ $$ \hat{I}_{\xi,0} = \frac{1}{(N_0-1)N_f} \sum_{n=1}^{N_0-1} Y_0[n], \quad \hat{J}_{\xi,0} = \frac{1}{(N_0-1)N_f} \sum_{n=1}^{N_0-1} Y_1[n]. \qquad (20) $$
252
+
253
+ 2. From (1) of Proposition 3, it follows that $T_\eta - \frac{T_f}{4} < \zeta < T_\eta$ when $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} > 0$.
254
+
255
+ 3. According to the region of $\zeta$, we can select the right equations from (12) and (17) as
256
+
257
+ $$ Y_0[n] = (2\Psi - N_f)I_{\zeta,a_{n-1}} + M_0[n] \qquad (21) $$
258
+
259
+ $$ Y_1[n] = (2\Psi - N_f)J_{\zeta,a_{n-1}} + M_1[n]. \qquad (22) $$
260
+
261
+ Thus the log-likelihood function $\ln p(y; \Psi, I_{\zeta,a_{n-1}}, J_{\zeta,a_{n-1}})$ is
262
+
263
+ $$ \sum_{n=N_0}^{N-1} \left\{ [Y_0[n] - (2\Psi - N_f) I_{\zeta,a_{n-1}}]^2 + [Y_1[n] - (2\Psi - N_f) J_{\zeta,a_{n-1}}]^2 \right\}. $$
264
+
265
+ It follows the ML estimate $\hat{\Psi} = \frac{1}{A}\sum_{n=N_0}^{N-1}[Z_n + N_f(I_{\zeta,0}^2 + J_{\zeta,0}^2)]$.
266
+
267
+ ### 3.5 Simulation
268
+
269
+ In this section, computer simulations are performed. We use the second-order derivative of the Gaussian pulse to represent the UWB pulse. The propagation channels are generated
270
+ ---PAGE_BREAK---
271
+
272
+ Fig. 2. MSE performance under CM2 with $d = 4m$.
273
+
274
+ Fig. 3. BER performance under CM2 with $d = 4m$.
275
+
276
+ by the channel model CM2 described in (Foerster, 2003). Other parameters are selected as follows: $T_p = 1$ns, $N_f = 25$, $T_f = 100$ns, $T_v = T_f/10$ and the transmitted distance $d = 4m$. In all the simulations, we assume that $n_f$ and $\zeta$ are uniformly distributed over $[0, N_f - 1]$ and $[0, T_f]$ respectively. To evaluate the effect of the estimate $\hat{n}_f$ on the bit-error-rates (BERs) performance, we assume there is an optimal channel estimator at the receiver to obtain the perfect template for tracking and coherent demodulation. The signal-to-noise ratios (SNRs)
277
+ ---PAGE_BREAK---
278
+
279
+ in all figures are computed through $E_s/\sigma_n^2$ where $E_s$ is the energy spread over each symbol at the transmitter and $\sigma_n^2$ is the power spectral density of the noise.
280
+
281
+ Fig. 2 presents the normalized mean-square error (MSE: $E\{(|\hat{n}_f - n_f|/N_f)^2\}$) of the proposed algorithm in contrast to the approach using noisy template proposed in (Tian & Giannakis, 2005). The figure shows that the proposed algorithm (blue curve) outperforms that in (Tian & Giannakis, 2005) (red curve) when the SNR is larger than 10dB. For both algorithms, the acquisition performance improves with an increase in the length of training symbols $N$, as illustrated by the performance gap between $N = 12$ and $N = 30$. Fig. 3 illustrates the BER performance for both algorithms. The BERs corresponding to perfect timing (green curve) and no timing (magenta curve) are also plotted for comparisons.
282
+
283
+ ## 4. Low sampling rate channel estimation algorithms
284
+
285
+ The channel estimation of UWB systems is essential to effectively capture signal energy spread over multiple paths and boost the received signal-to-noise ratio (SNR). The low sampling rate channel estimation algorithms have the merits that can greatly lower the implementation complexity and reduce the costs. However, the development of low sampling rate channel estimation algorithms is extremely challenging. This is primarily due to the facts that the propagation models of UWB signals are frequency selective and far more complex than traditional radio transmission channels.
286
+
287
+ Classical approaches to this problem are using the maximum likelihood (ML) method or approximating the solutions of the ML problem. The main drawback of these approaches is that the computational complexity could be prohibitive since the number of parameters to be estimated in a realistic UWB channel is very high (Lottici et al., 2002). Other approaches reported are the minimum mean-squared error schemes which have the reduced complexity at the cost of performance (Yang & Giannakis, 2004). Furthermore, sampling rate of the received UWB signal is not feasible with state-of-the-art analog-to-digital converters (ADC) technology. Since UWB channels exhibit clusters (Cramer et al., 2002), a cluster-based channel estimation method is proposed in (Carbonelli & Mitra, 2007). Different methods such as subspace approach (Xu & Liu, 2003), first-order cyclostationary-based method (Wang & Yang, 2004) and compressed sensing based method (Paredes et al., 2007; Shi et al., 2010) proposed for UWB channel estimation are too complex to be implemented in actual systems.
288
+
289
+ In this section, we develop a novel optimum data-aided channel estimation scheme that only relies on frame-level sampling rate data to derive channel parameter estimates from the received waveform. To begin with, we introduce a set of especially devised templates for the channel estimation. The received signal is separately correlated with these pre-devised templates and sampled at frame-level rate. We show that each frame-level rate sample of any given template can be decomposed to a sum of a frequency-domain channel parameter and a noise sample. The computation of time-domain channel parameter estimates proceeds through the following two steps: In step one, for each fixed template, we utilize the samples gathered at this template and the maximum likelihood criterion to compute the ML estimates of the frequency-domain channel parameters of these samples. In step two, utilizing the computed frequency-domain channel parameters, we can compute the time-domain channel parameters via the inverse fast Fourier transform (IFFT). As demonstrated in the simulation example,
290
+ ---PAGE_BREAK---
291
+
292
+ Fig. 4. The block diagram of channel estimation scheme.
293
+
294
+ when the training time is fixed, more templates used for the channel estimation yield the better (BER) performance.
295
+
296
+ ## 4.1 The signal model
297
+
298
+ During the channel estimation process, a training sequence is transmitted. Each UWB symbol is transmitted over a time-interval of $T_s$ seconds that is subdivided into $N_f$ equal size frame-intervals of length $T_f$, i.e., $T_s = N_f T_f$. A frame is divided into $N_c$ chips with each of duration $T_c$, i.e., $T_f = N_c T_c$. A single frame contains exactly one data modulated ultrashort pulse $p(t)$ (so-called monocycle) of duration $T_p$ which satisfies $T_p \le T_c$. The pulse $p(t)$ normalized to satisfy $\int p(t)^2 dt = 1$ can be Gaussian, Rayleigh or other. Then the waveform for the training sequence can be written as
299
+
300
+ $$s(t) = \sqrt{E_f} \sum_{n=0}^{N_s-1} \sum_{j=0}^{N_f-1} b_n p(t - nT_s - jT_f) \quad (23)$$
301
+
302
+ where $E_f$ represents the energy spread over one frame and $N_s$ is the length of the training sequence; $b_n$ denotes data, which is equal to 1 during training phase.
303
+ Our goal is to derive the estimate of the channel parameter sequence $\mathbf{h} = [h_0, h_1, \dots, h_{L-1}]$. Since from the assumption $L$ is unknown, we define a $N_c$-length sequence $\mathbf{p}$ as
304
+
305
+ $$\mathbf{p} = [h_0, h_1, \dots, h_{L-1}, h_L, h_{L+1}, \dots, h_{N_c-1}] \quad (24)$$
306
+
307
+ where $h_l = 0$ for $l \ge L$. The transmitted signal propagates through an $L$-path fading channel as shown in (3). Thus the received signal is
308
+
309
+ $$r(t) = \sqrt{E_f} \sum_{n=0}^{N_s-1} \sum_{j=0}^{N_f-1} \sum_{l=0}^{N_c-1} h_l p(t - nT_s - jT_f - lT_c) + n(t) \quad (25)$$
310
+
311
+ where $n(t)$ is the zero-mean additive white Gaussian noise (AWGN) with double-side power spectral density $\sigma_n^2/2$.
312
+ ---PAGE_BREAK---
313
+
314
+ ## 4.2 The choices of templates
315
+
316
+ In this section, a novel channel estimation method that relies on frame-level samples is derived. As shown in Fig. 4, the received signal (25) is separately correlated with the pre-devised templates $W_0(t), W_1(t), \dots, W_S(t)$, and sampled at $nT_m$ where the sampling period $T_m$ is on the order of $T_f$. Let $Y_i[n]$ denote the $n$-th sample corresponding to the template $W_i(t)$, that is,
317
+
318
+ $$ Y_i[n] = \int_0^{T_m} r(t + nT_m)W_i(t)dt \quad (26) $$
319
+
320
+ with $i = 0, 1, \dots, S$. Utilizing these samples, we derive the ML estimate of the channel parameter sequence **p** in (24).
321
+
322
+ First we introduce a set of $S+1$ templates used for the channel estimation. The number $S$ is chosen as a positive integer factor of $N_c/2$ by assuming that $N_c$ which represents the number of chips $T_c$ in each frame is an even number. That is, we have $N_c = 2SM$ with $M$ also being defined as a positive integer factor of $N_c/2$. The $i$-th template is defined as
323
+
324
+ $$ W_i(t) = \sqrt{E_f} \sum_{k=0}^{N_o-1} \omega_{N_o}^{ik} [p(t - kT_c) + p(t - T_f - kT_c)] \quad (27) $$
325
+
326
+ with $N_o = 2S = N_c/M$, $\omega_{N_o}^{ik} = e^{-j\frac{2\pi ik}{N_o}}$ and $i \in \{0, 1, \dots, S\}$. The duration of each template $W_i(t)$ is equal to the sampling period $T_m$ which can be expressed as
327
+
328
+ $$ T_m = (N_c + N_o)T_c = T_f + N_o T_c. \quad (28) $$
329
+
330
+ ## 4.3 The computation of the channel parameter sequence p
331
+
332
+ In this section, we derive the channel estimation scheme that only relies on frame-level sampling rate data. To begin with, let us introduce some notations. Recalling the equation $N_o = N_c/M$ following (27), we divide the $N_c$-length sequence **p** into $M$ blocks each of size $N_o$. Therefore, equation (24) becomes
333
+
334
+ $$ \mathbf{p} = [\mathbf{h}_0, \mathbf{h}_1, \dots, \mathbf{h}_m, \dots, \mathbf{h}_{M-1}] \quad (29) $$
335
+
336
+ where the *m*-th block $\mathbf{h}_m$ is defined as
337
+
338
+ $$ \mathbf{h}_m = [h_{mN_o}, h_{mN_o+1}, \dots, h_{mN_o+N_o-1}] \quad (30) $$
339
+
340
+ with $m \in \{0, 1, \dots, M-1\}$. Let $\mathbf{F}_i$ denote the $N_o$-length coefficient sequence of the $i$-th template $W_i(t)$ in (27), i.e.,
341
+
342
+ $$ \mathbf{F}_i = [\omega_{N_o}^0 \omega_{N_o}^i \omega_{N_o}^{2i} \dots \omega_{N_o}^{(N_o-1)i}] . \quad (31) $$
343
+
344
+ The discrete Fourier transform (DFT) of the $N_o$-length sequence $\mathbf{h}_m = [h_{mN_o}, h_{mN_o+1}, \dots, h_{mN_o+N_o-1}]$ is denoted as
345
+
346
+ $$ \mathbf{H}_m = [H_m^0, H_m^1, \dots, H_m^i, \dots, H_m^{N_o-1}] \quad (32) $$
347
+ ---PAGE_BREAK---
348
+
349
+ where the frequency-domain channel parameter $H_m^i$ is
350
+
351
+ $$ H_m^i = \mathbf{F}_i \mathbf{h}_m^T = \sum_{k=0}^{N_o-1} \omega_{N_o}^{ik} h_{mN_o+k} \quad (33) $$
352
+
353
+ with $m \in \{0, 1, \dots, M-1\}$ and $i \in \{0, 1, \dots, S\}$.
354
+
355
+ Our channel estimation algorithm proceeds through the following two steps.
356
+
357
+ **Step 1:** Utilizing the set of frame-level samples $\{Y_i[n]\}_{n=0}^{N-1}$ generated from the $i$-th template, we compute the ML estimates of the frequency-domain channel parameters $\{H_m^i\}_{m=0}^{M-1}$ for $i \in \{0, 1, \dots, S\}$. To do this, we show that the samples $\{Y_i[n]\}_{n=0}^{N-1}$ from the $i$-th template have the following decomposition.
358
+
359
+ **Proposition 1:** Every sample in the set $\{Y_i[n]\}_{n=0}^{N-1}$ can be decomposed into the sum of a frequency-domain channel parameter and a noise sample, that is,
360
+
361
+ $$ \left\{ \begin{array}{l} Y_i[qM] = 2E_f H_0^i + Z_i[qM] \\ Y_i[qM+1] = 2E_f H_1^i + Z_i[qM+1] \\ \vdots \\ Y_i[qM+m] = 2E_f H_m^i + Z_i[qM+m] \\ \vdots \\ Y_i[qM+M-1] = 2E_f H_{M-1}^i + Z_i[qM+M-1] \end{array} \right. \qquad (34) $$
362
+
363
+ where $Z_i[n]$ represents the noise sample. The parameter $q$ belongs to the set $\{0, 1, \dots, Q-1\}$ with $Q = \lfloor \frac{N}{M} \rfloor$.
364
+
365
+ Performing ML estimation to the $(m+1)$-th equation in (34) for $q=0, 1, \dots, Q-1$, we can compute the ML estimate $\hat{H}_m^i$ for the frequency-domain channel parameter $H_m^i$ as
366
+
367
+ $$ \hat{H}_m^i = \frac{1}{2E_f Q} \sum_{q=0}^{Q-1} Y_i[qM+m] \quad (35) $$
368
+
369
+ with $m \in \{0, 1, \dots, M-1\}$ and $i \in \{0, 1, \dots, S\}$.
370
+
371
+ **Step 2:** Utilizing the computed frequency-domain channel parameters $\{\hat{H}_m^i\}_{i=0}^S$ from the Step 1, we derive the estimate of the time-domain channel sequence $\mathbf{h}_m$ for $m \in \{0, 1, \dots, M-1\}$. Since the time-domain channel parameter sequence $\mathbf{h}_m = [h_{mN_o} \ h_{mN_o+1} \ \dots \ h_{mN_o+N_o-1}]$ is a real-valued sequence, the conjugate symmetry of the DFT implies that the DFT of $\mathbf{h}_m$ satisfies
372
+
373
+ $$ H_m^{N_o-i} = (H_m^i)^* \quad (36) $$
374
+
375
+ with $i \in \{0, 1, \dots, S\}$ and $S = N_o/2$.
376
+
377
+ Utilizing equation (36), we obtain the estimate for the $N_o$-point DFT of $\mathbf{h}_m$ as
378
+
379
+ $$ \hat{\mathbf{H}}_m = [\hat{H}_m^0, \hat{H}_m^1, \dots, \hat{H}_m^S, (\hat{H}_m^{S-1})^*, \dots, (\hat{H}_m^2)^*, (\hat{H}_m^1)^*] \quad (37) $$
380
+ ---PAGE_BREAK---
381
+
382
+ The estimate of the time-domain channel parameter $\hat{h}_m$ can be computed via $N_o$-point IFFT. In view of equation (29), the estimated channel parameter sequence **p** in (24) is given by
383
+
384
+ $$ \hat{\mathbf{p}} = [\hat{\mathbf{h}}_0, \hat{\mathbf{h}}_1, \dots, \hat{\mathbf{h}}_{M-1}]. \quad (38) $$
385
+
386
+ Fig. 5. MSE performance of the algorithm proposed in (Wang & Ge, 2007) and the proposed algorithm with different number of templates ($S = 4, 8, 16$), when the length of the training sequence $N_s$ is 30.
387
+
388
+ ## 4.4 Simulation
389
+
390
+ In this section, computer simulations are performed to test the proposed algorithm. The propagation channels are generated by the channel model CM 4 described in (Foerster, 2003). We choose the second-order derivative of the Gaussian pulse as the transmitted pulse with duration $T_p = 1$ ns. Other parameters are selected as follows: $T_f = 64$ ns, $T_c = 1$ ns, $N_c = 64$ and $N_f = 24$.
391
+
392
+ Fig. 5 presents the normalized mean-square error (MSE) of our channel estimation algorithm with different number of templates ($S = 4, 8, 16$) when the length of the training sequence $N_s$ is 30. As a comparison, we also plot the MSE curve of the approach in (Wang & Ge, 2007) which needs chip-level sampling rate. Fig. 6 illustrates the bit-error-rates (BERs) performance for both algorithms. The BERs corresponding to the perfect channel estimation (Perfect CE) is also plotted for comparisons. From these figures, the MSE and BER performances of our algorithm improve as the number of templates increases. In particular, as shown in Fig. 5 and Fig. 6, the MSE and BER performances of our algorithm that relies only on the frame-level sampling period $T_f = 64$ ns is comparable to that of the approach proposed in (Wang & Ge, 2007) which requires chip-level sampling period $T_c = 1$ ns.
393
+ ---PAGE_BREAK---
394
+
395
+ Fig. 6. BER performance of Perfect CE, the algorithm proposed in (Wang & Ge, 2007) and the proposed algorithm with different number of templates ($S = 4, 8, 16$), when the length of the training sequence $N_s$ is 30.
396
+
397
+ ## 5. Conclusion
398
+
399
+ In this chapter, we are focusing on the low sampling rate time acquisition schemes and channel estimation algorithms of UWB signals. First, we develop a novel optimum data-aided (DA) timing offset estimator that utilizes only symbol-rate samples to achieve the channel delay spread scale timing acquisition. For this purpose, we exploit the statistical properties of the power delay profile of the received signals to design a set of the templates to ensure the effective multipath energy capture at any time. Second, we propose a novel optimum data-aided channel estimation scheme that only relies on frame-level sampling rate data to derive channel parameter estimates from the received waveform.
400
+
401
+ ## 6. References
402
+
403
+ * Karaoguz, J. (2001). High-rate wireless personal area networks, *IEEE Commun. Mag.*, vol. 39, pp. 96-102.
404
+
405
+ * Lovelace, W. M. & Townsend, J. K. (2002). The effect of timing jitter and tracking on the performance of impulse radio, *IEEE J. Sel. Areas Commun.*, vol. 20, no. 9, pp. 1646-1651.
406
+
407
+ * Tian, Z. & Giannakis, G. B. (2005). BER sensitivity to mistiming in ultrawideband impulse radios-part I: modeling, *IEEE Trans. Signal Processing*, vol. 53, no. 4, pp. 1550-1560.
408
+
409
+ * Tian, Z. & Giannakis, G. B. (2005). A GLRT approach to data-aided timing acquisition in UWB radios-Part I: Algorithms, *IEEE Trans. Wireless Commun.*, vol. 53, no. 11, pp. 2956-2967.
410
+
411
+ * Yang, L. & Giannakis, G. B. (2005). Timing Ultra-wideband Signals with Dirty Templates, *IEEE Trans. on Commun.*, vol. 53, pp. 1952-1963.
412
+ ---PAGE_BREAK---
413
+
414
+ Carbonelli, C. & Mengali, U. (2006). Synchronization algorithms for UWB signals, *IEEE Trans. on Commun.*, vol. 54, no. 2, pp. 329-338.
415
+
416
+ He, N. & Tepedelenlioglui, C. (2008). Joint Pulse and Symbol Level Acquisition of UWB Receivers, *IEEE Trans. on Wireless Commun.*, vol. 7, no. 1, pp. 6-14.
417
+
418
+ Carbonelli, C. & Mengali, U. (2005). Low complexity synchronization for UWB noncoherent receivers, in *Proc. 2005 Vehicular Technology Conf.*, vol. 2, pp. 1350-1354.
419
+
420
+ Furusawa, K.; Sasaki, M.; Hioki, J.; Itami, M.; (2008). Schemes of optimization of energy detection receivers for UWB-IR communication systems under different channel model, *IEEE International Conference on Ultra-Wideband*, pp.157 - 160, Leibniz Universitat Hannover, Germany.
421
+
422
+ Cheng, X. & Guan, Y. (2008). Effects of synchronization errors on energy detection of UWB signals, *IEEE International Conference on Ultra-Wideband*, pp.161 - 164, Leibniz Universitat Hannover, Germany.
423
+
424
+ Sasaki, M.; Ohno, J.; Ohno, H.; Ohno, K.; Itami, M. (2010). A study on multi-user access in energy detection UWB-IR receiver, *2010 IEEE 11th International Symposium on Spread Spectrum Techniques and Applications (ISITA)* pp.141 - 146, Taichung, Taiwan.
425
+
426
+ Xu, W.; Zhao,J.; Wang, D. (2009). A Frame-Level Timing Acquisition Scheme of Ultra-wideband Signals Using Multi-templates, *The 6th International Symposium on Wireless Communication Systems*, pp.61 - 65, Tuscany, Italy.
427
+
428
+ J. Foerster, Channel modeling sub-committee report final, *IEEE P802.15-02/490*.
429
+
430
+ Stoica, L.; Rabbachin, A.; Repo, H.; Tiuraniemi,T.; Oppermann, I. (2005). An ultra-wideband system architecture for tag based wireless sensor networks, *IEEE Trans. on Veh. Technol.*, vol. 54, no. 5, pp. 1632-1645.
431
+
432
+ Turin, G. L. (1980). Introduction to spread-spectrum antimultipath techniques and their application to urban digital radio, *Proc. IEEE*, vol. 68, pp. 328-353.
433
+
434
+ Lottici, V; D'Andrea, A. N.; Mengali, U. (2002). Channel estimation for ultra-wideband communications, *IEEE J. Select. Areas Commun.*, vol. 20, no. 9, pp. 1638-1645.
435
+
436
+ Yang, L. & Giannakis, G. B. (2004). Optimal pilot waveform assisted modulation for ultra-wideband communications, *IEEE Trans. Wireless Commun.*, vol. 3, no. 4, pp. 1236-1249.
437
+
438
+ Cramer, R. J. M.; Scholtz, R. A.; Win, M. Z. (2002). Evaluation of an ultra wideband propagation channel, *IEEE Trans. Antennas Propagat.*, vol. 50, No. 5.
439
+
440
+ Carbonelli, C. & Mitra, U. (2007). Clustered ML Channel Estimation for Ultra-Wideband Signals, *IEEE Trans. Wireless Commun.*, vol. 6, No. 7,pp.2412 - 2416.
441
+
442
+ Paredes, J.L.; Arce, G.R.; Wang, Z. (2007). Ultra-Wideband Compressed Sensing: Channel Estimation, *IEEE Journal of Selected Topics in Signal Processing*, vol. 1, No. 3,pp.383 - 395.
443
+
444
+ Shi, L.; Zhou, Z.; Tang, L.; Yao, H.; Zhang, J. (2010). Ultra-wideband channel estimation based on Bayesian compressive sensing, *2010 International Symposium on Communications and Information Technologies (ISCIT)*, pp.779 - 782, Tokyo, Japan.
445
+
446
+ Wang, X. & Ge, H. (2007). On the CRLB and Low-Complexity Channel Estimation for UWB Communications. *IEEE 41st Annual Conference on Information Sciences and Systems*, Baltimore, pp. 151-153.
447
+ ---PAGE_BREAK---
448
+
449
+ Xu, Z. & Liu, P. (2003). A subspace approach to blind estimation of ultrawideband channels, in *Proc. IEEE Thirty-Seventh Asilomar Conference on Signals, Systems & Computers*. vol. 2, pp. 1249-1253.
450
+
451
+ Wang, Z. & Yang, X. (2004). Ultra wide-band communications with blind channel estimation based on first-order statistics, in *Proc. IEEE (ICASSP-04)*. vol. 4, pp. iv-529 - iv-532, Montreal, Canada.
452
+ ---PAGE_BREAK---
453
+
454
+ ULTRA WIDEBAND
455
+ COMMUNICATIONS
456
+
457
+ NOVEL TRENDS - SYSTEM, ARCHITECTURE
458
+ AND IMPLEMENTATION
459
+
460
+ Edited by Mohammad A. Matin
461
+
462
+ Ultra Wideband Communications: Novel Trends - System,
463
+ Architecture and Implementation
464
+
465
+ Edited by Dr. Mohammad Matin
466
+
467
+ ISBN 978-953-307-461-0
468
+
469
+ Hard cover, 348 pages
470
+
471
+ Publisher InTech
472
+
473
+ Published online 27, July, 2011
474
+
475
+ Published in print edition July, 2011
476
+
477
+ This book has addressed few challenges to ensure the success of UWB technologies and covers several research areas including UWB low cost transceiver, low noise amplifier (LNA), ADC architectures, UWB filter, and high power UWB amplifiers. It is believed that this book serves as a comprehensive reference for graduate students in UWB technologies.
478
+
479
+ ## How to reference
480
+
481
+ In order to correctly reference this scholarly work, feel free to copy and paste the following:
482
+
483
+ Wei Xu and Jiaxiang Zhao (2011). Low Sampling Rate Time Acquisition Schemes and Channel Estimation Algorithms of Ultra-Wideband Signals, Ultra Wideband Communications: Novel Trends - System, Architecture and Implementation, Dr. Mohammad Matin (Ed.), ISBN: 978-953-307-461-0, InTech, Available from: http://www.intechopen.com/books/ultra-wideband-communications-novel-trends-system-architecture-and-implementation/low-sampling-rate-time-acquisition-schemes-and-channel-estimation-algorithms-of-ultra-wideband-signa
484
+
485
+ ## INTECH
486
+
487
+ open science | open minds
488
+
489
+ ### InTech Europe
490
+
491
+ University Campus STeP Ri
492
+ Slavka Krautzeka 83/A
493
+ 51000 Rijeka, Croatia
494
+ Phone: +385 (51) 770 447
495
+ Fax: +385 (51) 686 166
496
+ www.intechopen.com
497
+
498
+ ### InTech China
499
+
500
+ Unit 405, Office Block, Hotel Equatorial Shanghai
501
+ No.65, Yan An Road (West), Shanghai, 200040, China
502
+ 中国上海市延安西路65号上海国际贵都大饭店办公楼405单元
503
+ Phone: +86-21-62489820
504
+ Fax: +86-21-62489821
505
+ ---PAGE_BREAK---
506
+
507
+ © 2011 The Author(s). Licensee IntechOpen. This chapter is distributed under the terms of the [Creative Commons Attribution-NonCommercial-ShareAlike-3.0 License](http://creativecommons.org/licenses/by-nc-nd/3.0/), which permits use, distribution and reproduction for non-commercial purposes, provided the original is properly cited and derivative works building on this content are distributed under the same license.
samples/texts_merged/1973835.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/199837.md ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ POLYNOMIAL SYSTEMS, H-BASES, AND
5
+ AN APPLICATION FROM KINEMATIC
6
+ TRANSFORMS
7
+
8
+ Tomas Sauer and Dominik Wagenfuehr
9
+
10
+ **Abstract.** We review some algebraic methods to solve systems of polynomial equations and illustrate these methods with a real-world problem that comes from computing kinematic transforms in robotics.
11
+
12
+ *Keywords:* Gröbner basis, H-basis, polynomial system, kinematic transform
13
+
14
+ *AMS classification:* 65H10, 13P10, 70B15
15
+
16
+ §1. Introduction
17
+
18
+ Polynomial systems of equations and the structure of their solutions play a crucial role in many fields of theoretical and applied mathematics. The importance of polynomial equations in applications is often due to the need to determine locations of points from given Euclidean distances, which obviously leads to quadratic equations.
19
+
20
+ The mathematical formulation is as follows: Suppose we are given a finite set $F \subset \mathbb{K}[x] = \mathbb{K}[x_1, \dots, x_n]$ of polynomials in the $n$ variables $x_1, \dots, x_n$ with coefficients in the field $\mathbb{K}$, where usually $\mathbb{K} = \mathbb{Q}, \mathbb{R}, \mathbb{C}$, i.e., the rational, real or complex numbers. Given the equations $F$, the goal is to find the solutions $X \subset \overline{\mathbb{K}}^n$ of the system $F(X) = 0$ in the algebraic closure $\overline{\mathbb{K}}$ of $\mathbb{K}$, that is,
21
+
22
+ $$ X = \{ x \in \overline{\mathbb{K}}^n : f(x) = 0, f \in F \}. \qquad (1) $$
23
+
24
+ Note that there are two major differences to the “standard approach” for solving nonlinear equations by means of Newton’s method: The number of equations, $\#F$, need not coincide with the number of variables, $n$, and we are not interested in a single solution, but in the set of all solutions of $F(X) = 0$.
25
+
26
+ The equations $f(X) = 0, f \in F$, trivially remain valid if each of them is multiplied by an arbitrary polynomial $q_f \in \mathbb{K}[x]$ and if any such modified equations are added. Hence,
27
+
28
+ $$ F(X) = 0 \Leftrightarrow \langle F \rangle(X) = 0, \quad \langle F \rangle = \left\{ \sum_{f \in F} q_f f : q_f \in \mathbb{K}[x] \right\}, \quad (2) $$
29
+
30
+ where $\langle F \rangle$ is the *ideal generated by* $F$; recall that an ideal $\mathcal{I}$ is a subset of $\mathbb{K}[x]$ which is closed under addition and multiplication by arbitrary polynomials, cf. [4]. A subset $G$ of an ideal $\mathcal{I}$ is called a *basis* for the ideal $\mathcal{I}$ if $G$ generates the ideal, i.e., $\mathcal{I} = \langle G \rangle$. With this terminology
31
+ ---PAGE_BREAK---
32
+
33
+ at hand, we can rephrase (2) as that the solution $X$ depends only on the ideal $\mathcal{I}$, but not on the
34
+ individual basis $F$. This simple observation is the fundamental idea behind all the algebraic
35
+ methods to solve polynomial systems by interpreting the original equations as a basis of an
36
+ ideal and then computing another basis for the same ideal from which the solution of the
37
+ polynomial system is more easily accessible. In other words: Algebraic methods transform a
38
+ given system of equations into a simpler or more useful form.
39
+
40
+ §2. Gröbner bases, H-bases and eigenvalues
41
+
42
+ Gröbner bases as well as H-bases are special ideal bases which provide representations of minimal degree, where these two types of bases differ by being related to different notions of degree. For Gröbner bases, we need the concept of a term order "<" on $\mathbb{N}_0^n$, that is, a well-ordering on $\mathbb{N}_0^n$ which is compatible with addition, cf. [4]. With respect to this order, any polynomial
43
+
44
+ $$f(x) = \sum_{\alpha \in \mathbb{N}_0^n} f_\alpha x^\alpha, \quad f_\alpha \in \mathbb{K}, \quad \#\{\alpha : f_\alpha \neq 0\} < \infty,$$
45
+
46
+ has a maximal nonzero coefficient $f_\alpha$ and $\alpha$ is called the *(multi)degree* of the polynomial
47
+ while $f_\alpha x^\alpha$ is usually named the *leading term* of $f$. For H-bases, on the other hand, the
48
+ degree is not a multiindex, but a number, namely the maximal length $|\alpha| = \alpha_1 + \cdots + \alpha_n$ of
49
+ the indices of nonzero coefficients – the usual *total degree*. Nevertheless, we will write the
50
+ degree of a polynomial $f$ as $\delta(f)$, regardless of whether $\delta(f) \in \mathbb{N}_0^n$ or $\delta(f) \in \mathbb{N}_0$; indeed,
51
+ there is a joint framework in terms of graded rings, see [5], and [10] for the application in
52
+ ideal bases and interpolation. A finite set $H \subset \mathbb{K}[x]$ is called *Gröbner basis* or *H-basis*,
53
+ depending on whether $\delta$ is based on a term order or on the total degree, if any $f \in \langle H \rangle$
54
+ can be written as
55
+
56
+ $$f = \sum_{h \in H} f_h h, \quad f_h \in \mathbb{K}[x], \quad \delta(f) \ge \delta(f_h h), \quad h \in H. \tag{3}$$
57
+
58
+ The crucial point of Gröbner bases and H-bases is the degree constraint in (3) which helps to avoid a certain redundancy: Assume that one term in the sum on the right hand side were of higher degree than $f$, then there must be at least a second term of the same or higher degree compensating its leading term, and the representation would be redundant, all the terms of degree higher than that of $f$ unneeded. But the main practical advantage of Gröbner bases and the main reason for their development in [2] is the fact that they permit the *algorithmic computation* of a unique remainder $r$,
59
+
60
+ $$f = \sum_{h \in H} f_h h + r. \quad (4)$$
61
+
62
+ This can be extended to the grading by total degree [6, 9] and even to arbitrary gradings in
63
+ such a way that the remainder $r$ depends only on $\langle H \rangle$ and the parameters of the grading,
64
+ see [11] for details. Thus, we have a method to compute a normal form $\nu_{\langle H \rangle}$ modulo $\langle H \rangle$
65
+ and to efficiently perform arithmetic in the quotient ring $\mathcal{P} := \mathbb{K}[x]/\langle H \rangle$. Moreover, $\mathcal{P}$ is a
66
+ ---PAGE_BREAK---
67
+
68
+ finite dimensional space if and only if the ideal $\mathcal{I} = \langle H \rangle$ has dimension zero which is in turn equivalent to a finite number of solutions $X$ for $H(X) = 0$.
69
+
70
+ So here is the first part of the algebraic simplification: Starting with a finite set $F$ of polynomial equations, one computes a Gröbner basis or H-basis $H$ for the ideal $\langle F \rangle$ from which it can be decided whether $F(X) = 0$ has no solution (this happens if and only if $1 \in H$), a finite number of solutions or infinitely many solutions. It is even possible, see [4], to determine the dimension of the algebraic variety formed by the solutions. But in this paper let us assume that $X$ were nonempty and finite.
71
+
72
+ The classical method [13], see also [1, 4], to find $X$ is by means of elimination ideals: A purely lexicographical Gröbner basis for a zero dimensional ideal contains some univariate polynomials whose greatest common divisor vanishes at the projections of the common zeros to this coordinate. Solving and substituting the solutions eliminates the variable and continuing this process, one can systematically find all the common zeros. Unfortunately, this process has a terrible complexity and can be very sensitive to perturbations of the coefficients, cf. [7], which limits its use in practical applications.
73
+
74
+ There is, however, a different approach proposed by Möller and Stetter [8, 12] which is based on multiplication tables on the quotient space $\mathcal{P}$. To that end, observe that multiplication of $f, g \in \mathcal{P}$ is defined as $\nu_{\mathcal{I}}(fg)$ and that for fixed $g \in \mathbb{K}[x]$ the operation
75
+
76
+ $$f \mapsto M_g(f) := \nu_{\mathcal{I}}(fg)$$
77
+
78
+ is a linear operator on $\mathcal{P}$ that can be represented with respect to a basis of $\mathcal{P}$ by a matrix $M_g$ – the so called multiplication table. For $j = 1, \dots, n$ let now $M_j$ denote the multiplication table for the coordinate polynomials $g(x) = x_j$, then the $M_j$ generalize the classical Frobenius companion matrix, form a commuting family of matrices, have joint eigenvectors and the respective eigenvalues are the coordinates of the common zeros. Thus, the solutions of $F(X) = 0$ can be found by relying on well-developed methods from Numerical Linear Algebra and the flexibility of H-bases now offers an approach that changes continuously with the parameters and thus is much less sensitive to perturbations, see again [7] for an example.
79
+
80
+ ### §3. Practical Examples
81
+
82
+ In this section we want to apply and illustrate the mathematical concepts of the preceding chapters. To that end, we take a look at two slightly different kinematics. First, we will consider a simple example in three dimensions to show how we obtain the equations needed as starting ideal basis for the computation of a Gröbner basis or H-basis. Then we present a kinematic that still appears to be quite simple but leads to monstrous Gröbner bases and H-bases and also point out how crucial it is to incorporate “implicit” physical restrictions into the system of equations.
83
+
84
+ All our kinematics follow the same basic layout: The manipulator (in most cases used for welding or milling) is connected to three (or more) rods of variable length. In the inverse kinematic transform we know the position of the manipulator and want to compute the “machine parameters”, i.e., the lengths of the rods, while in the forward kinematic transform the location of the manipulator is to be determined from the lengths of the rods. In both cases the ideal basis which we first must construct is the same, namely the implicit system of equations.
85
+ ---PAGE_BREAK---
86
+
87
+ Figure 1: Simple 3D kinematic.
88
+
89
+ The only difference consists of the choice which of the parameters are considered variables to be solved.
90
+
91
+ **3.1. A Simple 3D-Kinematic**
92
+
93
+ The first example is really easy to solve and we only use it to demonstrate how to obtain
94
+ the equations from which we compute the Gröbner- or H-Basis. First we take a look at the
95
+ construction. In figure 1 the construction is fixed at three points A₁, A₂ and A₃, which are coplanar
96
+ with the origin {0} and have the same distance *a* to {0}. Furthermore, the distance between
97
+ every two points is constant. Now it is easy to see how to obtain the equations we need.
98
+ Consider the projection S of T = (x, y, z) in the plane generated by A₁, A₂ and A₃. With
99
+ Pythagoras we have
100
+
101
+ $$
102
l_i^2 = y^2 + \|A_i - S\|_2^2, \quad i = 1, 2, 3,
103
+ $$
104
+
105
+ which directly leads to the set of equations
106
+
107
+ $$
108
+ y^2 + x^2 + (a-z)^2 - l_1^2 = 0,
109
+ $$
110
+
111
+ $$
112
+ y^2 + \left(-\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2 - l_2^2 = 0,
113
+ $$
114
+
115
+ $$
116
+ y^2 + \left(\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2 - l_3^2 = 0.
117
+ $$
118
+
119
+ In Maple notation, the ideal is thus generated by $F := [x^2 + y^2 + (a-z)^2 - l_1^2, y^2 + (-\frac{\sqrt{3}}{2}a-x)^2 + (\frac{-1}{2}a-z)^2 - l_2^2, y^2 + (\frac{\sqrt{3}}{2}a-x)^2 + (\frac{-1}{2}a-z)^2 - l_3^2]$.
120
+ ---PAGE_BREAK---
121
+
122
+ Because we used the (square of the) lengths $l_1, l_2$ and $l_3$ explicitly in our ideal basis we can give the solution of the inverse kinematic transform directly as
123
+
124
+ $$l_1 = \sqrt{y^2 + x^2 + (a-z)^2},$$
125
+
126
+ $$l_2 = \sqrt{y^2 + \left(-\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2},$$
127
+
128
+ $$l_3 = \sqrt{y^2 + \left(\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2}.$$
129
+
130
+ For the forward transform we switch the roles of variables and constants which are now declared as $x, y, z$ and $a, l_1, l_2, l_3$, respectively. Without further problems we compute an H-basis of $F$ as $H = [9a^2y^2 - 3l_1^2a^2 + l_3^4 - l_3^2l_2^2 + l_2^4 + 9a^4 - l_2^2l_1^2 - 3a^2l_2^2 - 3a^2l_3^2 + l_1^4 - l_1^2l_3^2, 6az - l_2^2 + 2l_1^2 - l_3^2, 12ax + 2\sqrt{3}l_3^2 - 2\sqrt{3}l_2^2]$ and by means of multiplication tables of $\mathcal{P}$ and the corresponding eigenvectors we find that
131
+
132
$$x = \frac{\sqrt{3}(l_2^2 - l_3^2)}{6a},$$
133
+
134
+ $$y = \frac{\sqrt{-l_2^4 + 3l_1^2a^2 - l_3^4 + l_3^2l_2^2 + 3a^2l_3^2 - 9a^4 + l_2^2l_1^2 + 3a^2l_2^2 - l_1^4 + l_1^2l_3^2}}{-3a},$$
135
+
136
+ $$z = \frac{-2l_1^2 + l_2^2 + l_3^2}{6a}.$$
137
+
138
+ Note that the equations for $x$ and $z$ are significantly simpler than the one for $y$.
139
+
140
+ Since $y$ appears quadratically in the H-basis, it follows that together with $(x, y, z)$ also $(x, -y, z)$ is a solution of the system. However, this second solution is impossible in physical reality because the rods are flexible but fixed and cannot cross themselves. Unfortunately, it appears impossible to eliminate this unwanted "solution" a priori by adding more equations to the system; in fact, the only way to distinguish between the two solutions is by means of inequalities.
141
+
142
+ **Remark 1.** It is worthwhile to mention that not for all values of $l_1, l_2$ and $l_3$ the solution belongs to the real domain as in some cases the solution gains an additional imaginary part because the three rods have no common point. Though physically impossible this is absolutely correct mathematically. Finding additional constraints that eliminate complex solutions would consist of determining the associated *real* ideal.
143
+
144
+ ## 3.2. The realistic problem
145
+
146
+ Now we want to take a close look at a slightly extended version of the latter three dimensional kinematic used in practical applications. In figure 2 the upper part of the construction equals the one in figure 1 while the lower part differs with the manipulator being attached centrally under a platform which is held and moved by the rods. To make things simpler, we assume that the vertices $B_1, B_2$ and $B_3$ of the platform form an equilateral triangle with distance $b$ between the points and barycenter $T = (x, y, z)$. To stabilize the construction, the platform
147
+ ---PAGE_BREAK---
148
+
149
+ Figure 2: Complex 3D kinematic.
150
+
151
+ is also linked to the origin $\{0\}$ by an additionally guiding rod which is attached perpendicular
152
+ in $T$.
153
+
154
+ We will not discuss the ideal basis construction in full detail but should mention a few
155
+ facts. First, it is not possible to compute the value of $T$ directly, but it is easily found as
156
+ midpoint of the triangle formed by $B_1, B_2, B_3$ once these locations are determined. The
157
+ lengths $l_1, l_2$ and $l_3$ are just as easy to obtain as before from the equations
158
+
159
+ $$
160
+ \|S - A_i\|_2^2 + \|S - B_i\|_2^2 = \|B_i - A_i\|_2^2, \quad i = 1, 2, 3,
161
+ $$
162
+
163
+ in which *S* is the projection of *T*, leading to
164
+
165
+ $$
166
+ \begin{align*}
167
+ x_1^2 + (z_1 - a)^2 + y_1^2 &= l_1^2, \\
168
+ \left(x_2 + \frac{\sqrt{3}a}{2}\right)^2 + \left(z_2 + \frac{a}{2}\right)^2 + y_2^2 &= l_2^2, \\
169
+ \left(x_3 - \frac{\sqrt{3}a}{2}\right)^2 + \left(z_3 + \frac{a}{2}\right)^2 + y_3^2 &= l_3^2.
170
+ \end{align*}
171
+ $$
172
+
173
+ As mentioned previously the triangle is equilateral giving us the additional three equations
174
+
175
+ $$
176
+ (x_i - x_j)^2 + (y_i - y_j)^2 + (z_i - z_j)^2 = b^2, \quad 1 \le i < j \le 3.
177
+ $$
178
+
179
+ The orthogonality of the system can finally be described by the inner products $(T - B_i, T) =$
180
+ ---PAGE_BREAK---
181
+
182
+ 0, i = 1, ..., 3, which leads to
183
+
184
+ $$
185
+ \begin{align*}
186
+ (x - x_1) x + (y - y_1) y + (z - z_1) z &= 0, \\
187
+ (x - x_2) x + (y - y_2) y + (z - z_2) z &= 0, \\
188
+ (x - x_3) x + (y - y_3) y + (z - z_3) z &= 0.
189
+ \end{align*}
190
+ $$
191
+
192
+ Finally we need the fact that the midpoint T of the triangle can be written as the mean of the outer points, $T = \frac{B_1+B_2+B_3}{3}$, yielding three more equations
193
+
194
+ $$
195
+ (x_1 + x_2 + x_3) = 3x, \quad (y_1 + y_2 + y_3) = 3y, \quad (z_1 + z_2 + z_3) = 3z.
196
+ $$
197
+
198
+ Together, these twelve equations form our initial ideal basis $F := [x_1^2 + (z_1 - a)^2 + y_1^2 - l_1^2, (x_2 + \frac{\sqrt{3}a}{2})^2 + (z_2 + \frac{a}{2})^2 + y_2^2 - l_2^2, (x_3 - \frac{\sqrt{3}a}{2})^2 + (z_3 + \frac{a}{2})^2 + y_3^2 - l_3^2, (x_1 - x_2)^2 + (y_1 - y_2)^2 + (z_1 - z_2)^2 - b^2, (x_1 - x_3)^2 + (y_1 - y_3)^2 + (z_1 - z_3)^2 - b^2, (x_2 - x_3)^2 + (y_2 - y_3)^2 + (z_2 - z_3)^2 - b^2, (x - x_1)x + (y - y_1)y + (z - z_1)z, (x - x_2)x + (y - y_2)y + (z - z_2)z, (x - x_3)x + (y - y_3)y + (z - z_3)z, (x_1+x_2+x_3)-3x, (y_1+y_2+y_3)-3y, (z_1+z_2+z_3)-3z]$.
199
+
200
+ This time we begin with the more interesting forward kinematic transformation and are only interested in the dimension of the variety of the solutions $F(X) = 0$. To do so, we substitute some numerical values for the constants $l_1, l_2, l_3, a$ and $b$ and compute a Gröbner basis which can be done without many problems but with a little bit of time (a tdeg ordered basis has no less than 56 elements). Computing the dimension, we surprisingly realize that the ideal is one-dimensional and not zero-dimensional as it should be if we wanted a finite number of solutions and to apply multiplication tables for their computation.
201
+
202
+ So the first question is why we found a one-dimensional variety. For convenience, we substitute (as before) {$a = \sqrt{3}, b = 3, l_i = 4 \mid i = 1, 2, 3$} (see figure 3), and the desired final solution for the platform is
203
+
204
+ $$
205
+ T = (0, 4, 0)^T, \quad B_1 = (0, 4, \sqrt{3})^T, \quad B_2 = \left(-\frac{3}{2}, 4, -\frac{\sqrt{3}}{2}\right)^T, \quad B_3 = \left(\frac{3}{2}, 4, -\frac{\sqrt{3}}{2}\right)^T.
206
+ $$
207
+
208
+ If we rotate the lower triangle counterclockwise around the origin, so that $B_2$ is below $A_3$, $B_1$ below $A_2$ and $B_3$ below $A_1$ (see figure 4), we find that the point $T' = (0, \sqrt{7}, 0)^T$ resulting from
209
+
210
+ $$
211
+ B'_1 = \left(-\frac{3}{2}, \sqrt{7}, -\frac{\sqrt{3}}{2}\right)^T, \quad B'_2 = \left(\frac{3}{2}, \sqrt{7}, -\frac{\sqrt{3}}{2}\right)^T, \quad B'_3 = \left(0, \sqrt{7}, \sqrt{3}\right)^T.
212
+ $$
213
+
214
+ is another solution of our polynomial system.
215
+
216
+ Consequently, we obtain, by simple rotation, a one-parameter family of solutions and that is precisely the reason why our ideal is not zero-dimensional, so that we have to add more equations to the ideal basis in order to prevent rotations. In such situations, it is a good idea to take a closer look at reality and indeed it turns out that such torsions of the robot are impossible since the guiding rod is connected to the upper part by a *universal joint* that can only move forwards/backwards and left/right but does not permit rotational movement.
217
+ ---PAGE_BREAK---
218
+
219
+ Figure 3: Simple Substitution.
220
+
221
+ Figure 4: Simple Rotated Substitution.
222
+ ---PAGE_BREAK---
223
+
224
+ Again, we will not discuss the modeling of the joint in detail, but here is the basic idea behind our approach: If we know the center $T = (x, y, z)$ of the triangle, the position of the outer points $B_1, B_2, B_3$ is fixed. So take a look at the point $S := (0, -\sqrt{x^2+y^2+z^2}, 0)$ which is just the position of $T$ if the kinematic is not moved to any side ("rest position"). We can calculate the angle $\alpha$ between $S$ and $T$, more precisely the term $c_{\alpha} = \cos\alpha$. Let the points $B'_1, B'_2, B'_3$ be the vertices of the lower triangle in this rest position. With the help of rotation matrices and the angle $\alpha$ we can then compute the solution for the points $B_1, B_2, B_3$ explicitly. Doing so adds eleven further equations to our former ideal basis which makes us end up with $F := [x_1^2 + (z_1-a)^2 + y_1^2 - l_1^2, (x_2 + \frac{\sqrt{3}a}{2})^2 + (z_2 + \frac{a}{2})^2 + y_2^2 - l_2^2, (x_3 - \frac{\sqrt{3}a}{2})^2 + (z_3 + \frac{a}{2})^2 + y_3^2 - l_3^2, (x_1 - x_2)^2 + (y_1 - y_2)^2 + (z_1 - z_2)^2 - b^2, (x_1 - x_3)^2 + (y_1 - y_3)^2 + (z_1 - z_3)^2 - b^2, (x_2 - x_3)^2 + (y_2 - y_3)^2 + (z_2 - z_3)^2 - b^2, (x - x_1)x + (y - y_1)y + (z - z_1)z, (x - x_2)x + (y - y_2)y + (z - z_2)z, (x - x_3)x + (y - y_3)y + (z - z_3)z, (x_1 + x_2 + x_3) - 3x, (y_1 + y_2 + y_3) - 3y, (z_1 + z_2 + z_3) - 3z], \sqrt{3}dl(x-x_1) - bxz, \sqrt{3}dl(y-y_1) - byz, \sqrt{3}l(z-z_1) + bd, \sqrt{3}lby + 2\sqrt{3}dl(x-x_2) + bxz, -\sqrt{3}lbx + 2\sqrt{3}dl(y-y_2) + byz, 2\sqrt{3}l(z-z_2) - bd, -\sqrt{3}lby + 2\sqrt{3}dl(x-x_3) + bxz, \sqrt{3}lbx + 2\sqrt{3}dl(y-y_3) + byz, 2\sqrt{3}l(z-z_3) - bd, x^2 + y^2 - d^2, x^2 + y^2 + z^2 - l^2]$, where $d = \sqrt{x^2+y^2}$ and $l = \sqrt{x^2+y^2+z^2}$.
225
+
226
+ To solve the inverse kinematic problem, we choose the variables as $x_1, y_1, z_1, x_2, y_2, z_2, x_3, y_3, z_3, l, d, l_1, l_2, l_3$ and the constants as $x, y, z, a, b$. The H-Basis can be easily computed as $H = [(y^2+x^2)x_1-2xz_z-xy^2+2z^2x-x^3, z_1+2z_3-3z, (2x^2+2y^2)y_2+2zy_z+xbd-2y^3-2yx^2-2yz^2, z_2-z_3, (2x^2+2y^2)x_3+2xz_z+ybd-2z^2x-2xy^2-2x^3, (2x^2+2y^2)y_3+2zy_z-xbd-2y^3-2yx^2-2yz^2, (2x^2+2y^2)x_2+2xz_z-ybd-2z^2x-2x^3-2xy^2, y^2+x^2)y_1-2zy_z-yx^2-y^3+2yz^2, (z^2+y^2+x^2)d^2-2y^2x^2-x^4-x^2z^2-y^4-z^2y^2, (6z^2+6y^2+6x^2)z_3d+(b\sqrt{3}x^2+b\sqrt{3}y^2)l+(-6z^3-6zy^2-6zx^2)d, (12z^2+12y^2+12x^2)z_3^2+(-24zy^2-24zx^2-24z^3)z_3+12z^4+12x^2z^2-b^2x^2-y^2b^2+12z^2y^2, 3bld+(6x^2\sqrt{3}+6\sqrt{3}z^2+6y\sqrt{3})z_3-6\sqrt{3}z^3-6\sqrt{3}zx^2-6y^2z\sqrt{3}, 6z_3l-6zl+\sqrt{3}bd, l^2-x^2-y^2-z^2, 3l_l^1-12az_3-3x^2-b^2-3z^2+18za-3y^2-3a^2, 6y^2+6x^2)l_l^{\frac{1}{5}}+(6xz\sqrt{3}a-6ax^4-6ay^4)z_l-3ayb\sqrt{3}d-6x^4a^4-6xz_za\sqrt{3}-6y^4-6x^4a\sqrt{3}-6xa\sqrt{3}y^4-6x^4a^4-18y^4x^4-18y^4x^4-6a\sqrt{3}y^4-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/
5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-(3z^2+3y^2+3x^2))^{-1}/(3z+3)^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 
9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9$
227
+ ---PAGE_BREAK---
228
+
229
+ $$6x^6 + 6xa\sqrt{3}y^4 - 6x^2a^2z^2 - 12x^2a^2y^2 + 12x^3a\sqrt{3}y^2 - 24y^2x^2z^2 - 2b^2x^2z^2 - 4b^2x^2y^2 - 6a^2y^2z^2 - 2y^2b^2z^2 - 6x^4a^2 - 12y^4z^2 - 18y^4x^2 - 12x^4z^2 - 18x^4y^2 - 2b^2x^4 - 6a^2y^4 - 2y^4b^2 - 12zax^2y^2 + 6x^5a\sqrt{3} - 3ay^3b\sqrt{3}d - 3ayb\sqrt{3}dz^2 - 3ayb\sqrt{3}dx^2 + 6x^3a\sqrt{3}z^2 + 3laxzbd + lax^2\sqrt{3}bd + lay^2\sqrt{3}bd / (6z^2y^2 + 6y^4 + 12y^2x^2 + 6x^2z^2 + 6x^4))^{1/2}, \text{ where}$$
230
+
231
+ $$d = \sqrt{x^2 + y^2} \text{ and } l = \sqrt{x^2 + y^2 + z^2}.$$
232
+
233
+ For the forward transform, the variables are $x_1, y_1, z_1, x_2, y_2, z_2, x_3, y_3, z_3, x, y, z, l, d$ and the constants $l_1, l_2, l_3$. Because both Computer Algebra systems we used, Singular and Maple, cannot even compute a Gröbner basis for the ideal as it is given in this form, we had to relocate the points $A_1, A_2$ and $A_3$ to the next integer grid value. Furthermore, we will substitute $\{a = 2, b = 4, l_i = 3 \mid i = 1, 2, 3\}$ because the symbolic solution is still too complex, thus changing the ideal to $F = [x_1^2+y_1^2+(2-z_1)^2-9, (-2-x_2)^2+y_2^2+(-1-z_2)^2-9, (2-x_3)^2+y_3^2+(-1-z_3)^2-9, (x-x_1)x+(y-y_1)y+(z-z_1)z, (x-x_2)x+(y-y_2)y+(z-z_2)z, (x-x_3)x+(y-y_3)y+(z-z_3)z, (x_1-x_2)^2+(y_1-y_2)^2+(z_1-z_2)^2-13, (x_3-x_2)^2+(y_3-y_2)^2+(z_3-z_2)^2-16, (x_1-x_3)^2+(y_1-y_3)^2+(z_1-z_3)^2-13, x_1+x_2+x_3-3x, y_1+y_2+y_3-3y, z_1+z_2+z_3-3z, 2dl(x-x_1)-4xz, 2dl(y-y_1)-4yz, 2l(z-z_1)+4d, 8ly+4dl(x-x_2)+4xz, -8lx+4dl(y-y_2)+4yz, 4l(z-z_2)-4d, -8ly+4dl(x-x_3)+4xz, 8lx+4dl(y-y_3)+4yz, 4l(z-z_3)-4d, x^2+y^2-d^2, x^2+y^2+z^2-l^2]$.
234
+
235
+ A (tdeg-ordered) Gröbner basis contains no less than 83 elements and therefore cannot be called very small. But at least we can figure out that there are 40 solutions to the equations and with the algorithm from [3, p. 134ff] we can compute the number of real solutions and discover that there are only four of them, thus, up to symmetry, the desired solution and probably one with crossed rods as before.
236
+
237
+ In summary one can say that presently the realistic problem is inaccessible, but its terrible complexity originates from “contamination” by the 36 complex solutions which correspond to physically impossible configurations. This is one more major drawback of algebraic methods which can find the solutions only in the algebraic closure of the original field.
238
+
239
+ ## References
240
+
241
+ [1] ADAMS, W. W., AND LOUSTAUNAU, P. *An Introduction to Groebner Bases*, vol. 3 of *Graduate Studies in Mathematics*. AMS, 1994.
242
+
243
+ [2] BUCHBERGER, B. *Ein Algorithmus zum Auffinden der Basiselemente des Restklassen-rings nach einem nulldimensionalen Polonomideal*. PhD thesis, Innsbruck, 1965.
244
+
245
+ [3] COHEN, A. M., CUYPERS, H., AND STERK, M., Eds. *Some Tapas of Computer Algebra*, vol. 4 of *Algorithms and Computations in Mathematics*. Springer, 1999.
246
+
247
+ [4] COX, D., LITTLE, J., AND O'SHEA, D. *Ideals, Varieties and Algorithms*, 2. ed. Undergraduate Texts in Mathematics. Springer-Verlag, 1996.
248
+
249
+ [5] EISENBUD, D. *Commutative Algebra with a View Toward Algebraic Geometry*, vol. 150 of *Graduate Texts in Mathematics*. Springer, 1994.
250
+
251
+ [6] MÖLLER, H. M., AND SAUER, T. H-bases for polynomial interpolation and system solving. *Advances Comput. Math.* **12** (2000), 335–362.
252
+ ---PAGE_BREAK---
253
+
254
+ [7] MÖLLER, H. M., AND SAUER, T. H-bases II: Applications to numerical problems. In *Curve and Surface fitting: Saint-Malo 1999* (2000), A. Cohen, C. Rabut, and L. L. Schumaker, Eds., Vanderbilt University Press, pp. 333–342.
255
+
256
+ [8] MÖLLER, H. M., AND STETTER, H. J. Multivariate polynomial equations with multiple zeros solved by matrix eigenproblems. *Numer. Math.* **70** (1995), 311–329.
257
+
258
+ [9] SAUER, T. Gröbner bases, H-bases and interpolation. *Trans. Amer. Math. Soc.* **353** (2001), 2293–2308.
259
+
260
+ [10] SAUER, T. Ideal bases for graded polynomial rings and applications to interpolation. In *Multivariate Approximation and Interpolation with Applications* (2002), M. Gasca, Ed., vol. 20 of *Monograph. Academia de Ciencias de Zaragoza*, Academia de Ciencias Zaragoza, pp. 97–110.
261
+
262
+ [11] SAUER, T. Polynomial interpolation in several variables: Lattices, differences, and ideals. In *Multivariate Approximation and Interpolation*, M. Buhmann, W. Hausmann, K. Jetter, W. Schaback, and J. Stöckler, Eds. Elsevier, 2006, pp. 189–228.
263
+
264
+ [12] STETTER, H. J. Matrix eigenproblems at the heart of polynomial system solving. *SIGSAM Bull.* **30**, 4 (1995), 22–25.
265
+
266
+ [13] TRINKS, W. Über B. Buchbergers Verfahren, Systeme algebraischer Gleichungen zu lösen. *J. Number Theory* **10** (1978), 475–488.
267
+
268
+ Tomas Sauer
269
+
270
+ Lehrstuhl für Numerische Mathematik
271
+ Universität Giessen
272
+
273
+ Heinrich-Buff-Ring 44
274
+ D-35392 Gießen, Germany
275
+
276
+ Dominik Wagenführ
277
+
278
+ Siemens AG
279
+ A&D MC RD 7
280
+
281
+ Frauenauracher Str. 80
282
+ D-91056 Erlangen, Germany
283
+
284
+ Tomas.Sauer@math.uni-giessen.de Dominik.Wagenfuehr@automation.siemens.com
samples/texts_merged/2092097.md ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # A GROUP OF AUTOMORPHISMS OF THE HOMOTOPY GROUPS
5
+
6
+ HIROSHI UEHARA
7
+
8
+ It is well known that the fundamental group $\pi_1(X)$ of an arcwise connected topological space $X$ operates on the $n$-th homotopy group $\pi_n(X)$ of $X$ as a group of automorphisms. In this paper I intend to construct geometrically a group $\mathfrak{A}(X)$ of automorphisms of $\pi_n(X)$, for every integer $n \ge 1$, which includes a normal subgroup isomorphic to $\pi_1(X)$, so that the factor group of $\mathfrak{A}(X)$ by $\pi_1(X)$ is completely determined by some invariant $\Sigma(X)$ of the space $X$. The complete analysis of the operation of the group on $\pi_n(X)$ is given in §3, §4, and §5.
9
+
10
+ Throughout the whole paper, $X$ denotes an arcwise connected topological space which has such suitable homotopy extension properties as a polyhedron does, and all mappings are continuous transformations.
11
+
12
+ ## §1. Definition of the group $\mathfrak{A}(X)$.
13
+
14
+ Let $x_0$ be an arbitrary point of the space $X$, and $\Omega$ a collection $\mathcal{X}^*(x_0, x_0)$ of all the mappings that transform $X$ into $X$ and $x_0$ into $x_0$. For two maps $a, b \in \Omega$, $a$ is said to be homotopic to $b$ (in notation : $a \sim b$) if there exists a homotopy $h_t \in \Omega$ (for $0 \le t \le 1$) such that $h_0 = a$ and $h_1 = b$. A mapping $a \in \Omega$ is said to have a (two sided) homotopy inverse, if there is a map $\varphi \in \Omega$ such that $a\varphi \sim 1$ and $\varphi a \sim 1$, where 1 denotes the identity transformation of $X$ onto itself. Let $\Omega^*$ be the collection of all the mappings belonging to $\Omega$, each of which has a homotopy inverse.
15
+
16
+ Now let $X \times I$ be the topological product of $X$ and the line segment $I$ between 0 and 1, and let us consider the totality $U$ of the mappings $\vartheta : X \times I \rightarrow X$ which satisfy the following conditions :
17
+
18
+ $$ (1.1) \qquad \begin{aligned} \text{i)} & \quad \theta |_{X \times 0} \in \Omega^* \\ \text{ii)} & \quad \theta(x_0, 1) = x_0 \end{aligned} \} $$
19
+
20
+ For two maps $\theta, \theta' \in U$, $\theta$ is homotopic to $\theta'$ (notation : $\theta \sim \theta'$) if there exists a homotopy $h_t : X \times I \to X$ (for $0 \le t \le 1$) such that
21
+
22
+ Received Oct. 25, 1950.
23
+
24
+ I should like to express my sincere gratitude for the courtesies extended to me by Professor S. T. Hu. This paper is inspired by his paper, "On the Whitehead group of automorphisms of the relative homotopy groups."
25
+ ---PAGE_BREAK---
26
+
27
+ $$ (1.2) \qquad \begin{alignedat}{2} \text{i)} \qquad & h_0 &&= \theta, \quad h_1 = \theta', \\ \text{ii)} \qquad & h_t(x_0, 0) &&= h_t(x_0, 1) = x_0. \end{alignedat} $$
28
+
29
+ It is easily verified that this relation is an equivalent relation, and therefore $U$ is divided into equivalent classes in this sense.
30
+
31
+ We shall denote by $[\theta]$ the class containing $\theta$. For $\theta \in U$ we construct a mapping $\sigma_\theta \in U$ as follows: a mapping $\bar{\sigma}_\theta$ which is defined continuously on the set $((X \times 0) \cup (x_0 \times I))$ such that $\bar{\sigma}_\theta(x, 0) = x$ and $\bar{\sigma}_\theta(x_0, t) = \theta(x_0, t)$, can be extended to a mapping $\sigma_\theta \in U$, provided that $\{x_0\}$ has a homotopy extension property in $X$ relative to $X$. The extended mapping is, of course, not unique but the homotopy class containing $\sigma_\theta$ is uniquely determined if the set $((x_0 \times I) \cup (X \times 0) \cup (X \times 1))$ has a homotopy extension property in $X \times I$ relative to $X$; another arbitrarily extended map $\sigma'_\theta$ is homotopic to $\sigma_\theta$. Now two maps $\theta_1, \theta_2 \in U$ are 'multiplied' together by the rule,
32
+
33
+ $$ (1.3) \qquad \theta_1 \times \theta_2(x, t) = \begin{cases} \rho(x, 2t), & 0 \le t \le \frac{1}{2}, \\ \sigma_{\theta_2}(\rho(x, 1), 2t-1), & \frac{1}{2} \le t \le 1, \end{cases} $$
34
+
35
+ where $\rho(x, t) = \theta_2(\theta_1(x, t), 0)$. Then we have
36
+
37
+ **LEMMA 1.1** $\theta_1 \times \theta_2$ is again a member of the collection $U$.
38
+
39
+ *Proof.* Let $a_1(x) = \theta_1(x, 0)$, $a_2(x) = \theta_2(x, 0)$, then both $a_1$ and $a_2$ belong to $\Omega^*$, so that $a_1$ and $a_2$ have homotopy inverses $\varphi_1, \varphi_2$ respectively. From the considerations that $\varphi_1\varphi_2$ is a homotopy inverse of $a_2 a_1$ and that $\theta_1 \times \theta_2(x, 0) = \rho(x, 0) = \theta_2(\theta_1(x, 0), 0) = \theta_2(a_1(x), 0) = a_2(a_1(x))$, we have $\theta_1 \times \theta_2 | X \times 0 \in \Omega^*$ and therefore the condition (1.1) i) is satisfied. Also we have $\theta_1 \times \theta_2(x_0, 1) = \sigma_{\theta_2}(\rho(x_0, 1), 1) = \sigma_{\theta_2}(x_0, 1) = \theta_2(x_0, 1) = x_0$. This proves the Lemma.
40
+
41
+ **LEMMA 1.2** The class $[\theta_1 \times \theta_2]$ depends only on the classes $[\theta_1]$ and $[\theta_2]$.
42
+
43
+ *Proof.* Let $\theta'_1 \in [\theta_1]$ and $\theta'_2 \in [\theta_2]$, then there exist two homotopies $h_s$, $k_s$: $X \times I \rightarrow X$ ($0 \le s \le 1$) such that $h_0 = \theta_1$, $h_1 = \theta'_1$, $k_0 = \theta_2$, and $k_1 = \theta'_2$. Putting $\rho_s(x, t) = k_s(h_s(x, t), 0)$, we have
44
+
45
+ $$ (1.4) \qquad \left. \begin{aligned} \text{i)} & \quad \rho_0(x, t) = \theta_2(\theta_1(x, t), 0), \quad \rho_1(x, t) = \theta'_2(\theta'_1(x, t), 0), \\ \text{ii)} & \quad \rho_s(x_0, 0) = k_s(h_s(x_0, 0), 0) = k_s(x_0, 0) = x_0, \\ \text{iii)} & \quad \rho_s(x_0, 1) = k_s(h_s(x_0, 1), 0) = k_s(x_0, 0) = x_0. \end{aligned} \right\} $$
46
+
47
+ Since $k_s(x_0, 0) = k_s(x_0, 1) = x_0$, we can construct, in virtue of the homotopy extension properties previously mentioned, $\sigma_{k_s} \in U$ ($0 \le s \le 1$), which is also continuous with respect to $s$, just as in case of $\sigma_\theta$. Then clearly we have $\sigma_{k_s}(x, 0) = x$ and $\sigma_{k_s}(x_0, t) = k_s(x_0, t)$ by the construction of the function $\sigma_{k_s}$.
48
+
49
+ $$ H_s(x, t) = \begin{cases} \rho_s(x, 2t), & 0 \le t \le \frac{1}{2}, \\ \sigma_{k_s}(\rho_s(x, 1), 2t-1), & \frac{1}{2} \le t \le 1, \end{cases} $$
50
+ ---PAGE_BREAK---
51
+
52
+ is obviously continuous and satisfies the conditions (1.2) of the homotopy; as
53
+ to the condition ii), we have $H_s(x_0, 0) = \rho_s(x_0, 0) = x_0$ from (1.4) ii) and
54
+ $H_s(x_0, 1) = \sigma_{k_s}(\rho_s(x_0, 1), 1) = \sigma_{k_s}(x_0, 1) = k_s(x_0, 1) = x_0$ from (1.4) iii).
55
+
56
+ Since (1.2) i) is evidently satisfied from (1.4) i), the lemma has been proved.
57
+ Thus the multiplication in $U$ induces a multiplication in the set of the homotopy
58
+ classes ; $[\theta_1] \times [\theta_2] \equiv [\theta_1 \times \theta_2]$.
59
+
60
+ **THEOREM 1.** By the multiplication defined above, all the homotopy classes of $U$ constitute a group $\mathfrak{A}(X)$ with $x_0$ as the base point.
61
+
62
+ *Proof.* Let us prove that the multiplication is associative. Let $\theta_1, \theta_2, \theta_3 \in U$,
63
+ then $([\theta_1] \times [\theta_2]) \times [\theta_3]$ and $[\theta_1] \times ([\theta_2] \times [\theta_3])$ are represented by mappings
64
+ $(\theta_1 \times \theta_2) \times \theta_3$ and $\theta_1 \times (\theta_2 \times \theta_3)$ respectively. By definition
65
+
66
+ $$
67
+ \begin{align*}
68
+ (\theta_1 \times \theta_2) \times \theta_3 (x, t) &= \begin{cases}
69
+ \theta_3 (\theta_2 (\theta_1 (x, 4t), 0), 0), & \frac{1}{4} \ge t \ge 0, x \in X, \\
70
+ \theta_3 (\sigma_{\theta_2} (\theta_2 (\theta_1 (x, 1), 0), 4t-1), 0), & \frac{1}{2} \ge t \ge \frac{1}{4}, x \in X, \\
71
+ \sigma_{\theta_3} (\theta_3 (\sigma_{\theta_2} (\theta_2 (\theta_1 (x, 1), 0), 1), 0), 2t-1), & 1 \ge t \ge \frac{1}{2}, x \in X,
72
+ \end{cases}
73
+ \\[1em]
74
+ \theta_1 \times (\theta_2 \times \theta_3) (x, t) &= \begin{cases}
75
+ \theta_3 (\theta_2 (\theta_1 (x, 2t), 0), 0), & \frac{1}{2} \ge t \ge 0, x \in X, \\
76
+ \sigma_{\theta_2 \times \theta_3} (\theta_3 (\theta_2 (\theta_1 (x, 1), 0), 0), 2t-1), & 1 \ge t \ge \frac{1}{2}, x \in X.
77
+ \end{cases}
78
+ \end{align*}
79
+ $$
80
+
81
+ As it is rather difficult to show directly the existence of homotopy between
82
+ ($\theta_1 \times \theta_2$) $\times$ $\theta_3$ and $\theta_1 \times (\theta_2 \times \theta_3)$, we prove it by making use of the homotopy
83
+ extension property referred to above. From the relation above we have ($\theta_1 \times \theta_2$)
84
+ $\times \theta_3 (x, 0) = \theta_3 (\theta_2 (\theta_1 (x, 0), 0), 0) = \theta_1 \times (\theta_2 \times \theta_3) (x, 0)$, and from the property
85
+ of $\sigma_\theta$ we have
86
+
87
+ $$
88
+ (1.6) \quad (\theta_1 \times \theta_2) \times \theta_3(x_0, t) = \begin{cases} \theta_3(\theta_2(\theta_1(x_0, 4t), 0), 0), & \frac{1}{4} \ge t \ge 0 \\ \theta_3(\theta_2(x_0, 4t-1), 0), & \frac{1}{4} \le t \le \frac{1}{2} \\ \theta_3(x_0, 2t-1), & \frac{1}{2} \le t \le 1 \end{cases}
89
+ $$
90
+
91
+ Since $\sigma_{\theta_2 \times \theta_3}(\theta_3(\theta_2(\theta_1(x_0, 1), 0), 0), 2t-1) = \sigma_{\theta_2 \times \theta_3}(x_0, 2t-1) =$
92
+
93
+ $$
94
+ \begin{align*}
95
+ &\theta_3(\theta_2(x_0, 4t-2), 0), && \frac{3}{4} \ge t \ge \frac{1}{2}, \\
96
+ &\sigma_{\theta_3}(\theta_3(\theta_2(x_0, 1), 0), 4t-3) \\
97
+ &= \sigma_{\theta_3}(x_0, 4t-3) = \theta_3(x_0, 4t-3), && 1 \ge t \ge \frac{3}{4},
98
+ \end{align*}
99
+ $$
100
+
101
+ we have
102
+
103
+ $$
104
+ (1.7) \quad \theta_1 \times (\theta_2 \times \theta_3)(x_0, t) = \begin{cases} \theta_3(\theta_2(\theta_1(x_0, 2t), 0), 0), & \frac{1}{2} \ge t \ge 0, \\ \theta_3(\theta_2(x_0, 4t-2), 0), & \frac{1}{2} \le t \le \frac{3}{4}, \\ \theta_3(x_0, 4t-3), & \frac{3}{4} \le t \le 1. \end{cases}
105
+ $$
106
+
107
+ From (1.6) and (1.7) there exists a homotopy $h(x, s, t)$ defined on $\{x_0\} \times I^s \times I^t$
108
+ ---PAGE_BREAK---
109
+
110
+ such that
111
+
112
+ $$h(x_0, 0, t) = (\theta_1 \times \theta_2) \times \theta_3(x_0, t), \quad 1 \ge t \ge 0,$$
113
+
114
+ $$h(x_0, 1, t) = \theta_1 \times (\theta_2 \times \theta_3)(x_0, t), \quad 1 \ge t \ge 0,$$
115
+
116
+ $$h(x_0, s, 0) = h(x_0, s, 1) = x_0, \quad 1 \ge s \ge 0.$$
117
+
118
+ and
119
+
120
+ Moreover putting
121
+
122
+ $$h(x, 0, t) = (\theta_1 \times \theta_2) \times \theta_3 (x, t), \quad x \in X, \ 1 \ge t \ge 0,$$
123
+
124
+ $$h(x, 1, t) = \theta_1 \times (\theta_2 \times \theta_3)(x, t), \quad x \in X, \ 1 \ge t \ge 0,$$
125
+
126
+ $$h(x, s, 0) = \theta_3(\theta_2(\theta_1(x, 0), 0), 0), \quad x \in X, \ 1 \ge s \ge 0,$$
127
+
128
+ and
129
+
130
+ $h$ is defined continuously on the set $\{(X \times I^s \times 0) \cup [(x_0 \times I^s) \cup (X \times 0) \cup (X \times 1)] \times I^t\}$. Thus, if $\{(x_0 \times I) \cup (X \times 0) \cup (X \times 1)\}$ has a homotopy extension property in $X \times I$ relative to $X$, $h$ can be extended to a mapping $X \times I^s \times I^t \to X$, which gives a homotopy between $(\theta_1 \times \theta_2) \times \theta_3$ and $\theta_1 \times (\theta_2 \times \theta_3)$.
131
+
132
+ Next we must prove the existence of the unity in $\mathfrak{A}(X)$. Let $\theta_0(x, t) = x$, then clearly $\theta_0 \in U$. For any $\theta \in U$ we have from the definition of multiplication
133
+
134
+ $$ (\theta \times \theta_0)(x, t) = \begin{cases} \rho(x, 2t), & x \in X, \quad 0 \le t \le \frac{1}{2}, \\ \sigma_{\theta_0}(\rho(x, 1), 2t-1), & x \in X, \quad \frac{1}{2} \le t \le 1, \end{cases} $$
135
+
136
+ where $\rho(x, 2t) = \theta_0(\theta(x, 2t), 0) = \theta(x, 2t)$, and $\sigma_{\theta_0}(x, t) = x$ may be assumed. Since $\sigma_{\theta_0}(\rho(x, 1), 2t-1) = \rho(x, 1) = \theta_0(\theta(x, 1), 0) = \theta(x, 1)$ for $\frac{1}{2} \le t \le 1$, we have
137
+
138
+ $$ (\theta \times \theta_0)(x, t) = \begin{cases} \theta(x, 2t), & x \in X, \quad 0 \le t \le \frac{1}{2}, \\ \theta(x, 1), & x \in X, \quad \frac{1}{2} \le t \le 1. \end{cases} $$
139
+
140
+ Let us define a homotopy $h_s(x, t)$ for $0 \le s \le 1$ as follows;
141
+
142
+ $$ h_s(x, t) = \begin{cases} \theta\left(x, \frac{2t}{1+s}\right), & x \in X, \quad 0 \le t \le \frac{s+1}{2}, \\ \theta(x, 1), & x \in X, \quad \frac{s+1}{2} \le t \le 1, \end{cases} $$
143
+
144
+ then $h_s$ satisfies the conditions of the homotopy (1.2), so that $h_0 = \theta \times \theta_0$ and $h_1 = \theta$. Thus $\theta_0$ represents the right side unity of the group $\mathfrak{A}(X)$.
145
+
146
+ Lastly we proceed to show the existence of the inverse element of any element $[\theta] \in \mathfrak{A}(X)$. By the assumption on an element $\theta$ in $U$, we have $\theta |_{X \times 0} \in \Omega^*$, so that $\theta |_{X \times 0}$ has a homotopy inverse $\varphi \in \Omega^*$. Now we define a mapping $\theta^{-1} \in U$ as follows: if we put
147
+
148
+ $$
149
+ \begin{align*}
150
+ \theta^{-1}(x, 0) &= \varphi(x), && x \in X, \\
151
+ \theta^{-1}(x_0, t) &= \varphi(\theta(x_0, 1-t)), && 1 \ge t \ge 0.
152
+ \end{align*}
153
+ $$
154
+
155
+ then $\theta^{-1}$ can be extended to a map: $X \times I \to X$ because of the homotopy
156
+ ---PAGE_BREAK---
157
+
158
+ extension property of {$x_0$}. This extended map $\theta^{-1}$ is shown to represent the inverse of $[\theta]$. Indeed, we have
159
+
160
+ $$ \theta \times \theta^{-1}(x, t) = \begin{cases} \rho(x, 2t), & 0 \le t \le \frac{1}{2}, x \in X, \\ \sigma_{\theta^{-1}}(\rho(x, 1), 2t-1), & \frac{1}{2} \le t \le 1, x \in X, \end{cases} $$
161
+
162
+ where $\rho(x, t) = \theta^{-1}(\theta(x, t), 0) = \varphi(\theta(x, t))$, $\sigma_{\theta^{-1}}(x, 0) = x$, and $\sigma_{\theta^{-1}}(x_0, t) = \theta^{-1}(x_0, t) = \varphi(\theta(x_0, 1-t))$. As $\varphi$ is a homotopy inverse of $\theta |_{X \times 0}$, and on the other hand $\sigma_{\theta^{-1}} |_{x_0 \times I}$ represents the inverse element of $[\rho |_{x_0 \times I}]$, we have a continuous function $h$ defined on $\{(X \times I^s \times 0) \cup [(x_0 \times I^s) \cup (X \times 0) \cup (X \times 1)] \times I^t\}$ such that
163
+
164
+ $$ h(x, s, 0) = k(x, s), \quad x \in X, s \in \overline{I}, $$
165
+
166
+ $$ h(x_0, s, t) = l(s, t), \quad s \in \overline{I}, t \in \overline{I}, $$
167
+
168
+ $$ h(x, 0, t) = \theta \times \theta^{-1}(x, t), \quad x \in X, t \in \overline{I}, $$
169
+
170
+ $$ h(x, 1, t) = x, \quad x \in X, t \in \overline{I}, $$
171
+
172
+ where $k$ is a homotopy obtained by the relation $\varphi\theta \sim 1$, and $l$ is also a homotopy whose existence is assured by $\rho(x_0, 1-t) = \sigma_{\theta^{-1}}(x_0, t)$. Again, by the aid of a homotopy extension property of $\{(x_0 \times I) \cup (X \times 0) \cup (X \times 1)\}$, $h$ can be extended to a map $X \times I \times I \to X$, which gives a desired homotopy. This completes the proof.
173
+
174
+ In order to clarify the conditions preassigned to the space $X$ we put down here all the homotopy extension properties assumed in the arguments of the above Theorem;
175
+
176
+ i) $\{x_0\}$ has a homotopy extension property in $X$ relative to $X$,
177
+
178
+ (1.8) ii) $\{(x_0 \times I) \cup (X \times 0) \cup (X \times 1)\}$ has a homotopy extension property in $X \times I$ relative to $X$.
179
+
180
+ These assumptions are, of course, satisfied by a polyhedron.
181
+
182
+ ## § 2. A group of automorphisms $\Sigma(X)$ and the structure of $\mathfrak{A}(X)$.
183
+
184
+ Now we define a group $\Sigma(X)$, which operates on $\pi_n(X)$, as we shall see later, as a group of automorphisms, and study a homomorphism of $\mathfrak{A}(X)$ onto $\Sigma(X)$, the kernel of which is isomorphic to the fundamental group $\pi_1(X)$ of $X$.
185
+
186
+ Let us define a homotopy concept in $\Omega^*$ in the following sense: we shall write $a \sim b$ for $a, b \in \Omega^*$ if there exists a homotopy $h_t \in \Omega$ ($0 \le t \le 1$) such that $h_0 = a$ and $h_1 = b$. Then $\Omega^*$ is divided into homotopy classes. Let us denote by $\Sigma(X)$ the set of all the homotopy classes. For two maps $a, b \in \Omega^*$ we define $(a \times b)(x) = b(a(x))$ for any $x \in X$. Then $a \times b \in \Omega^*$ because $a \times b \in \Omega$ follows immediately from the definition and, if $\varphi$ and $\psi$ are homotopy inverses of $a$
187
+ ---PAGE_BREAK---
188
+
189
+ and $b$ respectively, $\psi \times \varphi \in \Omega^*$ is a homotopy inverse of $a \times b$. Furthermore, if $a \sim a'$ and $b \sim b'$, $a \times b \sim a' \times b'$. Thus the multiplication in $\Omega^*$ induces a multiplication in $\Sigma(X)$.
190
+
191
+ **THEOREM 2.** $\Sigma(X)$ constitutes a group.
192
+
193
+ *Proof.* It is evident from the definition of multiplication that the associative law holds. As to the existence of unity, let $E$ be a class containing the identity transformation of $X$, then $E \cdot A = A$ and $A \cdot E = A$ for any $A \in \Sigma(X)$. Lastly for any $A = [a]$ we choose $A^{-1} = [\varphi]$ containing a homotopy inverse $\varphi$ of $a$. Then $AA^{-1} = E$ and $A^{-1}A = E$ is clear from the definition of homotopy inverse.
194
+
195
+ **THEOREM 3.** $\Sigma(X)$ operates on the *n*-th homotopy group $\pi_n(X, x_0)$, for every integer $n \ge 1$, as a group of automorphisms.
196
+
197
+ *Proof.* Let $f$ be a representative of an element $\alpha$ of $\pi_n(X)$ and let $a$ be a representative of $A \in \Sigma(X)$. Let us take the mapping $af: S^n \to X$ as a representative of $A\alpha$. The correspondence $A; \alpha \to A\alpha$ is a transformation of $\pi_n(X)$ into itself because, if $f'$ is another representative of $\alpha$, we have $af \sim af'$, and if $a'$ is another representative of $A$, we have also $af \sim a'f$. Then it is easily proved that this correspondence is an automorphism of $\pi_n(X)$.
198
+
199
+ *Example of $\Sigma(X)$:*
200
+
201
+ Let $X$ be an $n$-sphere $S^n$, then from the concept of Brouwer's degree we have $\Sigma(S^n) = \{E = [1], A = [-1]\}$ where $E$ is a class containing the identity transformation and $A$ is a class containing a mapping of degree $-1$. Since clearly $A^2 = A \cdot A = E$, the group is a cyclic group of order 2.
202
+
203
+ Now we intend to define a homomorphism $\varphi$ of $\mathfrak{A}(X)$ onto $\Sigma(X)$. Let $\theta \in U$ be a representative of an element of $\mathfrak{A}(X)$, then $a_\theta = \theta | X \times 0$ represents an element of $\Sigma(X)$. From the homotopy concepts given in §1 and §2, it is obvious that if $\theta \sim \theta'$, we have $a_\theta \sim a_{\theta'}$. By the correspondence $\varphi: [\theta] \to [a_\theta]$ we have the following theorem.
204
+
205
+ **THEOREM 4.** $\varphi$ is a homomorphism of $\mathfrak{A}(X)$ onto $\Sigma(X)$, the kernel of which is isomorphic to the fundamental group $\pi_1(X)$.
206
+
207
+ *Proof.* For two elements $[\theta_1], [\theta_2] \in \mathfrak{A}(X)$, we have $\varphi([\theta_1]) = [a_{\theta_1}]$ and $\varphi([\theta_2]) = [a_{\theta_2}]$. By definition $\varphi([\theta_1] \times [\theta_2]) = \varphi([\theta_1 \times \theta_2])$ may be represented by a mapping $\theta_1 \times \theta_2 | X \times 0 = \rho(x, 0) = \theta_2(\theta_1(x, 0), 0)$, so that $\theta_1 \times \theta_2 | X \times 0 = a_{\theta_1} \times a_{\theta_2}$. Thus $\varphi([\theta_1] \times [\theta_2]) = \varphi([\theta_1]) \times \varphi([\theta_2])$ is proved. Clearly $\varphi$ is an onto-homomorphism from the definition of the group.
208
+
209
+ Lastly, in order to complete the proof it is sufficient to prove that the kernel of $\varphi$ is isomorphic to $\pi_1(X)$. If $\varphi([\theta]) = [a_\theta]$ is unity, we may take without loss of generality a representative $\theta$ of $[\theta]$ as follows :
210
+ ---PAGE_BREAK---
211
+
212
+ $$ (2.1) \qquad \left. \begin{array}{l} \text{i)} \quad \theta: X \times I \to X, \\ \text{ii)} \quad \theta(x, 0) = x, \\ \text{iii)} \quad \theta(x_0, 1) = x_0, \end{array} \right\} $$
213
+
214
+ for (1.8) is assumed. To any element $[\theta]$ belonging to the kernel of $\varphi$ let there correspond an element $[\xi_0]$ of the fundamental group $\pi_1(X)$ by the rule,
215
+
216
+ $$ (2.2) \qquad \xi_0(t) = \theta(x_0, t). $$
217
+
218
+ This correspondence $\lambda$ has a definite meaning because, if $\theta \sim \theta'$, $\xi_0$ and $\xi_0'$ represent the same element of $\pi_1(X)$. Let us prove that $\lambda$ is an isomorphism. Let $[\theta_1], [\theta_2]$ be two elements belonging to the kernel of $\varphi$, then $[\theta_1] \times [\theta_2]$ is represented by a map $\theta_1 \times \theta_2$,
219
+
220
+ $$ \theta_1 \times \theta_2(x, t) = \begin{cases} \theta_2(\theta_1(x, 2t), 0), & 0 \le t \le \frac{1}{2}, x \in X, \\ \sigma_{\theta_2}(\theta_2(\theta_1(x, 1), 0), 2t-1), & \frac{1}{2} \le t \le 1, x \in X. \end{cases} $$
221
+
222
+ Since from (2.1) we have $\theta_2(x, 0) = x$, $\theta_2(\theta_1(x, 2t), 0) = \theta_1(x, 2t)$ and $\sigma_{\theta_2}(\theta_2(\theta_1(x, 1), 0), 2t-1) = \sigma_{\theta_2}(\theta_1(x, 1), 2t-1)$ so that by (2.2)
223
+
224
+ $$ \xi_{\theta_1 \times \theta_2}(t) = \begin{cases} \theta_1(x_0, 2t), & 0 \le t \le \frac{1}{2}, \\ \sigma_{\theta_2}(\theta_1(x_0, 1), 2t-1), & \frac{1}{2} \le t \le 1. \end{cases} $$
225
+
226
+ Since $\theta_1(x_0, 1) = x_0$ and $\sigma_{\theta_2}(x_0, t) = \theta_2(x_0, t)$, we have $\sigma_{\theta_2}(\theta_1(x_0, 1), 2t-1) = \theta_2(x_0, 2t-1)$. Now $\xi_{\theta_1 \times \theta_2}(t)$ may be described as follows:
227
+
228
+ $$ \xi_{\theta_1 \times \theta_2}(t) = \begin{cases} \theta_1(x_0, 2t), & 0 \le t \le \frac{1}{2}, \\ \theta_2(x_0, 2t-1), & \frac{1}{2} \le t \le 1. \end{cases} $$
229
+
230
+ On the other hand, we have, by the definition of the fundamental group,
231
+
232
+ $$ \lambda([\theta_1] \times [\theta_2]) = [\hat{\xi}_{\theta_1 \times \theta_2}] = [\hat{\xi}_{\theta_1}] \circ [\hat{\xi}_{\theta_2}] = \lambda[\theta_1] \circ \lambda[\theta_2], $$
233
+
234
+ so that the homomorphism is established.
235
+
236
+ Clearly $\lambda$ is an onto-homomorphism, because of the homotopy extension property (1.8) i). It remains only to prove that from $\xi_{\theta_1} \sim \xi_{\theta_2}$ follows $\theta_1 \sim \theta_2$. It may be assumed that $\theta_1(x, 0) = x$ and $\theta_2(x, 0) = x$. Since $\xi_{\theta_1} \sim \xi_{\theta_2}$, a homotopy $h_s(t)$ ($0 \le s \le 1$) exists such that $h_0(t) = \theta_1(x_0, t)$, $h_1(t) = \theta_2(x_0, t)$ and $h_s(0) = h_s(1) = x_0$. A continuous function $h$ may be defined on the set $\{(X \times I^s \times 0) \cup [(X \times 0) \cup (X \times 1) \cup (x_0 \times I^s)] \times I^t\}$ as follows:
237
+
238
+ $$ h(x, s, 0) = x, \quad x \in X, s \in I^s, \\ h(x, 0, t) = \theta_1(x, t), \quad x \in X, t \in I^t, \\ h(x, 1, t) = \theta_2(x, t), \quad x \in X, t \in I^t, \\ h(x_0, s, t) = h_s(t), \quad s \in I^s, t \in I^t. $$
239
+
240
+ If (1.8) ii) is assumed, it is proved by the aid of the extended map $h: X \times I^s \times I^t$
241
+ ---PAGE_BREAK---
242
+
243
+ → X that $\theta_1$ is homotopic to $\theta_2$. This completes the proof.
244
+
245
+ ### § 3. Operation of $\mathfrak{A}(X)$ on the homotopy groups.
246
+
247
+ Let $f$ be a representative of an element $\alpha \in \pi_n(X)$ and $\theta$ be a representative of an element $\vartheta \in \mathfrak{A}(X)$. Let us define $\vartheta\alpha = [h] \in \pi_n(X)$ by the rule,
248
+
249
+ $$ (3.1) \qquad h(x) \equiv \theta(f(x), 1). $$
250
+
251
+ This definition has a definite meaning in the sense that $[h]$ depends only on $\alpha$ and $\vartheta$. Then we have,
252
+
253
+ **THEOREM 5.** $\vartheta\alpha = (A\alpha)^{\xi}$ where $A = \varphi(\vartheta) \in \Sigma(X)$ and $\xi$ is an element of $\pi_1(X)$ represented by $\theta(x_0, t)$ ($1 \ge t \ge 0$).
254
+
255
+ *Proof.* From the definition of homomorphism $\varphi$, $A$ is represented by $a_0(x) = \theta(x, 0)$, and therefore $\theta(f(x), 0) = a_0f(x)$. It is an immediate consequence of the operation of $A$ that $a_0f$ represents an element $A\alpha$ of $\pi_n(X)$. Moreover if $f(p) = x_0$ for a fixed point $p \in S^n$, $\theta(f(p), t) = \theta(x_0, t)$ represents an element $\xi$ of $\pi_1(X)$, so that according to the operation of $\pi_1$ on $\pi_n$ due to Eilenberg $h(x) = \theta(f(x), 1)$ represents an element $(A\alpha)^{\xi} \in \pi_n$. This completes the proof.
256
+
257
+ As a direct consequence of Theorem 5 we have,
258
+
259
+ **THEOREM 6.** $\mathfrak{A}(X)$ is a group of automorphisms of $\pi_n(X)$ for every integer $n \ge 1$.
260
+
261
+ *Proof.* Because of the combination of automorphisms $A$ and $\xi$, the operation of $\vartheta \in \mathfrak{A}(X)$ on $\pi_n$ is also an automorphism of $\pi_n(X)$.
262
+
263
+ ### § 4. Algebraic construction of $\mathfrak{A}(X)$.
264
+
265
+ Now that the operation of $\mathfrak{A}(X)$ on $\pi_n$ has been clarified by Theorem 5, we can construct the group $\mathfrak{A}(X)$ from a purely algebraic standpoint. Let $\chi(X) = \{(A, \xi); A \in \Sigma(X), \xi \in \pi_1(X)\}$; the totality of all the ordered pairs consisting of an arbitrarily chosen element of $\Sigma(X)$ and of an arbitrarily chosen element of $\pi_1(X)$. Defining $(A, \xi)(\alpha) = (A\alpha)^{\xi}$ for any $\alpha \in \pi_n(X)$, $(A, \xi)$ operates on $\pi_n(X)$, for every integer $n \ge 1$, as an automorphism. If we define a multiplication in the set $\chi(X)$ of automorphisms just defined by the rule,
266
+
267
+ $$ (B, \eta)(A, \xi)(\alpha) = (B, \eta)((A, \xi)(\alpha)), $$
268
+
269
+ then we have $(B, \eta)(A, \xi) \in \chi(X)$. In order to prove this, we need the following lemma.
270
+
271
+ **LEMMA 4.1** $A(\alpha^{\xi}) = (A\alpha)^{A\xi} = (A, A\xi)(\alpha)$ for any $\alpha \in \pi_n$, where $A\xi$ can be interpreted in the sense that $\Sigma(X) \ni A$ operates on the homotopy group of any dimension, especially on the fundamental group too.
272
+
273
+ *Proof.* Let $\alpha$ be represented by a mapping $f: S^n \to X, S^n \ni p_0 \to x_0$ and let
274
+ ---PAGE_BREAK---
275
+
276
+ $\xi = [e(t)], 1 \ge t \ge 0$. We have a mapping $F : \{S^n \times 0 \cup p_0 \times I\} \to X$ such that $F(x, 0) = f(x)$ for any $x \in S^n$, and $F(p_0, t) = e(t)$. From the homotopy extension property of a polyhedron we have an extended map $\bar{F}: S^n \times I \to X$ of $F$. Since $\bar{F}(x, 0) = f(x)$ and $\bar{F}(p_0, t) = e(t)$, $\bar{F}(x, 1)$ represents an element $\alpha^{\xi} \in \pi_n(X)$. Let $a$ be a representative of $A$. Putting $a(\bar{F}(x, t)) \equiv G(x, t): S^n \times I \to X$ we have $[G(x, 0)] = A\alpha$ from $G(x, 0) = a(f(x))$ and $[G(x, 1)] = A(\alpha^{\xi})$ from $G(x, 1) = a(\bar{F}(x, 1))$. Also, from $G(p_0, t) = a(e(t))$ follows $[G(p_0, t)] = A\xi$. Thus we have $A(\alpha^{\xi}) = (A\alpha)^{A\xi}$. Making use of the lemma, we have
277
+
278
+ $$
279
+ \begin{align*}
280
+ (B, \eta)(A, \xi)(\alpha) &\equiv (B, \eta)((A, \xi)(\alpha)) = (B, \eta)((A\alpha)^{\xi}) \\
281
+ &= (B((A\alpha)^{\xi}))^{\eta} \\
282
+ &= ((B(A\alpha))^{B\xi})^{\eta} \\
283
+ &= (B(A\alpha))^{B\xi \cdot \eta} \equiv (A \cdot B, B\xi \cdot \eta)(\alpha).
284
+ \end{align*}
285
+ $$
286
+
287
+ Thus
288
+ $(B, \eta)(A, \xi) = (A \cdot B, B\xi \cdot \eta) \in \chi(X).$
289
+
290
+ **THEOREM 7.** By this multiplication $\chi(X)$ forms a group.
291
+
292
+ *Proof.* As to the associative law we have
293
+
294
+ $$
295
+ \begin{align*}
296
+ (C, \zeta)(B, \eta)(A, \xi) &= (C, \zeta)(AB, B\xi \cdot \eta) \\
297
+ &= (AB \cdot C, C(B\xi \cdot \eta) \cdot \zeta) \\
298
+ &= (ABC, BC\xi \cdot C\eta \cdot \zeta)
299
+ \end{align*}
300
+ $$
301
+
302
+ $$
303
+ \begin{align*}
304
+ ((C, \zeta)(B, \eta))(A, \xi) &= (BC, C\eta \cdot \zeta)(A, \xi) \\
305
+ &= (A \cdot BC, BC\xi(C\eta \cdot \zeta)) \\
306
+ &= (ABC, BC\xi \cdot C\eta \cdot \zeta)
307
+ \end{align*}
308
+ $$
309
+
310
+ Thus
311
+ $$
312
+ (C, \zeta)((B, \eta)(A, \xi)) = ((C, \zeta)(B, \eta))(A, \xi)
313
+ $$
314
+
315
+ The existence of the unity is proved as follows :
316
+
317
+ ($E$, $e$)(A, $\xi$) = ($AE$, $E\xi \cdot e$) = (A, $\xi$) where E, e are the unities of $\Sigma(X)$ and $\pi_1(X)$ respectively.
318
+
319
+ The existence of an inverse element is proved thus :
320
+
321
+ $$
322
+ (A^{-1}, A^{-1}\xi^{-1})(A, \xi) = (AA^{-1}, A^{-1}\xi \cdot A^{-1}\xi^{-1}) = (E, A^{-1}(\xi\xi^{-1})) = (E, e).
323
+ $$
324
+
325
+ This completes the proof.
326
+
327
+ Now the following MAIN THEOREM concerning the relation of two groups $\mathfrak{A}(X)$ and $\chi(X)$ imparts the complete analysis to the structure of $\mathfrak{A}(X)$ and also to the operation of $\mathfrak{A}(X)$ on $\pi_n(X)$ for every integer $n \ge 1$.
328
+
329
+ **MAIN THEOREM 8.** $\mathfrak{A}(X)$ is isomorphic to the group $\chi(X)$. Moreover, an isomorphism can be established between these groups, preserving the operation on the homotopy groups.
330
+
331
+ *Proof.* The method of proof being analogous as for Theorems 4, 5, we shall
332
+ ---PAGE_BREAK---
333
+
334
+ restrict ourselves to show the correspondence between two groups. Let $\theta$ be a representative of $\vartheta \in \mathfrak{A}(X)$ and let $a_\theta = \theta | X \times 0, \xi_\theta = \theta | x_0 \times I$. Then to $\vartheta$ let there correspond $([a_\theta], [\xi_\theta]) \in \chi(X)$. It can be shown that this correspondence is an isomorphism and that the operations of $\vartheta$ and of the corresponding element $([a_\theta], [\xi_\theta])$ on $\pi_n$ are the same.
335
+
336
+ § 5. Some remarks on the group $\mathfrak{A}(X)$.
337
+
338
+ By the aid of the main theorem it is advantageous to use $\chi(X)$ in place of $\mathfrak{A}(X)$ in calculating the invariant $\mathfrak{A}(X)$ of the space $X$. As is easily seen, two distinct elements of $\chi(X)$ do not always operate differently on $\pi_n$ so that as the group of the operation on $\pi_n$, $\chi(X)$ may be reduced to a smaller group. This reduction gives rise to an analogous classification of the space $X$ as the simplicity of a space due to Eilenberg.
339
+
340
+ Let $\chi^*(X)$ be the totality of all elements in $\chi(X)$ whose operations on any element of $\pi_n(X)$ are trivial; i.e. $\chi^*(X) = \{(A, \xi) ; (A, \xi)(\alpha) = \alpha$ for any element $\alpha \in \pi_n(X)\}$. Then $\chi^*(X)$ is clearly a normal subgroup of $\chi(X)$. Similarly, put $\chi^{**}(X) = \{(A, e) ; (A, e)(\alpha) = \alpha$ for any $\alpha \in \pi_n(X)\}$ and $\chi^{***}(X) = \{(E, \xi) ; (E, \xi)(\alpha) = \alpha$ for any $\alpha \in \pi_n(X)\}$, then these two groups are also normal in $\Sigma(X)$ and $\pi_1(X)$ respectively as well as in $\chi(X)$. It is well known that the space is $n$-simple in the sense of Eilenberg if $\chi^{***}(X) \cong \pi_1(X)$. It may be an interesting problem to consider the spaces satisfying the conditions such as $\chi^*(X) = \chi(X)$ or $\chi^{**}(X) \cong \Sigma(X)$.
341
+
342
+ BIBLIOGRAPHY
343
+
344
+ [1] Eilenberg, S., On the relation between the fundamental group of a space and higher homotopy groups, Fundamenta Math. 22 (1939).
345
+
346
+ [2] Hu, S. T., On the Whitehead Group of automorphisms of the relative homotopy groups, Portugaliae Math. 7 (1948).
samples/texts_merged/230879.md ADDED
@@ -0,0 +1,885 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Imaging Below the Diffraction Limit: A Statistical Analysis
5
+
6
+ Morteza Shahram and Peyman Milanfar, Senior Member, IEEE
7
+
8
+ **Abstract**—The present paper is concerned with the statistical analysis of the resolution limit in a so-called “diffraction-limited” imaging system. The canonical case study is that of incoherent imaging of two closely-spaced sources of possibly unequal brightness. The objective is to study how far beyond the classical Rayleigh limit of resolution one can reach at a given signal to noise ratio. The analysis uses tools from statistical detection and estimation theory. Specifically, we will derive explicit relationships between the minimum detectable distance between two closely-spaced point sources imaged incoherently at a given SNR. For completeness, asymptotic performance analysis for the estimation of the unknown parameters is carried out using the Cramér-Rao bound. To gain maximum intuition, the analysis is carried out in one dimension, but can be well extended to the two-dimensional case and to more practical models.
9
+
10
+ **Index Terms**—Cramér-Rao bound, diffraction, estimation, hypothesis test, imaging, Rayleigh limit, resolution, super-resolution.
11
+
12
+ ## I. INTRODUCTION
13
+
14
+ IN incoherent optical imaging systems the image of an ideal point source is captured as a spatially extended pattern known as the point-spread function (PSF), as shown for the one-dimensional case in Fig. 1. In two dimensions, this function is the well-known Airy diffraction pattern [1]. When two closely-located point sources are measured through this kind of optical imaging system, the measured signal is the incoherent sum of the respective shifted point spread functions. According to the classical Rayleigh criterion, two incoherent point sources are “barely resolved” when the central peak of the diffraction pattern generated by one point source falls exactly on the first zero of the pattern generated by the second one. A more detailed and complete explanation of incoherent imaging and related topics can be found in [1] and [2].
15
+
16
+ The Rayleigh criterion for resolution in an imaging system is generally considered as an accurate estimate of limits in practice. But under certain conditions related to signal-to-noise ratio (SNR), resolution beyond the Rayleigh limit is indeed possible. This can be called the super-resolution limit [3]. Indeed, at sufficiently high sampling rates, and in the absence of noise, arbitrarily small details can be resolved.
17
+
18
+ To gain maximum intuition and perspective from the foregoing analysis, all discussion herein will be carried out in the
19
+
20
+ Fig. 1. Image of point source captured by diffraction-limited imaging.
21
+
22
+ one-dimensional case, which can later be extended to the two-dimensional case. To begin, let us assume that the original signal of interest is the sum of two impulse functions separated by a small distance $d$:¹
23
+
24
+ $$ \sqrt{\alpha}\,\delta\left(x - \frac{d}{2}\right) + \sqrt{\beta}\,\delta\left(x + \frac{d}{2}\right). \quad (1) $$
25
+
26
+ As mentioned before, the image will be the incoherent sum of two point spread functions, resulting from an imaging aperture (or slit in one-dimensional case, as seen in Fig. 2)
27
+
28
+ $$ s(x; \alpha, \beta, d) = \alpha h\left(x - \frac{d}{2}\right) + \beta h\left(x + \frac{d}{2}\right) \quad (2) $$
29
+
30
+ where for our specific case of incoherent imaging $h(x) = \mathrm{sinc}^2(x) = [\sin(\pi x)/(\pi x)]^2$, but other PSF's can also be considered. Finally, the measured signal includes discretized samples corrupted with additive (readout) noise. Given samples at $x_k$ ($k = 1, \dots, N$) of the measured signal, we can rewrite the measurement model as
31
+
32
+ $$ g(x_k) = s(x_k; \alpha, \beta, d) + w(x_k) \\ = \alpha h\left(x_k - \frac{d}{2}\right) + \beta h\left(x_k + \frac{d}{2}\right) + w(x_k) \quad (3) $$
33
+
34
+ where $w(x_k)$ is assumed to be a zero-mean Gaussian white noise process with variance $\sigma^2$.
35
+
36
+ With the present definition, the Rayleigh limit corresponds to $d=1$ as can be seen in Figs. 1 and 2. This means that for values $d < 1$, the two point sources are (in the classical Rayleigh sense)
37
+
38
+ Manuscript received March 3, 2003; revised November 3, 2003. This work was supported in part by NSF CAREER Grant CCR-9984246. The associate editor coordinating the review of this manuscript and approving it for publication was Dr. Thierry Blu.
39
+
40
+ The authors are with the Department of Electrical Engineering, University of California, Santa Cruz, CA 95064 USA (e-mail: shahram@ee.ucsc.edu; milanfar@ee.ucsc.edu).
41
+
42
+ Digital Object Identifier 10.1109/TIP.2004.826096
43
+
44
+ ¹From now on we refer to $\alpha$ and $\beta$ as intensities and also we assume that $\alpha, \beta > 0$. Also, note that this model (for now) assumes point sources symmetrically placed about the (known) origin. This model will be generalized later in the paper.
45
+ ---PAGE_BREAK---
46
+
47
+ Fig. 2. Incoherent imaging of two closely located point sources.
48
+
49
+ "unresolvable." It is important to note that the Rayleigh criterion does not consider the presence of noise.
50
+
51
+ In the last forty years or so, there have been several attempts, and more recently surveys, of the problem of resolution from the statistical viewpoint. Of these, the most significant earliest works were done by Helstrom [4]–[6]. In particular, in [5] and [6], he derived lower bounds on the mean-squared error of unbiased estimators for the source positions, the distance between the sources, and the radiance values, using the Cramér-Rao inequality. In [5], he considered two separate situations. In the first, the problem of whether any signal was present or not was treated, whereas in the second, the question of whether one or two sources were present was treated. (This second scenario is, of course, what interests us in the present paper.) Helstrom described a geometrical optics field model of the problem involving a general radiance distribution and point spread function, for objects with arbitrary shape. To study the case of the circular aperture and point sources, he applied a complex and remarkable set of approximations and simplifications of the initial model. Also, he assumed that the distance between the point sources is known to the detector.
52
+
53
+ In [3] and [7], an approximate statistical theory was given to compute the required number of detected photons (similar to the notion of signal to noise ratio) for a certain desired resolution, and the value of achievable resolution by image restoration techniques was also investigated by numerical and iterative deconvolution. In these papers the definition of resolution was made as the separation of the two point sources that can be resolved through a deconvolution procedure. In [7], the analysis of the achievable resolution in deconvolved astronomical images was studied based on a criterion similar to Rayleigh's.
54
+
55
+ In [9] and [12] two-point resolution of imaging systems was studied using a model fitting theory where the probability of resolution was computed based on the structural change of the stationary points of the likelihood function. Also in [11] the Cramér-Rao lower bound formulation was used to study the limits to attainable precision of estimated distance between the two point sources. Assuming a Gaussian PSF, they determined a lower bound for the estimation error variance. Also, in [10], the reader can find a very comprehensive review of past and present approaches to the concept of resolution. In this paper,
56
+
57
+ we also compute the Cramér-Rao (CR) lower bound in exact, closed form for two different cases. This analysis is in fact extendable to any point spread function.
58
+
59
+ Finally, an interesting, more recent paper [13] views the resolution problem from the information theory perspective. This line of thinking, again with simplifying approximations, is used to compute limits of resolution enhancement using Shannon's theorem of maximum transferable information via a noisy channel. The paper [13] considers the case of equally bright nearby point sources and derives an expression relating resolution (here defined as the inverse of the discernable distance between two equally bright point sources), logarithmically to the SNR.
60
+
61
+ The results of our paper extend, illuminate, and unify the earlier works in this field using more modern tools in statistical signal processing. Namely, we use locally optimal tests, which lead to more explicit, readily interpreted, and applicable results. In addition, we study various cases including unknown and/or unequal intensities, which have not been considered in their full complexity before.² The present results clarify, arguably for the first time, the specific effects of the relevant parameters on the definition of resolution, and its limits, as needed in practice.
62
+
63
+ In this paper we formulate the problem of two-point resolution in terms of statistical estimation/detection. Our approach is to precisely define a quantitative measure of resolution in statistical terms by addressing the following question: what is the minimum separation between two point sources (maximum attainable resolution limit) that is detectable at a given signal-to-noise ratio (SNR). In contrast to earlier definitions of resolution, there is little ambiguity in our proposed definition, and all parameters (PSF, noise variance, sampling rate, etc.) will be explicitly present in the formulation. Our earlier work on this problem was presented in [14], which essentially covers the material in Section IV-A of this paper.
64
+
65
+ The organization of the paper is as follows. Section II will explain and formulate our definition, and the corresponding statistical framework and models, in detail. In Section III, in order to use linear detection/estimation structures, we will discuss a signal approximation approach. In Section IV, we will present our statistical analysis for different cases of increasing generality. The asymptotic performance of the maximum likelihood estimate of the unknown parameters in terms of the Cramér-Rao lower bound will be discussed in Section V. Finally, some comments and conclusion will be presented in Section VI.
66
+
67
+ ## II. STATISTICAL ANALYSIS FRAMEWORK
68
+
69
+ The question of whether one or two peaks are present in the measured signal can be formulated in statistical terms. Specifically, for the proposed model the equivalent question is whether the parameter *d* is equal to zero or not. If *d* = 0 then we only have one peak and if *d* > 1 then there are two resolved peaks according to the Rayleigh criterion. So the problem of interest revolves around values of *d* in the range of 0 ≤ *d* < 1. Therefore, we can define two hypotheses, which will form the basis of our statistical framework. Namely, let $H_0$ denote the null hy-
70
+
71
+ ²Reference [9] considered the case of unequal intensities in a different framework.
72
+ ---PAGE_BREAK---
73
+
74
+ pothesis that $d = 0$ (one peak present) and let $H_1$ denote the
75
+ alternate hypothesis that $d > 0$ (two peaks present)
76
+
77
+ $$
78
+ \begin{equation}
79
+ \begin{cases}
80
+ H_0: d = 0 & \text{One peak is present} \\
81
+ H_1: d > 0 & \text{Two peaks are present}
82
+ \end{cases}
83
+ \tag{4}
84
+ \end{equation}
85
+ $$
86
+
87
+ Given discrete samples of the measured signal, we can rewrite
88
+ the problem as
89
+
90
+ $$
91
+ \left\{
92
+ \begin{array}{ll}
93
+ H_0: & \mathbf{g} = \mathbf{s}_0 + \mathbf{w} \\
94
+ H_1: & \mathbf{g} = \mathbf{s} + \mathbf{w}
95
+ \end{array}
96
+ \right.
97
+ \qquad (5)
98
+ $$
99
+
100
+ where
101
+
102
+ $$
103
+ \begin{align*}
104
+ \mathbf{g} &= [g(x_1), \dots, g(x_N)]^T, \\
105
+ \mathbf{w} &= [w(x_1), \dots, w(x_N)]^T, \\
106
+ \mathbf{s} &= [s(x_1; \alpha, \beta, d), \dots, s(x_N; \alpha, \beta, d)]^T, \\
107
+ \mathbf{s}_0 &= [s_0(x_1), \dots, s_0(x_N)]^T,
108
+ \end{align*}
109
+ $$
110
+
111
+ and
112
+
113
+ $$
114
+ s(x_k; \alpha, \beta, d) = \alpha h \left( x_k - \frac{d}{2} \right) + \beta h \left( x_k + \frac{d}{2} \right) \quad (6)
115
+ $$
116
+
117
+ $$
118
+ s_0(x_k) = s(x_k; \alpha, \beta, d)|_{d=0} = (\alpha + \beta)h(x_k). \quad (7)
119
+ $$
120
+
121
+ This is a problem of detecting a deterministic signal with unknown parameters $(\alpha, \beta$, and $d$, in general). From (5), since the probability density function (PDF) under $H_1$ is not known exactly, it is not possible to design optimal detectors (in the Neyman-Pearson sense) by simply forming the likelihood ratio. The general structure of composite hypothesis testing is involved when unknown parameters appear in the PDF's [16, p. 248]. There are two major approaches for composite hypothesis testing. The first is to use explicit prior knowledge as to the likely values of parameters of interest and apply a Bayesian method to this detection problem. However, there is generally no such a priori information available. Alternately, the second approach, the Generalized Likelihood Ratio Test (GLRT) first computes maximum likelihood (ML) estimates of the unknown parameters, and then will use these estimated value to form the standard Neyman-Pearson (NP) detector. Our focus will be on GLRT-type methods because of less restrictive assumptions and easier computation and implementation; but most importantly, because uniformly most powerful (UMP) and locally most powerful (LMP) tests can be developed for the parameter range $0 \le d < 1$.
122
+
123
+ To be a bit more specific, consider the case where it is known
124
+ that $\alpha = \beta = 1$, with the parameter $d$ unknown. The GLRT
125
+ approach offers to decide $H_1$ if
126
+
127
+ $$
128
+ L(\mathbf{g}) = \frac{\max_{d} p(\mathbf{g}, d; H_1)}{p(\mathbf{g}; H_0)} = \frac{p(\mathbf{g}, \hat{d}; H_1)}{p(\mathbf{g}; H_0)} > \gamma \quad (8)
129
+ $$
130
+
131
+ where $\hat{d}$ denotes the ML estimate of $d$, and $p(\mathbf{g}, d; H_1)$ and $p(\mathbf{g}; H_0)$ are PDF's under $H_1$ and $H_0$, respectively. Assuming additive white Gaussian noise (AWGN) with variance $\sigma^2$ and $\hat{\mathbf{s}} = [s(x_1; 1, 1, \hat{d}), \dots, s(x_N; 1, 1, \hat{d})]^T$ we will have:
132
+
133
+ $$
134
+ \begin{align*}
135
+ L(\mathbf{g}) &= \frac{\frac{1}{(2\pi\sigma^2)^{N/2}} \exp\left(-\frac{1}{2\sigma^2} ||\mathbf{g} - \hat{\mathbf{s}}||^2\right)}{\frac{1}{(2\pi\sigma^2)^{N/2}} \exp\left(-\frac{1}{2\sigma^2} ||\mathbf{g} - \mathbf{s}_0||^2\right)} \\
136
+ &= \exp\left(\frac{1}{2\sigma^2}\left(-||\hat{\mathbf{s}}||^2 + ||\mathbf{s}_0||^2 + 2\mathbf{g}^T(\hat{\mathbf{s}} - \mathbf{s}_0)\right)\right).
137
+ \end{align*}
138
+ $$
139
+
140
+ Therefore, $H_1$ will be chosen if
141
+
142
+ $$
143
+ - \| \hat{\mathbf{s}} \|^{2} + 2 \mathbf{g}^{T} (\hat{\mathbf{s}} - \mathbf{s}_{0}) > \gamma'. \quad (9)
144
+ $$
145
+
146
+ Equivalently,
147
+
148
+ $$
149
+ \sum_{k=1}^{N} \Big\{ -\left[\alpha h\left(x_k - \frac{\hat{d}}{2}\right) + \beta h\left(x_k + \frac{\hat{d}}{2}\right)\right]^2 \\
150
+ \quad + 2\Big[\alpha h\left(x_k - \frac{\hat{d}}{2}\right) + \beta h\left(x_k + \frac{\hat{d}}{2}\right) \\
151
+ \quad\quad - (\alpha + \beta)h(x_k)\Big]\, g(x_k) \Big\} \\
152
+ \quad > \gamma' \qquad (10)
153
+ $$
154
+
155
+ where the ML estimate of $d$ in the above involves solving the
156
+ following minimization problem
157
+
158
+ $$
159
+ \min_{d} \sum_{k=1}^{N} \left[ \alpha h \left( x_k - \frac{d}{2} \right) + \beta h \left( x_k + \frac{d}{2} \right) - g(x_k) \right]^2 \Rightarrow \hat{d} \quad (11)
160
+ $$
161
+
162
+ It should be clear from the above that this detection/estimation problem is highly nonlinear. However, since the range of interest are the values of $0 \le d < 1$, these representing resolution beyond the Rayleigh limit, it is quite appropriate for the purposes of the our analysis to consider approximating the model of the signal around $d = 0$, and to apply locally optimal detectors. This is the approach we take.
163
+
164
+ III. (QUADRATIC) MODEL APPROXIMATION
165
+
166
+ Much of the complexity we encountered in the earlier formu-
167
+ lation of the problem can be remedied by appealing to an ap-
168
+ proximation of the signal model. This approximate model is de-
169
+ rived by expanding the signal about the small parameter values
170
+ around $d = 0$. As alluded to earlier, this approximation is quite
171
+ adequate in the sense that all the parameter values of interest for
172
+ resolution beyond the Rayleigh diffraction limit are contained in
173
+ the range $[0, 1]$ anyway.
174
+
175
+ We consider the Taylor series expansion of $s(x_k; \alpha, \beta, d)$ around $d = 0$, with all other variables fixed.³ More specifically,
176
+
177
+ $$
178
+ s(x_k; \alpha, \beta, d) \approx (\alpha + \beta)h(x_k) + \frac{\beta - \alpha}{2}dh_1(x_k) \\ + \frac{\alpha + \beta}{8}d^2h_2(x_k) \quad (12)
179
+ $$
180
+
181
+ where $h_1(\cdot)$ and $h_2(\cdot)$ denote the first and second order derivatives of $h(\cdot)$ and where for $h(x) = \mathrm{sinc}^2(x)$
182
+
183
+ $$
184
+ \begin{align}
185
+ h_1(x_k) &= \left. \frac{\partial h(x)}{\partial x} \right|_{x=x_k} \\
186
+ &= \frac{2\sin(\pi x_k)(\pi x_k \cos(\pi x_k) - \sin(\pi x_k))}{\pi^2 x_k^3} \tag{13}
187
+ \end{align}
188
+ $$
189
+
190
+ $$
191
+ h_2(x_k) = \left. \frac{\partial^2 h(x)}{\partial x^2} \right|_{x=x_k} \\
192
+ = \frac{(2\pi^2 x_k^2 - 3) \cos(2\pi x_k) - 4\pi x_k \sin(2\pi x_k) + 3}{\pi^2 x_k^4}. \quad (14)
193
+ $$
194
+
195
+ ³It is important here to note that this is an approximation about the *parameter* of interest *d*, and not the variable *x*; as such it therefore is a global approximation of the function.
196
+ ---PAGE_BREAK---
197
+
198
+ In the above approximation, we elect to keep terms up to order 2 of the Taylor expansion. This gives a rather more accurate representation of the signal, and more importantly, if we only kept the first order term, then in the case $\alpha = \beta$, the first order term would simply vanish and *no* term in $d$ would appear in the approximation. The reader can find a more detailed discussion on the accuracy of this approximation in Appendix A. The proposed approximation simplifies the hypothesis testing problem to essentially a linear detection problem (as we will see in the next section). The approximation is helpful in that we can carry out our analysis more simply. In addition, it leads to a general form of locally optimum detectors [16, p. 217] as will be discussed later.
199
+
200
+ Continuing with vector notation we have:
201
+
202
+ $$ s \approx (\alpha + \beta)\mathbf{h} + \frac{\beta - \alpha}{2} d\mathbf{h}_1 + \frac{\alpha + \beta}{8} d^2\mathbf{h}_2 \quad (15) $$
203
+
204
+ where
205
+
206
+ $$
207
+ \begin{aligned}
208
+ \mathbf{h} &= [h(x_1), \dots, h(x_N)]^T \\
209
+ \mathbf{h}_1 &= [h_1(x_1), \dots, h_1(x_N)]^T \\
210
+ \mathbf{h}_2 &= [h_2(x_1), \dots, h_2(x_N)]^T.
211
+ \end{aligned}
212
+ $$
213
+
214
+ Writing in the form of hypotheses described earlier in (5)
215
+
216
+ $$
217
+ \left\{
218
+ \begin{array}{l}
219
+ H_0: \tilde{\mathbf{g}} = (\alpha + \beta)\mathbf{h} + \mathbf{w} \\
220
+ H_1: \tilde{\mathbf{g}} = (\alpha + \beta)\mathbf{h} + \frac{\beta-\alpha}{2} d\mathbf{h}_1 + \frac{\alpha+\beta}{8} d^2\mathbf{h}_2 + \mathbf{w}
221
+ \end{array}
222
+ \right.
223
+ \quad (16)
224
+ $$
225
+
226
+ where we distinguish $\tilde{\mathbf{g}}$ from $\mathbf{g}$ due to the approximated model. According to this model, we define the measured signal-to-noise ratio (per sample) as follows:
227
+
228
+ $$ \text{SNR} = \frac{1}{N\sigma^2} \left\| (\alpha + \beta)\mathbf{h} + \frac{\beta - \alpha}{2} d\mathbf{h}_1 + \frac{\alpha + \beta}{8} d^2\mathbf{h}_2 \right\|^2 . \quad (17) $$
229
+
230
+ For any symmetric PSF ($h(x)$) and in the case of above-Nyquist sampling, the following relations can be verified
231
+
232
+ $$
233
+ \begin{aligned}
234
+ \mathbf{h}^T \mathbf{h}_1 &= 0 \\
235
+ \mathbf{h}_2^T \mathbf{h}_1 &= 0 \\
236
+ \mathbf{h}^T \mathbf{h}_2 &= -\mathbf{h}_1^T \mathbf{h}_1.
237
+ \end{aligned}
238
+ $$
239
+
240
+ Therefore, we can rewrite (17) in the following form:
241
+
242
+ $$
243
+ \begin{aligned}
244
+ \text{SNR} ={}& \frac{1}{N\sigma^2} \left[ (\alpha + \beta)^2 E_0 + \left(\frac{\beta - \alpha}{2}\right)^2 d^2 E_1 \right. \\
245
+ & \qquad \left. + \left(\frac{\alpha + \beta}{8}\right)^2 d^4 E_2 - \left(\frac{\alpha + \beta}{2}\right)^2 d^2 E_1 \right] \\
246
+ ={}& \frac{1}{N\sigma^2} \left[ (\alpha + \beta)^2 E_0 - \alpha\beta d^2 E_1 + \left(\frac{\alpha + \beta}{8}\right)^2 d^4 E_2 \right]
247
+ \end{aligned}
248
+ \quad (18) $$
249
+
250
+ where we define
251
+
252
+ $$ E_0 = \mathbf{h}^T \mathbf{h} = f_s \int_{-\infty}^{+\infty} h^2(x) dx \quad (19) $$
253
+
254
+ $$ E_1 = h_1^T h_1 = f_s \int_{-\infty}^{+\infty} \left[ \frac{\partial h(x)}{\partial x} \right]^2 dx \quad (20) $$
255
+
256
+ $$ E_2 = h_2^T h_2 = f_s \int_{-\infty}^{+\infty} \left[ \frac{\partial^2 h(x)}{\partial x^2} \right]^2 dx \quad (21) $$
257
+
258
+ as energy terms.⁴
259
+
260
+ ⁴In above-Nyquist sampling, SNR is independent of $N$ (and $f_s$) since energy terms are all proportional to $f_s$. See Appendix B for details and explicit computations of these energy terms for the case of $h(x) = \text{sinc}^2(x)$.
261
+
262
+ IV. DETECTION THEORY FOR THE APPROXIMATED MODEL
263
+
264
+ In this section, we develop detection strategies for the hypothesis testing problem of interest based upon the approximated model. It is illuminating to study the various cases of interest in order. Our earlier assumptions were equal, known intensities, symmetrically located point sources about a given center, and the energy constraint $\alpha + \beta = 2$. In the interest of clarity and ease of exposition, we start with the case when all these assumptions hold. Then we will extend the discussion in order of increasing levels of generality by relaxing an assumption in each step. Namely, we will treat the problem for the following cases:
265
+
266
+ • the case of equal, known intensities $\alpha = \beta = 1$, with symmetrically located point sources;
267
+
268
+ • the case of unknown intensities but $\alpha + \beta = 2$, with symmetrically located point sources;
269
+
270
+ • the case of unknown intensities but $\alpha + \beta = 2$, asymmetrically located point sources;
271
+
272
+ • the case of unknown intensities, asymmetrically located point sources.
273
+
274
+ By considering (16), we notice that when $\alpha + \beta = 2$ is known to the detector (the first three cases), $(\alpha+\beta)\mathbf{h}$ is a common known term in both hypotheses and it is independent from $d$. Therefore, we may simplify further
275
+
276
+ $$
277
+ \left\{
278
+ \begin{array}{l}
279
+ H_0: y = w \\
280
+ H_1: y = \frac{\beta-\alpha}{2} d\mathbf{h}_1 + \frac{\alpha+\beta}{8} d^2\mathbf{h}_2 + w
281
+ \end{array}
282
+ \right.
283
+ \quad (22)
284
+ $$
285
+
286
+ where $y = \tilde{\mathbf{g}} - (\alpha + \beta)\mathbf{h}$. As we began to describe earlier, when $\alpha = \beta$, the hypothesis test will be reduced to the case of detecting a known signal with unknown positive amplitude ($D = d^2$). For this case, there exist well-known optimal detection strategies.
287
+
288
+ A. The Case of Equal Intensities, Symmetrically Located Point Sources
289
+
290
+ When $\alpha = \beta = 1$, (22) is reduced to
291
+
292
+ $$
293
+ \left\{
294
+ \begin{array}{l}
295
+ H_0: y = w \\
296
+ H_1: y = \frac{d^2}{4}\mathbf{h}_2 + w
297
+ \end{array}
298
+ \right.
299
+ \quad (23)
300
+ $$
301
+
302
+ It is readily shown that given this model, the ML estimate for the parameter $d^2$ is given by
303
+
304
+ $$ \hat{d}^2 = 4 (\mathbf{h}_2^T \mathbf{h}_2)^{-1} \mathbf{h}_2^T y. \quad (24) $$
305
+
306
+ Next, the test statistic resulting from the (generalized) Neyman-Pearson likelihood ratio is given by
307
+
308
+ $$ T(y) = \frac{1}{\sigma^2} (\mathbf{h}_2^T \mathbf{h}_2)^{-1} (\mathbf{h}_2^T y)^2 . \quad (25) $$
309
+
310
+ We note that the expression for the test-statistic is essentially an energy detector with the condition that the value of $d^2$ is in fact estimated from the data itself. The detector structure, due to our knowledge of the sign of the unknown distance parameter, is effectively producing a one-sided test, and hence is in fact a Uniformly Most Powerful (UMP) detector in the sense that it produces the highest detection probability for all values of the unknown parameter, and for a given false-alarm rate [16, p. 194]. Therefore, the above test-statistic can be simply replaced by
311
+
312
+ $$ T'(y) = \sqrt{T(y)} = \sqrt{\frac{1}{\sigma^2} (\mathbf{h}_2^T \mathbf{h}_2)^{-1}}\; \mathbf{h}_2^T y. \quad (26) $$
313
+
314
+ ⁵Where point sources are located at $-d_1$ and $+d_2$ instead of $-(d/2)$ and $(d/2)$.
315
+ ---PAGE_BREAK---
316
+
317
+ For any given data set y, we decide $H_1$ if the statistic exceeds a specified threshold
318
+
319
+ $$T'(y) > \gamma. \quad (27)$$
320
+
321
+ The choice of $\gamma$ is motivated by the level of tolerable false alarm (or false-positive) in a given problem, but is typically kept very low.⁶ The detection rate ($P_d$) and false-alarm rate ($P_f$) for this detector are related as [16, p. 254]
322
+
323
+ $$P_d = Q(Q^{-1}(P_f) - \sqrt{\eta}) \quad (28)$$
324
+
325
+ where
326
+
327
+ $$\eta = \frac{d^4}{16} \frac{E_2}{\sigma^2} \quad (29)$$
328
+
329
+ and $Q$ is the right-tail probability function for a standard Gaussian random variable (zero mean and unit variance); and $Q^{-1}$ is the inverse of this function [16, p. 20]. A particularly intriguing and useful relationship is the behavior of the smallest peak separation $d$, which can be detected with very high probability (say 0.99), and very low false alarm rate (say $10^{-6}$) at a given SNR. According to (18), (28), and (29), the relation between $d_{min}$ and required SNR can be made explicit
330
+
331
+ $$ \begin{align} \text{SNR} &= (Q^{-1}(P_f) - Q^{-1}(P_d))^2 \frac{64E_0 - 16d^2E_1 + d^4E_2}{Nd^4E_2} \tag{30} \\ &= \frac{1}{N}(Q^{-1}(P_f) - Q^{-1}(P_d))^2 \nonumber \\ &\quad \times \left( \frac{64E_0}{E_2} \frac{1}{d^4} - \frac{16E_1}{E_2} \frac{1}{d^2} + 1 \right). \tag{31} \end{align} $$
332
+
333
+ The above expression gives an implicit relation between the smallest detectable distance between the two (equal intensity) sources, at the particular SNR. As an example, for $h(x) = \operatorname{sinc}^2(x)$ and for the specified choice of $P_d = 0.99$ and $P_f = 10^{-6}$, if we collect $N$ equally spaced samples at $\{x_k\}$ within the interval $[-10, 10]$, at the Nyquist rate, we have
334
+
335
+ $$ \begin{aligned} \text{SNR} &= 50.12 \frac{\frac{140}{\pi^4} - \frac{14}{\pi^2}d^2 + d^4}{Nd^4} \\ &= \frac{72.04 - 71.1d^2 + 50.12d^4}{Nd^4} \end{aligned} $$
336
+
337
+ A plot of this function is shown in Fig. 3. It is worth noting that in (31), the term involving $d^{-1}$ dominates for small $d$. Therefore, a reasonably informative (but approximate) way to write SNR is
338
+
339
+ $$\text{SNR} \approx \frac{1}{N} (Q^{-1}(P_f) - Q^{-1}(P_d))^2 \frac{64 E_0}{E_2} \frac{1}{d^4} = \frac{c}{Nd^4} \quad (32)$$
340
+
341
+ where the coefficient $c$ is a function only of the selected $P_f$ and $P_d$. It is worth noting that for any sampling rate higher than the Nyquist rate, we can rewrite $c$ in (32) as follows:
342
+
343
+ $$c = 64(Q^{-1}(P_f) - Q^{-1}(P_d))^2 \frac{\int_{-\infty}^{+\infty} h^2(x) dx}{\int_{-\infty}^{+\infty} \left[ \frac{\partial^2 h(x)}{\partial x^2} \right]^2 dx} \quad (33)$$
344
+
345
+ ⁶In [9] and [12] a similar criterion (in a different framework) has been proposed, where they applied a sign test (i.e., a fixed threshold) to decide if there is one or two point sources present. This approach gives a detector with a fixed false alarm rate.
346
+
347
+ Fig. 3. Minimum detectable *d* as a function of SNR (in dB) at the Nyquist rate (exact and approximate).
348
+
349
+ Fig. 4. Minimum detectable *d* versus SNR (in dB) at Nyquist rate, and at twice Nyquist rate.
350
+
351
+ A plot of the approximate expression in (32) is also shown in Fig. 3 to be compared against the exact expression (31). The above relation (32) is a neat and rather intuitive power law that one can use to, for instance, understand the required SNR to achieve a particular resolution level of interest below the diffraction limit. Fig. 4 shows the curves defined by (30) for different sampling rates; namely Nyquist rate and twice Nyquist. As one would expect, the minimum detectable *d* becomes smaller as the number of samples increases, but it does not do so at a very fast rate because of the proportionality between SNR and the sampling rate.⁷
352
+
353
+ ## B. The Case of Unknown α and β, Symmetrically Located Point Sources
354
+
355
+ In this section we discuss a more general case where neither the intensities α and β, nor the distance *d*, are known.⁸ Equation
356
+
357
+ ⁷Similar analysis for the two-dimensional extension of this problem is presented in [22].
358
+
359
+ ⁸But we assume that $\alpha + \beta = 2$ is known to the detector.
360
+ ---PAGE_BREAK---
361
+
362
+ (22) leads to a detection problem defined in terms of a linear
363
+ model over the parameter set $\theta$ defined as follows:
364
+
365
+ $$
366
+ \begin{align}
367
+ & y = H\theta + w \tag{34} \\
368
+ & H = [h_1; h_2] \tag{35} \\
369
+ & \theta = \begin{bmatrix} \frac{\beta - \alpha}{2} d \\ \frac{d^2}{4} \end{bmatrix} \tag{36}
370
+ \end{align}
371
+ $$
372
+
373
+ where we note that the matrix $H$ has orthogonal columns.
374
+ Specifically, the detection problem is now posed as
375
+
376
+ $$
377
+ \left\{
378
+ \begin{array}{ll}
379
+ H_0: & A\theta = b \\
380
+ H_1: & A\theta \neq b
381
+ \end{array}
382
+ \right.
383
+ \tag{37}
384
+ $$
385
+
386
+ where
387
+
388
+ $$
389
+ A = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \quad b = \begin{bmatrix} 0 \\ 0 \end{bmatrix} \qquad (38)
390
+ $$
391
+
392
+ The GLRT for this problem is given by ([16], p. 274):
393
+
394
+ $$
395
+ T(y) = \frac{1}{\sigma^2} \hat{\theta}^T A^T \left[ A (H^T H)^{-1} A^T \right]^{-1} A \hat{\theta} \quad (39)
396
+ $$
397
+
398
+ $$
399
+ = \frac{1}{\sigma^2} \left( \frac{(h_1^T y)^2}{E_1} + \frac{(h_2^T y)^2}{E_2} \right) \quad (40)
400
+ $$
401
+
402
+ where
403
+
404
+ $$
405
+ \hat{\theta} = (\mathbf{H}^T \mathbf{H})^{-1} \mathbf{H}^T \mathbf{y}. \quad (41)
406
+ $$
407
+
408
+ The performance of this detector is characterized by
409
+
410
+ $$
411
+ P_f = Q_{\chi_2^2}(\gamma) \qquad (42)
412
+ $$
413
+
414
+ $$
415
+ P_d = Q_{\chi_2'^2(\lambda)}(\gamma) \quad (43)
416
+ $$
417
+
418
+ $$
419
+ \lambda = \frac{1}{\sigma^2} \theta^T A^T \left[ A (H^T H)^{-1} A^T \right]^{-1} A \theta \quad (44)
420
+ $$
421
+
422
+ $$
423
+ = \frac{1}{\sigma^2} \left( \left( \frac{\alpha - \beta}{2} \right)^2 d^2 E_1 + \frac{1}{16} d^4 E_2 \right) \quad (45)
424
+ $$
425
+
426
+ where $Q_{\chi_2^2}$ is the right tail probability for a Central Chi-Squared PDF with 2 degrees of freedom, and $Q_{\chi_2'^2(\lambda)}$ is the right tail probability for a noncentral Chi-Squared PDF with 2 degrees of freedom and noncentrality parameter $\lambda$. In order to perform the same analysis as Section 4.1 (i.e., $d_{min}$ versus SNR curve), we start by computing the required $\lambda$ from the above expressions, based on the fixed values of $P_d$ and $P_f$. Then, using the relation (18), we will have
427
+
428
+ $$
429
+ SNR = \frac{\lambda(P_f, P_d)}{N} \frac{64E_0 - 16\alpha\beta d^2 E_1 + d^4 E_2}{4(\alpha - \beta)^2 d^2 E_1 + d^4 E_2} \quad (46)
430
+ $$
431
+
432
+ where $\lambda(P_f, P_d)$ represents the required value of noncentrality parameter as a function of the desired $P_f$ and $P_d$. For instance, for the case of $h(x) = \text{sinc}^2(x)$, with $P_d = 0.99$ and $P_f = 10^{-6}$ we have
433
+
434
+ $$
435
+ SNR = \frac{56.29}{N} \, \frac{\frac{140}{\pi^4} - \frac{14}{\pi^2} \alpha \beta d^2 + d^4}{\frac{7}{2\pi^2} (\alpha - \beta)^2 d^2 + d^4}. \quad (47)
436
+ $$
437
+
438
+ It is useful to compare the performance of this detector (in terms
439
+ of minimum detectable *d*) against the "best" case where the pa-
440
+ rameters *d*, *α* and *β* are actually known. In fact, a comparison in
441
+ Fig. 5 demonstrates that, happily (and perhaps rather unexpect-
442
+ edly), the curves are very close, implying that the performance
443
+
444
+ Fig. 5. $d_{\min}$ versus SNR (dB) for $\alpha = 1.2$ and $\beta = 0.8$.
445
+
446
+ Fig. 6. GLRT for $\alpha \neq \beta$ and the case $\alpha = \beta$, symmetric sources; $d_{\min}$ versus SNR(dB).
447
+
448
+ of GLRT is very close to the optimal detector for which all pa-
449
+ rameters are known.
450
+
451
+ An interesting observation arises from a comparison of the
452
+ minimum detectable *d* for the cases *α* = *β* and *α* ≠ *β*, shown
453
+ in Fig. 6. It is seen that unequal *α* and *β* yield better detec-
454
+ tion. That is, for a fixed *d*, the required SNR for resolving two
455
+ closely-spaced unequally bright point sources is *smaller* than
456
+ the SNR required to resolve two *equally bright* sources. This
457
+ result seems counter-intuitive. Yet, the reason behind it is some-
458
+ what clear in hindsight. Equal *α* and *β* produce a perfectly
459
+ symmetric signal (without noise) and therefore result in redun-
460
+ dancy in the measured signal content. With unequal *α* and *β*,
461
+ an anti-symmetric part is added to the signal information and
462
+ better decision is made possible. This phenomenon is a result
463
+ of the assumption of symmetry of point sources around the
464
+ origin (*x* = 0). If the center of the point sources is not known,
465
+ the results can be different, as we will explain in the next section.
466
+
467
+ C. The Case of Unknown Intensities But α + β = 2 with Asymmetrically Located Point Sources
468
+
469
+ With the earlier machinery in place, in this section, we study
470
+ the case where the point sources are not located symmetrically
471
+ ---PAGE_BREAK---
472
+
473
+ around the origin ($x=0$). We consider the following model for this case:
474
+
475
+ $$
476
+ \begin{aligned}
477
+ g(x_k) &= s(x_k; \alpha, \beta, d_1, d_2) + w(x_k) \\
478
+ &= \alpha h(x_k - d_1) + \beta h(x_k + d_2) + w(x_k)
479
+ \end{aligned}
480
+ \quad (48) $$
481
+
482
+ where $d_1$ and $d_2$ are unknown and $d = d_1 + d_2$ is the distance between the point sources. The Taylor expansion for the signal term in (48) around $(d_1, d_2) = (0, 0)$ is given by
483
+
484
+ $$ s(x_k; \alpha, \beta, d_1, d_2) = (\alpha + \beta)h(x_k) + (-\alpha d_1 + \beta d_2)h_1(x_k) + \frac{\alpha d_1^2 + \beta d_2^2}{2}h_2(x_k). \quad (49) $$
485
+
486
+ Here we consider the general case of unknown $\alpha$ and $\beta$ but $\alpha+\beta=2$ is known to the detector. However, we assume that the test for determining whether one peak is present or two peaks are present is performed at some point located between the two point sources. Hence, the hypothesis test can be expressed as
487
+
488
+ $$ H_0: [d_1 \ d_2] = [0 \ 0] \\ H_1: [d_1 \ d_2] \neq [0 \ 0] \quad (50) $$
489
+
490
+ or equivalently (see (51) at the bottom of the page). By removing the known common term $(\alpha + \beta)h(x_k)$, we have the following linear model:
491
+
492
+ $$ y = H\theta_a + w $$
493
+
494
+ where
495
+
496
+ $$
497
+ \begin{align*}
498
+ H &= [\mathbf{h}_1, \mathbf{h}_2] \\
499
+ \theta_a &= \begin{bmatrix} -\alpha d_1 + \beta d_2 \\ \frac{\alpha d_1^2 + \beta d_2^2}{2} \end{bmatrix} \tag{52}
500
+ \end{align*} $$
501
+
502
+ and where the subscript “a” on $\theta_a$ denotes the asymmetric case, to be distinguished from (36). Then, the corresponding hypotheses are given by
503
+
504
+ $$ H_0: A\theta_a = b \\ H_1: A\theta_a \neq b \quad (53) $$
505
+
506
+ where
507
+
508
+ $$ A = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}, \quad b = \begin{bmatrix} 0 \\ 0 \end{bmatrix} $$
509
+
510
+ just as in Section IV-B. The GLRT for (53) will be
511
+
512
+ $$ T(y) = \frac{1}{\sigma^2} \left( \frac{(\mathbf{h}_1^T y)^2}{E_1} + \frac{(\mathbf{h}_2^T y)^2}{E_2} \right). \quad (54) $$
513
+
514
+ From (54), the performance of this detector is characterized by
515
+
516
+ $$
517
+ \begin{align*}
518
+ P_f &= Q_{\chi_2^2}(\gamma) \\
519
+ P_d &= Q_{\chi_2^2(\lambda)}(\gamma) \\
520
+ \lambda &= \frac{1}{\sigma^2} \left( (-\alpha d_1 + \beta d_2)^2 E_1 + \left( \frac{\alpha d_1^2 + \beta d_2^2}{2} \right)^2 E_2 \right). \quad (55)
521
+ \end{align*} $$
522
+
523
+ Now, to obtain the relation between SNR and ($d_1, d_2$), we first need to compute the SNR for the model of (48), which is given by
524
+
525
+ $$ \text{SNR} = \frac{1}{N\sigma^2} \left[ (\alpha + \beta)^2 E_0 - \alpha\beta(d_1 + d_2)^2 E_1 + \left( \frac{\alpha d_1^2 + \beta d_2^2}{2} \right)^2 E_2 \right]. \quad (56) $$
526
+
527
+ The value of $\sigma^2$ in (55) can be obtained for the desired $P_d$ and $P_f$. By substituting this value in (56) we will have (57), shown at the bottom of the page. In order to present the results in this case, let us assume that⁹ $\alpha d_1 \approx \beta d_2$ (i.e., we perform the test at a point which is closer to the stronger peak). It can be easily shown that the value of $\lambda$ in (55) is maximized for the case of $\alpha = \beta$. This shows that when $\alpha d_1 \approx \beta d_2$, the performance for the case of equal intensities is better than the performance of the case with unequal intensities. Fig. 7 confirms this result by showing the curves for $d_{\min}$ versus SNR for two cases: equal intensities and unequal intensities (we assume $h(x) = \text{sinc}^2(x)$). By comparing these results and that of the previous section, we conclude that the assumption of symmetrically located point sources around the test point plays a very important role in the performance of the detector. Also, it is worth mentioning that with the assumption of $\alpha d_1 \approx \beta d_2$, we can approximate (57) for the range of small $d_1$ and $d_2$ in the following informative ways:
528
+
529
+ $$
530
+ \begin{align}
531
+ \text{SNR} &= \frac{\lambda(P_f, P_d)}{N} \frac{4(\alpha + \beta)^2}{(\alpha d_1^2 + \beta d_2^2)^2} \frac{E_0}{E_2} = \frac{\lambda(P_f, P_d)}{N} \frac{4}{d_1^2 d_2^2} \frac{E_0}{E_2} \nonumber \\
532
+ &= \frac{\lambda(P_f, P_d)}{N} \frac{4(\alpha + \beta)^4 E_0}{\alpha^2 \beta^2 d^4 E_2} \tag{58}
533
+ \end{align} $$
534
+
535
+ ⁹See Appendix C for a justification.
536
+
537
+ $$
538
+ \begin{cases}
539
+ H_0: \tilde{g}(x_k) = (\alpha + \beta)h(x_k) + w(x_k) \\
540
+ H_1: \tilde{g}(x_k) = (\alpha + \beta)h(x_k) + (-\alpha d_1 + \beta d_2)h_1(x_k) + \frac{\alpha d_1^2 + \beta d_2^2}{2}h_2(x_k) + w(x_k)
541
+ \end{cases}
542
+ \quad (51) $$
543
+
544
+ $$ \text{SNR} = \frac{\lambda(P_f, P_d)}{N} \frac{(\alpha + \beta)^2 E_0 - \alpha\beta(d_1 + d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 E_2}{(-\alpha d_1 + \beta d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 E_2}. \quad (57) $$
545
+ ---PAGE_BREAK---
546
+
547
+ Fig. 7. $d_{\min}$ versus SNR(dB); $d = d_1 + d_2$ and $\alpha d_1 = \beta d_2$; equal intensities and unequal intensities.
548
+
549
+ Fig. 8. $d_{\min}$ versus SNR(dB); $d = d_1 + d_2$ and $\alpha d_1 = \beta d_2$ detectors with and without the assumption of $\alpha + \beta = 2$.
550
+
551
+ ### D. The Case of Unknown Intensities, Asymmetrically Located Point Sources
552
+
553
+ Here, we analyze the most general case in which we assume that the energy of point sources ($\alpha + \beta$) is unknown to the detector, as well as the individual $\alpha, \beta, d_1$, and $d_2$. Recalling (51), we can set up another linear model as follows:
554
+
555
+ $$ \tilde{\mathbf{g}} = \mathbf{H}_u \boldsymbol{\theta}_u + \mathbf{w} $$
556
+
557
+ where
558
+
559
+ $$ \begin{aligned} \mathbf{H}_u &= [\mathbf{h}, \mathbf{h}_1, \mathbf{h}_2] \\ \boldsymbol{\theta}_u &= \begin{bmatrix} \alpha + \beta \\ -\alpha d_1 + \beta d_2 \\ \frac{\alpha d_1^2 + \beta d_2^2}{2} \end{bmatrix} \end{aligned} \quad (59) $$
560
+
561
+ and the subscript "u" denotes the completely unknown parameters. The above setup leads to the following hypothesis test:
562
+
563
+ $$ \begin{cases} H_0: & \mathbf{A}_u \boldsymbol{\theta}_u = \mathbf{b} \\ H_1: & \mathbf{A}_u \boldsymbol{\theta}_u \neq \mathbf{b} \end{cases} \quad (60) $$
564
+
565
+ where
566
+
567
+ $$ \mathbf{A}_u = \begin{bmatrix} 0 & 1 & 0 \\ 0 & 0 & 1 \end{bmatrix} , \quad \mathbf{b} = \begin{bmatrix} 0 \\ 0 \end{bmatrix} . $$
568
+
569
+ The GLRT for (60) will be
570
+
571
+ $$ T'(\tilde{\mathbf{g}}) = \frac{1}{\sigma^2} \left( \frac{(\mathbf{h}_1^T \tilde{\mathbf{g}})^2}{E_1} + \frac{(E_1 \mathbf{h}^T \tilde{\mathbf{g}} + E_0 \mathbf{h}_2^T \tilde{\mathbf{g}})^2}{E_0(E_0 E_2 - E_1^2)} \right). \quad (61) $$
572
+
573
+ The performance of this detector is given by¹⁰
574
+
575
+ $$ P_f = Q_{\chi_2^2}(\gamma) \\ P_d = Q_{\chi_2^2(\lambda)}(\gamma) \\ \lambda = \frac{1}{\sigma^2} \left( (-\alpha d_1 + \beta d_2)^2 E_1 + \left( \frac{\alpha d_1^2 + \beta d_2^2}{2} \right)^2 \left( E_2 - \frac{E_1^2}{E_0} \right) \right). \quad (62) $$
576
+
577
+ Consequently, the relation between ($d_1, d_2$) and SNR is given by (63) as shown at the bottom of the page. By comparing (57) and (63), it can be readily shown that because of the negative term $-(E_1^2/E_0)$, the detector without the knowledge of $\alpha + \beta$ performs more poorly than the detector which knows $\alpha + \beta = 2$. Fig. 8 displays the performance of these two different detectors in terms of the minimum detectable $d$ versus SNR for the case of $h(x) = \text{sinc}^2(x)$.
578
+
579
+ ## V. THE CRAMÉR-RAO LOWER BOUND ON ESTIMATION OF THE UNKNOWN PARAMETERS
580
+
581
+ In the interest of completeness, in this section we present results on the estimation of the unknown parameters of the model. In particular, we study the asymptotic performance of ML estimate of the unknown parameters, using the Cramér-Rao lower bound (CRLB). CRLB [15, p. 27] is a covariance inequality bound which treats the parameters as unknown deterministic quantities and provides a local bound on the mean square error (MSE) of their estimate. Being able to compute a lower bound
582
+
583
+ ¹⁰Note that according to the Cauchy-Schwarz inequality $E_0 E_2 \ge E_1^2$.
584
+
585
+ $$ \text{SNR} = \frac{\lambda(P_f, P_d)}{N} \frac{(\alpha + \beta)^2 E_0 - \alpha \beta (d_1 + d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 E_2}{(-\alpha d_1 + \beta d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 (E_2 - \frac{E_1^2}{E_0})} \quad (63) $$
586
+ ---PAGE_BREAK---
587
+
588
+ Fig. 9. $\sqrt{\text{CRLB}(\hat{d})}$ versus $\hat{d}$ for two different cases.
589
+
590
+ on the variance of the parameter $d$, in particular, is rather helpful
591
+ in verifying and confirming the earlier results of this paper. For
592
+ example we shall see how the difference between $\alpha$ and $\beta$ af-
593
+ fects the variance of the estimate in different cases. Here, we
594
+ compute the CRLB for following cases:
595
+
596
+ • the signal model in (3), i.e., known intensities but un-
597
+ known *d*;
598
+
599
+ * the signal model in (48), i.e., unknown α, β, d₁, and d₂.
600
+ To verify the details of the calculations (carried out mostly in
601
+ the frequency domain), we refer the reader to Appendix B. Re-
602
+ calling (3), the CRLB for the parameter *d* (assuming α and β
603
+ known), is given by (64) and (65) at the bottom of the page. To
604
+ compute the CRLB for the second case, when α, β, d₁, and d₂
605
+ are unknown, the Fisher Information matrix is computed.¹¹ We
606
+ have
607
+
608
+ $$
609
+ \operatorname{cov}(\hat{d}_1, \hat{d}_2, \hat{\alpha}, \hat{\beta}) \geq \Psi^{-1}(d_1, d_2, \alpha, \beta) \quad (66)
610
+ $$
611
+
612
+ where $\Psi$ is the 4 × 4 symmetric Fisher Information matrix with
613
+ its elements defined by the equations at the bottom of the next
614
+ page. The bound on the variance of $\hat{d}_1$ and $\hat{d}_2$ can be obtained
615
+ by taking the elements (1, 1) and (2, 2) of the inverse Fisher
616
+ information matrix $\Psi^{-1}$, respectively. Also, the CRLB on $d =$
617
+ $d_1 + d_2$ is computed from
618
+
619
+ $$
620
+ \mathrm{CRLB}(\hat{d}) = [\Psi^{-1}]_{11} + [\Psi^{-1}]_{22} + 2[\Psi^{-1}]_{12}. \quad (67)
621
+ $$
622
+
623
+ ¹¹We thank Prof. Jeff Fessler for sharing with us his calculations for the con-
624
+ tinuous data case.
625
+
626
+ Fig. 10. $\sqrt{\text{CRLB}(\hat{d})}$ versus $\alpha$ for two different cases.
627
+
628
+ Fig. 9 shows the square-root of the CRLB (to maintain the same units as *d*) for *d*, for fixed values of the intensities *α* and *β*, versus the parameter value *d*, for two different cases; namely, the known intensity case with symmetrically located point sources, and the unknown *α*, *β*, *d*₁ and *d*₂ case. In this figure, we observe that the curves in each case are rather close for *d* > 0.5, and they are distinct when *α* is unknown and *d* is smaller than 0.5. In Fig. 10, the value of *d* = 0.3 is fixed, and the square-root of CRLB for *d̂* is shown over a range of values of *α*. The graph demonstrates the effect of the difference of *α* and *β* on the CRLB. As seen in this figure, the CRLB for the second case (unknown *α*, *β*, *d*₁ and *d*₂) increases rapidly when moving away from (*α*, *β*) = (1, 1); but for known *α* and *β*, there is a (rather slow) decay away from the position *α* = *β* = 1. The observed phenomenon is counter-intuitive, but can be readily explained by looking at the derivatives we computed in the calculation of the CRLB. When point sources are located symmetrically, with unequal intensities, the shape of the overall signal is dramatically different than the case when *α* = *β* = 1. This difference is accentuated further as the value of *α* − *β* becomes larger. Whereas for second case, because of uncertainty about the center and intensities of point sources, if *α* − *β* ≠ 0, the overall shape looks more like a single peak is present. The observed behavior is consistent with what we saw before where we demonstrated that unequal *α* and *β* yields improved detection if the center is known and vice versa.
629
+
630
+ VI. CONCLUSION
631
+
632
+ We have set out in this paper to address the question of
633
+ resolution from a sound statistical viewpoint. In particular, we
634
+
635
+ $$
636
+ \begin{align}
637
+ \operatorname{var}(\hat{d}) &\ge \frac{\sigma^2}{\sum_k \left( \frac{\partial S(x_k, d)}{\partial d} \right)^2} = \frac{\sigma^2}{\frac{1}{2\pi} \int_{-\pi}^{\pi} \left| \frac{\partial S(\omega, d)}{\partial d} \right|^2 d\omega} \tag{64} \\
638
+ &= \frac{\sigma^2}{f_s \frac{\pi^2}{15} (\alpha^2 + \beta^2) + \frac{f_s \alpha \beta}{\pi^3 d^5} \left[ (\pi^2 d^2 - 3) \sin(2\pi d) + 3\pi d \cos(2\pi d) + 3\pi d \right]} \tag{65}
639
+ \end{align}
640
+ $$
641
+ ---PAGE_BREAK---
642
+
643
+ have explicitly answered a very practical question: What is the minimum detectable distance between two point sources imaged incoherently at a given signal-to-noise ratio? Or equivalently, what is the minimum SNR required to discriminate two point sources separated by a distance smaller than the Rayleigh limit? Based on different assumptions and models, we explicitly studied four different cases in our detection-theoretic approach, from the simplest to the most general case. We employed a hypothesis testing framework using locally
644
+
645
+ most powerful tests, where the original highly nonlinear problem was approximated using a quadratic model in the parameter *d*. We also discussed asymptotic performance for estimation of the unknown parameters. The analysis has been carried out in one dimension to facilitate the presentation and to yield maximum intuition. We have begun the analysis in 2-D, including studies as a function of different aperture shapes and lenses, and the complete 2-D (spatial integration) sampling model. This 2-D analysis is not so different in spirit from the
646
+
647
+ $$
648
+ \Psi(1, 1) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1} \right)^2 = \frac{\alpha^2}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{4\pi^2 \alpha^2}{15}
649
+ $$
650
+
651
+ $$
652
+ \Psi(2, 2) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2} \right)^2 = \frac{\beta^2}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{4\pi^2 \beta^2}{15}
653
+ $$
654
+
655
+ $$
656
+ \Psi(3, 3) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \alpha} \right)^2 = \frac{1}{2\pi\sigma^2} \int_{-\pi}^{\pi} |H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{2}{3}
657
+ $$
658
+
659
+ $$
660
+ \Psi(4, 4) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \beta} \right)^2 = \frac{1}{2\pi\sigma^2} \int_{\pi}^{\pi} |H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{2}{3}
661
+ $$
662
+
663
+ $$
664
+ \Psi(1, 2) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2}
665
+ $$
666
+
667
+ $$
668
+ = -\frac{\alpha\beta}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s H(\omega, f_s)|^2 \cos(\omega f_s (d_1 + d_2)) d\omega
669
+ $$
670
+
671
+ $$
672
+ = \frac{f_s 2\alpha\beta (\pi^2(d_1+d_2)^2 - 3)\sin(2\pi(d_1+d_2)) + 6\pi(d_1+d_2)\cos^2(\pi(d_1+d_2))}{\sigma^2 \pi^3 (d_1+d_2)^5}
673
+ $$
674
+
675
+ $$
676
+ \Psi(1, 3) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \alpha} = -\frac{\alpha}{2\pi\sigma^2} \int_{-\pi}^{\pi} \omega f_s |H(\omega, f_s)|^2 d\omega = 0
677
+ $$
678
+
679
+ $$
680
+ \Psi(1, 4) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \beta}
681
+ $$
682
+
683
+ $$
684
+ = -\frac{\alpha}{2\pi\sigma^2} \int_{-\pi}^{\pi} \omega f_s |H(\omega, f_s)|^2 \sin(\omega f_s (d_1 + d_2)) d\omega
685
+ $$
686
+
687
+ $$
688
+ = \frac{f_s}{\sigma^2} \frac{\alpha}{2\pi^3} \frac{3\sin(2\pi(d_1 + d_2)) - 4\pi(d_1 + d_2)\cos^2(\pi(d_1 + d_2)) - 2\pi(d_1 + d_2)}{(d_1 + d_2)^4}
689
+ $$
690
+
691
+ $$
692
+ \Psi(2, 3) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \alpha}
693
+ $$
694
+
695
+ $$
696
+ = -\frac{\beta}{2\pi\sigma^2} \int_{-\pi}^{\pi} \omega f_s |H(\omega, f_s)|^2 \sin(\omega f_s (d_1 + d_2)) d\omega
697
+ $$
698
+
699
+ $$
700
+ = \frac{f_s}{\sigma^2} \frac{\beta}{2\pi^3} \frac{3\sin(2\pi(d_1 + d_2)) - 4\pi(d_1 + d_2)\cos^2(\pi(d_1 + d_2)) - 2\pi(d_1 + d_2)}{(d_1 + d_2)^4}
701
+ $$
702
+
703
+ $$
704
+ \Psi(2, 4) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \beta} = -\frac{\beta}{2\pi\sigma^2} \int_{-\pi}^{\pi} \omega f_s |H(\omega, f_s)|^2 d\omega = 0
705
+ $$
706
+
707
+ $$
708
+ \Psi(3, 4) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \alpha} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \beta}
709
+ $$
710
+
711
+ $$
712
+ = \frac{1}{2\pi\sigma^2} \int_{-\pi}^{\pi} |H(\omega, f_s)|^2 \cos(\omega f_s (d_1 + d_2)) d\omega
713
+ $$
714
+
715
+ $$
716
+ = \frac{f_s}{\sigma^2} \frac{1}{2\pi^3} \frac{2\pi(d_1 + d_2) - \sin(2\pi(d_1 + d_2))}{(d_1 + d_2)^3}
717
+ $$
718
+ ---PAGE_BREAK---
719
+
720
+ 1-D case, but is significantly more messy; so we have elected to defer its presentation to the near future.
721
+
722
+ The major conclusion of this paper is that for a given imaging scenario (in this case, incoherent imaging through a slit), with required probabilities of detection and false alarm, the minimum resolvable separation between two sources from uniformly sampled data can be derived explicitly as a function of the SNR per sample of the imaging array, and the sampling rate. The most useful rule of thumb we glean from these results is that for the case of equal intensities (or for the case of unequal intensities with a proper choice of test point), the minimum resolvable distance is essentially proportional to the inverse of the SNR to the fractional power of 1/4. The proportionality constant was shown to be a function of the probabilities of detection and false alarm, and the point spread function. In deriving these results, we have unified and generalized much of the literature on this topic that, while sparse, has spanned the course of roughly four decades.
723
+
724
+ Many interesting questions remain to be studied. Of these, the analysis of the problem as a function of the sampling rate and sampling strategy come to mind. For instance, it is useful to study the performance in the presence of aliasing (i.e., sub-Nyquist sampling). It would also be interesting to study the effect of nonuniform sampling on performance.
725
+
726
+ It is important to note that the strategy for the analysis of resolution we have put forward here is very generally applicable to other types of imaging systems. Once the point-spread function of the imaging system is known, the signal model $s(x; d)$ is determined, and the same line of reasoning can be carried out. The optical imaging scenario we have described here should really be thought of as a canonical example of the application of the general strategy we propose for studying resolution. Extensions of these ideas can also be considered to study limits to resolution for indirect imaging such as in computed tomography.
727
+
728
+ As for other extensions and applications in optical imaging, an appealing direction is to study the limits to super-resolution from video [23]–[25]. The analysis presented here can help answer questions regarding the ability of image super-resolution methods to integrate multiple low resolution frames to produce a high resolution image from aliased data.
729
+
730
+ Finally, we wish to mention that this paper, we hope, represents one step forward in an overall methodology for studying imaging and image processing that appeals directly to concepts in information theory. This approach and point of view has been sorely lacking in the imaging community, and we hope that it will become more pervasive in the years to come.
731
+
732
+ ## APPENDIX A
733
+
734
+ ### ON THE ACCURACY OF THE QUADRATIC APPROXIMATION
735
+
736
+ Here, we present an analysis to demonstrate the accuracy of the Taylor expansion proposed in Section 3. We consider the general model of (48) and its Taylor expansion in (49). Let us define residual percentage error of the approximation as follows:
737
+
738
+ $$ \epsilon = \frac{\left\| s - (\alpha + \beta)\mathbf{h} - (-\alpha d_1 + \beta d_2)\mathbf{h}_1 - \frac{\alpha d_1^2 + \beta d_2^2}{2}\mathbf{h}_2 \right\|^2}{\|\mathbf{s}\|^2} \quad (68) $$
739
+
740
+ Fig. 11. Residual percentage error of the quadratic model; $\alpha d_1 = \beta d_2$.
741
+
742
+ Fig. 12. Residual percentage error of the quadratic model; $\alpha = \beta = 1$.
743
+
744
+ Consider the case when $\alpha d_1 = \beta d_2$ (See Appendix C). Fig. 11 shows the upper bound when $(d = d_1 + d_2 = 1)$ for $\epsilon$ as a function of $\alpha$ for $h(x) = \text{sinc}^2(x)$ (Note that again for above-Nyquist sampling, $\epsilon$ is independent from the sampling rate.). The maximum of $\epsilon$ is less than 20% in any case. Also, as seen in this figure, the approximation error for $d = 0.7$ is always less than 2.5%. Fig. 12 shows the curve for $\epsilon$ versus $d$ which indicates that the approximation error is quite acceptable for the range of interest near $d = 0$. To have a picture of the local error in the approximation, the error term
745
+
746
+ $$ \epsilon(x; \alpha, \beta, d_1, d_2) = s(x; \alpha, \beta, d_1, d_2) - (\alpha + \beta)h(x) \\ - (-\alpha d_1 + \beta d_2)h_1(x) - \frac{\alpha d_1^2 + \beta d_2^2}{2}h_2(x) $$
747
+
748
+ is shown in Fig. 13 for two different values of $d$ over the range of the variable $x$ in [-10, 10].
749
+
750
+ ## APPENDIX B
751
+
752
+ ### FREQUENCY DOMAIN REPRESENTATION; PARSEVAL'S THEOREM FOR THE SIGNAL $s(x; d)$
753
+
754
+ Considering the sampled signal of the general model, where the point sources are located at $-d_1$ and $d_2$ we have
755
+
756
+ $$ \begin{aligned} s(n; \alpha, \beta, d_1, d_2) &= s(x; \alpha, \beta, d_1, d_2)|_{x=\frac{n}{f_s}} \\ &= \alpha h\left(\frac{n}{f_s} - d_1\right) + \beta h\left(\frac{n}{f_s} + d_2\right). \end{aligned} \quad (69) $$
757
+ ---PAGE_BREAK---
758
+
759
+ Fig. 13. Difference between the actual signal and the quadratic model; $\alpha = \beta = 1$.
760
+
761
+ For the case of above-Nyquist sampling,¹² in the frequency domain we will have the following 2π-periodic representation (see (70) at the bottom of the page) where $H(\omega, f_s) = (f_s^2/2\pi)((2\pi/f_s) - |\omega|)$ is the DTFT of $h(x_k)$ when $h(x) = \text{sinc}^2(x)$ and sampling rate is $f_s$. Correspondingly, for this case, the functions $h_1(x)$ and $h_2(x)$ can be written in the frequency domain as
762
+
763
+ $$ H_1(\omega, f_s) = \begin{cases} j \frac{\omega f_s}{2\pi} \left( \frac{2\pi}{f_s} - |\omega| \right) & |\omega| < \frac{2\pi}{f_s} \\ 0 & \frac{2\pi}{f_s} \le |\omega| \le 2\pi \end{cases} \quad (71) $$
764
+
765
+ $$ H_2(\omega, f_s) = \begin{cases} -\frac{\omega^2 f_s^4}{2\pi} \left(\frac{2\pi}{f_s} - |\omega|\right) & |\omega| < \frac{2\pi}{f_s} \\ 0 & \frac{2\pi}{f_s} \le |\omega| \le 2\pi \end{cases} \quad (72) $$
766
+
767
+ Using Parseval's identities [19]:
768
+
769
+ $$ \sum_{n=-\infty}^{\infty} |x(n)|^2 = \frac{1}{2\pi} \int_{-\pi}^{\pi} |X(\omega)|^2 d\omega \quad (73) $$
770
+
771
+ $$ \sum_{n=-\infty}^{\infty} x(n)y^{*}(n) = \frac{1}{2\pi} \int_{-\pi}^{\pi} X(\omega)Y^{*}(\omega) d\omega \quad (74) $$
772
+
773
+ we can easily compute the following terms:
774
+
775
+ $$ E_0 = h^T h = f_s \frac{2}{3} \quad (75) $$
776
+
777
+ $$ E_1 = h_1^T h_1 = f_s \frac{4\pi^2}{15} \quad (76) $$
778
+
779
+ $$ E_2 = h_2^T h_2 = f_s \frac{32\pi^4}{105} \quad (77) $$
780
+
781
+ and
782
+
783
+ $$ h_1^T s_0 = h_1^T h_2 = 0 \quad (78) $$
784
+
785
+ ¹²To recover exactly $s(x; d)$ would mathematically require an infinite number of measurements (or samples) $s(n; d)$ [21]. But since we have considered a fairly large range (−10 to 10) for sampling, and since the energy in the tails of the function in the range is very small, the effect of aliasing is essentially negligible.
786
+
787
+ Note that in every case the energy terms are proportional to the sampling rate. It can be shown [20] that the energy of any uniformly (super-critically) sampled version of a band-limited signal is proportional to the sampling rate.
788
+
789
+ ## APPENDIX C
790
+ IS $\alpha d_1 \approx \beta d_2$ A REASONABLE ASSUMPTION?
791
+
792
+ Suppose that we first wish to determine a location at which we carry out our hypothesis test. A reasonable way to find a good candidate is to compute the correlation of the signal with a shifted version of $h(x)$ and find the point where the correlation is maximum (this would yield a point near the brighter of the two peaks). Consider
793
+
794
+ $$ R_{sh}(|\tau|, \alpha, \beta, d_1, d_2) = \int_{-\infty}^{+\infty} (s(x; \alpha, \beta, d_1, d_2) + w(x))h(x + \tau) dx \quad (79) $$
795
+
796
+ $$ = \int_{-\infty}^{+\infty} (\alpha h(x - d_1) + \beta h(x + d_2) + w(x))h(x + \tau) dx \quad (80) $$
797
+
798
+ $$ = \alpha R_{hh}(|\tau| - d_1) + \beta R_{hh}(|\tau| + d_2) + u(|\tau|) \quad (81) $$
799
+
800
+ where $R_{sh}$ and $R_{hh}$ are the cross-correlation and autocorrelation functions, respectively, and
801
+
802
+ $$ u(|\tau|) = \int_{-\infty}^{+\infty} w(x)h(x + \tau) dx \quad (82) $$
803
+
804
+ is a noise term (with zero mean). It should be clear from the model that $R_{hh}$ would be maximized at $\tau = 0$. Also, since $d_1$ and $d_2$ are assumed to be small, by using the Taylor expansion around $|\tau| - d_1 = 0$ and $|\tau| + d_2 = 0$, we will have
805
+
806
+ $$ R_{hh}(|\tau| - d_1) = \xi_0 + (|\tau| - d_1)\xi_1 + (|\tau| - d_1)^2\xi_2 \quad (83) $$
807
+
808
+ $$ R_{hh}(|\tau| + d_2) = \xi_0 + (|\tau| + d_2)\xi_1 + (|\tau| + d_2)^2\xi_2 \quad (84) $$
809
+
810
+ where $\xi_0$, $\xi_1$, and $\xi_2$ are some constant coefficients of the above Taylor expansion. Also, it can be shown that $\xi_1 = 0$. Therefore, we can write (81) as follows:
811
+
812
+ $$ R_{sh}(|\tau|, \alpha, \beta, d_1, d_2) = (\alpha + \beta)\xi_0 + \left(\alpha(|\tau| - d_1)^2 + \beta(|\tau| + d_2)^2\right)\xi_2 + u(|\tau|) \quad (85) $$
813
+
814
+ Taking derivative of $R_{sh}(|\tau|, \alpha, \beta, d_1, d_2)$ with respect to $\tau$ and setting it to zero will result in:
815
+
816
+ $$ (\alpha + \beta)|\tau| = \alpha d_1 - \beta d_2 \quad (86) $$
817
+
818
+ Hence, a proper selection of $\tau$ (i.e., the test point) will lead to $\alpha d_1 \approx \beta d_2$.
819
+
820
+ $$ S(\omega, d) = \begin{cases} H(\omega, f_s)(\alpha \exp(-j\omega f_s d_1) + \beta \exp(j\omega f_s d_2)) & |\omega| < \frac{2\pi}{f_s} \\ 0 & \frac{2\pi}{f_s} \le |\omega| \le 2\pi \end{cases} \quad (70) $$
821
+ ---PAGE_BREAK---
822
+
823
+ ACKNOWLEDGMENT
824
+
825
+ The authors wish to acknowledge Prof. A. Shakouri of U.C., Santa Cruz, for providing the early practical inspiration from the laboratory bench that led them to consider the questions addressed in this paper. They thank Prof. J. Fessler of the University of Michigan for his helpful suggestions for the CRLB analysis. They also thank the reviewers for their constructive comments and suggestions.
826
+
827
+ REFERENCES
828
+
829
+ [1] J. W. Goodman, *Introduction to Fourier Optics*. New York: McGraw-Hill, 1996.
830
+
831
+ [2] J. D. Gaskill, *Linear Systems, Fourier Transforms, and Optics*. New York: Wiley, 1978.
832
+
833
+ [3] L. B. Lucy, "Statistical limits to super-resolution," *Astron. Astrophys*, vol. 261, pp. 706-710, 1992.
834
+
835
+ [4] C. W. Helstrom, "The detection and resolution of optical signals," *IEEE Trans. Inf. Theory*, vol. IT-10, pp. 275-287, 1964.
836
+
837
+ [5] ——, "Detection and resolution of incoherent objects by a background-limited optical system," *J. Opt. Soc. Amer.*, vol. 59, pp. 164-175, 1969.
838
+
839
+ [6] ——, "Resolvability of objects from the standpoint of statistical parameter estimation," *J. Opt. Soc. Amer.*, vol. 60, pp. 659-666, 1970.
840
+
841
+ [7] L. B. Lucy, "Resolution limits for deconvolved images," *Astron. J.*, vol. 104, pp. 1260-1265, 1992.
842
+
843
+ [8] A. van den Bos, "Ultimate resolution: A mathematical framework," *Ultramicroscopy*, vol. 47, pp. 298-306, 1992.
844
+
845
+ [9] A. J. den Dekker, "Model-based optical resolution," *IEEE Trans. Instrum. Meas.*, vol. 46, pp. 798-802, 1997.
846
+
847
+ [10] A. J. den Dekker and A. van den Bos, "Resolution, a survey," *J. Opt. Soc. Amer.*, vol. 14, pp. 547-557, 1997.
848
+
849
+ [11] E. Bettens, D. Van Dyck, A. J. den Dekker, J. Sijbers, and A. van den Bos, "Model-based two-object resolution from observations having counting statistics," *Ultramicroscopy*, vol. 77, pp. 37-48, 1999.
850
+
851
+ [12] A. van den Bos, "Resolution in model-based measurements," *IEEE Trans. Instrum. Meas.*, vol. 51, pp. 1055-1060, 2002.
852
+
853
+ [13] E. L. Kosarev, "Shannon's superresolution limit for signal recovery," *Inverse Problem*, vol. 6, pp. 55-76, 1990.
854
+
855
+ [14] P. Milanfar and A. Shakouri, "A Statistical analysis of diffraction-limited imaging," in *Proc. Int. Conf. Image Processing*, Sept. 2002, pp. 864-867.
856
+
857
+ [15] S. M. Kay, *Fundamentals of Statistical Signal Processing, Estimation Theory*: Prentice-Hall, Inc., 1998.
858
+
859
+ [16] ——, *Fundamentals of Statistical Signal Processing, Detection Theory*. Englewood Cliffs, NJ: Prentice-Hall, 1998.
860
+
861
+ [17] ——, *Modern Spectral Estimation, Theory and Application*. Englewood Cliffs, NJ: Prentice-Hall, 1988.
862
+
863
+ [18] ——, "Spectrum analysis, a modern perspective," *Proc. IEEE*, vol. 69, no. 11, pp. 1380-1418, 1981.
864
+
865
+ [19] A. V. Oppenheim and R. W. Schafer, *Discrete-Time Signal Processing*. Englewood Cliffs, NJ: Prentice-Hall, 1993.
866
+
867
+ [20] P. P. Vaidyanathan, "Generalizations of the sampling theorem: Seven decades after Nyquist," *IEEE Trans. Circuits Syst.*, vol. 48, pp. 1094-1109, Sept. 2001.
868
+
869
+ [21] M. Vetterli, P. Marziliano, and T. Blu, "Sampling signals with finite rate of innovation," *IEEE Trans. Signal Processing*, vol. 50, pp. 1417-1428, June 2002.
870
+
871
+ [22] M. Shahram and P. Milanfar, "A statistical analysis of achievable resolution in incoherent imaging," in *Proc. SPIE Annual Meeting*, San Diego, CA, Aug. 2003, URL: http://www.soe.ucsc.edu/~milanfar/publications.htm.
872
+
873
+ [23] M. Elad and A. Feuer, "Restoration of single super-resolution image from several blurred, noisy and down-sampled measured images," *IEEE Trans. Image Processing*, vol. 6, pp. 1646-1658, Dec. 1997.
874
+
875
+ [24] N. Nguyen, P. Milanfar, and G. H. Golub, "A computationally efficient image superresolution algorithm," *IEEE Trans. Image Processing*, vol. 10, pp. 573-583, Apr. 2001.
876
+
877
+ [25] S. Farsiu, D. Robinson, M. Elad, and P. Milanfar, "Fast and robust multi-frame superresolution," *IEEE Trans. Image Processing*, to be published.
878
+
879
+ **Morteza Shahram** received the B.S. degree from the Amir-Kabir University of Technology, Tehran, Iran, in 1996 and the M.S. degree from the Sharif University of Technology, Tehran, in 1998 both in electrical engineering. He is currently pursuing the Ph.D. degree in electrical engineering at the University of California, Santa Cruz.
880
+ He was with the Signal Company, Tehran, as a Research Engineer from 1996 to 2001. His research interests are statistical signal and image processing and information-theoretic imaging.
881
+
882
+ **Peyman Milanfar** (S'90-M'93-SM'98) received the B.S. degree in electrical engineering/mathematics from the University of California, Berkeley, in 1988, and the S.M., E.E., and Ph.D. degrees in electrical engineering from the Massachusetts Institute of Technology, Cambridge, in 1990, 1992, and 1993, respectively.
883
+ Until 1999, he was a Senior Research Engineer at SRI International, Menlo Park, CA. He is currently Associate Professor of Electrical Engineering at the University of California, Santa Cruz. He was a Consulting Assistant Professor of computer science at Stanford University from 1998-2000, and a visiting Associate Professor there from June to December 2002. His technical interests are in statistical signal and image processing, and inverse problems.
884
+ Dr. Milanfar won a National Science Foundation CAREER award in 2000.
885
+ He was an associate editor for the IEEE SIGNAL PROCESSING LETTERS from 1998 to 2001.
samples/texts_merged/2634535.md ADDED
@@ -0,0 +1,447 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ ORIGINAL ARTICLE
5
+
6
+ WILEY
7
+
8
+ # Why is free education so popular? A political economy explanation
9
+
10
+ Juan A. Correa¹ | Yijia Lu² | Francisco Parro³ | Mauricio Villena³
11
+
12
+ ¹Facultad de Economía y Negocios,
13
+ Universidad Andres Bello, Santiago,
14
+ Chile
15
+
16
+ ²School of Law, New York University,
17
+ New York, New York
18
+
19
+ ³School of Business, Universidad Adolfo
20
+ Ibáñez, Santiago, Chile
21
+
22
+ **Correspondence**
23
+
24
+ Francisco Parro, School of Business,
25
+ Universidad Adolfo Ibáñez, 7941169,
26
+ Santiago, Chile.
27
+ Email: fjparrog@gmail.com
28
+
29
+ ## Abstract
30
+
31
+ This paper analyzes the political support for different funding regimes of education in a one-person, one-vote democracy. We focus the analysis on four systems that have had a preponderant presence in the political debate on education: a private system, a public system that delivers the same resources to each student (universal-free education), a public system that intends to equalize results, and a public system that aims to maximize the output of the economy. We show that a system of universal free education is the Condorcet winner. The level of income inequality and the degree to which income distribution is skewed to the right are key factors behind this conclusion. We also show that the voting outcome of public versus private funding for education depends crucially on the type of public funding under consideration.
32
+
33
+ ## 1 | INTRODUCTION
34
+
35
+ Universal free education has become popular in several regions of the world. Western democracies have it at different stages of the educational ladder. European countries, such as France, provide free tuition to European students, and Germany offers free tuition even to international students. Argentina, the Czech Republic, and Greece supply free education at all educational levels. Most of the United States' primary and secondary students attend public schools, which provide free education, funded by a mix of federal, regional, and local resources.¹ In other countries, such as Chile, South Africa, and the United Kingdom, where
36
+
37
+ ¹An extensive cross-country analysis of education's tuition fee schemes can be found in Bentaouet Kattan (2006).
38
+ ---PAGE_BREAK---
39
+
40
+ higher education is not free, social movements have pressured the authorities to implement a scheme of universal free education for higher education.² In this paper, we give a political economy explanation for the popularity of free education.
41
+
42
+ A system of universal free education allocates public funds equally across students. This system, however, is not completely consistent with the main implications of a strand of the literature that emphasizes, first, the importance of economic growth to improve living standards and, second, human capital investments as the engine to promote growth (Benhabib & Spiegel, 1994; Hanushek & Kimko, 2000; among others). This branch of the literature points to a system in which public resources for education should be allocated to students with higher skills (so as to maximize aggregate output), relying on alternative instruments for redistribution. Universal free education also implies that public funds are allocated regardless of the student's family income. However, studies such as Samoff (1996) and Larkin and Staton (2001) highlight the importance of equity in the allocation of public resources spent on education. This implies that disadvantaged students should be supported with more resources, which would allow equalizing human capital across students. Hence, universal free education does not point in the direction suggested by these two strands of the literature.
43
+
44
+ A third strand of the literature suggests that different public funding systems should be implemented at different stages of the educational system. Empirical studies document low returns to interventions targeting disadvantaged adolescents, but high economic returns for remedial investments targeting young disadvantaged children (Cunha & Heckman, 2007; Cunha, James, Lochner, & Masterov, 2006; Heckman, 2008; Heckman & Masterov, 2007). This evidence implies an equity-efficiency trade-off for late child investments but not for early investments (Cunha & Heckman, 2007). Thus, public resources for education should focus on low-income students at earlier stages. However, at later stages, when human capital inequalities are difficult to undo, public resources should be shifted toward high-human capital students so as to maximize output, relying on an alternative instrument for socially desirable redistribution. The popularity of free education at different stages of education is not completely aligned with the implications derived from this third strand of the literature.
45
+
46
+ Then, why is universal free education so popular in the world? This paper gives a political economy explanation for this popularity. We model a static economy populated by a continuum of heterogeneous agents or *parents*, and each of them has one child and must vote for the funding regime that will finance the education of the child. Parents are heterogeneous in terms of human capital, which equals the family income. The parents' human capital is exogenously given and distributed according to a lognormal distribution function, as in Glomm and Ravikumar (1992) and Becker (1993). We study the Condorcet winner among four funding regimes that frequently appear in the political debate: a private system, a public system that delivers the same resources to all students, a public system that intends to equalize results, and a public system that aims to maximize the output of the economy.
47
+
48
+ Our analysis shows that a public system that universally invests the same resources in each student is the Condorcet winner in a one-person, one-vote democracy. The intuition behind our
49
+
50
+ ²In Chile, the Confederation of Chilean Student Federations (CONFECH), a national body made up of students at Chilean universities, led a series of student protests across the country in 2011. The student movement demanded, among other things, an increase in state support for public universities and free public education. In South Africa, the "Fees Must Fall" movement emerged in 2015 after the government announced an increase in mandatory fees at the universities. Students were placated after the proposal for the increase was dropped. The 2010 United Kingdom student protests were a series of demonstrations held in opposition to the planned increase of the cap on tuition fees by the Conservative-Liberal Democrat coalition government. The biggest demonstration occurred in November 2010, officially known under the phrase of "Fund Our Future: Stop Education Cuts," where thousands of students marched through central London demanding free education.
51
+ ---PAGE_BREAK---
52
+
53
+ key finding relies on the lognormal distribution of income, which is skewed to the right. A public system that equalizes outcomes will channel more resources per student to a minority of poor students. The efficiency-oriented system, in contrast, diverts more resources per student to a minority of wealthy students. The majority therefore does not favor public systems that disproportionally benefit a small group of either poor or rich agents, in comparison to the system that equalizes resources across students. In addition, the lognormal income distribution also implies that the median income is below the per capita level. A proportional tax on income that is then redistributed evenly between all students benefits those whose income falls below the mean. Then, the latter agents, who are the majority, prefer the public system that invests the same amount in each student, rather than the private system.
54
+
55
+ Therefore, our paper provides a political economy explanation for the popularity of universal free education. We show that an ex ante egalitarian public funding system for education is the Condorcet winner when it is confronted by a private system, an ex post egalitarian public system, and an output-maximizing public system. In addition, we show that the voting outcome of public versus private funding for education depends crucially on the type of public funding under consideration. Concretely, we prove that voters might choose a private system when a government proposes as a single alternative either a public system that intends to equalize results or a public system that aims to maximize the output of the economy. Thus, the voting outcome of the public versus private funding systems is not a trivial issue. We also discuss extensions to the baseline model, to show that our main result holds in democracies with a limited degree of either elitism or populism and in a type of top-up education system.
56
+
57
+ Our work builds upon earlier studies of the political economy of education funding. Creedy and Francois (1990) examine the conditions under which an uneducated majority of individuals support the financing of a proportion of the costs of education through the tax system. Glomm and Ravikumar (1992) analyze the political support for private versus public education, but in their model, voters face only one public funding design. Fernandez and Rogerson (1995) claim that the net effect of public support for higher education is a transfer of resources from poor to rich agents. They show that the underlying factor behind this result is the fact that education is only partially publicly provided. Then, the rich and the middle class may vote for relatively low subsidies to exclude poorer agents from education in the presence of credit constraints to privately finance education. Epple and Romano (1996) study the existence and properties of voting equilibria over public school expenditure in the presence of a private alternative.
58
+
59
+ More recently, De Fraja (2001) studies the voting equilibrium when voters must choose between two higher education reforms: the imposition of an ability test for admission to a university and a uniform subsidy to university attendance financed by a proportional tax on income. In a similar line, Anderberg and Balestrino (2008) study the voting equilibrium when there are two options to finance higher education in an economy with credit constraints: A subsidy to those who participate in education and a proportional income tax. Borck and Wimbersky (2014) study the political determination of higher education finance. The authors focus their analysis on the factors that might contribute toward higher education reforms from a traditional tax-subsidy scheme to income-contingent loan schemes or graduate taxes.
60
+
61
+ These previous studies have not analyzed the political support for education funding systems when private education competes with public funding alternatives aiming at equalizing resources, equalizing results, or maximizing output. Including a complete list of public funding alternatives is important since, as we show explicitly in this paper, the Condorcet winner indeed depends on the specific design for the public funding alternative. In this sense, the analysis developed by Glomm and Ravikumar (1992), who consider a single public funding system, does
62
+ ---PAGE_BREAK---
63
+
64
+ not contain straightforward implications about the Condorcet winner for the case in which the pool of alternatives for the voters includes several public funding schemes.
65
+
66
+ The rest of this paper is organized as follows. Section 2 presents the model and derives human capital formation under different education funding systems. Section 3 analyzes the political support for alternative education funding systems. Section 4 discusses extensions to our model. Finally, Section 5 concludes.
67
+
68
+ ## 2 | THE MODEL
69
+
70
+ Consider a static economy populated by a continuum of heterogeneous agents or *parents*, each with only one child.³ Children are differentiated by the human capital they inherit from their parents. This initial human capital of the child is an input for the child's formal education. Parent *i*'s initial human capital, $h_P^i$, is exogenously given and distributed according to a lognormal distribution function $G$ with parameters $\mu$ and $\sigma^2$ over support $(0, +\infty)$.⁴ We normalize the size of the population to 1.
71
+
72
+ Children do not make any decisions. They only receive education, which is used to accumulate human capital. Each parent decides how to allocate her income $h_P^i$ between consumption $c^i$ and her child's education $y^i$. We set labor to 1; thus, an agent's labor earnings equal her human capital. Parents cannot borrow against the future earnings of their children, since there is no capital market in this economy.⁵
73
+
74
+ All individuals have identical preferences. The preferences are for own consumption and for the total human capital they pass on to their descendants, as in Banerjee and Newman (1991).⁶ Specifically, agent *i* has the following utility function,
75
+
76
+ $$U(c^i, h_c^i) = \ln c^i + \lambda \ln h_c^i, \quad (1)$$
77
+
78
+ where $c^i$ is the agent's consumption and $h_c^i$ is the total human capital passed on to the child, discounted by $\lambda \in (0,1)$. The human capital passed on is determined by the following equation:⁷
79
+
80
+ $$h_c^i = \Theta(v^i + y^i)^{\gamma} (h_P^i)^{\delta}, \quad (2)$$
81
+
82
+ which depends upon agent *i*'s human capital $h_P^i$ and the total amount $v^i + y^i$ of resources invested in the education of the child, where $v^i$ are the resources (or voucher) invested in education by the government in the child of agent *i* and $y^i$ are the resources invested in education by agent *i*, the parent. The parameter $\Theta > 0$ is an exogenous constant. The parameter
83
+
84
+ ³Sleebos (2003) reports that the average fertility rate in OECD countries is about 1.6 children per woman. Docquier (2004) shows that there is no clear relation between income and fertility in developed countries. However, a more general model with endogenous fertility rates would be an interesting avenue for future research.
85
+
86
+ ⁴Since Gibrat (1931), the lognormal distribution has been extensively used to describe within- or between-country income distributions. The lognormal distribution has been empirically shown to explain most of the income distribution (see Clementi & Gallegati, 2005; Neal & Rosen, 2000; among others).
87
+
88
+ ⁵Several studies have highlighted capital market imperfections as an important aspect of the investment in human capital (e.g., Aghion & Bolton, 1992; Becker, 1993; Becker & Tomes, 1979; Galor, 2000; Moav, 2002; among others).
89
+
90
+ ⁶A more sophisticated formulation for altruism (Kohlberg, 1976; Loury, 1981; Becker, 1986; Banerjee & Newman, 1991; Becker, 1993, among others) leads to an intractable formulation when comparing different regimes.
91
+
92
+ ⁷The human capital that parents pass on to their children can be interpreted either as the initial skills of preprimary students who start formal education or as the amount of human capital with which a secondary student starts her tertiary education.
93
+ ---PAGE_BREAK---
94
+
95
+ $\gamma \in (0,1)$ captures the returns to investment in education and the parameter $\delta > 0$ captures the returns to the parental human capital.
96
+
97
+ The only difference between the educational systems studied is made by the constraints imposed upon $v^i$ and $y^i$. Under a purely private system, the government makes no investment in education, so $v^i = 0$. Agent $i$, therefore, divides her income $h_p^i$ between consumption $c^i$ and private investment in the education of her child $y^i$, with $h_p^i = c^i + y^i$. Under public education, only the government invests in education, so $y^i = 0$. Since agent $i$ spends nothing on education, all of the post-tax income $(1-\tau)h_p^i$ goes into consumption: $c^i = (1-\tau)h_p^i$, where $\tau$ is the tax rate on the agent's income. The total revenue raised by the government is $\tau H_p$, where $H_p = \int h_p dG(h)$. This revenue is distributed among the students in the following three ways in the public education systems we study: (a) equally ex ante, with $v^i = v^j$, $\forall i,j$; (b) equally ex post, so that $h_c^i = h_c^j, \forall i,j$; and (c) output maximizing, so that $dh_c^i/dv^i = dh_c^j/dv^j, \forall i,j$. In all three cases, budget balance requires $\mathbb{E}[v] = \tau H_p$, where $\mathbb{E}$ denotes the expectation operator.
98
+
99
+ ## 2.1 | The private education system (S1)
100
+
101
+ In this section, we study the optimal investment in education under a purely private funding system, where the government's investment in education is absent ($v^i = 0, \forall i$). Agent $i$, therefore, chooses $c^i$ and $y^i$ to maximize $U(c^i, h_c^i)$ subject to the technology of human capital formation $h_c^i = \Theta(y^i)^{\gamma}(h_p^i)^{\delta}$ and the feasibility constraint $h_p^i = c^i + y^i$. The first-order condition with respect to $y^i$ is
102
+
103
+ $$y^i = \left( \frac{\lambda\gamma}{1 + \lambda\gamma} \right) h_p^i. \quad (3)$$
104
+
105
+ Therefore, parents invest a constant fraction $\lambda\gamma/(1+\lambda\gamma)$ of their income in the education of their children. We prove later that the fraction of the income that parents privately invest in education is identical to the majority's preferred tax rate.
106
+
107
+ ## 2.2 | The public education systems
108
+
109
+ Now suppose that education is financed publicly. No private acquisition of education is allowed, so $y^i = 0$. Thus, agents consume their after-tax income $c^i = (1-\tau)h_p^i$. Public education is financed by a proportional income tax $\tau$. The resources collected by the government are used to provide education to children. We focus on three different public funding systems. In the first, the government invests an equal amount of money in each student. In the second, the government invests resources to equalize the human capital of the students at the end of the education stage. In the third, the government seeks to maximize the total human capital of the economy.
110
+
111
+ ### 2.2.1 | The ex ante egalitarian public education system (S2)
112
+
113
+ In this public system, the government invests the same amount of resources in each student. The subsidy given to each student is denoted by $v$. Under the constraint that total expenditures must be equal to the total resources collected by the proportional income tax, the equilibrium investment in each student is
114
+
115
+ $$v = \tau \mathbb{E}[h_p]. \quad (4)$$
116
+ ---PAGE_BREAK---
117
+
118
+ Hence, the government gives a flat subsidy to all students. The amount of this subsidy is equal to a fraction $\tau$ of the per capita income of the economy. Moreover, since agent i's utility is $\ln(1 - \tau) + \gamma\lambda \ln \tau + (\text{terms independent of } \tau)$, the tax rate $\tau^i$ that maximizes agent i's utility is $\tau^i = \gamma\lambda/(1 + \gamma\lambda)$. Since $\tau^i$ is independent of agent i's characteristics, the same tax rate maximizes all agents' utilities. Therefore, we have that the government chooses $\tau = \gamma\lambda/(1 + \gamma\lambda)$, which is the tax rate preferred by all parents.
119
+
120
+ ### 2.2.2 | The ex post egalitarian public education system (S3)
121
+
122
+ In this system, the government seeks to remedy initial inequalities in human capital through investments in education that equalize ex post human capital. To do so, the government invests in agents i and j the amounts $v^i$ and $v^j$, respectively, such that $h_c^i = h_c^j$. Therefore, the relative public investment in students from different families must satisfy $v^i/v^j = (h_p^j/h_p^i)^{\delta/\gamma}$. Taking expectations with respect to j and imposing the balanced-budget constraint $\mathbb{E}[v] = \tau\mathbb{E}[h_p]$, we have that the amount invested by the government on a student from family i is
123
+
124
+ $$ v^i = \tau \mathbb{E}[h_p] \left( \frac{(h_p^i)^{\delta/\gamma}}{\mathbb{E}[h_p^{\delta/\gamma}]} \right). \quad (5) $$
125
+
126
+ Therefore, each student receives a proportion of the per capita subsidy delivered under regime S2. This proportion decreases with the initial level of the human capital of the student (or, equivalently, with the family income). Specifically, the proportion of the per capita voucher that each student receives varies according to some measure of the gap between the initial human capital of the student and the average human capital of the economy. Poorer students receive more resources to compensate for their initial lower levels of human capital so that the results of the educational process are equalized across all students.
127
+
128
+ Additionally, as in the case of an ex ante egalitarian system, the same argument applies to show that the tax rate chosen is $\tau = \gamma\lambda/(1 + \gamma\lambda)$.
129
+
130
+ ### 2.2.3 | The output maximizing public education system (S4)
131
+
132
+ In the third public system, the government invests the collected resources to maximize the total human capital of the economy. Given this goal, the efficient expenditure is achieved when the marginal product of investment in each student is equalized, that is, $dh_c^i/dv^i = dh_c^j/dv^j$, $\forall i, j$. Therefore, the relative amount of resources invested in each family is $v^i/v^j = (h_p^i/h_p^j)^{\delta/(1-\gamma)}$. As we did before, taking expectations with respect to j and imposing the balanced-budget constraint on the government $\mathbb{E}[v] = \tau\mathbb{E}[h_p]$, we obtain⁸
133
+
134
+ $$ v^i = \tau \mathbb{E}[h_p] \left( \frac{(h_p^i)^{\delta/(1-\gamma)}}{\mathbb{E}[h_p^{\delta/(1-\gamma)}]} \right). \quad (6) $$
135
+
136
+ ⁸Equation (6) characterizes a maximum only if the second-order condition holds: $\gamma(\gamma-1)(v^i)^{\gamma-2}(h_p^i)^{\delta} < 0$, $\forall i$. This condition holds since we have assumed that $\gamma \in (0, 1)$.
137
+ ---PAGE_BREAK---
138
+
139
+ In this regime, each student receives a voucher that is increasing in the level of the student's initial human capital, since the marginal product of public investment in education is higher in students with a greater initial human capital. Therefore, output maximization requires providing larger subsidies to better-endowed students. As in the previous cases, it is straightforward to show that the tax rate chosen by the majority is $\tau = \gamma\lambda/(1 + \gamma\lambda)$.
140
+
141
+ # 3 | POLITICAL SUPPORT FOR THE EDUCATION FUNDING SYSTEMS
142
+
143
+ In this section, we analyze the political support for different education funding systems in a one-person, one-vote democracy. Concretely, we study the existence and identity of the Condorcet winner among the four funding systems described in Section 2. The game is solved by backward induction. First, the taxes are determined for each system. Then, the systems are compared in pairwise elections and the Condorcet winner is elected.
144
+
145
+ ## 3.1 | Utility comparison
146
+
147
+ We first derive the indirect utility $V(h_p^i)$ of an agent $i$ under the four funding systems. In the expressions below, we group the terms to facilitate a comparison of the channels through which the agent's human capital $h_p^i$ impacts the agent's utility. In addition, we discuss each of these channels and assess the ones that matter in our comparison.
148
+
149
+ $$V^{S1}(h_p^i) = \ln\left(\frac{1}{1+\lambda\gamma}\right)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \ln\left(\frac{\lambda\gamma}{1+\lambda\gamma}\right)h_p^i, \quad (7)$$
150
+
151
+ $$V^{S2}(h_p^i) = \ln(1 - \tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \ln \tau \mathbb{E}[h_p], \quad (8)$$
152
+
153
+ $$V^{S3}(h_p^i) = \ln(1-\tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \left[ -\frac{\delta}{\gamma} \ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{-\delta/\gamma}] \right], \quad (9)$$
154
+
155
+ $$V^{S4}(h_p^i) = \ln(1-\tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \left[ \left(\frac{\delta}{1-\gamma}\right) \ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{\delta/(1-\gamma)}] \right]. \quad (10)$$
156
+
157
+ Human capital influences the utility of an agent through three channels. First, human capital determines the income of the agent and, thus, the agent's consumption. The equilibrium of disposable income for consumption under the private system is $(1/(1+\lambda\gamma))h_p^i$, and it is $(1-\tau)h_p^i$ under each of the public systems. We have already shown that the chosen tax rate is $\lambda\gamma/(1+\lambda\gamma)$. Thus, the amount invested by each parent in the private system equals the taxes paid by them to finance a public system. It follows that the equilibrium consumption level reached by any agent is the same under each of the four education funding systems.
158
+ ---PAGE_BREAK---
159
+
160
+ We, therefore, conclude that the impact of a funding system on the disposable income of an agent is not a decisive factor to tilt the balance in favor of one of the funding systems.
161
+
162
+ Human capital also affects the indirect utility of agents through the production technology of human capital, described by Equation (2). Agents have preferences not only on consumption but also on the human capital they pass on to their children. Thus, the human capital of a parent directly determines the child's human capital and, through this channel, influences the parent's indirect utility. The latter effect is equal to $\lambda\delta\ln h_p^i$ and is identical under the four systems. Hence, neither does this channel play a role in the choice of the education funding system.
163
+
164
+ The third channel through which human capital affects the utility of an agent is the parental income's impact on the resources for education that the child receives under each of the funding systems. In the private system, parents invest a fixed fraction of their income, as reflected by the term $\ln y^i = \ln(\lambda\gamma/(1 + \lambda\gamma))h_p^i$ in Equation (7). Thus, there is a positive relationship between parental income and the resources invested in the student of the corresponding family. The ex ante egalitarian public education system (S2) invests the same resources in each family, as captured by the term $\ln v^i = \ln \tau \mathbb{E}[h_p]$ in Equation (8). Thus, there is no relationship between one family's income and the resources that the system invests in the student from that family. The ex post egalitarian public education system (S3) seeks to equalize ex post human capital. Thus, this system invests more in students from low-income families, generating a negative relationship between parental income and the resources invested by the system in the student. This relationship is expressed by $\ln v^i = -(\delta/\gamma)\ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{-(\delta/\gamma)}]$ in Equation (9). The opposite occurs with the efficient system (S4), which invests more in students from high-income families, as expressed by the term $\ln v^i = (\delta/(1-\gamma))\ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{(\delta/(1-\gamma))}]$ in Equation (10). Therefore, different systems invest differently in the student of a given family, even though the resources that the family disburses under each of the funding systems are identical.
165
+
166
+ The previous discussion implies that parents will support the system that invests the most in their children. The private system (S1) and the efficient system (S4) invest more in students from richer families, whereas the opposite occurs under the ex post egalitarian public education system (S3). The ex ante egalitarian public education system (S2) is neutral as it invests exactly the same amount in each student.
167
+
168
+ As an intermediate step in our analysis, we express Equations (7)–(10) in a simpler and more informative form. To do so, note that the resources invested by each of the systems in a student depend on first and second moments of the income distribution, that is, the average income and how unequally it is distributed over the families. We use the properties of the lognormal distribution to derive an expression for $\mathbb{E}[h_p]$, $\mathbb{E}[(h_p)^{-(\delta/\gamma)}]$, and $\mathbb{E}[(h_p)^{(\delta/(1-\gamma))}]$. For a lognormal distribution, we know that $\mathbb{E}[(h_p)^n] = \exp(n\mu + (1/2)n^2\sigma^2)$ for any $n \in \mathbb{R}$. Therefore,
169
+
170
+ $$ \mathbb{E}[h_p] = \exp\left(\mu + \frac{1}{2}\sigma^2\right), \qquad (11) $$
171
+
172
+ $$ \mathbb{E}[(h_p)^{-\delta/\gamma}] = \exp\left(-\frac{\delta}{\gamma}\mu + \frac{1}{2}\left(\frac{\delta}{\gamma}\right)^2\sigma^2\right), \qquad (12) $$
173
+ ---PAGE_BREAK---
174
+
175
+ $$
176
+ \mathbb{E}[(h_p)^{\delta/(1-\gamma)}] = \exp\left(\left(\frac{\delta}{1-\gamma}\right)\mu + \frac{1}{2}\left(\frac{\delta}{1-\gamma}\right)^2\sigma^2\right). \quad (13)
177
+ $$
178
+
179
+ We substitute (11)–(13) into Equations (7)–(10) and obtain the utility of an agent *i* as a function of the first and second moments of the income distribution. To do so, we use the fact that $\tau = \lambda\gamma/(1 + \lambda\gamma)$ and let $\omega^i = \ln(1 - \tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \ln \tau$. Observe that $\omega^i$ is the same for all the education funding systems. Thus, we can focus the analysis on the elements of the indirect utility function that are affected by the investment that the funding system makes in the students, as we have already concluded in the earlier discussion.
180
+
181
+ $$
182
+ V^{S1}(h_p^i) = \omega^i + \lambda\gamma \ln h_p^i, \quad (14)
183
+ $$
184
+
185
+ $$
186
+ V^{\mathcal{S}2}(h_p^i) = \omega^i + \lambda\gamma \left( \mu + \frac{1}{2} \sigma^2 \right), \quad (15)
187
+ $$
188
+
189
+ $$
190
+ V^{\mathcal{S}3}(h_p^i) = \omega^i + \lambda\gamma \left[ -\frac{\delta}{\gamma} \ln h_p^i + \left(1 + \frac{\delta}{\gamma}\right) \mu + \frac{1}{2} \left(1 - \left(\frac{\delta}{\gamma}\right)^2\right) \sigma^2 \right], \quad (16)
191
+ $$
192
+
193
+ $$
194
+ V^{\mathcal{S}4}(h_p^i) = \omega^i + \lambda\gamma \left[ \left( \frac{\delta}{1-\gamma} \right) \ln h_p^i + \left( 1 - \frac{\delta}{1-\gamma} \right) \mu + \frac{1}{2} \left( 1 - \left( \frac{\delta}{1-\gamma} \right)^2 \right) \sigma^2 \right]. \quad (17)
195
+ $$
196
+
197
+ Note that $\sigma = 0$ in a completely egalitarian economy, in which the four systems give agent $i$ the same utility if $h_p^i = \exp(\mu)$; that is, $V^j(\exp(\mu)) = \omega^i + \lambda\gamma\mu$, for all $j \in \{S1,S2,S3,S4\}$. This agent with income $h_p^i = \exp(\mu)$ is the one with the median income of a lognormal distribution. Positive levels of inequality, however, break this indifference between the systems and make the choice of the Condorcet winner nontrivial.
198
+
199
+ **3.2 | Pairwise elections and the Condorcet winner**
200
+
201
+ In this section, we use Equations (14)–(17) to study pairwise voting among the four regimes. Define $h^{\text{Sa,Sb}}$ as the income level at which the indirect utilities of the agent under systems Sa and Sb are the same, where $a, b \in \{1,2,3,4\}$. We compute this income threshold for the pairs $\{S2,S1\}$, $\{S2,S3\}$, and $\{S2,S4\}$. For each of these pairwise comparisons involving S2, we assess whether a majority coalition exists to elect S2. We show that in any pairwise election involving S2, this system emerges as the winner.
202
+
203
+ Using Equations (14)–(17), we obtain
204
+
205
+ $$
206
+ h^{\mathcal{S}2,\mathcal{S}1} = \exp\left(\mu + \frac{1}{2}\sigma^2\right), \qquad (18)
207
+ $$
208
+
209
+ $$
210
+ h^{\mathcal{S}2,\mathcal{S}3} = \exp\left(\mu - \frac{1}{2}\frac{\delta}{\gamma}\sigma^2\right), \tag{19}
211
+ $$
212
+ ---PAGE_BREAK---
213
+
214
+ TABLE 1 Condorcet winner, $\delta > (1 - \gamma)$ and $\sigma > 0$
215
+
216
+ <table><thead><tr><th>Election</th><th>I</th><th>II</th><th>III</th><th>IV</th><th>Outcome</th></tr></thead><tbody><tr><td>{S2, S1}</td><td>S2</td><td>S2</td><td>S1</td><td>S1</td><td>S2</td></tr><tr><td>{S2, S3}</td><td>S3</td><td>S2</td><td>S2</td><td>S2</td><td>S2</td></tr><tr><td>{S2, S4}</td><td>S2</td><td>S2</td><td>S2</td><td>S4</td><td>S2</td></tr></tbody></table>
217
+
218
+ $$h^{S2,S4} = \exp\left(\mu + \frac{1}{2}\left(\frac{\delta}{1-\gamma}\right)\sigma^2\right). \quad (20)$$
219
+
220
+ We examine the cases for which $\sigma > 0$. We divide our analysis into three cases, $\delta > (1 - \gamma)$, $\delta < (1 - \gamma)$, and $\delta = (1 - \gamma)$, since the ranking of the thresholds $h^{S2,S1}$, $h^{S2,S3}$, and $h^{S2,S4}$ above changes depending on the relative values of $\delta$ and $\gamma$.⁹
221
+
222
+ Suppose first $\delta > (1 - \gamma)$. It follows that $h^{S2,S3} < h^{S2,S1} < h^{S2,S4}$. Therefore, Equations (18)–(20) divide the population into four groups depending on their income $h_p^i$: Group I for income level $h_p^i \le h^{S2,S3}$; Group II for income level $h^{S2,S3} < h_p^i \le h^{S2,S1}$; Group III for income level $h^{S2,S1} < h_p^i \le h^{S2,S4}$; and Group IV for income level $h_p^i > h^{S2,S4}$. The median voter is the agent $m$ with an income level $h_p^m = \exp(\mu)$. Thus, this division of the income space implies that the median voter belongs to group II. We analyze the majority voting equilibria in the following pairwise elections: {S2, S1}, {S2, S3}, and {S2, S4}.
223
+
224
+ Consider first the {S2, S1} election. The indirect utility functions $V^{S1}(h_p^i)$ and $V^{S2}(h_p^i)$ imply that $V^{S2}(h_p^i) \ge V^{S1}(h_p^i)$ for all $h_p^i \le h^{S2,S1}$. Then, S2 provides a greater level of utility than S1 for all agents in Groups I and II. Thus, these agents with incomes below $h^{S2,S1}$ strictly prefer the ex ante egalitarian public education system (S2) to the private system (S1). Since the median voter is in Group II, it follows that Groups I and II form a majority who prefers S2 to S1. Intuitively, the public system invests a fraction $\tau = \lambda\gamma / (1 + \lambda\gamma)$ of the mean income of the economy in each student's education. By contrast, the private system puts a fraction $\lambda\gamma / (1 + \lambda\gamma)$ of the family's income into the student's education. Thus, agents with incomes below the mean income prefer S2, since the S2 public system invests more in their children than these agents' investment levels under the private system S1.
225
+
226
+ Consider next the {S2, S3} election. In this case, we have that $V^{S2}(h_p^i) \ge V^{S3}(h_p^i)$ for all $h_p^i \ge h^{S2,S3}$. Then, agents with an income level above $h^{S2,S3}$ strictly support S2 over S3. Therefore, all agents from Groups II, III, and IV form a majority to elect S2 from the {S2, S3} election. Intuitively, S3 invests more in students from low-income families and less in students from high-income families than S2. Therefore, students from richer families (with $h_p^i \ge h^{S2,S3}$) receive more resources under a public system that delivers a flat subsidy (S2) than under a public system that attempts to equalize ex post results (S3).
227
+
228
+ Lastly, consider the {S2, S4} election. We have that $V^{S2}(h_p^i) \ge V^{S4}(h_p^i)$, for all $h_p^i \le h^{S2,S4}$. Then, agents with an income level below $h^{S2,S4}$ strictly prefer S2 over S4. Therefore, agents from Groups I, II, and III form a majority that strictly prefers S2 to S4. Intuitively, in comparison to
229
+
230
+ ⁹The cases $\delta > (1 - \gamma)$, $\delta < (1 - \gamma)$, and $\delta = (1 - \gamma)$ correspond to increasing, decreasing, and constant returns to scale in the production function of human capital. We show that S2 is always the Condorcet winner under each of these cases. However, the political support for system S2 in the {$S2,S4$} election becomes more pronounced under increasing returns. The latter is a direct consequence of the fact that, under system S4, resources become much more concentrated on the richest students as returns to scale increase.
231
+ ---PAGE_BREAK---
232
+
233
+ **TABLE 2** Condorcet winner, $\delta < (1 - \gamma)$ and $\sigma > 0$
234
+
235
+ <table><thead><tr><th>Election</th><th>I</th><th>II</th><th>III</th><th>IV</th><th>Outcome</th></tr></thead><tbody><tr><td>{S2, S1}</td><td>S2</td><td>S2</td><td>S2</td><td>S1</td><td>S2</td></tr><tr><td>{S2, S3}</td><td>S3</td><td>S2</td><td>S2</td><td>S2</td><td>S2</td></tr><tr><td>{S2, S4}</td><td>S2</td><td>S2</td><td>S4</td><td>S4</td><td>S2</td></tr></tbody></table>
236
+
237
+ S2, system S4 invests more in students from high-income families at the expense of all of the agents in Groups I, II and III.
238
+
239
+ Table 1 summarizes the voting outcome in the one-on-one elections {S2, S1}, {S2, S3}, and {S2, S4} for the case $\delta > (1 - \gamma)$.
240
+
241
+ We perform now an analogous analysis for the case $\delta < (1 - \gamma)$. In this case, $h^{S2,S3} < h^{S2,S4} < h^{S2,S1}$. This again divides the agents into four groups, depending on income $h_p^i$: Group I with $h_p^i \le h^{S2,S3}$; Group II with $h^{S2,S3} < h_p^i \le h^{S2,S4}$; Group III with $h^{S2,S4} < h_p^i \le h^{S2,S1}$; and Group IV with $h_p^i > h^{S2,S1}$. By the same analysis as the one above, we can show that a majority coalition exists to support S2 in each of the three pairwise elections (see Table 2).
242
+
243
+ Lastly, consider the case in which $\delta = (1 - \gamma)$. It follows that $h^{S2,S3} < h^{S2,S4} = h^{S2,S1}$, dividing the population into three groups: Group I with $h_p^i \le h^{S2,S3}$; Group II with $h^{S2,S3} < h_p^i \le h^{S2,S4} = h^{S2,S1}$; and Group III with $h_p^i > h^{S2,S4} = h^{S2,S1}$. The median income is again in Group II. Then, this is a case in which the private system (S1) and the output maximizing system (S4) generate a subsidy schedule such that the indifference between these systems and the system S2 is observed for the same threshold agent: the one with income $h_p^i = h^{S2,S4} = h^{S2,S1}$. Proceeding analogously to what we did previously, we show in Table 3 the results for this case.
244
+
245
+ Therefore, we conclude from the results of Tables 1 through 3 that a public funding system that collects taxes to invest the same amount in each student is the Condorcet winner.
246
+
247
+ ## 3.3 | Public versus private funding for education: The type of public funding matters
248
+
249
+ We have shown that the ex ante egalitarian public education system S2 is the Condorcet winner in pairwise elections pitting S2 against private system S1 and the other two public systems, S3 and S4. In this section, we explore whether the other two public education funding schemes S3 and S4 also beat private education S1 in pairwise elections. That is, we study the Condorcet winner when the private system is confronted by only one public funding alternative that is different from S2. This analysis will shed light on whether the design and number of public funding alternatives matter for political support of public education over private education.
250
+
251
+ **TABLE 3** Condorcet winner, $\delta = (1 - \gamma)$ and $\sigma > 0$
252
+
253
+ <table><thead><tr><th>Election</th><th>I</th><th>II</th><th>III</th><th>Outcome</th></tr></thead><tbody><tr><td>{S2, S1}</td><td>S2</td><td>S2</td><td>S1</td><td>S2</td></tr><tr><td>{S2, S3}</td><td>S3</td><td>S2</td><td>S2</td><td>S2</td></tr><tr><td>{S2, S4}</td><td>S2</td><td>S2</td><td>S4</td><td>S2</td></tr></tbody></table>
254
+ ---PAGE_BREAK---
255
+
256
+ Consider first the {S1, S3} pairwise election. In this case, the threshold for the indifference between the systems is
257
+
258
+ $$h^{\mathrm{S1,S3}} = \exp\left(\mu + \frac{1}{2}\left(1 - \frac{\delta}{\gamma}\right)\sigma^2\right). \quad (21)$$
259
+
260
+ We have three cases: $\delta > \gamma$, $\delta < \gamma$, and $\delta = \gamma$. As we did before, we analyze the nontrivial case in which $\sigma > 0$. Suppose $\delta > \gamma$. Then, we have two income groups: Group I consists of agents with income levels $h_p^i \le h^{\mathrm{S1,S3}}$ and Group II consists of agents with income levels $h_p^i > h^{\mathrm{S1,S3}}$. In this case, the voter with the median income belongs to Group II since $\exp(\mu) > h^{\mathrm{S1,S3}}$. Equations (14) and (16) imply that $V^{\mathrm{S3}}(h_p^i) \ge V^{\mathrm{S1}}(h_p^i)$, for all $h_p^i \le h^{\mathrm{S1,S3}}$. Then, all agents with income levels below $h^{\mathrm{S1,S3}}$ support the public system S3. Intuitively, the private system (S1) results in greater investment for the wealthier students, whereas the public system (S3) leads to greater investment in the poorer students. Therefore, Group I votes for the public system, whereas Group II votes for the private system. Since the median-income voter belongs to Group II, it follows that the majority chooses the private system in the {S1, S3} election.
261
+
262
+ Suppose now $\delta < \gamma$. In this case, the median income belongs to Group I because $\exp(\mu) < h^{\mathrm{S1,S3}}$. As before, Group I votes for the public system S3, whereas Group II votes for the private system. However, with the median-income voter now in Group I, the majority chooses the public system S3. Lastly, when $\delta = \gamma$, the median income coincides with the threshold $h^{\mathrm{S1,S3}}$. Thus, half of the voters support the private system and the other half support the public system, resulting in a tie. Table 4 summarizes these results.
263
+
264
+ Our analysis shows that when public education is pitted against private education, political support indeed depends on the type of public education under consideration. When the private system (S1) and the ex post egalitarian public education system (S3) are proposed to the voters, the majority votes for the private system when the returns to investment in education are relatively low compared with the returns to endowed human capital, as expressed by the condition $\gamma < \delta$. A greater influence of endowed human capital on the formation of the students' human capital requires that an ex post egalitarian public education system (S3) redistribute even more resources to the poor, since $\nu^i/\nu^j = (h_p^j/h_p^i)^{\delta/\gamma}$. Thus, the public resources for education become more concentrated on a minority of low-income students, making the public system S3 less popular than the private system S1 for the majority.
265
+
266
+ We show next that a similar conclusion results when the private system (S1) and the output-maximizing public system (S4) are the only alternatives for the voters. In this case, the income threshold for the indifference between the systems is
267
+
268
+ $$h^{\mathrm{S1,S4}} = \exp\left(\mu + \frac{1}{2}\left(1 + \frac{\delta}{1-\gamma}\right)\sigma^2\right), \quad (22)$$
269
+
270
+ TABLE 4 Condorcet winner, {S1, S3} Election
271
+
272
+ <table><thead><tr><th>Parameters</th><th>I</th><th>II</th><th>Outcome</th></tr></thead><tbody><tr><td>&delta; &gt; &gamma; and &sigma; &gt; 0</td><td>S3</td><td>S1</td><td>S1</td></tr><tr><td>&delta; &lt; &gamma; and &sigma; &gt; 0</td><td>S3</td><td>S1</td><td>S3</td></tr><tr><td>&delta; = &gamma; and &sigma; &gt; 0</td><td>S3</td><td>S1</td><td>S1-S3</td></tr></tbody></table>
273
+ ---PAGE_BREAK---
274
+
275
+ **TABLE 5** Condorcet winner, {S1, S4} Election
276
+
277
+ <table><thead><tr><th>Parameters</th><th>I</th><th>II</th><th>Outcome</th></tr></thead><tbody><tr><td>δ > 1 − γ and σ > 0</td><td>S1</td><td>S4</td><td>S1</td></tr><tr><td>δ < 1 − γ and σ > 0</td><td>S4</td><td>S1</td><td>S4</td></tr><tr><td>δ = 1 − γ and σ > 0</td><td>S1–S4</td><td>S1–S4</td><td>S1–S4</td></tr></tbody></table>
278
+
279
+ for $\delta \neq (1 - \gamma)$. Then, we have Group I with income levels $h_p^i \le h^{S1,S4}$ and Group II with income levels $h_p^i > h^{S1,S4}$. We again have three cases: $\delta > 1 - \gamma$, $\delta < 1 - \gamma$, and $\delta = 1 - \gamma$. Consider first the case $\delta > 1 - \gamma$. Equations (14) and (17) imply that $V^{S1}(h_p^i) \ge V^{S4}(h_p^i)$, for all $h_p^i \le h^{S1,S4}$. Thus, agents in Group I support S1 whereas agents in Group II support system S4. The median-income voter is in Group I. Therefore the majority votes for the private system S1. The intuition behind this result again relies on the relative importance of parental human capital on the formation of the human capital of their children. A higher value of $\delta$ increases the marginal product of endowed resources for the richer students relative to poorer students. As a result, the S4 public system channels even more resources to a minority of rich students, making the private system S1 more appealing to the majority.
280
+
281
+ Suppose now $\delta < 1 - \gamma$. In this case, we have that $V^{S1}(h_p^i) \ge V^{S4}(h_p^i)$, for all $h_p^i \ge h^{S1,S4}$. Thus, agents in Group I support S4 whereas agents in Group II support S1. The median-income voter is in Group I and, therefore, the majority now votes for the public funding system S4. In this case, even though the public system S4 invests less in students from poorer families than in richer students, the amount received by the poor students under S4 is greater than the amount received by them under S1. The reason for this is that a relatively lower $\delta$ implies that the difference in the marginal product of investment across students is smaller. Hence, differences in the resources delivered across students by the system that aims at equalizing marginal product are not so pronounced as the ones that would be observed under the private system.
282
+
283
+ Lastly, suppose $\delta = 1 - \gamma$. Then, we have $V^{S1}(h_p^i) = V^{S4}(h_p^i)$, for all $h_p^i \in (0, \infty)$, the public system and the private system lead to the same outcome for each family, resulting in a tie. Table 5 summarizes the results.
284
+
285
+ The previous analysis again reinforces the important message that the voting outcome of public versus private funding for education depends crucially on the type of public funding under consideration. As we have shown, when the public funding alternative employs a design that aims to equalize ex post results or maximize output, the majority may elect private education. We also demonstrate that the introduction of an ex ante egalitarian public funding system can resolve this indeterminacy.
286
+
287
+ # 4 | EXTENSIONS OF THE MODEL
288
+
289
+ In this section, we discuss two extensions to the baseline model. First, we address the case of incomplete democracies, where a fraction of the agents do not participate in politics. Second, we introduce an example to consider the complementarity between private and public education.
290
+
291
+ ## 4.1 | Incomplete democracies
292
+
293
+ Our main result hinges upon the assumption that voters fully participate in a democracy in practice. However, voting turnout is never complete. In some democracies, the rich are more
294
+ ---PAGE_BREAK---
295
+
296
+ likely to participate in politics than the poor; in other democracies, the opposite can be true. We define democracy as incomplete when the voting turnout is less than 100%. An incomplete democracy can be biased toward either the rich or the poor. We define a democracy as “elitist” if it excludes a fraction of the poorest agents of the economy. Analogously, we define a democracy as “populist” if it excludes a fraction of the richest agents of the economy. We show in the appendix that our main result holds for democracies with a limited degree of either elitism or populism. We now discuss the intuition behind this result.
297
+
298
+ Consider first the case of an elitist democracy, which excludes a fraction of the poorest agents of the economy. Since parents will support the system that invests the most in their children, the poorest parents will support S3 over S2 because the ex post egalitarian system gives more resources to children from low-income families than the system that invests equally across students. Thus, the fact that S2 is preferred to S3 in a complete democracy immediately implies that S2 is also chosen when a number of the poorest agents do not participate in politics. In addition, the poorest parents support S2 over S1 and S2 over S4 in pairwise elections because the ex ante egalitarian system invests more in their children than the private system and the efficient public system. In the appendix, we prove that a limited degree of elitism still leaves S2 as the winner in the {$S2,S1$} and {$S2,S4$} elections.
299
+
300
+ Consider now the case of a populist democracy, which excludes a fraction of the richest agents of the economy. The richest parents support S1 over S2 and S4 over S2. Both the private system and the output maximizing system invest more heavily in their children than the ex ante egalitarian system. In complete democracies, S2 is elected in pairwise elections {$S2,S1$} and {$S2,S4$.} Therefore, S2 would also be supported by the majority when a fraction of the richest parents are excluded from voting. In addition, the richest parents prefer system S2 in the {$S2,S3$} election. The appendix shows that the ex ante egalitarian system still wins the {$S2,S3$} election when a democracy's degree of populism is limited.
301
+
302
+ ## 4.2 | Private and public education as complements
303
+
304
+ The analysis so far has assumed that private and public education are perfect substitutes in the human capital formation of students; note that the production technology of human capital is $h_c^i = \theta(v^i + y^i)^{\gamma}(h_p^i)^{\delta}$. The perfect substitutability between different systems is a realistic setting to study the political outcome when voters must choose a single alternative from a pool of purely private and public funding schemes.
305
+
306
+ The case in which public and private education are complements introduces two types of changes in the baseline model developed in Section 2. First, the production technology of human capital must address the complementarity between private and public education. Second, the information flow between private and public players must be precisely stated. Several modeling options arise from these considerations.
307
+
308
+ We sketch an example that modifies the production technology to show how our analysis can readily accommodate the complementarity between private and public education. Suppose that the educational process has two stages. In the first stage, agents carry out optimal private investment leading to $h_c^i$, which is the human capital of the student belonging to family i at the end of the first stage. Equations (2) and (3) imply that $h_c^i = \theta(\lambda\gamma/(1+\lambda\gamma))^{\gamma}(h_p^i)^{\gamma+\delta}$. In the second stage, politicians present the three public funding alternatives to the voters, who then choose the winner. Suppose the level of the human capital of the student at the end of the first
309
+ ---PAGE_BREAK---
310
+
311
+ stage becomes her initial human capital for the second stage. Substituting $h_c^i$ into Equation (2), we obtain
312
+
313
+ $$h_c^i = \theta^{1+\delta} (\nu^i)^{\gamma} \left( \frac{\lambda\gamma}{1+\lambda\gamma} \right)^{\delta\gamma} (h_p^i)^{(\gamma+\delta)\delta}. \quad (23)$$
314
+
315
+ Note that, compared to Equation (2), this setting could exacerbate or mitigate differences in human capital across families, depending on whether $\gamma + \delta \gtrless 1$. This has implications for the amount of resources that the ex post egalitarian public education system (S3) allocates to students from low-income families and the amount that the efficient system (S4) allocates to students from wealthy families. However, the analysis performed in Section 3.2 still holds once we reparametrize $\delta$ as $\tilde{\delta} = (\gamma + \delta)\delta$ and we consider the one-on-one elections that only include the public systems for the second stage.
316
+
317
+ We have shown one possible way to address the complementarity between private and public education. Interesting avenues for future research include the study of sequential voting, with agents first choosing from a pool of different private education schemes followed by a second-round election to choose from a pool of public funding systems. This could shed light on how the design of public funding schemes can affect agents’ choices of private investment in education.
318
+
319
+ # 5 | CONCLUSIONS
320
+
321
+ This paper analyzed the political support for different education funding regimes in a one-person, one-vote political system. We showed that a public system that collects taxes and delivers the same amount of resources to each family is the Condorcet winner. In economies with some degree of income inequality, a system that seeks to equalize or maximize educational outcomes concentrates resources on a minority of the population and, therefore, lacks majority support. In addition, families with an income level below the mean receive more net resources under a public system that employs flat subsidies than under a private system. Therefore, a private system also lacks majority support.
322
+
323
+ The results of this paper provide a political economy explanation for the observation that governments tend to favor free education for all students (i.e., to spend the same amount on each student). Our paper also highlights the importance of specifying the type of public education under discussion. In particular, we show that voters may favor private education over public education when the latter equalizes or maximizes ex post educational outcomes.
324
+
325
+ ## ORCID
326
+
327
+ Francisco Parro http://orcid.org/0000-0002-4395-9540
328
+
329
+ ## REFERENCES
330
+
331
+ Aghion, P., & Bolton, P. (1992). Distribution and growth in models of imperfect capital markets. *European Economic Review*, 36(2–3), 603–611.
332
+
333
+ Anderberg, D., & Balestrino, A. 2008. The political economy of post-compulsory education policy with endogenous credit constraints (CESifo Working Paper Series 2304). Munich; CESifo Group.
334
+
335
+ Banerjee, A. V., & Newman, A. F. (1991). Risk-bearing and the theory of income distribution. *Review of Economic Studies*, 58(2), 211–235.
336
+ ---PAGE_BREAK---
337
+
338
+ Becker, G. S., & Tomes, N. (1979). An equilibrium theory of the distribution of income and intergenerational mobility. *Journal of Political Economy*, **87**(6), 1153-1189.
339
+
340
+ Becker, G. S. (1986). Human capital and the rise and fall of families. *Journal of Labor Economics*, **4**(3), 1-39.
341
+
342
+ Becker, G. S. (1993). *Human capital: A theoretical and empirical analysis with special reference to education* (3rd ed.). Chicago, IL: University of Chicago Press.
343
+
344
+ Benhabib, J., & Spiegel, M. M. (1994). The role of human capital in economic development evidence from aggregate cross-country data. *Journal of Monetary Economics*, **34**(2), 143-173.
345
+
346
+ Bentaouet Kattan, R. (2006). *Implementation of free basic education policy* (World Bank Education Working Papers Series No. 7).
347
+
348
+ Borck, R., & Wimbersky, M. (2014). Political economics of higher education finance. *Oxford Economic Papers*, **66**(1), 115-139.
349
+
350
+ Clementi, F., & Gallegati, M. (2005). Pareto's law of income distribution: Evidence for Germany, the United Kingdom, and the United States. In A. Chatterjee, S. Yarlagadda, & B. K. Chakrabarti (Eds.), *Econophysics of wealth distributions* (pp. 3-14). Milano: New Economic Windows, Springer.
351
+
352
+ Creedy, J., & Francois, P. (1990). Financing higher education and majority voting. *Journal of Public Economics*, **43**(2), 181-200.
353
+
354
+ Cunha, F., Heckman, J., Lochner, L. J., & Masterov, D. V. (2006). Interpreting the evidence on life cycle skill formation. In E. A. Hanushek, & F. Welch (Eds.), *Handbook of the Economics of Education* (pp. 697-812). Amsterdam: North-Holland.
355
+
356
+ Cunha, F., & Heckman, J. (2007). The technology of skill formation. *American Economic Review*, **97**(2), 31-47.
357
+
358
+ De Fraja, G. (2001). Education policies: Equity, efficiency and voting equilibrium. *Economic Journal*, **111**(471), 104-119.
359
+
360
+ Docquier, F. (2004). Income distribution, non-convexities and the fertility: Income relationship. *Economica*, **71**(282), 261-273.
361
+
362
+ Epple, D., & Romano, R. E. (1996). Ends against the middle: Determining public service provision when there are private alternatives. *Journal of Public Economics*, **62**(3), 297-325.
363
+
364
+ Fernandez, R., & Rogerson, R. (1995). On the political economy of education subsidies. *Review of Economic Studies*, **62**(2), 249-262.
365
+
366
+ Galor, O. (2000). Income distribution and the process of development. *European Economic Review*, **44**(4-6), 706-712.
367
+
368
+ Gibrat, R. 1931. *Les Inégalités Économiques*. Paris: Librairie du Recueil Sirey.
369
+
370
+ Glomm, G., & Ravikumar, B. (1992). Public versus private investment in human capital: Endogenous growth and income inequality. *Journal of Political Economy*, **100**(4), 818-834.
371
+
372
+ Hanushek, E. A., & Kimko, D. D. (2000). Schooling, labor-force quality, and the growth of nations. *American Economic Review*, **90**(5), 1184-1208.
373
+
374
+ Heckman, J. J. (2008). Schools, skills and synapses. *Economic Inquiry*, **46**(3), 289-324.
375
+
376
+ Heckman, J. J., & Masterov, D. V. (2007). The productivity argument for investing in young children. *Review of Agricultural Economics*, **29**(3), 446-493.
377
+
378
+ Kohlberg, E. (1976). A model of economic growth with altruism between generations. *Journal of Economic Theory*, **13**(1), 1-13.
379
+
380
+ Larkin, J., & Staton, P. (2001). Access, inclusion, climate, empowerment (AICE): A framework for gender equity in market-driven education. *Canadian Journal of Education*, **26**(3), 361-376.
381
+
382
+ Loury, G. C. (1981). Intergenerational transfers and the distribution of earnings. *Econometrica*, **49**(4), 843-867.
383
+
384
+ Moav, O. (2002). Income distribution and macroeconomics: The persistence of inequality in a convex technology framework. *Economics Letters*, **75**(2), 187-192.
385
+
386
+ Neal, D., & Rosen, S. (2000). Theories of the distribution of earnings. In A. B. Atkinson, & F. Bourguignon (Eds.), *Handbook of Income Distribution* (Vol. 1, pp. 379-427). Amsterdam: Elsevier North-Holland.
387
+
388
+ Samoff, J. (1996). Which priorities and strategies for education? *International Journal of Educational Development*, **16**(3), 249-71.
389
+
390
+ Sleebos, J. (2003). Low fertility rates in OECD countries: Facts and policy responses (OECD Labour Market and Social Policy Occasional Papers No. 15).
391
+ ---PAGE_BREAK---
392
+
393
+ **How to cite this article:** Correa JA, Lu Y, Parro F, Villena M. Why is free education so popular? A political economy explanation. *Journal of Public Economic Theory*. 2019;1–19.
394
+ https://doi.org/10.1111/jpet.12396
395
+
396
+ APPENDIX
397
+
398
+ In this appendix, we formally prove that our main result holds for democracies with a limited
399
+ degree of either elitism or populism. Consider first the percentiles of the income distribution in
400
+ which the agents with human capital $h^{S2,S1}$, $h^{S2,S3}$, and $h^{S2,S4}$ are located. These agents are
401
+ indifferent between the funding systems in the corresponding pairwise elections analyzed in
402
+ Section 3.2. The lognormal income distribution implies that an agent with income $h_P^i$ is located
403
+ in the $\Phi((\ln h_P^i - \mu)/\sigma) \times 100\%$ percentile of the income distribution, where $\Phi$ is the
404
+ cumulative function of the standard normal distribution. For instance, an agent with income
405
+ $h_P^i = \exp(\mu)$ is in the $\Phi(0) \times 100\%$ = 50th percentile of the income distribution. Let
406
+ $p^{S\alpha,S\beta} \times 100\%$ be the income percentile of an agent with income $h^{S\alpha,S\beta}$. Then, Equations
407
+ (18)–(20) imply
408
+
409
+ $$p^{S2,S1} = \Phi\left(\frac{\sigma}{2}\right), \qquad (A1)$$
410
+
411
+ $$p^{S2,S3} = \Phi\left(-\frac{\delta\sigma}{2\gamma}\right), \qquad (A2)$$
412
+
413
+ $$p^{S2,S4} = \Phi\left(\frac{\delta\sigma}{2(1-\gamma)}\right). \qquad (A3)$$
414
+
415
+ We now use Equations (A1)–(A3) to examine whether the ex ante egalitarian public education system (S2) remains the Condorcet winner in democracies with some degree of elitism or populism.
416
+
417
+ In Section 3.2, we concluded that all agents with an income below $h^{S2,S1}$ prefer the ex ante egalitarian public education system (S2) over the private system (S1) in a pairwise election. Thus, Equation (A1) implies that $\Phi(\sigma/2) \times 100\% > 50\%$ of voters prefer S2. Suppose an elitist democracy excludes a fraction $x$ of the poorest agents from voting. We can compute the $x$ such that S2 is still the winner of the {S2, S1} election¹⁰:
418
+
419
+ $$\frac{\Phi(\sigma/2) - x}{1-x} > 0.5. \qquad (A4)$$
420
+
421
+ Therefore, an elitist democracy that excludes less than $\tilde{x}^1 = 2(\Phi(\sigma/2) - 0.5)$ of the poorest agents still votes for the ex ante egalitarian public education system (S2) in the pairwise election {S2, S1}.
422
+ ---PAGE_BREAK---
423
+
424
+ We proceed analogously for the other two pairwise elections: {$S2, S3$} and {$S2, S4$}. As shown in Section 3.2, all agents with an income above $h^{S2,S3}$ prefer the ex ante egalitarian public education system (S2) over the ex post egalitarian public education system (S3) in a one-on-one election. Then, Equation (A2) implies that $(1 - \Phi(-\delta\sigma/2\gamma)) \times 100\% > 50\%$ of voters prefer S2 to S3. Then, we can use an equation analogous to (A4) to derive the fraction of the richest agents that could be excluded from voting without affecting the selection of S2 in the {$S2, S3$} comparison:
425
+
426
+ $$ \frac{1 - \Phi(-\delta\sigma/2\gamma) - z}{1 - z} > 0.5. \quad (A5) $$
427
+
428
+ Therefore, a populist democracy that excludes less than $\bar{z} = 2(0.5 - \Phi(-\delta\sigma/2\gamma))$ of the richest agents still elects S2 over S3.
429
+
430
+ Lastly, we know from Section 3.2 that all agents with an income level below $h^{S2,S4}$ prefer the ex ante egalitarian public education system (S2) over the output maximizing system (S4) in a one-on-one election. Therefore, Equation (A3) implies that $\Phi(\delta\sigma/(2(1-\gamma))) \times 100\% > 50\%$ of the voters vote for S2. The equation analogous to (A4) is
431
+
432
+ $$ \frac{\Phi(\delta\sigma/(2(1-\gamma))) - x}{1-x} > 0.5. \quad (A6) $$
433
+
434
+ Thus, from Equation (A6) we conclude that an elitist democracy that excludes less than $\tilde{x}^2 = 2(\Phi(\delta\sigma/(2(1-\gamma))) - 0.5)$ of the poorest agents still selects the ex ante egalitarian public education system (S2) in the one-on-one election {$S2, S4$}.
435
+
436
+ We show now that the ex ante egalitarian public education system (S2) is still the Condorcet winner in democracies with a limited degree of elitism and populism. Consider first an elitist democracy that excludes less than $\min\{\tilde{x}^1, \tilde{x}^2\}$ of the poorest agents of the economy. By construction, the ex ante egalitarian public education system (S2) wins the pairwise elections {$S2, S1$} and {$S2, S4$}. Moreover, the ex post egalitarian public education system (S3) invests more resources in students from low-income families. Thus, the fact that S2 is preferred to S3 in a complete democracy immediately implies that S2 is also selected when a number of the poorest agents do not participate in politics. Formally, the political support for system S2 in the {$S2, S3$} election when $x$ of the poorest agents are excluded from voting is $(1 - \Phi(-\delta\sigma/2\gamma))/(1-x) \times 100\%$. We have already established that in a complete democracy ($x=0$), $(1 - \Phi(-\delta\sigma/2\gamma)) \times 100\% > 50\%$. Since $((1 - \Phi(-\delta\sigma/2\gamma))/(1-x)) \times 100\% > (1 - \Phi(-\delta\sigma/2\gamma)) \times 100\% > 50\%$ for any positive value of $x$, it follows that S2 will also be selected in the {$S2, S3$} election within an incomplete democracy that excludes less than $\min\{\tilde{x}^1, \tilde{x}^2\}$ of the poorest agents. Hence, S2 remains the Condorcet winner even if a fraction of the poorest agents do not participate in elections.
437
+
438
+ Similarly, consider a populist democracy that excludes less than $\bar{z}$ of the richest agents. By construction, the ex ante egalitarian public education system (S2) wins the {$S2, S3$} election. In addition, we know that systems S1 and S4 invest more resources in students from richer families, which makes these funding systems especially popular among the richest agents. We have shown that system S2 wins the one-on-one elections {$S2, S1$} and {$S2, S4$} in the context of a complete democracy. Then, it will also win in an incomplete democracy that excludes a fraction of the richest agents. Formally, the political support for system S2 in the {$S2, S1$} and {$S2, S4$} elections when a fraction $z$ of the richest agents are excluded from voting is $((\Phi(\sigma/2))/(1-z)) \times 100\%$
439
+ ---PAGE_BREAK---
440
+
441
+ and ((Φ(δσ/(2(1 − γ))))/(1 − z)) × 100%, respectively. We have already established that in
442
+ complete democracies (z = 0), Φ(σ/2) × 100% > 50% and Φ(δσ/(2(1 − γ))) × 100% > 50%.
443
+ These two conditions imply that ((Φ(σ/2))/(1 − z)) × 100% > 50% and ((Φ(δσ/
444
+ (2(1 − γ))))/(1 − z)) × 100% > 50%, for any positive fraction z. Thus, S2 wins the pairwise
445
+ elections {S2, S1} and {S2, S4} in a populist democracy that excludes less than $\bar{z}$ of the richest agents.
446
+ Hence, S2 remains the Condorcet winner even if a fraction of the richest agents do not participate in
447
+ elections.
samples/texts_merged/2865847.md ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Join Decompositions for Efficient Synchronization
5
+ of CRDTs after a Network Partition
6
+
7
+ [Work in progress report]
8
+
9
+ Vitor Enes
10
+
11
+ Carlos Baquero
12
+
13
+ Paulo Sérgio Almeida
14
+
15
+ Ali Shoker
16
+
17
+ HASLab/INESC TEC and Universidade do Minho
18
+
19
+ ## Abstract
20
+
21
+ State-based CRDTs allow updates on local replicas without remote synchronization. Once these updates are propagated, possible conflicts are resolved deterministically across all replicas. $\delta$-CRDTs bring significant advantages in terms of the size of messages exchanged between replicas during normal operation. However, when a replica joins the system after a network partition, it needs to receive the updates it missed and propagate the ones performed locally. Current systems solve this by exchanging the full state bidirectionally or by storing additional metadata along the CRDT. We introduce the concept of join-decomposition for state-based CRDTs, a technique orthogonal and complementary to delta-mutation, and propose two synchronization methods that reduce the amount of information exchanged, with no need to modify current CRDT definitions.
22
+
23
+ ## 1. Introduction
24
+
25
+ The concept of Conflict-free Replicated Data Type (CRDT) was introduced in (Shapiro et al. 2011) and presents two flavors of CRDTs: state-based and operation-based. A state-based CRDT can be defined as a triple $(S, \sqsubseteq, \sqcup)$ where $S$ is a join-semilattice, $\sqsubseteq$ its partial order, and $\sqcup$ is a binary join operator that derives the least upper bound for every two elements of $S$.
26
+
27
+ With $\delta$-CRDTs (Almeida et al. 2016), every time a replica performs an update, it will only send the information needed to reflect this update in other replicas, with the anti-entropy algorithm keeping at each node metadata tracking which deltas still need to be propagated to current peers. However, after a long partition, such metadata is discarded. In this situation, when a replica goes online again, the other remote replicas typically send their full state so this replica sees the updates it missed.
28
+
29
+ (Linde et al. 2016) introduces the concept of $\Delta$-CRDTs where replicas exchange metadata used to calculate a $\Delta$ that reflects the missed updates. As this metadata is typically smaller than the full state, less is demanded from the network. In this approach CRDTs need to be extended to maintain the additional metadata for $\Delta$ derivation, and if this metadata needs to be garbage collected the mechanism will fall-back to standard full state transmission.
30
+
31
+ In this paper we will present a mechanism that does not add additional metadata to standard state-based CRDTs, but instead is able to decompose the state into smaller states than can be selected and grouped in a $\Delta$ for efficient transmission.
32
+
33
+ ## 1.1 Problem Statement
34
+
35
+ Consider replica *A* with state *a* and replica *B* with state *b*, which at some point stop disseminating updates but keep updating their local state. When these replicas go online, what should replica *A* send to replica *B* so that *B* sees the updates performed on *a* since they stopped communicating? We could try to find *c* such that:
36
+
37
+ $$a = b \sqcup c$$
38
+
39
+ but if both replicas performed updates while they were offline, their states are concurrent, and there's no such *c*. (We say two states *a* and *b* are concurrent if *a* is not less than *b* and *b* is not less than *a* in the partial order: $a \parallel b \iff a \not\sqsubseteq b \land b \not\sqsubseteq a$.) The trick is how to find *c* ($\Delta$ from now on) which reflects the updates in the join of *a* and *b* still missing in *b*:
40
+
41
+ $$a \sqcup b = b \sqcup \Delta$$
42
+
43
+ The trivial example would be $\Delta = a$, but we would like to send less information than the full state. So, how can replica *A* calculate a smaller $\Delta$ to be sent to replica *B*, reflecting the missed updates?
44
+
45
+ ## 1.2 Contributions
46
+
47
+ Firstly, we introduce the concept of join-decomposition for state-based CRDTs, a technique orthogonal and complementary to delta-mutation. Then, we propose two synchronization techniques. *State Driven*: replica *B* sends its full state *b* to replica *A* and replica *A* is able to derive $\Delta$. *Digest Driven*: replica *B* sends some information about its state *b*, smaller than *b* itself, but enough to allow replica *A* to compute $\Delta$.
48
+
49
+ ## 2. Join Decompositions
50
+
51
+ We now explain how the concept of join-decomposition (Birkhoff 1937) can be applied to state-based CRDTs. Given state $r \in S$, we say that $D \in \mathcal{P}(S)$ is a join-decomposition of $r$ if:
52
+
53
+ $$\sqcup D = r \qquad (i)$$
54
+
55
+ $$\forall s \in D \cdot \sqcup (D \setminus \{s\}) \sqsubset r \qquad (ii)$$
56
+
57
+ Property (i) states that the join of all elements in a join-decomposition of $r$ should be $r$. Property (ii) says that each element in a join-decomposition is not redundant: joining the remaining elements is not enough to produce $r$.
58
+ ---PAGE_BREAK---
59
+
60
+ We are interested in decompositions made up of “basic” irreducible elements. An element $s$ is join-irreducible if it cannot result from a join of two elements other than itself, i.e.:
61
+
62
+ $$t \sqcup u = s \Rightarrow t = s \lor u = s$$
63
+
64
+ We say $D$ is a join-irreducible decomposition if $D$ is a join-decomposition and:
65
+
66
+ $$\forall s \in D \cdot s \text{ is join-irreducible} \qquad (iii)$$
67
+
68
+ States in common CRDTs typically have join-irreducible decompositions, and we now present some examples of decomposition functions, which take a state and return a join-irreducible decomposition.
69
+
70
+ ## 2.1 Example Decompositions
71
+
72
+ A GCounter is a simple replicated counter where its value can only increase (Almeida et al. 2016). It is represented as a map from ids to naturals, i.e., $GCounter = I \hookrightarrow N$, and each replica can only increase the value of the counter in its position of the map. The value of the counter is the sum of all increments. For example, $p = \{A \mapsto 3, B \mapsto 5\}$ means replica A has incremented the counter three times, replica B five times, hence the value is eight. For each state $s$, a join-irreducible decomposition can be obtained by function:
73
+
74
+ $$D^{GCounter}(s) = \{\{i \mapsto v\} | (i, v) \in s\}$$
75
+
76
+ The decomposition for the GCounter $p$ above would be $\{\{A \mapsto 3\}, \{B \mapsto 5\}\}$.
77
+
78
+ To allow both increments and decrements we can compose two GCounter by pairing them (Baquero et al. 2015) and we have a PNCounter $= (I \hookrightarrow N) \times (I \hookrightarrow N)$. Join-irreducible decompositions can be obtained through:
79
+
80
+ $$D^{PNCounter}((p,n)) = \{(\{i \mapsto v\}, \{\}) \mid (i,v) \in p\} \\ \cup \{(\{\}, \{i \mapsto v\}) \mid (i,v) \in n\}$$
81
+
82
+ As a final example, an Add-Wins set has state $\mathit{AWSet} = (E \hookrightarrow \mathcal{P}(D)) \times \mathcal{P}(D)$. This CRDT is a pair where the first component is a map (from element, in $E$, to a set of supporting dots (unique event identifiers), in $\mathcal{P}(D)$) and the second component is a causal context represented as a set of dots $\mathcal{P}(D)$ (Almeida et al. 2016). When an element is added to the set, a new entry in the map is created, if needed, mapping this element to a new dot, and current dots for the element, if any, are discarded. This new dot is also added to the causal context. To remove an element, we remove its entry from the map. An example for this data type where two elements $(x$ and $y)$ were added and another (initially marked with unique dot $a2$) was removed is $s = (\{x \mapsto \{a1\}, y \mapsto \{b1, c1\}\}, \{a1, a2, b1, c1\})$. (The *range* function `rng` returns all sets of supporting dots in the mapping.) The join-irreducible decomposition of state $(m, c)$ can be obtained through function:
83
+
84
+ $$D^{\mathit{AWSet}}((m,c)) = \{(\{e \mapsto \{d\}\}, \{d\}) \mid (e,s) \in m, d \in s\} \\ \cup \{(\{\}, \{d\}) \mid d \in c \setminus \bigcup \mathrm{rng}\, m\}$$
85
+
86
+ The join-irreducible decomposition for the state $s$ above is:
87
+
88
+ $$\{(\{x \mapsto \{a1\}\}, \{a1\}), \\ (\{y \mapsto \{b1\}\}, \{b1\}), \\ (\{y \mapsto \{c1\}\}, \{c1\}), \\ (\{\}, \{a2\})\}$$
89
+
90
+ ## 3. Efficient Synchronization
91
+
92
+ **State Driven** The State Driven approach can be applied to all state-based CRDTs as long as we have a corresponding join-decomposition. We define $\min^\Delta : S \times S \to S$ as a function that given two states (the local state $a$ and the remote replica state $b$) will produce a $\Delta$. Join-irreducible decompositions will in general produce smaller $\Delta$s. Let $D : S \to \mathcal{P}(S)$ be a function that produces a join-decomposition.
93
+
94
+ $$\min^{\Delta}(a, b) = \bigsqcup\{s \mid s \in D(a) \land b \sqsubset b \sqcup s\}$$
95
+
96
+ This $\min^\Delta$ function joins all $s$ in the local state join-decomposition that strictly inflate the remote state. If the local replica ships the resulting $\Delta$, to be joined to the remote replica, and joins the state received from the remote replica to its local state, both these replicas will reach convergence (if in the meantime no new update was performed).
97
+
98
+ **Digest Driven** With the Digest Driven approach we achieve the same results of State Driven but by exchanging less information. We re-define $\min^\Delta : S \times M \to S$ as a function that given the local state $a$ and some digest $m$ related to the remote state will produce a $\Delta$.
99
+
100
+ $$\min^{\Delta}(a,m) = \bigsqcup\{s \mid s \in D(a) \land \inf(s,m)\}$$
101
+
102
+ This digest will be data-type specific, which means that $\min^\Delta$ will use a type-specific function $\inf(s,m)$ to check if $s$ inflates the remote state summarized by the received digest $m$.
103
+
104
+ A digest extraction function digest: $S \to M$ and the inflation test $\inf: S \times M \to B$ for the causal $\mathit{AWSet}$ CRDT can be defined as:
105
+
106
+ $$\begin{align*}
107
+ \operatorname{digest}^{\mathit{AWSet}}((m,c)) &= (\bigcup \operatorname{rng} m, c) \\
108
+ \operatorname{inf}^{\mathit{AWSet}}((e,\{d\}), (a,c)) &=
109
+ \begin{cases}
110
+ T & \text{if } d \notin c \lor (e = \{\} \land d \in a) \\
111
+ F & \text{otherwise}
112
+ \end{cases}
113
+ \end{align*}$$
114
+
115
+ The function digest<sup>AWSet</sup> returns a pair where the first component is the set of active dots (the supporting dots of elements that were added and not yet removed) and the second component is the full causal context. The inflation check $\inf_{\mathit{AWSet}}$ will return $T$ for $s \in D(a)$ if the dot in $s$ has not been seen in the other replica or $s$ represents a removed element (i.e., $(\{\}, \{d\})$) that has been added and not yet removed in the other replica ($d$ is still in the active dots).
116
+
117
+ If the Digest Driven technique is performed bidirectionally and no updates occurred, both replicas will converge (otherwise, they can still be collected separately in a dedicated buffer for further transmission).
118
+
119
+ ## References
120
+
121
+ P. S. Almeida, A. Shoker, and C. Baquero. Delta State Replicated Data Types. CoRR, abs/1603.01529, 2016. URL http://arxiv.org/abs/1603.01529.
122
+
123
+ C. Baquero, P. S. Almeida, A. Cunha, and C. Ferreira. Composition of State-based CRDTs. 2015.
124
+
125
+ G. Birkhoff. Rings of sets. Duke Math. J., 3(3):443–454, 1937.
126
+
127
+ A. Linde, J. Leitão, and N. Preguiça. Δ-CRDTs: Making δ-CRDTs Delta-Based. PaPoc 2016, 2016.
128
+
129
+ M. Shapiro, N. Preguiça, C. Baquero, and M. Zawirski. Conflict-free Replicated Data Types. Technical Report RR-7687, July 2011. URL http://hal.inria.fr/inria-00609399/en/.
samples/texts_merged/2918349.md ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ Maneuvering Multi-Target Tracking Algorithm Based on
5
+ Modified Generalized Probabilistic Data Association
6
+
7
+ Zhentao Hu¹, Chunling Fu², Xianxing Liu¹
8
+
9
+ ¹College of Computer and Information Engineering, Henan University, Kaifeng, China
10
+
11
+ ²Basic Experiments Teaching Center, Henan University, Kaifeng, China
12
+
13
+ E-mail: hzt@henu.edu.cn
14
+
15
+ Received July 8, 2011; revised October 1, 2011; accepted November 1, 2011
16
+
17
+ **Abstract**
18
+
19
+ Aiming at the problem of strong nonlinearity and effective echo confirmation of multi-target tracking systems in clutter environments, a novel maneuvering multi-target tracking algorithm based on modified generalized probabilistic data association is proposed in this paper. In view of the advantage of the particle filter, which can deal with nonlinear and non-Gaussian systems, it is introduced into the framework of generalized probabilistic data association to calculate the residual and residual covariance matrices, and the interconnection probability is further optimized. On that basis, the dynamic combination of the particle filter and the generalized probabilistic data association method is realized in the new algorithm. The theoretical analysis and experimental results show the filtering precision is obviously improved with respect to the traditional method using a suboptimal filter.
20
+
21
+ **Keywords:** Multi-Target Tracking, Particle Filter, Generalized Probabilistic Data Association, Clutters
22
+
23
+ # 1. Introduction
24
+
25
+ In actual engineering applications, maneuvering multi-target tracking in clutters is always one of hottest and most difficult issues in target tracking studies, which could be solved by means of the two key technologies including filter design and data association. In recent years, the growth of computational power has made computer intensive statistical methods feasible. Based on the technique of sequential importance sampling and the recursive Bayesian filter principle, particle filter (PF) is particularly useful in dealing with nonlinear and non-Gaussian problems, and it can achieve the minimum variance estimation in theory [1-3]. Because of the above advantages, PF has been widely applied in many fields, such as signal processing, target tracking, fault diagnosis and image processing, et al. In data association methods, some novel solutions have been proposed to implement effective echo validation in clutters, mainly based on Bayesian estimation theory, evidential reasoning theory, and such intelligence calculation as fuzzy theory, neural networks and genetic evolution [4-7]. Where data association algorithms based on Bayesian estimation theory are the mainstream, of which probabilistic data association (PDA) and joint probabilistic data association (JPDA)
26
+
27
+ proposed by Bar-Shalom, et al., are always considered as the superior methods to solve the single target tracking and the multi-target tracking [8,9]. Two basic principles are applied in JPDA. One is that every measurement derives from unique target, the other is that observation deriving from one target is not more than one. Some scholars have attempted to replace the suboptimal filter by PF in JPDA, and the results show the tracking precision is obviously improved.
28
+
29
+ It has set the higher requirement for modern tracking and monitoring system, since the existence of various natural and artificial disturbance, the application of the penetration technology of large batches targets and the improvement of target maneuverability and control properties in the modern war environment, cause much denser formation and cross motion, which leads to the strong fuzzy and uncertainty of obtained data. When targets maneuvers with cross motion and denser formation, sensors are likely to regard many observations coming from different planes as one observation. In addition, with the improvement of resolution ratio of radar, the phenomenon, the many observations corresponding to one target, often arise from the multipath effect of observation and systematic error of networking radar. In these cases, the one-to-one correspondence rules between observation
30
+ ---PAGE_BREAK---
31
+
32
+ and target is not coincident with actual facts. Quan P. et al. break the feasibility-based rule in JPDA and give the definition of generalized joint event and generalized event, and propose a new method of partition and combination about them. On this basis, the generalized probabilistic data association (GPDA) is proposed on account of Bayesian estimation criteria. Theoretical analysis and simulation results for various kinds of typical environment show that the filtering precision and real time of GPDA are superior to JPDA. However, the application of suboptimal filters in GPDA inevitably causes the filtering precision to be limited by the adverse effects of the strong nonlinearity of the tracking system [10,11].
33
+
34
+ According to the analysis above, through the dynamic combination of particle filter and generalized probabilistic data association, a novel maneuvering multi-target tracking algorithm based on modified generalized probabilistic data association in clutters is proposed. Experimental results show the feasibility and validity of the algorithm.
35
+
36
+ ## 2. Particle Filter
37
+
38
+ The problem of state estimation can be solved by calculating the posterior probability density function $p(x_k | z_{1:k})$ of the state variable $x_k$ at time k based on all the available data of observation sequence $z_{1:k} = \{z_1, z_2, ..., z_k\}$. Because the complete information of sequential estimation is in $p(x_k | z_{1:k})$, some parameters which system state estimation need can be obtained, such as mean and variance, etc. The concrete implementation is to approximate $p(x_k | z_{1:k})$ with particles in PF, and the mathematical description is written as
39
+
40
+ $$p(x_k | z_{1:k}) \approx \sum_{i=1}^{N} \delta(x_k - x_k^i)/N \quad (1)$$
41
+
42
+ where $\delta(\cdot)$ is Dirac's delta function. $x_k^i$ represents a particle used in the estimated system, which is sampled directly from $p(x_k | z_{1:k})$. However, $p(x_k | z_{1:k})$ is unknown generally, and the above process is often impossible to implement. The difficulty can be circumvented by sampling particles $\{x_k^i, \omega_k^i\}_{i=1}^N$ with associated importance weights from a known and easy-to-sample proposal distribution $q(x_k | z_{1:k})$. The process is described as the importance sampling. Where the associated importance weights of particle is defined as
43
+
44
+ $$\omega_k^i \propto p(x_k^i | z_{1:k}) / q(x_k^i | z_{1:k}) \quad (2)$$
45
+
46
+ To depict further the generation of $x_k^i$, the proposal distribution $q(x_k | z_{1:k})$ is factorized as follows
47
+
48
+ $$q(x_k | z_{1:k}) = q(x_k | x_{k-1}, z_{1:k}) q(x_{k-1} | z_{1:k-1}) \quad (3)$$
49
+
50
+ It is known that $x_k^i$ is sampled by augmenting each $x_{k-1}^i$ sampled from the proposal distribution $q(x_{k-1} | z_{1:k-1})$ with the new state sampled from $q(x_k | x_{k-1}, z_{1:k})$. In order to obtain the recursive equation of particle weights $\omega_k^i$, $p(x_k | z_{1:k})$ is expressed in terms of $p(z_k | x_k)$, $p(x_k | x_{k-1})$ and $p(x_{k-1} | z_{1:k-1})$. Noting that
51
+
52
+ $$\begin{align}
53
+ p(x_k | z_{1:k}) &= p(z_k | x_k, z_{1:k-1}) p(x_k | z_{1:k-1}) / p(z_k | z_{1:k-1}) \tag{4} \\
54
+ &\propto p(z_k | x_k) p(x_k | x_{k-1}) p(x_{k-1} | z_{1:k-1})
55
+ \end{align}$$
56
+
57
+ Under assumptions that states subject to a Markov process and the observations are conditionally independent, and combining with Equations (2)-(4), the particle weights is given by
58
+
59
+ $$\omega_k^i = \omega_{k-1}^i p(z_k | x_k^i) p(x_k^i | x_{k-1}^i) / q(x_k^i | x_{k-1}^i, z_{1:k}) \quad (5)$$
60
+
61
+ In the practical application, the proposal distribution is commonly selected as
62
+
63
+ $$q(x_k^i | x_{k-1}^i, z_{1:k}) = p(x_k^i | x_{k-1}^i) \quad (6)$$
64
+
65
+ Substituting Equation (6) into Equation (5), the particle weights update equation can then be shown to be
66
+
67
+ $$\omega_k^i = \omega_{k-1}^i p(z_k | x_k^i) \quad (7)$$
68
+
69
+ Then $\omega_k^i$ is normalized before the re-sampling stage, and $\omega_i^j$ denotes normalized weights. The key idea of re-sampling is to eliminate particles that have small weights and to duplicate particles with large weights, under the conditions of the total particles number invariant. A set of new particles $\{x_k^j, \omega_k^j\}_{j=1}^N$ are sampled after the re-sampling stage. According to Monte Carlo simulation technology, state estimation can be ultimately achieved by calculating the arithmetic mean of $\{x_k^j, \omega_k^j\}_{j=1}^N$. At present, re-sampling methods are mainly in the following categories: the residual re-sampling, the system re-sampling, the polynomial re-sampling, etc. That is standard particle filter and also known as bootstrap filter.
70
+
71
+ ## 3. Maneuvering Multi-Target Tracking Algorithm Based on Modified Generalized Probabilistic Data Association
72
+
73
+ Data association is one of the key technologies in multi-target tracking, because it directly affects on the whole performance of tracking system. Based on the multiplexing principle with observation and target, GPDA is considered as a kind of better echo confirmation method. It is known that the construction of GPDA completely adopts the framework of Kalman filter, thus GPDA lacks the effective processing ability for strong nonlinear cases.
74
+ ---PAGE_BREAK---
75
+
76
+ For nonlinear system, the extended Kalman filter (EKF) can directly replace KF, but the filtering precision of EKF sometimes is hard to meet the practical needs. Considering that PF and GPDA can effective treat strong nonlinear problem and echo confirmation, respectively. In this section, we give the generalized probabilistic data association based on particle filter (GPDA-PF) in clutters is proposed.
77
+
78
+ ### 3.1. Generalized Probabilistic Data Association
79
+
80
+ Considering *T* targets move in radar scanning region, the observations consist of the real measurement and clutters in each sample time. The state equation and observation equation of the *t*-th target is modeled as the following form.
81
+
82
+ $$x_k^t = f^t(x_{k-1}^t, u_{k-1}^t) \quad t = 1, 2, \dots, T \qquad (7)$$
83
+
84
+ $$z_{k,m} = h(x_k^t, v_k) \quad m = 1, 2, \dots, M \qquad (8)$$
85
+
86
+ where $x_k^t$ and $z_{k,m}$ denote the unknown state vector of $t$-th target and $m$-th observation vector at time $k$, respectively. $f^t(\cdot)$ and $h(\cdot)$ denote the evolution function of state and observation, respectively. System noise $u_k^t$ and observation noise $v_k$ are subject to white noise sequence, respectively, and meet independently identically distribution. Let $\bar{z}_k = \{z_{k,1}, z_{k,2}, \dots, z_{k,M}\}$ denotes the candidate echo set that fall into correlation window at time $k$. Different from feasibility-based rule in JPDA, GPDA adopts the following rules. Firstly, each target has possessed observations (one or more, including zero observation). Secondly, each observation originates from targets (one or more, including zero target). Thirdly, the probability corresponding to any target (observation) and observation (target) should be not less than the other correlated events probability in last two rules. Here, the zero target refers to no target, but it may be the new target of target concerned outside or the false object from interferences or clutters. The zero observation refers to no observation, namely target is not detected.
87
+
88
+ The first rule shows that observations can be multiplexed when target is considered as a benchmark, which is mainly used to solve the association problem between one target and multiple observations. The second rule shows that target can be multiplexed when observation is considered as a benchmark, which is mainly used to solve the association problem between one observation target and multiple targets. The third rule shows that the probability of one-to-one correlated events is dominant among all the correlated events assumed. To calculate interconnected probability in GPDA, the generalized joint events set $\mathcal{O}$ and poly-probability matrix *D* are defined as follows.
89
+
90
+ $\mathcal{O}_i$ and $\mathcal{O}_m$ denote generalized events subset which meet the first rule and the second rule, respectively. $d_{m,i}$ denotes statistical distance between the $m$-th observation and the $i$-th target.
91
+
92
+ $$L_{m,t} = \begin{cases} P_G^{-1} |2\pi S_k^t|^{-1/2} \exp\left[-\frac{1}{2}(\nu_{k,m}^t)^T (S_k^t)^{-1} \nu_{k,m}^t\right] & t \neq 0, m \neq 0 \\ (nV)^{-1}(1 - P_D P_G) & t \neq 0, m = 0 \\ \lambda & t = 0, m \neq 0 \\ 0 & t = 0, m = 0 \end{cases} \qquad (11)$$
93
+
94
+ $$v_{k,m}^t = z_{k,m}^t - \hat{z}_{k/k-1}^t \qquad (12)$$
95
+
96
+ $v_{k,m}^t$ and $S_k^t$ denote the residual and residual covariance matrices at time $k$, respectively. $z_{k,m}^t$ denotes the $m$-th confirmed echo from target, and $\hat{z}_{k/k-1}^t$ denotes the one-step state prediction of the $t$-th target. $V_k$ denotes the volume of correlation window. $P_G$ denotes the probability of true observations falling into the correlation window, and $P_D$ denotes the target detection probability, that is the complete detection probability of true observation. $V_k$ denotes the volume of correlation window and $n$ denotes coefficient and is usually taken as the positive integer. Assuming the false-alarm and the numbers of clutters are subject to the uniform distribution and the Poisson distribution, respectively. $\lambda$ denotes the space density of clutters, that is the expectation number of clutters in unit volume. The interconnection probability $\beta_{k,m}^t$ of the $m$-th confirmed echo is calculated as.
97
+
98
+ $$\beta_{k,m}^{t} = \frac{1}{c} \left( \varepsilon_{m,t} \prod_{tr=0}^{T} \sum_{r=0}^{M} \varepsilon_{r,tr} + \xi_{m,t} \prod_{r=0}^{M} \sum_{tr=0}^{T} \xi_{r,tr} \right) \qquad (13)$$
99
+
100
+ $$\varepsilon_{m,t} = l_{m,t} / \sum_{m=0}^{M} l_{m,t} \qquad (14)$$
101
+
102
+ $$\xi_{m,t} = l_{m,t} / \sum_{i=0}^{T} l_{m,i} \qquad (15)$$
103
+
104
+ $tr$ and $r$ denote the target index and the observation label, respectively. $c$ is the normalization coefficient.
105
+
106
+ ### 3.2. Generalized Probabilistic Data Association Based on Particle Filter
107
+
108
+ Firstly, particles are sampling from the proposal distribution on account of prior model information, and then one step observation prediction of particle $z_{k/k-1}^{i,t}$ and $S_k^t$ are calculated by the following equations.
109
+
110
+ $$x_k^{i,t} = f(x_{k-1}^{i,t}, u_{k-1}^t) \qquad (16)$$
111
+
112
+ $$z_{k/k-1}^{i,t} = h(x_k^{i,t}) \qquad (17)$$
113
+
114
+ $$\hat{z}_{k/k-1}^{i,t} = \sum_{i=1}^{N} z_{k/k-1}^{i,t} / N \qquad (18)$$
115
+ ---PAGE_BREAK---
116
+
117
+ $$S_k^t = \frac{\sum_{i=1}^{N} [z_{k/k-1}^{i,t} - \hat{z}_{k/k-1}^t] [z_{k/k-1}^{i,t} - \hat{z}_{k/k-1}^t]^T}{N} \quad (19)$$
118
+
119
+ Echo confirmation principle is realized by the following equation.
120
+
121
+ $$g = v_{k,m}^t S_{k,m}^t (v_{k,m}^t)^T \leq \gamma \quad (20)$$
122
+
123
+ where $\gamma$ denotes the threshold of $\chi^2$ hypothesis testing. Then $\beta_{k,m}^t$ of the confirmed echo $\bar{\zeta}_{k,m}^t$ is calculated by the poly-probability matrix $D$. The equivalent observation is solved by $\beta_{k,m}^t$, $\bar{\zeta}_{k,m}^t$ and $\hat{z}_{k/k-1}^t$.
124
+
125
+ $$\hat{z}_{k/k}^t = \hat{z}_{k/k-1}^t + \sum_{m=0}^{M} \beta_{k,m}^t (\bar{\zeta}_{k,m}^t - \hat{z}_{k/k-1}^t) \quad (21)$$
126
+
127
+ The likelihood score that particle is relative to $\hat{z}_{k/k}$, is used to measure particle weights, and then weights are normalized.
128
+
129
+ $$\hat{\sigma}_k^{i,t} = p(\hat{z}_{k/k}^t | x_k^{i,t}) / \sum_{j=1}^{N} p(\hat{z}_{k/k}^t | x_k^{j,t}) \quad (22)$$
130
+
131
+ The re-sampling is realized by normalized weights $\hat{\sigma}_k^{i,t}$, and $\{x_k^{j,t}\}_{j=1}^N$ are obtained. On the basis of Monte Carlo simulation principle, the state estimation of t-th target can be solved as follows.
132
+
133
+ $$\hat{x}_{k/k}^t = \sum_{j=1}^{N} x_k^{j,t} / N \quad (23)$$
134
+
135
+ ## 4. Simulation Results and Analysis
136
+
137
+ To illustrate the performance of GPDA-PF, the example of maneuvering target tracking based on two-coor-dinate radar is given. The target moves within the horizontal-vertical plane according to the standard second-order model.
138
+
139
+ $$X_k^t = FX_{k-1}^t + Gu_{k-1}^t, \quad t=1,2$$
140
+
141
+ $$z_k = [\sqrt{qt}[(x_k^t)^2 + (y_k^t)^2] \tan^{-1}(y_k^t/x_k^t)]^T + v_k$$
142
+
143
+ where $X_k^t = [x_k^t, \tilde{x}_k^t, y_k^t, \tilde{y}_k^t]^T$ denotes the state vector of the t-th target. $x_k^t$, $\tilde{x}_k^t$, $y_k^t$ and $\tilde{y}_k^t$ denote the position component and velocity component in the horizontal direction and the vertical direction, respectively. $F = [\begin{matrix} f_{cv} & f \\ f & f_{cv} \end{matrix}]$ denotes the system state transition matrices, $f_{cv} = [\begin{matrix} 1 & \tau \\ 0 & 1 \end{matrix}]$, and $f = [\begin{matrix} 0 & 0 \\ 0 & 0 \end{matrix}]$. $G = [\begin{matrix} 0 & 0 & \tau/2 & \tau \\ \tau/2 & \tau & 0 & 0 \end{matrix}]^T$ denotes the system noise matrix. $\tau=1s$ denotes the sampling time. $u_k^1$ and $u_k^2$ denote system noise vectors, and suppose they are subject to zero-mean Gaussian white noise with standard deviation $Q_k^1 = 0.15I$, $Q_k^2 = Q_k^1$. $v_k$ denotes the observation noise vector and suppose it is subject
144
+
145
+ to zero-mean Gaussian white noise process with standard deviation $[\begin{matrix} R_r & 0 \\ 0 & R_\theta \end{matrix}]$, here the noise standard deviations of radial distance component and azimuth angle component are $R_r = 0.1$ km and $R_\theta = 0.3^\circ$, respectively. $P_G = 0.97$, $P_D = 0.99$ and $\gamma=16$. $X_0^1 = [\begin{matrix} 2 & 0.2 & 2 & 0.2 \end{matrix}]^T$ and $X_0^2 = [\begin{matrix} 2 & 0.2 & 14 & -0.2 \end{matrix}]^T$ denote the actual initial states of two targets, and the negative sign of state vector denotes that targets move on the negative half shaft of X axis (horizontal direction) and Y axis (vertical direction). The number of Monte Carlo simulation is 50 and the number of particles is 1000, and the total simulation step T is 60s. In order to verify the effect of clutters for algorithm performances, two kinds of simulation results are compared when $\lambda$ is 0.002 and 0.0055, respectively. And the root mean square error is used as the performance evaluation index of algorithm precision, which is defined as RMSE = $\{ \sum_{\eta=1}^{Num} (X_k^t - \hat{X}_{k/k,\eta})^2 / Num \}^{1/2}$, where $X_k^t$ and $\hat{X}_{k/k,\eta}$ denote the true state value and the state estimation value of the t-th target in $\eta$ times Monte Carlo simulations at current time, respectively.
146
+
147
+ Two target trajectories and clutters distribution are given in Figure 1 under $\lambda=0.002$ and $\lambda=0.0055$. By 50 times Monte Carlo simulations, the comparison of
148
+
149
+ Figure 1. Trajectory of target and clutters distribution. (a) $\lambda = 0.002$; (b) $\lambda = 0.0055$.
150
+ ---PAGE_BREAK---
151
+
152
+ **Figure 2.** RMSE of position estimation of target 1. (a) Horizontal direction; (b) Vertical direction.
153
+
154
+ **Figure 3.** RMSE of position estimation of target 2. (a) Horizontal direction; (b) Vertical direction.
155
+
156
+ **Table 1.** The comparison of the mean of RMSE under $\lambda = 0.002$ and $\lambda = 0.0055$.
157
+
158
+ <table><thead><tr><th>Algorithm</th><th>GPDA-EKF</th><th>GPDA-PF</th></tr></thead><tbody><tr><td>Target 1 in X direction</td><td>0.0705/0.0728</td><td>0.0549/0.0570</td></tr><tr><td>Target 1 in Y direction</td><td>0.0712/0.0737</td><td>0.0556/0.0578</td></tr><tr><td>Target 2 in X direction</td><td>0.0744/0.0759</td><td>0.0626/0.0635</td></tr><tr><td>Target 2 in Y direction</td><td>0.0763/0.0801</td><td>0.0634/0.0640</td></tr></tbody></table>
159
+
160
+ the RMSE of state estimation based on GPDA-EKF and GPDA-PF under $\lambda = 0.0055$ are given in Figures 2 and 3. The data from Table 1 quantitatively show the mean of RMSE of state estimation, when $\lambda$ is 0.002 and 0.0055, respectively. According to the above comparison of RMSE, it is shown that the filter precision of GPDA-PF is superior to GPDA-EKF. In addition, the following conclusions can be drawn by the analysis of data from Table 1 with the increase of clutters number in tracking environment, the filter precision of two algorithms all decline, but the performance of GPDA-PF is always stably superior to GPDA-EKF. In general case, PF is used as filter can lead to the increase of computational complexity, and the simulation also gets the same result in this paper. However, the real time of algorithm has a close relationship with the number of particle and filtering initial value. When the prior information is better, namely, the filtering initial value is close to the real state of target or system model is more accurate, the real time of GPDA-PF is effectively improved. Based on the above results, PF will be extended into the maneuvering multi-target tracking in clutters, which is our next research direction.
161
+
162
+ ## 5. Conclusions
163
+
164
+ A novel maneuvering multi-target tracking algorithm based on modified generalized probabilistic data association in clutters is proposed in this paper. The new algorithm effectively improves the decline problem of filtering precision caused by system strong nonlinear and dense clutters environment. The theory analysis and simulation results show GPDA-PF has the following advantages relative to existing methods. Firstly, adopting the basis framework of PF, so it preserves the advantage to solve nonlinear and non-Gaussian problems. Secondly, the construction of GPDA-PF avoids the derivation of Jacobi matrix and the calculation of state prediction covariance matrix and state estimation covariance matrix when EKF is utilized, which make the algorithm simple and is easy to realize. Finally, the feasibility-based rule
165
+ ---PAGE_BREAK---
166
+
167
+ of GPDA is accord with the actual situation of modern
168
+ battlefield environment, which enhances the adaptability
169
+ of algorithm and improves the reliability and stability for
170
+ target tracking result.
171
+
172
+ 6. Acknowledgements
173
+
174
+ The project work is supported by the National Natural
175
+ Science Foundation of China (60972119, 61170243) and
176
+ the Science Technology Department Natural Science
177
+ Foundation of Henan Province (112102210196). In addi-
178
+ tion, we thank Dr. Yandong Hou and Prof. Quan Pan for
179
+ helpful discussions.
180
+
181
+ 7. References
182
+
183
+ [1] O. Cappe, S. J. Godsill and E. Moulines, "An Overview of Existing Methods and Recent Advances in Sequential Monte Carlo," *Proceedings of the IEEE*, Vol. 95, No. 5, 2007, pp. 899-924. doi:10.1109/JPROC.2007.893250
184
+
185
+ [2] M. S. Arulampalam, S. Maskell, N. Gordon, et al., "A Tutorial on Particle Filters for Online Nonlinear/Non-Gaussian Bayesian Tracking," *IEEE Transactions on Signal Processing*, Vol. 50, No. 2, 2002, pp. 174-188.
186
+ doi:10.1109/78.978374
187
+
188
+ [3] H. A. P. Blom and E. A. Bloem, "Exact Bayesian and Particle Filtering of Stochastic Hybrid Systems," *IEEE Transactions on Aerospace and Electronic Systems*, Vol. 43, No. 1, 2007, pp. 55-70.
189
+ doi:10.1109/TAES.2007.357154
190
+
191
+ [4] S. Puranik and J. K. Tugnait, "Tracking of Multiple Maneuvering Targets Using Multiscan JPDA and IMM Filtering," *IEEE Transactions on Aerospace and Electronic Systems*, Vol. 43, No. 1, 2007, pp. 23-35.
192
+ doi:10.1109/TAES.2007.357152
193
+
194
+ [5] H. X. Liu, Y. Liang, Q. Pan, et al., "A Multi-Path Viterbi Data Association Algorithm," *Acta Electronica Sinica*, Vol. 34, No. 3, 2006, pp. 1640-1644.
195
+
196
+ [6] R. L. Popp, K. R. Pattipati and Y. Bar-Shalom, "M-Best S-D Assignment Algorithm with Application to Multi-Target Tracking," *IEEE Transactions on Aerospace and Electronic Systems*, Vol. 37, No. 1, 2001, pp. 22-39.
197
+ doi:10.1109/7.913665
198
+
199
+ [7] H. L. Kennedy, "Comparison of MHT and PDA Track Initiation Performance," *International Conference on Radar*, Adelaide, 2-5 September 2008, pp. 508-512.
200
+ doi:10.1109/RADAR.2008.4653977
201
+
202
+ [8] M. Ekman, "Particle Filters and Data Association for Multi-Target Tracking," *The 11th International Conference on Information Fusion*, Cologne, 30 June-3 July 2008, pp. 1-8.
203
+
204
+ [9] Z. T. Hu, Q. Pan and F. Yang, "A Novel Maneuvering Multi-Target Tracking Algorithm Based on Multiple Model Particle Filter in Clutters," *High Technology Letters*, Vol. 17, No. 1, 2011, pp. 19-24.
205
+
206
+ [10] X. N. Ye, Q. Pan and Y. M. Cheng, "A New and Better Algorithm for Multi-Target Tracking in Dense Clutter," *Journal of Northwestern Polytechnical University*, Vol. 22, No. 3, 2004, pp. 388-391.
207
+
208
+ [11] Q. Pan, X. N. Ye and H. C. Zhang, "Generalized Probability Data Association Algorithm," *Acta Electronica Sinica*, Vol. 33, No. 3, 2005, pp. 467-472.
samples/texts_merged/3148538.md ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ A CORRECTION TO “THE CONNECTIVITY
5
+ STRUCTURE OF THE HYPERSPACES $C_\epsilon(X)$”
6
+
7
+ by
8
+ ERIC L. McDOWELL
9
+
10
+ Electronically published on February 19, 2009
11
+
12
+ Topology Proceedings
13
+
14
+ **Web:** http://topology.auburn.edu/tp/
15
+
16
+ **Mail:** Topology Proceedings
17
+ Department of Mathematics & Statistics
18
+ Auburn University, Alabama 36849, USA
19
+
20
+ **E-mail:** topolog@auburn.edu
21
+
22
+ **ISSN:** 0146-4124
23
+
24
+ COPYRIGHT © by Topology Proceedings. All rights reserved.
25
+ ---PAGE_BREAK---
26
+
27
+ A CORRECTION TO “THE CONNECTIVITY
28
+ STRUCTURE OF THE HYPERSPACES $C_{\epsilon}(X)$”
29
+
30
+ ERIC L. McDOWELL
31
+
32
+ ABSTRACT. We demonstrate that Proposition 3.1 of [Eric L. McDowell and B. E. Wilder, *The connectivity structure of the hyperspaces* $C_{\epsilon}(X)$, Topology Proc. **27** (2003), no. 1, 223–232] is false by constructing a locally connected metric continuum which admits a non-locally connected small-point hyperspace.
33
+
34
+ Let $X$ be a continuum with metric $d$. For any $\epsilon > 0$ the set $C_{d,\epsilon}(X) = \{A \in C(X) : \text{diam}_d(A) \le \epsilon\}$ is called a *small-point hyperspace* of $X$. The notation $C_{\epsilon}(X)$ is used when the metric on $X$ is understood.
35
+
36
+ Proposition 3.1 of [2] asserts that $X$ is locally connected if and only if $C_{\epsilon}(X)$ is locally connected for every $\epsilon > 0$. While it is true that the local connectivity of $C_{\epsilon}(X)$ for every $\epsilon > 0$ implies the local connectivity of $X$, we show in this note that the reverse implication is false.
37
+
38
+ Below we construct a locally connected continuum $X$ in $\mathbb{R}^3$ for which $C_{\epsilon}(X)$ fails to be locally connected for some $\epsilon > 0$. The metric considered on $X$ is the usual metric inherited from $\mathbb{R}^3$. All
39
+
40
+ 2000 Mathematics Subject Classification. Primary 54F15; Secondary 54B20.
41
+ Key words and phrases. cyclic connectedness, hyperspace, locally connected continuum.
42
+
43
+ The author is grateful to Professor Sam B. Nadler, Jr. for questioning the validity of the proposition that this note addresses. The author is also grateful to the referee for suggestions which significantly enhanced this paper.
44
+
45
+ ©2009 Topology Proceedings.
46
+ ---PAGE_BREAK---
47
+
48
+ points $(r, \theta, z)$ are described using the standard cylindrical coordinate system, and all concepts and notation which are used without definition can be found in [3]. The example is similar to [4, Example 2].
49
+
50
+ **Example 1.** For each $n = 1, 2, \dots$, let $S_n$ denote the circle described by $\{(1, \theta, n^{-1}) : 0 \le \theta < 2\pi\}$ and let $S_0 = \{(1, \theta, 0) : 0 \le \theta < 2\pi\}$. For each $n = 1, 2, \dots$ and each $i = 1, 2, \dots, 2^n$, let $A_i^n$ denote the straight line segment given by $\{(1, 2\pi i/2^n, z) : 0 \le z \le n^{-1}\}$. Define $X$ to be the continuum given by
51
+
52
+ $$X = \left( \bigcup_{n=0}^{\infty} S_n \right) \cup \left( \bigcup_{n=1}^{\infty} \bigcup_{i=1}^{2^n} A_i^n \right).$$
53
+
54
+ It is straightforward to show that $X$ is a Peano continuum. We will now prove that $C_\epsilon(X)$ fails to be locally connected at the point $S_0$ when $\epsilon = 2$.
55
+
56
+ Let $\{U_1, \dots, U_k\}$ be an open cover of $S_0$ with the property that for every $n = 0, 1, \dots$ and every $i = 1, \dots, k$ it is true that
57
+
58
+ $$ (1) \quad S_n - U_i \text{ is connected and has arc length greater than } 3\pi/2. $$
59
+
60
+ Observe that $\mathcal{U} = \langle U_1, \cdots, U_k \rangle$ is an open subset of $C(X)$ that contains $S_0$ as well as all $S_n$ for $n$ sufficiently large. Select $N$ such that $S_N \in \mathcal{U}$. We will prove that $C_\epsilon(X)$ fails to be locally connected at $S_0$ by showing that every arc in $\mathcal{U}$ with endpoints $S_0$ and $S_N$ must contain a point of diameter greater than 2. Let $f: [0, 1] \to \mathcal{U}$ be an embedding for which $f(0) = S_0$ and $f(1) = S_N$. Let $\pi: X \to S_N$ denote the natural projection map. For any subset $S \subset X$ we say that $(1, \theta, z) \in S$ is an *antipodal point* of $S$ provided that $(1, \theta + \pi, z')$ belongs to $S$ for some $z'$. We will denote the set of antipodal points of $S$ by $\mathrm{AP}(S)$. We now show that
61
+
62
+ $$ (2) \quad (1, \theta, z) \in \mathrm{AP}(S) \text{ if and only if } (1, \theta, N^{-1}) \in \mathrm{AP}(\pi(S)). $$
63
+
64
+ To see (2), let $S \subset X$ and let $(1, \theta, z) \in \mathrm{AP}(S)$. By definition it follows that $(1, \theta + \pi, z')$ belongs to $S$ for some $z'$; thus, $\pi(1, \theta + \pi, z') = (1, \theta + \pi, N^{-1})$ belongs to $\pi(S)$. Since $(1, \theta, N^{-1}) = \pi(1, \theta, z) \in \pi(S)$, it follows that $(1, \theta, N^{-1}) \in \mathrm{AP}(\pi(S))$. The argument for the converse is similar.
65
+
66
+ If $M \in \mathcal{U}$ and $M \subset S_N$, then there exists an arc $A$ (possibly empty) such that $M$ is the closure of $S_N - A$; thus, the only elements
67
+ ---PAGE_BREAK---
68
+
69
+ of $M - AP(M)$ are the points that are diametrically opposed to the interior points of A. Therefore, $AP(M)$ is either $S_N$ (if $A = \emptyset$) or the union of two disjoint arcs. Since $f(t)$ is a continuum for each $0 \le t \le 1$, it follows from continuity that
70
+
71
+ (3) $AP(\pi(f(t)))$ is either $S_N$ or the union of two disjoint arcs.
72
+
73
+ Continuity also shows that the intersection of $\pi^{-1}(AP(\pi(f(t))))$ and $f(t)$ is closed; moreover, it follows from (2) that this intersection is equal to $AP(f(t))$. Therefore, we have that
74
+
75
+ (4) $AP(f(t))$ is closed for every $0 \le t \le 1$.
76
+
77
+ Suppose that $(1, \theta, z) \in AP(f(t))$; then $(1, \theta + \pi, z') \in f(t)$ for some $z'$. If $z' \neq z$, then $(1, \theta, z)$ and $(1, \theta + \pi, z')$ are more than two units apart. Moreover, if $(1, \theta, z) \in AP(f(t)) - \bigcup_{n=0}^{\infty} S_n$, then it follows from the connectivity of $f(t)$ that there must exist some $z'' \neq z$ with $(1, \theta + \pi, z'') \in f(t)$. It follows that
78
+
79
+ (5) if $AP(f(t)) - \bigcup_{n=0}^{\infty} S_n \neq \emptyset$ then $\text{diam}(f(t)) > 2$.
80
+
81
+ We now show that there exists some $t_0 \in [0, 1]$ for which the diameter of $f(t_0)$ is greater than 2. Begin by defining
82
+
83
+ $$t' = \min\{t \in [0, 1] : AP(f(t)) \cap S_N \neq \emptyset\}.$$
84
+
85
+ Suppose that $t' = 1$. Choose $\gamma > 0$ small enough such that the $\gamma$-ball, $\mathcal{B}$, about $S_N$ has the properties that $\mathcal{B} \subset \mathcal{U}$ and $S_n \cap (\cup \mathcal{B}) = \emptyset$ for all $n \neq N$. Choose $\delta > 0$ such that if $t \in (1 - \delta, 1]$ then $H_d(f(t), S_N) < \gamma$. Let $t_0 \in (1 - \delta, 1)$. By (3) we have that $AP(f(t_0)) \neq \emptyset$. However, since $t_0 < t'$ we have by the definition of $t'$ and our choice of $\gamma$ that $AP(f(t_0)) - \bigcup_{n=0}^{\infty} S_n \neq \emptyset$. Therefore, $\text{diam}(f(t_0)) > 2$ by (5).
86
+
87
+ Now suppose that $t' < 1$. Let $q = (1, \theta, z) \in AP(f(t')) \cap S_N$ and let $q' \in f(t') \cap \pi^{-1}(1, \theta+\pi, z)$. We may assume that $q' = (1, \theta+\pi, z)$ since $d(q, q') > 2$ otherwise. Using (3), we have that $AP(\pi(f(t')))$ contains an arc $I$ containing $q$. We suppose first that $q$ is an isolated point of $AP(f(t'))$. Let $\{y_i\}_{i=1}^{\infty}$ be a sequence in $I$ converging to $q$; then use (2) to select $x_i \in \pi^{-1}(y_i) \cap AP(f(t'))$ for each $i = 1, 2, \dots$. We have by (4) that $AP(f(t'))$ is closed; hence, some subsequence of $\{x_i\}_{i=1}^{\infty}$ converges to a point $x_0$ of $AP(f(t'))$. Moreover, since $\{y_i\}_{i=1}^{\infty}$ converges to $q$, we have that $x_0 \in \pi^{-1}(q)$. Finally, since $q
88
+ ---PAGE_BREAK---
89
+
90
+ is an isolated point of $AP(f(t'))$, it follows that $x_0$ is a member of
91
+ $f(t') \cap \pi^{-1}(q)$ that does not belong to $S_N$. Therefore, $d(x_0, q') > 2$,
92
+ and thus, $\text{diam}(f(t')) > 2$. On the other hand, if $q$ is not an isolated
93
+ point of $AP(f(t'))$, then we may assume that the arc $I$ containing
94
+ $q$ belongs to $S_N \cap AP(f(t'))$. Choose $\gamma > 0$ small enough so that
95
+ (i) no $\gamma$-ball about a point of $I$ meets any $S_n$ for $n \neq N$ and (ii) the
96
+ midpoint $m = (1, \mu, z)$ of $I$ is not contained in the $\gamma$-balls about
97
+ the endpoints of $I$. Choose $\delta > 0$ such that if $t \in (t' - \delta, t']$, then
98
+ $H_d(f(t), f(t')) < \gamma$. Let $t_0 \in (t' - \delta, t')$. Since $H_d(f(t_0), f(t')) < \gamma$,
99
+ we have by (i), (ii), and the construction of $X$ that $f(t_0)$ contains
100
+ a point $m'$ for which $\pi(m') = m$; furthermore, we have by (i) that
101
+ $m' \in S_N$. Thus, $m' = (1, \mu, z) = m \in f(t_0)$. By a similar argument
102
+ we can show that $(1, \mu + \pi, z) \in f(t_0)$. Therefore, $m \in AP(f(t_0))$,
103
+ contrary to our assumption that $t_0 < t'$.
104
+
105
+ **Example 2.** K. Kuratowski [1, p. 268] describes a continuum, $K$, consisting of the segment $\{(x, 0) : 0 \le x \le 1\}$, of the vertical segments $\{(m/2^{n+1}, y) : 0 \le m \le 2^{n+1}, 0 \le y \le 1/2^n\}$ and of the level segments $\{(x, 1/2^n) : 0 \le x \le 1\}$, where $n = 1, 2, \dots$. We note that $K$ is similar in structure to the continuum in the previous example; however, $C_{\rho_1, \epsilon}(K)$ is locally connected when $\rho_1$ is the usual metric inherited from $\mathbb{R}^2$. (Informally, observe that if a subcontinuum $A$ of $K$ is contained in an open subset $U$ of $C(K)$, then $U$ also contains subsets of $A$ with diameter smaller than that of $A$. By first shrinking $A$ to a continuum with smaller diameter within $U$, one can then continuously grow continua to include a subset of a target subcontinuum within $U$ before continuously releasing $A$.)
106
+
107
+ Instead of considering the usual metric on $K$, let $h: K \to S^1 \times [0, 1]$ be an embedding which sends the leftmost vertical segment of $K$ to $\{(1, 0, z) : 0 \le z \le 1\}$ and the rightmost vertical segment of $K$ to $\{(1, 3\pi/2, z) : 0 \le z \le 1\}$, and which preserves the vertical and horizontal orientations of all subsets of $K$. Let $d$ denote the usual metric for $h(K)$ inherited from $\mathbb{R}^3$, and let $\rho_2$ denote the metric on $K$ given by $\rho_2(x, y) = d(h(x), h(y))$. Then an argument essentially identical to the one given in Example 1 can be used to show that $C_{\rho_2, \epsilon}(K)$ fails to be locally connected for $\epsilon = 2$.
108
+
109
+ Noting that the small-point hyperspaces of the arc, circle, and
110
+ simple triod are all locally connected, while the examples provided
111
+ ---PAGE_BREAK---
112
+
113
+ in this article admit non-locally connected small-point hyperspaces,
114
+ the referee suggests the following question.
115
+
116
+ **Question 1.** *Are the small-point hyperspaces of an hereditarily locally connected continuum always locally connected?*
117
+
118
+ Recall that a continuum is said to be *cyclicly connected* provided
119
+ that any two points of the continuum are contained in some simple
120
+ closed curve. Theorem 3.11 of [2] states that $C_{\epsilon}(X)$ is cyclicly
121
+ connected for every $\epsilon > 0$ whenever $X$ is locally connected; however,
122
+ the argument that is used to justify this assertion uses Proposition
123
+ 3.1 of [2]. Therefore, the following question remains open.
124
+
125
+ **Question 2.** If $X$ is a locally connected continuum with metric $\rho$,
126
+ must $C_{\rho,\epsilon}(X)$ be cyclicly connected for every $\epsilon > 0$?
127
+
128
+ REFERENCES
129
+
130
+ [1] K. Kuratowski, *Topology. Vol. II.* New edition, revised and augmented. Translated from the French by A. Kirkor. New York-London: Academic Press and Warsaw: PWN, 1968.
131
+
132
+ [2] Eric L. McDowell and B. E. Wilder, *The connectivity structure of the hyperspaces C<sub>ε</sub>(X)*, *Topology Proc.* **27** (2003), no. 1, 223-232.
133
+
134
+ [3] Sam B. Nadler, Jr. *Continuum Theory: An Introduction*. Monographs and Textbooks in Pure and Applied Mathematics, 158. New York: Marcel Dekker, Inc., 1992.
135
+
136
+ [4] Sam B. Nadler, Jr. and Thelma West, *Size levels for arcs*, Fund. Math. **141** (1992), no. 3, 243–255.
137
+
138
+ DEPARTMENT OF MATHEMATICS AND COMPUTER SCIENCE; BERRY COL-
139
+ LEGE; MOUNT BERRY, GEORGIA 30149-5014
140
+
141
+ *E-mail address: emcdowell@berry.edu*
samples/texts_merged/3193892.md ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Anomalous VVH interactions at a linear collider
5
+
6
+ SUDHANSU S BISWAL¹,*, DEBAJYOTI CHOUDHURY²,
7
+ ROHINI M GODBOLE¹ and RITESH K SINGH³
8
+
9
+ ¹Centre for High Energy Physics, Indian Institute of Science, Bangalore 560 012, India
10
+
11
+ ²Department of Physics and Astrophysics, University of Delhi, New Delhi 110 007, India
12
+
13
+ ³Laboratoire de Physique Théoretique, 91405 Orsay Cedex, France
14
+
15
+ *E-mail: sudhansu@cts.iisc.ernet.in
16
+
17
+ **Abstract.** We examine, in a model independent way, the sensitivity of a linear collider to the couplings of a light Higgs boson to a pair of gauge bosons, including the possibility of CP violation. We construct several observables that probe the various possible anomalous couplings. For an intermediate mass Higgs, a collider operating at a center of mass energy of 500 GeV and with an integrated luminosity of 500 fb⁻¹ is shown to be able to constrain the ZZH vertex at the few per cent level, with even higher sensitivity for some of the couplings. However, lack of sufficient number of observables as well as contamination from the ZZH vertex limits the precision to which anomalous part of the WWH coupling can be probed.
18
+
19
+ **Keywords.** Anomalous Higgs couplings; linear collider.
20
+
21
+ PACS Nos 13.66.Fg; 14.80.Cp; 14.70.Fm; 14.70.Hp
22
+
23
+ ## 1. Introduction
24
+
25
+ The standard model (SM) of particle physics has been tested up to a high degree of accuracy, but the direct experimental verification of the phenomenon of spontaneous symmetry breaking is still pending. Various extensions of the SM have more than one Higgs boson whose CP parity and hypercharges may differ from those of the SM Higgs boson. The minimal supersymmetric standard model (MSSM) is one example of such an extended Higgs sector [1]. To establish the experimental observation of the SM Higgs boson it will be therefore, necessary to establish its properties such as hypercharge, CP parity etc. At an $e^+e^-$ collider the dominant Higgs production processes are $e^+e^- \to f\bar{f}H$, which proceed via the VVH coupling with $V = W, Z$ and $f$ any light fermion. Demanding Lorentz invariance, the VVH couplings can be parameterized as
26
+
27
+ $$ \Gamma_{\mu\nu} = g_V \left[ a_V g_{\mu\nu} + \frac{b_V}{m_V^2} (k_{1\nu} k_{2\mu} - g_{\mu\nu} k_1 \cdot k_2) + \frac{\tilde{b}_V}{m_V^2} \epsilon_{\mu\nu\alpha\beta} k_1^\alpha k_2^\beta \right], \quad (1) $$
28
+
29
+ where $k_i$ denote the momenta of the two W's (Z's); $g_W^{SM} = e \cot \theta_W M_Z$ and $g_Z^{SM} = 2eM_Z/\sin 2\theta_W$. In general, all these anomalous couplings can be complex. For
30
+ ---PAGE_BREAK---
31
+
32
+ simplicity we assume $a_V$ to be real and close to its SM value. For processes involving $VVH$ coupling alone we can choose, without loss of generality, $g_V = g_V^{SM}$ and $a_V = 1 + \Delta a_V$. We further assume $\Delta a_W = \Delta a_Z$ and keep terms up to linear order in the anomalous couplings. The analysis will be made for the ILC with center of mass energy 500 GeV and a Higgs boson of mass 120 GeV. We will use $H \to b\bar{b}$ final state and further assume b-quark detection efficiency of 0.7. The largest contribution comes from the process, $e^+e^- \to \nu_e\bar{\nu}_e H$. This process contains two missing neutrinos in the final state. However, this receives contributions from both the $WWH$ and $ZZH$ vertices. Hence one needs to look at $e^+e^- \to Z^*H \to f\bar{f}H$ to constrain $ZZH$ anomalous couplings and then make use of this information while probing $WWH$ couplings.
33
+
34
+ ## 2. Observables and kinematical cuts
35
+
36
+ We have constructed various momentum combinations $C_i$ by taking dot and scalar triple products of different linear combinations of momenta. These combinations have been listed in table 1 with their transformation properties under discrete symmetries C, P and $\tilde{T}$, where the pseudotime reversal operator ($\tilde{T}$) reverses the momenta and spins of particles without interchanging their initial and final states. Then we construct observables ($O_i$) by taking the expectation values of the signs of various $C_i$'s, i.e. $O_i = \langle \text{sign}(C_i) \rangle$. Most of these observables have definite CP and $\tilde{T}$ properties and hence can be used directly to probe the anomalous coupling which has the same CP and $\tilde{T}$ properties. In our analysis we keep the terms only upto linear order in anomalous couplings $B_i$. So all observables can be written down as
37
+
38
+ $$ \mathcal{O}(\{B_i\}) = \sum O_i B_i . $$
39
+
40
+ Measurements of these observables may be used to constrain the anomalous couplings. The possible sensitivity of these observables to the different anomalous couplings $B_i$, at a given degree of statistical significance $f$, can be obtained by demanding $|\mathcal{O}(\{B_i\}) - \mathcal{O}(\{0\})| \le f \delta\mathcal{O}$. Here $\mathcal{O}(\{0\})$ is the SM value of $\mathcal{O}$ and $\delta\mathcal{O}$ is the statistical fluctuation in $\mathcal{O}$.
41
+
42
+ **Table 1.** List of momentum correlators, their discrete transformation properties and anomalous couplings they probe. $\vec{P}_e = \vec{p}_{e-} - \vec{p}_{e+}$, $\vec{P}_f^+ = \vec{p}_f + \vec{p}_{\bar{f}}$, $\vec{P}_{\bar{f}} = \vec{p}_{\bar{f}} - \vec{p}_{\bar{f}}$.
43
+
44
+ <table><thead><tr><th>Correlator</th><th>C</th><th>P</th><th>CP</th><th>T&#x0304;</th><th>CPT&#x0304;</th><th>Probe of</th></tr></thead><tbody><tr><td>C<sub>0</sub> 1</td><td>+</td><td>+</td><td>+</td><td>+</td><td>+</td><td>a<sub>V</sub>, ℜ(b<sub>V</sub>)</td></tr><tr><td>C<sub>1</sub> &#x212C;<sub>e</sub> &sdot; &#x212C;<sub>f</sub><sup>+</sup></td><td>-</td><td>+</td><td>-</td><td>+</td><td>-</td><td>&Imacr;(b&#x0303;<sub>V</sub>)</td></tr><tr><td>C<sub>2</sub> [&#x212C;<sub>e</sub> &sdot; &#x212C;<sub>f</sub><sup>+</sup>] &sdot; &#x212C;<sub>f</sub><sup>-</sup></td><td>+</td><td>-</td><td>-</td><td>-</td><td>+</td><td>&Reacr;(b&#x0303;<sub>V</sub>)</td></tr><tr><td>C<sub>3</sub> [[&#x212C;<sub>e</sub> &sdot; &#x212C;<sub>f</sub><sup>+</sup>] &sdot; &#x212C;<sub>f</sub><sup>-</sup>][&#x212C;<sub>e</sub> &sdot; &#x212C;<sub>f</sub><sup>+</sup>]</td><td>-</td><td>-</td><td>+</td><td>-</td><td>-</td><td>&Imacr;(b<sub>V</sub>)</td></tr><tr><td>C<sub>4</sub> [[&#x212C;<sub>e</sub> &sdot; &#x212C;<sub>f</sub><sup>+</sup>] &sdot; &#x212C;<sub>f</sub><sup>-</sup>][&#x212C;<sub>e</sub> &sdot; &#x212C;<sub>f</sub><sup>-</sup>]</td><td>&times;</td><td>-</td><td>&times;</td><td>-</td><td>&times;</td><td>&Imacr;(b<sub>V</sub>), ℜ(b&#x0303;<sub>V</sub>)</td></tr></tbody></table>
45
+
46
+ Sudhansu S Biswal et al
47
+ ---PAGE_BREAK---
48
+
49
+ Anomalous VVH interactions
50
+
51
+ Statistical fluctuation in cross-section and in an asymmetry can be written as
52
+
53
+ $$
54
+ \Delta\sigma = \sqrt{\sigma_{\text{SM}}/\mathcal{L} + \epsilon^2 \sigma_{\text{SM}}^2}, \quad (2)
55
+ $$
56
+
57
+ $$
58
+ (\Delta A)^2 = \frac{1 - A_{\text{SM}}^2}{\sigma_{\text{SM}} \mathcal{L}} + \frac{\epsilon^2}{2} (1 - A_{\text{SM}}^2)^2. \qquad (3)
59
+ $$
60
+
61
+ Here $\sigma_{\text{SM}}$ and $A_{\text{SM}}$ are the SM value of cross-section and asymmetry respectively.
62
+
63
+ We choose the integrated luminosity $\mathcal{L} = 500 \text{ fb}^{-1}$, fractional systematic error $\epsilon = 0.01$ and $f = 3$.
64
+
65
+ Various kinematical cuts we impose, to suppress dominant background to the signal, are 5° ≤ θ₀ ≤ 175°; E_b, E_&#x0304;, E_l-, E_l+ ≥ 10 GeV; p<sub>T</sub><sup>missing</sup> ν ≥ 15 GeV; ΔR<sub>q₁q₂</sub> ≥ 0.7; ΔR<sub>l-l+</sub> ≥ 0.2; ΔR<sub>l-b</sub>, ΔR<sub>l-&#x0304;</sub>, ΔR<sub>l+b</sub>, ΔR<sub>l+l&#x0304;</sub> ≥ 0.4.
66
+
67
+ Here $(\Delta R)^2 \equiv (\Delta\phi)^2 + (\Delta\eta)^2$ when $\Delta\phi$ and $\Delta\eta$ denote the separation between the two jets in azimuthal angle and rapidity respectively.
68
+
69
+ We additionally impose cuts on the invariant mass of the $f\bar{f}$ system:
70
+
71
+ $$
72
+ R1 \equiv |m_{ff} - M_Z| \le 5 \Gamma_Z \quad \text{select Z-pole,} \tag{4}
73
+ $$
74
+
75
+ $$
76
+ R2 \equiv |m_{f\bar{f}} - M_Z| \ge 5 \Gamma_Z \quad \text{de-select Z-pole.} \tag{5}
77
+ $$
78
+
79
+ These enhance or suppress the contribution from Z resonance in the Bjorken process respectively. $\Gamma_Z$ in the above is the width of Z boson.
80
+
81
+ **3. ZZH couplings**
82
+
83
+ To probe the anomalous ZZH couplings we consider $f\bar{f}$ final state, where $f$ is any light fermion other than neutrinos. As outlined above we can construct observables with definite CP and $\tilde{T}$ properties and thus can maximize sensitivity to the anomalous couplings for a chosen final state. One can use some of these variables to probe the anomalous couplings [1a].
84
+
85
+ Cross-section: (observable $O_0$ corresponding to correlator $C_0$). Total rates are CP and $\tilde{T}$ even quantities. Hence these can be used to constrain $\Delta a_Z$ and $\Re(b_Z)$. Total rates with $R1$ cut and $f = \mu, u, d, c, s$ can be used to probe $|\Re(b_Z)| > 0.48 \times 10^{-2}$. Similarly total cross-section for $f=e$ with $R2$ cut, $\sigma(R2; e)$ can probe $\Delta a_Z$ to $|\Delta a_Z| > 0.038$ at $3\sigma$ level. Figure 1a shows that the sensitivity to $\Re(b_Z)$ is correlated with $\Delta a_Z$, whereas the reverse is not true.
86
+
87
+ *Forward-backward asymmetry (A₁):* We define the FB asymmetry $A_1$ with respect to the polar angle of Higgs boson. Since $A_1$ is CP odd and $\tilde{T}$ even, $A_1(R1; \mu, q)$ can be used to probe $\Im(\tilde{b}_Z)$. We find that this measurement can probe $|\Im(\tilde{b}_Z)| > 0.042$.
88
+
89
+ *Up-down asymmetry (A₂):* $A_2$ is the up-down asymmetry corresponding to $f$ being above or below the H-production plane. It is a CP odd and $\tilde{T}$ odd observable and a real probe of $\Re(\tilde{b}_Z)$. Since this asymmetry requires charge determination of the final-state fermions, we cannot consider quarks in the final state. Hence using $A_2^{R2}(e)$ one will be able to constrain $|\Re(\tilde{b}_Z)| \le 0.064$ and it is shown by vertical lines in figure 1b.
90
+ ---PAGE_BREAK---
91
+
92
+ Figure 1. Simultaneous $3\sigma$ limits on anomalous couplings with $L = 500 \text{ fb}^{-1}$: (a) $\Delta a_Z - \Re(b_Z)$ plane using cross-sections; (b) $\Re(\tilde{b}_Z) - \Im(\tilde{b}_Z)$ plane using various asymmetries.
93
+
94
+ **Polar–azimuthal asymmetry ($A_3$):** $A_3$ is a mixed polar–azimuthal asymmetry combining polar angle of Higgs boson and azimuthal angle of $f$ with respect to Higgs production plane and is CP even and $\tilde{T}$ odd. So it is sensitive only to $\Im(b_Z)$. This asymmetry requires charge measurement of $f$, hence suitable only for $f = e, \mu$. This can give a sensitivity at $3\sigma$ level as $|\Im(b_Z)| \le 0.17$. The region inside the horizontal lines in figure 1b shows $3\sigma$ variation in $A_3$.
95
+
96
+ **Another combined asymmetry ($A_4$):** We construct this combined asymmetry with respect to the polar and azimuthal angles of final state $f$. Although $A_4$ is $\tilde{T}$ odd, it does not have any definite CP property. So it is sensitive to both $\Im(b_Z)$ and $\Re(\tilde{b}_Z)$. Also $A_4$ requires charge determination of $f$ and hence we cannot consider quarks in the final-state for this observable. But we consider only $f = \mu$, because for $f = e$ many anomalous couplings contribute significantly with R1 cut. The corresponding constraint is shown in figure 1b with slant lines.
97
+
98
+ In table 2 we list all the achievable limits obtained above. We emphasize that all of them, except for $\Delta a_Z$ and $\Re(b_Z)$, are independent of other anomalous couplings. Table 2 shows that the constraint on $\Re(b_Z)$ depends on $\Delta a_Z$. Also $\tilde{T}$-odd observables require charge measurement of final-state fermions and hence quarks in the final-state cannot be considered to probe $\tilde{T}$-odd couplings leading to rather poor sensitivity to them.
99
+ ---PAGE_BREAK---
100
+
101
+ Anomalous VVH interactions
102
+
103
+ **Table 2.** Sensitivity achievable at 3σ level for various anomalous couplings with L = 500 fb⁻¹.
104
+
105
+ <table><thead><tr><th>Coupling</th><th>3σ Bound</th><th>Observable used</th></tr></thead><tbody><tr><td>|Δa<sub>Z</sub>|</td><td>0.038</td><td>σ with R2 cut; f = e<sup>-</sup></td></tr><tr><td>|Re(b<sub>Z</sub>)|</td><td>{ 0.0048 (Δa<sub>Z</sub> = 0) <br> 0.013 (|Δa<sub>Z</sub>| = 0.038)</td><td>σ with R1 cut; f = μ, q</td></tr><tr><td>|Ξ(b<sub>Z</sub>)|</td><td>0.17</td><td>A<sub>3</sub> with R1 cut; f = μ<sup>-</sup>, e<sup>-</sup></td></tr><tr><td>|Re(&#x0303;b<sub>Z</sub>)|</td><td>0.064</td><td>A<sub>2</sub>(φ<sub>e<sup>-</sup></sub>) with R2 cut</td></tr><tr><td>|Ξ(&#x0303;b<sub>Z</sub>)|</td><td>0.042</td><td>A<sub>1</sub>(c<sub>H</sub>) with R1 cut; f = μ, q</td></tr></tbody></table>
106
+
107
+ **Table 3.** Individual 3σ limits of sensitivity.
108
+
109
+ <table><thead><tr><th>Coupling</th><th>Limit</th><th>Observable used</th></tr></thead><tbody><tr><td>|Δa| ≤ 0.018</td><td>σ<sub>R2</sub></td><td></td></tr><tr><td>|Re(b<sub>W</sub>)| ≤ 0.098</td><td>σ<sub>R2</sub></td><td></td></tr><tr><td>|Ξ(b<sub>W</sub>)| ≤ 0.62</td><td>σ<sub>R1</sub></td><td></td></tr><tr><td>|Re(&#x0303;b<sub>W</sub>)| ≤ 1.6</td><td>A<sup>1</sup><sub>FB(c<sub>H</sub>)</sub></td><td></td></tr><tr><td>|Ξ(&#x0303;b<sub>W</sub>)| ≤ 0.39</td><td>A<sup>2</sup><sub>FB(c<sub>H</sub>)</sub></td><td></td></tr></tbody></table>
110
+
111
+ **Table 4.** Simultaneous 3σ limits of sensitivity.
112
+
113
+ <table><thead><tr><th>Coupling</th><th>Δa = 0</th><th>Δa ≠ 0</th></tr></thead><tbody><tr><td>|Δa| ≤ –</td><td>0.038</td><td></td></tr><tr><td>|Re(b<sub>W</sub>)| ≤ 0.10</td><td>0.31</td><td></td></tr><tr><td>|Ξ(b<sub>W</sub>)| ≤ 1.6</td><td>1.6</td><td></td></tr><tr><td>|Re(&#x0303;b<sub>W</sub>)| ≤ 3.2</td><td>3.2</td><td></td></tr><tr><td>|Ξ(&#x0303;b<sub>W</sub>)| ≤ 0.44</td><td>0.44</td><td></td></tr></tbody></table>
114
+
115
+ ## 4. WWH couplings
116
+
117
+ Due to missing neutrinos in the final state here one can only construct two observables: cross-section and forward-backward asymmetry with respect to polar angle of Higgs boson. Any deviation from SM value for cross-section largely depends on Δa$_{V}$ and Re(b$_{V}$) (CP even, T̄ even). Similarly, FB asymmetry receives a large contribution from Ξ(¯b$_{V}$) (CP odd, T̄ even). Hence there is no other direct observable to probe the remaining anomalous couplings. Assuming Δa$_{Z}$ = Δa$_{W}$ = Δa, we calculate the expressions for both the observables with R1 and R2 cuts. In table 3 we list the individual limits of sensitivity on the various anomalous couplings at 3σ level. To see what the sensitivity will be when all the anomalous couplings were to be nonzero, we construct a nine-dimensional region in parameter space and take a point from that region and calculate all the observables simultaneously. If the difference from their SM values due to these anomalous couplings is within the statistical fluctuation in SM values of these observables, then we say that the point is inside the blind region. The points on the boundary of this region give us the simultaneous limit of sensitivity of these measurements to the anomalous couplings. These are listed in table 4. These tables show that the lack of a specific observable to probe T̄-odd couplings results in rather poor sensitivity to them. For more details, see [2].
118
+ ---PAGE_BREAK---
119
+
120
+ Sudhansu S Biswal et al
121
+
122
+ 5. Conclusion
123
+
124
+ We have analyzed the sensitivity of the process $e^{+}e^{-} \rightarrow f\bar{f}H$, $f$ being a light fermion and probe different anomalous couplings. We implement various kinematical cuts on the different final-state particles so as to reduce background and also take into account finite b-tagging efficiency. When these effects are removed, our analysis reproduces the results of [4]. Although the observables constructed using optimal observable analysis [3] have maximum sensitivity to the anomalous couplings, they are a little opaque to the physics that is being probed. The observables that we have constructed by taking expectation values of sign of the correlators are simple to construct and most of them have definite CP and $\tilde{T}$ properties thus probing specific anomalous couplings. Apart from $\Re(b_V)$ and $\Delta a_V$, constraints on all the other anomalous couplings can be obtained using asymmetries and hence are robust to the effects of radiative corrections.
125
+
126
+ References
127
+
128
+ [1] See, for example, M Drees, R M Godbole and P Roy, *Theory and phenomenology of sparticles* (World Scientific, Singapore, 2004)
129
+
130
+ [1a] For detailed definition, see [2]
131
+
132
+ [2] Sudhansu S Biswal, Debajyoti Choudhury, Rohini M Godbole and Ritesh K Singh, *Phys. Rev. D73*, 035001 (2006)
133
+
134
+ [3] K Hagiwara, S Ishihara, J Kamoshita and B A Kniehl, *Euro. Phys. J. C14*, 457 (2000)
135
+
136
+ [4] T Han and J Jiang, *Phys. Rev. D63*, 096007 (2001)
samples/texts_merged/3224121.md ADDED
@@ -0,0 +1,735 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ Cooperation and dependencies in multipartite systems
5
+
6
+ Waldemar Kłobus,¹ Marek Miller,² Mahasweta Pandit,¹ Ray Ganardi,¹,³ Lukas Knips,⁴,⁵,⁶ Jan Dziewior,⁴,⁵,⁶
7
+ Jasmin Meinecke,⁴,⁵,⁶ Harald Weinfurter,⁴,⁵,⁶ Wiesław Laskowski,¹,³ and Tomasz Paterek¹,²,⁷
8
+
9
+ ¹Institute of Theoretical Physics and Astrophysics, Faculty of Mathematics,
10
+ Physics and Informatics, University of Gdańsk, 80-308 Gdańsk, Poland
11
+
12
+ ²School of Physical and Mathematical Sciences, Nanyang Technological University, 637371 Singapore
13
+
14
+ ³International Centre for Theory of Quantum Technologies, University of Gdańsk, 80-308 Gdańsk, Poland
15
+
16
+ ⁴Max-Planck-Institut für Quantenoptik, Hans-Kopfermann-Straße 1, 85748 Garching, Germany
17
+
18
+ ⁵Department für Physik, Ludwig-Maximilians-Universität, Schellingstraße 4, 80799 München, Germany
19
+
20
+ ⁶Munich Center for Quantum Science and Technology (MCQST), Schellingstraße 4, 80799 München, Germany
21
+
22
+ ⁷MajuLab, International Joint Research Unit UMI 3654,
23
+ CNRS, Université Côte d'Azur, Sorbonne Université,
24
+ National University of Singapore, Nanyang Technological University, Singapore
25
+
26
+ We propose an information-theoretic quantifier for the advantage gained from cooperation that captures the degree of dependency between subsystems of a global system. The quantifier is distinct from measures of multipartite correlations despite sharing many properties with them. It is directly computable for classical as well as quantum systems and reduces to comparing the respective conditional mutual information between any two subsystems. Secret sharing provides an exemplary cooperation task where this quantifier is beneficial. Based on the new quantifier we prove an inequality characterizing the lack of monotonicity of conditional mutual information under local operations and provide intuitive understanding for it.
27
+
28
+ I. INTRODUCTION
29
+
30
+ Identifying and quantifying dependencies in multipartite systems enable their analysis and provides a better understanding of complex phenomena. The problem has been addressed by several communities, considering both classical and quantum systems. For example, in neuroscience and genetics measures of multipartite synergy were put forward [1–6], in quantitative sociology quantifiers of coordination were introduced [7], and in physics and information processing quantities aimed at characterizing genuine multiparty correlations were studied in depth [8–13]. The former quantifiers are motivated mathematically, keeping the combinatorial aspects of complex systems in mind, e.g., the synergy is the difference in the information all subsystems have about an extra system as compared to the total information contained in any subset of the systems. Many of the latter quantifiers involve difficult optimizations and are therefore hard to compute. Here, we introduce an operationally defined, simple and computable quantifier of multipartite dependency in terms of information gain from cooperation when some parties meet and try to deduce the variables of some of the remaining parties. We show how it differs from multipartite correlations, prove its essential properties and discuss the application to quantum secret sharing.
31
+
32
+ It turns out that, in order to compute the quantity introduced here, it is sufficient to consider the respective conditional mutual information between only two subsystems. Therefore, any operational meaning of the conditional mutual information, e.g., in terms of communication cost of quantum state redistribution [14, 15], applies to the dependence measure as well. In this context, we prove an inequality which characterizes the lack of monotonicity of quantum conditional mutual informa-
33
+
34
+ tion under general local operations.
35
+
36
+ II. MULTIPARTITE DEPENDENCE
37
+
38
+ Let us begin by briefly recalling fundamental relationships, e.g., that two classical variables $X_1$ and $X_2$ are statistically independent if their probabilities satisfy $P(X_1|X_2) = P(X_1)$. Alternatively, the statistical independence can be stated in terms of entropies with the help of both the Shannon entropy $H(X) = -\sum_{i=1}^{d} P(x_i) \log_d P(x_i)$, where $d$ is the number of outcomes, and the conditional entropy $H(X|Y) = -\sum_{i,j} P(x_i, y_j) \log_d \frac{P(x_i, y_j)}{P(y_j)}$. As a measure of dependence of two variables $X_1$ and $X_2$ one introduces the corresponding entropic difference $H(X_1) - H(X_1|X_2)$, the so-called mutual information $I(X_1: X_2)$ [16]. Similarly, the quantum mutual information captures the dependence between quantum subsystems [17]. However, already in the case of three variables there are two levels of independence. The variable $X_1$ can be independent of all other variables, i.e., $P(X_1|X_2X_3) = P(X_1)$, or it can be conditionally independent of one of them, e.g., $P(X_1|X_2X_3) = P(X_1|X_2)$. The former dependence is again captured by the mutual information $I(X_1: X_2X_3)$, while the so-called conditional mutual information $I(X_1: X_3|X_2) = H(X_1|X_2) - H(X_1|X_2X_3)$ considers the latter. It is thus natural to define the *tripartite dependence* as the situation where any variable depends on all the other variables. This can be quantified as the worst case conditional mutual information
39
+
40
+ $$D_3 \equiv \min[I(X_1 : X_2 | X_3), I(X_1 : X_3 | X_2), \\ I(X_2 : X_3 | X_1)]. \quad (1)$$
41
+ ---PAGE_BREAK---
42
+
43
+ Due to strong subadditivity the conditional mutual in-
44
+ formation is non-negative and hence $D_3 \ge 0$ [18]. $D_3$
45
+ vanishes if and only if there exists a variable such that
46
+ already a subset of the remaining parties can gain the
47
+ maximally accessible information about the variable in
48
+ question. Note that this condition is also satisfied if a
49
+ variable is not correlated with the rest of the system at
50
+ all.
51
+
52
+ The value of $\mathcal{D}_3$ can be interpreted using an alternative expression for conditional mutual information, e.g., $I(X_1: X_3|X_2) = I(X_1: X_2X_3) - I(X_1: X_2)$. Reformulating now (1), one recognizes that $\mathcal{D}_3$ expresses the gain in information about the first subsystem that the second party has from cooperating with the third party. Accordingly, nonzero $\mathcal{D}_3$ ensures that any two parties always gain through cooperation when accessing the knowledge about the remaining subsystem. The minimal gain over the choice of parties is an alternative way to compute $\mathcal{D}_3$.
53
+
54
+ In the context of quantum subsystems we can
55
+ rewrite the conditional mutual information as $I(X_1 : X_3|X_2) = S(X_1|X_2) + S(X_3|X_2) - S(X_1X_3|X_2)$, where
56
+ e.g. $S(X_1|X_2)$ is the conditional entropy based on the
57
+ von Neumann entropy $S(\cdot)$. Since $S(X_1|X_2)$ is the entan-
58
+ glement cost of merging a state $X_1$ with $X_2$, see Ref. [19],
59
+ we can interpret the conditional mutual information as
60
+ the extra cost of merging states one by one ($X_1$ with $X_2$
61
+ and $X_3$ with $X_2$) instead of altogether ($X_1X_3$ with $X_2$).
62
+ $\mathcal{D}_3$ is the minimum extra cost of this merging.
63
+
64
+ *Secret sharing.*—An example of an intuitive applica-
65
+ tion of $\mathcal{D}_3$ is (quantum) secret sharing [20–23]. In the
66
+ tripartite setting, secret sharing requires collaboration of
67
+ two parties in order to read out the secret of the remain-
68
+ ing party. In the classical version of this problem the se-
69
+ cret is a random variable, e.g., the measurement outcome
70
+ of, say, the first observer. It is thus required that both,
71
+ the second as well as the third party alone has only little
72
+ or no information about the secret, i.e., $I(X_1: X_2)$ and
73
+ $I(X_1: X_3)$ are small, while both of them together can
74
+ reveal the result of the first observer, i.e., $I(X_1: X_2X_3)$
75
+ is large or unity. It is clear that the value of $\mathcal{D}_3$ (close to
76
+ its maximum) yields a measure for the working of secret
77
+ sharing. Furthermore, due to the minimization in (1), the
78
+ secret can be generated at any party. Below we derive
79
+ the classical distributions with large $\mathcal{D}_3$ as well as quan-
80
+ tum states which achieve maximal dependence. Quite
81
+ surprisingly these are mixed states belonging to the class
82
+ of so-called k-uniform states [24]. It turns out that these
83
+ states have perfect correlations along complementary lo-
84
+ cal measurements and therefore, by following the proto-
85
+ col in [22], the quantum solution to the secret sharing
86
+ problem offers additionally security against eavesdrop-
87
+ ping. In Appendix E we show that these states enable
88
+ perfect sharing of a quantum secret (unknown quantum
89
+ state) and that the value of dependence provides a lower
90
+ bound on the quality of quantum secret sharing for a class
91
+ of states. See Ref. [25] for an example of secret sharing
92
+ with a class of pure k-uniform states.
93
+
94
+ *Correlations and dependence.*—Before we generalize to
95
+
96
+ an arbitrary number of parties and present the properties
97
+ of the resulting $\mathcal{D}_N$, let us give a simple example that il-
98
+ lustrates the difference between multipartite correlations
99
+ and multipartite dependence. Consider three classical bi-
100
+ nary random variables described by the joint probability
101
+ distribution $P(000) = P(111) = \frac{1}{2}$. All three variables
102
+ are clearly correlated as confirmed, e.g., by quantifiers
103
+ introduced in Refs. [12, 13]. However, the knowledge of,
104
+ say, the first party about the third party does not in-
105
+ crease if the first observer is allowed to cooperate with
106
+ the second one. By examining her data, the first ob-
107
+ server knows the variables of both remaining parties and
108
+ any cooperation with one of them does not change this.
109
+ There is no information gain and hence this distribution
110
+ has vanishing tripartite dependence.
111
+
112
+ On the other hand, let us consider the joint proba-
113
+ bility distribution with $P(000) = P(011) = P(101) =$
114
+ $P(110) = \frac{1}{4}$, which can describe also a classical system.
115
+ Any two variables in this distribution are completely un-
116
+ correlated, but any two parties can perfectly decode the
117
+ value of the remaining variable. Hence the gain from co-
118
+ operation is 1 and so is the value of $\mathcal{D}_3$. This quantifier is
119
+ thus very good for identifying the suitability of a system
120
+ for secret sharing, where the secret could be at any party.
121
+
122
+ *Larger systems.*—Moving on to more complex systems,
123
+ we note that there are more conditions to be considered
124
+ already in order to define the four-partite dependence.
125
+ In analogy to the tripartite case the first condition is
126
+ to require that cooperation of any triple of parties pro-
127
+ vides more information about the remaining subsystem,
128
+ e.g., $I(X_1: X_2X_3X_4) - I(X_1: X_2X_3)$ must be positive.
129
+ But one should also impose that cooperation between
130
+ any pair brings information gain about the two remain-
131
+ ing variables, e.g., $I(X_1X_2: X_3X_4) - I(X_1X_2: X_3)$ must
132
+ be positive. The former condition demands a positive
133
+ conditional mutual information, $I(X_1: X_4|X_2X_3) > 0$,
134
+ while the latter one requires $I(X_1X_2: X_4|X_3) > 0$. In
135
+ order to compute $\mathcal{D}_4$ one takes the minimum of these
136
+ two conditional mutual informations over all permuta-
137
+ tions of subsystems. Note, however, that, e.g., $I(X_1X_2:
138
+ X_4|X_3) \ge I(X_1: X_4|X_2X_3)$ and therefore it is sufficient
139
+ to minimize over the conditional mutual information be-
140
+ tween two variables only. We emphasize that this step
141
+ simplifies the computation significantly. The same argu-
142
+ ment applies for arbitrary $N$ and leads to the definition
143
+ of $N$-partite dependence
144
+
145
+ $$
146
+ \mathcal{D}_N \equiv \min_{\text{perm}} I(X_1 : X_2 | X_3 \dots X_N), \quad (2)
147
+ $$
148
+
149
+ where the minimum is taken over all permutations of the
150
+ subsystems. In the case of a quantum system in state ρ
151
+ we obtain
152
+
153
+ $$
154
+ \mathcal{D}_N(\rho) = \min_{j,k} [S(\operatorname{Tr}_j \rho) + S(\operatorname{Tr}_k \rho) - S(\operatorname{Tr}_{jk} \rho) - S(\rho)], \quad (3)
155
+ $$
156
+
157
+ where *j*, *k* = 1...*N* and *j* ≠ *k*. Tr<sub>*j*</sub>ρ denotes a partial trace over the subsystem *j*. In general, calculating the N-partite dependence requires computation and comparison
158
+ ---PAGE_BREAK---
159
+
160
+ of $\binom{N}{2}$ values, i.e., scales polynomially as $N^2$, whereas for
161
+ permutationally invariant systems it is straightforward.
162
+
163
+ One may also like to study *k*-partite dependencies
164
+ within an *N*-partite system. To this aim we propose to
165
+ apply the definitions above to any *k*-partite subsystem
166
+ and take the minimum over the resulting values.
167
+
168
+ ### III. PROPERTIES
169
+
170
+ The maximal *N*-partite dependence over classical dis-
171
+ tributions of *d*-valued variables is given by 1 (recall that
172
+ our logarithms are base *d*) and follows from the fact that
173
+ classical mutual information cannot exceed the entropy
174
+ of each variable. On the other hand, quantum mutual in-
175
+ formation is bounded by 2 and this is the bound on $\mathcal{D}_N$
176
+ optimized over quantum states (see Appendix D). This
177
+ bound is achieved by mixed states belonging to the class
178
+ of *k*-uniform states, in particular for $k = N - 1$ [24]. In
179
+ the case of *N* qubits (for *N* even) the optimal states have
180
+ the following form
181
+
182
+ $$ \rho_{\max} = \frac{1}{2^N} \left( \sigma_0^{\otimes N} + (-1)^{N/2} \sum_{j=1}^{3} \sigma_j^{\otimes N} \right), \quad (4) $$
183
+
184
+ where $\sigma_j$ are the Pauli matrices and $\sigma_0$ denotes the $2 \times 2$
185
+ identity matrix. Note that $\rho_{\max}$ is permutationally in-
186
+ variant and gives rise to perfect correlations or anti-
187
+ correlations when all observers measure locally the same
188
+ Pauli observable. These states are known as the general-
189
+ ized bound entangled Smolin states [26, 27]. They are a
190
+ useful quantum resource for multiparty communication
191
+ schemes [28] and were experimentally demonstrated in
192
+ Refs. [29–34]. Per definition for (N − 1)-uniform states
193
+ all reduced density matrices are maximally mixed, with
194
+ vanishing mutual information, whereas the whole system
195
+ is correlated. In Appendix D we provide examples of
196
+ states which maximize $\mathcal{D}_N$ for arbitrary $d$ and show in
197
+ general that the only states achieving the maximal quan-
198
+ tum value of 2 are (N − 1)-uniform.
199
+
200
+ Let us also offer an intuition for values of $\mathcal{D}_N$ above
201
+ the classical bound of one. As shown in Appendix G
202
+ this can only happen for mixed quantum states. One
203
+ could then consider an auxiliary system which purifies
204
+ the mixed state. High values of $\mathcal{D}_N$ correspond to learn-
205
+ ing simultaneously the variables of the subsystems and
206
+ the auxiliary system. Note that making this statement
207
+ mathematically precise may be difficult as the problem
208
+ is equivalent to the interpretation of negative values of
209
+ conditional entropy [19, 35, 36].
210
+
211
+ As we have already emphasized, multipartite depen-
212
+ dence is different from multipartite correlations. Nev-
213
+ ertheless, it does share a number of properties that are
214
+ expected from measures of genuine multipartite correla-
215
+ tions. Any such quantifier should satisfy a set of postu-
216
+ lates put forward in Refs. [11, 13]. We now show that
217
+ most of them also hold for $\mathcal{D}_N$ and we precisely charac-
218
+ terize the deviation from one of the postulates. In Ap-
219
+
220
+ pendices A-C we prove the following properties of the
221
+ dependence:
222
+
223
+ (i) If $\mathcal{D}_N = 0$ and one adds a party in a product state
224
+ then the resulting $(N+1)$-party state has $\mathcal{D}_N = 0$.
225
+
226
+ (ii) If $\mathcal{D}_N = 0$ and one subsystem is split with two of its parts placed in different laboratories then the resulting $(N+1)$-party state has $\mathcal{D}_{N+1} = 0$.
227
+
228
+ (iii) $\mathcal{D}_N$ can increase under local operations. Let us denote with the bar the quantities computed after local operations. We have the following inequality:
229
+
230
+ $$ \bar{\mathcal{D}}_N \le \mathcal{D}_N + I(X_1 X_2 : X_3 \dots X_N) - I(X_1 X_2 : \bar{X}_3 \dots \bar{X}_N), \quad (5) $$
231
+
232
+ where systems $X_1$ and $X_2$ are the ones minimizing
233
+ $\mathcal{D}_N$, i.e., before the operations were applied.
234
+
235
+ The properties (i) and (ii) hold for all quantifiers of
236
+ multipartite correlations. It is expected that measures
237
+ of multipartite correlations are also monotonic under lo-
238
+ cal operations (though note that often this condition is
239
+ relaxed in practice, see e.g. quantum discord). In the
240
+ present case, the monotonicity property does not hold in
241
+ general for $\mathcal{D}_N$, however, property (iii) puts a bound on
242
+ its maximal violation. Moreover, it has a clear interpreta-
243
+ tion: local operations that uncorrelate a given subsystem
244
+ from the others may lead to information gain when the
245
+ less correlated party cooperates with other parties.
246
+
247
+ Let us explain this more quantitatively for the condi-
248
+ tional mutual information between variables $X_1$ and $X_2$.
249
+ While it is well-known that this quantity is monotonic
250
+ under local operations on subsystems not in the condi-
251
+ tion [37], we prove in Appendix C that the following in-
252
+ equality is satisfied under local operations on arbitrary
253
+ subsystem (being the origin of property (iii)):
254
+
255
+ $$ I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) \le I(X_1 : X_2 | X_3 \dots X_N) + I(X_1 X_2 : X_3 \dots X_N) - I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N). \quad (6) $$
256
+
257
+ The second line is non-negative due to the data process-
258
+ ing inequality and it quantifies how much the local opera-
259
+ tions have uncorrelated the variables $X_3 \dots X_N$ from the
260
+ variables $X_1 X_2$. This sets the upper bound to the lack
261
+ of monotonicity of the conditional mutual information.
262
+
263
+ ## IV. EXAMPLES
264
+
265
+ Multipartite dependence can be computed for both
266
+ classical and quantum systems and is a generic quan-
267
+ tifier of information gain from cooperation that can be
268
+ used across science. Here we discuss a few exemplary
269
+ calculations and applications of $\mathcal{D}_N$ in quantum infor-
270
+ mation.
271
+ ---PAGE_BREAK---
272
+
273
+ *Pure states.*—First of all, for pure quantum states $|\Psi\rangle$, the dependence can be further simplified as
274
+
275
+ $$
276
+ \begin{align}
277
+ \mathcal{D}_N(|\Psi\rangle) &= \min_{i,j} [S(\operatorname{Tr}_i |\Psi\rangle\langle\Psi|) \nonumber \\
278
+ &\quad + S(\operatorname{Tr}_j |\Psi\rangle\langle\Psi|) - S(\operatorname{Tr}_{ij} |\Psi\rangle\langle\Psi|)] \nonumber \\
279
+ &= \min_{i,j} [S(\rho_i) + S(\rho_j) - S(\rho_{ij})], \tag{7}
280
+ \end{align}
281
+ $$
282
+
283
+ where $\rho_i$ is the state of the system after removing all but the $i$-th particle, i.e., $\mathcal{D}_N(|\Psi\rangle)$ is given by the smallest quantum mutual information in two-partite subsystems. Here, we made use of the fact that both subsystems of a pure state have the same entropy: $S(\operatorname{Tr}_i\rho) = S(\rho_i)$ for $\rho = |\Psi\rangle\langle\Psi|$. In Appendix G we prove the following upper bound on $\mathcal{D}_N$ for pure states
284
+
285
+ $$ \mathcal{D}_N(|\Psi\rangle) \le 1. \tag{8} $$
286
+
287
+ It is a consequence of the trade-off relation between the quantum mutual information for different two-particle subsystems of a pure global state and the definition of $\mathcal{D}_N$ where the smallest conditional mutual information is chosen. In particular, the bound is achieved by the N-qudit GHZ state $\frac{1}{\sqrt{d}}(|0\dots0\rangle + \dots + |d-1\dots d-1\rangle)$. Additionally, the quantum mutual information is bounded by 1 whenever the state $\rho_{ij}$ is separable [38]. A comprehensive list of dependencies within standard classes of quantum states is given in Tab. I. The analytical formula for the N-qubit Dicke states with $e$ excitations, $|D_N^e\rangle$, is presented in Appendix F. In short, if one fixes $e$ and takes the limit $N \to \infty$, the dependence $\mathcal{D}_N$ vanishes. For $e$ being a function of $N$, e.g., $e = N/2$, the dependence $\mathcal{D}_N$ tends to $1/2$.
288
+
289
+ *Entanglement without dependence.*—An intriguing question in the theory of multipartite entanglement is whether entanglement can exist without classical multipartite correlations [10]. The examples of N-party entangled states with vanishing N-party classical correlations are known in the literature [39–43], though the corresponding notions of classical correlations do not satisfy all the postulates of Refs. [11, 13]. Here we ask whether there are genuinely multipartite entangled states with no multipartite dependence and whether multipartite dependence can exist without multipartite correlations and vice versa. It turns out that all of those combinations are possible. There exist even pure genuinely multipartite entangled states without multipartite dependence. Consider any N-qubit cluster state (including linear, ring, 2D, etc.) for $N \ge 4$. It was shown in Ref. [44] that all single-particle subsystems are completely mixed and there exists at least one pair of subsystems in the bipartite completely mixed state. The corresponding entropies are equal to $S(\rho_i) = 1$ and $S(\rho_{ij}) = 2$, and lead to $\mathcal{D}_N = 0$, due to Eq. (7). Therefore, the information about a particular subsystem cannot be increased when other subsystems are brought together which explains the impossibility of the corresponding secret sharing task [45–47]. Note that there exist other subsets of observers who can successfully run secret sharing using a cluster
290
+
291
+ <table><thead><tr><th>N</th><th>state</th><th>D<sub>3</sub></th><th>D<sub>4</sub></th><th>D<sub>5</sub></th><th>D<sub>6</sub></th></tr></thead><tbody><tr><td>3</td><td>{P<sub>same</sub>}</td><td>0</td><td>-</td><td>-</td><td>-</td></tr><tr><td>3</td><td>{P<sub>even</sub>}</td><td>1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>3</td><td>GHZ</td><td>1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>3</td><td>D<sub>3</sub><sup>1</sup></td><td>0.9183</td><td>-</td><td>-</td><td>-</td></tr><tr><td>3</td><td>&rho;<sub>nc</sub></td><td>0.5033</td><td>-</td><td>-</td><td>-</td></tr><tr><td>4</td><td>GHZ</td><td>0</td><td>1</td><td>-</td><td>-</td></tr><tr><td>4</td><td>D<sub>4</sub><sup>1</sup></td><td>0.3774</td><td>0.62256</td><td>-</td><td>-</td></tr><tr><td>4</td><td>D<sub>4</sub><sup>2</sup></td><td>0.5033</td><td>0.7484</td><td>-</td><td>-</td></tr><tr><td>4</td><td>L<sub>4</sub></td><td>1</td><td>0</td><td>-</td><td>-</td></tr><tr><td>4</td><td>3-uniform</td><td>0</td><td>2</td><td>-</td><td>-</td></tr><tr><td>5</td><td>GHZ</td><td>0</td><td>0</td><td>1</td><td>-</td></tr><tr><td>5</td><td>D<sub>5</sub><sup>1</sup></td><td>0.2490</td><td>0.2490</td><td>0.4729</td><td>-</td></tr><tr><td>5</td><td>D<sub>5</sub><sup>2</sup></td><td>0.3245</td><td>0.3245</td><td>0.6464</td><td>-</td></tr><tr><td>5</td><td>L<sub>5</sub></td><td>0</td><td>0</td><td>0</td><td>-</td></tr><tr><td>5</td><td>R<sub>5</sub></td><td>1</td><td>1</td><td>0</td><td>-</td></tr><tr><td>5</td><td>AME(5,2)</td><td>1</td><td>1</td><td>0</td><td>-</td></tr><tr><td>6</td><td>GHZ</td><td>0</td><td>0</td><td>0</td><td>1</td></tr><tr><td>6</td><td>D<sub>6</sub><sup>1</sup></td><td>0.1866</td><td>0.1634</td><td>0.1866</td><td>0.3818</td></tr><tr><td>6</td><td>D<sub>6</sub><sup>2</sup></td><td>0.2566</td><td>0.1961</td><td>0.2566</td><td>0.5637</td></tr><tr><td>6</td><td>D<sub>6</sub><sup>3</sup></td><td>0.2729</td><td>0.1961</td><td>0.2729</td><td>0.6291</td></tr><tr><td>6</td><td>L<sub>6</sub></td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6</td><td>R<sub>6</sub></td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>6</td><td>AME(6,2)</td><td>0</td><td>2</td><td>0</td><td>0</td></tr><tr><td>6</td><td>5-uniform</td><td>0</td><td>0</td><td>0</td><td>2</td></tr></tbody></table>
292
+
293
+ TABLE I. Values of the dependence for several quantum states and probability distributions. {$P_{\text{same}}$} stands for $P(000) = P(111) = \frac{1}{2}$ and {$P_{\text{even}}$} for $P(000) = P(110) = P(101) = P(011) = \frac{1}{4}$. $D_N^k$ denotes the N-partite Dicke states with $k$ excitations $\sim |1...10...0\rangle + ... + |0...01...1\rangle$, with $k$ ones, $\rho_{nc}$ denotes the genuinely multipartite entangled state without multipartite correlations [10], the GHZ state is described in the text, $L_4$ stands for the linear cluster of four qubits and $\Psi_4$ is discussed in [48]. k-uniform states are states where all k-partite marginals are maximally mixed, whereas AME(n,d), so-called absolutely maximally entangled states, refers to $\lfloor n/2 \rfloor$-uniform states of d dimensions [25].
294
+
295
+ This state also illustrates nicely that full correlations can exist without multipartite dependence. Conversely, the state $\rho_{nc} = \frac{1}{2}|D_N^1\rangle\langle D_N^1| + \frac{1}{2}|D_N^{N-1}\rangle\langle D_N^{N-1}|$ has the property of being N-partite entangled without N-partite correlation functions [10], yet its $\mathcal{D}_N$ is finite. This again shows that multipartite dependence is distinct from multipartite correlations and captures other properties of genuinely multi-partite entangled systems.
296
+
297
+ *Increasing *D* with local operations.*—We now give an analytical example where $\mathcal{D}_3$ increases under local operation on the system in the condition. Consider the following classical state
298
+
299
+ $$ \rho = \frac{1}{2} |000\rangle\langle000| + \frac{1}{8} |101\rangle\langle101| + \frac{1}{8} |110\rangle\langle110| + \frac{1}{4} |111\rangle\langle111|. \tag{9} $$
300
+ ---PAGE_BREAK---
301
+
302
+ <table><thead><tr><th>N</th><th>state</th><th>D<sub>3</sub></th><th>D<sub>4</sub></th><th>D<sub>5</sub></th><th>D<sub>6</sub></th></tr></thead><tbody><tr><td>3</td><td>D<sub>3</sub><sup>1</sup></td><td>0.87 (0.92)</td><td>-</td><td>-</td><td>-</td></tr><tr><td>3</td><td>&rho;<sub>nc</sub></td><td>0.45 (0.50)</td><td>-</td><td>-</td><td>-</td></tr><tr><td>4</td><td>GHZ</td><td>0.06 (0.00)</td><td>0.95 (1.00)</td><td>-</td><td>-</td></tr><tr><td>4</td><td>D<sub>4</sub><sup>2</sup></td><td>0.42 (0.50)</td><td>0.67 (0.75)</td><td>-</td><td>-</td></tr><tr><td>4</td><td>L<sub>4</sub></td><td>0.90 (1.00)</td><td>0.09 (0.00)</td><td>-</td><td>-</td></tr><tr><td>4</td><td>&Psi;<sub>4</sub></td><td>0.33 (0.42)</td><td>0.39 (0.42)</td><td>-</td><td>-</td></tr><tr><td>5</td><td>&rho;<sub>nc</sub></td><td>0.25 (0.17)</td><td>0.16 (0.65)</td><td>0.171 (0.47)</td><td>-</td></tr><tr><td>6</td><td>D<sub>6</sub><sup>3</sup></td><td>0.21 (0.27)</td><td>0.13 (0.20)</td><td>0.14 (0.27)</td><td>0.21 (0.63)</td></tr></tbody></table>
303
+
304
+ TABLE II. Illustrative values of dependence for several experimental quantum states. In brackets we give theoretical predictions for ideal states.
305
+
306
+ One verifies that its 3-dependence equals $D_3(\rho) = I(X_2 : X_3|X_1) = 0.06$, i.e., conditioning on $X_1$ gives the smallest conditional mutual information. The application of an amplitude-damping channel with Kraus operators
307
+
308
+ $$K_0 = \begin{pmatrix} 0 & 1/\sqrt{2} \\ 0 & 0 \end{pmatrix}, \quad K_1 = \begin{pmatrix} 1 & 0 \\ 0 & 1/\sqrt{2} \end{pmatrix}, \quad (10)$$
309
+
310
+ on subsystem $X_1$ produces the state $\bar{\rho}$, for which one computes $D_3(\bar{\rho}) = I(\bar{X}_1 : X_2|X_3) = I(\bar{X}_1 : X_3|X_2) = 0.19$. Note the change in the conditioned system minimizing the dependence. The local operation on $X_1$ has increased the information $I(X_2 : X_3|\bar{X}_1)$ above the other two conditional mutual informations.
311
+
312
+ *Experimental states.*—Finally, we move to multipartite dependence in quantum optics experiments. Table II gathers quantum states prepared with photonic qubits in Refs. [40, 49–53]. The dependencies were extracted from experimental density matrices obtained via state tomography using the evaluation described in Ref. [54]. We have chosen to present the states illustrating the properties discussed above.
313
+
314
+ The experimental data is in good agreement with the theoretical calculations. Deviations for the six qubit state $D_6^3$ result from reduced fidelities due to contributions of higher order noise in the state preparation. The same applies to the five qubit state $\rho_{nc}$ derived from $D_6^3$. Indeed, the states denoted as $\rho_{nc}$, which have vanishing correlation functions between all $N$ observers [40], clearly show a non-vanishing value for $D_N$. Hence, these states are examples for “entanglement without correlations” and “dependence without correlations”. Similarly, the experimental data of the linear cluster state $L_4$ indicates “entanglement without dependence” and “correlations without dependence”. In the experiment, the GHZ state $\sim |0000\rangle + |1111\rangle$ achieves the highest dependence of all considered states and is close to the theoretical dependence $D_4 = 1$, which is maximal over all pure states. The small value of $D_3$ for the four-partite GHZ state reflects its property of having vanishing dependence for all tripartite classically correlated subsystems.
315
+
316
+ ## V. CONCLUSIONS
317
+
318
+ We have introduced a quantity, the multipartite dependence, in order to determine whether and by what amount cooperation between any subsystems brings additional information about the remaining subsystems. It is expected that this tool, which can be used in classical as well as in quantum domains, will be of broad relevance as it is directly calculable and has a clear interpretation. Furthermore, it offers an alternative to the characterization of multipartite properties via multipartite correlations.
319
+
320
+ ## ACKNOWLEDGMENTS
321
+
322
+ We thank Krzysztof Szczygielski for valuable discussions. The work is supported by DFG (Germany) and NCN (Poland) within the joint funding initiative “Beethoven2” (2016/23/G/ST2/04273, 381445721), by the Singapore Ministry of Education Academic Research Fund Tier 2 Project No. MOE2015-T2-2-034, and by Polish National Agency for Academic Exchange NAWA Project No. PPN/PPO/2018/1/00007/U/00001. W.L. and R.G. acknowledge partial support by the Foundation for Polish Science (IRAP project, ICTQT, Contract No. 2018/MAB/5, cofinanced by EU via Smart Growth Operational Programme). JD and LK acknowledge support from the PhD programs IMPRS-QST and ExQM, respectively. JDMA is funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany’s Excellence Strategy - EXC-2111 - 390814868.
323
+
324
+ ## Appendix A: Proof of property (i)
325
+
326
+ If $D_N = 0$ and one adds a party in a product state then the resulting $(N+1)$-partite state has $D_N = 0$.
327
+
328
+ *Proof.* Per definition, we are minimizing the conditional mutual information over all $N$-partite subsystems of the total $(N+1)$-party state. If one takes the $N$-partite subsystem that excludes the added party, by assumptions $D_N = 0$. □
329
+
330
+ In other words, if the cooperation of $N-1$ parties within the $N$-partite system does not help in gaining additional knowledge about any other remaining party, then the cooperation with any additional independent system will not help either.
331
+
332
+ ## Appendix B: Proof of property (ii)
333
+
334
+ If $D_N = 0$ and one subsystem is split with two of its parts placed in different laboratories then the resulting $(N+1)$-party state has $D_{N+1} = 0$.
335
+ ---PAGE_BREAK---
336
+
337
+ *Proof.* Without loss of generality and in order to simplify notation let us consider an initially tri-partite system where the third party is in possession of two variables labeled $X_3$ and $X_4$. The splitting operation places these variables in separate laboratories producing a four-partite system. By assumption $\mathcal{D}_3 = 0$, but this does not specify which conditional mutual information in Eq. (1) vanishes. If this is the mutual information where the variables $X_3$ and $X_4$ of the third party enter in the condition, then this mutual information is also minimizing $\mathcal{D}_4$, and hence the latter vanishes. The second possibility is that the variables of the third party enter outside the condition, e.g., the vanishing conditional mutual information could be $I(X_1 : X_3X_4|X_2)$. From the chain rule for mutual information, $0 = I(X_1 : X_3X_4|X_2) \ge I(X_1 : X_4|X_2X_3)$. Finally, from strong subadditivity follows $\mathcal{D}_4 = 0$. In the N-partite case one writes more variables in the conditions and follows the same steps. $\square$
338
+
339
+ ## Appendix C: Proof of property (iii)
340
+
341
+ Consider a state $\rho$ that is processed by general local operations (CPTP maps) to a state $\bar{\rho}$. The following upper bound on the multipartite dependence after local operations holds:
342
+
343
+ $$ \overline{\mathcal{D}}_{\mathcal{N}} \leq \mathcal{D}_{\mathcal{N}} + I(X_1 X_2 : X_3 \dots X_N) \\ -I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N), \quad (\text{C1}) $$
344
+
345
+ where systems $X_1$ and $X_2$ are the ones minimizing $\mathcal{D}_N$, i.e., before the operations were applied.
346
+
347
+ Let us begin with a lemma characterizing the lack of monotonicity of conditional mutual information under local operations.
348
+
349
+ **Lemma 1.** *The following inequality holds:*
350
+
351
+ $$ I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) \le I(X_1 : X_2 | X_3 \dots X_N) + I(X_1 X_2 : X_3 \dots X_N) - I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N), (\text{C2}) $$
352
+
353
+ where bars denote subsystems transformed by arbitrary local CPTP maps.
354
+
355
+ *Proof.* The conditional mutual information is already known to be monotonic under operations on systems not in the condition [37]:
356
+
357
+ $$ I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) \le I(X_1 : X_2 | \overline{X}_3 \dots \overline{X}_N) (\text{C3}) $$
358
+
359
+ Now we continue as follows:
360
+
361
+ $$
362
+ \begin{align*}
363
+ & I(X_1 : X_2 | \overline{X}_3 \dots \overline{X}_N) + I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N) \\
364
+ &= I(X_1 : X_2 \overline{X}_3 \dots \overline{X}_N) + I(X_2 : X_1 \overline{X}_3 \dots \overline{X}_N) - I(X_1 : X_2) \\
365
+ &\le I(X_1 : X_2 X_3 \dots X_N) + I(X_2 : X_1 X_3 \dots X_N) - I(X_1 : X_2) \\
366
+ &= I(X_1 : X_2 | X_3 \dots X_N) + I(X_1 X_2 : X_3 \dots X_N),
367
+ \end{align*}
368
+ $$
369
+
370
+ where the first equation is obtained by manipulating entropies such that the mutual informations containing barred subsystems come with positive sign, next we used the data processing inequality and in the last step we
371
+
372
+ reversed the manipulations on entropies. This completes the proof of the lemma. $\square$
373
+
374
+ To complete the proof of property (iii) we write
375
+
376
+ $$
377
+ \begin{align*}
378
+ \mathcal{D}_N &= I(X_1 : X_2 | X_3 \dots X_N) \\
379
+ &\geq I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) - I(X_1 X_2 : X_3 \dots X_N) \\
380
+ &\phantom{\geq} + I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N) \\
381
+ &\geq \overline{\mathcal{D}}_N - I(X_1 X_2 : X_3 \dots X_N) + I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N),
382
+ \end{align*}
383
+ $$
384
+
385
+ where in the first line we denote the subsystems such that the conditional mutual information $I(X_1 : X_2|X_3\dots X_N)$ achieves minimum in $\mathcal{D}_N$. Next, the first inequality follows from Lemma 1, and the second inequality from the fact that $I(\overline{X}_1 : \overline{X}_2|\overline{X}_3\dots\overline{X}_N)$ may not be the one minimizing $\overline{\mathcal{D}}_N$.
386
+
387
+ ## Appendix D: Quantum qudit states maximizing $\mathcal{D}_N$
388
+
389
+ Let us consider a quantum state of $N$ qudits, for $N$ being a multiple of $d$ and $N \ge 3$, defined as the common eigenstate of the generators
390
+
391
+ $$ G_{1}^{(d)} = \bigotimes_{i=1}^{N} X^{(d)}, \quad G_{2}^{(d)} = \bigotimes_{i=1}^{N} Z^{(d)}, \quad (\text{D1}) $$
392
+
393
+ composed of $d$-dimensional Weyl-Heisenberg matrices
394
+ $$ X^{(d)} = \sum_{j=0}^{d-1} |j\rangle\langle j+1|, \quad \text{and} \quad Z^{(d)} = \sum_{j=0}^{d-1} \omega^j |j\rangle\langle j|, $$
395
+ with $\omega = e^{i2\pi/d}$. The explicit form of the state can be calculated in the following way:
396
+
397
+ $$ \rho_N^{(d)} = \frac{1}{d^N} \sum_{i,j=0}^{d-1} (G_1^{(d)})^i (G_2^{(d)})^j. \quad (\text{D2}) $$
398
+
399
+ The state (D2) belongs to the class of k-uniform mixed states defined in [24], with $k=N-1$.
400
+
401
+ It is known that for $N$ even the state $\rho_N^{(d)}$ has $d^{N-2}$ eigenvalues equal to $\frac{1}{d^{N-2}}$, so the entropy $S(\rho_N^{(d)})$ is equal to
402
+
403
+ $$ S(\rho_N^{(d)}) = N - 2. \quad (\text{D3}) $$
404
+
405
+ Since the state is $(N-1)$-uniform, all reduced density matrices are proportional to identity matrices giving
406
+
407
+ $$ S(\operatorname{Tr}_i \rho_N^{(d)}) = N - 1, \quad (\text{D4}) $$
408
+
409
+ $$ S(\operatorname{Tr}_{i,j} \rho_N^{(d)}) = N - 2. \quad (\text{D5}) $$
410
+
411
+ Therefore, for $N$ even
412
+
413
+ $$
414
+ \begin{align}
415
+ \mathcal{D}_N(\rho_N^{(d)}) &= S(\operatorname{Tr}_i \rho_N^{(d)}) + S(\operatorname{Tr}_j \rho_N^{(d)}) && (\text{D6}) \\
416
+ &\quad -S(\operatorname{Tr}_{i,j} \rho_N^{(d)}) - S(\rho_N^{(d)}) = 2.
417
+ \end{align}
418
+ $$
419
+
420
+ In the case of $N$ odd, however, the state $\rho_N^{(d)}$ has $d^{N-1}$ eigenvalues equal to $\frac{1}{d^{N-1}}$, and by analogous calculations we get
421
+
422
+ $$ \mathcal{D}_N(\rho_N^{(d)}) = 1, \quad (\text{D7}) $$
423
+ ---PAGE_BREAK---
424
+
425
+ for $(N-1)$-uniform states.
426
+
427
+ Now we show that the $(N-1)$-uniform states are the only ones that can achieve $\mathcal{D}_N = 2$. The requirement is
428
+
429
+ $$
430
+ \begin{aligned}
431
+ \mathcal{D}_N &= I(X_1 : X_2 | X_3 \dots X_N) \\
432
+ &= I(X_1 : X_2 X_3 \dots X_N) - I(X_1 : X_3 \dots X_N) \\
433
+ &= 2,
434
+ \end{aligned}
435
+ \quad (\text{D8})
436
+ $$
437
+
438
+ where $X_i$ stands for individual subsystem. Since in the definition of $\mathcal{D}_N$ we minimize over all permutations, the same equation holds for all permutations of subsystems. Due to subadditivity, the only way to satisfy (D8) is
439
+
440
+ $$
441
+ \begin{aligned}
442
+ I(X_1 : X_3 \dots X_N) &= 0, && (\text{D9}) \\
443
+ I(X_1 : X_2 X_3 \dots X_N) &= 2. && (\text{D10})
444
+ \end{aligned}
445
+ $$
446
+
447
+ From the first equation we conclude that
448
+
449
+ $$ \rho_{13 \dots N} = \rho_1 \otimes \rho_{3 \dots N}, \quad (\text{D11}) $$
450
+
451
+ which also holds for all permutations of indices. After tracing out all but the 1st and 3rd subsystem, we arrive at
452
+
453
+ $$ \rho_{13} = \rho_1 \otimes \rho_3, \quad (\text{D12}) $$
454
+
455
+ which means that every pair of subsystems is described by a tensor product state. It follows that any $N-1$ particle subsystem is described by a simple tensor product, e.g.,
456
+
457
+ $$ \rho_{13 \dots N} = \rho_1 \otimes \rho_3 \otimes \dots \otimes \rho_N. \quad (\text{D13}) $$
458
+
459
+ Using (D10) we write
460
+
461
+ $$ S(X_1) - S(X_1 | X_2 X_3 \dots X_N) = 2. \quad (\text{D14}) $$
462
+
463
+ Since for the quantum conditional entropy we have
464
+
465
+ $$ -S(X_1|X_2X_3\dots X_N) \leq S(X_1), \quad (\text{D15}) $$
466
+
467
+ the bound is achieved if
468
+
469
+ $$
470
+ \begin{aligned}
471
+ &2 = S(X_1) - S(X_1 | X_2 X_3 \dots X_N) \\
472
+ &\leq S(X_1) + S(X_1),
473
+ \end{aligned}
474
+ $$
475
+
476
+ i.e., for $S(X_1) = 1$. Hence, taking into account (D13), all $N-1$ particle subsystems are maximally mixed, i.e., the total state is $(N-1)$-uniform.
477
+
478
+ ## Appendix E: Quantum secret sharing
479
+
480
+ After introducing the $(N-1)$-uniform states, which are maximizing the $N$-dependence, we now show that they naturally feature in the task of quantum secret sharing.
481
+
482
+ Suppose Alice has a quantum state $\rho$, called the secret, that she wants to split into $n$ shares such that the secret is recoverable only when a party has all $n$ shares. A quantum secret sharing scheme [23] is a map $\mathcal{E}_n: A \to X^{\otimes n}$ such that,
483
+
484
+ $$ C_Q(\operatorname{Tr}_k \circ \mathcal{E}_n) = 0 \quad (\text{E1}) $$
485
+
486
+ where $\operatorname{Tr}_k$ is the partial trace over an arbitrary set of subsystems and $C_Q(\Lambda)$ is the quantum capacity of the channel $\Lambda$. The rate of a secret sharing scheme is given by the quantum capacity of the channel $\mathcal{E}_n$.
487
+
488
+ Consider that Alice prepares a quantum secret in the state $\rho = \frac{1}{2}(\sigma_0 + \sum_j s_j \sigma_j)$ of a single qubit, where $s_j$ are the components of the Bloch vector. Her encoding map has the $(N-1)$-uniform state as the Choi state [55], and one verifies that it leads to the outcome
489
+
490
+ $$
491
+ \begin{aligned}
492
+ \mathcal{E}_N(\rho) &= \frac{1}{2^N} \left( \sigma_0^{\otimes N} \operatorname{Tr}\rho + (-1)^{N/2} \sum_{j=1}^3 \sigma_j^{\otimes N} \operatorname{Tr}(\sigma_j^T \rho) \right).
493
+ \end{aligned}
494
+ \quad (\text{E2})
495
+ $$
496
+
497
+ Since for any $\rho$ we have $(\operatorname{Tr}_k \circ \mathcal{E}_N)(\rho) \propto 1$, it follows that $C_Q(\operatorname{Tr}_k \circ \mathcal{E}_N) = 0$, i.e., no subset of observers can recover the quantum secret. All of them, however, can recover it perfectly with the decoding map
498
+
499
+ $$
500
+ \begin{aligned}
501
+ \mathcal{D}_N(\rho_N) &= \frac{1}{2} \left( \sigma_0 + (-1)^{N/2} \sum_j \operatorname{Tr}(\sigma_j^{\otimes N} \rho_N) \sigma_j \right)^T,
502
+ \end{aligned}
503
+ \quad (\text{E3})
504
+ $$
505
+
506
+ where $\rho_N = \mathcal{E}_N(\rho)$.
507
+
508
+ We now show that any $(N+1)$-partite state $\rho_c$ with maximally mixed marginals and non-classical dependence $\mathcal{D}_{N+1}(\rho_c) > 1$ is useful for quantum secret sharing. Consider the encoding map $\mathcal{E}_c: A \to X^{\otimes N}$ with the Choi state given by $\rho_c$, i.e., $(\mathbb{1} \otimes \mathcal{E}_c)(|\Phi\rangle\langle\Phi|) = \rho_c$, where $|\Phi\rangle$ is the maximally entangled state. The rate of quantum secret sharing admits the lower bound
509
+
510
+ $$
511
+ \begin{align*}
512
+ R &= C_Q(\mathcal{E}_c) && (\text{E4a}) \\
513
+ &\geq \sup_{\phi_{AN}} -S_{A|X_1\dots X_N}((\mathbb{1} \otimes \mathcal{E}_c)(\phi_{AN})) && (\text{E4b}) \\
514
+ &\geq -S_{A|X_1\dots X_N}(\rho_c) && (\text{E4c}) \\
515
+ &= I(A:X_1|X_2\dots X_N) - S(A|X_2\dots X_N) && (\text{E4d}) \\
516
+ &\geq I(A:X_1|X_2\dots X_N) - 1 && (\text{E4e}) \\
517
+ &\geq \mathcal{D}_{N+1}(\rho_c) - 1. && (\text{E4f})
518
+ \end{align*}
519
+ $$
520
+
521
+ The steps are justified as follows. The first line follows from definition. Ineq. (E4b) is the result of computing the quantum capacity of a channel [56–61], (E4c) follows because the maximally entangled state is a particular choice of $\phi_{AN}$, and the Choi state of $\mathcal{E}_c$ is $\rho_c$. Eqs. (E4d) and (E4e) follow from the properties of entropy recalling that our logarithms are base $d$. Finally, the dependence is the worst case conditional mutual information.
522
+
523
+ Since the marginals of $\rho_c$ are maximally mixed, the same holds for the encoded state $\rho_N = \mathcal{E}_c(\rho)$, i.e., no subset of parties can recover the quantum secret alone, yet for all of them together $R > 0$ holds for $\mathcal{D}_{N+1}(\rho_c) > 1$.
524
+
525
+ ## Appendix F: Dependence of Dicke states
526
+
527
+ We now present an analytical formula for $\mathcal{D}_N^e$ in $N$-qubit Dicke states with $e$ excitations. For that state it is
528
+ ---PAGE_BREAK---
529
+
530
+ given by
531
+
532
+ $$
533
+ \begin{equation}
534
+ \begin{aligned}
535
+ \mathcal{D}_N(D_N^e) = \binom{N}{e}^{-1} & \left[ -\frac{2(N-1)!}{(e-1)!(N-e)!} \log\left(\frac{e}{N}\right) - 2\binom{N-1}{e} \log\left(1-\frac{e}{N}\right) \right. \\
536
+ & \quad + \binom{N-2}{e-2} \log\left(\frac{\binom{N-2}{e-2}}{\binom{N}{e}}\right) + 2\binom{N-2}{e-1} \log\left(\frac{2\binom{N-2}{e-1}}{\binom{N}{e}}\right) \quad (\text{F1}) \\
537
+ & \quad \left. + \binom{N-2}{e} \log\left(\frac{\binom{N-2}{e}}{\binom{N}{e}}\right) \right]
538
+ \end{aligned}
539
+ \end{equation}
540
+ $$
541
+
542
+ This comes from the fact that for a general Dicke state with $e$ excitations all one-partite reduced density matrices $\{\rho_i\}$ have the two non-zero eigenvalues $e/N$ and $(N-e)/N$, while all two-partite reduced states $\{\rho_{ij}\}$ have the three non-vanishing eigenvalues $\frac{e(e-1)}{N(N-1)}$, $\frac{2e(N-e)}{N(N-1)}$, and $\frac{(N-e-1)(N-e)}{N(N-1)}$. For $e$ as a function of the number of parties, $e = N/k$, in the limit of $N \to \infty$, the $N$-dependence converges to a finite value, i.e., $\mathcal{D}_N(D_N^e)$ tends to $2(k-1)/k^2$. The maximally achievable dependence of $1/2$ is reached for $e = N/2$. For an arbitrarily chosen constant $e$ (e.g., for the W state, $e = 1$), $\mathcal{D}_N(D_N^e)$ tends to $0$ for $N \to \infty$.
543
+
544
+ These results allow to answer the following question: If $\mathcal{D}_N \le 1$, are there local measurements on the subsystems with classical outcomes having conditional mutual information equal to $\mathcal{D}_N$? The answer is negative. We have optimized the conditional informations over local measurements for Dicke states with $N=3,4$ and $0 < e < N$, and observed that the values obtained are always smaller than $\mathcal{D}_N$.
545
+
546
+ ## Appendix G: Bounds on mutual N-dependence
547
+
548
+ a. Bound on mixed states
549
+
550
+ $$
551
+ S(\mathrm{Tr}_{j}\rho) \le S(\mathrm{Tr}_{ij}\rho) + S(\rho_{i}), \quad (\mathrm{G1})
552
+ $$
553
+
554
+ $$
555
+ S(\mathrm{Tr}_i\rho) - S(\rho_i) \le S(\rho), \quad (G2)
556
+ $$
557
+
558
+ where $ρ_i$ is the reduced state of the *i*-th particle. Using the above inequalities we write
559
+
560
+ $$
561
+ \begin{align*}
562
+ \mathcal{D}_N(\rho) &\le S(\mathrm{Tr}_i\rho) - S(\rho) + S(\mathrm{Tr}_j\rho) - S(\mathrm{Tr}_{ij}\rho) \\
563
+ &\le S(\rho_i) + S(\rho_i) \\
564
+ &\le 2.
565
+ \end{align*}
566
+ \tag{G3}
567
+ $$
568
+
569
+ b. Bounds on pure states
570
+
571
+ Now we prove that for pure states we have $\mathcal{D}_N(\rho) \le 1$. Note that due to Eq. (7) from the main text we need to find the smallest mutual information $I(\rho_i : \rho_j)$, where $\rho_i$, $\rho_j$ are subsystems of the pure state $\rho$. Consider
572
+
573
+ $$
574
+ \begin{align}
575
+ I(\rho_i : \rho_j) + I(\rho_j : \rho_k) \tag{G4} \\
576
+ &= S(\rho_i) + S(\rho_j) - S(\rho_{ij}) + S(\rho_j) + S(\rho_k) - S(\rho_{jk}) \nonumber \\
577
+ &\le 2S(\rho_j) \nonumber \\
578
+ &\le 2, \tag{G5}
579
+ \end{align}
580
+ $$
581
+
582
+ where the first inequality comes from the strong subadditivity of entropy
583
+
584
+ $$
585
+ S(\rho_i) + S(\rho_k) \leq S(\rho_{ij}) + S(\rho_{jk}). \quad (G6)
586
+ $$
587
+
588
+ The subadditivity of quantum entropy states that for the reduced quantum states we have $S(\operatorname{Tr}_j \rho) \le S(\operatorname{Tr}_{ij}\rho) + S(\rho_i)$, which is the inequality (G1) used above.
589
+
590
+ Hence, this monogamy relation with respect to mutual information proves that there is always a bipartite subsystem with mutual information bounded by 1.
591
+
592
+ [1] T. Gawne and B. Richmond, Journal of Neuroscience **13**, 2758 (1993).
593
+
594
+ [2] I. Gat and N. Tishby, in *Proceedings of the 1998 Conference on Advances in Neural Information Processing Systems* (MIT Press, Cambridge, MA, 1999).
+
+ [15] D. Kaszlikowski, A. Sen(De), U. Sen, V. Vedral, and A. Winter, Phys. Rev. Lett. **101**, 070502 (2008).
595
+
596
+ [3] E. Schneidman, W. Bialek, and M. J. Berry, Journal of Neuroscience **23**, 11539 (2003).
597
+
598
+ [4] E. Schneidman, S. Still, M. J. Berry, and W. Bialek, Phys. Rev. Lett. **91**, 238701 (2003).
599
+
600
+ [5] V. Varadan, D. M. Miller III, and D. Anastassiou, Bioinformatics **22**, e497 (2006).
601
+
602
+ [6] D. Anastassiou, Molecular Systems Biology **3**, 83 (2007).
603
+
604
+ [7] D. Trendafilov, D. Polani, and R. Murray-Smith, *2015 17th UKSim-AMSS International Conference on Modelling and Simulation (UKSim)*, p. 361 (2015).
605
+
606
+ [8] D. L. Zhou, B. Zeng, Z. Xu, and L. You, Phys. Rev. A **74**, 052110 (2006).
607
+
608
+ [9] D. L. Zhou, Phys. Rev. Lett. **101**, 180505 (2008).
609
+
610
+ [10] C. H. Bennett, A. Grudka, M. Horodecki, P. Horodecki, and R. Horodecki, Phys. Rev. A **83**, 012312 (2011).
611
+
612
+ [11] G. L. Giorgi, B. Bellomo, F. Galve, and R. Zambrini, Phys. Rev. Lett. **107**, 190501 (2011).
613
+
614
+ [12] D. Girolami, T. Tufarelli, and C. E. Susa, Phys. Rev. Lett. **119**, 140505 (2017).
615
+
616
+ [13] I. Devetak and J. Yard, Phys. Rev. Lett. **100**, 230501 (2008).
617
+
618
+ [14] F. G. S. L. Brandao, A. W. Harrow, J. Oppenheim, and S. Strelchuk, Phys. Rev. Lett. **115**, 050501 (2015).
619
+ ---PAGE_BREAK---
620
+
621
+ [16] T. M. Cover and J. A. Thomas, *Elements of Information Theory* (Wiley-Interscience, 2006).
622
+
623
+ [17] K. Modi, T. Paterek, W. Son, V. Vedral, and M. Williamson, Phys. Rev. Lett. **104**, 080501 (2010).
624
+
625
+ [18] M. A. Nielsen and I. L. Chuang, *Quantum Computation and Quantum Information* (Cambridge University Press, 2000).
626
+
627
+ [19] M. Horodecki, J. Oppenheim, and A. Winter, Nature **436**, 673 (2005).
628
+
629
+ [20] A. Shamir, ACM **22**, 612 (1979).
630
+
631
+ [21] G. R. Blakley, Proceedings of AFIPS'79 **48**, 313 (1979).
632
+
633
+ [22] M. Hillery, V. Bužek, and A. Berthiaume, Phys. Rev. A **59**, 1829 (1999).
634
+
635
+ [23] H. Imai, J. Müller-Quade, A. C. A. Nascimento, P. Tuyls, and A. Winter, Quantum Info. Comput. **5**, 69 (2005).
636
+
637
+ [24] W. Klobus, A. Burchardt, A. Kolodziejski, M. Pandit, T. Vértesi, K. Życzkowski, and W. Laskowski, Phys. Rev. A **100**, 032112 (2019).
638
+
639
+ [25] W. Helwig, W. Cui, J. I. Latorre, A. Riera, and H.-K. Lo, Phys. Rev. A **86**, 052335 (2012).
640
+
641
+ [26] J. A. Smolin, Phys. Rev. A **63**, 032306 (2001).
642
+
643
+ [27] R. Augusiak and P. Horodecki, Phys. Rev. A **73**, 012318 (2006).
644
+
645
+ [28] R. Augusiak and P. Horodecki, Phys. Rev. A **74**, 010305R (2006).
646
+
647
+ [29] E. Amselem and M. Bourennane, Nat. Phys. **5**, 748 (2009).
648
+
649
+ [30] J. Lavoie, R. Kaltenbaek, M. Piani, and K. J. Resch, Nat. Phys. **6**, 827 (2010).
650
+
651
+ [31] E. Amselem and M. Bourennane, Nat. Phys. **6**, 827 (2010).
652
+
653
+ [32] J. Lavoie, R. Kaltenbaek, M. Piani, and K. J. Resch, Phys. Rev. Lett. **105**, 130501 (2010).
654
+
655
+ [33] J. Barreiro, P. Schindler, O. Gühne, T. Monz, M. Chwalla, C. F. Roos, M. Hennrich, and R. Blatt, Nat. Phys. **6**, 943 (2010).
656
+
657
+ [34] E. Amselem, M. Sadiq, and M. Bourennane, Sci. Rep. **3**, 1966 (2013).
658
+
659
+ [35] L. del Rio, J. Aberg, R. Renner, O. Dahlsten, and V. Vedral, Nature **474**, 61 (2011).
660
+
661
+ [36] T. K. Chuan, J. Maillard, K. Modi, T. Paterek, M. Paternostro, and M. Piani, Phys. Rev. Lett. **109**, 070501 (2012).
662
+
663
+ [37] M. M. Wilde, J. Phys. A: Math. Theor. **51**, 374002 (2018).
664
+
665
+ [38] R. Horodecki, P. Horodecki, M. Horodecki, and K. Horodecki, Rev. Mod. Phys. **81**, 865 (2009).
666
+
667
+ [39] W. Laskowski, M. Markiewicz, T. Paterek, and M. Wieśniak, Phys. Rev. A **86**, 032105 (2012).
668
+
669
+ [40] C. Schwemmer, L. Knips, M. C. Tran, A. de Rosier, W. Laskowski, T. Paterek, and H. Weinfurter, Phys. Rev. Lett. **114**, 180501 (2015).
670
+
671
+ [41] S. Designolle, O. Giraud, and J. Martin, Phys. Rev. A **96**, 032322 (2017).
672
+
673
+ [42] M. C. Tran, M. Zuppardo, A. de Rosier, L. Knips, W. Laskowski, T. Paterek, and H. Weinfurter, Phys. Rev. A **95**, 062331 (2017).
674
+
675
+ [43] W. Klobus, W. Laskowski, T. Paterek, M. Wieśniak, and H. Weinfurter, Eur. Phys. J. D **73**, 29 (2019).
676
+
677
+ [44] P. Hyllus, O. Gühne, and A. Smerzi, Phys. Rev. A **82**, 012337 (2010).
678
+
679
+ [45] D. Markham and B. C. Sanders, Phys. Rev. A **78**, 042309 (2008).
680
+
681
+ [46] A. Keet, B. Fortescue, D. Markham, and B. C. Sanders, Phys. Rev. A **82**, 062315 (2010).
682
+
683
+ [47] D. Markham and B. C. Sanders, Phys. Rev. A **83**, 019901 (2010).
684
+
685
+ [48] H. Weinfurter and M. Żukowski, Phys. Rev. A **64**, 010102 (2001).
686
+
687
+ [49] N. Kiesel, C. Schmid, G. Toth, E. Solano, and H. Weinfurter, Phys. Rev. Lett. **98**, 063604 (2007).
688
+
689
+ [50] G. Toth, W. Wieczorek, D. Gross, R. Krischek, C. Schwemmer, and H. Weinfurter, Phys. Rev. Lett. **105**, 250403 (2010).
690
+
691
+ [51] R. Krischek, W. Wieczorek, A. Ozawa, N. Kiesel, P. Michelberger, T. Udem, and H. Weinfurter, Nat. Photonics **4**, 170 (2010).
692
+
693
+ [52] R. Krischek, C. Schwemmer, W. Wieczorek, H. Weinfurter, P. Hyllus, L. Pezze, and A. Smerzi, Phys. Rev. Lett. **107**, 080504 (2011).
694
+
695
+ [53] L. Knips, C. Schwemmer, N. Klein, M. Wieśniak, and H. Weinfurter, Phys. Rev. Lett. **117**, 210504 (2016).
696
+
697
+ [54] L. Knips, C. Schwemmer, N. Klein, J. Reuter, G. Tóth, and H. Weinfurter, ArXiv e-prints (2015), arXiv:1512.06866 [quant-ph].
698
+
699
+ [55] M.-D. Choi, Linear Alg. Appl. **10**, 285 (1975).
700
+
701
+ [56] B. Schumacher, Phys. Rev. A **54**, 2614 (1996).
702
+
703
+ [57] B. Schumacher and M. A. Nielsen, Phys. Rev. A **54**, 2629 (1996).
704
+
705
+ [58] H. Barnum, M. A. Nielsen, and B. Schumacher, Phys. Rev. A **57**, 4153 (1998).
706
+
707
+ [59] H. Barnum, E. Knill, and M. A. Nielsen, IEEE Trans. Info. Theor. **46**, 1317 (2000).
708
+
709
+ [60] S. Lloyd, Phys. Rev. A **55**, 1613 (1997).
710
+
711
+ [61] I. Devetak, IEEE Trans. Info. Theor. **51**, 44 (2005).
samples/texts_merged/3327355.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/339686.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ ## 7.1 Vector Spaces
5
+
6
+ A **vector space** ($\mathbf{V}$, $\mathbb{F}$) is a set of vectors $\mathbf{V}$, a set of scalars $\mathbb{F}$, and two operators that satisfy the following properties:
7
+
8
+ * **Vector Addition**
9
+
10
+ - **Associative:** $\vec{u} + (\vec{v} + \vec{w}) = (\vec{u} + \vec{v}) + \vec{w}$ for any $\vec{v}, \vec{u}, \vec{w} \in \mathbf{V}$.
11
+
12
+ - **Commutative:** $\vec{u} + \vec{v} = \vec{v} + \vec{u}$ for any $\vec{v}, \vec{u} \in \mathbf{V}$.
13
+
14
+ - **Additive Identity:** There exists an additive identity $\vec{0} \in \mathbf{V}$ such that $\vec{v} + \vec{0} = \vec{v}$ for any $\vec{v} \in \mathbf{V}$.
15
+
16
+ - **Additive Inverse:** For any $\vec{v} \in \mathbf{V}$, there exists $-\vec{v} \in \mathbf{V}$ such that $\vec{v} + (-\vec{v}) = \vec{0}$. We call $-\vec{v}$ the additive inverse of $\vec{v}$.
17
+
18
+ - **Closure under vector addition:** For any two vectors $\vec{v}, \vec{u} \in \mathbf{V}$, their sum $\vec{v} + \vec{u}$ must also be in $\mathbf{V}$.
19
+
20
+ * **Scalar Multiplication**
21
+
22
+ - **Associative:** $\alpha(\beta\vec{v}) = (\alpha\beta)\vec{v}$ for any $\vec{v} \in \mathbf{V}$, $\alpha, \beta \in \mathbb{F}$.
23
+
24
+ - **Multiplicative Identity:** There exists $1 \in \mathbb{F}$ where $1 \cdot \vec{v} = \vec{v}$ for any $\vec{v} \in \mathbf{V}$. We call $1$ the multiplicative identity.
25
+
26
+ - **Distributive in vector addition:** $\alpha(\vec{u} + \vec{v}) = \alpha\vec{u} + \alpha\vec{v}$ for any $\alpha \in \mathbb{F}$ and $\vec{u}, \vec{v} \in \mathbf{V}$.
27
+
28
+ - **Distributive in scalar addition:** $(\alpha + \beta)\vec{v} = \alpha\vec{v} + \beta\vec{v}$ for any $\alpha, \beta \in \mathbb{F}$ and $\vec{v} \in \mathbf{V}$.
29
+
30
+ - **Closure under scalar multiplication:** For any vector $\vec{v} \in \mathbf{V}$ and scalar $\alpha \in \mathbb{F}$, the product $\alpha\vec{v}$ must also be in $\mathbf{V}$.
31
+
32
+ You have already seen vector spaces before! For example, $(\mathbb{R}^n, \mathbb{R})$ is the vector space of all $n$-dimensional vectors. With the definitions of vector addition and scalar multiplication defined in the previous notes you could show that it satisfies all the properties above. In fact, matrices also are a vector space $(\mathbb{R}^{n \times m}, \mathbb{R})$ since they fulfill all of the properties above as well – but in this class we will generally only deal with vector spaces containing vectors in $\mathbb{R}^n$ or $\mathbb{C}^n$.
33
+
34
+ **Additional Resources** For more on vector spaces, read *Strang* pages 123 - 125 and try Problem Set 3.1.
35
+
36
+ In Schaum's, read pages 112-114 and try problems 4.1, 4.2, and 4.71 to 4.76. Extra: Read and Understand Polynomial Spaces, Spaces of Arbitrary "Field."
37
+ ---PAGE_BREAK---
38
+
39
+ ### 7.1.1 Bases
40
+
41
+ We can use a series of vectors to define a vector space. We call this set of vectors a **basis**, which we define formally below:
42
+
43
+ **Definition 7.1 (Basis):**
44
+
45
+ Given a vector space $(V, \mathbb{F})$, a set of vectors $\{\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}$ is a **basis** of the vector space if it satisfies the following two properties:
46
+
47
+ * $\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n$ are linearly independent vectors
48
+
49
+ * For any vector $\vec{v} \in V$, there exist scalars $\alpha_1, \alpha_2, \dots, \alpha_n \in \mathbb{F}$ such that $\vec{v} = \alpha_1\vec{v}_1 + \alpha_2\vec{v}_2 + \dots + \alpha_n\vec{v}_n$.
50
+
51
+ Intuitively, a basis of a vector space is the *minimum* set of vectors needed to represent all vectors in the vector space. If a set of vectors is linearly dependent and “spans” the vector space, it is still not a basis because we can remove at least one vector from the set and the resulting set will still span the vector space.
52
+
53
+ The next natural question to ask is: Given a vector space, is the basis unique? Intuitively, it is not because multiplying one of the vectors in a given basis by a nonzero scalar will not affect the linear independence or span of the vectors. We could alternatively construct another basis by replacing one of the vectors with the sum of itself and any other vector in the set.
54
+
55
+ To illustrate this mathematically, suppose $\{\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}$ is a basis for the vector space we are considering.
56
+ Then
57
+
58
+ $$ \{\alpha \vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\} \qquad (1) $$
59
+
60
+ where $\alpha \neq 0$ is also a basis because, just as we've seen in Gaussian elimination row operations, multiplying a row by a nonzero constant does not change the linear independence or dependence of the rows. We can generalize this to say that multiplying a vector by a nonzero scalar also does not change the linear independence of the set of vectors. In addition, we know that
61
+
62
+ $$ \operatorname{span}(\{\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}) = \operatorname{span}(\{\alpha \vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}). \qquad (2) $$
63
+
64
+ because any vector in $\operatorname{span}(\{\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\})$ can be created as a linear combination of the set $\{\alpha\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}$ by dividing the scale factor on $\vec{v}_1$ by $\alpha$. We can use a similar argument to show that $\{\vec{v}_1 + \vec{v}_2, \vec{v}_2, \dots, \vec{v}_n\}$ is also a basis for the same vector space.
65
+
66
+ **Example 7.1 (Vector space ($\mathbb{R}^3, \mathbb{R}$)):** Let's try to find a basis for the vector space $(\mathbb{R}^3, \mathbb{R})$. We want to find a set of vectors that can represent any vector of the form $\begin{bmatrix} a \\ b \\ c \end{bmatrix}$ where $a,b,c \in \mathbb{R}$. One basis could be the set of standard unit vectors:
67
+
68
+ $$ \left\{ \begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix}, \begin{bmatrix} 0 \\ 1 \\ 0 \end{bmatrix}, \begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix} \right\} $$
69
+ ---PAGE_BREAK---
70
+
71
+ The set of vectors is linearly independent and we can represent any vector $\begin{bmatrix} a \\ b \\ c \end{bmatrix}$ in the vector space using the three vectors:
72
+
73
+ $$ \begin{bmatrix} a \\ b \\ c \end{bmatrix} = a \begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix} + b \begin{bmatrix} 0 \\ 1 \\ 0 \end{bmatrix} + c \begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix}. \quad (3) $$
74
+
75
+ Alternatively, we could show that
76
+
77
+ $$ \left\{ \begin{bmatrix} 1 \\ 1 \\ 0 \end{bmatrix}, \begin{bmatrix} 0 \\ 1 \\ 1 \end{bmatrix}, \begin{bmatrix} 1 \\ 0 \\ 1 \end{bmatrix} \right\} $$
78
+
79
+ is a basis for the vector space.
80
+
81
+ Now that we have defined bases, we can define the dimension of a vector space.
82
+
83
+ **Definition 7.2 (Dimension):** The dimension of a vector space is the number of basis vectors.
84
+
85
+ Since each basis vector can be scaled by one coefficient, the dimension of a space can be thought of as the fewest number of parameters needed to describe an element or member of that space. The dimension can also be thought of as the degrees of freedom of your space – that is, the number of parameters that can be varied when describing a member of that space.
86
+
87
+ **Example 7.2 (Dimension of ($\mathbb{R}^3, \mathbb{R}$)):** Previously, we identified a basis
88
+
89
+ $$ \left\{ \begin{bmatrix} 1 \\ 0 \\ 0 \end{bmatrix}, \begin{bmatrix} 0 \\ 1 \\ 0 \end{bmatrix}, \begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix} \right\} $$
90
+
91
+ for the vector space $(\mathbb{R}^3, \mathbb{R})$. The basis consists of three vectors, so the dimension of the vector space is three.
92
+
93
+ **Note that a vector space can have many bases, but each basis must have the same number of vectors.**
94
+
95
+ We will not prove this rigorously, but let's illustrate our arguments. Suppose a basis for the vector space we're considering has $n$ vectors. This means that the minimum number of vectors we can use to represent all vectors in the vector space is $n$, because the vectors in the basis would not be linearly independent if the vector space could be represented with fewer vectors. Then we can show that any set with less than $n$ vectors cannot be a basis because it does not have enough vectors to span the vector space — there would be some vectors in the vector space that cannot be expressed as a linear combination of the vectors in the set. In addition, we can show that any set with more than $n$ vectors must be linearly dependent and therefore cannot be a basis. Combining the two arguments, we have that any other set of vectors that forms a basis for the vector space must have exactly $n$ vectors.
96
+
97
+ We introduced quite a few terms in this lecture note, and we'll see how we can connect these with our understanding of matrices in the next lecture note!
98
+ ---PAGE_BREAK---
99
+
100
+ **Additional Resources** For more on bases, read *Strang* pages 167 - 171 and try Problem Set 3.4.
101
+ *Extra: Read Sections on Matrix and Function Space.*
102
+
103
+ In Schaum's, read pages 124-126 and pages 127-129. Try Problems 4.24 to 4.28, 4.97 to 4.103, and 4.33 to 4.40.
104
+
105
+ ## 7.2 Practice Problems
106
+
107
+ These practice problems are also available in an interactive form on the course website.
108
+
109
+ 1. True or False: $\{\begin{bmatrix} -3 \\ 1 \end{bmatrix}, \begin{bmatrix} -1 \\ 0 \end{bmatrix}, \begin{bmatrix} 5 \\ 2 \end{bmatrix}\}$ spans $\mathbb{R}^2$.
110
+
111
+ 2. True or False: $\{\begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix}, \begin{bmatrix} 5 \\ -2 \\ 1 \end{bmatrix}, \begin{bmatrix} -3 \\ 6 \\ 5 \end{bmatrix}\}$ is a basis for $\mathbb{R}^3$.
112
+
113
+ 3. The following vectors span $\mathbb{R}^3$:
114
+
115
+ $$ \vec{x}_1 = \begin{bmatrix} 1 \\ 2 \\ 2 \end{bmatrix}, \vec{x}_2 = \begin{bmatrix} 2 \\ 5 \\ 4 \end{bmatrix}, \vec{x}_3 = \begin{bmatrix} 1 \\ 3 \\ 2 \end{bmatrix}, \vec{x}_4 = \begin{bmatrix} 2 \\ 7 \\ 4 \end{bmatrix}, \vec{x}_5 = \begin{bmatrix} 1 \\ 1 \\ 0 \end{bmatrix} $$
116
+
117
+ Which vectors of this set form a basis for $\mathbb{R}^3$?
118
+
119
+ (a) $\vec{x}_1, \vec{x}_2, \vec{x}_3, \vec{x}_4, \vec{x}_5$
120
+
121
+ (b) $\vec{x}_1, \vec{x}_3, \vec{x}_5$
122
+
123
+ (c) $\vec{x}_1, \vec{x}_2, \vec{x}_4$
124
+
125
+ (d) $\vec{x}_1, \vec{x}_3, \vec{x}_4, \vec{x}_5$
samples/texts_merged/3495399.md ADDED
@@ -0,0 +1,382 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # The Paramagnetic Ground State of Ruby—Revisited
5
+
6
+ J. Shell¹
7
+
8
+ A more accurate formula for the ruby spin Hamiltonian (than used in earlier JPL programs) is presented for calculating the ground-state paramagnetic spectrum of ruby and transition probability matrix elements between quantum states induced by radio-frequency magnetic fields. A coordinate system is chosen that simplifies the expressions for the radio-frequency magnetic field. Applications of the computer program to several past and current Deep Space Network maser designs are presented. The program is included in an appendix along with a sample output.
9
+
10
+ ## I. Introduction
11
+
12
+ The low-noise maser amplifiers in the Deep Space Network (DSN) use ruby as the active material. The quantum states of the paramagnetic chromium ion in the ruby crystal are used in the amplification process. An external static magnetic field, $\vec{H}_{dc}$, is applied to the ruby to generate the quantum states. The nature of these states depends on the strength and orientation of this field relative to the ruby crystal c-axis. Transitions between these quantum states are induced by radio frequency (rf) magnetic fields. These transitions are used in two distinct ways. In the first instance, microwave energy from a pump source is used to alter the distribution of spins amongst the energy levels. This creates the population inversion necessary for the ruby to amplify an incoming signal. In the second instance, the process of stimulated emission amplifies the transitions resulting from an incoming “signal.” This incoming signal may be from a distant spacecraft, for example.
13
+
14
+ A good model and understanding of the ruby's paramagnetic behavior are necessary for maser design. In particular, the low-lying energy levels, which are used in cryogenic low-noise amplifiers, are of interest. The ability to calculate the transitions between levels induced by an rf field is also necessary for good maser design. This article contains a computer program that models these effects. The program can be used to select static magnetic-field strengths and orientations and microwave magnetic-field orientations and polarization. This program can aid in the understanding of current and past DSN ruby masers.
15
+
16
+ In 1970, a Fortran program was written to calculate these same quantities using a different coordinate system and different numerical values for the parameters used to describe the ruby [1]. This program was used to generate many sets of tables for maser design. Some tables exist today, but the program is no longer readily available. In 1978, the National Bureau of Standards (NBS) published a report describing the use of ruby as a standard reference material in electron paramagnetic resonance experiments [2]. It published precise values of the spectroscopic splitting factors and the zero-field splitting for ruby.
17
+
18
+ ¹ Communications Ground Systems Section.
19
+
20
+ The research described in this publication was carried out by the Jet Propulsion Laboratory, California Institute of Technology, under a contract with the National Aeronautics and Space Administration.
21
+ ---PAGE_BREAK---
22
+
23
+ The program described in this article uses these more recent values. The program also uses a different coordinate system that simplifies the task of calculating transition probabilities due to an rf field. Rather than aligning the ruby crystal c-axis in the z-direction, the applied static magnetic field is chosen along the z-direction [3]. In addition, the advent of new commercial software specifically designed to work with matrices allows for a much simpler program [4]. The program listing and a sample output are included in Appendix A.
24
+
25
+ ## II. Spin Hamiltonian for Ruby
26
+
27
+ A very concise description of the low-lying states, often referred to as the ground state, is made possible through the concept of an effective spin Hamiltonian. This approach includes such effects as the Zeeman splitting of the states due to applied magnetic fields, including anisotropy of this splitting. It also describes the splitting of energy levels due to the electrostatic field of surrounding atoms. In the case of ruby, this appears as a quadrupole interaction. Excellent discussions of this concept can be found in several books [5,6].
28
+
29
+ The presence of the crystal field makes the form of the Hamiltonian dependent on the orientation of the coordinate system. For example, if the ruby crystal c-axis is chosen along the z-direction, then the spin Hamiltonian, $H_s$, is given by
30
+
31
+ $$H_s = g_1\beta H_z S_z + g_2\beta(H_x S_x + H_y S_y) + D \left[ S_z^2 - \frac{1}{3}S(S+1) \right] \quad (1)$$
32
+
33
+ Here, $g_1$ and $g_2$ are spectroscopic splitting factors, $\beta$ is the Bohr magneton, and $\vec{H}_{dc} = (H_x, H_y, H_z)$ is the applied static magnetic field. The spin vector is denoted by $\vec{S}' = (S_x, S_y, S_z)$. Here, $S_x, S_y, S_z$ are spin matrices, given below. The variable $D$ represents one half of the zero-field splitting between the $S_z = \pm 1/2$ spin states and the $S_z = \pm 3/2$ spin states. The quantity $S(S+1)$ is the eigenvalue of the operator $S^2 = S_x^2 + S_y^2 + S_z^2$. Equation (1) is very similar to the expression used in [1]. The coordinate system appropriate to this form is shown in Fig. 1(a).
34
+
35
+ Personnel at Bell Telephone Laboratories used a Hamiltonian wherein the z-axis is along the applied static magnetic field [3]. The ruby crystal c-axis is specified by the polar angle, $\theta$, with respect to the dc magnetic field and an azimuthal angle, $\varphi$, with respect to the x-axis. Their result is
36
+
37
+ Fig. 1. The coordinate system used in (a) Eq. (1) and (b) Eq. (2).
38
+ ---PAGE_BREAK---
39
+
40
+ $$
41
+ \begin{align}
42
+ H_s = {}& (g_1 \cos^2 \theta + g_2 \sin^2 \theta) \beta H_z S_z \nonumber \\
43
+ & + D \left( \cos^2 \theta - \frac{1}{2} \sin^2 \theta \right) \left[ S_z^2 - \frac{1}{3} S(S+1) \right] \nonumber \\
44
+ & + D \left( \frac{1}{2} \right) \left( \cos \theta \sin \theta \right) \left[ e^{-j\varphi} (S_z S_+ + S_+ S_z) + e^{j\varphi} (S_z S_- + S_- S_z) \right] \nonumber \\
45
+ & + D \left( \frac{1}{4} \right) \sin^2 \theta \left( e^{-2j\varphi} S_+^2 + e^{2j\varphi} S_-^2 \right) \tag{2}
46
+ \end{align}
47
+ $$
48
+
49
+ Here, $S_+ = S_x + jS_y$, $S_- = S_x - jS_y$, and $j = \sqrt{-1}$. We use the values for the spectroscopic splitting factors $g_1 = 1.9817$ and $g_2 = 1.9819$, and the zero-field splitting $D = -3.8076 \times 10^{-17}$ ergs, published by the National Bureau of Standards. This is the form that will be used for the results presented in this article.
50
+
51
+ The coordinate system appropriate to the Hamiltonian of Eq. (2) is shown in Fig. 1(b). From the point of view of the crystal, it's a more natural choice to choose the z-axis along the c-axis direction. From the point of view of the rf magnetic fields, it makes more sense to let the direction of the c-axis be unrestricted. The result is a more complex expression for the spin Hamiltonian. However, since a digital computer performs the calculation, the additional complexity is not a concern. Equation (2) can be shown to be almost exactly equal to Eq. (1). We have neglected terms involving the difference between $g_1$ and $g_2$ because they are nearly equal. Demonstration of the equivalence is discussed in Appendix B.
52
+
53
+ The values predicted by this program are different from the values published by Berwin [1] or Siegman [6]. This is due to the slightly different values of the spectroscopic splitting factor and zero-field splitting used by the two programs. For example, with a 2600-gauss magnetic field oriented 90 degrees to the ruby c-axis, Berwin calculates the 1–2 transition frequency to be 2.6083 GHz. The current program predicts 2.5677 GHz, a difference of 40.6 MHz, or about 1.5 percent.
54
+
55
+ In addition to choosing a coordinate system, we must choose a representation for the spin operators. This means choosing a set of base states in terms of which the spin quantum states can be expressed. The usual choice for a spin system is the set of states that are simultaneous eigenstates of the total angular momentum squared and the projection of the angular momentum along some axis, usually the z-axis. In this representation, the matrices representing $S^2$ and $S_z$ are diagonal. We also adopt this convention. For a spin $S = 3/2$ system, such as the Cr$^{+3}$ ion in ruby, $S^2$ and $S_z$ are given by $(2S+1)$-by-$(2S+1)$ matrices. In particular,
56
+
57
+ $$
58
+ \begin{align}
59
+ S^2 &= \frac{15}{4} \cdot \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} \notag \\
60
+ S_z &= \frac{1}{2} \cdot \begin{bmatrix} 3 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & -3 \end{bmatrix} \tag{3a}
61
+ \end{align}
62
+ $$
63
+
64
+ In this representation, the matrices representing the spin operators $S_x$ and $S_y$ are given by
65
+ ---PAGE_BREAK---
66
+
67
+ $$ S_x = \frac{1}{2} \cdot \begin{bmatrix} 0 & \sqrt{3} & 0 & 0 \\ \sqrt{3} & 0 & 2 & 0 \\ 0 & 2 & 0 & \sqrt{3} \\ 0 & 0 & \sqrt{3} & 0 \end{bmatrix} \qquad (3b) $$
68
+
69
+ $$ S_y = \frac{1}{2} \cdot \begin{bmatrix} 0 & -\sqrt{3}j & 0 & 0 \\ \sqrt{3}j & 0 & -2j & 0 \\ 0 & 2j & 0 & -\sqrt{3}j \\ 0 & 0 & \sqrt{3}j & 0 \end{bmatrix} $$
70
+
71
+ From Eqs. (1) or (2) and (3), it can be seen that the spin Hamiltonian is a 4-by-4 matrix. The eigenvalues of the matrix are the energies of the discrete quantum states available to the spins. The difference in energies divided by Planck's constant determines the resonant transition frequencies. The eigenvector associated with an eigenvalue is a representation of the quantum state having that energy. The transition frequencies are calculated and displayed by the program. The eigenvectors are used to calculate the spin vectors discussed in the next section. The eigenvectors are not normally displayed, although it is a simple matter to do so.
72
+
73
+ ### III. Transition Probability Matrix Elements and Spin Vectors
74
+
75
+ The ability of the rf magnetic field to induce transitions between the quantum states of ruby is fundamental to maser design. If the rf field is the signal from a spacecraft, this ability is related to the gain of the maser. If the rf field is from a microwave pump source, this ability is related to the amount of pump energy needed to saturate the transition. A measure of the ability of a given rf field to induce a transition is given by a matrix element.
76
+
77
+ The transition probability between quantum states $i$ and $j$ induced by an rf magnetic field is
78
+
79
+ $$ W_{i \to j} = \frac{1}{4} \gamma^2 g(f) |\langle j | \vec{H}_{rf}^* \cdot \vec{S} | i \rangle|^2 \quad (4) $$
80
+
81
+ where $\gamma = g\beta\mu_o/\hbar$ and $g(f)$ is the line-shape function. The matrix element mentioned above is given by $\langle j | \vec{H}_{rf}^* \cdot \vec{S} | i \rangle$. The quantum states, $\langle j | , | i \rangle$, are represented by the eigenvectors of the spin Hamiltonian. The spin vector is shorthand for $\vec{S} = (S_x, S_y, S_z)$, where the spin matrices are given above.
82
+
83
+ As seen in Eq. (4), the operator describing the interaction between the spin and the rf magnetic field has much the same form as the operator describing a spin in a static magnetic field. It takes the form of a dot product between the conjugate of the rf magnetic field vector and the spin vector. The magnetic field vector can be pulled outside the brackets, leading to the expression
84
+
85
+ $$
86
+ \begin{aligned}
87
+ \vec{H}_{rf}^* \cdot (\langle j | \vec{S} | i \rangle) &= \vec{H}_{rf}^* \cdot \{\langle j | S_x | i \rangle \hat{x} + \langle j | S_y | i \rangle \hat{y} + \langle j | S_z | i \rangle \hat{z}\} \\
88
+ &= H_x^* S_x^{ij} + H_y^* S_y^{ij} + H_z^* S_z^{ij} = \vec{H}_{rf}^* \cdot \vec{S}^{ij}
89
+ \end{aligned}
90
+ $$
91
+
92
+ In general, $H_x^*, S_x^{ij}, H_y^*, S_y^{ij}, H_z^*, S_z^{ij}$ are complex numbers. Thus, the transition probability between two states depends on the magnitude, orientation, and polarization of the rf magnetic field. The spin vectors, $\vec{S}^{ij} = \langle j | \vec{S} | i \rangle$, as well as the quantities $T_{ij} = |\vec{H}_{rf}^* \cdot \vec{S}^{ij}|^2$, for a user-specified rf field, are calculated by the program.
93
+ ---PAGE_BREAK---
94
+
95
+ # IV. Program Description and Examples Using the Program
96
+
97
+ The program is written in the high-level language MATLAB. This is commercial software specifically designed to handle matrices. MATLAB has intrinsic eigenvalue and eigenvector routines. This greatly reduces the program length. After the Hamiltonian is entered into the program, the eigenvalues and eigenvectors are calculated by executing one statement. The eigenvectors are ordered with the one corresponding to the lowest energy, $e_1$, labeled $v_1$, and the next one labeled $v_2$, and so on. The eigenvectors calculated by MATLAB are also orthogonal and normalized. For a general choice of the azimuth angle, $\varphi$, the eigenvectors are complex. If the c-axis is chosen in the x–z plane, that is, $\varphi = 0$ or 180 degrees, the eigenvectors are real.
98
+
99
+ The program input consists of the static magnetic-field strength, the angles $\theta$ and $\varphi$ specifying the c-axis orientation and the rf magnetic field in phasor form. The program calculates and displays the transition frequencies (in GHz), the associated spin vectors, and the quantity $T_{ij}$ for all the transitions. A sample output follows the program listing.
100
+
101
+ The user can check the transition frequencies for selected field strength and orientation against the NBS tables. The NBS tables include values for $T_{x'}^{\alpha\beta} = |\langle\alpha|S_{x'}|\beta\rangle|^2$ and $T_{y'}^{\alpha\beta} = |\langle\alpha|S_{y'}|\beta\rangle|^2$. These can be compared against the $T_{ij}$ calculated by the program by entering $H_{rf} = (1,0,0)$ and $H_{rf} = (0,1,0)$, respectively, as program input. Note that the levels in the NBS tables are labeled in the opposite order, with level 1 being the highest and level 4 being the lowest.
102
+
103
+ In the following subsections, the program is used to analyze or describe past and current DSN masers.
104
+
105
+ ## A. Example 1: S-Band Coaxial Cavity Masers
106
+
107
+ Our first example of the use of the program will be a comparison of two early 2.36-GHz (S-band) coaxial cavity masers. The first such cavity had the ruby oriented in the coaxial line, as shown in Figs. 2(a) and 2(b).² The static magnetic field was oriented perpendicular to the coaxial line. Its strength was approximately 2500 gauss. The rf magnetic-field lines of constant magnitude are circles surrounding the center conductor in a plane perpendicular to the center conductor, as shown in Fig. 2(c). The ruby c-axis is in a plane perpendicular to the static magnetic field and oriented 30 degrees out of the plane of the rf magnetic field.
108
+
109
+ With the right-hand x–y–z coordinate system in Fig. 2(a), we set $\varphi = 60$ degrees and $\theta = 90$ degrees. The rf-field lines of constant magnitude form circles in the y–z plane, and the polarization is linear. The
110
+
111
+ Fig. 2. The first S-band coaxial cavity: (a) a perspective drawing showing the direction of the static magnetic field and the crystal c-axis, (b) a side view, and (c) a top view (a typical rf magnetic field line is also shown).
112
+
113
+ ² R. C. Clauss, personal communication, Jet Propulsion Laboratory, Pasadena, California, February 2002.
114
+ ---PAGE_BREAK---
115
+
116
+ interaction of the ruby with the linear rf field depends on the angle $\psi$, shown in Fig. 2(c). We can generate a table of transition probabilities as a function of $\psi$ by changing the relative magnitude of the y- and z-components of the rf magnetic field. Because of the symmetry, we need only cover 1/4 of the circumference of the circle. We choose 10-degree increments.
117
+
118
+ A word about our notation is in order. We will represent the rf magnetic field in the form $H_{rf} = H_1(a, b, c)$, where $a, b, c$ can be complex and satisfy $|a|^2 + |b|^2 + |c|^2 = 1$. In its most general form, $H_1$ would be $H_1 = he^{j\alpha}$. The actual rf field is given by multiplying $H_{rf}$ by $e^{j\omega t}$ and taking the real part. In our examples, $H_1$ will be chosen equal to one. For example, a right-hand circular polarized wave in the x-y plane would be written as $H_{rf} = (1, -j, 0)$. If the wave is viewed as propagating toward the observer, then if the fingers of the right hand curl in the direction of vector rotation, the thumb will point toward the observer. The linear rf field phasors are listed in Table 1 along with the associated value of $T_{12}$. For the 1-2 transition, the average value of $T_{12}$ per unit rf field strength is $T_{12}/H_1 = 0.623$.
119
+
120
+ To accurately estimate the ruby absorption, we would have to account for the stronger field near the shorted end of the ruby cavity, as well as the variation of the field strength from the center conductor to the outer conductor. Since the second maser geometry in this comparison is the same as the first, we will neglect these effects. The second maser geometry is shown in Figs. 3(a) and 3(b) [7]. Now the static magnetic field is along the center conductor of the coaxial line, and the ruby c-axis is in the plane perpendicular to it. It is also the plane of the rf magnetic field, as seen in Fig. 3(c). For this orientation, we set $\theta = 90$ degrees and $\phi = 0$ degrees. Again we vary $H_{rf}$, at 10-degree increments, around 1/4 of the circumference of the circle in the x-y plane. The transition probabilities are shown in Table 2. For the 1-2 transition, the average value of $T_{12}$ per unit rf field strength is $T_{12}/H_1 = 0.892$. Therefore, the second maser geometry should be significantly better, with a transition probability for the signal transition about 43 percent greater than the first geometry.
121
+
122
+ ## B. Example 2: X-band Coupled-Cavity Maser
123
+
124
+ The next example concerns the behavior of ruby as it might appear in a DSN 8.42-GHz (X-band) coupled-cavity maser. This is shown schematically in Fig. 4. The ruby crystal is shown in a cavity with a signal broadbanding cavity on the left and a pump broadbanding cavity on the right. To the left of the signal broadbanding cavity is a stepped-height pump reject filter. An applied static magnetic
125
+
126
+ Table 1. First S-band coaxial cavity.
127
+
128
+ <table><thead><tr><th>H<sub>rf</sub></th><th>T<sub>12</sub></th></tr></thead><tbody><tr><td>(0, 1, 0)</td><td>1.2451</td></tr><tr><td>(0, 0.985, 0.174)</td><td>1.2081</td></tr><tr><td>(0, 0.949, 0.342)</td><td>1.1002</td></tr><tr><td>(0, 0.866, 0.500)</td><td>0.9338</td></tr><tr><td>(0, 0.766, 0.643)</td><td>0.7306</td></tr><tr><td>(0, 0.643, 0.766)</td><td>0.5148</td></tr><tr><td>(0, 0.500, 0.866)</td><td>0.3113</td></tr><tr><td>(0, 0.342, 0.940)</td><td>0.1456</td></tr><tr><td>(0, 0.174, 0.985)</td><td>0.0377</td></tr><tr><td>(0, 0, 1)</td><td>0.0</td></tr><tr><td>&mdash;</td><td>0.623<br>(average)</td></tr></tbody></table>
129
+ ---PAGE_BREAK---
130
+
131
+ Fig. 3. The second S-band coaxial cavity: (a) a perspective drawing showing the direction of the static magnetic field and the crystal c-axis, (b) a side view, and (c) a top view (a typical rf magnetic field line is also shown).
132
+
133
+ Table 2. Second S-band coaxial cavity.
134
+
135
+ <table><thead><tr><th>H<sub>rf</sub></th><th>T<sub>12</sub></th></tr></thead><tbody><tr><td>(1, 0, 0)</td><td>1.5985</td></tr><tr><td>(0.985, 0.174, 0)</td><td>1.5565</td></tr><tr><td>(0.949, 0.342, 0)</td><td>1.4341</td></tr><tr><td>(0.866, 0.500, 0)</td><td>1.2451</td></tr><tr><td>(0.766, 0.643, 0)</td><td>1.0144</td></tr><tr><td>(0.643, 0.766, 0)</td><td>0.7695</td></tr><tr><td>(0.500, 0.866, 0)</td><td>0.5384</td></tr><tr><td>(0.342, 0.940, 0)</td><td>0.3505</td></tr><tr><td>(0.174, 0.985, 0)</td><td>0.2280</td></tr><tr><td>(0, 1, 0)</td><td>0.1851</td></tr><tr><td>&mdash;</td><td>0.892<br/>(average)</td></tr></tbody></table>
136
+
137
+ Fig. 4. A perspective view of an X-band coupled-cavity maser. The cavities are drawn for illustrative purposes only; they are not to scale.
138
+ ---PAGE_BREAK---
139
+
140
+ field of 4,981 gauss is oriented 90 degrees to the crystal c-axis. The signal transition is chosen between levels 1 and 2 and occurs at 8.421 GHz. The first pump transition is between levels 1 and 3 and occurs at 24.05 GHz. A second pump transition is between levels 3 and 4 and occurs at 19.21 GHz. The spin vectors for these transitions are very important to the maser design.
141
+
142
+ The spin vector for the signal transition is $\vec{S}_{12} = (-1.0735, 0.65443j, 0)$. Since we have chosen $\varphi = 0$, the c-axis is in the x-direction. Thus, if the rf fields of the signal are linearly polarized, as in the case of the coupled-cavity maser, the interaction with the ruby is stronger if the rf magnetic field is predominantly in the x-direction rather than the y-direction. The value of $T_{12}$ with $H_{rf} = (1, 0, 0)$ is 1.1525. The value of $T_{12}$ with $H_{rf} = (0, 1, 0)$ is 0.4282. Thus, the advantage is 2.69. Therefore, elongating the cavity in the x-direction will increase the coupling with the rf magnetic field. From this we can also see that rf magnetic fields in the z-direction, along the applied static magnetic field, are ineffective in inducing transitions.
143
+
144
+ The spin vector indicates that the optimum rf field polarization is elliptical. If an rf field of unit amplitude is linearly polarized in the x-direction, then $T_{12} = 1.1524$. That is the best you can do with a linearly polarized signal. However, if the rf field has the proper elliptical polarization and is of unit amplitude, then $H_{rf} = (0.854, -0.521j, 0)$ and $T_{12} = 1.582$. There also exists an rf field polarization in this plane that does not induce a response. It is $H_{rf} = (0.521, 0.854j, 0)$.
145
+
146
+ The spin vector for the first pump transition is $\vec{S}_{13} = (0, 0, 0.4140)$. Thus, a linearly polarized field in the z-direction will be required to stimulate this transition. Therefore, the pump waveguide feeding the ruby cavity must support a 24-GHz mode whose electric field is perpendicular to the applied magnetic field. Finally, the spin vector for the second pump is $\vec{S}_{34} = (-0.7229, 1.0051j, 0)$. It is similar to the signal component, except the roles of the x- and y-directions are reversed. The value of $T_{34}$ with $H_{rf} = (1, 0, 0)$ is 0.5225. The value of $T_{34}$ with $H_{rf} = (0, 1, 0)$ is 1.0102. Now the transition probability is almost twice as strong for the linear rf field polarized in the y-direction as compared to the x-direction.
147
+
148
+ ### C. Example 3: Ka-Band Coupled-Cavity Maser
149
+
150
+ Our last example will concern the behavior of ruby as it is used in the current DSN 31.8- to 32.3-GHz (Ka-band) coupled-cavity maser. This is shown schematically in Fig. 5. A static magnetic field of 11,881 gauss is applied along the z-direction, and the ruby c-axis is oriented 54.735 degrees to this direction. The signal transition occurs between levels 2 and 3 at frequencies around 32 GHz. The spin vector for this transition is $\vec{S} = (-0.9777, 0.9786j, -0.0424)$. Therefore, for maximum transition probability, the rf magnetic field should be $H_{rf} = (0.707, -0.707j, 0.031)$. This is a circularly polarized
151
+
152
+ Fig. 5. A perspective view of a Ka-band coupled-cavity maser. The cavities are drawn for illustrative purposes only; they are not to scale.
153
+ ---PAGE_BREAK---
154
+
155
+ signal in the x-y plane. For this reason, the orientation of the c-axis in azimuth is not important. The c-axis can lie anywhere on a cone at 54.735 degrees to the applied field without affecting the signal transition probability.
156
+
157
+ Two pump transitions typically are used for this operating point. The first pump between levels 1 and 3 occurs at 66.25 GHz. The spin vector for this transition is $\vec{S} = (-0.1455, 0.1519j, 0.0990)$. For maximum transition probability, the rf magnetic field should be $\vec{H}_{rf} = (0.6259, -0.6534j, -0.4259)$. This is nearly a circularly polarized signal in the x-y plane, with a significant, but smaller, component in the z-direction. For this reason, this transition normally is pumped with waveguide modes whose electric fields lie along the applied static magnetic field.
158
+
159
+ The second pump between levels 2 and 4 also occurs at 66.25 GHz. The spin vector for this transition is $\vec{S} = (-0.1289, 0.1183j, 0.0990)$. Therefore, for maximum transition probability, the rf magnetic field should be $\vec{H}_{rf} = (0.6399, -0.5873j, -0.4955)$. This is more elliptical than the first pump, but the difference between $T_{24}$ for an x-polarized rf field and a y-polarized rf field is never more than 17 percent as the c-axis is varied in azimuth. Again, the z-component is smaller than either the x- or y-component. The waveguide modes mentioned above are also used for pumping this transition. It is a fortunate situation that pump energy at the same frequency and in the same waveguide mode is effective in pumping both transitions. This is especially helpful at this operating point where the pump transitions are very weak. If $H_{rf} = (0.7071, 0.7071, 0)$, $T_{13}/T_{23} = 0.023$ and $T_{24}/T_{23} = 0.016$. This is the main reason for having the ruby cavity resonant at both the signal and pump frequencies in the coupled-cavity maser design.
160
+
161
+ ## V. Conclusion
162
+
163
+ A program has been written to calculate the ground state spectrum of ruby and the transition probability due to an rf magnetic field. This information is used in the design and analysis of masers using ruby as the active material. It is based on a Hamiltonian where the z-axis is along the static magnetic field and the x- and y-axes are chosen to simplify the expressions for the rf magnetic field. The direction of the c-axis is specified by two polar angles. It is written in the language of MATLAB and is included in Appendix A for reference purposes. A discussion of some DSN masers using the results of the program is presented.
164
+
165
+ ## References
166
+
167
+ [1] R. Berwin, *Paramagnetic Energy Levels of the Ground State of Cr<sup>+3</sup> in Al<sub>2</sub>O<sub>3</sub> (Ruby)*, Technical Memorandum 33-440, Jet Propulsion Laboratory, Pasadena, California, January 15, 1970.
168
+
169
+ [2] T. Chang, D. Foster, and A. H. Kahn, “An Intensity Standard for Electron Paramagnetic Resonance Using Chromium-Doped Corundum (Al<sub>2</sub>O<sub>3</sub>:Cr<sup>3+</sup>),” *Journal of Research of the National Bureau of Standards*, vol. 83, no. 2, pp. 133–164, March–April 1978.
170
+
171
+ [3] E. O. Schulz-Du Bois, “Paramagnetic Spectra of Substituted SAPPHIRES—Part I: Ruby,” *Bell System Technical Journal*, vol. 38, p. 271, January 1959.
172
+
173
+ [4] MATLAB, Version 5, The MathWorks, Inc., Natick, Massachusetts, copyright 1984–1998.
174
+ ---PAGE_BREAK---
175
+
176
+ [5] A. Abragam and B. Bleaney, *Electron Paramagnetic Resonance of Transition Ions*, New York: Dover Publications, Inc., 1986.
177
+
178
+ [6] A. E. Siegman, *Microwave Solid State Masers*, New York: McGraw-Hill Book Company, 1964.
179
+
180
+ [7] R. C. Clauss, "A 2388 Mc Two-Cavity Maser for Planetary Radar," *Microwave Journal*, May 1965.
181
+ ---PAGE_BREAK---
182
+
183
+ # Appendix A
184
+
185
+ ## Ruby Energy Level Program and Sample Output
186
+
187
+ The MATLAB program listing follows. Statements following a “%” are comments. (Notice that MATLAB denotes $\sqrt{-1}$ by $i$.)
188
+
189
+ * an m-file called rubylevels.m to calculate the eigenvalues
190
+
191
+ * and eigenvectors of the spin hamiltonian for ruby
192
+
193
+ * it calculates the spin vector and the transition frequencies (in GHz)
194
+
195
+ * and also the transition probabilities for a given r-f magnetic field
196
+
197
+ * Hdc is along the z-axis and the c-axis direction is unrestricted
198
+
199
+ g1=1.9817; % use the values for g1, g2 and D
200
+ g2=1.9819; % suggested by the National Bureau
201
+ D=-3.8076e-17; % of Standards
202
+ beta=9.273e-21;
203
+
204
+ h=4981 % enter the magnetic field strength
205
+ thetad=90.0 % enter the polar angle
206
+ phid=0.0 % enter the azimuthal angle
207
+ Hrf=[0.854; -0.521i; 0.0] % enter the r-f field polarization
208
+
209
+ theta=pi*(thetad/180.0); % convert polar angle to radians
210
+ phi=pi*(phid/180.0); % convert azimuthal angle to radians
211
+
212
+ % construct the spin hamiltonian
213
+ Sx=(0.5)*[0 1.732 0 0;1.732 0 2 0;0 2 0 1.732;0 0 1.732 0];
214
+ Sy=(0.5)*[0 -1.732i 0 0;1.732i 0 -2i 0;0 2i 0 -1.732i;0 0 1.732i 0];
215
+ Sz=(0.5)*[3 0 0 0;0 1 0 0;0 0 -1 0;0 0 0 -3];
216
+
217
+ Sp=Sx+i*Sy; Sm=Sx-i*Sy;
218
+ sh1=(g1*(cos(theta))^2+g2*(sin(theta))^2)*beta*h*Sz;
219
+ sh2=D*((cos(theta))^2-(0.5)*(sin(theta))^2)*(Sz^2-1.25*eye(4));
220
+ sh3=D*sin(theta)*cos(theta)*(0.5)*exp(-i*phi)*(Sz*Sp+Sp*Sz);
221
+ sh4=D*sin(theta)*cos(theta)*(0.5)*exp(i*phi)*(Sz*Sm+Sm*Sz);
222
+ sh5=D*(0.25)*(sin(theta))^2*(exp(-2*i*phi)*Sp^2+exp(2*i*phi)*Sm^2);
223
+ sh=sh1+sh2+sh3+sh4+sh5;
224
+
225
+ % calculate the eigenvectors and eigenvalues
226
+ [evect,eval]=eig(sh);
227
+
228
+ e1=eval(1,1); e2=eval(2,2); e3=eval(3,3); e4=eval(4,4);
229
+
230
+ % the eigenvector associated with the first eigenvalue is the first
231
+ % column of the matrix evect, the 2nd eigenvector is the 2nd column, etc
232
+
233
+ v1=evect(:,1); v2=evect(:,2); v3=evect(:,3); v4=evect(:,4);
234
+
235
+ % order the eigenvalues such that the most negative one is labeled e1
236
+ % and the most positive one is labeled e4, carry the eigenvectors
237
+ % along with the eigenvalues
238
+
239
+ if e1>e2
240
+ et=e1; vt=v1;
241
+ e1=e2; v1=v2;
242
+ e2=et; v2=vt;
243
+ end
244
+ ---PAGE_BREAK---
245
+
246
+ ```matlab
247
+ if e1>e3
248
+ et=e1; vt=v1;
249
+ e1=e3; v1=v3;
250
+ e3=et; v3=vt;
251
+ end
252
+
253
+ if e1>e4
254
+ et=e1; vt=v1;
255
+ e1=e4; v1=v4;
256
+ e4=et; v4=vt;
257
+ end
258
+
259
+ if e2>e3
260
+ et=e2; vt=v2;
261
+ e2=e3; v2=v3;
262
+ e3=et; v3=vt;
263
+ end
264
+
265
+ if e2>e4
266
+ et=e2; vt=v2;
267
+ e2=e4; v2=v4;
268
+ e4=et; v4=vt;
269
+ end
270
+
271
+ if e3>e4
272
+ et=e3; vt=v3;
273
+ e3=e4; v3=v4;
274
+ e4=et; v4=vt;
275
+ end
276
+ ```
277
+
278
+ % calculate and display the transition frequencies
279
+ f12=(e2-e1)/6.626e-18, f13=(e3-e1)/6.626e-18, f14=(e4-e1)/6.626e-18,
280
+ f23=(e3-e2)/6.626e-18, f24=(e4-e2)/6.626e-18, f34=(e4-e3)/6.626e-18,
281
+
282
+ % calculate and display the spin vectors
283
+ S12=[v2'*Sx*v1; v2'*Sy*v1; v2'*Sz*v1]
284
+ S13=[v3'*Sx*v1; v3'*Sy*v1; v3'*Sz*v1]
285
+ S14=[v4'*Sx*v1; v4'*Sy*v1; v4'*Sz*v1]
286
+ S23=[v3'*Sx*v2; v3'*Sy*v2; v3'*Sz*v2]
287
+ S24=[v4'*Sx*v2; v4'*Sy*v2; v4'*Sz*v2]
288
+ S34=[v4'*Sx*v3; v4'*Sy*v3; v4'*Sz*v3]
289
+
290
+ %display the "transition probabilities" for the rf signal
291
+ T12=(Hrf'*S12)*(Hrf'*S12)', T13=(Hrf'*S13)*(Hrf'*S13)',
292
+ T14=(Hrf'*S14)*(Hrf'*S14)', T23=(Hrf'*S23)*(Hrf'*S23)',
293
+ T24=(Hrf'*S24)*(Hrf'*S24)', T34=(Hrf'*S34)*(Hrf'*S34)'
294
+
295
+ The sample output follows. The user specifies the values of h, thetad, phid, and Hrf. The program determines the frequencies, spin vectors, and transition probabilities. The numbers 1,2,3,4 identify the quantum states, with 1 being the lowest energy state and 4 being the highest.
296
+
297
+ h = 4981
298
+ thetad = 90
299
+ phid = 0
300
+ Hrf = 0.8540
301
+ 0 - 0.5210i
302
+ 0
303
+ ---PAGE_BREAK---
304
+
305
+ $$f_{12} = 8.4214$$
306
+
307
+ $$f_{13} = 24.0415$$
308
+
309
+ $$f_{14} = 43.2512$$
310
+
311
+ $$f_{23} = 15.6201$$
312
+
313
+ $$f_{24} = 34.8298$$
314
+
315
+ $$f_{34} = 19.2097$$
316
+
317
+ $$S_{12} = -1.0735$$
318
+
319
+ $$0 + 0.6544i$$
320
+
321
+ $$0$$
322
+
323
+ $$S_{13} = 0$$
324
+
325
+ $$0$$
326
+
327
+ $$0.4140$$
328
+
329
+ $$S_{14} = -0.0287$$
330
+
331
+ $$0 + 0.0899i$$
332
+
333
+ $$0$$
334
+
335
+ $$S_{23} = -0.9078$$
336
+
337
+ $$0 + 1.0264i$$
338
+
339
+ $$0$$
340
+
341
+ $$S_{24} = 0$$
342
+
343
+ $$0$$
344
+
345
+ $$0.2858$$
346
+
347
+ $$S_{34} = -0.7229$$
348
+
349
+ $$0 + 1.0051i$$
350
+
351
+ $$0$$
352
+
353
+ $$T_{12} = 1.5819$$
354
+
355
+ $$T_{13} = 0$$
356
+
357
+ $$T_{14} = 0.0051$$
358
+
359
+ $$T_{23} = 1.7160$$
360
+
361
+ $$T_{24} = 0$$
362
+
363
+ $$T_{34} = 1.3018$$
364
+ ---PAGE_BREAK---
365
+
366
+ # Appendix B
367
+
368
+ ## Derivation of the Hamiltonian Used in Equation (2)
369
+
370
+ The reader may be convinced of the equivalence of Eqs. (1) and (2) in the following way. First, Eq. (1) is expressed in spherical coordinates. This gives the result
371
+
372
+ $$H_s = g_1\beta H \cos\theta S_z + g_2\beta H (\sin\theta \cos\varphi S_x + \sin\theta \sin\varphi S_y) + D \left[S_z^2 - \frac{1}{3}S(S+1)\right] \quad (B-1)$$
373
+
374
+ Then the coordinate system is rotated three times. First the coordinate system is rotated about the z-axis by an angle $\varphi$ until the static magnetic field is in the $x'-z'$ plane. Then the coordinate system is rotated by an angle $-\theta$ about the y'-axis until the dc magnetic field is along the $z''$-direction. Finally, the coordinate system is rotated about the $z''$-axis by the angle $(\pi - \varphi)$. The rotation matrix relating the unprimed coordinates and the triple-primed coordinates is the product of the three rotation matrices:
375
+
376
+ $$\begin{bmatrix} x \\ y \\ z \end{bmatrix} = \begin{bmatrix} \cos \varphi & -\sin \varphi & 0 \\ \sin \varphi & \cos \varphi & 0 \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} \cos \theta & 0 & \sin \theta \\ 0 & 1 & 0 \\ -\sin \theta & 0 & \cos \theta \end{bmatrix} \begin{bmatrix} -\cos \varphi & -\sin \varphi & 0 \\ \sin \varphi & -\cos \varphi & 0 \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} x''' \\ y''' \\ z''' \end{bmatrix}$$
377
+
378
+ Now we use the rather remarkable fact that the spin matrices transform just like the components of a vector. Thus, the relationship between the unprimed spin operators and the triple-primed spin operators is the same as the above relationship between the coordinates. Thus, we can write
379
+
380
+ $$\begin{bmatrix} S_x \\ S_y \\ S_z \end{bmatrix} = \begin{bmatrix} -\cos\theta\cos^2\varphi - \sin^2\varphi & -\sin\varphi\cos\varphi\cos\theta + \sin\varphi\cos\varphi & \sin\theta\cos\varphi \\ -\cos\theta\sin\varphi\cos\varphi + \sin\varphi\cos\varphi & -\cos\theta\sin^2\varphi - \cos^2\varphi & \sin\theta\sin\varphi \\ \sin\theta\cos\varphi & \sin\theta\sin\varphi & \cos\theta \end{bmatrix} \begin{bmatrix} S_{x'''} \\ S_{y'''} \\ S_{z'''} \end{bmatrix}$$
381
+
382
+ Expressing the spin operators $S_x, S_y, S_z$ in Eq. (B-1) in terms of $S_{x'''}$, $S_{y'''}$, $S_{z'''}$ leads to Eq. (2), where the triple primes have been dropped. Equation (2) neglects Zeeman terms involving differences between $g_1$ and $g_2$.
samples/texts_merged/3603622.md ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Microscopic study of low-lying yrast spectra and deformation systematics in neutron-rich 98-106Sr isotopes
5
+
6
+ ANIL CHANDAN, SURAM SINGH, ARUN BHARTI* and S K KHOSA
7
+
8
+ Department of Physics, University of Jammu (J&K), Jammu 180 006, India
9
+
10
+ *Corresponding author. E-mail: arunbharti_2003@yahoo.co.in
11
+
12
+ MS received 15 January 2009; revised 7 May 2009; accepted 23 May 2009
13
+
14
+ **Abstract.** Variation-after-projection (VAP) calculations in conjunction with Hartree-Bogoliubov (HB) ansatz have been carried out for A = 98-106 strontium isotopes. In this framework, the yrast spectra with $J^{\pi} \ge 10^{+}$, $B(E2)$ transition probabilities, quadrupole deformation parameter and occupation numbers for various shell model orbits have been obtained. The results of the calculation for yrast spectra give an indication that it is important to include the hexadecapole-hexadecapole component of the two-body interaction for obtaining various nuclear structure quantities in Sr isotopes. Besides this, it is also found that the simultaneous polarization of $p_{3/2}$ and $f_{5/2}$ proton subshells is a significant factor in making a sizeable contribution to the deformation in neutron-rich Sr isotopes.
15
+
16
+ **Keywords.** Nuclear structure of 98-106Sr; variation-after-projection (VAP) calculations; calculated levels; $B(E2)$ transition probabilities; quadrupole $\beta_2$ deformation parameter.
17
+
18
+ PACS Nos 21.60.-n; 21.60.Jz; 27.60.+j
19
+
20
+ ## 1. Introduction
21
+
22
+ The existence of a large deformation in the neutron-rich nuclei in the mass region A = 100 was established by Cheifetz et al [1]. Since then considerable effort has gone in understanding the properties of this region. It has been observed that neutron-rich isotopes with N ≥ 60 and A ≈ 100 are characterized by strong axial deformations. Quadrupole deformation of β = 0.4 has been deduced for 98Sr and 100Sr from the lifetimes of the first excited states and from mean square radii measured by collinear laser spectroscopy [2-6]. According to these results, ground state deformation remains constant after its sudden onset at N = 60. This trend even could continue at larger neutron numbers. The recent development in experimental techniques like the decay of on-line mass separated 98Rb to 98Sr by γ-spectroscopy [7] make attractive a compilation of some of the more general features of the structure of doubly even neutron-rich Sr isotopes. The 98Sr nucleus is well deformed.
23
+ ---PAGE_BREAK---
24
+
25
+ Anil Chandan et al
26
+
27
+ The ground state band of $^{98}$Sr, in particular, exhibits excellent rotational properties with a large and rigid moment of inertia. $^{98}$Sr is predicted to have a well-deformed prolate ground state. The levels in $^{100}$Sr were first observed in a β-decay study of $^{100}$Rb by Azuma *et al* [8] who identified the $4^+ \rightarrow 2^+ \rightarrow 0^+$ cascade and performed the first lifetime measurement of the $2^+$ state, thereby, establishing large deformations. Further members of the ground state band upto $I^\pi = 10^+$ were identified in prompt-fission studies [9]. Sometime back, evidence for the $2^+$ level in $^{102}$Sr was observed from the decay study of $^{102}$Rb mass separated at the CERN-ISOLDE Facility [10]. It has been recently predicted [11] that $^{102}$Sr is a strongly deformed nucleus with properties close to the rotational limit. Presently, the $^{102}$Sr nucleus is the most deformed neutron-rich even-even isotope in the Sr region.
28
+
29
+ From the systematics of the $2^+$ states in neutron-rich Sr isotopes, one observes large decrease in $E_2^+$ energy as neutron number N changes from 58 to 60. The onset of deformation in this region for Sr is the most abrupt known for even-even nuclei, as evidenced by the fact that the $2^+$ energy decreases by a factor of 5.7 as N increases from 58 to 60 [12]. Besides this, it is also observed that the energy of the $2^+$ state decreases from 0.144 MeV in $^{98}$Sr to 0.126 MeV in $^{102}$Sr giving an indication that there is an increase in the degree of deformation as one moves from $^{98}$Sr to $^{102}$Sr. The experimental data for $^{104-106}$Sr are not available. From the observed data, it is clear that $^{102}$Sr is the most deformed nucleus in the Sr region.
30
+
31
+ A microscopic explanation for the onset of deformation at $N=60$ has been given by Federman and Pittel [13]. They argued that the strong attractive n-p interaction between $(g_{7/2})_\nu$ and $(g_{9/2})_\pi$ spin-orbit partner (SOP) orbitals is the underlying cause of the unusual characteristics. The realization of large deformation requires that the spin-orbit partner orbitals lie near the Fermi surface, both prior to and after the onset of deformation. There is another school of thought put forth by mean-field theorists [14,15] who have assigned the development of large deformation in the $A=100$ region to the occupation of low k-components of the $(h_{11/2})_\nu$ orbit. They find their mean field calculations indicating the appearance of the $k=1/2$ component of the $(h_{11/2})_\nu$ orbit at the Fermi surface in $^{100-102}$Sr. It was shown by the authors in [16–19] that the phenomenological pairing plus quadrupole-quadrupole (PQ) model of the two-body interaction is highly reliable in this mass region. It was shown by Khosa and Sharma [19] that two-body effective interactions have a dominantly quadrupole-quadrupole character and the deformation-producing tendency of neutron-proton (n-p) and like-particle interactions depends upon the degeneracy of the underlying single-particle valence space. One of the natural choices for the two-body residual interaction would, therefore, be pairing plus quadrupole-quadrupole (PQ). It turns out from the calculated values of energy spectra obtained with PQ interaction that the agreement with experiment is not satisfactory. It becomes, therefore, necessary to add a correction term to the PQ interaction in the form of a hexadecapole-hexadecapole interaction, which hereafter will be denoted as PQH interaction.
32
+
33
+ The purpose of the present work is to know whether the PQ model of two-body interaction can further be modified to produce better results in agreement with the experiments. We have, thus, examined the available yrast spectra in deformed neutron-rich Sr isotopes with $A=98-106$ in the framework of variation-after-projection (VAP) technique in conjunction with the HB ansatz for the trial wave
34
+ ---PAGE_BREAK---
35
+
36
+ *Neutron-rich* $^{98-106}$Sr isotopes
37
+
38
+ functions resulting from PQH interaction. The deformed Hartree–Fock–Bogoliubov state of the nucleus is generated using a phenomenological PQH interaction with $ ^{56}\text{Ni} $ as the core.
39
+
40
+ The VAP prescription selects an optimum intrinsic state for each yrast level through a minimization of the expectation value of the Hamiltonian with respect to the states characterized by a definite angular momentum. Our VAP calculations performed with PQH model of two-body interaction show a marked improvement in agreement with the experimentally observed yrast spectra as compared to the yrast spectra obtained with the PQ interaction. The results obtained for B(E2) transition probabilities and quadrupole deformation parameter ($\beta_2$) are also found to be in reasonably good agreement with the experiments.
41
+
42
+ ## 2. Calculational details
43
+
44
+ ### 2.1 *The one- and two-body parts of Hamiltonian*
45
+
46
+ In our calculations presented here, we have employed the valence space spanned by $3s_{1/2}$, $2p_{1/2}$, $2p_{3/2}$, $2d_{3/2}$, $2d_{5/2}$, $1f_{5/2}$, $1g_{7/2}$, $1g_{9/2}$ and $1h_{11/2}$ orbits for protons and neutrons under the assumption of $N = Z = 28$ subshell closure. The single-particle energies (SPEs) that we have taken are (in MeV): $(3s_{1/2}) = 9.90$, $(2p_{1/2}) = 1.08$, $(2p_{3/2}) = 0.0$, $(2d_{3/2}) = 11.40$, $(2d_{5/2}) = 8.90$, $(1f_{5/2}) = 0.78$, $(1g_{7/2}) = 11.90$, $(1g_{9/2}) = 3.50$ and $(1h_{11/2}) = 12.90$. The energy values of single-particle orbits for $2p-1f-1g$ levels are the same as employed for the $ ^{56}\text{Ni} $ core plus one nucleon. The energies of higher single-particle valence orbits are the same as used by Vergados and Kuo [20] relative to the $1g_{9/2}$ valence orbit.
47
+
48
+ The two-body effective interaction that has been employed is of PQH type. The parameters of PQ part of the two-body interaction are also the same as used by Sharma *et al* [16]. The relative magnitudes of the parameters of the hexadecapole-hexadecapole parts of the two-body interaction were calculated from a relation suggested by Bohr and Mottelson [21]. According to them, the approximate magnitude of these coupling constants for isospin $T=0$ is given by
49
+
50
+ $$ \chi_{\lambda} = \frac{4\pi}{2\lambda + 1} \frac{m\omega_0^2}{A \langle r^{2\lambda-2} \rangle} \quad \text{for } \lambda = 1, 2, 3, 4 \qquad (1) $$
51
+
52
+ and the parameters for $T=1$ are approximately half the magnitude of their $T=0$ counterparts. This relation was used to calculate the values of $\chi_{pp4}$ relative to $\chi_{pp}$ by generating the wave function for strontium isotopes and then calculating the values of $\langle r^{2\lambda-2} \rangle$ for $\lambda=2$ and $4$.
53
+
54
+ The values for hexadecapole-hexadecapole part of the two-body interaction turn out to be
55
+
56
+ $$ \chi_{pp4}(\chi_{nn4}) = -0.00033 \text{ MeV b}^{-8} \quad \text{and} \quad \chi_{pn4} = -0.00066 \text{ MeV b}^{-8}. $$
57
+ ---PAGE_BREAK---
58
+
59
+ Anil Chandan et al
60
+
61
+ ## 2.2 Projection of states of good angular momentum from axially-symmetric HB intrinsic states
62
+
63
+ The procedure for obtaining the axially symmetric HB intrinsic states has been discussed in ref. [22].
64
+
65
+ ## 2.3 The variation-after-angular-momentum projection (VAP) method
66
+
67
+ The VAP calculations have been carried out as follows: We first generated the self-consistent HB solutions, $\Phi(\beta)$, by carrying out the HB calculations with the Hamiltonian $(H - \beta Q_0^2)$, where $\beta$ is a variational parameter. The selection of the optimum intrinsic states, $\Phi_{\text{opt}}(\beta_J)$, is then made by finding out the minimum of the projected energy
68
+
69
+ $$E_J(\beta) = \langle \Phi(\beta) | H P_{00}^J | \Phi(\beta) \rangle / \langle \Phi(\beta) | P_{00}^J | \Phi(\beta) \rangle \quad (3)$$
70
+
71
+ as a function of $\beta$. In other words, the optimum intrinsic state for each yrast $J$ satisfies the condition
72
+
73
+ $$\partial/\partial\beta[\langle\Phi(\beta)|HP_{00}^J|\Phi(\beta)\rangle/\langle\Phi(\beta)|P_{00}^J|\Phi(\beta)\rangle]|_{\beta=\beta_J} = 0. \quad (4)$$
74
+
75
+ # 3. Deformation systematics of Sr isotopes
76
+
77
+ From the systematics of 2+ states in $^{98-102}$Sr, it is observed that the energy of 2+ states decreases from 0.144 MeV in $^{98}$Sr to 0.129 MeV in $^{100}$Sr giving an indication that there is an increase in the degree of deformation as we move from $^{98}$Sr to $^{100}$Sr. This fact is also confirmed by the increase in the ratio $E_4^+/E_2^+$. The value of this ratio for $^{98}$Sr is 3.00 whereas its value for $^{100}$Sr is 3.23. For a rotational nucleus, the value of this ratio is 3.33. Besides this, it is observed that the 2+ state does not change much as we move from $^{100}$Sr to $^{102}$Sr and it changes only marginally by a factor of 0.003 MeV. This is indicative of the fact that if asymptotic deformation has taken place in $^{102}$Sr, there is very little chance of increasing deformation thereafter. This fact is also indicated by a small change in the value of the ratio $E_4^+/E_2^+$ from $^{100}$Sr to $^{102}$Sr. The value of this ratio changes from 3.23 for $^{100}$Sr to the recently predicted value of 3.31 for $^{102}$Sr [11]. Phenomenologically, it is well known that a nucleus having a smaller value of 2+ energy should have a larger deformation. Since $Q_2^+$ of a nucleus is directly related to its intrinsic quadrupole moment, one should, therefore, expect that a smaller energy value for 2+ state should manifest itself in terms of a larger value for the ratio of intrinsic quadrupole moment to the maximum possible intrinsic quadrupole moment for that nucleus in the SU(3) limit ($(Q_0^2)_{\text{HB}}/\langle Q_0^2 \rangle_{\text{max}}$) denoted hereafter as RQ and vice versa. (The SU(3) limit of the quadrupole moment for a particular nucleus in the HB framework is calculated by putting all the SPEs of the valence orbits equal to zero and thereby allowing the Nilsson orbits to fill up in the increasing order of quadrupole moment.) In other words, the observed systematics of $E_2^+$ with A should produce a corresponding
78
+ ---PAGE_BREAK---
79
+
80
+ Neutron-rich $^{98-106}$Sr isotopes
81
+
82
+ **Table 1.** The experimental values of excitation energy of the $E_2^+$ state in MeV, proton ($\langle Q_0^2 \rangle_\pi$) and neutron ($\langle Q_0^2 \rangle_\nu$) intrinsic quadrupole moments, ratio (RQ) of intrinsic quadrupole moment ($\langle Q_0^2 \rangle_{HB}$) to the maximum possible value ($\langle Q_0^2 \rangle_{max}$) and the ratio $E_4^+/E_2^+$ for $^{98-106}$Sr isotopes obtained with PQH interaction. The quadrupole moments have been computed in units of b$^2$, where $b = \sqrt{\hbar/m\omega}$ is the oscillator parameter.
83
+
84
+ <table><thead><tr><th>Sr<br>nuclei<br>(A)</th><th>E<sub>2</sub><sup>+</sup><br>(exp.)*</th><th>&lang;Q<sub>0</sub><sup>2</sup>&rang;<sub>&pi;</sub></th><th>&lang;Q<sub>0</sub><sup>2</sup>&rang;<sub>&nu;</sub></th><th>&lang;Q<sub>0</sub><sup>2</sup>&rang;<sub>HB</sub></th><th>&lang;Q<sub>0</sub><sup>2</sup>&rang;<sub>max</sub></th><th>RQ</th><th>E<sub>4</sub><sup>+</sup>/E<sub>2</sub><sup>+</sup><br>(exp.)</th></tr></thead><tbody><tr><td>98</td><td>0.144</td><td>35.18</td><td>36.44</td><td>71.62</td><td>118.10</td><td>0.60</td><td>3.00*</td></tr><tr><td>100</td><td>0.129</td><td>35.55</td><td>38.17</td><td>73.72</td><td>115.17</td><td>0.64</td><td>3.23*</td></tr><tr><td>102</td><td>0.126</td><td>35.47</td><td>38.83</td><td>74.30</td><td>110.04</td><td>0.67</td><td>3.31**</td></tr><tr><td>104</td><td>-</td><td>35.19</td><td>39.26</td><td>74.45</td><td>104.19</td><td>0.71</td><td>-</td></tr><tr><td>106</td><td>-</td><td>34.85</td><td>39.86</td><td>74.71</td><td>98.17</td><td>0.76</td><td>-</td></tr></tbody></table>
85
+
86
+ *Data taken from refs [7–10,31,32].
87
+
88
+ **Data taken from ref. [11].
89
+
90
+ inverse systematics of this ratio of quadrupole moments for the $^{98-106}$Sr with increasing A. Based on the above logic, the calculated values of this ratio of intrinsic quadrupole moment should therefore exhibit an increase in its value as we move from $^{98}$Sr to $^{102}$Sr and thereafter, it should show very small increase which could be indicative of the asymptotic onset of deformation in heavy Sr isotopes. In table 1, the results of HB calculations are presented. Note that the ratio RQ increases from 0.60 to 0.76 as we move from $^{98}$Sr to $^{106}$Sr.
91
+
92
+ We next focus our attention on the factors that could be responsible for the deformation of neutron-rich Sr isotopes. In this regard, it is important to discuss and highlight some of the well-accepted factors responsible for bringing sizeable collectivity in nuclei in the same mass region. It is generally felt that the neutron-proton (np) effective interactions possess a deformation producing tendency and the neutron-neutron (nn) or proton-proton (pp) effective interactions are mostly of spherifying nature [23–28]. These ideas have played a pivotal role in the development of the stretch scheme [26] of Danos and Gillet, the rotor model [27] of Arima and Gillet and the interacting boson model of Arima et al [28]. In this regard, the role of np interaction in spin-orbit partner (SOP) orbits in the context of general development of collective features was also suggested by Federman and Pittel [23] and Casten et al [29]. Their calculations provided evidence suggesting that np interaction between the valence nucleons in the SOP orbits – the orbits $(g_{9/2})_\pi$ and $(g_{7/2})_\nu$ – may be instrumental vis-à-vis the observed onset of deformation in Sr isotopes with $A \ge 100$. It may also be pointed out that the role of np interaction between the SOP orbits in producing deformation depends critically on the relative occupation probability of $(g_{9/2})_\pi$ and $(g_{7/2})_\nu$ orbits [30].
93
+
94
+ As is evident from the results presented in table 1, the deformation appearing in heavy Sr isotopes is 60% in $^{98}$Sr and 76% in $^{106}$Sr of the maximum possible deformation in these isotopes. This is indicated by the fact that RQ values change from 0.60 to 0.76 as we move from $^{98}$Sr to $^{106}$Sr. In order to understand how
95
+ ---PAGE_BREAK---
96
+
97
+ Anil Chandan et al
98
+
99
+ **Table 2.** The subshell occupation numbers (protons) in the $^{98-106}$Sr nuclei with PQH interaction.
100
+
101
+ <table><thead><tr><th rowspan="2">Sr<br>nuclei<br>(A)</th><th colspan="9">Subshell occupation number</th></tr><tr><th>3s<sub>1/2</sub></th><th>2p<sub>1/2</sub></th><th>2p<sub>3/2</sub></th><th>2d<sub>3/2</sub></th><th>2d<sub>5/2</sub></th><th>1f<sub>5/2</sub></th><th>1g<sub>7/2</sub></th><th>1g<sub>9/2</sub></th><th>1h<sub>11/2</sub></th></tr></thead><tbody><tr><td>98</td><td>0.10</td><td>0.55</td><td>2.26</td><td>0.06</td><td>0.68</td><td>3.17</td><td>0.04</td><td>3.09</td><td>0.00</td></tr><tr><td>100</td><td>0.11</td><td>0.58</td><td>2.24</td><td>0.06</td><td>0.75</td><td>3.18</td><td>0.04</td><td>3.03</td><td>0.00</td></tr><tr><td>102</td><td>0.10</td><td>0.59</td><td>2.22</td><td>0.05</td><td>0.76</td><td>3.17</td><td>0.03</td><td>3.03</td><td>0.00</td></tr><tr><td>104</td><td>0.08</td><td>0.61</td><td>2.22</td><td>0.03</td><td>0.77</td><td>3.17</td><td>0.03</td><td>3.07</td><td>0.00</td></tr><tr><td>106</td><td>0.06</td><td>0.63</td><td>2.21</td><td>0.02</td><td>0.76</td><td>3.16</td><td>0.02</td><td>3.12</td><td>0.00</td></tr></tbody></table>
102
+
103
+ **Table 3.** The subshell occupation numbers (neutrons) in the $^{98-106}$Sr nuclei with PQH interaction.
104
+
105
+ <table><thead><tr><th rowspan="2">Sr<br>nuclei<br>(A)</th><th colspan="9">Subshell occupation number</th></tr><tr><th>3s<sub>1/2</sub></th><th>2p<sub>1/2</sub></th><th>2p<sub>3/2</sub></th><th>2d<sub>3/2</sub></th><th>2d<sub>5/2</sub></th><th>1f<sub>5/2</sub></th><th>1g<sub>7/2</sub></th><th>1g<sub>9/2</sub></th><th>1h<sub>11/2</sub></th></tr></thead><tbody><tr><td>98</td><td>0.73</td><td>1.99</td><td>3.98</td><td>1.32</td><td>3.04</td><td>5.97</td><td>2.21</td><td>9.80</td><td>2.92</td></tr><tr><td>100</td><td>0.82</td><td>1.99</td><td>3.98</td><td>1.43</td><td>3.05</td><td>5.97</td><td>2.76</td><td>9.81</td><td>3.70</td></tr><tr><td>102</td><td>0.94</td><td>1.99</td><td>3.98</td><td>1.55</td><td>4.01</td><td>5.97</td><td>3.27</td><td>9.84</td><td>4.40</td></tr><tr><td>104</td><td>1.06</td><td>1.99</td><td>3.98</td><td>1.68</td><td>4.49</td><td>5.99</td><td>3.78</td><td>9.88</td><td>5.14</td></tr><tr><td>106</td><td>1.14</td><td>1.99</td><td>3.99</td><td>1.79</td><td>4.84</td><td>5.99</td><td>4.24</td><td>9.92</td><td>6.10</td></tr></tbody></table>
106
+
107
+ this deformation arises, we present in tables 2 and 3, the results of occupation probabilities of various proton and neutron subshells. These results have been obtained using PQH interaction.
108
+
109
+ From table 2, it is observed that $p_{1/2}$, $p_{3/2}$, $f_{5/2}$ proton subshells are partially filled. The polarization of these subshells could be one of the important factors contributing to the appearance of deformation in Sr isotopes. Secondly, it is observed from this table that $(g_{9/2})_\pi$ occupation is sizeable and from table 3, one notices that there are neutrons in $g_{7/2}$ subshell. Thus, there is an opportunity for neutron-proton (np) interaction in spin-orbit partner (SOP) orbits – the orbits $(g_{9/2})_\pi$ and $(g_{7/2})_\nu$ – in this case, to operate. As pointed out by Federman and Pittel [13,23], this factor could also lead to deformation in heavy Sr isotopes. From table 3, we also notice that low k-components of $(h_{11/2})_\nu$ orbits are occupied in $^{98}$Sr to $^{106}$Sr. Since these low k-components are sharply downsloping, their occupation could also lead to large deformation in these isotopes. This has been claimed to be the mechanism behind large onset of deformation in Sr isotopes by mean field theorists [14,15]. From the above discussion, it is evident that there are three factors responsible for the deformation in $^{98}$Sr to $^{106}$Sr. The first factor is the polarization of $2p_{1/2}$, $2p_{3/2}$ and $2f_{5/2}$ proton subshells. Because of this polarization, the protons tend to occupy $1g_{9/2}$ proton orbit which makes it possible for np interaction to operate between SOP orbits – the $(g_{9/2})_\pi$ and $(g_{7/2})_\nu$ orbits in the present context – as
110
+ ---PAGE_BREAK---
111
+
112
+ Neutron-rich $^{98-106}$Sr isotopes
113
+
114
+ there are already neutrons in ($g_{7/2}$) orbit. Besides this, the increasing trend in the occupation probability of $(1h_{11/2})\nu$ reinforces the development of deformation as we move from $^{98}$Sr to $^{106}$Sr. It may be noted that $(h_{11/2})\nu$ orbit is nearly half-filled in $^{106}$Sr making maximum contribution to the quadrupole moment.
115
+
116
+ 4. Energy spectra in $^{98-106}$Sr
117
+
118
+ Now to test the reliability and efficiency of HB calculations performed with PQH model of two-body interaction, it is important to obtain satisfactory agreement for the yrast spectra. A projection calculation for the energy spectra of $^{98-106}$Sr has been carried out by employing the phenomenological PQ and PQH models of two-body interaction in the following manner:
119
+
120
+ Starting from the Hamiltonian ($H - \beta Q_0^2$), HB intrinsic state was obtained for a number of values of variational parameter ($\beta$) for each Sr isotope. From these intrinsic states, even spin and even parity angular momentum states were projected out. Then, the lowest energy value ($E_J^+$) corresponding to each angular momentum state ($J^+$) is collected to obtain the yrast spectra in each Sr nucleus.
121
+
122
+ In figures 1a and 1b, the yrast spectra for $^{98-106}$Sr is displayed. The spectra corresponding to Th.1 is obtained when PQ model of interaction is employed, whereas the spectra corresponding to Th.2 is obtained when PQH model of interaction is employed. It may be noted that the spectra corresponding to Th.2 is in satisfactory agreement with the experiment [31,32] and there is lot of improvement as we go from the spectra corresponding to Th.1 to the spectra corresponding to Th.2 when compared with the observed spectra. For example, in the case of $^{98}$Sr, the observed yrast $8^+$, $10^+$ and $12^+$ energy states are having energies 1.43 MeV, 2.12 MeV and 2.93 MeV respectively and the Th.1 spectra give the corresponding energy values as 3.30 MeV, 4.90 MeV and 6.80 MeV, which are sharply in disagreement with the observed yrast states. However, the projection calculations presented under Th.2 spectra give the energy values as 1.30 MeV, 2.00 MeV and 2.80 MeV for the yrast $8^+$, $10^+$ and $12^+$ states respectively. It is, therefore, very obvious that the energy values calculated under Th.2 are in satisfactory agreement with the observed energy values. The same trend is also observed for $^{100}$Sr isotope. We can, therefore, make a comment that the study of yrast spectra in $^{98-100}$Sr indicates that PQH model of interaction is an improvement over the PQ model of interaction in the case of Sr isotopes. Since the experimental spectra for $^{102-106}$Sr is not available (only upto $2^+$ state in $^{102}$Sr is available), it turns out that the different yrast states predicted by the Th.2 calculations for $^{102}$Sr, $^{104}$Sr and $^{106}$Sr will serve as a motivation for the experimentalists to look for these states in $^{102-106}$Sr. It may be noted that the calculations of spectra are carried out for the entire set of the $^{98-106}$Sr isotopes with a single set of input parameters. 
In table 4, the values of the variational parameter ($\beta$), corresponding to which the yrast spectra for Th.2 has been obtained, are presented.
123
+ ---PAGE_BREAK---
124
+
125
+ Figure 1. (a) Experimental and theoretical low-lying yrast spectra for $^{98-102}$Sr nuclei. (b) Theoretical low-lying yrast spectra for $^{104-106}$Sr nuclei.
126
+ ---PAGE_BREAK---
127
+
128
+ Neutron-rich $^{98-106}$Sr isotopes
129
+
130
+ **Table 4.** Values of the variational parameter ($\beta$) and spins ($I^+$) corresponding to which the yrast spectra for Th.2 has been obtained in $^{98-106}$Sr.
131
+
132
+ <table><thead><tr><th>Nucleus</th><th>Spins (I<sup>+</sup>)</th><th>Variational parameter (β)</th></tr></thead><tbody><tr><td rowspan="3"><sup>98</sup>Sr</td><td>0<sup>+</sup> → 6<sup>+</sup></td><td>0.0</td></tr><tr><td>8<sup>+</sup> → 10<sup>+</sup></td><td>0.15</td></tr><tr><td>12<sup>+</sup> → 16<sup>+</sup></td><td>0.20</td></tr><tr><td rowspan="3"><sup>100</sup>Sr</td><td>0<sup>+</sup> → 8<sup>+</sup></td><td>0.0</td></tr><tr><td>10<sup>+</sup> → 12<sup>+</sup></td><td>0.10</td></tr><tr><td>14<sup>+</sup> → 16<sup>+</sup></td><td>0.15</td></tr><tr><td rowspan="3"><sup>102</sup>Sr</td><td>0<sup>+</sup> → 8<sup>+</sup></td><td>0.0</td></tr><tr><td>10<sup>+</sup> → 14<sup>+</sup></td><td>0.15</td></tr><tr><td>16<sup>+</sup></td><td>0.20</td></tr><tr><td rowspan="3"><sup>104</sup>Sr</td><td>0<sup>+</sup> → 4<sup>+</sup></td><td>0.0</td></tr><tr><td>6<sup>+</sup> → 12<sup>+</sup></td><td>0.10</td></tr><tr><td>14<sup>+</sup> → 16<sup>+</sup></td><td>0.15</td></tr><tr><td rowspan="3"><sup>106</sup>Sr</td><td>0<sup>+</sup> → 8<sup>+</sup></td><td>0.0</td></tr><tr><td>10<sup>+</sup> → 12<sup>+</sup></td><td>0.10</td></tr><tr><td>14<sup>+</sup> → 16<sup>+</sup></td><td>0.15</td></tr></tbody></table>
133
+
134
+ **5. Systematics of the calculated values of E2 transition probabilities in Sr isotopes**
135
+
136
+ The reliability and goodness of the HB wave function is also examined by calculating the *B(E2)* values. In table 5, the calculated values of *E2* transition probabilities between the states *E*<sub>*J*</sub> and *E*<sub>*J*+2</sub> are presented. The calculated values are expressed in parametric form in terms of the proton (*e*<sub>p</sub>) and neutron (*e*<sub>n</sub>) effective charges, such that *e*<sub>p</sub> = 1 + *e*<sub>eff</sub> and *e*<sub>n</sub> = *e*<sub>eff</sub>, and have been obtained through a rigorous projection calculation. The *B(E2: J*<sub>*i*</sub><sup>+</sup> → J*<sub>*f*</sub>*<sup>+</sup>)* values have been calculated in units of *e*<sub>p</sub>*b*<sub>*n*</sub><sup>2</sup> (where b<sub>n</sub> stands for barn, 1 barn = 10<sup>-28</sup> m<sup>2</sup>). The results indicate that by choosing *e*<sub>eff</sub> = 0.25, a good agreement with the observed values for *B(E2: 0<sup>+</sup> → 2<sup>+</sup>)* transition probabilities is obtained for <sup>98-100</sup>Sr nuclei. For example, in <sup>98</sup>Sr, the calculated value of *B(E2: 0<sup>+</sup> → 2<sup>+</sup>)* is 1.41 units and experimental value is 1.28(39) units. Similarly for <sup>100</sup>Sr, the calculated and observed values of *B(E2: 0<sup>+</sup> → 2<sup>+</sup>)* are 1.34 units and 1.42(8) units respectively. The experimental data for the higher transitions in <sup>98-100</sup>Sr is not available but we have calculated the data for the higher transitions, upto 8<sup>+</sup> → 10<sup>+</sup>, also. Similarly, the experimental data for any of the transitions in <sup>102-106</sup>Sr is not available but we have also calculated the data upto 8<sup>+</sup> → 10<sup>+</sup> transitions, corresponding to the same effective charge as used for <sup>98,100</sup>Sr, in these nuclei.
137
+
138
+ From the comparison of the calculated *B(E2)* values with the experimental values [34] for the 0<sup>+</sup> → 2<sup>+</sup> transitions in <sup>98-100</sup>Sr, it is satisfactory to note that the calculated *B(E2)* values are in good agreement with the experiments. Since the
139
+ ---PAGE_BREAK---
140
+
141
+ Anil Chandan et al
142
+
143
+ **Table 5.** The reduced transition probabilities for E2 transitions for the yrast levels in the nuclei $^{98-106}$Sr. Here $e_p(e_n)$ denotes the effective charge for protons (neutrons). The entries presented in the third column correspond to the reduced matrix elements of the quadrupole operator between yrast states [16]. The reduced matrix elements have been expressed in a form that brings out their explicit dependence on the effective charges. The entries presented in the fourth column correspond to the effective charges indicated in the first column. The $B(E2)$ values are in units of $e^2b_n^2$ (where $b_n$ stands for barn, 1 barn = $10^{-28}$ m$^2$).
144
+
145
+ <table><thead><tr><th rowspan="2">Nucleus<br>(e<sub>p</sub>, e<sub>n</sub>)<br>(1)</th><th rowspan="2">Transition<br>(J<sub>i</sub><sup>+</sup> → J<sub>f</sub><sup>+</sup>)<br>(2)</th><th rowspan="2">$[B(E2: J<sub>i</sub><sup>+</sup> → J<sub>f</sub><sup>+</sup>)]<sup>1/2</sup><br>(3)</th><th colspan="2">$B(E2: J<sub>i</sub><sup>+</sup> → J<sub>f</sub><sup>+</sup>)</th></tr><tr><th>Theory<br>(4)</th><th>(Exp.)*<br>(5)</th></tr></thead><tbody><tr><td rowspan="5"><sup>98</sup>Sr (1.25, 0.25)</td><td>0<sup>+</sup> → 2<sup>+</sup></td><td>0.75e<sub>p</sub>+1.00e<sub>n</sub></td><td>1.41</td><td>1.28(39)</td></tr><tr><td>2<sup>+</sup> → 4<sup>+</sup></td><td>0.90e<sub>p</sub>+1.19e<sub>n</sub></td><td>2.02</td><td>-</td></tr><tr><td>4<sup>+</sup> → 6<sup>+</sup></td><td>0.94e<sub>p</sub>+1.25e<sub>n</sub></td><td>2.21</td><td>-</td></tr><tr><td>6<sup>+</sup> → 8<sup>+</sup></td><td>0.95e<sub>p</sub>+1.28e<sub>n</sub></td><td>2.27</td><td>-</td></tr><tr><td>8<sup>+</sup> → 10<sup>+</sup></td><td>0.95e<sub>p</sub>+1.30e<sub>n</sub></td><td>2.28</td><td>-</td></tr><tr><td rowspan="5"><sup>100</sup>Sr (1.25, 0.25)</td><td>0<sup>+</sup> → 2<sup>+</sup></td><td>0.76e<sub>p</sub>+0.84e<sub>n</sub></td><td>1.34</td><td>1.42(8)</td></tr><tr><td>2<sup>+</sup> → 4<sup>+</sup></td><td>0.91e<sub>p</sub>+0.93e<sub>n</sub></td><td>1.87</td><td>-</td></tr><tr><td>4<sup>+</sup> → 6<sup>+</sup></td><td>0.95e<sub>p</sub>+0.98e<sub>n</sub></td><td>2.05</td><td>-</td></tr><tr><td>6<sup>+</sup> → 8<sup>+</sup></td><td>0.97e<sub>p</sub>+1.00e<sub>n</sub></td><td>2.13</td><td>-</td></tr><tr><td>8<sup>+</sup> → 10<sup>+</sup></td><td>0.98e<sub>p</sub>+1.01e<sub>n</sub></td><td>2.18</td><td>-</td></tr><tr><td rowspan="5"><sup>102</sup>Sr (1.25, 0.25)</td><td>0<sup>+</sup> → 2<sup>+</sup></td><td>0.78e<sub>p</sub>+0.68e<sub>n</sub></td><td>1.31</td><td>-</td></tr><tr><td>2<sup>+</sup> → 
4<sup>+</sup></td><td>0.93e<sub>p</sub>+0.81e<sub>n</sub></td><td>1.86</td><td>-</td></tr><tr><td>4<sup>+</sup> → 6<sup>+</sup></td><td>0.97e<sub>p</sub>+0.86e<sub>n</sub></td><td>2.03</td><td>-</td></tr><tr><td>6<sup>+</sup> → 8<sup>+</sup></td><td>0.99e<sub>p</sub>+0.88e<sub>n</sub></td><td>2.12</td><td>-</td></tr><tr><td>8<sup>+</sup> → 10<sup>+</sup></td><td>0.99e<sub>p</sub>+0.90e<sub>n</sub></td><td>2.13</td><td>-</td></tr><tr><td rowspan="5"><sup>104</sup>Sr (1.25, 0.25)</td><td>0<sup>+</sup> → 2<sup>+</sup></td><td>0.78e<sub>p</sub>+0.91e<sub>n</sub></td><td>1.44</td><td>-</td></tr><tr><td>2<sup>+</sup> → 4<sup>+</sup></td><td>0.93e<sub>p</sub>+1.08e<sub>n</sub></td><td>2.05</td><td>-</td></tr><tr><td>4<sup>+</sup> → 6<sup>+</sup></td><td>0.98e<sub>p</sub>+1.13e<sub>n</sub></td><td>2.27</td><td>-</td></tr><tr><td>6<sup>+</sup> → 8<sup>+</sup></td><td>0.99e<sub>p</sub>+1.16e<sub>n</sub></td><td>2.33</td><td>-</td></tr><tr><td>8<sup>+</sup> → 10<sup>+</sup></td><td>0.99e<sub>p</sub>+1.17e<sub>n</sub></td><td>2.34</td><td>-</td></tr><tr><td rowspan="5"><sup>106</sup>Sr (1.25, 0.25)</td><td>0<sup>+</sup> → 2<sup>+</sup></td><td>0.78e<sub>p</sub>+0.69e<sub>n</sub></td><td>1.31</td><td>-</td></tr><tr><td>2<sup>+</sup> → 4<sup>+</sup></td><td>0.93e<sub>p</sub>+0.83e<sub>n</sub></td><td>1.87</td><td>-</td></tr><tr><td>4<sup>+</sup> → 6<sup>+</sup></td><td>0.97e<sub>p</sub>+0.87e<sub>n</sub></td><td>2.04</td><td>-</td></tr><tr><td>6<sup>+</sup> → 8<sup>+</sup></td><td>0.99e<sub>p</sub>+0.89e<sub>n</sub></td><td>2.13</td><td>-</td></tr><tr><td>8<sup>+</sup> → 10<sup>+</sup></td><td>1.00e<sub>p</sub>+0.91e<sub>n</sub></td><td>2.18</td><td>-</td></tr></tbody></table>
146
+
147
+ *Exp. data taken from ref. [34].
148
+
149
+ experimental data for the higher transitions in $^{98-100}$Sr and any of the transitions in $^{102-106}$Sr are not available, it turns out that the calculated data predicted for different transitions in $^{98-106}$Sr will serve as a motivation for the experimentalists to look for this data.
150
+ ---PAGE_BREAK---
151
+
152
+ Neutron-rich $^{98-106}$Sr isotopes
153
+
154
+ **6. Quadrupole deformations ($\beta_2$) in Sr isotopes**
155
+
156
+ We have calculated values for deformation parameter ($\beta_2$) for $^{98-106}$Sr. The deformation parameter $\beta_2$ is related to $B(E2)\uparrow$ by the formula suggested by Raman *et al* [33] as
157
+
158
+ $$ \beta_2 = (4\pi/3ZR_0^2)[B(E2)\uparrow/e^2]^{1/2}, \quad (5) $$
159
+
160
+ where $R_0$ is usually taken to be 1.2 $A^{1/3}$ fm and $B(E2)\uparrow$ is in units of $e^2 b^2$.
161
+
162
+ The deformation parameter $\beta_2$ has been calculated using the calculated $B(E2)\uparrow$ values, given in table 5. From the calculations, we find that $\beta_2$ values for the nuclei $^{98}$Sr, $^{100}$Sr, $^{102}$Sr, $^{104}$Sr and $^{106}$Sr are 0.42, 0.41, 0.40, 0.41 and 0.39 respectively. The experimental values [34] for $^{98}$Sr and $^{100}$Sr are 0.40(6) and 0.42(12) respectively. From the comparison of the data, we find that there is reasonable agreement for $\beta_2$ values for the nuclei $^{98-100}$Sr. The experimental data for $^{102-106}$Sr are not available.
163
+
164
+ **7. Conclusions**
165
+
166
+ From the results of our calculations, the following conclusions can be drawn:
167
+
168
+ (i) The VAP calculations performed with PQH interaction reproduce correctly the observed deformation systematics in $^{98-102}$Sr isotopes. The deformation develops because of the simultaneous polarization of ($p_{3/2}$) and ($f_{5/2}$) proton subshells and the operation of np interaction between ($g_{9/2}$)$_\pi$ and ($g_{7/2}$)$_\nu$ subshells. The polarization of $p_{3/2}$ or $f_{5/2}$ orbits is an important pre-requisite for the np interaction between SOP orbits to operate.
169
+
170
+ (ii) The yrast spectra obtained with the inclusion of hexadecapole interaction show satisfactory agreement with the observed spectra compared to the spectra obtained with the PQ model of interaction.
171
+
172
+ (iii) The values of hexadecapole interaction parameters employed by us are the appropriate ones in this mass region as, with them, the HB wave function yields values of $B(E2)$ which are in satisfactory agreement with experiments.
173
+
174
+ **References**
175
+
176
+ [1] E Cheifetz, R C Jarad, S G Thompson and J B Wilhelmy, Phys. Rev. Lett. **25**, 38 (1970)
177
+
178
+ [2] H Ohm, G Lhersonneau, K Sistemich, B Pfeiffer and K L Kratz, Z. Phys. **A327**, 483 (1987)
179
+
180
+ [3] G Lhersonneau, H Gabelmann, K L Kratz, B Pfeiffer, N Kaffrell and the ISOLDE Collaboration, Z. Phys. **A332**, 243 (1989)
181
+
182
+ [4] G Lhersonneau, H Gabelmann, N Kaffrell, K L Kratz, B Pfeiffer, K Heyde and the ISOLDE Collaboration, Z. Phys. **A337**, 143 (1990)
183
+
184
+ [5] F Buchinger, E B Ramsay, E Arnold, W Neu, R Neugart, K Wendt, R Silverans, E Lievens, L Vermeeren, D Berdichevsky, R Fleming and D W L Sprung, Phys. Rev. C **41**, 2883 (1990)
185
+ ---PAGE_BREAK---
186
+
187
+ Anil Chandan et al
188
+
189
+ [6] P Lievens, R E Silverans, L Vermeeren, W Borchers, W Neu, R Neugart, K Wendt, F Buchinger, E Arnold and the ISOLDE Collaboration, Phys. Lett. B256, 141 (1991)
190
+
191
+ [7] G Lhersonneau, B Pfeiffer, R Capote, J M Quesada, H Gabelmann, K L Kratz and the ISOLDE Collaboration, Phys. Rev. C65, 024318 (2002)
192
+
193
+ [8] R E Azuma, G L Borchert, L C Carraz, P G Hansen, B Jonson, S Matttsson, O B Nielsen, G Nyman, I Ragnarson and H L Ravn, Phys. Lett. B86, 5 (1979)
194
+
195
+ [9] J H Hamilton, A V Ramayya, S J Zhu, G M Ter-Akopia, Yu Oganessian, J D Cole, J O Rasmussen and M A Stoyer, Prog. Part. Nucl. Phys. 35, 635 (1995)
196
+
197
+ [10] G Lhersonneau, B Pfeiffer, M Huhta, A Wohr, I Klockl, K L Gratz, J Aysto and the ISOLDE Collaboration, Z. Phys. A351, 357 (1995)
198
+
199
+ [11] S Verma, P Ahmad, R Devi and S K Khosa, Phys. Rev. C77, 024308 (2008)
200
+
201
+ [12] John C Hill, J A Winger, F K Wohn, R F Petry, J D Goulden, R L Gill, A Piotrowski and H Mach, Phys. Rev. C33, 5 (1985)
202
+
203
+ [13] P Federman and S Pittel, Phys. Rev. C20, 820 (1979)
204
+
205
+ [14] P Bonche, H Flocard, P H Heenen, S J Krieger and M S Weiss, Nucl. Phys. A443, 39 (1985)
206
+
207
+ [15] X Campi and M Epherre, Phys. Rev. C22, 2605 (1980)
208
+
209
+ [16] S K Sharma, P N Tripathi and S K Khosa, Phys. Rev. C38, 2935 (1988)
210
+
211
+ [17] P N Tripathi, S K Sharma and S K Khosa, Phys. Rev. C29, 1951 (1984)
212
+
213
+ [18] S K Khosa, P N Tripathi and S K Sharma, Phys. Lett. B119, 257 (1982)
214
+
215
+ [19] S K Khosa and S K Sharma, Phys. Rev. C25, 2715 (1981)
216
+
217
+ [20] J D Vergados and T T S Kuo, Phys. Lett. B35, 93 (1971)
218
+
219
+ [21] A Bohr and B R Mottelson, Nuclear structure (Benjamin, New York, 1975) Vol. II, p. 356
220
+
221
+ [22] S K Sharma, Nucl. Phys. A260, 226 (1976)
222
+
223
+ [23] P Federman and S Pittel, Phys. Lett. B69, 385 (1977)
224
+
225
+ [24] S Pittel, Nucl. Phys. A347, 417 (1980)
226
+
227
+ [25] S C K Nair, A Ansari and L Satpathi, Phys. Lett. B71, 257 (1977)
228
+
229
+ [26] M Danos and V Gillet, Phys. Rev. C161, 1034 (1967)
230
+
231
+ [27] A Arima and V Gillet, Ann. Phys. 66, 117 (1971)
232
+
233
+ [28] A Arima, T Ohtsuka, F Lachella and I Talmi, Phys. Lett. B66, 205 (1977)
234
+
235
+ [29] R F Casten et al., Phys. Lett. 47, 1433 (1981)
236
+
237
+ [30] P K Mattu and S K Khosa, Phys. Rev. C39, 2018 (1989)
238
+
239
+ [31] M Sakai, At. Data Nucl. Data Tables 31, 409 (1984)
240
+
241
+ [32] B Singh and Z Hu, Nucl. Data Sheets 98, 335 (2003)
242
+
243
+ [33] S Raman, C W Nestor, S Kahane and K H Bhatt, At. Data Nucl. Data Tables 42, 1 (1989)
244
+
245
+ [34] S Raman, C W Nestor and P Tikkanen, At. Data Nucl. Data Tables 78, 40 (2001)
samples/texts_merged/3764397.md ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ Early Collision and Fragmentation Detection of Space
5
+ Objects without Orbit Determination
6
+
7
+ Lyndy E. Axon*
8
+
9
+ This paper demonstrates that from using the hypothesized constraint of the admissible regions it is possible to determine if a combination of new uncorrelated debris objects has a common origin that also intersects with a known catalog object orbit, thus indicating a collision or fragmentation has occurred. Admissible region methods are used to bound the feasible orbit solutions of multiple observations using constraints on energy and radius of periapsis, propagating them to a common epoch in the past, and using sequential quadratic programming optimization to find a set of solution states that minimize the Euclidean distance between the observations at that time. If this set of solutions intersects with a catalog object orbit, then that object is the probabilistic source of the debris objects. This proposed method is demonstrated on an example of a low-earth object observation.
10
+
11
+ I. Introduction
12
+
13
+ A problem of constant concern for the future of space operations, especially as massive thousand-satellite constellations are in the design phase, is the tracking, orbit determination, and cataloging of all space objects in orbit around Earth. The U.S. Air Force Space Command utilizes the Space Surveillance Network (SSN) to make approximately 80,000 daily observations to track an estimated population of over 300,000 objects with a diameter of over 1 cm, 17,000 known catalog objects greater than 10 cm in diameter, and 1300 active satellites.¹,²,³ In over 50 years of space missions, over 5000 satellites have gone into orbit, of which less than 1300 are still operational today.⁴ Many of the remaining satellites have deorbited successfully, or were put into designated storage orbits prior to end-of-life; however, a large number of them remain dormant, orbiting the Earth.⁵ In addition to defunct satellites, debris from collisions, fragmentations, and launch litter the operational orbit environments from LEO to GEO. Not all the SSN's daily observations, or uncorrelated tracks, can be used to create actionable information.
14
+
15
+ Extracting actionable information from an initial UCT is not a simple task, for with a single UCT it is not possible to uniquely identify the state of the object, or how useful it would be to immediately prioritize additional observations.³ On a daily basis, thousands of observations of space objects from the SSN take place over short time periods and do not possess enough geometric diversity in the observation data to initiate a well-posed classical initial orbit determination (IOD) problem, such as angles-only IOD. Traditional orbit determination methods rely on the curvature of the measurements in order to produce a state estimate. However, measurements obtained from a short observation or a very short sequence of observations have linear dynamics and traditional methods fail as the observation time decreases.⁶ Optical sensors measure state information as either a series of angle measurements over time or from streaks formed during a single observation; these angular measurements form a tracklet, but the range and range-rate of the SO are not observable. Therefore, the SO state is underdetermined and for any given tracklet, a continuum of range and range-rate solutions are possible which define the admissible region for a given observation.⁷
16
+
17
+ In an operational environment when UCTs cannot be correlated with known objects in the Space Object Catalog (SOC), operators must have a method to quickly determine if a potential threat exists. Extreme examples of potential threats include a decreased capability due to a breakup of an asset, or a debris field created by a collision. These debris objects must have had an origin, and it is currently computationally difficult and time consuming to solve this problem with real-time accuracy, and as a result collisions and fragmentations of smaller space objects have occurred. To accurately correlate new UCTs with a known
18
+
19
+ *Graduate Researcher, Daniel Guggenheim School of Aerospace Engineering, Georgia Institute of Technology, 270 First Dr. Atlanta, GA 30313.
20
+ ---PAGE_BREAK---
21
+
22
+ catalog object's orbit as an origin, multiple orbits must occur for LEO cases, and hours of continuous tracking is required for GEO cases. In this situation, it is more efficient to take a collection of UCTs and propagate them back over a designated period of time to determine if any of the possible states shared the same position at the same epoch, which would indicate that the observed UCTs were disparate debris from a known catalog object. Using admissible regions to initiate this approach allows the tasks of initial orbit determination and tracking to be foregone, which allows for faster actionable information. This would allow operators to track incoming UCTs and assign them as fragments or debris from a past event with a tracked catalog object, and allow for tasking of Space Surveillance Network assets to observe the catalog object as well as characterize the current state and future risks that these debris objects may pose.
23
+
24
+ Admissible region ($\mathcal{R}$) methods are methods to constrain undetermined states using a priori constraint hypotheses, and have been proposed to support data association and track-initiation tasks. Many have extended the applicability of AR methods to space situational awareness (SSA) since Milani et al. first proposed applying these methods to too-short arc (TSA) problems in the detection of asteroids.⁸ The AR approach has been applied by Tommei et al. to SO detection and discrimination by using radar and optical measurements.⁹ Optimization methods to identify a best-fitting orbit solution are proposed by Siminski et al.¹⁰ Existing admissible region methods can be used by discretizing the admissible region and considering the solutions at discrete points, which would allow for a particle filter approach.¹ Additionally, an optimization scheme can be used to identify the best fitting orbits within an admissible region, eliminating the need to discretize the whole region.¹⁰ Fujimoto and Scheeres' work shows that observations can be associated by applying Bayes' rule to an admissible region generated from two epochs, where a nonzero result indicates that the observations are correlated.¹¹ In addition, a solution technique for correlating multiple optical observations by computing the overlap between their admissible regions, as well as using highly constrained probability distributions in Poincare orbit element space, has been proposed by Fujimoto and Scheeres.¹² Worthy et al. has developed an observation association method which uses an optimization based approach to identify local Mahalanobis distance minima in state space between two uncertain admissible regions.¹³ A limitation of these methods using the intersection of the $\mathcal{R}$ volumes is that a feasible orbit can only be constructed if the observations are of the same object; otherwise these iterative solution methods will fail. 
The proposed methodology in this paper seeks to demonstrate that, given multiple new debris objects that cannot be associated with any known catalog object, it is possible to determine whether a collision or fragmentation event has occurred, and from what origin, in near real-time as new UCTs become available.
25
+
26
+ This paper proposes a methodology for applying AR methods to bound the feasible orbit solutions of multiple observations using constraints on energy and radius of periapsis, propagating them to a common epoch in the past, and using sequential quadratic programming optimization to find a set of solution states that minimize the Euclidean distance between the observations at that time. This numerical zero-finding approach demonstrates that given two uncorrelated observations, and corresponding admissible regions, a line of feasible solutions exists that minimizes the distance between the objects. In summary, this paper demonstrates that from using the hypothesized constraint of the admissible regions it is possible to determine if a combination of new uncorrelated debris objects has a common origin that also intersects with a known catalog object orbit, thus indicating a collision or fragmentation has occurred.
27
+
28
+ ## II. Approach and Methodology
29
+
30
+ The goal of this methodology is to detect collisions and fragmentations by observing disparate debris without requiring the computational and time burden of using orbit determination. This approach can be used for a variety of orbit types and observation lengths. Given two uncorrelated observations, at two different times, $t_1$ and $t_2$, the proposed method will determine if a common origin exists for these objects at a selected epoch $t_0$. Figure 1 shows the orbital path in $\mathbb{R}^6$ orbit element space of a known catalog object as a function of time, until at some $t_0$ a break-up event occurs that results in a discrete number of debris objects. Each observation at $t_1$ and $t_2$ is of different debris from what is hypothesized to be a common origin.
31
+ ---PAGE_BREAK---
32
+
33
+ Figure 1. Catalog Object Break-up at a given Epoch as a function of time
34
+
35
+ Given independent observations of multiple debris objects, a continuum of range and range-rate combinations define the admissible region. These range and range-rate solutions make up the undetermined portion of a potential full state; each full-state (determined or observable information combined with unobservable) corresponds to a given position and velocity solution. These solutions can be propagated back to an arbitrary estimated epoch $t_0$, at which a solution manifold can be constructed by using sequential quadratic programming and selected constraint criteria to minimize the Euclidean distance between the positions of the two observed objects at $t_0$. The solution manifold represents a line of possible common origins that goes through $\mathbb{R}^6$; if it intersects with the catalog object orbit then the observed objects have spawned from a break-up event involving that known object. Figure 2 is a three dimensional illustration of the previous figure, but at a particular time. Notice in this figure that the solution manifold will cross the orbit of the catalog object at the hypothesized epoch $t_0$.
36
+
37
+ Figure 2. Catalog Object Break-Up and Observation of Debris Objects from Ground Station
38
+
39
+ Optical measurements generate angle and angle rates of objects tracked using a streak or sequence of angle measurements of right ascension, α, and declination, δ. The parameters associated with optical measurements include the observer position and velocity, **o** and $\dot{\textbf{o}}$, respectively, as well as the times at which the observations are made. Using this information, the position, **r**, and velocity, **v** of the object are given by
40
+
41
+ $$ \mathbf{r} = \mathbf{o} + \rho \hat{\mathbf{l}} \qquad (1) $$
42
+ ---PAGE_BREAK---
43
+
44
+ where $\rho$ is the range to the target, $\dot{\rho}$ is the range-rate, and $\hat{\mathbf{l}}$, $\hat{\mathbf{l}}_{\alpha}$, and $\hat{\mathbf{l}}_{\delta}$ are given by
45
+
46
+ $$ \mathbf{v} = \dot{\mathbf{o}} + \dot{\rho}\hat{\mathbf{l}} + \rho\dot{\alpha}\hat{\mathbf{l}}_{\alpha} + \rho\dot{\delta}\hat{\mathbf{l}}_{\delta} \quad (2) $$
47
+
48
+ $$ \hat{\mathbf{l}} = \begin{bmatrix} \cos\alpha\cos\delta \\ \sin\alpha\cos\delta \\ \sin\delta \end{bmatrix} \qquad (3) $$
49
+
50
+ $$ \hat{\mathbf{l}}_{\alpha} = \begin{bmatrix} -\sin\alpha\cos\delta \\ \cos\alpha\cos\delta \\ 0 \end{bmatrix} \qquad (4) $$
51
+
52
+ $$ \hat{\mathbf{l}}_{\delta} = \begin{bmatrix} -\cos\alpha\sin\delta \\ -\sin\alpha\sin\delta \\ \cos\delta \end{bmatrix} \qquad (5) $$
53
+
54
+ For this system, the states **x**, the observations **k**, and parameters **p** are defined as
55
+
56
+ $$ \mathbf{x}^T = [\alpha \ \dot{\alpha} \ \delta \ \dot{\delta} \ \rho \ \dot{\rho}] \qquad (6) $$
57
+
58
+ $$ \mathbf{k}^T = [\alpha_1 \dots \alpha_q \ \delta_1 \dots \delta_q] \qquad (7) $$
59
+
60
+ $$ \mathbf{p}^T = [\mathbf{o}^T \dot{\mathbf{o}}^T] \qquad (8) $$
61
+
62
+ where $\dot{\alpha}$ and $\dot{\delta}$ are the angle rates which are generated using Lagrange Interpolation shown in Equation 9, and $q$ is the number of observations. In order to limit the inherent error associated with using Lagrange interpolation from point values, streak observations are used in this methodology. The rate estimations from the center of each streak are used for further calculations as this provides a better estimate of the rate than the beginning of the streak.
63
+
64
+ $$ \begin{aligned} \dot{\alpha}(t) ={}& \alpha(t_1) \frac{(t-t_2) + (t-t_3) + \cdots + (t-t_q)}{(t_1-t_2)(t_1-t_3)\cdots(t_1-t_q)} \\ &+ \alpha(t_2) \frac{(t-t_1) + (t-t_3) + \cdots + (t-t_q)}{(t_2-t_1)(t_2-t_3)\cdots(t_2-t_q)} \\ &+ \cdots + \alpha(t_q) \frac{(t-t_1) + (t-t_2) + \cdots + (t-t_{q-1})}{(t_q-t_1)(t_q-t_2)\cdots(t_q-t_{q-1})} \end{aligned} \qquad (9) $$
65
+
66
+ For an observation with two measurements, the combined measurement and parameter vector, $\mathbf{y}^T \in \mathbb{R}^{12}$ is given by
67
+
68
+ $$ \mathbf{y}^T = [\alpha_1 \ \alpha_2 \ \delta_1 \ \delta_2 \ t_1 \ t_2 \ \mathbf{o}^T \ \dot{\mathbf{o}}^T] \qquad (10) $$
69
+
70
+ Given $\mathbf{y}$ and solving for the angle rates using Equation 9, four of the six states in $\mathbf{x}$ can be observed or determined; these four states, known henceforth as $\mathbf{x}_d$ are shown in Equation 11. The remaining two undetermined states, known as $\mathbf{x}_u$, are given by Equation 12.
71
+
72
+ $$ \mathbf{x}_d = \begin{bmatrix} \alpha \\ \dot{\alpha} \\ \delta \\ \dot{\delta} \end{bmatrix}_{4\times1} \qquad (11) $$
73
+
74
+ $$ \mathbf{x}_u = \begin{bmatrix} \rho \\ \dot{\rho} \end{bmatrix}_{2\times1} \qquad (12) $$
75
+
76
+ To limit the realm of possible solutions for $\mathbf{x}_u$, constraint hypotheses are imposed on the admissible regions. These constraints can be based on a priori information about the observation (e.g. is the object LEO or GEO), as well as reasonable constraints for objects in orbit around Earth can be imposed. For the
77
+ ---PAGE_BREAK---
78
+
79
+ purpose of this paper, the primary assumption is that of 2-body motion, which allows the use of a constraint on the specific orbital energy equation. This constraint, $\kappa$, requires that the space object is in Earth's orbit, and therefore excludes hyperbolic orbit solutions. To constrain these solutions for $\mathbf{x}_u$, the admissible region set $\mathcal{R}$ can be defined as $\{\mathbf{x}_u \in \mathbb{R}^2 | \epsilon(\mathbf{r}, \mathbf{v}) = 0\}$, which is the solution to Equation 13.⁶ The solutions to this polynomial define the two dimensional boundary of the admissible region.
80
+
81
+ $$ \kappa(\mathbf{x}_u, \mathbf{y}) = 2\epsilon(\mathbf{r}, \mathbf{v}) = \dot{\rho}^2 + w_1\dot{\rho} + T(\rho) - \frac{2\mu}{\sqrt{S(\rho)}} = 0 \quad (13) $$
82
+
83
+ Farnocchia, et. al. and Tommei et. al. define $T(\rho)$, $S(\rho)$, and coefficients $w_0$ through $w_5$ as Equations 14 and 15.¹⁴.⁹
84
+
85
+ $$ T(\rho) = w_2\rho^2 + w_3\rho + w_4, \quad S(\rho) = \rho^2 + w_5\rho + w_0 \quad (14) $$
86
+
87
+ $$
88
+ \begin{align}
89
+ w_0 &= \|\mathbf{o}\|^2, & w_1 &= 2\langle \dot{\mathbf{o}} \cdot \hat{\mathbf{l}} \rangle \\
90
+ w_2 &= \dot{\alpha}^2 \cos^2 \delta + \dot{\delta}^2, & w_3 &= 2\dot{\alpha} \langle \dot{\mathbf{o}} \cdot \hat{\mathbf{l}}_\alpha \rangle + 2\dot{\delta} \langle \dot{\mathbf{o}} \cdot \hat{\mathbf{l}}_\delta \rangle \\
91
+ w_4 &= \|\dot{\mathbf{o}}\|^2, & w_5 &= 2\langle \mathbf{o} \cdot \hat{\mathbf{l}} \rangle
92
+ \end{align}
93
+ \quad (15)
94
+ $$
95
+
96
+ To further constrain the realm of possible state solutions, a periapsis radius constraint is used to exclude parabolic and potentially re-entering space objects that will impact the Earth in less than one revolution. For the purpose of this paper, the minimum radius of periapsis $r_{min}$ is set at 6378 km plus $h_{atm}$, where $h_{atm}$ is 200 km. A form of this constraint, $r_p = a(1-e) \ge r_{min}$ was proposed by Maruskin et. al.¹ The periapsis constraint $r_{min} - r_p(\rho, \dot{\rho})$ was analytically developed by Farnocchia et. al. to be¹⁴
97
+
98
+ $$ (r_{min}^2 - \|D\|^2)\dot{\rho}^2 - P(\rho)\dot{\rho} - U(\rho) + r_{min}^2 T(\rho) - \frac{2r_{min}^2\mu}{\sqrt{S(\rho)}} \le 0 \quad (16) $$
99
+
100
+ with
101
+
102
+ $$ P(\rho) = 2\mathbf{D} \cdot \mathbf{E}\rho^2 + 2\mathbf{D} \cdot \mathbf{F}\rho + 2\mathbf{D} \cdot \mathbf{G} - r_{min}^2 w_1 \quad (17) $$
103
+
104
+ $$ U(\rho) = \|E\|^2 \rho^4 + 2E \cdot F\rho^3 + (2E \cdot G + \|F\|^2)\rho^2 + 2F \cdot G\rho + \|G\|^2 - 2r_{min}\mu \quad (18) $$
105
+
106
+ given the following
107
+
108
+ $$
109
+ \begin{align}
110
+ \mathbf{D} &= \mathbf{o} \times \hat{\mathbf{l}}, & \mathbf{E} &= \hat{\mathbf{l}} \times (\dot{\alpha}\mathbf{l}_{\alpha} + \dot{\delta}\mathbf{l}_{\delta}) \\
111
+ \mathbf{F} &= \mathbf{o} \times (\dot{\alpha}\mathbf{l}_{\alpha} + \dot{\delta}\mathbf{l}_{\delta}) + \mathbf{l} \times \dot{\mathbf{o}}, & \mathbf{G} &= \mathbf{o} \times \dot{\mathbf{o}}
112
+ \end{align}
113
+ \quad (19)
114
+ $$
115
+
116
+ Additional constraints may be relevant depending on available a priori information about the space object. For example, eccentricity would be an appropriate constraint to apply to GEO observations.¹ For the purpose of this paper, only energy and radius of periapsis constraints will be imposed. Imposing these constraints on an observation **y** results in a two dimensional space of solutions to **x**_u that could possibly complete the state **x** of the observed space object.
117
+
118
+ Given two observations of an object, such as shown in Equation 10, admissible regions can be determined for each observation, $\mathcal{R}_1$ and $\mathcal{R}_2$. Each of these has a set of possible undetermined states $\mathbf{x}_u$ that satisfy the aforementioned constraints. Combining these into a single variable **z** gives
119
+
120
+ $$ z = \begin{bmatrix} x_{u,1} \\ x_{u,2} \end{bmatrix} = \begin{bmatrix} \rho_1 \\ \dot{\rho}_1 \\ \rho_2 \\ \dot{\rho}_2 \end{bmatrix} \quad (20) $$
121
+
122
+ It is possible to conduct a random uniform sampling of both $\mathcal{R}_1$ and $\mathcal{R}_2$ to collect a set of **z** solutions that satisfy the constraints. Each $\mathbf{x}_{u,1}$ and $\mathbf{x}_{u,2}$, combined with $\mathbf{x}_{d,1}$ and $\mathbf{x}_{d,2}$, respectively, creates a possible full state solution $\mathbf{x}_1$ and $\mathbf{x}_2$ for the observed space object. Each of these states can be converted into Cartesian position **r** and velocity **v** by using Equations 1 and 2. Propagating these states back to some common time *t* in the past, the resulting vectors are defined as
123
+ ---PAGE_BREAK---
124
+
125
+ $$
126
+ \begin{align}
127
+ \mathbf{r}_1(t) &= [\mathbb{I} \ 0] \phi(t, \mathbf{x}_{u,1}, \mathbf{x}_{d,1}, t_1) \\
128
+ \mathbf{r}_2(t) &= [\mathbb{I} \ 0] \phi(t, \mathbf{x}_{u,2}, \mathbf{x}_{d,2}, t_2) \qquad (21)
129
+ \end{align}
130
+ $$
131
+
132
+ From this, the goal is to determine if there is a set of solutions for **z** that minimize the Euclidean distance between the position vectors corresponding to each observation time. The cost function J(**z**) and gradient are as follows
133
+
134
+ $$
135
+ J(\mathbf{z}) = \frac{1}{2} (\mathbf{r}_1 - \mathbf{r}_2)^T (\mathbf{r}_1 - \mathbf{r}_2) \quad (22)
136
+ $$
137
+
138
+ $$
139
+ \frac{\partial J}{\partial \mathbf{z}} = \left[ \frac{\partial J}{\partial \mathbf{x}_{u,1}}, \frac{\partial J}{\partial \mathbf{x}_{u,2}} \right] = \left[ (\mathbf{r}_1 - \mathbf{r}_2)^T \cdot \left[ \mathbb{I} \ 0 \right] \frac{\partial \phi}{\partial \mathbf{x}_1} \frac{\partial \mathbf{x}_1}{\partial \mathbf{x}_{u,1}}, (\mathbf{r}_1 - \mathbf{r}_2)^T \cdot \left[ \mathbb{I} \ 0 \right] \frac{\partial \phi}{\partial \mathbf{x}_2} \frac{\partial \mathbf{x}_2}{\partial \mathbf{x}_{u,2}} \right] \quad (23)
140
+ $$
141
+
142
+ **Algorithm 1:** Algorithm to Determine Solution Manifold
143
+
144
+ **Result:** Minimize Eq. 22
145
+
146
+ 1 initialization of givens, observables, and parameter settings;
147
+
148
+ 2 compute GS Vectors and observer unit vectors with Eq. 3, 4, & 5;
149
+
150
+ 3 compute $\mathcal{R}$ boundaries for each Obs. by solving the quadratic equation for $\dot{\rho}$ given a continuous set of $\rho$ values using Eq. 13;
151
+
152
+ 4 uniformly sample from $\mathcal{R}$ interiors by selecting a random $\rho$ & $\dot{\rho}$ based on the min and max values and satisfying the energy (Eq. 13) and radius of periapsis constraints (Eq. 16);
153
+
154
+ 5 construct $\mathbf{z}$ (Eq. 20) by stacking the sample values from $\mathcal{R}_1$ & $\mathcal{R}_2$;
155
+
156
+ 6 **for** *i* = 1:*length(*z*) *do*
157
+
158
+ 7 &nbsp;&nbsp; Establish current **z** "guess" value (*z* = *z*(:, *i*)) ;
159
+
160
+ 8 &nbsp;&nbsp; **while** $J(\tilde{\mathbf{z}}) \geq Tolerance$ **do**
161
+
162
+ 9 &nbsp;&nbsp;&nbsp;&nbsp; Use fmincon to estimate the gradient (Eq. 23) and step **z** in that direction using nonlinear constraints in Eqs. 13 & 16;
163
+
164
+ 10 &nbsp;&nbsp;&nbsp;&nbsp; Update **z** value to reflect step towards minimum;
165
+
166
+ 11 &nbsp;&nbsp;&nbsp;&nbsp; Evaluate constraints (Eq. 13 and Eq. 16) given current **z** value to ensure solution still falls within $\mathcal{R}$;
167
+
168
+ 12 &nbsp;&nbsp;&nbsp;&nbsp; **if** current **z** not within $\mathcal{R}$ (does not meet constraints);
169
+
170
+ 13 &nbsp;&nbsp;&nbsp;&nbsp; **then**
171
+
172
+ 14 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Get new "guess" for **z** from fmincon by continuing;
173
+
174
+ 15 &nbsp;&nbsp;&nbsp;&nbsp; **else**
175
+
176
+ 16 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Convert **z** to cartesian using Eq. 1 & 2 to get $\tilde{\mathbf{z}}$;
177
+
178
+ 17 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Propagate $\tilde{\mathbf{z}}$ to $t_0$ and calculate distance using Eq. 22;
179
+
180
+ 18 &nbsp;&nbsp;&nbsp;&nbsp; **end**
181
+
182
+ 19 **end**
183
+
184
+ 20 save **z** solution value that minimize $J(\mathbf{z})$ (Eq. 22)
185
+
186
+ 21 end
187
+
188
+ III. Results
189
+
190
+ The goal of this methodology is to detect collisions and fragmentations by observing disparate debris. To demonstrate the initial effectiveness of this approach, two independent optical observations of the same object were used. The observations were made for one second exposures 5 minutes (300 seconds) apart based on an observation taken March 1, 2014 at 02:01:36 UTC. The measurement values for the tested LEO case are given in Table 1. The observations were made using an equatorial mounted telescope from Deerlick Astronomy Village; the observer parameters are given in Table 2. Errors in the observation measurements were assumed to have zero mean and approximately 0.5 arcsecond standard deviation of the noise on the angle observations (right ascension and declination). The standard deviation is approximated at this value due to the type of mount the observations were made from, as well as the exposure time.
191
+ ---PAGE_BREAK---
192
+
193
+ **Table 1. LEO Optical Observation Measurements**
194
+
195
+ <table><thead><tr><td>Time</td><td>α (rad)</td><td>δ (rad)</td><td>Exposure (sec)</td></tr></thead><tbody><tr><td>02:01:36</td><td>1.4007</td><td>0.5556</td><td>1</td></tr><tr><td>02:06:36</td><td>1.3504</td><td>-0.6931</td><td>1</td></tr></tbody></table>
196
+
197
+ **Table 2. Observer Parameters for Deerlick Astronomy Village, GA**
198
+
199
+ <table><thead><tr><th>Latitude</th><th>Longitude</th><th>Altitude (m)</th></tr></thead><tbody><tr><td>33.561deg N</td><td>82.764deg W</td><td>176.8</td></tr></tbody></table>
200
+
201
+ From these observations, admissible regions were constructed using a radius of periapsis constraint of 6578 km (radius of Earth plus 200 km), an energy constraint of less than zero (Earth orbiting), and an eccentricity constraint of less than 0.7. A set of *n* particle pairs **x**<sub>*u*</sub>'s that meet these constraints were then created by randomly uniformly sampling the interiors of each admissible region. The set from the observation at t<sub>1</sub>, **x**<sub>*u*,1</sub>, is then combined with **x**<sub>*u*,2</sub> from the observation at t<sub>2</sub>, resulting in **z** being a 4 × *n* matrix (Equation 20). Figure 3 shows the admissible regions corresponding to each observation as well as the sampled points from each interior.
202
+
203
+ **Figure 3. Admissible Region boundaries for Observation 1 & 2**
204
+
205
+ Using this test case, two different epoch times were selected: 100 seconds and 1 hour (3600 seconds) in the past. In each of these scenarios, each column in **z** is then stepped towards a minimum solution for the cost function *J*(*z*) at time t<sub>0</sub> by using the MATLAB function fmincon from the optimization toolbox to solve for the minimum of Equation 22 given the nonlinear constraints and functions. Fmincon is a gradient-based method that is designed to work on problems where the objective and constraint functions are both continuous and have continuous first derivatives.
206
+
207
+ **A. Epoch Time = -100 Seconds**
208
+
209
+ In this scenario, 5000 particles were sampled from the admissible regions, resulting in a 4 × 5000 matrix for **z**. Each of the 5000 columns of **z** was propagated backwards 100 seconds using a two-body propagator in Ode45. The solution values for **z**, that correspond to each observation, that minimize the Euclidean distance between the observed objects are shown in Figures 4 and 5. At an epoch that is only 100 seconds before the first observation, the solution manifold appears to have very limited curvature.
210
+ ---PAGE_BREAK---
211
+
212
+ Figure 4. Admissible Region for Observation 1 with minimized solutions and truth for $t_0 = -100$sec
213
+
214
+ Figure 5. Admissible Region for Observation 2 with minimized solutions and truth for $t_0 = -100$sec
215
+
216
+ The solution manifold line in the first observation's admissible region is much longer than the corresponding line in the second observation's admissible region. This result was to be expected, as the first observation was taken at a minimum range to the ground station, which means that a set of possible solution orbits with larger variation are possible. Conversely, the second observation was taken at a much lower elevation, thus increasing the slant range to the object from the ground station. This provides a smaller amount of variation in the solution states. The 3D plots shown in Figures 6 and 7 show the solution manifold in the position and velocity space. In these figures, the first observation is indicated with a blue arrow, the second with an orange arrow, and the ground station with a green arrow. The solution manifold is a short line made up of red (observation 1) and blue (observation 2) position solutions that clearly intersects with the shown known object truth orbit at the given epoch. This indicates that the observed debris objects have the same origin and it is possible that they spawned from an event involving the shown known object orbit.
217
+ ---PAGE_BREAK---
218
+
219
+ Figure 6. 3D Plot Earth Hemisphere with Solution Manifold and True Catalog Orbit for $t_0 = -100sec$
220
+
221
+ Figure 7. Solution Manifold and True Catalog Orbit Intersection for $t_0 = -100sec$
222
+
223
+ ## B. Epoch Time = -1 Hour
224
+
225
+ In this scenario, 1100 particles were sampled from the admissible regions, resulting in a 4 × 1100 matrix for **z**. Each of the 1100 columns of **z** was propagated backwards one hour (3600 seconds) using a two-body propagator in Ode45. The solution values for **z**, that correspond to each observation, that minimize the Euclidean distance between the observed objects are shown in Figures 8 and 9. At an epoch that is one hour prior to the first observation, the solution manifold appears to have an increased amount of curvature when compared with the corresponding results from the previous scenario. This is especially true of the solution manifold in Observation 2's admissible region.
226
+ ---PAGE_BREAK---
227
+
228
+ Figure 8. Admissible Region for Observation 1 with minimized solutions and truth for $t_0 = -3600$ sec
229
+
230
+ Figure 9. Admissible Region for Observation 2 with minimized solutions and truth for $t_0 = -3600$ sec
231
+
232
+ Just as was shown with the first scenario, the solution manifold line in the first observation's admissible region is much longer than the corresponding line in the second observation's admissible region. However, for an epoch of one hour prior to the first observation, the solution manifolds display much more curvature than the 100 second scenario. This is especially evident in the solution manifold corresponding to observation 2 in Figure 9. The 3D plots shown in Figure 10 show the solution manifold in the position and velocity space. In this figure, the first observation is indicated with a blue arrow, the second with an orange arrow, and the ground station with a green arrow. The solution manifold is a short line made up of red (observation 1) and blue (observation 2) position solutions that clearly intersects with the shown known object truth orbit at the given epoch. This indicates that the observed debris objects have the same origin and it is possible that they spawned from an event involving the shown known object orbit. The solution manifold displays much more interesting characteristics and curvature in this scenario, as it extends well beyond the known object orbit.
233
+ ---PAGE_BREAK---
234
+
235
+ Figure 10. 3D Plot Earth Hemisphere with Solution Manifold and True Catalog Orbit for $t_0 = -3600sec$
236
+
237
+ ### C. Error and Challenges
238
+
239
+ Errors in the observation measurements were assumed to have zero-mean noise with an approximately 0.5 arcsecond standard deviation on the angle observations (right ascension and declination). The standard deviation is approximated at this value due to the type of mount the observations were made from, as well as the exposure time. Error also is inherent with any numerical propagation method, such as Ode45. The relative and absolute tolerances were set to $1e^{-12}$ to limit error throughout this process. Additional sources of error can be found from using Lagrangian interpolation in order to estimate the angle rates of each observation. As aforementioned, to minimize this error source, streaks were used: the observation information from the center of the streak was used and the rates were estimated using the beginning and the end of the streak. For future work, the rates will be fed in as part of the 4-state $x_d$ and not estimated based on the right ascension and declination of the observations. This approach is computationally slow because it implements fmincon, which estimates the gradient numerically, instead of using a gradient-based approach such as steepest descent. A limitation of the method described here is that the epoch time, $t_0$, is arbitrary, and may be based on a priori information (e.g. last known observation, etc.), but would require an iterative "guessing" process to select a good estimate for $t_0$ which increases computational cost.
240
+
241
+ ## IV. Conclusions
242
+
243
+ The results in this paper, though there are limitations, illustrate that it is possible to detect fragments and collisions much sooner than current capabilities that rely on orbit determination. The current state-of-the-art relies on orbit determination, which requires multiple observations over at least two orbits for a LEO object and continual observation over hours for a GEO object. The approach outlined in this paper requires only independent observations of two debris objects to answer the same hypothesis, with the cost largely in computation. The problem reduces to a 4-dimensional particle swarm optimization, which can easily be solved using a gradient-based method. By using the hypothesized constraint of the admissible regions it was demonstrated that it is possible to determine if a combination of new uncorrelated debris objects have a common origin that also intersects with a known catalog object orbit, thus indicating break-up of that known object has occurred.
244
+ ---PAGE_BREAK---
245
+
246
+ ## V. Future Work
247
+
248
+ This paper reflects a very initial endeavour into understanding the limitations and applications of this methodology. Additional test cases, including one on a GEO break-up as well as another using a LEO collision path, will need to be done, as this paper only demonstrates that a zero-finding problem is possible when two observations of the same object are available. Other phenomenology should also be considered, such as radar observations. LEO observations are not typically made using optical or electro-optical hardware; conversely, GEO observations are almost exclusively made with these methods. Radar observations have a different admissible region structure, as they provide a different set of observable, or determined, states. In this scenario, $x_d$ is a 2 × 1 matrix, whereas in the optical case it is a 4 × 1 matrix. Therefore, to use radar information in the methodology described in this paper, additional observations would need to be included to create a closed solution.
249
+
250
+ ## References
251
+
252
+ ¹J. M. Maruskin, D. J. Scheeres, and K. T. Alfriend, "Correlation of optical observations of objects in earth orbit," *Journal of Guidance, Control, and Dynamics*, Vol. 32, No. 1, 2009, pp. 194-209.
253
+
254
+ ²A. Rossi, "The earth orbiting space debris," *Serbian Astronomical Journal*, Vol. 170, 2005, pp. 1-12.
255
+
256
+ ³M. J. Holzinger, K. K. Luu, C. Sabol, and K. Hill, "Uncorrelated-Track Classification, Characterization, and Prioritization Using Admissible Regions and Bayesian Inference," *Journal of Guidance, Control, and Dynamics*, 2016, pp. 2469-2484.
257
+
258
+ ⁴K. Wormnes, R. Le Letty, L. Summerer, R. Schonenborg, O. Dubois-Matra, E. Luraschi, A. Cropp, H. Krag, and J. Delaval, "ESA technologies for space debris remediation," *6th IAASS Conference: Safety is Not an Option, Montreal*, 2013.
259
+
260
+ ⁵P. d. Selding, "Orbital Debris a Growing Problem with No End in Sight," *Space News*, Vol. 31, 2006.
261
+
262
+ ⁶J. L. Worthy, *Initialization of sequential estimation for unobservable dynamical systems using partial information in the presence of systemic uncertainty*. PhD thesis, Georgia Institute of Technology, 2017.
263
+
264
+ ⁷J. L. Worthy III and M. J. Holzinger, "Incorporating uncertainty in admissible regions for uncorrelated detections," *Journal of Guidance, Control, and Dynamics*, Vol. 38, No. 9, 2015, pp. 1673-1689.
265
+
266
+ ⁸A. Milani, G. F. Gronchi, M. d. Vitturi, and Z. Knežević, "Orbit determination with very short arcs. I admissible regions," *Celestial Mechanics and Dynamical Astronomy*, Vol. 90, No. 1-2, 2004, pp. 57-85.
267
+
268
+ ⁹G. Tommei, A. Milani, and A. Rossi, "Orbit determination of space debris: admissible regions," *Celestial Mechanics and Dynamical Astronomy*, Vol. 97, No. 4, 2007, pp. 289-304.
269
+
270
+ ¹⁰J. A. Siminski, O. Montenbruck, H. Fiedler, and T. Schildknecht, "Short-arc tracklet association for geostationary objects," *Advances in space research*, Vol. 53, No. 8, 2014, pp. 1184-1194.
271
+
272
+ ¹¹K. Fujimoto and D. J. Scheeres, "Applications of the admissible region to space-based observations," *Advances in Space Research*, Vol. 52, No. 4, 2013, pp. 696-704.
273
+
274
+ ¹²K. Fujimoto and D. J. Scheeres, "Correlation of optical observations of earth-orbiting objects and initial orbit determination," *Journal of guidance, control, and dynamics*, Vol. 35, No. 1, 2012, pp. 208-221.
275
+
276
+ ¹³J. L. Worthy, M. J. Holzinger, and D. J. Scheeres, "An optimization approach for observation association with systemic uncertainty applied to electro-optical systems," *Advances in Space Research*, 2018.
277
+
278
+ ¹⁴D. Farnocchia, G. Tommei, A. Milani, and A. Rossi, "Innovative methods of correlation and orbit determination for space debris," *Celestial Mechanics and Dynamical Astronomy*, Vol. 107, No. 1-2, 2010, pp. 169-185.
samples/texts_merged/3884483.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/393503.md ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # On the Choice of Multiple Flat Outputs for Fault Detection and Isolation of a Flat System
5
+
6
+ Rim RAMMAL*, Tudor-Bogdan AIRIMITOAIE*,
7
+ Franck CAZAURANG*, Jean LÈVINE**,
8
+ Pierre MELCHIOR*
9
+
10
+ * Univ. Bordeaux, Bordeaux INP, CNRS, IMS, 33405 Talence, France
11
+ ictional redundancy in which multiple sensors and actuators are used to measure and control a particular variable (Chen et al., 2015). The drawbacks of this method are the extra equipment, maintenance cost and additional space required to accommodate the equipment. This approach was improved later on by the introduction of the *model-based analytical redundancy method*, based on the notion of *generating residual signals*. These residues are defined as the difference between the measured variables and the estimated ones. In the case of no fault, and in the ideal case of noise free observations, the values of the residues are equal to zero. In the non-zero case, the estimation method must be specified, see e.g. the observer-based approach (Tousi and Khorasani, 2011), the parity-space approach (Diversi et al., 2002) or the Kalman-based approach (Izadian and Khayyer, 2010). However, in these approaches, a sensor may be wrongly declared faulty because of the lack of efficiency of the estimation algorithm, hence the importance of the notion of *detectability*.
12
+
13
+ Recently, the flatness property has been introduced into the repertoire of FDI techniques (Suryawan et al., 2010; Martínez-Torres et al., 2014). Here, residues are calculated using the differential flatness property. Roughly speaking, let us recall that a system is said to be flat if all the
14
+
15
+ state and input variables can be expressed as functions of a particular variable, called flat output, and a finite number of its successive derivatives. The method presented in Suryawan et al. (2010) is dedicated to linear flat systems and uses the properties of B-spline parameterisation to estimate the time derivatives of the flat output, which may not be defined because of the presence of noise. This derivative estimation can take time and cause a delay in the reconfiguration process. In order to overcome these issues, a high-gain observer has been proposed in Martínez-Torres et al. (2014) to evaluate the time derivative of the noisy signals. The observer may be complemented by a low-pass filter to improve its performance. Note that the latter method can be applied to both, linear and nonlinear flat systems.
16
+
17
+ In the present flatness-based FDI approach, an effort is made to dissociate the theoretical *isolability* property, based on residue computation, and the estimation process. For this purpose, we compute the residues between the measurements and their expression exactly obtained from the measured flat outputs and their derivatives estimated online. The treatment of these residues slightly differs from the ones of the previous approaches (Kóscielny et al., 2016): every sensor and actuator admits a *fault alarm signature*, i.e. a number of residues affected by a fault on this sensor/actuator and a fault on a sensor/actuator is isolable if its corresponding fault alarm signature is distinct. In practice, the treatment of these residues is adapted, in the presence of noise, by introducing a threshold and an estimation process as in the previous approaches (Martínez-Torres et al., 2013). Moreover, we show that it is possible to increase the isolability of faults by considering several flat outputs, at the condition that they are independent,
18
+
19
+ **Abstract:** This paper presents a rigorous definition of the isolability of a fault in a flat system whose flat outputs are measured by sensors that are subject to faults. In particular, if only one sensor or actuator is faulty at a time, we show that the isolation of faults can be achieved if a pair of flat outputs satisfies some independence condition. A detailed characterization of this condition is presented. Finally, the pertinence of the isolability concept is demonstrated on the example of a three tank system.
20
+
21
+ **Keywords:** nonlinear flat system, flat output, fault detection and isolation, three tank system.
22
+
23
+ ## 1. INTRODUCTION
24
+ ---PAGE_BREAK---
25
+
26
+ thus completing in a rigorous way some heuristic results of Martínez-Torres et al. (2013). These results are applied to a three tank FDI problem where we compute two independent flat outputs that allow the isolation of all possible simple faults (only one faulty sensor or actuator at a time).
27
+
28
+ The main contributions of this paper are the above mentioned rigorous definition of isolability of faults and the characterization of the flat outputs to be used in the fault isolation.
29
+
30
+ This paper is organized as follows: section 2 introduces the basic concepts of FDI for nonlinear differentially flat systems and their definitions. Section 3 discusses the conditions for independence between flat outputs. Section 4 deals with the application of this FDI approach to the three tank system. Finally, section 5 concludes the paper.
31
+
32
+ ## 2. FLATNESS-BASED FDI
33
+
34
+ ### 2.1 Differentially Flat System
35
+
36
+ Consider the following nonlinear system
37
+
38
+ $$ \begin{cases} \dot{x} = f(x, u) \\ y = h(x, u) \end{cases} \quad (1) $$
39
+
40
+ where $x$, the vector of states, evolves in a $n$-dimensional manifold $X$, $u \in \mathbb{R}^m$ is the vector of inputs, $y \in \mathbb{R}^p$ is the measured output, $m \le n$, $\text{rank}(\frac{\partial f}{\partial u}) = m$ and $m \le p$. Let $(x, \bar{u}) \triangleq (x, u, \dot{u}, \ddot{u}, \ldots)$ be a prolongation of the coordinates $(x, u)$ to the manifold of jets of infinite order $\mathcal{X} \triangleq X \times \mathbb{R}_\infty^m$ (Fliess et al., 1999), (Levine, 2009, Chapter 5).
41
+
42
+ In the sequel, we systematically denote by $\bar{\xi} \triangleq (\xi, \dot{\xi}, \ddot{\xi}, \ldots)$ the sequence of infinite order jets of a vector $\xi$ and $\tilde{\xi}^{(\alpha)} \triangleq (\xi, \dot{\xi}, \ddot{\xi}, \ldots, \xi^{(\alpha)})$ the truncation at the finite order $\alpha \in \mathbb{N}$ of the previous sequence.
43
+
44
+ The system (1) is flat at a point $(x_0, \bar{u}_0) \in \mathcal{X}$ if and only if there exist a vector $z = (\bar{z}_1, \ldots, \bar{z}_m) \in \mathbb{R}^m$, two integers $\rho$ and $\nu$ and mappings $\psi$ defined on a neighbourhood $\mathcal{V}$ of $(x_0, \bar{u}_0)$ in $\mathcal{X}$ and $\varphi = (\varphi_0, \varphi_1, \ldots)$ defined on a neighbourhood $\mathcal{W} \subset \mathcal{V}$ of $\bar{z} \triangleq (z, \dot{z}, \ddot{z}, \ldots) \triangleq \psi(x_0, \bar{u}_0)$ in $\mathbb{R}_\infty^m$ such that:
45
+
46
+ (1) $z = \psi(x, \bar{u}^{(\nu)}) \in \mathcal{W}$
47
+
48
+ (2) $\bar{z}_1, \ldots, \bar{z}_m$ and their successive derivatives are linearly independent in $\mathcal{W}$
49
+
50
+ (3) The state $x$ and the input $u$ are functions of $z$ and its successive derivatives:
51
+
52
+ $$ (x, u) = (\varphi_0(\bar{z}^{(\rho)}), \varphi_1(\bar{z}^{(\rho+1)})) \in \operatorname{pr}_{X \times \mathbb{R}^m}(\mathcal{V}) \quad (2) $$
53
+
54
+ where $\operatorname{pr}_{X \times \mathbb{R}^m}(\mathcal{V})$ is the canonical projection from $\mathcal{V}$ to $X \times \mathbb{R}^m$
55
+
56
+ (4) The differential equation $\dot{\varphi}_0(\bar{z}) = f(\varphi_0(\bar{z}), \varphi_1(\bar{z}))$ is identically satisfied in $\mathcal{W}$.
57
+
58
+ The vector $z$ is called flat output of the system. The mappings $\psi$ and $\varphi$ are called Lie-Bäcklund isomorphisms and are inverse of one another.
59
+
60
+ **Remark 1.** The property of flatness is not defined globally. The Lie-Bäcklund isomorphisms $\psi$ and $\varphi$ are non unique and only locally defined. Thus, there might exist points in $\mathcal{X}$ where no such isomorphisms exist or, otherwise
61
+
62
+ stated, where the system is not flat. It has been proven in Kaminski et al. (2018) that the set of intrinsic singularities contains the set of equilibrium points of the system that are not first order controllable.
63
+
64
+ ### 2.2 Fault Detection and Isolation
65
+
66
+ For the flat system (1), we suppose that the vector $y^s = (y_1^s, \ldots, y_p^s)^T$ is measured by sensors $S_1, \ldots, S_p$ respectively. We also suppose that the flat output $z$ is part of these measurements according, without loss of generality, to
67
+
68
+ $$ z^s = (y_1^s, \ldots, y_m^s)^T. \quad (3) $$
69
+
70
+ Moreover, the value of the input vector $u = (u_1, \ldots, u_m)^T$, corresponding to the actuators $A_1, \ldots, A_m$, is assumed to be available at every time. We now propose a new definition of the notion of residue that generalizes the one introduced by Martínez-Torres et al. (2014).
71
+
72
+ According to (2), the state and input read:
73
+
74
+ $$ x^z = \varphi_0(\overline{z^s^{(\rho)}}), \quad u^z = \varphi_1(\overline{z^s^{(\rho+1)}}) \quad (4) $$
75
+
76
+ where the superscript $z$ indicates that they are evaluated as functions of the measurements $z^s$ and, according to (1),
77
+
78
+ $$ y_k^z \triangleq h_k(\varphi_0(\overline{z^s^{(\rho)}}), \varphi_1(\overline{z^s^{(\rho+1)}})) \quad (5) $$
79
+
80
+ is the virtual value of $y_k$ computed via the measured flat output $z^s$.
81
+
82
+ Note that the first $m$ components of $y^z$ are equal to the corresponding components of $z^s$:
83
+
84
+ $$ y^z = (\overline{z^s}, \tilde{h}(\varphi_0(\overline{z^s}), \varphi_1(\overline{z^s})))^T \quad (6) $$
85
+
86
+ with $\tilde{h} = (h_{m+1}(\varphi_0(\overline{z^s}), \varphi_1(\overline{z^s})), \dots, h_p(\varphi_0(\overline{z^s}), \varphi_1(\overline{z^s})))^T$. **Definition 1.** The $k$th-sensor residue $R_{S_k}$ and $l$th-input residue $R_{A_l}$, for $k=1,\dots,p$ and $l=1,\dots,m$, are given by:
87
+
88
+ $$ R_{S_k} = y_k^s - y_k^{z}, \quad R_{A_l} = u_l - u_l^{z}. \quad (7) $$
89
+
90
+ In total, we have $p+m$ residues for a single flat output $z^s$ and we denote the full residue vector by:
91
+
92
+ $$ r = (R_{S_1}, \dots, R_{S_m}, R_{S_{m+1}}, \dots, R_{S_p}, R_{A_1}, \dots, R_{A_m})^T \\ = (r_1, \dots, r_m, r_{m+1}, \dots, r_p, r_{p+1}, \dots, r_{p+m})^T \quad (8) $$
93
+
94
+ and according to (6)
95
+
96
+ $$ r = (0, \dots, 0, R_{S_{m+1}}, \dots, R_{S_p}, R_{A_1}, \dots, R_{A_m})^T \\ = (0, \dots, 0, r_{m+1}, \dots, r_p, r_{p+1}, \dots, r_{p+m})^T. \quad (9) $$
97
+
98
+ Measured and calculated variables are illustrated in Fig. 1.
99
+
100
+ A residue that is always equal to zero indicates that it cannot be affected by faults on any of the sensors or actuators. We then eliminate it and truncate the residue vector to keep the last $p$ components only. This truncated vector is denoted by $r_\tau$:
101
+
102
+ $$ r_\tau = (R_{S_{m+1}}, \dots, R_{S_p}, R_{A_1}, \dots, R_{A_m})^T \\ = (r_{\tau_1}, r_{\tau_2}, \dots, r_{\tau_p})^T. \quad (10) $$
103
+
104
+ **Hypothesis:** From now on, we assume that there is only one fault at a time affecting the sensors or actuators.
105
+
106
+ In practice, due to the presence of noises on sensors and actuators, the successive derivatives of $z^s$ may not be
107
+ ---PAGE_BREAK---
108
+
109
+ Fig. 1. Flatness-based residual generation
110
+
111
+ defined. We assume that they are computed via a high-
112
+ gain observer, possibly completed by a low-pass filter as in
113
+ Martínez-Torres et al. (2014) to improve its robustness.
114
+ Moreover, a threshold is associated to each residue. In
115
+ the non faulty case, the residues in (10) will not exceed
116
+ their thresholds. If, otherwise, at least one of the residues
117
+ exceeds its threshold then a fault alert is launched. If
118
+ several residues in (10) trigger an alert at the same time, a
119
+ fault alarm signature, defined below, is required to isolate
120
+ the fault.
121
+
122
+ For this purpose, we introduce the so-called *signature matrix*:
123
+
124
+ *Definition 2.* (Signature matrix). Given the vector of residues $r_{\tau}$ defined in (10) and the vector of available measurements $\zeta = (y_1^s, \dots, y_p^s, u_1, \dots, u_m)^T \in \mathbb{R}^{p+m}$, we define the *signature matrix* associated with $z^s$ as the matrix **S** given by:
125
+
126
+ $$
127
+ \mathbf{S} = \begin{pmatrix}
128
+ \sigma_{1,1} & \sigma_{1,2} & \cdots & \sigma_{1,p+m} \\
129
+ \vdots & \vdots & \ddots & \vdots \\
130
+ \sigma_{p,1} & \sigma_{p,2} & \cdots & \sigma_{p,p+m}
131
+ \end{pmatrix} \quad (11)
132
+ $$
133
+
134
+ with
135
+
136
+ $$
137
+ \sigma_{i,j} \triangleq \begin{cases} 0 & \text{if } \frac{\partial r_{\tau_i}}{\partial \zeta_j^{(\varrho)}} = 0 \quad \forall \varrho \in \{0, 1, \dots\} \\ 1 & \text{if } \exists \varrho \in \{0, 1, \dots\} \text{ s.t. } \frac{\partial r_{\tau_i}}{\partial \zeta_j^{(\varrho)}} \neq 0 \end{cases} \tag{12}
138
+ $$
139
+
140
+ *Remark 1.* Each column $\Sigma_j$ of the signature matrix $\mathbf{S}$ indicates whether a residue $r_{\tau_i}$ is or is not functionally affected by a fault on the measurement $\zeta_j$. So in (12), $\sigma_{i,j} = 0$ means that the residue $r_{\tau_i}$ is not affected by a fault on the measurement $\zeta_j$ and $\sigma_{i,j} = 1$ means that the residue may be affected.
141
+
142
+ *Definition 3.* A column $\Sigma_j$ of the signature matrix $\mathbf{S}$ is called the *fault alarm signature*, or simply *signature*, associated with the sensor/actuator $\zeta_j$.
143
+
144
+ From the signature matrix **S** we propose the following
145
+ definitions of detectability and isolability in the flatness
146
+ context:
147
+
148
+ *Definition 4.* (Detectability). A fault on a sensor/actuator $\zeta_j$ is detectable if, and only if there exists at least one $i \in \{1, \dots, p\}$ such that $\sigma_{i,j} = 1$.
149
+
150
+ *Definition 5.* (Isolability). A fault on a sensor $S_k$,
151
+ $k = 1, \dots, p$, is said *isolable* if, and only if, its correspond-
152
+ ing fault alarm signature $\Sigma_k$ in the signature matrix $S$ is
153
+ distinct from the others, i.e.
154
+
155
+ $$
156
+ \Sigma_k \neq \Sigma_j, \quad \forall j = 1, \dots, p+m, \quad j \neq k. \tag{13}
157
+ $$
158
+
159
+ An isolable fault on the actuator $A_l$, for $l = 1, \dots, m$, is
160
+ defined analogously:
161
+
162
+ $$
163
+ \Sigma_{p+l} \neq \Sigma_j, \quad \forall j = 1, \dots, p+m, \quad j \neq p+l. \quad (14)
164
+ $$
165
+
166
+ We define $\mu$ as the number of distinct signatures of the
167
+ signature matrix $\mathbf{S}$ associated to $z^s$. Then, $\mu$ is the number
168
+ of isolable faults associated to $z^s$.
169
+
170
+ A more general, but much more complicated, definition of isolability in the structured residual context of polynomial systems has been introduced in Staroswiecki and Comtet-Varga (2001), based on elimination techniques.
171
+
172
+ Definition 5 means that if the signature matrix $\mathbf{S}$ has two identical signatures, i.e. $\Sigma_i = \Sigma_j$, for two different sensors/actuators $\zeta_i \neq \zeta_j$, then we cannot make a decision on the faulty device, hence the fault is detected but cannot be isolated. Thus, the number of isolated faults is equal to the number of distinct signatures in the matrix $\mathbf{S}$.
173
+
174
+ ## 2.3 The Example of the three tank System
175
+
176
+ We consider a three tank system made up with three cylindrical tanks of cross-sectional area S, connected to each other by means of cylindrical pipes of section S<sub>n</sub>, and two pumps P<sub>1</sub> and P<sub>2</sub> that supply tanks T<sub>1</sub> and T<sub>2</sub>. These three tanks are also connected to a central reservoir through pipes (see Fig. 2).
177
+
178
+ The model is given by:
179
+
180
+ $$
181
+ \dot{x}_1 = -Q_{10}(x_1) - Q_{13}(x_1, x_3) + u_1 \quad (15)
182
+ $$
183
+
184
+ $$
185
+ \dot{x}_2 = -Q_{20}(x_2) + Q_{32}(x_2, x_3) + u_2 \quad (16)
186
+ $$
187
+
188
+ $$
189
+ \dot{x}_3 = Q_{13}(x_1, x_3) - Q_{32}(x_2, x_3) - Q_{30}(x_3) \quad (17)
190
+ $$
191
+
192
+ where the state variables $x_i$, $i = 1, 2, 3$ represent the water level of each tank, $Q_{i0}$, $i = 1, 2, 3$ the outflow between each tank and the central reservoir, $Q_{13}$ is the outflow between tanks $T_1$ and $T_3$ and $Q_{32}$ the outflow between tanks $T_3$ and $T_2$, $u_1$ and $u_2$ are the incoming flows per unit of surface of each pump.
193
+
194
+ We assume the following inequalities to avoid singularities¹:
195
+
196
+ $$
197
+ x_1 > x_3 > x_2.
198
+ $$
199
+
200
+ We consider that the valves connecting tanks $T_1$ and $T_3$
201
+ with the central reservoir are closed, i.e. $Q_{10} \equiv 0$ and
202
+ $Q_{30} \equiv 0$. The expressions of $Q_{13}$, $Q_{32}$ and $Q_{20}$ are given
203
+ by:
204
+
205
+ $$
206
+ Q_{13}(x_1, x_3) = a_{z1} \sqrt{2g(x_1 - x_3)} \quad (18)
207
+ $$
208
+
209
+ $$
210
+ Q_{20}(x_2) = a_{z2} \sqrt{2g x_2} \quad (19)
211
+ $$
212
+
213
+ $$
214
+ Q_{32}(x_2, x_3) = a_{z3} \sqrt{2g(x_3 - x_2)} \quad (20)
215
+ $$
216
+
217
+ ¹ According to the *Remark 1*, the point $\bar{x} \in \mathcal{X}$ s.t. $x_1 = x_2 = x_3$ is an equilibrium point which is not first order controllable, then it is a point of intrinsic flatness singularity.
218
+ ---PAGE_BREAK---
219
+
220
+ Fig. 2. *Three Tank System*, Source: (Noura et al., 2009)
221
+
222
+ where $a_{zr}$, $r = 1, 2, 3$, is the flow coefficient and $g$ the gravitational force. Each tank $T_i$ is equipped with a sensor $\mathbf{S}_i$ to measure its level $x_i$. Hence, the measured output is:
223
+
224
+ $$y^s = (y_1^s, y_2^s, y_3^s)^T = (x_1^s, x_2^s, x_3^s)^T \quad (21)$$
225
+
226
+ The system (15)-(16)-(17) is flat with $z = (x_1, x_3)^T = (z_1, z_2)^T$ as flat output. The measured flat output is then given by $z^s = (y_1^s, y_3^s)^T = (z_1^s, z_2^s)^T$. In order to construct the vector of residues, using (4) and (5), we set:
227
+
228
+ $$\begin{aligned}
229
+ y_1^z &= z_1^s \\
230
+ y_2^z &= z_2^s - \frac{1}{2g} \left( \frac{a_{z1} \sqrt{2g(z_1^s - z_2^s)} - \dot{z}_2^s}{a_{z3}} \right)^2 \\
231
+ y_3^z &= z_2^s \\
232
+ u_1^z &= \dot{z}_1^s + a_{z1} \sqrt{2g(z_1^s - z_2^s)} \\
233
+ u_2^z &= \dot{y}_2^z - a_{z3} \sqrt{2g(z_2^s - y_2^z)} + a_{z2} \sqrt{2gy_2^z}.
234
+ \end{aligned}$$
235
+
236
+ According to (7), the vector of residues, associated to $z^s$, is then given by:
237
+
238
+ $$r = \begin{pmatrix} R_{S_1} \\ R_{S_2} \\ R_{S_3} \\ R_{A_1} \\ R_{A_2} \end{pmatrix} = \begin{pmatrix} y_1^s \\ y_2^s \\ y_3^s \\ u_1 \\ u_2 \end{pmatrix} - \begin{pmatrix} y_1^z \\ y_2^z \\ y_3^z \\ u_1^z \\ u_2^z \end{pmatrix}. \quad (22)$$
239
+
240
+ However, residues $R_{S_1}$ and $R_{S_3}$ are identically zero:
241
+
242
+ $$\begin{aligned}
243
+ R_{S_1} &= y_1^s - y_1^z = z_1^s - z_1^s = 0 \\
244
+ R_{S_3} &= y_3^s - y_3^z = z_2^s - z_2^s = 0
245
+ \end{aligned} \quad (23)$$
246
+
247
+ hence, according to (10), the vector $r$ is truncated to:
248
+
249
+ $$r_\tau = (R_{S_2}, R_{A_1}, R_{A_2})^T = (r_{\tau_1}, r_{\tau_2}, r_{\tau_3})^T. \quad (24)$$
250
+
251
+ Therefore, the signature matrix $\mathbf{S}$, associated to $z^s$, is constructed as follows:
252
+
253
+ - All the residues in (24) depend on the measurement of $z^s = (y_1^s, y_3^s)^T$ then the first and the third columns of the signature matrix contain only ones:
254
+
255
+ $$\sigma_{i,1} = \sigma_{i,3} = 1, \forall i = 1, 2, 3$$
256
+
257
+ - Only residue $r_{\tau_1}$ depends on $y_2^s$ and its successive derivatives, then the second column will be such that:
258
+
259
+ $$\sigma_{1,2} = 1 \text{ and } \sigma_{i,2} = 0, i = 2, 3$$
260
+
261
+ - Since $r_{\tau_2}$ depends only on $u_1$ and $r_{\tau_3}$ depends only on $u_2$, then column 4 and column 5 of $\mathbf{S}$ are such that:
262
+
263
+ $$\sigma_{2,4} = 1 \text{ and } \sigma_{i,4} = 0 \forall i = 1, \dots, 3, i \neq 2$$
264
+
265
+ and
266
+
267
+ $\sigma_{3,5} = 1$ and $\sigma_{i,5} = 0 \forall i = 1, \dots, 3, i \neq 3$
268
+
269
+ respectively.
270
+
271
+ Hence, the signature matrix, associated to $r_\tau$, is given by:
272
+
273
+ $$\mathbf{S} = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 1 & 0 \\ 1 & 0 & 1 & 0 & 1 \end{pmatrix}. \quad (25)$$
274
+
275
+ According to definition 4, all faults on the three tank system's sensors and actuators are detectable. Since fault alarm signatures $\Sigma_2$, $\Sigma_4$ and $\Sigma_5$ are distinct, then, according to definition 5, faults on sensor $\mathbf{S}_2$ and actuators $\mathbf{A}_1$ and $\mathbf{A}_2$ are isolable. This reflects the fact that if, at some point during system operation, a fault alarm is launched with the signature $\Sigma_2$ then we conclude that the sensor $\mathbf{S}_2$ is faulty. However, if we obtain a signature like $\Sigma_1$, the fault could be on the sensor $\mathbf{S}_1$ or $\mathbf{S}_3$, since signatures $\Sigma_1$ and $\Sigma_3$ are identical. Then, a fault on $\mathbf{S}_1$ or $\mathbf{S}_3$ cannot be isolated. To conclude, this example shows that the isolability property is strongly conditioned by the dependence of the flat output with respect to the measured variables. This motivates the study of the choice of flat outputs of the next section.
276
+
277
+ **Remark 2.** In Nagy et al. (2009), it has been shown that system (15)-(16)-(17) is observable through $x_1$ only and that $x_2$ and $x_3$ can be estimated using $x_1$ given the measurements of $u_1$ and $u_2$, leading to different isolability results. The reader may refer to this article for more details. Note that, here, the measurements of $u_1$ and $u_2$ are not necessary to guarantee the $x_2$-isolability.
278
+
279
+ ### 3. FLAT OUTPUT SELECTION
280
+
281
+ In order to get more isolability on systems sensor and actuator, the authors in Martínez-Torres et al. (2014) propose to increase the number of residues by using several flat outputs. These flat outputs must be *independent* in the sense that when we use them together we gain more isolability of faults. In this section, we propose a characterization of the relation between different flat outputs using a so-called *augmented signature matrix*. This characterization leads to a decision concerning the choice of flat outputs that are useful for the isolability.
282
+
283
+ According to definition 5, the number $\mu$ of isolated faults by a flat output $z$ is equal to the number of distinct signatures $\Sigma_k$ of its signature matrix. Then, in order to get more isolability of faults, we need to increase the number of distinct signatures. This is possible when different projections of the system's output $y$ are available that are flat outputs. For this purpose, we introduce definitions 6 and 7.
284
+
285
+ In the following, we denote the $i^{th}$ element of a set of $q$ flat output vectors by $Z_i = (z_{i1}, \dots, z_{im})^T$.
286
+
287
+ *Definition 6.* (Augmented signature matrix). Let $Z_1, \dots, Z_q$ be q different flat output vectors of the flat system (1), such that $Z_i = \text{pr}_{\mathbb{R}^m}(y)$. The *augmented signature matrix* $\tilde{\mathbf{S}}$ associated to $Z_1, \dots, Z_q$ is defined by:
288
+
289
+ $$\tilde{\mathbf{S}} = \begin{pmatrix} \mathbf{S}_1 \\ \mathbf{S}_2 \\ \vdots \\ \mathbf{S}_q \end{pmatrix} \quad (26)$$
290
+ ---PAGE_BREAK---
291
+
292
+ where $\mathbf{S}_i$ is the signature matrix associated to the flat output vector $Z_i$.
293
+
294
+ The choice of flat output vectors is not arbitrary. They must be independent in the sense given by the following definition:
295
+
296
+ *Definition 7.* (Independence). Let $\tilde{\mathbf{S}}$ be the augmented signature matrix associated to $Z_1$ and $Z_2$:
297
+
298
+ $$ \tilde{\mathbf{S}} = \begin{pmatrix} \mathbf{S}_1 \\ \mathbf{S}_2 \end{pmatrix}, $$
299
+
300
+ $\mu_i, i = 1, 2$, the number of distinct signatures of the matrix $\mathbf{S}_i$ and $\tilde{\mu}$ the number of distinct signatures of the augmented matrix $\tilde{\mathbf{S}}$. We say that $Z_1$ and $Z_2$ are *independent* if, and only if
301
+
302
+ $$ \tilde{\mu} > \mu_1 \quad \text{and} \quad \tilde{\mu} > \mu_2. \tag{27} $$
303
+
304
+ Definition 7 means that two flat outputs are independent if, by using them together, the number of distinct signatures increases which corresponds to the number of isolated faults. If the condition (27) is not satisfied then the combination of $Z_1$ and $Z_2$ is not helpful for the isolability, and we have to find another combination by calculating more flat outputs. To conclude, the condition of full isolability is given by the following proposition:
305
+
306
+ *Proposition 2.* Let $Z_1, \dots, Z_q$ be $q$ different flat output vectors of the system (1). A full isolability of faults on sensors and actuators is achieved if the augmented matrix
307
+
308
+ $$ \tilde{\mathbf{S}} = \begin{pmatrix} \mathbf{S}_1 \\ \mathbf{S}_2 \\ \vdots \\ \mathbf{S}_q \end{pmatrix} $$
309
+
310
+ has $p+m$ distinct signatures, i.e. $\tilde{\mu} = p+m$.
311
+
312
+ # 4. APPLICATION TO THE THREE TANK SYSTEM
313
+
314
+ Back to the three tank system presented in section 2.3, we denote by $Z_1$ the flat output vector $Z_1 = (z_{11}, z_{12})^T = (x_1, x_3)^T$. The corresponding vector of residues is given by (24). We recall the signature matrix associated to $Z_1$, and we denote it by $\mathbf{S}_1$:
315
+
316
+ $$ \mathbf{S}_1 = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 1 & 0 \\ 1 & 0 & 1 & 0 & 1 \end{pmatrix} \tag{28} $$
317
+
318
+ We also recall that, according to definition 5, faults on sensors $\mathbf{S}_1$ and $\mathbf{S}_3$ cannot be isolated. The number of distinct signatures of $\mathbf{S}_1$ is $\mu_1 = 3$.
319
+
320
+ In order to increase the number of isolable faults, we consider $Z_2 = (z_{21}, z_{22})^T = (x_2, x_3)^T$ another flat output vector of the three tank system. It is measured by sensors $\mathbf{S}_2$ and $\mathbf{S}_3$, i.e. $Z_2^s = (z_{21}^s, z_{22}^s)^T = (y_2^s, y_3^s)^T$. To construct the vector of residues associated to $Z_2^s$ and its signature matrix, we set, using (4) and (5):
321
+
322
+ $$
323
+ \begin{align*}
324
+ y_1^{Z_2} &= z_{22}^s + \frac{1}{2g} \left( a_{z3} \sqrt{2g(z_{22}^s - z_{21}^s)} + \dot{z}_{22}^s \right)^2 \\
325
+ y_2^{Z_2} &= z_{21}^s \\
326
+ y_3^{Z_2} &= z_{22}^s \\
327
+ u_1^{Z_2} &= \dot{z}_{22}^s + a_{z1} \sqrt{2g(z_{21}^s - z_{22}^s)} \\
328
+ u_2^{Z_2} &= \dot{y}_{2}^{Z_2} - a_{z3} \sqrt{2g(z_{22}^s - y_{2}^{Z_2})} + a_{z2} \sqrt{2gy_{2}^{Z_2}}.
329
+ \end{align*}
330
+ $$
331
+
332
+ Therefore, as shown for the flat output $Z_1$, residues $R_{S_2}^{Z_2}$ and $R_{S_3}^{Z_2}$ are identically zero and the truncated vector of residues (10) reads:
333
+
334
+ $$ r_{\tau}^{Z_2} = \begin{pmatrix} R_{\mathbf{S}_1}^{Z_2} \\ R_{A_1}^{Z_2} \\ R_{A_2}^{Z_2} \end{pmatrix} = \begin{pmatrix} y_2^s \\ u_1 \\ u_2 \end{pmatrix} - \begin{pmatrix} y_2^{Z_2} \\ u_1^{Z_2} \\ u_2^{Z_2} \end{pmatrix}. \tag{29} $$
335
+
336
+ Hence, the signature matrix associated to $Z_2$ is given by:
337
+
338
+ $$ \mathbf{S}_2 = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 0 & 1 & 1 & 1 & 0 \\ 0 & 1 & 1 & 0 & 1 \end{pmatrix}. \tag{30} $$
339
+
340
+ Signatures $\Sigma_1, \Sigma_4$ and $\Sigma_5$ in the matrix $\mathbf{S}_2$ are distinct, then, according to definition 5, faults on sensor $\mathbf{S}_1$ and actuators $\mathbf{A}_1$ and $\mathbf{A}_2$ are isolable by the flat output $Z_2$. Moreover, the number of distinct signatures of $\mathbf{S}_2$ is $\mu_2 = 3$. However, since signatures $\Sigma_2$ and $\Sigma_3$ are identical, then faults on sensors $\mathbf{S}_2$ and $\mathbf{S}_3$ cannot be isolated.
341
+
342
+ It remains to be verified whether the two flat outputs $Z_1$ and $Z_2$ are independent.
343
+
344
+ The augmented signature matrix associated to $Z_1$ and $Z_2$ is given by:
345
+
346
+ $$ \tilde{\mathbf{S}} = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 1 & 0 \\ 1 & 0 & 1 & 0 & 1 \\ 1 & 1 & 1 & 0 & 0 \\ 0 & 1 & 1 & 1 & 0 \\ 0 & 1 & 1 & 0 & 1 \end{pmatrix}. \tag{31} $$
347
+
348
+ The number of distinct fault alarm signatures of $\tilde{\mathbf{S}}$ is $\tilde{\mu} = 5$, and we have
349
+
350
+ $$ \tilde{\mu} > \mu_1 \quad \text{and} \quad \tilde{\mu} > \mu_2. $$
351
+
352
+ Then, according to definition 7, the flat output vectors $Z_1$ and $Z_2$ are independent. Moreover, since $\tilde{\mu} = p+m$, then flat output vectors $Z_1$ and $Z_2$ ensure full isolability of faults on the three tank system.
353
+
354
+ Simulation results that confirm the effectiveness of this approach can be found in Martínez-Torres et al. (2013).
355
+
356
+ # 5. CONCLUSION
357
+
358
+ The current paper introduces a novel and rigorous definition of the isolability of faults affecting a system's sensors and actuators, using the flatness-based FDI approach. The described condition of isolability provides an efficient way to select flat outputs that are useful for fault isolation. Our results are tested and validated using the three tank system. Future work should focus on the development of a method that calculates independent flat outputs directly.
359
+ ---PAGE_BREAK---
360
+
361
+ REFERENCES
362
+
363
+ Chen, J., Li, H., Sheng, D., and Li, W. (2015). A hybrid data-driven modeling method on sensor condition monitoring and fault diagnosis for power plants. *International Journal of Electrical Power & Energy Systems*, 71, 274-284.
364
+
365
+ Diversi, R., Simani, S., and Soverini, U. (2002). Robust residual generation for dynamic processes using decoupling technique. In *Proceedings of the International Conference on Control Applications*, volume 2, 1270–1275. IEEE.
366
+
367
+ Fliess, M., Lévine, J., Martin, P., and Rouchon, P. (1999). A Lie-Bäcklund approach to equivalence and flatness of nonlinear systems. *IEEE Transactions on Automatic Control*, 44(5), 922–937.
368
+
369
+ Izadian, A. and Khayyer, P. (2010). Application of Kalman filters in model-based fault diagnosis of a DC-DC boost converter. In *IECON 2010-36th Annual Conference on IEEE Industrial Electronics Society*, 369–372. IEEE.
370
+
371
+ Kaminski, Y.J., Lévine, J., and Ollivier, F. (2018). Intrinsic and apparent singularities in differentially flat systems, and application to global motion planning. *Systems & Control Letters*, 113, 117–124.
372
+
373
+ Kościelny, J.M., Syfert, M., Rostek, K., and Sztyber, A. (2016). Fault isolability with different forms of the faults-symptoms relation. *International Journal of Applied Mathematics and Computer Science*, 26(4), 815–826.
374
+
375
+ Lévine, J. (2009). *Analysis and control of nonlinear systems: A flatness-based approach*. Springer Science & Business Media.
376
+
377
+ Martínez-Torres, C., Lavigne, L., Cazaurang, F., Alcorta-García, E., and Díaz-Romero, D.A. (2013). Fault detection and isolation on a three tank system using differential flatness. In *2013 European Control Conference (ECC)*, 2433–2438. IEEE.
378
+
379
+ Martínez-Torres, C., Lavigne, L., Cazaurang, F., Alcorta-García, E., and Díaz-Romero, D.A. (2014). Flatness-based fault tolerant control. *Dyna*, 81(188), 131–138.
380
+
381
+ Nagy, A.M., Marx, B., Mourot, G., Schutz, G., and Ragot, J. (2009). State estimation of the three-tank system using a multiple model. In *Proceedings of the 48th IEEE Conference on Decision and Control (CDC) held jointly with 2009 28th Chinese Control Conference*, 7795–7800. IEEE.
382
+
383
+ Noura, H., Theilliol, D., Ponsart, J.C., and Chamseddine, A. (2009). *Fault-tolerant control systems: Design and practical applications*. Springer Science & Business Media.
384
+
385
+ Staroswiecki, M. and Comtet-Varga, G. (2001). Analytical redundancy relations for fault detection and isolation in algebraic dynamic systems. *Automatica*, 37(5), 687–699.
386
+
387
+ Suryawan, F., De Doná, J., and Seron, M. (2010). Fault detection, isolation, and recovery using spline tools and differential flatness with application to a magnetic levitation system. In *2010 Conference on Control and Fault-Tolerant Systems (SysTol)*, 293–298. IEEE.
388
+
389
+ Thirumarimurugan, M., Bagyalakshmi, N., and Paarkavi, P. (2016). Comparison of fault detection and isolation methods: A review. In *2016 10th International Conference on Intelligent Systems and Control (ISCO)*, 1–6. IEEE.
390
+
391
+ Tousi, M. and Khorasani, K. (2011). Robust observer-based fault diagnosis for an unmanned aerial vehicle. In *2011 IEEE International Systems Conference*, 428–434. IEEE.
392
+
393
+ Zhou, Y., Xu, G., and Zhang, Q. (2014). Overview of fault detection and identification for non-linear dynamic systems. In *2014 IEEE International Conference on Information and Automation (ICIA)*, 1040–1045. IEEE.
samples/texts_merged/4174805.md ADDED
@@ -0,0 +1,578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ NON-CUT, SHORE AND NON-BLOCK POINTS IN
5
+ CONTINUA
6
+
7
+ JOZEF BOBOK, PAVEL PYRIH AND BENJAMIN VEJNAR
8
+
9
+ Czech Technical University in Prague and Charles University in Prague,
10
+ Czech Republic
11
+
12
+ **ABSTRACT.** In a nondegenerate continuum we study the set of non-cut points. We show that it can be stratified by inclusion into six natural subsets (containing also non-block and shore points). Among other results we show that every nondegenerate continuum contains at least two non-block points. Our investigation is further focused on both the classes of arc-like and circle-like continua.
13
+
14
+ # 1. INTRODUCTION
15
+
16
+ In Continuum theory it is often useful to know more about special kinds of points in a continuum. A well known example is the classical result of Moore (see [Bo67, p. 177]) stating that every nondegenerate continuum must have at least two non-cut points (a non-cut point in a connected space is a point whose complement is connected); the result has been recently generalized to the shore points by Leonel ([Le13]) - precise definitions will be given later. We recommend to the reader the book of Nadler ([Na92]) as a general reference for many notions used throughout the paper.
17
+
18
+ More authors have investigated various properties of special sets in continua: Grace in [Gr81] provides a survey of results relating the notions of aposyndesis and weak cut point; Illanes in [Il01] shows that, in a dendroid, finite union of pairwise disjoint shore subdendroids is a shore set; among other results, a simple example of a planar dendroid in which the union of two disjoint closed shore sets is not a shore set is presented in [BMPV14]; in [Na07]
19
+
20
+ 2010 Mathematics Subject Classification. 54F15, 54D10.
21
+ Key words and phrases. Continuum, shore point, non-cut point, arc-like continuum.
22
+ The work was supported by the grant GAČR 14-06989P. The third author is a junior researcher in the University Center for Mathematical Modeling, Applied Analysis and Computational Mathematics (Math MAC).
23
+ ---PAGE_BREAK---
24
+
25
+ Nall explores the relationship between center points and shore points in a den-
26
+ droid; Illanes and Krupski study blockers and nonblockers for several kinds
27
+ of continua ([IKr11]); and, using the results of [IKr11], Escobedo, López and
28
+ Villanueva ([ELV12]) characterize some classes of locally connected continua
29
+ - for further information on the subject see also [PV12, Le13].
30
+
31
+ Our aim is to study blocking properties of points in a general continuum.
32
+ We laminate the set of non-cut points to six natural subsets (containing non-
33
+ block and shore points, among others) ordered by the inclusion and consider
34
+ various questions related to them. Our interest is mainly focused on both the
35
+ classes of arc-like and circle-like continua.
36
+
37
+ It is interesting to compare our lamination of non-cut points with several kinds of end points. The points of order one are points of colocal connect-edness. In dendroids end points in the classical sense are exactly the points which are not weak cut points. In chainable continua the notion of end point is usually used in another sense and we show that in chainable continua the end points are closely related to non-block points.
38
+
39
+ The structure of our paper is as follows. In the next section we nominate
40
+ the definitions of various kinds of non-cut points followed by illustrating ex-
41
+ amples. We recall several related results known from the literature and discuss
42
+ the Borel hierarchy with respect to the notions in question. Also we show,
43
+ generalizing the result from [Le13], that the sets of non-block points spans
44
+ every nondegenerate continuum. Section 3 is devoted to the class of chain-
45
+ able (arc-like) continua. Among other results we show that any chainable
46
+ continuum consisting of the non-block points is indecomposable - Corollary
47
+ 3.6. In Section 4 we deal with the circle-like continua. The main result of this
48
+ part states that every point in a circle-like continuum is a non-block point -
49
+ Theorem 4.5.
50
+
51
+ ## 2. LAMINATION OF NON-CUT POINTS
52
+
53
+ We start by one illuminating example showing that the notion of a non-
54
+ cut point is relatively weak. Let $X$ be the continuum defined as the union of
55
+ two $\sin(1/x)$-continua with the common vertical segment $S$. One can easily see
56
+ that the set of non-cut points consists of all points in $S$ and two end points
57
+ $e_1$ and $e_2$ of the sinusoidal branches. Choose $y \in S$ arbitrarily. The non-cut
58
+ points $e_1, e_2$ do not have the same relationship to $X$ as the point $y$. There
59
+ are arbitrarily small open neighborhoods of $e_1$, $e_2$ complements of which are
60
+ connected, which is not true for $y$. The composant of $y$ is the whole $X$ whereas
61
+ the composants of $e_1$, $e_2$ are proper subsets of $X$. The end points $e_1, e_2$ span
62
+ $X$, i.e. no proper subcontinuum of $X$ contains all of them, at the same time
63
+ the points in $S$ do not influence spanning of $X$ at all.
64
+
65
+ So it seems to be meaningful to distinguish various non-cut points in a
66
+ continuum. Let us consider six kinds of non-cut points listed in Table 1.
67
+ ---PAGE_BREAK---
68
+
69
+ <table><thead><tr><td>notation</td><td>notion</td><td>definition</td></tr></thead><tbody><tr><td>P1</td><td>x is a point of colocal connectedness</td><td>there are arbitrarily small open neighborhoods of x complements of which are connected</td></tr><tr><td>P2</td><td>x is not a weak cut point</td><td>any pair of points distinct from x is contained in a subcontinuum avoiding x</td></tr><tr><td>P3</td><td>x is a non-block point</td><td>there exist subcontinua A<sub>1</sub> ⊂ A<sub>2</sub> ⊂ … ⊂ X such that ⋃<sub>n</sub> A<sub>n</sub> is dense in X \ {x}</td></tr><tr><td>P4</td><td>x is a shore point</td><td>for each ε > 0 there is an ε-dense sub-continuum avoiding x</td></tr><tr><td>P5</td><td>x is not a strong center</td><td>every pair of nonempty open sets is intersected by a subcontinuum avoiding x</td></tr><tr><td>P6</td><td>x is a non-cut point</td><td>the complement of {x} is connected</td></tr></tbody></table>
70
+
71
+ TABLE 1.
72
+
73
+ It is easy to see that in general context any property with smaller number implies the one with greater number. On the other hand, as we show later, no property in Table 1 with greater number implies the one with smaller number.
74
+
75
+ Whyburn ([Wh39]) defined a continuum $X$ to be semi-locally connected at a point $x$ provided that if $U$ is an open subset of $X$ containing $x$, there is an open subset $V$ of $X$ lying in $U$ and containing $x$ such that $X \setminus V$ has a finite number of components. A continuum is semi-locally connected if it is semi-locally connected at every point.
76
+
77
+ A continuum $X$ is *aposyndetic* at a point $x$ provided that whenever $y$ is a point of $X$ distinct from $x$, there exists a subcontinuum $Y$ of $X$ and an open subset $U$ of $X$ such that $x \in U \subset Y \subset X \setminus \{y\}$. A continuum is aposyndetic if it is aposyndetic at every point.
78
+
79
+ **REMARK 2.1.** A continuum is semi-locally connected if and only if it is aposyndetic ([Ma05, Theorem 1.7.17]).
80
+
81
+ Using the results from [Wh39] we deduce that all the notions from Table 1 are equivalent.
82
+
83
+ **PROPOSITION 2.2.** Let $X$ be a semi-locally connected continuum. Then all properties P1-P6 are equivalent. In particular, it is true when $X$ is locally connected.
84
+
85
+ **PROOF.** It is sufficient to show that P6 implies P1. Let $x \in X$ be a non-cut point. Choose an arbitrary open neighborhood $U$ of $x$. We assume that $X$ is semi-locally connected at $x$, so by definition there is an open neighborhood
86
+ ---PAGE_BREAK---
87
+
88
+ $V$ of $x$ such that $x \in V \subset U$ and some components $C_1, \dots, C_n$ of $X \setminus V$
89
+ cover $X \setminus U$. By [Wh39, (6.2)] there exist subcontinua $X_{ij} \subset X \setminus \{x\}$, $1 \le$
90
+ $i, j \le n$, such that $X_{ij}$ connects $C_i$ and $C_j$ for $i \neq j$. Obviously the set
91
+ $W = U \setminus (\bigcup_i C_i \cup \bigcup_{i \neq j} X_{ij})$ is an open neighborhood of $x$ satisfying $W \subset U$
92
+ and for which $X \setminus W$ is connected. Since every locally connected continuum is
93
+ semi-locally connected ([Wh39, Example 2(i)]), the last part of our proposition
94
+ follows. $\square$
95
+
96
+ Another natural notion for a point $x$ in a continuum $X$ which fits for our
97
+ table could be:
98
+
99
+ P2': There exist subcontinua $A_1 \subset A_2 \subset \dots \subset X$ such that
100
+ $X \setminus \{x\} = \bigcup_n A_n$.
101
+
102
+ Clearly P1 implies P2' which implies P2. However, it turns out that P2' only provides an alternative way of how to characterize the points with the property P2.
103
+
104
+ PROPOSITION 2.3. Let $X$ be a continuum containing a point $x$. The following two properties are equivalent.
105
+
106
+ (i) *x* has the property P2 (*x* is not a weak cut point).
107
+
108
+ (ii) *x* has the property P2'.
109
+
110
+ PROOF. Clearly (ii) implies (i). In order to show the opposite implication,
111
+ let $x \in X$ be not a weak cut point. Let $B(x, 1/n)$ denote the open ball with
112
+ the center at $x$ and the radius $1/n$. Choose a point $p \in X \setminus \{x\}$ arbitrarily.
113
+ Let $A_n$ be the connected component of $X \setminus B(x, 1/n)$ containing $p$. Then
114
+ for each sufficiently large $n$ the set $A_n$ is a continuum. We may assume that
115
+ $A_1 \neq \emptyset$. Then $\emptyset \neq A_1 \subset A_2 \subset \dots$ and, since $x$ is not a weak cut point,
116
+ $\bigcup_n A_n = X \setminus \{x\}$. $\square$
117
+
118
+ In order to complete the definitions from Table 1 we list several examples
119
+ in which $P(n+1)$ is not accompanied by $P^n$. For simplicity of notation, we
120
+ write $P(n+1)\backslash P^n$.
121
+
122
+ EXAMPLE (P2\P1). Let $X$ be a dendroid constructed as follows: if $P = (0,0)$, $Q = (1,0)$, $A_n = (1+1/n, 1/n)$, $B_n = (1+1/n, -1/n)$ and $C_n = (0, -1/n)$ are the points from $\mathbb{R}^2$, then the union of segments forms the desired dendroid
123
+
124
+ $$
125
+ X = PQ \cup \bigcup_n (PA_n) \cup (AnB_n) \cup (B_nC_n).
126
+ $$
127
+
128
+ The point Q is neither a weak cut point nor a point of colocal connectedness.
129
+
130
+ EXAMPLE (P3\P2). Any point of the vertical segment in the $\sin(\frac{1}{x})$-continuum is a non-block point and a weak cut point.
131
+ ---PAGE_BREAK---
132
+
133
+ EXAMPLE (P4\P3). Let us denote by $C$ the Cantor middle third set, let $Y \subset \mathbb{R}^2$ be the union of all segments $[p, c]$ connecting the point $p = (0, 1)$ with a point $c \in C \times \{0\}$. The continuum $Y$ is a special dendroid called the Cantor fan. Let $D_n = \{d_1^n, \dots, d_{m(n)}^n\} \subset C \times \{0\}$, $n = 1, 2, \dots$, be a finite 1/n-net in $C \times \{0\}$ such that $D_i \cap D_j = \emptyset$ for $i \neq j$. We define a decomposition $\sigma$ of Y whose nondegenerate elements consist of finite sets
134
+
135
+ $$ \ell_\alpha \cap \bigcup_{i=1}^{m(n)} [p, d_i^n], n \in \mathbb{N}, \alpha \in [1 - 1/n, 1), $$
136
+
137
+ where $\ell_\alpha$ denotes the horizontal line of points with second coordinate $\alpha \in \mathbb{R}$. The quotient space $X = Y/\sigma$ is a continuum, since $\sigma$ is an upper semi-continuous decomposition. The continuum $X$ is a dendroid as well. The point $p$ is a shore point but not a non-block point.
138
+
139
+ EXAMPLE (P5\P4). With the above notation, let
140
+
141
+ $$ \{(a_1^n, a_2^n): a_1^n \neq a_2^n \text{ for } n \in \mathbb{N} \text{ and } \{a_1^m, a_2^m\} \cap \{a_1^n, a_2^n\} = \emptyset \text{ for } m \neq n\} $$
142
+
143
+ be dense in $C \times C$. We define a decomposition $\tau$ of the Cantor fan $Y$ whose nondegenerate elements consist of pairs of points
144
+
145
+ $$ \ell_\alpha \cap \bigcup_{i=1}^{2} [p, a_i^n], n \in \mathbb{N}, \alpha \in [1 - 1/n, 1). $$
146
+
147
+ The quotient space $X = Y/\tau$ is again a dendroid. The point $p$ is neither a strong center nor a shore point.
148
+
149
+ EXAMPLE (P6\P5). Any point of the common vertical segment of two $\sin(\frac{1}{x})$-continua is a non-cut point and a strong center.
150
+
151
+ There are easy examples of continua without P2-points. For example, the closure of the graph of the function
152
+
153
+ $$ \sin\left(\frac{1}{1-|x|}\right), x \in (-1, 1) $$
154
+
155
+ has this property. In indecomposable continua there are no points of colocal connectedness (P1), even there are no points with property P2. On the other hand, every point of an indecomposable continuum is a non-block point (P3).
156
+
157
+ Let us briefly mention some results known from the literature related to the notions listed in our Table 1. In arcwise connected continua there are points of colocal connectedness (P1) ([KM79, Corollary 3.8]); the same is true for continua with exactly two arc components ([KM79, Corollary 3.11]). Every nondegenerate hereditarily decomposable continuum $X$ contains at least one subcontinuum $K$ with empty interior at which the continuum $X$ is colocally connected ([KM79, Corollary 3.5]). Hence any point of $K$ is a non-block point of $X$ (P3). In particular, every nondegenerate hereditarily decomposable
158
+ ---PAGE_BREAK---
159
+
160
+ continuum contains a non-block point. We show that every nondegenerate continuum contains at least two such points (Corollary 2.8). Recently, using the results of Bing ([Bi48]), it has been proved that every nondegenerate continuum contains at least two shore points ([Le13]).
161
+
162
+ In what follows we concern the Borel types of sets of points listed in the table. We summarize our knowledge in the following.
163
+
164
+ **PROPOSITION 2.4.** Let $X$ be a continuum. The following is true.
165
+
166
+ (i) The set of P1-points is of type $G_\delta$.
167
+
168
+ (ii) The set of P4-points is of type $G_\delta$.
169
+
170
+ (iii) The set of P5-points is of type $G_\delta$.
171
+
172
+ (iv) The set of P6-points is of type $F_{\sigma\delta}$.
173
+
174
+ **PROOF.** (i) Let $C$ be the set of all points of colocal connectedness. For every $n \in \mathbb{N}$ there is an open cover $B_n$ of $C$ by open sets of diameter less than $1/n$ whose complements in $X$ are connected. It holds that $C = \bigcap_n \cup B_n$.
175
+
176
+ (ii) For $n \in \mathbb{N}$ let $G_n$ be the set of all points $p$ in $X$ for which there exists a $(1/n)$-dense continuum in $X \setminus p$. Then each $G_n$ is open and $\bigcap_{n=1}^\infty G_n$ is the set of all shore points in $X$.
177
+
178
+ (iii) Let $\mathcal{B}$ be a countable base of $X$. The set of all non-strong centers can be expressed as
179
+
180
+ $$ \bigcap_{A,B \in \mathcal{B}} \bigcup_K \{X \setminus K : K \cap A \neq \emptyset \neq K \cap B, K \text{ is a continuum}\}. $$
181
+
182
+ (iv) See [Wh42, Theorem 5.2]. □
183
+
184
+ We complete Proposition 2.4 by three examples.
185
+
186
+ **EXAMPLE 2.5.** (i) The set of P2-points need not be Borel. In a dendroid $X$, a point $x$ is an end point (in the classical sense) if whenever $x \in \gamma$ for some arc $\gamma \subset X$, then $x$ is an end point of $\gamma$. Obviously, the set of P2-points in $X$ coincide with the set of all end points. The assertion follows from [NT90, Example 5], where the authors have found an example of a dendroid in which the set of all end points is co-analytic and not Borel.
187
+
188
+ (ii) The set of Pn-points, $n \in \{1, 4, 5, 6\}$, need not be of type $F_\sigma$. Let us denote by $X$ the Wazewski universal dendrite ([Wa23]), and by $E$ the set of all end points in $X$. As stated in the explanation of (i), the set $E$ coincides with the set of all P2-points. Since $X$ is locally connected, it follows from Proposition 2.2 that the sets of points with the properties P1-P6 coincide and hence they are equal to $E$. In particular, by Proposition 2.4(i) the set $E$ is of type $G_\delta$. It is known that $E$ is dense with an empty interior in $X$, hence by the Baire category theorem $E$ cannot be of type $F_\sigma$.
189
+
190
+ (iii) The set of all non-cut points is in general not of type $G_\delta$. We only sketch our argument. Let $Q = \{q_n: n \ge 0\}$ be the set of all rational numbers
191
+ ---PAGE_BREAK---
192
+
193
+ from the interval $(-1, 1)$. Define the continuum $X$ as the closure of the graph of the function
194
+
195
+ $$ \sum_{n=0}^{\infty} \frac{1}{2^n} \sin \left( \frac{1}{x - q_n} \right), x \in (-1, 1) \setminus Q. $$
196
+
197
+ Let us denote by $N$ the set of all non-cut points in $X$. Obviously $(x, y) \in N$ if and only if $x \in Q \cup \{-1, 1\}$, the set $N$ is dense and of the first category in $X$. By the Baire category theorem $N$ is not of type $G_\delta$.
198
+
199
+ It is of interest that the Borel complexity of the set of shore points is in general better than the one of the set of non-cut points. From this point of view the notion of a shore point is simpler than that of a non-cut point. Note that we still do not know the descriptive character of the set of non-block points. So we can pose the following.
200
+
201
+ **QUESTION 2.1.** Is the set of non-block points Borel?
202
+
203
+ Our proof of Theorem 2.7 is based on the result of Bing [Bi48, Theorem 5].
204
+
205
+ **THEOREM 2.6.** For each proper subset $R$ of the continuum $X$ there is a point $x$ of $X \setminus R$ such that the union of all continua that lie in $X \setminus \{x\}$ and intersect $R$ is dense in $X$.
206
+
207
+ We say that a subset $S$ of a continuum $X$ spans $X$ if no proper subcontinuum of $X$ contains $S$. The next theorem and its corollary generalize the fact that every nondegenerate continuum contains at least two non-cut points [Bo67, Le13].
208
+
209
+ **THEOREM 2.7.** Let $X$ be a continuum. Then the set of all non-block points spans $X$.
210
+
211
+ **PROOF.** To the contrary, let $A$ be a proper subcontinuum of $X$ containing all non-block points. By Theorem 2.6 there exists a point $x \in X \setminus A$ such that the union of all continua that lie in $X \setminus \{x\}$ and intersect $A$ is dense in $X$. For some decreasing sequence $(\varepsilon_n)_{n=1}^\infty$ of positive reals converging to zero let us denote by $B_n$ the open ball with the center at $x$ and the radius $\varepsilon_n$; we can assume that $A \cap B_1 = \emptyset$. For each $n$, let $A_n$ be the component of $X \setminus B_n$ containing $A$. Since $B_n$ is open, the set $A_n$ is a continuum. Moreover, $A_n$ is a subset of $A_{n+1}$ for $n=1, 2, \dots$ and any continuum $C \subset X \setminus \{x\}$ intersecting $A$ is a subset of $A_n$ for each sufficiently large $n$. Hence by Theorem 2.6 the union $\bigcup_{n=1}^\infty A_n$ is dense in $X \setminus \{x\}$, i.e. the point $x$ is a non-block point. Moreover, $x \notin A$ which is a contradiction. $\square$
212
+
213
+ **COROLLARY 2.8.** Let $X$ be a nondegenerate continuum. Then $X$ contains at least two non-block points.
214
+ ---PAGE_BREAK---
215
+
216
+ ### 3. CHAINABLE CONTINUA
217
+
218
+ In this section our attention will be focused on the class of chainable continua. For their own interest we state and prove several results describing various roles of distinct kinds of non-cut points from Table 1. When arguing our statements we will repeatedly use the fact that chainable continua are hereditarily unicoherent ([Na92, Theorem 12.2]).
219
+
220
+ We start with two lemmas concerning the decomposability of a chainable continuum.
221
+
222
+ **LEMMA 3.1.** Let $X$ be a chainable continuum such that $X = K \cup L$ for two proper subcontinua $K$ and $L$ of $X$. Then every point of $K \cap L$ is a strong center.
223
+
224
+ **PROOF.** Let $p \in K \cap L$ and suppose that $p$ is not a strong center. Consider the nonempty open sets $X \setminus K$ and $X \setminus L$. Since $p$ is not a strong center, there is a continuum $M$ intersecting $X \setminus K$ and $X \setminus L$ but omitting $p$. It follows that
225
+
226
+ $$M \cup (K \cap L) = (M \cap K) \cup (M \cap L) \cup (K \cap L)$$
227
+
228
+ forms a weak triod. This is a contradiction with the fact that chainable continua do not contain weak triods ([Na92, Corollary 12.7]). □
229
+
230
+ Notice that the intersection $K \cap L$ from Lemma 3.1 can consist of the non-cut points only, see our Example P6$\setminus$P5 in Section 2.
231
+
232
+ A shore set in a continuum $X$ is a subset $A$ of $X$ such that, for each $\varepsilon > 0$, there exists a subcontinuum $Y$ of $X$ such that the Hausdorff distance from $Y$ to $X$ is less than $\varepsilon$ and $A \cap Y = \emptyset$. In [Il01, Na07, BMPV14] the authors have studied in dendroids (or $\lambda$-dendroids) when the union of shore points (continua) is a shore set. In the case of chainable continua we deduce the following general result.
233
+
234
+ **PROPOSITION 3.2.** *The set of all shore points of a decomposable chainable continuum is a shore set.*
235
+
236
+ **PROOF.** Let $X$ be a decomposable chainable continuum and let $X = K \cup L$ for two proper subcontinua $K$ and $L$ of $X$. By Theorem 2.7 there are non-block points hence also shore points $p \in K \setminus L$ and $q \in L \setminus K$. Related to $p$, $q$ there are sequences of continua $A_n$ and $B_n$ which converge to $X$ and such that $p \notin A_n$ and $q \notin B_n$. We may suppose that all $A_n$ and $B_n$ contain $K \cap L$. We define $M_n = (A_n \cap K) \cup (B_n \cap L)$. The sequence $M_n$ converges to $X$. We prove that the complement of $\bigcup M_n$ consists of all shore points. Clearly any point of $X \setminus \bigcup M_n$ is a shore point. On the other hand, suppose for contradiction that there is a shore point $r \in M_n$ for some $n \in \mathbb{N}$. Without loss of generality we may suppose that $r \in K$, notice that by Lemma 3.1 and Table 1 even $r \in K \setminus L$. Since $r$ is a shore point and $K \cap M_n$ is a proper closed subset of $K$, it follows that there is a subcontinuum $C$ of $X$ such that
237
+ ---PAGE_BREAK---
238
+
239
+ $C \cap (K \setminus M_n)$ is nonempty, $C \cap L = L \cap B_n$ and $r \notin C$. One can easily verify that $C \cup M_n \cup L$ is a weak triod in $X$ which is a contradiction. Thus the set of all shore points of $X$ is a shore set. $\square$
240
+
241
+ In a nondegenerate continuum $X$, a point $p \in X$ is a point of irreducibility provided that for some point $q \in X \setminus \{p\}$ no proper subcontinuum of $X$ contains $\{p, q\}$. Clearly $p$ is a point of irreducibility if and only if the composant of $p$ is a proper subset of $X$ (compare [Na92, Theorem 11.2]).
242
+
243
+ The next two lemmas hold true in the general context. They will be useful when proving Proposition 3.5 and Corollary 3.6. The first is from [Na92, Corollary 11.19]. The second generalizes [Le13, Theorem 3].
244
+
245
+ **LEMMA 3.3.** Let $X$ be a nondegenerate continuum. The following two properties are equivalent.
246
+
247
+ (i) Every point $p \in X$ is a point of irreducibility of $X$.
248
+
249
+ (ii) $X$ is indecomposable.
250
+
251
+ **LEMMA 3.4.** Every point of irreducibility of a nondegenerate continuum $X$ is a non-block point.
252
+
253
+ PROOF. If $p$ is a point of irreducibility, then for some point $q \in X \setminus \{p\}$ no proper subcontinuum of $X$ contains $\{p,q\}$. It means that the composant $\kappa(q)$ of $q$ does not contain the point $p$. Since the composant $\kappa(q)$ is dense in $X$ and can be expressed as a union of countably many proper subcontinua each of which contains $q$ ([Na92, Proposition 11.14]), the point $p$ is a non-block point. $\square$
254
+
255
+ The main statement of this section follows.
256
+
257
+ PROPOSITION 3.5. Let $X$ be a chainable continuum and let $p \in X$. The following properties of $p$ are equivalent.
258
+
259
+ (i) $p$ is a point of irreducibility.
260
+
261
+ (ii) $p$ is a non-block point.
262
+
263
+ (iii) $p$ is a shore point.
264
+
265
+ (iv) $p$ is not a strong center.
266
+
267
+ PROOF. By Lemma 3.4 (i) implies (ii). Moreover, (ii) implies (iii) and (iii) implies (iv) in general.
268
+
269
+ Let us prove (iv) implies (ii). If $X$ is an indecomposable continuum every point is a point of irreducibility ([Na92, Theorem 11.18]), so (iv) implies (i) and (i) implies (ii) by Lemma 3.4. Thus we can assume that $X$ is decomposable. Let $X = K \cup L$, where $K$ and $L$ are two proper subcontinua of $X$ and let $p$ be not a strong center of $X$. From Lemma 3.1 follows that $p \notin K \cap L$. Without loss of generality we may suppose that $p \in K \setminus L$. Let $\{B_n : n \in \mathbb{N}\}$ be the base of nonempty open subsets of $X \setminus \{p\}$. Since $p$ is not a strong center, there is for every $n \in \mathbb{N}$ a continuum $M_n$ intersecting $B_n$ and $X \setminus K$
270
+ ---PAGE_BREAK---
271
+
272
+ such that $p \notin M_n$. It is enough to let $P_n = L \cup M_1 \cup \dots \cup M_n$. We deduce
273
+ that $P_n$ is a continuum not containing $p$, $P_1 \subseteq P_2 \subseteq \dots$ and $P_n$ converge to
274
+ $X$ in the Hausdorff metric. We have shown that $p$ is a non-block point, i.e.
275
+ (iv) implies (ii).
276
+
277
+ It remains to prove that (ii) implies (i). In much the same way as above,
278
+ let $p \in K \setminus L$. By Theorem 2.7 there is a non-block point $q \in L \setminus K$. We show
279
+ that $X$ is irreducible between $p$ and $q$.
280
+
281
+ Suppose for contradiction that there is a proper subcontinuum $A$ of $X$
282
+ which contains both $p$ and $q$, and let $x \in K \cap L$ be a strong center by
283
+ Lemma 3.1. At least one of the sets $K \setminus A$, $L \setminus A$ is nonempty. Assume the
284
+ former possibility. Since $p$ is a non-block point, there is a sufficiently dense
285
+ subcontinuum $B$ such that $p \notin B$, $x \in B$ and $(K \setminus A) \cap B$ is nonempty.
286
+ It follows that $(L \cap A) \cup (K \cap A) \cup (K \cap B)$ forms a weak triod which is a
287
+ contradiction.
288
+
289
+ Thus $X$ is irreducible between the points $p$ and $q$ and hence $p$ is a point
290
+ of irreducibility. □
291
+
292
+ Combining Proposition 3.5 and Lemma 3.3 we deduce the following.
293
+
294
+ COROLLARY 3.6. Let X be a chainable continuum. The following properties are equivalent.
295
+
296
+ (i) Each point in X is a non-block point.
297
+
298
+ (ii) *X is indecomposable.*
299
+
300
+ REMARK 3.7. By Proposition 3.5 the property P3 in Corollary 3.6 can be replaced by P4 or P5. Let X be an arc of pseudoarcs ([Le85]). Then each point of X is a non-cut point, at the same time X is decomposable, hence Corollary 3.6 is not true for P6.
301
+
302
+ Let $X$ be a chainable continuum. A point $x \in X$ is called an *end point* of $X$ provided that for every $\varepsilon > 0$ there is an $\varepsilon$-chain $B_1, \dots, B_n$ covering $X$ such that $x \in B_1$. An end point in a chainable continuum need not fulfill the (classical) definition presented in Section 2 before Proposition 2.4. In [Do94] it has been shown that the cardinality of end points of a chainable continuum can be any cardinal number from $\{0, 1, \dots, \aleph_0, c\}$. In particular, it is known that the Buckethandle continuum is chainable and has exactly one end point ([Do08]). Gluing two Buckethandle continua together in their end points we find a chainable continuum with no end point.
303
+
304
+ There is a classical characterization of end points in chainable continua
305
+ ([Do08, p. 32]). We recall two descriptions in the following statement.
306
+
307
+ PROPOSITION 3.8. For a point $x$ of a nondegenerate chainable continuum $X$ the following conditions are equivalent.
308
+
309
+ (i) $x$ is an end point of $X$.
310
+ ---PAGE_BREAK---
311
+
312
+ (ii) Each nondegenerate subcontinuum of X containing x is irreducible between x and some other point.
313
+
314
+ (iii) If there are two subcontinua of X containing x, one of them contains the other.
315
+
316
+ From the above characterization of an end point in a chainable continuum and our Proposition 3.5 we conclude the following.
317
+
318
+ PROPOSITION 3.9. Let $X$ be a chainable continuum and let $p \in X$. Then the following are equivalent.
319
+
320
+ (i) *p* is an end point.
321
+
322
+ (ii) *p* is a non-block point of every subcontinuum of X which contains p.
323
+
324
+ **PROOF.** It is a consequence of Propositions 3.8(i), (ii) and 3.5(i), (ii). □
325
+
326
+ A point $x$ in a chainable continuum $X$ is called an *absolute end point*, provided that whenever $X$ is irreducible between $p$ and $q$, then either $x = p$ or $x = q$. By the definition there are at most two absolute end points in a chainable continuum. The notion of an absolute end point in chainable continua was introduced in [Ro88], where a number of equivalent characterizations was proved. We choose only the following one. A point $x$ is an absolute end point if and only if $x$ is a point of irreducibility and $X$ is locally connected at $x$ ([Ro88, Theorem 1.0]). We note that being locally connected at a point $x$ of a chainable continuum is the same as being connected im kleinen at $x$ ([Ro88, Theorem 1.7]).
327
+
328
+ It is easy to show that a point of order one in a chainable continuum is an absolute end point. The converse need not be true. For example the two end points of the arcless arc ([BPV13]) are absolute end points but these are not of order one. This suggests to use the following notion. A continuum $X$ is said to be *rim-connected* at a point $x$ if there are arbitrarily small neighborhoods of $x$ whose boundaries are connected. From the Boundary bumping theorem we easily deduce that if a continuum $X$ is rim-connected at $x$, then $X$ is locally as well as colocally connected at $x$. We give two other characterizations of an absolute end point. One of them is based on Table 1 from Section 2, the other is using the notion of rim-connectedness. These results are using the following.
329
+
330
+ LEMMA 3.10. Let $x$ be a point of irreducibility of a continuum $X$. Then the following are equivalent.
331
+
332
+ (i) $x$ is a point of local connectedness.
333
+
334
+ (ii) $x$ is a point of colocal connectedness.
335
+
336
+ **PROOF.** (i) $\implies$ (ii). Let $X$ be irreducible between $x$ and $y$. Let $U$ be any neighborhood of $x$. There is an open connected neighborhood $V$ of $x$ whose closure is a subset of $U$ and which avoids $y$. Let $K$ be the component of $X \setminus V$ which contains $y$. Clearly $K$ is a continuum intersecting the boundary of $V$,
337
+ ---PAGE_BREAK---
338
+
339
+ and hence $K \cup \text{cl}(V)$ is a continuum. Since it contains $x$ and $y$, we conclude that it is equal to $X$. Hence $X \setminus K$ is a neighborhood of $x$ whose complement is connected.
340
+
341
+ (ii) $\implies$ (i). Let $X$ be irreducible between $x$ and $y$. Let $U$ be any neighborhood of $x$. In $U$ there is an open neighborhood $V$ of $x$ avoiding $y$, whose complement is connected. Let us denote by $C$ the component of the point $x$ in $V$ and let us denote by $K$ the closure of $C$. Clearly $K$ is a continuum intersecting the boundary of $X \setminus V$. We get that $K \cup (X \setminus V)$ is a continuum containing $x$ and $y$ and thus it equals $X$. It follows that $K$ contains $V$ and thus $C = V$ is an open connected neighborhood of $x$ contained in $U$. $\square$
342
+
343
+ **PROPOSITION 3.11.** The following are equivalent for a point $x$ in a chain-able continuum $X$.
344
+
345
+ (i) *x* is an absolute end point.
346
+
347
+ (ii) *x* is a point of rim-connectedness.
348
+
349
+ (iii) *x* is a point of colocal connectedness.
350
+
351
+ **PROOF.** (i) $\implies$ (ii). Let $x$ be an absolute end point. By [Ro88, Theorem 1.0] $x$ is a point of irreducibility at which $X$ is locally connected. Thus there is $y \in X$ such that $X$ is irreducible between $x$ and $y$. Let $U$ be any neighborhood of $x$ whose closure does not contain $y$. There is a connected open neighborhood $V \subseteq U$ of the point $x$. Let $K$ be the closure of $V$. Define $S$ to be the union of all subcontinua of $X \setminus K$ containing the point $y$. Let $L$ be the closure of $S$. First, we claim that $K \cap L \neq \emptyset$. Suppose to the contrary that this is not the case. Thus we can find an open set $W$ such that $L \subseteq W \subseteq \text{cl}(W) \subseteq X \setminus K$. By the Boundary Bumping Theorem ([Na92, Theorem 5.4]) the component $C$ of the set $\text{cl}(W)$ containing the point $y$ intersects the boundary of $W$. Since $C$ is disjoint with $K$ we get that $C \subseteq S$ and hence $C \subseteq L$. Thus $L$ intersects the boundary of $W$ which contradicts the fact that $L \subseteq W$ and $W$ is open.
352
+
353
+ It follows that $K \cup L$ is a continuum containing both $x$ and $y$ and thus
354
+ $X = K \cup L$. Let $B$ be the boundary of $K$, we want to show that $B = K \cap L$.
355
+ Clearly
356
+
357
+ $$
358
+ B = K \cap \text{cl}(X \setminus K) \subseteq K \cap \text{cl}(L) = K \cap L
359
+ $$
360
+
361
+ because $K \cup L = X$. For the opposite inclusion suppose that $z \in K \cap L$
362
+ is arbitrary. By the definition of $L$ there is a sequence of points $z_n \in S$
363
+ converging to $z$. Since $S$ is disjoint with $K$ it follows that $z \in B$.
364
+
365
+ By the unicoherence of X, the set B = K ∩ L is connected. Since the neighborhood U was arbitrary we get that x is a point of rim-connectedness.
366
+
367
+ (ii) ⇒ (iii). This implication holds in general.
368
+
369
+ (iii) ⇒ (i). Let $x$ be a point of colocal connectedness. In order to show that $x$ is an absolute end point it is enough to show that it is a point of irreducibility at which $X$ is locally connected ([Ro88, Theorem 1.0]). We have
370
+ ---PAGE_BREAK---
371
+
372
+ shown in Section 2 that $x$ is a non-block point (P1 implies P3) and thus it is a
373
+ point of irreducibility by Proposition 3.5. Then from Lemma 3.10 we conclude
374
+ that $x$ is a point of local connectedness of $X$ and hence $x$ is an absolute end
375
+ point of $X$. $\square$
376
+
377
+ ### 4. CIRCLE-LIKE CONTINUA
378
+
379
+ In this section we investigate the class of circle-like continua. The main
380
+ tool of our approach will be the use of an inverse limit. Our main result is
381
+ formulated in Theorem 4.5.
382
+
383
+ Let $\mathcal{P}$ be a collection of compact metric spaces. We say that a continuum $X$ is $\mathcal{P}$-like provided that for each $\varepsilon > 0$ there is a continuous map $f$ from $X$ onto some member of $\mathcal{P}$ such that $\operatorname{diam} f^{-1}(f(x)) < \varepsilon$ for each $x \in X$. In particular, if $\mathcal{P}$ consists of an arc (resp. a simple closed curve), then $X$ is called arc-like (resp. circle-like).
384
+
385
+ The next general result can be found for example in [Na92, Theorem 2.13].
386
+
387
+ PROPOSITION 4.1. A continuum $X$ is $\mathcal{P}$-like if and only if $X$ is an inverse limit $\varprojlim \{X_i, f_i\}$, where all the coordinate spaces $X_i$ are chosen from $\mathcal{P}$ and each bonding map $f_i: X_{i+1} \to X_i$ is continuous and onto.
388
+
389
+ It is known that the classes of arc-like and chainable continua coincide
390
+ ([Na92, Theorem 12.11]). Some continua are both arc-like and circle-like, see
391
+ for example the Buckethandle continuum ([Na92, 12.48]). In this section we
392
+ deal with the circle-like continua from the point of view of our Table 1.
393
+
394
+ For a continuum $X$ and a continuous map $f: X \to X$ we say that $f$ is
395
+ weakly confluent if for any subcontinuum $K \subset X$ there exists a component $L$
396
+ of $f^{-1}(K)$ such that $f(L) = K$.
397
+
398
+ Let $\mathbb{S}^1 = \{z \in \mathbb{C} : |z| = 1\}$. Consider a continuous map $f: \mathbb{S}^1 \to \mathbb{S}^1$ of degree $\deg(f) \in \mathbb{Z}$. Let $F: \mathbb{R} \to \mathbb{R}$ be a lifting of $f$, i.e. the continuous map for which
399
+
400
+ $$ (4.1) \qquad \varphi \circ F = f \circ \varphi \text{ on } \mathbb{R}, $$
401
+
402
+ where $\varphi: \mathbb{R} \to \mathbb{S}^1$ is defined as $\varphi(x) = e^{2\pi ix}$. Then
403
+
404
+ $$ (4.2) \qquad F(x+1) = F(x) + \deg(f) \text{ for each } x \in \mathbb{R}. $$
405
+
406
+ In particular, if the degree $\deg(f)$ is nonzero the map $F$ is onto. Note that
407
+ any map $F+m$ is also a lifting of $f$ for $m \in \mathbb{Z}$.
408
+
409
+ We start with one lemma providing an important ingredient of our next
410
+ construction.
411
+
412
+ LEMMA 4.2. (i) Any nonzero degree continuous self-map of the unit circle is weakly confluent.
413
+
414
+ (ii) Any continuous onto map $f: I \to J$, where $I, J$ are intervals, is weakly confluent.
415
+ ---PAGE_BREAK---
416
+
417
+ PROOF. (i) Let $F: \mathbb{R} \to \mathbb{R}$ be a lifting of $f$. By our assumption on the degree, the map $F$ is onto.
418
+
419
+ Let $K$ be an arc in $\mathbb{S}^1$. Then $\varphi^{-1}(K) = [a,b] + \mathbb{Z}$ for some interval $[a,b] \subset \mathbb{R}$, $0 < b - a < 1$. Since $F$ is continuous onto, there exist points $x, y \in \mathbb{R}$ such that $F(\{x,y\}) = \{a,b\}$ and each point $t$ between $x, y$ is mapped by $F$ into $(a,b)$. Moreover, from (4.2) we conclude $|x-y| < 1$. Let $J \subset \mathbb{R}$ be the interval with end points $x$ and $y$. Then $L' = \varphi(J)$ is an arc in $\mathbb{S}^1$ and using (4.1) we deduce $f(L') = \varphi(F(J)) = \varphi([a,b]) = K$. So, if $L \subset \mathbb{S}^1$ is a component of $f^{-1}(K)$ containing $L'$, then also $f(L) = K$ and $f$ is weakly confluent. (ii) We leave the proof to the reader. □
420
+
421
+ Let $X$ be a circle-like continuum. By Proposition 4.1 the continuum $X$ can be expressed as an inverse limit
422
+
423
+ $$ (4.3) \quad X = \varprojlim \{\mathbb{S}^1, f_i\} = \{(x_i)_{i=1}^\infty : f_i(x_{i+1}) = x_i \text{ for each } i \in \mathbb{N}\}. $$
424
+
425
+ The space $X$ will be equipped with the metric
426
+
427
+ $$ d(x, y) = \sum_{i=1}^{\infty} \frac{\rho(x_i, y_i)}{2^i}, $$
428
+
429
+ where $\rho(x_i, y_i)$ denotes the Euclidean distance of $x_i, y_i \in \mathbb{S}^1$. For $n \in \mathbb{N}$ let $X_n = \{(x_i)_{i=1}^n : (x_i)_{i=1}^\infty \in X\}$ be a metric space endowed with the metric $d_n(x,y) = \sum_{i=1}^n \frac{\rho(x_i, y_i)}{2^i}$. Let $\mathcal{H}_d$, resp. $\mathcal{H}_{d_n}$ be the induced Hausdorff metric on $X$, resp. $X_n$.
430
+
431
+ LEMMA 4.3. Let each bonding map in (4.3) have nonzero degree. Fix a point $x = (x_i)_{i=1}^\infty \in X$. Then there is a countable set
432
+
433
+ $$ \{K_i^j : i \in \mathbb{N}, j \in \{1, \dots, i\}\} $$
434
+
435
+ of arcs in $\mathbb{S}^1$ satisfying (let $f_{j(i-1)} = f_j \circ \dots \circ f_{i-1}$ for each $i > 1$ and $1 \le j < i-1$, $f_{(i-1)(i-1)} = f_{i-1}$, $f_{i(i-1)} = \mathrm{id}$)
436
+
437
+ (i) $K_i^i \subset \mathbb{S}^1 \setminus \{x_i\}$ for $i \in \mathbb{N}$,
438
+
439
+ (ii) $f_i(K_{i+1}^j) = K_i^j$ for $i \in \mathbb{N}$ and $j \in \{1, \dots, i\}$,
440
+
441
+ (iii) $K_i^i \supset K_i^{i-1} \supset \dots \supset K_i^1$ for $i \in \mathbb{N}$,
442
+
443
+ (iv) $f_{j(i-1)}(K_i^i) \supset K_j^j$ for each $i > 1$ and $1 \le j < i$,
444
+
445
+ (v) $\mathcal{H}_{d_1}(K_1^1, X_1) < 1$,
446
+
447
+ (vi) For each $i > 1$,
448
+
449
+ $$ \mathcal{H}_{d_i}(X_i \cap \prod_{j=1}^{i} f_{j(i-1)}(K_i^i), X_i) < 1/2^{i-1}. $$
450
+
451
+ PROOF. In the construction of $K_i^j$ we repeatedly use the fact that the bonding maps $f_i$ are continuous, onto and of a nonzero degree and apply
452
+ ---PAGE_BREAK---
453
+
454
+ Lemma 4.2. We proceed by the induction. In the $i$th step, we choose arcs $K_i^{i-1}, K_i^{i-2}, \dots, K_i^1, K_i^i$ (in written order):
455
+
456
+ STEP 1. We choose an arc $K_1^1 \subset S^1 \setminus \{x_1\}$ fulfilling the property (v).
457
+
458
+ STEP 2. With the help of Lemma 4.2(i) we choose an arc $K_2^1 \subset S^1$ such that $f_1(K_2^1) = K_1^1$. Since the arc $K_2^1$ does not contain the point $x_2$ and $f_1(x_2) = x_1$, there exists an arc $K_2^2 \subset S^1 \setminus \{x_2\}$ such that $K_2^2 \supset K_2^1$ (iii), $f_1(K_2^2) \supset K_1^1$ (iv) and (vi) is fulfilled for $i=2$.
459
+
460
+ STEP $i+1$. Let us assume that the arcs $K_i^i, K_i^{i-1}, \dots, K_i^1$ fulfilling (i)-(vi) have already been defined. Using Lemma 4.2 we can choose arcs $K_{i+1}^j$, $j \in \{1, \dots, i\}$ satisfying (ii) and (iii). Since the arc $K_{i+1}^i$ does not contain the point $x_{i+1}$ ($f_i(K_{i+1}^i) = K_i^i$ and $x_i \notin K_i^i$ by (i)), there exists an arc $K_{i+1}^{i+1}$ (the length of which is sufficiently close to $2\pi$) such that all the properties (i),(iii),(iv) and (vi) are satisfied.
461
+
462
+ This finishes our construction of the arcs $K_i^j$ satisfying (i)-(vi). □
463
+
464
+ For each $n \in \mathbb{N}$, let
465
+
466
+ $$L_i^n = f_{i(n-1)}(K_n^n) \text{ if } 1 \le i < n, L_i^n = K_i^n \text{ for } i \ge n.$$
467
+
468
+ The key proposition follows.
469
+
470
+ PROPOSITION 4.4. Let $X$ be a circle-like continuum such that each bonding map in (4.3) has nonzero degree.
471
+
472
+ (i) For each $n \in \mathbb{N}$, the set
473
+
474
+ $$A_n = \varprojlim \{L_i^n, f_i\}$$
475
+
476
+ is a subcontinuum of X. Moreover, $A_n \subset A_{n+1}$.
477
+
478
+ (ii) $\bigcup_n A_n \subset X \setminus \{x\}$,
479
+
480
+ (iii) $\bigcup_n A_n$ is dense in X.
481
+
482
+ PROOF. (i) By our definition of the arcs $L_i^n$ we conclude $f_i(L_{i+1}^n) = L_i^n$ for each $i \in \mathbb{N}$. Thus, the set $A_n$ is well defined for each $n \in \mathbb{N}$ and it is a subcontinuum in X. The inclusion $A_n \subset A_{n+1}$ directly follows from properties (iii) and (iv) of Lemma 4.3.
483
+
484
+ (ii) From property (i) of Lemma 4.3 we conclude $x_n \notin L_n^n = K_n^n$, hence $x \notin A_n$ for each $n$. It implies $x \notin \bigcup_n A_n$.
485
+
486
+ (iii) From the properties (v), (vi) of Lemma 4.3 we deduce
487
+
488
+ $$
489
+ \begin{align*}
490
+ \mathcal{H}_d(A_n, X) &\le \mathcal{H}_{d_n}(X_n \cap \prod_{j=1}^n f_{j(n-1)}(K_n^n), X_n) + \sum_{i=n+1}^\infty \frac{2}{2^i} \\
491
+ &< \frac{1}{2^{n-1}} + \frac{1}{2^{n-1}} = \frac{1}{2^{n-2}}.
492
+ \end{align*}
493
+ $$
494
+
495
+
496
+ ---PAGE_BREAK---
497
+
498
+ Using the above construction and the conclusion of Proposition 4.4 we
499
+ conclude the following.
500
+
501
+ **THEOREM 4.5.** Let X be a circle-like continuum. Then every point $x \in X$ is a non-block point.
502
+
503
+ PROOF. If X is also arc-like, then by [Bi62, p. 121] the continuum X is indecomposable and the conclusion follows from Corollary 3.6. So in what follows we assume that X is not arc-like. Then by [Ma05, Theorems 2.5.9-10] each bonding map in (4.3) can be assumed to have a positive degree and Proposition 4.4 can be applied.
504
+
505
+ We have proved that each point of a circle-like continuum has the property P3 from our Table 1. On the other hand, there are circle-like continua in which no point has the property P2, for example the circle of pseudoarcs is such a continuum ([BJ59]).
506
+
507
+ REFERENCES
508
+
509
+ [Bi48] R. H. Bing, Some characterizations of arcs and simple closed curves, Amer. J. Math. **70** (1948), 497–506.
510
+
511
+ [Bi62] R. H. Bing, *Embedding circle-like continua in the plane*, Canad. J. Math. **14** (1962), 113–128.
512
+
513
+ [BJ59] R.H. Bing and F.B. Jones, *Another homogeneous plane continuum*, Trans. Amer. Math. Soc. **90** (1959), 171–192.
514
+
515
+ [BMPV14] J. Bobok, R. Marciña, P. Pyrih and B. Vejnar, *Union of shore sets in a dendroid*, Topology Appl. **161** (2014), 206–214.
516
+
517
+ [BPV13] J. Bobok, P. Pyrih and B. Vejnar, *Half-homogeneous chainable continua with end points*, Topology Appl. **160** (2013), 1066–1073.
518
+
519
+ [Bo67] K. Borsuk, Theory of retracts, Polish Scientific Publishers, Warsaw, 1967.
520
+
521
+ [Do94] J. Doucet, *Cardinality, completeness, and decomposability of sets of endpoints of chainable continua*, Topology Appl. **60** (1994), 41–59.
522
+
523
+ [Do08] J. Doucet, *Sets of endpoints of chainable continua*, Topology Proc. **32** (2008), 31–35.
524
+
525
+ [Gr81] E. E. Grace, *Aposyndesis and weak cutting*, in: General topology and modern analysis, Academic Press, New York, 1981, 71–82.
526
+
527
+ [ELV12] R. Escobedo, M. de Jesús López and H. Villanueva, *Nonblockers in hyperspaces*, Topology Appl. **159** (2012), 3614–3618.
528
+
529
+ [Il01] A. Illanes, *Finite unions of shore sets*, Rend. Circ. Mat. Palermo (2) **50** (2001), 483–498.
530
+
531
+ [IKr11] A. Illanes and P. Krupski, *Blockers in hyperspaces*, Topology Appl. **158** (2011), 653–659.
532
+
533
+ [KM79] J. Krasinkiewicz and P. Minc, *Continua and their open subsets with connected complements*, Fund. Math. **102** (1979), 129–136.
534
+
535
+ [Le13] R. Leonel, *Shore points of a continuum*, Topology Appl. **161** (2014), 433–441.
536
+
537
+ [Le85] W. Lewis, *Continuous curves of pseudo-arcs*, Houston J. Math. **11** (1985), 91–99.
538
+
539
+ [Ma05] S. Macias, Topics on continua, Chapman and Hall/CRC, Boca Raton, 2005.
540
+
541
+ [Na92] S. B. Nadler, Continuum theory. An introduction, Marcel Dekker, New York, 1992.
542
+ ---PAGE_BREAK---
543
+
544
+ [Na07] V. C. Nall, *Centers and shore points of a dendroid*, Topology Appl. **154** (2007), 2167–2172.
545
+
546
+ [NT90] J. Nikiel and E. D. Tymchatyn, *Sets of end-points and ramification points in dendroids*, Fund. Math. **138** (1991), 139–146.
547
+
548
+ [PV12] P. Pyrih and B. Vejnar, *A lambda-dendroid with two shore points whose union is not a shore set*, Topology Appl. **159** (2012), 69–74.
549
+
550
+ [Ro88] I. Rosenholtz, *Absolute endpoints of chainable continua*, Proc. Amer. Math. Soc. **103** (1988), 1305–1314.
551
+
552
+ [Wa23] T. Wazewski, *Sur un continu singulier*, Fundamenta Mathematicae **4** (1923), 214–245.
553
+
554
+ [Wh39] G. T. Whyburn, *Semi-locally connected sets*, Amer. J. Math. **61** (1939), 733–749.
555
+
556
+ [Wh42] G. T. Whyburn, *Analytic topology*, American Mathematical Society, New York, 1942.
557
+
558
+ J. Bobok
559
+ Faculty of Civil Engineering
560
+ Czech Technical University in Prague
561
+
562
+ P. Pyrih
563
+ Faculty of Mathematics and Physics
564
+ Charles University in Prague
565
+ 118 00 Prague
566
+ Czech Republic
567
+
568
+ B. Vejnar
569
+ Faculty of Mathematics and Physics
570
+ Charles University in Prague
571
+ 118 00 Prague
572
+ Czech Republic
573
+
574
+ *E-mail:* vejnar@karlin.mff.cuni.cz
575
+
576
+ *Received:* 14.8.2014.
577
+
578
+ *Revised:* 18.2.2015.
samples/texts_merged/4239587.md ADDED
@@ -0,0 +1,872 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ RECOVERING CONDUCTIVITY AT THE BOUNDARY IN
5
+ THREE-DIMENSIONAL ELECTRICAL IMPEDANCE
6
+ TOMOGRAPHY
7
+
8
+ GEN NAKAMURA
9
+
10
+ Graduate school of Science, Hokkaido University
11
+ Sapporo 060-0810, Japan
12
+
13
+ PÄIVI RONKANEN
14
+
15
+ Department of Physics and Mathematics
16
+ University of Eastern Finland
17
+ FIN-70211 Kuopio, Finland
18
+
19
+ SAMULI SILTANEN
20
+
21
+ Department of Mathematics and Statistics
22
+ FI-00014 University of Helsinki, Finland
23
+
24
+ KAZUMI TANUMA
25
+
26
+ Department of Mathematics, Graduate School of Engineering
27
+ Gunma University
28
+ Kiryu 376-8515, Japan
29
+
30
+ (Communicated by Matti Lassas)
31
+
32
+ **ABSTRACT.** The aim of electrical impedance tomography (EIT) is to reconstruct the conductivity values inside a conductive object from electric measurements performed at the boundary of the object. EIT has applications in medical imaging, nondestructive testing, geological remote sensing and subsurface monitoring. Recovering the conductivity and its normal derivative at the boundary is a preliminary step in many EIT algorithms; Nakamura and Tanuma introduced formulae for recovering them approximately from localized voltage-to-current measurements in [Recent Development in Theories & Numerics, International Conference on Inverse Problems 2003]. The present study extends that work both theoretically and computationally. As a theoretical contribution, reconstruction formulas are proved in a more general setting. On the computational side, numerical implementation of the reconstruction formulae is presented in three-dimensional cylindrical geometry. These experiments, based on simulated noisy EIT data, suggest that the conductivity at the boundary can be recovered with reasonable accuracy using practically realizable measurements. Further, the normal derivative of the conductivity can also be recovered in a similar fashion if measurements from a homogeneous conductor (dummy load) are available for use in a calibration step.
33
+
34
+ 2000 Mathematics Subject Classification. Primary: 35R30; Secondary: 65N21.
35
+ Key words and phrases. Electrical impedance tomography, boundary determination, localized Dirichlet to Neumann map, inverse conductivity problem.
36
+ ---PAGE_BREAK---
37
+
38
+ 1. **Introduction.** The aim of Electrical Impedance Tomography (EIT) is imaging the conductivity distribution inside an unknown body from electrical measurements at the boundary. Applications of EIT include medical imaging, nondestructive testing and subsurface monitoring, see [6, 7, 13]. We introduce a new practical solution method for the subproblem of recovering the conductivity and its normal derivative at the boundary of a three-dimensional target from localized measurements. This is required in several EIT algorithms as the first step before full reconstruction.
39
+
40
+ Assume given a bounded domain $\Omega \subset \mathbb{R}^3$ with Lipschitz boundary $\partial\Omega$ and a real-valued conductivity $\gamma \in L^\infty(\Omega)$ satisfying $\gamma(x) \ge c > 0$ almost everywhere in $\Omega$. We consider applying a voltage potential $f$ on the boundary and solving the Dirichlet problem
41
+
42
+ $$ (1) \qquad \left\{ \begin{array}{ll} \nabla \cdot (\gamma \nabla u) = 0 & \text{in } \Omega, \\ u = f & \text{on } \partial\Omega, \end{array} \right. $$
43
+
44
+ where $u = u(x)$ is electric potential. The resulting distribution of current through the boundary is
45
+
46
+ $$ (2) \qquad \Lambda_{\gamma} f = \gamma \frac{\partial u}{\partial \nu} |_{\partial \Omega} $$
47
+
48
+ where $\Lambda_{\gamma}$ is the Dirichlet-to-Neumann (DN) map and $\nu$ is the outward unit normal. The problem is to determine $\gamma$ from the knowledge of $\Lambda_{\gamma}$. This mathematical formulation was introduced by Calderón in [12].
49
+
50
+ Practical measurements are typically done using a finite number of electrodes on the surface of the body, and various data models including electrodes are discussed in [14, 44]. In this study we use the continuum model (1) for simplicity. However, the effect of electrodes is taken into account by considering the maximum frequency of spatial oscillations in Dirichlet data $f$ in (1) that can be approximated with reasonable accuracy using a given number of electrodes.
51
+
52
+ The following cylindrical geometry is frequently used in our discussion. Take $\ell > 0$ and $R > 0$ and define
53
+
54
+ $$ (3) \qquad \Omega := \{(x_1^2 + x_2^2)^{1/2} < R, |x_3| < \ell\} = \Omega' \times [-\ell, \ell] \subset \mathbb{R}^3, $$
55
+
56
+ where $\Omega' = D(0, R) \subset \mathbb{R}^2$. Denote the lateral boundary surface of $\Omega$ by
57
+
58
+ $$ \Gamma := \{(x_1^2 + x_2^2)^{1/2} = R, |x_3| < \ell\} = (\partial\Omega') \times [-\ell, \ell] \subset \partial\Omega. $$
59
+
60
+ Parametrize a neighborhood of $\Gamma$ by boundary normal coordinates $(\tau, s, r)$:
61
+
62
+ $$ (4) \qquad x_1 = (R - r) \cos(s/R), \quad x_2 = (R - r) \sin(s/R), \quad x_3 = \tau. $$
63
+
64
+ Then $\Omega$ and $\Gamma$ are given by $0 < r \le R$ and $r = 0$, respectively.
65
+
66
+ Given a point $x_0 \in \Gamma$, we wish to recover $\gamma(x_0)$ and $\partial\gamma/\partial\nu(x_0)$ approximately from the (local) knowledge of $\Lambda_\gamma$. Without loss of generality we may put $x_0 = (0, 0, 0)$ in the coordinate system $(\tau, s, r)$. We assume that the conductivity $\gamma$ is once continuously differentiable in a neighborhood of the boundary. Let $\eta(\tau, s)$ be any function in $C_0^1(\Gamma)$, choose a unit vector $(t_1, t_2) \in \mathbb{R}^2$ and define
67
+
68
+ $$ (5) \qquad \phi_N(\tau, s) = e^{iN(\tau t_1 + st_2)} \eta(\tau, s), $$
69
+
70
+ $$ (6) \qquad \psi_N(\tau, s) = e^{i\frac{N}{2}(\tau t_1 + st_2)} \eta(\tau, s). $$
71
+
72
+ The following formulas can be derived from Theorem 1 in Section 2:
73
+ ---PAGE_BREAK---
74
+
75
+ $$ (7) \quad \int_{\mathbb{R}^2} \gamma(\tau, s, 0) \eta(\tau, s)^2 d\tau ds = \lim_{N \to \infty} N^{-1} \langle \Lambda_\gamma \phi_N, \overline{\phi_N} \rangle, $$
76
+
77
+ $$ (8) \quad \int_{\mathbb{R}^2} \frac{\partial \gamma}{\partial \nu}(\tau, s, 0) \eta(\tau, s)^2 d\tau ds = \lim_{N \to \infty} \left[ \begin{aligned} & \left(2 - \frac{1+t_1^2-t_2^2}{2RN}\right) \langle \Lambda_\gamma \phi_N, \overline{\phi_N} \rangle \\ & -4 \langle \Lambda_\gamma \psi_N, \overline{\psi_N} \rangle \end{aligned} \right]. $$
78
+
79
+ Practical implementation of formulas (7) and (8) in dimension two is reported in [40]; according to those results, formula (7) can be used reliably for approximate recovery of the conductivity at the boundary, while practical use of formula (8) seems to require an unrealistic number of electrodes.
80
+
81
+ This paper has two goals. The theoretical goal is to prove generalizations of formulas (7) and (8) in a more general setting, where the forthcoming formulae (17) and (18) which generalize (7) and (8) contain geometric information about the boundary $\partial\Omega$. Such information may be used for estimating the shape of the boundary from EIT data, but we do not discuss such possibilities further in this work. Uncertainty in domain shape is a significant source of error in EIT reconstructions [30, 20, 1].
82
+
83
+ The computational goal is to implement (7) and (8) numerically in the three-dimensional cylindrical geometry (3) and study the possibilities of using them in practical EIT. We recover the trace of conductivity approximately at the boundary using formula (7) with a finite value of $N$. The results suggest that the right hand side of (7) converges as $N \to \infty$ quickly enough for the Dirichlet data (5) to remain only mildly oscillatory. It seems that the frequency of those oscillations is low enough for $\phi_N(\tau, s)$ to be represented reasonably accurately using 64 electrodes.
84
+
85
+ Practical use of formula (8) to recover the normal derivative of the conductivity seems to be more problematic, as is the case in the two-dimensional situation [40]. The convergence of the right hand side of (8) is too slow for acceptable reconstructions from realistic voltage-to-current measurements. However, our numerical experiments suggest that the difference between the right hand sides of (8) corresponding to a nontrivial conductivity and to a constant conductivity do converge rather quickly as $N \to \infty$, allowing reasonable reconstructions of normal derivative from realistic data after a calibration step.
86
+
87
+ Most three-dimensional EIT algorithms for recovering conductivity inside $\Omega$ are iterative methods where the direct problem (1) needs to be solved repeatedly using a numerical algorithm, typically the finite element method (FEM). This is computationally demanding since 3D FEM involves representing the conductivity with a large number of parameters. Out of the few published implementations of 3D EIT we mention the work of Barber, Brown, Metherall and Smallwood [31, 32, 33]; Blue, Goble, Cheney, Isaacson, Newell, Ross and Saulnier [5, 22, 41]; Morucci, Granie, Lei, Chabert and Marsili [34]; Kaipio, Savolainen, P.J. Vauhkonen and M. Vauhkonen [49, 50, 51]; and Wexler [52]. In all these works the reconstruction algorithms need a good initial guess for conductivity inside $\Omega$ in order to converge to the global minimum; at present $\gamma$ is often assumed to be constant near the boundary. We believe that the knowledge of conductivity $\gamma$ and its normal derivative at the boundary $\partial\Omega$ helps to design better initial guesses for full reconstruction algorithms.
88
+
89
+ Non-iterative 3D EIT algorithms have been suggested as well, see [4, 8, 16], and numerical inclusion detection algorithms are presented in [24, 19, 25]. In such
90
+ ---PAGE_BREAK---
91
+
92
+ methods it may be important to continue the conductivity artificially outside $\Omega$ in a regular fashion; this involves recovering $\gamma$ and $\partial\gamma/\partial\nu$ at the boundary first.
93
+
94
+ The mathematical formulation of the inverse conductivity problem was originally given by Calderón, who solved in [12] a linearized version of the problem. Unique determination of piecewise real-analytic conductivities from the DN map was proved for $\mathbb{R}^n$ with $n \ge 2$ in [29]. The possibility of uniquely determining an infinitely smooth conductivity with $n \ge 3$ was shown in [47]. Later, unique determination in the case $n \ge 3$ has been shown for conductivities having $\frac{3}{2}$ derivatives in [42] (see also the refinement [10]), and in [23] for conductivities allowed to have certain conormal singularities on submanifolds. In dimension $n=2$, uniqueness was proven in [46] for radially symmetric conductivities and in [36] for nonsymmetric, twice weakly differentiable conductivities. Generalization to one derivative was provided in [11], and Calderón's original $L^\infty$ question was solved in [3].
95
+
96
+ Previous theoretical results on the recovery of conductivity and its derivatives at the boundary include [2, 28, 29, 35, 36, 48] starting from infinite precision data measured on the whole boundary, and [9, 27, 37, 38, 39] starting from infinite precision data measured on a part of the boundary. The present work is the first numerical boundary reconstruction result in dimension three.
97
+
98
+ This paper is organized as follows. In Section 2 we give a proof of formulas (7) and (8) using our main Theorem 1, which in turn is proved in Section 3. In Section 4 we explain how we simulate noisy voltage-to-current data, and in Section 5 we substitute the data to formulas (7) and (8) to study their convergence in practice as $N$ grows. Based on those numerical experiments we introduce and demonstrate a calibrated reconstruction method in Section 6. Finally, we conclude our findings in Section 7.
99
+
100
+ **2. Basic theorem and derivation of formulas (7) and (8).** Let $\Omega$ be a bounded domain in $\mathbb{R}^n$ with $n \ge 2$. We assume that the boundary $\partial\Omega$ is Lipschitz and, in addition, locally $C^2$ near a recovery point $x_0 \in \partial\Omega$. Then there exists a $C^2$ diffeomorphism $y = \Psi(x)$ which induces a curvilinear coordinate system $y = (y', y_n) = (y_1, \dots, y_{n-1}, y_n)$ around $x_0$ such that $\Psi(x_0) = 0$ and $\Omega, \partial\Omega$ are given by
101
+
102
+ $$ (9) \qquad \Omega = \{y_n > 0\}, \quad \partial\Omega = \{y_n = 0\} $$
103
+
104
+ locally around $y=0$. Let $G = (g_{ij})_{1\le i,j\le n}$ be the metric tensor associated with the diffeomorphism $y = \Psi(x)$, whose components are given by
105
+
106
+ $$ (10) \qquad g_{ij} = e_i \cdot e_j. $$
107
+
108
+ Here the natural base related to the curvilinear coordinate system $y$ is formed by
109
+
110
+ $$ (11) \qquad e_i = \left[ \frac{\partial x_k}{\partial y_i} \right]_{k \downarrow 1, 2, \dots, n} (i = 1, 2, \dots, n). $$
111
+
112
+ We assume that $y = (y', y_n)$ forms the boundary normal coordinates so that
113
+
114
+ $$ (12) \qquad g_{nn} = 1, \quad g_{\alpha n} = g_{n\alpha} = 0 \quad (\alpha = 1, 2, \dots, n-1) $$
115
+
116
+ in a neighborhood of $x_0$ in $\bar{\Omega}$. The contravariant components $g^{ij}$ ($1 \le i,j \le n$) of $G^{-1}$ are defined matrixwise as
117
+
118
+ $$ (g^{ij}) = G^{-1}. $$
119
+ ---PAGE_BREAK---
120
+
121
+ Furthermore, it is easy to see that
122
+
123
+ $$ (13) \qquad g^{ij} = \sum_{k=1}^{n} \frac{\partial y_i}{\partial x_k} \frac{\partial y_j}{\partial x_k}. $$
124
+
125
+ It follows from (12) that in a neighborhood of $x_0$ in $\bar{\Omega}$ we have
126
+
127
+ $$ (14) \qquad g^{nn} = 1, \quad g^{\alpha n} = g^{n\alpha} = 0 \quad (\alpha = 1, 2, \dots, n-1). $$
128
+
129
+ Let $d$ and $\delta$ be sufficiently small positive numbers so that the expressions (9) for $\Omega$ and $\partial\Omega$ are valid in a relatively open neighborhood $\mathcal{N}$ of $x_0 \in \partial\Omega$, where
130
+
131
+ $$ (15) \qquad \mathcal{N} = \{ |y'| < d, 0 \le y_n < 2\delta \} \subset \bar{\Omega}. $$
132
+
133
+ Let $\phi = \phi(y')$ be a phase function which satisfies the following eikonal equation:
134
+
135
+ $$ (16) \qquad \sum_{i,j=1}^{n-1} g^{ij}(y', 0) \frac{\partial \phi}{\partial y_i} \frac{\partial \phi}{\partial y_j} = 1. $$
136
+
137
+ We remark that equation (16) can be solved in $\{|y'| < d\}$ by the method of characteristic curves ([17]). When $g^{ij}(y', 0) = \delta_{ij}$ ($i, j = 1, 2, \dots, n-1$), we immediately have $\phi = y' \cdot t'$, where $t' = (t_1, \dots, t_{n-1})$ is any unit vector in $\mathbb{R}^{n-1}$. We shall use this specific phase function below in our numerical experiments.
138
+
139
+ **Theorem 1.** Assume that $\gamma \in L^\infty(\Omega)$ is strictly positive: $\gamma \ge c > 0$ (a.e. $x \in \Omega$). Also, suppose that $\gamma = \gamma(y', y_n)$, as a function of $y_n \in [0, 2\delta]$ with values in the space of $L^2(\{|y'| \le d\})$, is right continuous at $y_n = 0$, and that $\nabla_{y'}\gamma(y', 0) \in L^2(\{|y'| \le d\})$. Let $\eta(y')$ be any function in $C_0^1(\mathbb{R}^{n-1})$ compactly supported in $\{|y'| \le d\}$ and define the Dirichlet data $f_N$ and $g_N$ for $N = 1, 2, 3, \dots$ by
140
+
141
+ $$ f_N = e^{i N \phi(y')} \eta(y') \Big|_{y=\Psi(x), x \in \partial \Omega}, \qquad g_N = e^{i \frac{N}{2} \phi(y')} \eta(y') \Big|_{y=\Psi(x), x \in \partial \Omega}. $$
142
+
143
+ Then (i) and (ii) below hold:
144
+
145
+ (i) We have the equality
146
+
147
+ $$ (17) \qquad \lim_{N \to \infty} N^{-1} \langle \Lambda_\gamma f_N, \overline{f_N} \rangle = \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy'. $$
148
+
149
+ (ii) Suppose that $\gamma = \gamma(y', y_n)$, as a function of $y_n \in [0, 2\delta]$ with values in the space of $L^2(\{|y'| \le d\})$, is right differentiable at $y_n = 0$, and that $\nabla_{y'}\gamma(y', 0) \in L^2(\{|y'| \le d\})$. Then
150
+
151
+ $$ \lim_{N \to \infty} \left[ 4\langle \Lambda_\gamma g_N, \overline{g_N} \rangle - 2\langle \Lambda_\gamma f_N, \overline{f_N} \rangle \right] = \int_{\mathbb{R}^{n-1}} \frac{\partial \gamma}{\partial y_n}(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy' $$
152
+
153
+ $$ (18) + \frac{1}{2} \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \left. \frac{\partial}{\partial y_n} \frac{\sum_{i,j=1}^{n-1} g^{ij} \phi_{y_i} \phi_{y_j} + 1}{\sqrt{\det(g^{ij})}} \right|_{y_n=0} \eta(y')^2 \, dy'. $$
154
+
155
+ *Derivation of formulas (7) and (8) from Theorem 1.*
156
+
157
+ Putting $n=3$ and $(y_1, y_2, y_3) = (y', y_3) = (\tau, s, r)$, from (11), (4) and (10) we get
158
+
159
+ $$ G = (g_{ij}) = \begin{bmatrix} 1 & 0 & 0 \\ 0 & (1-r/R)^2 & 0 \\ 0 & 0 & 1 \end{bmatrix}, \qquad (g^{ij}) = G^{-1} = \begin{bmatrix} 1 & 0 & 0 \\ 0 & (1-r/R)^{-2} & 0 \\ 0 & 0 & 1 \end{bmatrix} $$
160
+
161
+ and $\det(g^{ij}) = (1 - r/R)^{-2}$. Thus we see that $(\tau, s, r)$ form boundary normal coordinates. Moreover, since $g^{ij}(y', 0) = \delta_{ij}$ ($i, j = 1, 2$), as a solution to eikonal
162
+ ---PAGE_BREAK---
163
+
164
+ equation (16) we may choose $\phi = y' \cdot t' = \tau t_1 + s t_2$, where $t' = (t_1, t_2)$ is any unit vector in $\mathbb{R}^2$. Then formula (7) is an immediate consequence of formula (17).
165
+
166
+ Noting that $-\frac{\partial \gamma}{\partial r}(\tau, s, 0) = \frac{\partial \gamma}{\partial \nu}(\tau, s, 0)$, where $\nu$ is the outward unit normal vector to the boundary $\partial\Omega$, we obtain
167
+
168
+ $$
169
+ \int_{\mathbb{R}^2} \frac{\partial \gamma}{\partial \nu}(\tau, s, 0) \eta(\tau, s)^2 d\tau ds = \lim_{N \to \infty} \left[ 2\langle \Lambda_\gamma \phi_N, \overline{\phi_N} \rangle - 4\langle \Lambda_\gamma \psi_N, \overline{\psi_N} \rangle \right] \\
170
+ - \frac{1+t_1^2-t_2^2}{2R} \int_{\mathbb{R}^2} \gamma(\tau, s, 0) \eta(\tau, s)^2 d\tau ds,
171
+ $$
172
+
173
+ which combined with (7) yields (8).
174
+
175
+ 3. **Proof of Theorem 1.** Let $\zeta(y_n) \in C^\infty([0, \infty))$ satisfy $0 \le \zeta \le 1$, $\zeta(y_n) = 1$ for $0 \le y_n \le \delta$, and $\zeta(y_n) = 0$ for $2\delta \le y_n$. Then from the weak formulation of $\Lambda_\gamma$ it follows that
176
+
177
+ $$
178
+ (19) \qquad \langle \Lambda_{\gamma} f_{N}, \overline{f}_{N} \rangle = \int_{\Omega} \gamma \nabla u_{N} \cdot \nabla (\zeta \overline{F}_{N}) \, dx,
179
+ $$
180
+
181
+ where $u_N \in H^1(\Omega)$ is the solution to
182
+
183
+ $$
184
+ (20) \qquad \nabla \cdot (\gamma \nabla u_N) = 0 \quad \text{in } \Omega, \quad u_N|_{\partial\Omega} = f_N,
185
+ $$
186
+
187
+ and $F_N(x)$ is an $H^1(\Omega)$ extension of $f_N$, for which we take
188
+
189
+ $$
190
+ (21) \qquad F_N(x) = e^{iN\phi(y')} e^{-Ny_n} \eta(y') \Big|_{y=\Psi(x)}.
191
+ $$
192
+
193
+ Put $r_N = u_N - \zeta F_N$. Then we get from (19)
194
+
195
+ $$
196
+ \langle \Lambda_{\gamma} f_{N}, \bar{f}_{N} \rangle = \int_{\Omega} \gamma \nabla(\zeta F_{N}) \cdot \nabla(\zeta \bar{F}_{N}) dx + \int_{\Omega} \gamma \nabla r_{N} \cdot \nabla(\zeta \bar{F}_{N}) dx = I_{1} + I_{2}.
197
+ $$
198
+
199
+ It suffices to show that
200
+
201
+ $$
202
+ (22) \quad \lim_{N \to \infty} N^{-1} I_1 = \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy',
203
+ $$
204
+
205
+ $$
206
+ (23) \quad \lim_{N \to \infty} N^{-1} I_2 = 0.
207
+ $$
208
+
209
+ We denote the Jacobian of the diffeomorphism $y = \Psi(x)$ by $\nabla\Psi$, which is given by
210
+
211
+ $$
212
+ (24) \qquad \nabla\Psi = \left( \frac{\partial y_i}{\partial x_j} \right)_{i,j=1,2,\dots,n}.
213
+ $$
214
+
215
+ Then $\nabla = \nabla_x = {}^t\nabla\Psi\nabla_y$, where the superscript $t$ denotes transposition. By the change of the coordinate systems between $x$ and $y$, integral $I_1$ becomes
216
+
217
+ $$
218
+ I_1 = \int_{\mathcal{N}} \gamma(y) {}^t\nabla\Psi\nabla_y(\zeta F_N) \cdot {}^t\nabla\Psi\nabla_y(\zeta \overline{F_N}) |\det\nabla\Psi|^{-1} dy
219
+ $$
220
+
221
+ $$
222
+ (25) \qquad = \int_{\mathcal{N}} \gamma(y) \left( \frac{\nabla\Psi{}^t\nabla\Psi}{|\det\nabla\Psi|} \nabla_y(\zeta F_N) \right) \cdot \nabla_y(\zeta \overline{F_N}) dy,
223
+ $$
224
+
225
+ where $\mathcal{N}$ is a relatively open neighborhood of $x_0 \in \partial\Omega$ defined by (15). Equations (13) and (24) imply that $|\det\nabla\Psi|^{-1}\nabla\Psi{}^t\nabla\Psi = (\det(g^{ij}))^{-1/2}(g^{ij})$. Henceforth, we use the $n \times n$ symmetric matrix $\tilde{\gamma}$
226
+
227
+ $$
228
+ (26) \qquad \tilde{\gamma}(y) = \gamma(y) (\det(g^{ij}))^{-1/2} (g^{ij}).
229
+ $$
230
+ ---PAGE_BREAK---
231
+
232
+ Then $I_1 = \int_{\mathcal{N}} \tilde{\gamma}(y) \nabla_y (\zeta F_N) \cdot \nabla_y (\zeta \overline{F_N}) dy$. Since $\zeta = 1$ for $0 \le y_n \le \delta$, it is convenient to put $D = \{|y'| \le d, 0 \le y_n \le \delta\}, D' = \{|y'| \le d, \delta \le y_n\}$ and decompose $I_1$ as
233
+
234
+ $$ (27) \qquad I_1 = \int_D \tilde{\gamma}(y) \nabla_y F_N \cdot \nabla_y \overline{F_N} dy + \int_{D'} \tilde{\gamma}(y) \nabla_y (\zeta F_N) \cdot \nabla_y (\zeta \overline{F_N}) dy = I_3 + I_4. $$
235
+
236
+ Now (21) implies the following two equations:
237
+
238
+ $$ (28) \qquad \nabla_y F_N = \left( N \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \eta(y') + \begin{bmatrix} \nabla_{y'} \eta \\ 0 \end{bmatrix} \right) e^{i N \phi(y')} e^{-N y_n}, $$
239
+
240
+ $$ (29) \qquad \nabla_y(\zeta F_N) = \left( N \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \zeta(y_n)\eta(y') + \nabla_y(\zeta\eta) \right) e^{i N\phi(y')} e^{-Ny_n}. $$
241
+
242
+ Thus we see that
243
+
244
+ $$ I_3 = N^2 \int_D \tilde{\gamma}(y) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} \eta(y')^2 e^{-2Ny_n} dy $$
245
+
246
+ $$ (30) \qquad + \int_D \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} e^{-2Ny_n} dy. $$
247
+
248
+ In deriving (30), we have used the fact that the term of the order $O(N)$ in the integrand of $I_3$ vanishes, because
249
+
250
+ $$ \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} + \tilde{\gamma}(y) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} = \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} 0 \\ -2 \end{bmatrix} = 0, $$
251
+
252
+ the last equality of which follows from (14) and (26).
253
+
254
+ After the scaling transformation $z_n = N y_n$ we get for large $N$
255
+
256
+ $$ I_3 = N \int_0^{N\delta} \left( \int_{|y'|<d} \tilde{\gamma}(y', \frac{z_n}{N}) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} \eta(y')^2 dy' \right) e^{-2z_n} dz_n + O(\frac{1}{N}). $$
257
+
258
+ The order of the last term in the right hand side follows from $\tilde{\gamma}(y) \in L^\infty(D)$ and $z_n = Ny_n$. The dominated convergence theorem and $\int_0^\infty e^{-2z_n} dz_n = \frac{1}{2}$ imply
259
+
260
+ $$ \lim_{N\to\infty} N^{-1} I_3 = \frac{1}{2} \int_{|y'|<d} \tilde{\gamma}(y', 0) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} \eta(y')^2 dy'. $$
261
+
262
+ By (14) and (26), the integrand on the right hand side becomes
263
+
264
+ $$ (31) \qquad \gamma(y', 0) \left( \sum_{i,j=1}^{n-1} g^{ij}(y', 0) \frac{\partial\phi}{\partial y_i} \frac{\partial\phi}{\partial y_j} + 1 \right) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}}. $$
265
+
266
+ Therefore, from eikonal equation (16) we obtain
267
+
268
+ $$ (32) \qquad \lim_{N\to\infty} N^{-1} I_3 = \int_{|y'|<d} \gamma(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy'. $$
269
+
270
+ This, combined with (27) and $I_4 = O(Ne^{-2\delta N})$ ($N \to +\infty$), proves (22).
271
+
272
+ To prove (23), we follow basically the methods in [9] and [43]. From (20), (21) and $r_N = u_N - \zeta F_N$ it follows that $r_N \in H_0^1(\Omega)$ satisfies $\nabla \cdot (\gamma\nabla r_N) = -\nabla \cdot (\gamma\nabla(\zeta F_N))$ in $\Omega$. The Lax-Milgram theorem ([21], §§5.7, 5.8) implies that
273
+
274
+ $$ (33) \qquad \|r_N\|_{H_0^1(\Omega)} \le C \|\nabla \cdot (\gamma\nabla(\zeta F_N))\|_{H^{-1}(\Omega)}. $$
275
+
276
+ Here and hereafter we denote by $C$ any positive constants (depending on $\gamma, \Psi, \zeta$ or $\eta$) without distinguishing between them. We use the dual pairing of $H^{-1}(\Omega)$
277
+ ---PAGE_BREAK---
278
+
279
+ with $H_0^1(\Omega)$ to write $I_2$ as $I_2 = -\langle \nabla \cdot (\gamma \nabla (\zeta \overline{F_N})), r_N \rangle_{H^{-1}-H_0^1}$. Then $|I_2| \le \|\nabla \cdot (\gamma \nabla (\zeta \overline{F_N}))\|_{H^{-1}(\Omega)} \|r_N\|_{H_0^1(\Omega)}$ and it follows from (33) that
280
+
281
+ $$
282
+ (34) \qquad |I_2| \le C \|\nabla \cdot (\gamma \nabla (\zeta F_N))\|^2_{H^{-1}(\Omega)}.
283
+ $$
284
+
285
+ Hence we shall estimate $\|\nabla \cdot (\gamma \nabla(\zeta F_N))\|_{H^{-1}(\Omega)}$.
286
+
287
+ For any $v \in H_0^1(\Omega)$, it follows after changing coordinates (see (24)-(26)) that
288
+
289
+ $$
290
+ \begin{align*}
291
+ \langle \nabla \cdot (\gamma \nabla(\zeta F_N)), v \rangle_{H^{-1}-H_0^1} &= -\int_{\Omega} \gamma \nabla(\zeta F_N) \cdot \nabla v \, dx \\
292
+ &= -\int_{\mathcal{N}} \tilde{\gamma}(y) \nabla_y(\zeta F_N) \cdot \nabla_y v \, dy \\
293
+ &= -\int_{\mathcal{N}} (\tilde{\gamma}(y', y_n) - \tilde{\gamma}(y', 0)) \nabla_y(\zeta F_N) \cdot \nabla_y v \, dy - \int_{\mathcal{N}} \tilde{\gamma}(y', 0) \nabla_y(\zeta F_N) \cdot \nabla_y v \, dy \\
294
+ &= I_5(v) + I_6(v).
295
+ \end{align*}
296
+ $$
297
+
298
+ Hence
299
+
300
+ $$
301
+ (35) \qquad \nabla \cdot (\gamma \nabla(\zeta F_N)) = I_5 + I_6, \quad I_5, I_6 \in H^{-1}(\Omega).
302
+ $$
303
+
304
+ By the Schwarz inequality
305
+
306
+ $$
307
+ |I_5(v)|^2 \le C \int_{\mathcal{N}} |\tilde{\gamma}(y', y_n) - \tilde{\gamma}(y', 0)|^2 |\nabla_y(\zeta F_N)|^2 dy \|v\|_{H_0^1(\Omega)}^2,
308
+ $$
309
+
310
+ and we get $\|I_5\|_{H^{-1}(\Omega)}^2 \le C \int_{\mathcal{N}} |\tilde{\gamma}(y', y_n) - \tilde{\gamma}(y', 0)|^2 |\nabla_y(\zeta F_N)|^2 dy$. From (29) and the scaling transformation $z_n = N y_n$ we have
311
+
312
+ $$
313
+ \begin{equation}
314
+ \begin{aligned}
315
+ \|I_5\|_{H^{-1}(\Omega)}^2 &\le C \int_0^{2\delta} \int_{|y'| \le d} |\tilde{\gamma}(y', y_n) - \tilde{\gamma}(y', 0)|^2 \\
316
+ &\qquad \times (N^2(|\nabla_{y'}\phi|^2 + 1)\zeta(y_n)^2\eta(y')^2 + O(N)) e^{-2Ny_n} dy'dy_n \\
317
+ &= C \int_0^{2N\delta} \int_{|y'| \le d} |\tilde{\gamma}(y', \frac{z_n}{N}) - \tilde{\gamma}(y', 0)|^2 \\
318
+ &\qquad \times (N(|\nabla_{y'}\phi|^2 + 1)\zeta(\frac{z_n}{N})^2 \eta(y')^2 + O(1)) e^{-2z_n} dy'dz_n
319
+ \end{aligned}
320
+ \tag{36}
321
+ \end{equation}
322
+ $$
323
+
324
+ for large N. Therefore, the dominated convergence theorem and the assumption in
325
+ (i) of the theorem imply that
326
+
327
+ $$
328
+ (37) \qquad N^{-1/2} \|I_5\|_{H^{-1}(\Omega)} = o(1) \quad (N \to +\infty).
329
+ $$
330
+
331
+ Now we shall estimate $I_6(v)$. From (29) it follows that
332
+
333
+ $$
334
+ \begin{equation}
335
+ \begin{split}
336
+ I_6(v) = {}& -N \int_\mathcal{N} \tilde{\gamma}(y', 0) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \nabla_y v \, \zeta(y_n) \eta(y') e^{iN\phi(y')} e^{-Ny_n} \, dy \\
337
+ & - \int_\mathcal{N} \tilde{\gamma}(y', 0) \nabla_y(\zeta\eta) \cdot \nabla_y v \, e^{iN\phi(y')} e^{-Ny_n} dy = I_7(v) + I_8(v).
338
+ \end{split}
339
+ \tag{38}
340
+ \end{equation}
341
+ $$
342
+ ---PAGE_BREAK---
343
+
344
+ Since $v \in H_0^1(\Omega)$ and $\zeta(y_n)\eta(y') = 0$ on $\partial\mathcal{N}$, integrating by parts leads to
345
+
346
+ $$
347
+ \begin{align*}
348
+ I_7(v) &= N \int_{\mathcal{N}} \left[ \nabla_y \cdot \tilde{\gamma}(y', 0) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \right] \zeta(y_n) \eta(y') e^{iN\phi(y')} e^{-Ny_n} v \, dy \\
349
+ &\quad + N \int_{\mathcal{N}} \tilde{\gamma}(y', 0) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \nabla_y (\zeta\eta) e^{iN\phi(y')} e^{-Ny_n} v \, dy \\
350
+ &\quad + N^2 \int_{\mathcal{N}} \tilde{\gamma}(y', 0) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \zeta(y_n) \eta(y') e^{iN\phi(y')} e^{-Ny_n} v \, dy.
351
+ \end{align*}
352
+ $$
353
+
354
+ The last integral on the right hand side vanishes, because (14), (26) and eikonal equation (16) imply that
355
+
356
+ $$ \tilde{\gamma}(y', 0) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} = \frac{\gamma(y', 0)}{\sqrt{\det(g^{ij}(y', 0))}} \left( -\sum_{i,j=1}^{n-1} g^{ij}(y', 0) \phi_{y_i} \phi_{y_j} + 1 \right) = 0. $$
357
+
358
+ To estimate the first two terms on the right hand side, as in [9] and [43] we use the following consequence of Hardy's inequality [18]: $||v/y_n||_{L^2(\mathcal{N})} \le 2||\partial v/\partial y_n||_{L^2(\mathcal{N})}$ for $v \in H_0^1(\Omega)$. Then it holds that
359
+
360
+ $$ (39) \qquad \left\| \frac{v}{y_n} \right\|_{L^2(\mathcal{N})} \le C ||v||_{H_0^1(\Omega)} \quad (v \in H_0^1(\Omega)). $$
361
+
362
+ Since $\nabla_{y'} \gamma(y', 0), \gamma(y', 0) \in L^2(\{|y'| \le d\})$, the Schwarz inequality and (39) imply that
363
+
364
+ $$ |I_7(v)| \le CN (\|y_n\nabla_{y'}\tilde{\gamma}(y', 0)e^{-Ny_n}\|_{L^2(\mathcal{N})} + \|y_n\tilde{\gamma}(y', 0)e^{-Ny_n}\|_{L^2(\mathcal{N})}) \|v\|_{H_0^1(\Omega)}, $$
365
+
366
+ and it follows from $z_n = Ny_n$ that
367
+
368
+ $$
369
+ \begin{align}
370
+ |I_7(v)| &\leq CN^{-1/2} (\|z_n\nabla_{y'}\tilde{\gamma}(y', 0)e^{-z_n}\|_{L^2(|y'|\leq d, 0\leq z_n\leq 2N\delta)} \\
371
+ &\quad + \|z_n\tilde{\gamma}(y', 0)e^{-z_n}\|_{L^2(|y'|\leq d, 0\leq z_n\leq 2N\delta)}) \|v\|_{H_0^1(\Omega)} \leq CN^{-1/2}\|v\|_{H_0^1(\Omega)}. \tag{40}
372
+ \end{align}
373
+ $$
374
+
375
+ Here we have used the relation
376
+
377
+ $$ (41) \qquad \| \cdot \|_{L^2(\mathcal{N})} = N^{-1/2} \| \cdot \|_{L^2(|y'| \le d, 0 \le z_n \le 2N\delta)}. $$
378
+
379
+ We get from the Schwarz inequality $|I_8(v)| \le C\|\tilde{\gamma}(y', 0)e^{-Ny_n}\|_{L^2(\mathcal{N})}\|v\|_{H_0^1(\Omega)}$ and from (41) we have $|I_8(v)| \le CN^{-1/2}\|v\|_{H_0^1(\Omega)}$. Combining this with (38) and (40) we obtain
380
+
381
+ $$ (42) \qquad \|I_6\|_{H^{-1}(\Omega)} = O(N^{-1/2}) \quad (N \to +\infty). $$
382
+
383
+ Thus, from (35), (37) and (42) we have $\|\nabla \cdot (\gamma \nabla (\zeta F_N))\|_{H^{-1}(\Omega)} = o(N^{1/2})$. Hence from (34) we get $I_2 = o(N)$ as $N \to +\infty$. This proves (23).
384
+
385
+ Next we prove (ii). From the weak formulation of $\Lambda_\gamma$ it follows that
386
+
387
+ $$ (43) \qquad 4\langle\Lambda_\gamma g_N, \overline{g_N}\rangle - 2\langle\Lambda_\gamma f_N, \overline{f_N}\rangle = \int_\Omega 4\gamma\nabla v_N \cdot \nabla(\zeta\overline{G_N}) - 2\gamma\nabla u_N \cdot \nabla(\zeta\overline{F_N}) dx. $$
388
+
389
+ Here $u_N \in H^1(\Omega)$ is the solution to (20), $F_N$ is defined by (21), $v_N \in H^1(\Omega)$ is the solution to $\nabla \cdot (\gamma \nabla v_N) = 0$ in $\Omega$ with $v_N|_{\partial\Omega} = g_N$, and $G_N(x)$ is an $H^1(\Omega)$ extension of $g_N$, for which we choose $G_N(x) = e^{i\frac{N}{2}\phi(y')} e^{-\frac{N}{2}y_n}\eta(y')|_{y=\Psi(x)}$. Put
390
+ ---PAGE_BREAK---
391
+
392
+ $r_N = u_N - \zeta F_N$ and $s_N = v_N - \zeta G_N$. It follows that $r_N, s_N \in H_0^1(\Omega)$. Equation (43) can be written as
393
+
394
+ $$
395
+ \begin{align*}
396
+ & 4\langle \Lambda_{\gamma} g_{N}, \overline{g_{N}} \rangle - 2\langle \Lambda_{\gamma} f_{N}, \overline{f_{N}} \rangle \\
397
+ &= \int_{\Omega} (4\gamma \nabla (\zeta G_{N}) \cdot \nabla (\zeta \overline{G_{N}}) - 2\gamma \nabla (\zeta F_{N}) \cdot \nabla (\zeta \overline{F_{N}})) dx \\
398
+ & \quad - 2 \int_{\Omega} \gamma \nabla r_{N} \cdot \nabla (\zeta \overline{F_{N}}) dx + 4 \int_{\Omega} \gamma \nabla s_{N} \cdot \nabla (\zeta \overline{G_{N}}) dx = J_{1} - 2J_{2} + 4J_{3}.
399
+ \end{align*}
400
+ $$
401
+
402
+ We shall show that
403
+
404
+ $$
405
+ (44) \qquad \lim_{N \to \infty} J_1 = \int_{\mathbb{R}^{n-1}} \frac{\partial \gamma(y', 0)}{\partial y_n} \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy' \\
406
+ \qquad + \frac{1}{2} \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \left. \frac{\partial}{\partial y_n} \frac{\sum_{i,j=1}^{n-1} g^{ij} \phi_{y_i} \phi_{y_j} + 1}{\sqrt{\det(g^{ij})}} \right|_{y_n=0} \eta(y')^2 dy'
407
+ $$
408
+
409
+ and $\lim_{N\to\infty} J_2 = 0$ and $\lim_{N\to\infty} J_3 = 0$.
410
+
411
+ As in the proof of (i), after the change of the coordinate system, using the regions D and D' in (27) we write $J_1$ as
412
+
413
+ $$
414
+ \begin{equation}
415
+ \begin{aligned}
416
+ J_1 &= \int_D (4 \tilde{\gamma}(y) \nabla_y G_N \cdot \nabla_y \overline{G_N} - 2 \tilde{\gamma}(y) \nabla_y F_N \cdot \nabla_y \overline{F_N}) dy \\
417
+ &\quad + \int_{D'} (4 \tilde{\gamma}(y) \nabla_y (\zeta G_N) \cdot \nabla_y (\zeta \overline{G_N}) - 2 \tilde{\gamma}(y) \nabla_y (\zeta F_N) \cdot \nabla_y (\zeta \overline{F_N})) dy \\
418
+ &= J_4 + J_5.
419
+ \end{aligned}
420
+ \tag{45}
421
+ \end{equation}
422
+ $$
423
+
424
+ From (28), (29),
425
+
426
+ $$
427
+ \nabla_y G_N = \left( \frac{N}{2} \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \eta(y') + \begin{bmatrix} \nabla_{y'} \eta \\ 0 \end{bmatrix} \right) e^{i \frac{N}{2} \phi(y')} e^{-\frac{N}{2} y_n},
428
+ $$
429
+
430
+ and
431
+
432
+ $$
433
+ \nabla_y(\zeta G_N) = \left( \frac{N}{2} \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \zeta(y_n)\eta(y') + \nabla_y(\zeta\eta) \right) e^{i\frac{N}{2}\phi(y')} e^{-\frac{N}{2}y_n}
434
+ $$
435
+
436
+ it follows that
437
+
438
+ $$
439
+ \begin{align}
440
+ J_4 &= N^2 \int_D \tilde{\gamma}(y) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} \eta(y')^2 (e^{-Ny_n} - 2e^{-2Ny_n}) dy \\
441
+ &\quad + 2 \int_D \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} (2e^{-Ny_n} - e^{-2Ny_n}) dy. \tag{46}
442
+ \end{align}
443
+ $$
444
+
445
+ From the assumption there exists a function $h(y', y_n)$ of $y_n$ valued in $L^2(\{|y'| \le d\})$
446
+ such that
447
+
448
+ $$
449
+ (47) \qquad \tilde{\gamma}(y', y_n) - \tilde{\gamma}(y', 0) = y_n \left( \frac{\partial \tilde{\gamma}}{\partial y_n}(y', 0) + h(y', y_n) \right)
450
+ $$
451
+
452
+ with $h(y', y_n) \to 0$ in $L^2(\{|y'| \le d\})$ as $y_n \to +0$. Then the first term on the right hand side of (46) becomes
453
+
454
+ $$
455
+ N^2 \int_0^\delta \int_{|y'| \le d} y_n \left( \frac{\partial \tilde{\gamma}}{\partial y_n}(y', 0) + h(y', y_n) \right) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} \eta(y')^2 dy' \\
456
+ \times (e^{-Ny_n} - 2e^{-2Ny_n}) dy_n + O(Ne^{-\delta N})
457
+ $$
458
+ ---PAGE_BREAK---
459
+
460
+ for large $N$. Then after the scaling transformation $z_n = N y_n$ we get for large $N$
461
+
462
+ $$
463
+ J_4 = \int_0^{N\delta} \left( \int_{|y'| \le d} \left( \frac{\partial \tilde{\gamma}}{\partial y_n}(y', 0) + h(y', \frac{z_n}{N}) \right) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i \nabla_{y'} \phi \\ -1 \end{bmatrix} \eta(y')^2 dy' \right) \\ \times z_n (e^{-z_n} - 2e^{-2z_n}) dz_n + O(N^{-1}).
464
+ $$
465
+
466
+ The dominated convergence theorem and $\int_0^\infty z_n (e^{-z_n} - 2e^{-2z_n}) dz_n = \frac{1}{2}$ imply
467
+
468
+ $$
469
+ \lim_{N \to \infty} J_4 = \frac{1}{2} \int_{|y'| \le d} \frac{\partial \tilde{\gamma}}{\partial y_n}(y', 0) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i \nabla_{y'} \phi \\ -1 \end{bmatrix} \eta(y')^2 dy'.
470
+ $$
471
+
472
+ From (26) we have
473
+
474
+ $$
475
+ \frac{\partial \tilde{\gamma}}{\partial y_n}(y', 0) = \frac{\partial \gamma}{\partial y_n}(y', 0) \frac{(g^{ij}(y', 0))}{\sqrt{\det(g^{ij}(y', 0))}} + \gamma(y', 0) \frac{\partial}{\partial y_n} \frac{(g^{ij})}{\sqrt{\det(g^{ij})}} \Big|_{y_n=0}.
476
+ $$
477
+
478
+ Recalling that
479
+
480
+ $$
481
+ (g^{ij}(y', 0)) \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i \nabla_{y'} \phi \\ -1 \end{bmatrix} = 2
482
+ $$
483
+
484
+ which follows from (14) and (16) (cf. (31) and (32)), we obtain
485
+
486
+ $$
487
+ \lim_{N \to \infty} J_4 = \int_{|y'| \le d} \frac{\partial \gamma}{\partial y_n}(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy' \\
488
+ + \frac{1}{2} \int_{|y'| \le d} \gamma(y', 0) \frac{\partial}{\partial y_n} \frac{\sum_{i,j=1}^{n-1} g^{ij} \phi_{y_i} \phi_{y_j} + 1}{\sqrt{\det(g^{ij})}} \Bigg|_{y_n=0} \eta(y')^2 dy'
489
+ $$
490
+
491
+ This, combined with (45) and $J_5 = O(Ne^{-\delta N}) (N \to +\infty)$, proves (44).
492
+
493
+ To prove $\lim_{N\to\infty} J_2 = 0$ and $\lim_{N\to\infty} J_3 = 0$, recalling that we already have
494
+
495
+ $$
496
+ (48) \qquad |J_2| \le C \| \nabla \cdot (\gamma \nabla (\zeta F_N)) \|_{H^{-1}(\Omega)}^2
497
+ $$
498
+
499
+ (see (34)), we shall estimate $\|\nabla \cdot (\gamma \nabla (\zeta F_N))\|_{H^{-1}(\Omega)}$ under the conditions in (ii) of the theorem. For any $v \in H_0^1(\Omega)$, as in (35) we can write
500
+
501
+ $$
502
+ (49) \qquad \nabla \cdot (\gamma \nabla (\zeta F_N)) = J_6 + J_7,
503
+ $$
504
+
505
+ where $J_6, J_7 \in H^{-1}(\Omega)$ and
506
+
507
+ $$
508
+ \begin{aligned} J_6(v) &= -\int_D (\tilde{\gamma}(y', y_n) - \tilde{\gamma}(y', 0)) \nabla_y (\zeta F_N) \cdot \nabla_y v dy, \\
509
+ J_7(v) &= -\int_D \tilde{\gamma}(y', 0) \nabla_y (\zeta F_N) \cdot \nabla_y v dy. \end{aligned}
510
+ $$
511
+
512
+ Then $\|J_6\|_{H^{-1}(\Omega)}$ has the estimate (36) for large $N$. We get from (47)
513
+
514
+ $$
515
+ \|J_6\|_{H^{-1}(\Omega)}^2 \le CN^{-1} \int_0^{2N\delta} \left( \int_{|y'|<d} \left| \frac{\partial \tilde{\gamma}}{\partial y_n}(y', 0) + h(y', \frac{z_n}{N}) \right|^2 \right. \\
516
+ \left. \times ((|\nabla_{y'}\phi|^2 + 1)\eta(y')^2 + O(N^{-1})) dy' \right) z_n^2 e^{-2z_n} dz_n
517
+ $$
518
+
519
+ for large $N$. Therefore, the dominated convergence theorem implies that
520
+ $\|J_6\|_{H^{-1}(\Omega)} = O(N^{-1/2})$. The proof of $\|J_7\|_{H^{-1}(\Omega)} = O(N^{-1/2})$ is exactly the
521
+ same as that of (42). These estimates together with (49) imply
522
+
523
+ $$
524
+ \|\nabla \cdot (\gamma \nabla (\zeta F_N))\|_{H^{-1}(\Omega)} = O(N^{-1/2}) \quad (N \to +\infty).
525
+ $$
526
+ ---PAGE_BREAK---
527
+
528
+ Therefore, from (48) we get $J_2 = O(N^{-1})$ as $N \to +\infty$. This proves $\lim_{N \to \infty} J_2 = 0$. The proof of $\lim_{N \to \infty} J_3 = 0$ is parallel.
529
+
530
+ ### 4. Simulation of measurement data.
531
+
532
+ #### 4.1. Solution of the boundary value problem with FEM.
533
+ We need a variational equation for approximating the solution of (1) with FEM. We use test functions $v \in H^1(\Omega)$ and denote $H = H^1(\Omega) \times \mathbb{R}^L$. Multiplying (1) with a test function and integrating over the domain $\Omega$ yields
534
+
535
+ $$ (50) \qquad \int_{\Omega} v \nabla \cdot (\gamma \nabla u) \, dx = 0. $$
536
+
537
+ Equation (50) is referred to as the *variational form* of (1), and using Green's formula we can write it in the form
538
+
539
+ $$ (51) \qquad \int_{\Omega} \gamma \nabla u \cdot \nabla v \, dx - \int_{\partial \Omega} \gamma \frac{\partial u}{\partial \nu} v \, dS = 0. $$
540
+
541
+ The domain $\Omega$ is discretized into small tetrahedral elements and the potential distribution $u$ in $\Omega$ is expressed as a linear combination of piecewise linear basis functions $\varphi_j$, $j = 1, \dots, K$:
542
+
543
+ $$ (52) \qquad u(x) \approx \sum_{j=1}^{K} u_j \varphi_j(x), \quad x \in \mathbb{R}^3. $$
544
+
545
+ Here $K$ is the number of nodal points $x_j$ in the finite element mesh and $u_j = u(x_j)$. The piecewise linear basis functions $\varphi_j \in H^1(\Omega)$ are constructed uniquely by the requirement $\varphi_j(x_\ell) = \delta_{j\ell}$; we denote $E_j := \text{supp}(\varphi_j) \subset \Omega$.
546
+
547
+ Inserting the approximation (52) into (51) and using the basis functions $\varphi_j$ as test functions (Galerkin FEM scheme, see [45]) yields
548
+
549
+ $$ (53) \qquad \sum_{k=1}^{K} u_k \int_{\Omega} \gamma \nabla \varphi_k \cdot \nabla \varphi_j \, dx - \sum_{k=1}^{K} u_k \int_{\partial \Omega} \gamma \frac{\partial \varphi_k}{\partial \nu} \varphi_j \, dS = 0. $$
550
+
551
+ This can be written in the matrix form
552
+
553
+ $$ (54) \qquad Au = 0, $$
554
+
555
+ where the matrix $A$ is defined as
556
+
557
+ $$ (55) \qquad A(j,k) = \int_{E_k \cup E_j} \gamma \nabla \varphi_k \cdot \nabla \varphi_j \, dx - \int_{\partial(E_k \cup E_j) \cap \partial\Omega} \gamma \frac{\partial \varphi_k}{\partial\nu} \varphi_j \, dS $$
558
+
559
+ and $u = [u_1, ..., u_K]^T$. The solution of $Au = 0$ is not unique, but using the Dirichlet boundary condition $u = \phi_N$ on $x \in \partial\Omega$ and $u = 0$ on the horizontal boundaries (top and bottom) we can write
560
+
561
+ $$ (56) \qquad \tilde{A}\tilde{u} = -A_{\tilde{\phi}}\tilde{\phi}_N, $$
562
+
563
+ where $A_{\tilde{\phi}}$ includes the columns of the matrix $A$ that correspond with the nodes on the lateral and horizontal boundaries, $\tilde{A} = A\setminus A_{\tilde{\phi}}$, $\tilde{\phi}_N = (\phi_N, 0)$, $0 = (0, ..., 0)^T \in \mathbb{R}^M$, $M$ is the number of the nodes on the horizontal boundaries and $\tilde{u} = u\setminus\tilde{\phi}_N$. The problem (56) is overdetermined so the rows that correspond with the nodes at the boundary $\partial\Omega$ can be eliminated from the matrices $\tilde{A}$ and $A_{\tilde{\phi}}$, resulting in a solvable linear system of equations.
564
+ ---PAGE_BREAK---
565
+
566
+ Due to the above reduction, the boundary integrals in (55) need not be computed. The integrations over tetrahedra in equation (55) are computed by using the mapping which relates the actual element (global) to a standard element, see [49]. We represent the conductivity distribution using a piecewise linear basis.
567
+
568
+ Our local reconstruction method involves applying rather oscillatory Dirichlet data supported in small subsets of the boundary. Hence, in order to achieve an adequate accuracy with reasonable computational cost, it is advantageous to use non-uniform finite element meshes with higher density near the support of the Dirichlet data. One example of a non-uniform mesh of the type we use is illustrated in Figure 1.
569
+
570
+ FIGURE 1. Computational grid corresponding to one boundary node. Number of the nodes is 950.
571
+
572
+ **4.2. Choosing cut-off functions.** We use the cut-off function $\eta(\tau, s) = \eta_1(s)\eta_2(\tau)$ with
573
+
574
+ $$ (57) \qquad \eta_1(s) = \begin{cases} c_1 \left(T - \frac{\pi R}{2\epsilon_2}\right)^2 \left(T + \frac{\pi R}{2\epsilon_2}\right)^2 & \text{for } -\frac{\pi R}{2\epsilon_2} < T < \frac{\pi R}{2\epsilon_2}, \\ 0 & \text{otherwise,} \end{cases} $$
575
+
576
+ $$ (58) \qquad \eta_2(\tau) = \begin{cases} c_2 \left(Z - \frac{\pi R}{2\epsilon_1}\right)^2 \left(Z + \frac{\pi R}{2\epsilon_1}\right)^2 & \text{for } -\frac{\pi R}{2\epsilon_1} < Z < \frac{\pi R}{2\epsilon_1}, \\ 0 & \text{otherwise,} \end{cases} $$
577
+
578
+ where $T = s - t$ and $s, t \in [0, 2\pi R]$ and $Z = \tau - z$ and $\tau, z \in [0, h]$. The constants $c_j$ are chosen so that $\int \eta_j^2(s) ds = 1$ for $j = 1, 2$, and the parameters $\epsilon_1$ and $\epsilon_2$ can be used to adjust the width of the functions $\eta_1$ and $\eta_2$.
579
+
580
+ Figure 2 shows the cut-off functions corresponding to different $\epsilon_1$ and $\epsilon_2$ in cylindrical geometry (3) with $R = 1$ and $h = 1.7671$.
581
+
582
+ **4.3. Simulating noisy voltage-to-current measurements.** We apply the Dirichlet data specified by the theory and calculate the potentials in inner nodes as a solution of the boundary value problem as explained in Section 4.1.
583
+
584
+ We evaluate the normal derivative of the potentials $\frac{\partial u}{\partial \nu}$ for the computation of the current density. The gradient of $u$ can be approximated as follows: In the case of piecewise linear basis functions the gradient $\nabla u$ is constant in each element and discontinuous on element boundaries. We estimate the value of the gradient
585
+ ---PAGE_BREAK---
586
+
587
+ FIGURE 2. Plot of the cut-off function η(τ, s) with various ε₁ and ε₂. First row: ε₁ = ε₂=2. Second row: ε₁ = ε₂ = 4. Third row: ε₁ = ε₂ = 6. Fourth row: ε₁ = ε₂ = 8.
588
+
589
+ in boundary nodes as a mean value of the all elements connected to the node in question. The normal derivative of the potentials in each node (τ, s, 0) on the lateral surface of Ω can be then computed from the equation
590
+
591
+ $$
592
+ (59) \qquad \frac{\partial u}{\partial \nu}(\tau, s, 0) = \nabla u(\tau, s, 0) \cdot \nu(\tau, s, 0).
593
+ $$
594
+
595
+ We add simulated measurement noise $\mathcal{E}_N$ to the computed current density.
596
+
597
+ $$
598
+ (60) \qquad \mathcal{E}_N = \sum_{l=-N+1}^{N} \sigma a_l e^{il\theta},
599
+ $$
600
+
601
+ where $a_l$ are independent normally distributed random variables with mean zero and standard deviation 1, and $\sigma > 0$ is a constant used to tune the noise amplitude.
602
+
603
+ 5. **Numerical experiments.** In this section we test the reconstruction formulas (7) and (8) numerically with a sequence of three-dimensional conductivity distributions with increasing complexity. As difficulties arise, we design corrective steps to
604
+ ---PAGE_BREAK---
605
+
606
+ overcome them. This process leads to a novel noise-robust reconstruction algorithm
607
+ that is presented in detail in Section 6.
608
+
609
+ For a given conductivity $\gamma$ we define
610
+
611
+ $$ (61) \qquad \tilde{g}_N := \frac{1}{N} \int_{\mathbb{R}^2} \overline{\phi_N} \Lambda_\gamma \phi_N d\tau ds. $$
612
+
613
+ As the surface measure on the lateral boundary $\Gamma$ is $d\tau ds$, by formula (7) we have
614
+
615
+ $$ \tilde{g}_N \approx \int_{\mathbb{R}^2} \gamma(\tau, s, 0) \eta(\tau, s)^2 d\tau ds . $$
616
+
617
+ Furthermore, set
618
+
619
+ $$ (62) \qquad \tilde{h}_N := \left(2 + \frac{t_2^2 - t_1^2 - 1}{2NR}\right) \int_{\mathbb{R}^2} \overline{\phi_N} \Lambda_\gamma \phi_N d\tau ds - 4 \int_{\mathbb{R}^2} \overline{\psi_N} \Lambda_\gamma \psi_N d\tau ds. $$
620
+
621
+ for any unit vector $(t_1, t_2)$. Then by formula (8) we have
622
+
623
+ $$ \tilde{h}_N \approx \int_{\mathbb{R}^2} \frac{\partial \gamma}{\partial \nu}(\tau, s, 0) \eta(\tau, s)^2 d\tau ds. $$
624
+
625
+ We work in cylindrical geometry (3) with $R = 1$ and $h = 1.7671$. We use standard deviation $\sigma = 0.0001$ in (60), giving relative noise level 0.01 %.
626
+
627
+ **5.1. Homogeneous conductivity.** Our first experiment uses simply the homogeneous conductivity distribution $\gamma_0 \equiv 1$. We substitute $\Lambda_{\gamma_0}$ to formula (7) and call the result $\tilde{g}_N^{(0)}$. Then we have $\tilde{g}_N^{(0)} \to 1$ as $N$ grows, and we can study numerically the speed of convergence using various values of the related parameters.
628
+
629
+ Let us first get an idea how large $N$ is practically useful. Figure 3 shows the Dirichlet data for several values of $N$. Apparently there is hope of representing the data with $N = 20$ with 64 electrodes in an 8 × 8 configuration, but the data with $N = 50$ seems to need way too many electrodes to be practically feasible. This rough derivation is based on the simple idea that each minimum and maximum of the Dirichlet data needs to be evaluated on at least one electrode. Consequently we will restrict our experiments to $0 < N \le 20$.
630
+
631
+ Next we examine the convergence rate $\tilde{g}_N^{(0)} \to 1$ as $N$ grows. Figure 4 shows $\tilde{g}_N^{(0)}$ as function of $N$ computed with finite element mesh with varying numbers of elements. We conclude that the mesh comprising 16285 nodes gives acceptable accuracy in the range $14 \le N \le 20$, and we will use that mesh in the sequel.
632
+
633
+ We study the effect of cut-off function on speed of convergence by choosing different values for $\epsilon_1$ and $\epsilon_2$ in (57) and (58), respectively. Figure 5 illustrates that using a wider cut-off function leads to faster convergence. Thus there is a trade-off between (a) more accurate reconstruction using a narrow cut-off function that better approximates Dirac's delta, and (b) higher rate of convergence.
634
+
635
+ We proceed to test the reconstruction of the normal derivative. We substitute $\Lambda_{\gamma_0}$ to formula (8) and call the result $\tilde{h}_N^{(0)}$. Then by formula (8) we have $\tilde{h}_N^{(0)} \to 0$ as $N \to \infty$. Line " *" in Figure 6 shows $\tilde{h}_N^{(0)}$ as function of $N$. We see that $\tilde{h}_N^{(0)}$ converges slowly and the apparent limit value is -5 instead of the value 0 predicted by theory. We conclude that more experimenting is needed to find out what's going on.
636
+ ---PAGE_BREAK---
637
+
638
+ FIGURE 3. Plot of the Dirichlet data $\phi_N$ with three different values of $N$. $t_1 = t_2 = \frac{1}{\sqrt{2}}$ and $\epsilon_1 = \epsilon_2 = 4$. First row: $N = 10$. Second row: $N = 14$. Third row: $N = 20$. Fourth row: $N = 50$
639
+
640
+ **5.2. Radial conductivities with unit trace.** We define a collection of radially varying conductivity distributions when $0 \le r \le \frac{4R}{5}$ (constant distribution when $\frac{4R}{5} < r \le R$) for further testing of formula (62):
641
+
642
+ $$
643
+ \begin{aligned}
644
+ \gamma_1(\tau, s, r) &:= R - r, && \gamma_1|_{\partial\Omega} = 1, && \frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1, \\
645
+ \gamma_2(\tau, s, r) &:= (R - r)^2, && \gamma_2|_{\partial\Omega} = 1, && \frac{\partial\gamma_2}{\partial\nu}|_{\partial\Omega} = 2, \\
646
+ \gamma_3(\tau, s, r) &:= (R - r)^3, && \gamma_3|_{\partial\Omega} = 1, && \frac{\partial\gamma_3}{\partial\nu}|_{\partial\Omega} = 3.
647
+ \end{aligned}
648
+ $$
649
+
650
+ Figure 6 shows the convergence of $\tilde{h}_N$ as function of $N$ computed using formula (62) for $\gamma_1$ and $\gamma_2$ and $\gamma_3$. We see that the various $\tilde{h}_N$ converge slowly to limit values with systematic error of -5.
651
+ ---PAGE_BREAK---
652
+
653
+ FIGURE 4. Estimated convolution $\tilde{g}_N$ defined in (61) as a function of N corresponding to one node on the boundary with different computational grids, $\epsilon_1 = \epsilon_2 = 4$. Correct value of conductivity is one. Line "* * *": 950 nodes in grid. Line "++": 2946 nodes in grid. Line "- -": 9324 nodes in grid. Line "o o": 16285 nodes in grid. Line "x x": 21385 nodes in grid.
654
+
655
+ FIGURE 5. Estimated convolution $\tilde{g}_N$ computed with formula (61) as a function of N corresponding to one node on the boundary with computational grid of 16285 nodes and various $\epsilon_1$ and $\epsilon_2$. Correct value of conductivity is one. Line "* * *": $\epsilon_1 = \epsilon_2 = 2$. Line "++": $\epsilon_1 = \epsilon_2 = 4$. Line "- -": $\epsilon_1 = \epsilon_2 = 6$. Line "o o": $\epsilon_1 = \epsilon_2 = 8$.
656
+
657
+ However, the evidence in Figure 6 suggests the relative values of $\tilde{h}_N$ are roughly correct throughout the computational interval $2 \le N \le 20$! This surprising observation can be used to calibrate the results as follows. Suppose we have available
658
+ ---PAGE_BREAK---
659
+
660
+ FIGURE 6. Estimated normal derivative $\tilde{h}_N$ computed with formula (62) as a function of N corresponding to one node on the boundary: computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line “**”: $\gamma_0(\tau, s, r) = 1$ and $\frac{\partial\gamma_0}{\partial\nu}|_{\partial\Omega} = 0$. Line “+-”: $\gamma_1(\tau, s, r) = R-r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line “-”: $\gamma_2(\tau, s, r) = (R-r)^2$ and $\frac{\partial\gamma_2}{\partial\nu}|_{\partial\Omega} = 2$. Line “o o”: $\gamma_3(\tau, s, r) = (R-r)^3$ and $\frac{\partial\gamma_3}{\partial\nu}|_{\partial\Omega} = 3$.
661
+
662
+ measurements $\Lambda_{\gamma_0}$ from the “dummy load” conductivity $\gamma_0 \equiv 1$. We can compute $\tilde{h}_{20}^{(0)} \approx -5$ corresponding to $\gamma_0$. Since the relative values of $\tilde{h}_N$ are close to correct for $\gamma_1$ and $\gamma_2$ and $\gamma_3$, we suggest that the formula
663
+
664
+ $$ (63) \qquad \frac{\partial \gamma}{\partial \nu}(x_0) \approx \tilde{h}_{20}(x_0) - \tilde{h}_{20}^{(0)}(x_0) $$
665
+
666
+ serves as a calibrated reconstruction method of the normal derivative for any conductivity $\gamma$ with trace 1.
667
+
668
+ 5.3. **Radial conductivities with varying traces.** It remains to study the numerical properties of formula (62) in the case of conductivities whose trace is not 1. To this end, we define a collection of conductivities with varying traces as follows:
669
+
670
+ $$ \begin{align*} \gamma_4(\tau, s, r) &:= R - r + 1, & \gamma_4|_{\partial\Omega} &= 2, & \frac{\partial\gamma_4}{\partial\nu}|_{\partial\Omega} &= 1, \\ \gamma_5(\tau, s, r) &:= R - r + 2, & \gamma_5|_{\partial\Omega} &= 3, & \frac{\partial\gamma_5}{\partial\nu}|_{\partial\Omega} &= 1, \\ \gamma_6(\tau, s, r) &:= R - r + 3, & \gamma_6|_{\partial\Omega} &= 4, & \frac{\partial\gamma_6}{\partial\nu}|_{\partial\Omega} &= 1. \end{align*} $$
671
+
672
+ The integral in formula (61) as function of N is shown in Figure 7. The slope of the curve increases when conductivity value on the boundary node increases; the values of the integral actually seem to depend linearly on the conductivity value at the boundary. For instance, for fixed N, the integral corresponding to $\gamma_4$ satisfying $\gamma_4(\tau, s, 0) = 2$ is twice as large as the integral corresponding to $\gamma_1$ satisfying $\gamma_1(\tau, s, 0) = 1$.
673
+
674
+ The same phenomenon can be observed when the normal derivatives are estimated, see Fig. 8. Hence we suggest that $\tilde{h}_{20}^{(0)}$ in the calibrated algorithm (63) should be multiplied
675
+ ---PAGE_BREAK---
676
+
677
+ FIGURE 7. The values of the integral in formula (61) as a function of N corresponding to one node on the boundary with computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "* * *": conductivity distribution $\gamma_1(\tau, s, r) = R - r$, on the boundary $\gamma_1(\tau, s, 0) = 1$. Line " + +": conductivity distribution $\gamma_4(\tau, s, r) = (R - r) + 1$, on the boundary $\gamma_4(\tau, s, 0) = 2$. Line "-": conductivity distribution $\gamma_5(\tau, s, r) = (R - r) + 2$, on the boundary $\gamma_5(\tau, s, 0) = 3$. Line "o-o": conductivity distribution $\gamma_6(\tau, s, r) = (R - r) + 3$, on the boundary $\gamma_6(\tau, s, 0) = 4$.
678
+
679
+ by estimated conductivity $\tilde{g}_{20}(x_0)$.
680
+
681
+ $$ (64) \qquad \frac{\partial \gamma}{\partial \nu}(\tau, s) \approx \tilde{h}_{20}(x_0) - \tilde{h}_{20}^{(0)}(x_0) \tilde{g}_{20}(x_0). $$
682
+
683
+ **6. The calibrated reconstruction algorithm.** The numerical experiments presented in Section 5 inspire us to suggest the following calibrated method for recovering the trace and normal derivative of a given conductivity $\gamma$ based on localized boundary measurements.
684
+
685
+ 1. Use the dummy load $\gamma_0 \equiv 1$ to find a big enough $N_0 > 0$ for $\tilde{g}_{N_0}^{(0)}$ computed by formula (7) to be reasonably close to 1.
686
+
687
+ 2. Use formula (7) to recover $\gamma|_{\Gamma}$ approximately as $\gamma(\tau, s, 0) \approx \tilde{g}_{N_0}(\tau, s)$.
688
+
689
+ 3. Substitute the dummy load to formula (8) and denote the result by $\tilde{h}_{N_0}^{(0)}$.
690
+
691
+ 4. Use formula (8) to recover the normal derivative of $\gamma$ approximately as
692
+
693
+ $$ \frac{\partial \gamma}{\partial \nu}(\tau, s) \approx \tilde{h}_{N_0}(\tau, s) - \tilde{h}_{N_0}^{(0)}(\tau, s) \tilde{g}_{N_0}(\tau, s). $$
694
+
695
+ We tested the calibrated reconstruction algorithm with the simple cases discussed in Section 5. Figure 9 shows the calibrated normal derivatives for the radial conductivities with unit trace as a function of $N$. The bigger the change in conductivity in the normal direction, the smaller the current density on the boundary becomes in the case of finite $N$. Hence the integral in formula (61) gets smaller values even if the trace of the conductivity on the boundary is the same in all cases. Therefore the calibrated algorithm (64) underestimates the normal derivatives of the conductivity.
696
+ ---PAGE_BREAK---
697
+
698
+ FIGURE 8. Estimated normal derivative $\tilde{h}_N$ computed with formula (62) as a function of N corresponding to one node on the boundary for the conductivity distributions: computational grid of 16285 nodes, $R=1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "**": $\gamma_1(\tau, s, r) = R-r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line "++": $\gamma_4(\tau, s, r) = (R-r)+1$ and $\frac{\partial\gamma_4}{\partial\nu}|_{\partial\Omega} = 1$. Line "--": $\gamma_5(\tau, s, r) = (R-r)+2$ and $\frac{\partial\gamma_5}{\partial\nu}|_{\partial\Omega} = 1$. Line "o": $\gamma_6(\tau, s, r) = (R-r)+3$ and $\frac{\partial\gamma_6}{\partial\nu}|_{\partial\Omega} = 1$.
699
+
700
+ Figure 10 shows the calibrated normal derivatives for the radial conductivities with varying trace as a function of $N$. As can be seen from Table 1, the absolute error between the true conductivity and its estimated convolution increases when the conductivity value increases. Therefore the error in the calibrated normal derivative of the conductivity also increases with the conductivity value. See Table 2 for reconstruction errors between true normal derivatives and calibrated normal derivatives.
701
+
702
+ TABLE 1. Absolute errors and relative errors for the reconstruction of $\gamma|_{\partial\Omega}$ when $\frac{\partial\gamma_j}{\partial\nu}|_{\partial\Omega} = 1$
703
+
704
+ <table><thead><tr><th></th><th>|γ - g̃<sub>N</sub>|<sub>N=20</sub></th><th>|γ - g̃<sub>N</sub>|<sub>N=20</sub>/γ</th></tr></thead><tbody><tr><td>γ<sub>1</sub>(τ, s, 0) = 1</td><td>0.0915</td><td>0.0915</td></tr><tr><td>γ<sub>4</sub>(τ, s, 0) = 2</td><td>0.1844</td><td>0.0922</td></tr><tr><td>γ<sub>5</sub>(τ, s, 0) = 3</td><td>0.2661</td><td>0.0887</td></tr><tr><td>γ<sub>6</sub>(τ, s, 0) = 4</td><td>0.3479</td><td>0.0870</td></tr></tbody></table>
705
+
706
+ At the moment the approximation properties of the above method are not well understood. However, we can gather intuition about the method by testing it also with a more demanding example. We test our calibrated reconstruction algorithm with a fairly complicated non-homogeneous distribution with three inclusions. Two of the inclusions touch the boundary and one is located in the middle of the target, see Figure 11.
707
+
708
+ We estimate the conductivity and normal derivatives on 1216 boundary points (64 equidistantly placed points on 19 layers). A nonuniform finite element mesh
709
+ ---PAGE_BREAK---
710
+
711
+ FIGURE 9. Calibrated normal derivative computed with formula (64) as a function of N corresponding to one node on the boundary: computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "*": $\gamma_0(\tau, s, r) = 1$ and $\frac{\partial\gamma_0}{\partial\nu}|_{\partial\Omega} = 0$. Line "+ +": $\gamma_1(\tau, s, r) = R-r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line "- -": $\gamma_2(\tau, s, r) = (R-r)^2$ and $\frac{\partial\gamma_2}{\partial\nu}|_{\partial\Omega} = 2$. Line "o o": $\gamma_3(\tau, s, r) = (R-r)^3$ and $\frac{\partial\gamma_3}{\partial\nu}|_{\partial\Omega} = 3$.
712
+
713
+ TABLE 2. Absolute errors and relative errors for the reconstruction of $\frac{\partial \gamma}{\partial \nu} |_{\partial \Omega}$.
714
+
715
+ <table><thead><tr><th></th><th>$|\frac{\partial \gamma}{\partial \nu} - \tilde{h}_N|_{N=20}$</th><th>$\frac{\frac{\partial \gamma}{\partial \nu} - \tilde{h}_N|_{N=20}}{\frac{\partial \gamma}{\partial \nu}}$</th></tr></thead><tbody><tr><td>γ₀(τ, s, 0) = 1,</td><td>$\frac{\partial \gamma_0}{\partial \nu} = 0$</td><td>0.1842</td></tr><tr><td>γ₁(τ, s, 0) = 1,</td><td>$\frac{\partial \gamma_1}{\partial \nu} = 1$</td><td>0.3002</td></tr><tr><td>γ₂(τ, s, 0) = 2,</td><td>$\frac{\partial \gamma_2}{\partial \nu} = 2$</td><td>0.5078</td></tr><tr><td>γ₃(τ, s, 0) = 3,</td><td>$\frac{\partial \gamma_3}{\partial \nu} = 3$</td><td>0.7990</td></tr><tr><td>γ₄(τ, s, 0) = 4,</td><td>$\frac{\partial \gamma_4}{\partial \nu} = 1$</td><td>0.5069</td></tr><tr><td>γ₅(τ, s, 0) = 1,</td><td>$\frac{\partial \gamma_5}{\partial \nu} = 1$</td><td>0.6978</td></tr><tr><td>γ₆(τ, s, 0) = 1,</td><td>$\frac{\partial \gamma_6}{\partial \nu} = 1$</td><td>0.8852</td></tr></tbody></table>
716
+
717
+ is constructed corresponding to each boundary point; the number of nodes in the meshes is on the average 9000. See Figure 12 for the recovered trace and Figure 13 for the approximate normal derivative reconstructed using the above calibration.
718
+
719
+ We computed the relative $L^2(\partial\Omega)$ and $L^\infty(\partial\Omega)$ errors between the true conductivity distribution $\gamma|_{\partial\Omega}$ and its convolution $\gamma * \eta^2$. Errors were also computed between $\gamma * \eta^2$ and its approximation $\tilde{g}_N$.
720
+
721
+ $$ (65) \quad E_\gamma^2 = \frac{\|\gamma - \gamma * \eta^2\|_{L^2(\partial\Omega)}}{\|\gamma\|_{L^2(\partial\Omega)}} $$
722
+
723
+ $$ E_\gamma^\infty = \frac{\max_{\partial\Omega} |\gamma - \gamma * \eta^2|}{\max_{\partial\Omega} |\gamma|} $$
724
+
725
+ $$ (66) \quad E_g^2(N) = \frac{\|\tilde{g}_N - \gamma * \eta^2\|_{L^2(\partial\Omega)}}{\|\gamma\|_{L^2(\partial\Omega)}} $$
726
+
727
+ $$ E_g^\infty(N) = \frac{\max_{\partial\Omega} |\tilde{g}_N - \gamma * \eta^2|}{\max_{\partial\Omega} |\gamma|} $$
728
+ ---PAGE_BREAK---
729
+
730
+ FIGURE 10. Calibrated normal derivative computed with formula (64) as a function of N corresponding to one node on the boundary for the conductivity distributions: computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "* * *": $\gamma_1(\tau, s, r) = R - r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line "++": $\gamma_4(\tau, s, r) = (R-r)+1$ and $\frac{\partial\gamma_4}{\partial\nu}|_{\partial\Omega} = 1$. Line "--": $\gamma_5(\tau, s, r) = (R-r)+2$ and $\frac{\partial\gamma_5}{\partial\nu}|_{\partial\Omega} = 1$. Line "o o": $\gamma_6(\tau, s, r) = (R-r)+3$ and $\frac{\partial\gamma_6}{\partial\nu}|_{\partial\Omega} = 1$.
731
+
732
+ FIGURE 11. Left: True conductivity distribution on five cross-sectional planes. Right: True conductivity distribution on the lateral boundary.
733
+
734
+ See Table 3 for reconstruction errors.
735
+
736
+ TABLE 3. Relative errors (65) and (66) for the convolution and reconstruction of $\gamma|_{\partial\Omega}$. Three lowest and three highest boundary node layers have been removed.
737
+
738
+ <table><thead><tr><th>E<sup>2</sup>&gamma;</th><th>E<sup>&infin;</sup>&gamma;</th><th>E<sup>2</sup><sub>g</sub>(N)</th><th>E<sup>&infin;</sup><sub>g</sub>(N)</th></tr></thead><tbody><tr><td>0.0149</td><td>0.1009</td><td>0.0242</td><td>0.0384</td></tr></tbody></table>
739
+
740
+ 7. **Conclusion.** Our study of recovering trace and normal derivative of conductivity from static electric boundary measurements is based on two aspects: theoretical
741
+ ---PAGE_BREAK---
742
+
743
+ FIGURE 12. True conductivity distribution $\gamma(\tau, s, 0)$ (first row), convolution $(\gamma|_{\partial\Omega} * \eta^2)(\tau, s)$ (second row) and estimated conductivity distribution $\tilde{g}_N$ (third row) with same colormap.
744
+
745
+ FIGURE 13. True normal derivative $\frac{\partial\gamma}{\partial\nu}(\tau, s, 0)$ (first row) and estimated normal derivative $\tilde{h}_N$ (second row) with same colormap.
746
+
747
+ and numerical. Theoretically, we show that it is possible to recover convolved approximations to both trace and normal derivative from localized boundary measurements. Our Theorem 1 is proved under quite general geometric assumptions.
748
+
749
+ Our numerical experiments suggest that the trace of conductivity can be approximately recovered using Theorem 1 with a finite value of $N$ and simulated data with realistic noise level. The recovery of the normal derivative seems to be more difficult, but we are able to introduce a calibration method allowing useful reconstructions at least for our simulated examples.
750
+
751
+ The applicability of our method for real-world measured data needs further study. However, the voltage distributions applied at the boundary in our simulations seem to be representable using an 8 × 8 electrode array covering the support of the localized excitation pattern. Since we included simulated data with realistic noise level (relative error of the same order as in the ACT3 impedance imager of Rensselaer Polytechnic Institute [15]), we have a reason to believe that our method is implementable with a 64-channel impedance tomography device.
752
+ ---PAGE_BREAK---
753
+
754
+ **Acknowledgments.** We thank the referees for carefully reading the manuscript and for giving us helpful comments. The work of KT was partly supported by Grant-in-Aid for Scientific Research (C) (Nos. 19540113 & 22540111), Society for the Promotion of Science, Japan. The work of SS was supported by Academy of Finland (Centre of Excellence in Inverse Problems Research (213476) and Computational Science Research Programme (134868)). During part of the preparation of this work, SS worked as professor at the Department of Mathematics of Tampere University of Technology.
755
+
756
+ ## REFERENCES
757
+
758
+ [1] A. Adler, R. Guardo, and Y. Berthiaume, *Impedance imaging of lung ventilation: Do we need to account for chest expansion?*, IEEE Trans. Biomed. Eng., **43** (1996), 414–420.
759
+
760
+ [2] G. Alessandrini, *Singular solutions of elliptic equations and the determination of conductivity by boundary measurements*, J. Diff. Eq., **84** (1990), 252–273.
761
+
762
+ [3] K. Astala and L. Päivärinta, *Calderón's inverse conductivity problem in the plane*, Ann. of Math., **163** (2006), 265–299.
763
+
764
+ [4] J. Bikowski, "Electrical Impedance Tomography Reconstructions in two and three Dimensions; From Calderón to Direct Methods," Ph.D thesis, Colorado State University, 2008.
765
+
766
+ [5] R. Blue, "Real-time Three-dimensional Electrical Impedance Tomography," Ph.D thesis, R.P.I. in Troy, NY, 1997.
767
+
768
+ [6] L. Borcea, *Electrical impedance tomography*, Inverse Problems, **18** (2002), R99-R136.
769
+
770
+ [7] L. Borcea, *Addendum to "Electrical impedance tomography"*, Inverse Problems, **19** (2002), 997–998.
771
+
772
+ [8] G. Boverman, D. Isaacson, T-J Kao, G. J. Saulnier and J. C. Newell, "Methods for Direct Image Reconstruction for EIT in Two and Three Dimensions," in "Electrical Impedance Tomography Conf.," Hanover, New Hampshire, USA, (2008).
773
+
774
+ [9] R. M. Brown, Recovering the conductivity at the boundary from the Dirichlet to Neumann map: a pointwise result, J. Inverse and Ill-posed Prob., **9** (2001), 567–574.
775
+
776
+ [10] R. Brown and R. Torres, Uniqueness in the inverse conductivity problem for conductivities with $3/2$ derivatives in $L^p, p > 2n$, J. Fourier Analysis Appl., **9** (2003), 1049–1056.
777
+
778
+ [11] R. M. Brown and G. Uhlmann, Uniqueness in the inverse conductivity problem for nonsmooth conductivities in two dimensions, Comm. Partial Differential Equations, **22** (1997), 1009–1027.
779
+
780
+ [12] A. P. Calderón, On an inverse boundary value problem, Seminar on Numerical Analysis and its Applications to Continuum Physics, Soc. Brasileira de Matemática, (1980), 65–73.
781
+
782
+ [13] M. Cheney, D. Isaacson and J. C. Newell, Electrical impedance tomography, SIAM Review, **41** (1999), 85–101.
783
+
784
+ [14] K-S Cheng, D. Isaacson, J. C. Newell and D. G. Gisser, Electrode models for electric current computed tomography, IEEE Transactions on Biomedical Imaging, (1989), 918–924.
785
+
786
+ [15] R. D. Cook, G. J. Saulnier and J. C. Goble, A phase sensitive voltmeter for a high-speed, high-precision electrical impedance tomograph, in "Proc. Annu. Int. Conf. IEEE Engineering in Medicine and Biology Soc.," (1991), 22–23.
787
+
788
+ [16] H. Cornean, K. Knudsen and S. Siltanen, Towards a d-bar reconstruction method for three-dimensional EIT, Journal of Inverse and Ill-Posed Problems, **14** (2006), 111–134.
789
+
790
+ [17] R. Courant and D. Hilbert, "Methods of Mathematical Physics," Interscience Publishers, Vol. II 1962.
791
+
792
+ [18] E. B. Davies, "Heat Kernels and Spectral Theory," Cambridge University Press, Cambridge, 1989.
793
+
794
+ [19] B. Gebauer and N. Hyvönen, Factorization method and inclusions of mixed type in an inverse elliptic boundary value problem, Inverse Probl. Imaging, **2** (2008), 355–372.
795
+
796
+ [20] E. Gersing, B. Hoffman, and M. Osypka, Influence of changing peripheral geometry on electrical impedance tomography measurements, Medical & Biological Engineering & Computing, **34** (1996), 359–361.
797
+
798
+ [21] D. Gilbarg and N. S. Trudinger, "Elliptic Partial Differential Equations of Second Order," Grundlehren der Mathematischen Wissenschaften, Springer, Berlin, **224**, 1989.
799
+
800
+ [22] J. Goble, M. Cheney and D. Isaacson, Electrical impedance tomography in three dimensions, Appl. Comput. Electromagn. Soc. J., **7** (1992), 128–147.
801
+ ---PAGE_BREAK---
802
+
803
+ [23] A. Greenleaf, M. Lassas and G. Uhlmann, *The Calderón problem for conormal potentials, I: Global uniqueness and reconstruction*, Comm. Pure Appl. Math., **56** (2003), 328–352.
804
+
805
+ [24] M. Hanke and B. Schappel, *The factorization method for electrical impedance tomography in the half-space*, SIAM J. Appl. Math., **68** (2008), 907–924.
806
+
807
+ [25] T. Ide, H. Isozaki, S. Nakata and S. Siltanen, *Local detection of three-dimensional inclusions in electrical impedance tomography*, Inverse Problems, **26** (2010), 35001–35017.
808
+
809
+ [26] D. Isaacson, J. L. Mueller, J. C. Newell and S. Siltanen, *Reconstructions of chest phantoms by the d-bar method for electrical impedance tomography*, Physiol Meas., **27** (2006), 43–50.
810
+
811
+ [27] H. Kang and K. Yun, *Boundary determination of conductivities and Riemannian metrics via local Dirichlet-to-Neumann operator*, SIAM J. Math. Anal., **34** (2003), 719–735.
812
+
813
+ [28] R. V. Kohn and M. Vogelius, *Determining conductivity by boundary measurements*, Commun. Pure Appl. Math., **37** (1984), 289–298.
814
+
815
+ [29] R. V. Kohn and M. Vogelius, *Determining conductivity by boundary measurements II. Interior results*, Commun. Pure Appl. Math., **38** (1985), 643–667.
816
+
817
+ [30] V. Kolehmainen, M. Vauhkonen, P. A. Karjalainen and J. P. Kaipio, *Assessment of errors in static electrical impedance tomography with adjacent and trigonometric current patterns*, Physiological Measurement, **18** (1997), 289–303.
818
+
819
+ [31] P. Metherall, D. C. Barber and R. H. Smallwood, *Three dimensional electrical impedance tomography*, in "IX Int. Conf. Electrical Bio-Impedance," Heidelberg, Germany, (1995), 510–511.
820
+
821
+ [32] P. Metherall, D. C. Barber, R. H. Smallwood and B. H. Brown, *Three-dimensional electrical impedance tomography*, Nature, **380** (1996), 509–512.
822
+
823
+ [33] P. Metherall, R. H. Smallwood and D. C. Barber, *Three dimensional electrical impedance tomography of the human thorax*, in "18th Int. Conf. IEEE Eng. Med. Biol. Society," (1996).
824
+
825
+ [34] J. P. Morucci, M. Granie, M. Lei, M. Chabert and P. M. Marsili, *3D reconstruction in electrical impedance imaging using a direct sensitivity matrix approach*, Physiol. Meas., **16** (1995), A123–A128.
826
+
827
+ [35] A. I. Nachman, *Reconstructions from boundary measurements*, Ann. of Math., **128** (1988), 531–576.
828
+
829
+ [36] A. I. Nachman, *Global uniqueness for a two-dimensional inverse boundary value problem*, Ann. of Math., **143** (1996), 71–96.
830
+
831
+ [37] G. Nakamura and K. Tanuma, *Local determination of conductivity at the boundary from the Dirichlet-to-Neumann map*, Inverse Problems, **17** (2001), 405–419.
832
+
833
+ [38] G. Nakamura and K. Tanuma, *Direct determination of the derivatives of conductivity at the boundary from the localized Dirichlet to Neumann map*, Comm. Korean Math. Soc., **16** (2001), 415–425.
834
+
835
+ [39] G. Nakamura and K. Tanuma, *Formulas for reconstructing conductivity and its normal derivative at the boundary from the localized Dirichlet to Neumann map*, in "Recent Development in Theories & Numerics, Int. Conf. on Inverse Problems" (eds. Yiu-Chung Hon, Masahiro Yamamoto, Jin Cheng and June-Yub Lee), World Scientific, (2003), 192–201.
836
+
837
+ [40] G. Nakamura, K. Tanuma, S. Siltanen and S. Wang, *Numerical recovery of conductivity at the boundary from the localized Dirichlet to Neumann map*, Computing, **75** (2004), 197–213.
838
+
839
+ [41] J. C. Newell, R. S. Blue, D. Isaacson, G. J. Saulnier and A. S. Ross, *Phasic three-dimensional impedance imaging of cardiac activity*, Physiol. Meas., **23** (2002), 203–209.
840
+
841
+ [42] L. Päivärinta, A. Panchenko and G. Uhlmann, *Complex geometrical optics for Lipschitz conductivities*, Rev. Mat. Iberoam., **19** (2003), 57–72.
842
+
843
+ [43] R. L. Robertson, *Boundary identifiability of residual stress via the Dirichlet to Neumann map*, Inverse Problems, **13** (1997), 1107–1119.
844
+
845
+ [44] E. Somersalo, M. Cheney and D. Isaacson, *Existence and uniqueness for electrode models for electric current computed tomography*, SIAM J. Appl. Math., **52** (1992), 1023–1040.
846
+
847
+ [45] G. Strang and G. Fix, "An Analysis of The Finite Element Method," Prentice Hall, 1973.
848
+
849
+ [46] J. Sylvester, *A convergent layer stripping algorithm for the radially symmetric impedance tomography problem*, Comm. PDE, **17** (1992), 1955–1994.
850
+
851
+ [47] J. Sylvester and G. Uhlmann, *A global uniqueness theorem for an inverse boundary value problem*, Ann. of Math., **125** (1987), 153–169.
852
+
853
+ [48] J. Sylvester and G. Uhlmann, *Inverse boundary value problems at the boundary — continuous dependence*, Comm. Pure Appl. Math., **41** (1988), 197–221.
854
+
855
+ [49] P. J. Vauhkonen, "Image Reconstruction in Three-Dimensional Electrical Impedance Tomography," Ph.D thesis, University of Kuopio, 2004.
856
+ ---PAGE_BREAK---
857
+
858
+ [50] P. J. Vauhkonen, M. Vauhkonen, T. Savolainen and J. P. Kaipio, *Static three-dimensional electrical impedance tomography*, Ann. New York Acad. Sci., **873** (1999), 472–481.
859
+
860
+ [51] P. J. Vauhkonen, M. Vauhkonen, T. Savolainen and J. P. Kaipio, *Three-dimensional electrical impedance tomography based on the complete electrode model*, IEEE Trans. Biomed. Eng., **46** (1999), 1150–1160.
861
+
862
+ [52] A. Wexler, *Electrical impedance imaging in two and three dimensions*, Clin. Phys. Physiol. Meas., Suppl A, **9** (1988), 29–33.
863
+
864
+ Received April 2010; revised August 2010.
865
+
866
+ *E-mail address:* gnaka@math.sci.hokudai.ac.jp
867
+
868
+ *E-mail address:* Paivi.Ronkanen@uef.fi
869
+
870
+ *E-mail address:* samuli.siltanen@helsinki.fi
871
+
872
+ *E-mail address:* tanuma@gunma-u.ac.jp
samples/texts_merged/4523932.md ADDED
@@ -0,0 +1,952 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Differential Evolution - A simple and efficient adaptive scheme for global optimization over continuous spaces
5
+
6
+ by Rainer Storn¹) and Kenneth Price²)
7
+
8
+ TR-95-012
9
+
10
+ March 1995
11
+
12
+ ## Abstract
13
+
14
+ A new heuristic approach for minimizing possibly nonlinear and non differentiable continuous space functions is presented. By means of an extensive testbed, which includes the De Jong functions, it will be demonstrated that the new method converges faster and with more certainty than Adaptive Simulated Annealing as well as the Annealed Nelder&Mead approach, both of which have a reputation for being very powerful. The new method requires few control variables, is robust, easy to use and lends itself very well to parallel computation.
15
+
16
+ ¹) International Computer Science Institute, 1947 Center Street, Berkeley, CA 94704-1198, Suite 600, Fax: 510-643-7684. E-mail: storn@icsi.berkeley.edu. On leave from Siemens AG, ZFE T SN 2, Otto-Hahn-Ring 6, D-81739 Muenchen, Germany. Fax: 01149-636-44577, E-mail:rainer.storn@zfe.siemens.de.
17
+
18
+ ²) 836 Owl Circle, Vacaville, CA 95687, kprice@solano.community.net.
19
+ ---PAGE_BREAK---
20
+
21
+ # Introduction
22
+
23
+ Problems which involve global optimization over continuous spaces are ubiquitous throughout the scientific community. In general, the task is to optimize certain properties of a system by pertinently choosing the system parameters. For convenience, a system's parameters are usually represented as a vector. The standard approach to an optimization problem begins by designing an objective function that can model the problem's objectives while incorporating any constraints. Especially in the circuit design community, methods are in use which do not need an objective function [1], [2], [3]. Although these methods can make formulating a problem simpler, they are usually inferior to techniques which make full use of an objective function. Consequently, we restrict ourselves to optimization methods which fully use the objective function. In most cases, the objective function is designed to transform the optimization problem into a minimization task. To this end, we will limit our investigation in the following to minimization problems.
24
+
25
+ When the objective function is nonlinear and non differentiable, direct search approaches are the methods of choice. The best known of these are the algorithms by Nelder&Mead [4], by Hooke&Jeeves [4], genetic algorithms [5], and evolutionary algorithms [6], [7] with the latter being truly continuous counterparts of genetic algorithms. At the heart of every direct search method is a strategy that generates variations of the parameter vectors. Once a variation is generated, a decision must be made whether or not to accept the newly derived parameters. All basic direct search methods use the greedy criterion to make this decision. Under the greedy criterion, a new parameter vector is accepted if and only if it reduces the value of the objective function. Although the greedy decision process converges fairly fast, it runs the risk of becoming trapped by a local minimum. Inherently parallel search techniques like genetic and evolutionary algorithms have some built-in safeguards to forestall misconvergence. By running several vectors simultaneously, superior parameter configurations can help other vectors escape local minima. Another method which can extricate a parameter vector from a local minimum is Simulated Annealing [8], [9], [10]. Annealing relaxes the greedy criterion by occasionally permitting an uphill move. Such moves potentially allow a parameter vector to climb out of a local minimum. As the number of iterations increases, the probability of accepting an uphill move decreases. In the long run, this leads to the greedy criterion. While all direct search methods lend themselves to annealing, it has mostly been used just for the Random Walk, which itself is the simplest case of an evolutionary algorithm [6]. Nevertheless, attempts have been made to anneal other direct searches like the method of Nelder&Mead [10] and genetic algorithms [8], [11].
26
+
27
+ Users generally demand that a practical optimization technique should fulfill three requirements. First, the method should find the true global minimum, regardless of the initial system parameter values. Second, convergence should be fast. Third, the program should have a minimum of control parameters so that it will be easy to use. In our search for a fast and easy to use "sure fire" technique, we developed a method which is not only astonishingly simple, but also performs extremely well on a wide variety of test problems. It is inherently parallel and hence lends itself to computation via a network of computers or processors. The basic strategy employs the difference of two randomly selected parameter vectors as the source of random variations for a third parameter vector. In the following, we present a more rigorous description of the new optimization method which we call Differential Evolution.
28
+ ---PAGE_BREAK---
29
+
30
+ **Problem Formulation**
31
+
32
+ Consider a system with the real valued properties
33
+
34
+ $$g_m; m = 0, 1, 2, \dots, P-1 \tag{1}$$
35
+
36
+ which constitute the objectives of the system to be optimized.
37
+
38
+ Additionally, there may be real valued constraints
39
+
40
+ $$g_m; m = P, P+1, \dots, P+C-1 \tag{2}$$
41
+
42
+ which describe properties of the system that need not be optimized but neither shall be degraded. For example, one may wish to design a mobile phone with the dual objectives of maximizing the transmission power $g_1$ and minimizing the noise $g_2$ of the audio amplifier while simultaneously keeping the battery life $g_3$ above a certain threshold. The properties $g_1$ and $g_2$ represent objectives to be optimized whereas $g_3$ is a constraint. Let all properties of the system be dependent on the real valued parameters
43
+
44
+ $$x_j; j = 0, 1, 2, \dots, D-1. \tag{3}$$
45
+
46
+ In the case of the mobile phone the parameters could be resistor and capacitor values. For most technical systems realizability requires
47
+
48
+ $$x_j \in [x_{jL}, x_{jH}]. \tag{4}$$
49
+
50
+ Usually, restrictions on the $x_j$ will be incorporated into the collection $g_m$, $m \ge P$, of constraints. Optimization of the system means to vary the D-dimensional parameter vector
51
+
52
+ $$\underline{x} = (x_0, x_1, \dots, x_{D-1})^T \tag{5}$$
53
+
54
+ until the properties $g_m$ are optimized and the constraints $g_m$, $m \ge P$, are met. An optimization task can always be reformulated as the minimization problem
55
+
56
+ $$\min f_m(\underline{x}) \tag{6}$$
57
+
58
+ where $f_m(\underline{x})$ represents the function by which the property $g_m$ is calculated and its optimization or constraint preservation is represented as the minimization of $f_m(\underline{x})$. All functions $f_m(\underline{x})$ can be combined into a single objective function $z(\underline{x})$ [2], [12], which usually is computed either via the weighted sum
59
+
60
+ $$z(\underline{x}) = \sum_{m=0}^{P+C-1} w_m \cdot f_m(\underline{x}) \tag{7}$$
61
+
62
+ or via
63
+
64
+ $$z(\underline{x}) = \max_{m}\left(w_m \cdot f_m(\underline{x})\right) \tag{8}$$
65
+
66
+ with
67
+
68
+ $$w_m > 0. \tag{9}$$
69
+
70
+ The weighting factors $w_m$ are used to define the importance associated with the different objectives and constraints as well as to normalize different physical units. The optimization task can now be restated as
71
+
72
+ $$\min z(\underline{x}) \tag{10}$$
73
+
74
+ The min-max formulation (8) and (10) guarantees that all local minima, the Pareto critical points, including the possibly multiple global minima, the Pareto points, can at least theoretically be found [2], [12]. For the objective function (7) and (10) this is true only if the region of realizability of $\underline{x}$ is convex [1], [2], which in general does not apply in most technical problems.
75
+ ---PAGE_BREAK---
76
+
77
+ # The Method of Differential Evolution
78
+
79
+ Differential Evolution (DE) is a novel parallel direct search method which utilizes NP parameter vectors
80
+
81
+ $$ \underline{x}_{i,G}, i = 0, 1, 2, \dots, \text{NP-1}. \qquad (11) $$
82
+
83
+ as a population for each generation G. NP doesn't change during the minimization process. The initial population is chosen randomly if nothing is known about the system. As a rule, we will assume a uniform probability distribution for all random decisions unless otherwise stated. In case a preliminary solution is available, the initial population is often generated by adding normally distributed random deviations to the nominal solution $\underline{x}_{\text{nom},0}$. The crucial idea behind DE is a new scheme for generating trial parameter vectors. DE generates new parameter vectors by adding the weighted difference vector between two population members to a third member. If the resulting vector yields a lower objective function value than a predetermined population member, the newly generated vector replaces the vector with which it was compared. The comparison vector can, but need not be part of the generation process mentioned above. In addition the best parameter vector $\underline{x}_{\text{best},G}$ is evaluated for every generation G in order to keep track of the progress that is made during the minimization process.
84
+
85
+ Extracting distance and direction information from the population to generate random deviations results in an adaptive scheme with excellent convergence properties. We tried several variants of DE, the two most promising of which we subsequently present in greater detail.
86
+
87
+ ## Scheme DE1
88
+
89
+ Our first variant of DE works as follows: for each vector $\underline{x}_{i,G}$, $i = 0,1,2,\dots,\text{NP}-1$, a trial vector $\underline{v}$ is generated according to
90
+
91
+ $$ \underline{v} = \underline{x}_{r_1,G} + F \cdot (\underline{x}_{r_2,G} - \underline{x}_{r_3,G}), \qquad (12) $$
92
+
93
+ with
94
+
95
+ $$ r_1, r_2, r_3 \in [0, \text{NP} - 1], \text{ integer and mutually different, and } F > 0. \qquad (13) $$
96
+
97
+ The integers $r_1, r_2$ and $r_3$ are chosen randomly from the interval $[0, \text{NP}-1]$ and are different from the running index $i$. $F$ is a real and constant factor which controls the amplification of the differential variation $(\underline{x}_{r_2,G} - \underline{x}_{r_3,G})$. Fig. 1 shows a two dimensional example that illustrates the different vectors which play a part in DE1.
98
+ ---PAGE_BREAK---
99
+
100
+ Fig.1: Two dimensional example of an objective function showing its contour lines and the process for generating **v** in scheme DE1.
101
+
102
+ In order to increase the diversity of the parameter vectors, the vector
103
+
104
+ $$ \underline{u} = (u_0, u_1, \dots, u_{D-1})^T \qquad (14) $$
105
+
106
+ with
107
+
108
+ $$ u_j = \begin{cases} v_j & \text{for } j = \langle n \rangle_D, \langle n+1 \rangle_D, \dots, \langle n+L-1 \rangle_D \\ (x_{i,G})_j & \text{otherwise} \end{cases} \qquad (15) $$
109
+
110
+ is formed where the acute brackets $\langle \rangle_D$ denote the modulo function with modulus D.
111
+
112
+ I.e. a certain sequence of the vector elements of $\underline{u}$ is identical to the elements of $\underline{v}$, the other elements of $\underline{u}$ acquire the original values of $x_{i,G}$. Choosing a subgroup of parameters for mutation is similar to a process known as crossover in evolution theory. This idea is illustrated in Fig. 2 for D=7, n=2 and L=3. The starting index *n* in (15) is a randomly chosen integer from the interval [0, D-1]. The integer *L* is drawn from the interval [0, D-1] with the probability $\Pr(L=v) = (CR)^{v}$. CR ∈ [0,1] is the crossover probability and constitutes a control variable for the DE1-scheme. The random decisions for both *n* and *L* are made anew for each trial vector *v*.
113
+ ---PAGE_BREAK---
114
+
115
+ Fig. 2: Illustration of the crossover process for D=7, n=2 and L=3.
116
+
117
+ In order to decide whether the new vector **u** shall become a population member of generation G+1, it will be compared to **x**_{i,G}. If vector **u** yields a smaller objective function value than **x**_{i,G}, **x**_{i,G+1} is set to **u**, otherwise the old value **x**_{i,G} is retained.
118
+
119
+ ## Scheme DE2
120
+
121
+ Basically, scheme DE2 works the same way as DE1 but generates the vector **v** according to
122
+
123
+ $$ \underline{v} = \underline{x}_{i,G} + \lambda \cdot (\underline{x}_{best,G} - \underline{x}_{i,G}) + F \cdot (\underline{x}_{r2,G} - \underline{x}_{r3,G}), \quad (16) $$
124
+
125
+ introducing an additional control variable $\lambda$. The idea behind $\lambda$ is to provide a means to enhance the greediness of the scheme by incorporating the current best vector $\underline{x}_{best,G}$. This feature can be useful for non-critical objective functions. Fig. 3 illustrates the vector-generation process defined by (16). The construction of $\underline{u}$ from $\underline{v}$ and $\underline{x}_{i,G}$ as well as the decision process are identical to DE1.
126
+ ---PAGE_BREAK---
127
+
128
+ Fig.3: Two dimensional example of an objective function showing its contour lines and the process for generating *v* in scheme DE2.
129
+
130
+ **Competing minimization methods**
131
+
132
+ In order to compare the DE method with other global minimizing strategies, we looked for approaches where the source code is readily available, which are known to be powerful and which are capable of coping with nonlinear and non differentiable functions. Two methods in particular piqued our interest. The first was the annealed version of the Nelder&Mead strategy (ANM) [10] which is appealing because of its adaptive scheme for generating random parameter deviations. When the annealing part is switched off, a fast converging direct search method remains which is especially useful for non-critical objective functions. The basic control variables in ANM are T, the starting temperature, TF, the temperature reduction factor and NV, the number of random variations at a given temperature level.
133
+
134
+ The second method of interest was Adaptive Simulated Annealing (ASA) [8] which claims to converge very quickly and to outperform genetic algorithms on the De Jong test suite [9]. Although ASA provides more than a dozen control variables, it turned out that just two of them, TEMPERATURE_RATIO_SCALE (TRS) and TEMPERATURE_ANNEAL_SCALE (TAS), had significant impact on the minimization process. We will compare both ANM and ASA to DE1 and DE2. During our research we also wrote an annealed version of the Hooke&Jeeves method [5] and tested two Monte Carlo methods [3], one of which used NP parallel vectors and the differential mutation scheme of DE. Although these approaches all worked, they quickly turned out not to be competitive.
135
+
136
+ **The Testbed**
137
+
138
+ Our function testbed contains the De Jong test functions as presented in [9] plus some additional
139
+ functions which present further distinctive difficulties for a global minimizer:
140
+ ---PAGE_BREAK---
141
+
142
+ 1) First De Jong function (sphere)
143
+
144
+ $$f_1(\underline{x}) = \sum_{j=0}^{2} x_j^2; \qquad x_j \in [-5.12, 5.12] \tag{17}$$
145
+
146
+ $f_1(\underline{x})$ is considered to be a very simple task for every serious minimization method. The minimum is
147
+ $f_1(0) = 0$.
148
+
149
+ 2) Second De Jong function (Rosenbrock's saddle)
150
+
151
+ $$f_2(\underline{x}) = 100 \cdot (x_0^2 - x_1)^2 + (1 - x_0)^2; \qquad x_j \in [-2.048, 2.048] \tag{18}$$
152
+
153
+ Although $f_2(\underline{x})$ has just two parameters, it has the reputation of being a difficult minimization problem. The minimum is $f_2(\underline{1})=0$.
154
+
155
+ 3) Third De Jong function (step)
156
+
157
+ $$f_3(\underline{x}) = 30 + \sum_{j=0}^{4} \lfloor x_j \rfloor; \qquad x_j \in [-5.12, 5.12] \tag{19}$$
158
+
159
+ For $f_3(\underline{x})$ it is necessary to incorporate the constraints imposed on the $x_j$ into the objective function.
160
+ We implemented this according to the min-max formulation (8). The minimum is
161
+ $f_3(-5-\epsilon)=0$ where $\epsilon \in [0,0.12]$. The step function exhibits many plateaus which pose a considerable
162
+ problem for many minimization algorithms.
163
+
164
+ 4) Modified fourth De Jong function (quartic)
165
+
166
+ $$f_4(\underline{x}) = \sum_{j=0}^{29} (x_j^4 \cdot (j+1) + \eta); \qquad x_j \in [-1.28, 1.28] \tag{20}$$
167
+
168
+ This function is designed to test the behavior of a minimization algorithm in the presence of noise.
169
+ In the original De Jong function, $\eta$ is a random variable produced by Gaussian noise having the
170
+ distribution N(0,1). According to [9], this function appears to be flawed as no definite global
171
+ minimum exists. In response to the problem, we followed the suggestion given in [9] and chose $\eta$ to
172
+ be a random variable with uniform distribution and bounded by [0,1). In contrast to the original
173
+ version of De Jong's quartic function, we also included $\eta$ inside the summation instead of just
174
+ adding $\eta$ to the summation result. This change makes $f_4(\underline{x})$ more difficult to minimize. The
175
+ functional minimum is $f_4(0) \le 30 \cdot E[\eta] = 15$, where $E[\eta]$ is the expectation of $\eta$.
176
+
177
+ 5) Fifth De Jong function (Shekel's Foxholes)
178
+
179
+ $$f_5(\underline{x}) = \frac{1}{0.002 + \sum_{i=0}^{24} \frac{1}{i + 1 + \sum_{j=0}^{1}(x_j - a_{ij})^6}}; \qquad x_j \in [-65.536, 65.536] \tag{21}$$
180
+
181
+ with $a_{i0} = \{-32, -16, 0, 16, 32\}$ for $i = 0, 1, 2, 3, 4$ and $a_{i0} = a_{i \bmod 5,\, 0}$ for $i \geq 5$,
182
+
183
+ as well as $a_{i1} = \{-32, -16, 0, 16, 32\}$ for $i = 0, 5, 10, 15, 20$ and $a_{i+k,\,1} = a_{i1}$ for $k = 1, 2, 3, 4$.
184
+
185
+ The global minimum for this function is $f_5(-32,-32) \approx 0.998004$.
186
+ ---PAGE_BREAK---
187
+
188
+ 6) Corana's parabola [8], [13]
189
+
190
+ $$
191
+ f_6(\underline{x}) = \sum_{j=0}^{3} \begin{cases} 0.15(z_j - 0.05 \cdot \operatorname{sgn}(z_j))^2 \cdot d_j & \text{if } |x_j - z_j| < 0.05 \\ d_j \cdot x_j^2 & \text{otherwise} \end{cases}; x_j \in [-1000, 1000] \quad (22)
192
+ $$
193
+
194
+ with
195
+ $$
196
+ z_j = \left\lfloor \frac{|x_j|}{0.2} + 0.49999 \right\rfloor \cdot \operatorname{sgn}(x_j) \cdot 0.2
197
+ $$
198
+
199
+ and
200
+ $d_j = \{1,1000,10,100\}$
201
+
202
+ $f_6(\underline{x})$ defines a paraboloid whose axes are parallel to the coordinate axes. It is riddled with a set of holes that increase in depth the closer one approaches the origin. Any minimization algorithm that goes strictly downhill will almost always be captured by the holes. The minimum here is $f_6(\underline{x}) = 0$, with $|x_j| < 0.05$, $j=0,1,2,3$.
203
+
204
+ 7) Griewangk's function [14]
205
+
206
+ $$
207
+ f_7(\underline{x}) = \sum_{j=0}^{9} \frac{x_j^2}{4000} - \prod_{j=0}^{9} \cos\left(\frac{x_j}{\sqrt{j+1}}\right) + 1; \quad x_j \in [-400, 400] \qquad (23)
208
+ $$
209
+
210
+ Like test function f₆(x), f₇(x) has many local minima so that it is very difficult to find the true
211
+ minimum f₇(0) = 0.
212
+
213
+ 8) Zimmermann's problem [15]
214
+
215
+ $$
216
+ f_8(\underline{x}) = 9 - x_0 - x_1; \qquad x_j > 0,\ j = 0, 1 \tag{24}
217
+ $$
218
+
219
+ with
220
+ $$
221
+ (x_0 - 3)^2 + (x_1 - 2)^2 \leq 16 \tag{25}
222
+ $$
223
+
224
+ and
225
+ $$
226
+ x_0 \cdot x_1 \leq 14 \tag{26}
227
+ $$
228
+
229
+ Finding the minimum $f_8(7,2)=0$ poses a special problem, because the minimum is located at the corner of the constrained region defined by (24), (25) and (26).
230
+
231
+ 9) Polynomial fitting problem
232
+
233
+ $$
234
+ f_9(\underline{x}, z) = \sum_{j=0}^{2k} x_j \cdot z^j, k \text{ integer and } >0, \qquad (27)
235
+ $$
236
+
237
+ is a polynomial of degree 2k in z with the coefficients x_j such that
238
+
239
+ $$
240
+ f_9(\underline{x}, z) \in [-1,1] \quad \text{for} \quad z \in [-1,1] \tag{28}
241
+ $$
242
+
243
+ and
244
+ $f_9(\underline{x},z) \ge T_{2k}(1.2)$ for $z = \pm 1.2$
245
+ (29)
246
+
247
+ with $T_{2k}(z)$ being a Chebychev Polynomial of degree 2k. The Chebychev Polynomials are defined recursively according to the difference equation $T_{n+1}(z) = 2z \cdot T_n(z) - T_{n-1}(z)$, $n$ integer and $> 0$, with the initial conditions $T_0(z)=1$ and $T_1(z)=z$. The solution to the polynomial fitting problem is, of course, $f_9(\underline{x}, z) = T_{2k}(z)$, a polynomial which oscillates between -1 and 1 when its argument $z$ is between -1 and 1. Outside this "tube" the polynomial rises steeply in direction of high positive ordinate values. The polynomial fitting problem has its roots in electronic filter design [16] and
248
+ ---PAGE_BREAK---
249
+
250
+ challenges an optimization procedure by forcing it to find parameter values with grossly different magnitudes, something very common in technical systems. In our test suite we employed
251
+
252
+ $$T_8(z) = 1 - 32z^2 + 160z^4 - 256z^6 + 128z^8 \quad (30)$$
253
+
254
+ with
255
+
256
+ $$T_8(1.2) \approx 72.6606669 \quad (31)$$
257
+
258
+ as well as
259
+
260
+ $$T_{16}(z) = 1 - 128z^2 + 2688z^4 - 21504z^6 + 84480z^8 - \\
261
+ 180224z^{10} + 212992z^{12} - 131072z^{14} + 32768z^{16} \quad (32)$$
262
+
263
+ with
264
+
265
+ $$T_{16}(1.2) \approx 10558.1450229. \quad (33)$$
266
+
267
+ and used the weighted sum (7) of squared errors in order to transform the above constrained optimization problem into an objective function to be minimized. The starting values for the parameters were drawn randomly from the interval [-100,100] for (30), (31) and [-1000,1000] for (32), (33).
268
+
269
+ ## Test Results
270
+
271
+ We tried to optimize each of the four algorithms by experimenting to find the control settings which provided fastest and smoothest convergence. Table I contains our choice of control variable settings for each minimization algorithm and each test function along with the averaged number of function evaluations (nfe) which were required to find the global minimum.
272
+
273
+ <table><thead><tr><th rowspan="2">f<sub>i</sub>(x)</th><th colspan="4">ANM</th><th colspan="3">ASA</th><th colspan="4">DE1</th><th colspan="4">DE2 (F=1)</th></tr><tr><th>T</th><th>TF</th><th>NV</th><th>nfe</th><th>TRS</th><th>TAS</th><th>nfe</th><th>NP</th><th>F</th><th>CR</th><th>nfe</th><th>NP</th><th>λ</th><th>CR</th><th>nfe</th></tr></thead><tbody><tr><td>1</td><td>0</td><td>n.a.</td><td>1</td><td>95</td><td>1·10<sup>-5</sup></td><td>10</td><td>397</td><td>10</td><td>0.5</td><td>0.3</td><td>490</td><td>6</td><td>0.95</td><td>0.5</td><td>392</td></tr><tr><td>2</td><td>0</td><td>n.a.</td><td>1</td><td>106</td><td>1·10<sup>-5</sup></td><td>10000</td><td>11275</td><td>6</td><td>0.95</td><td>0.5</td><td>746</td><td>6</td><td>0.95</td><td>0.5</td><td>615</td></tr><tr><td>3</td><td>300</td><td>0.99</td><td>20</td><td>90258</td><td>1·10<sup>-7</sup></td><td>100</td><td>354</td><td>10</td><td>0.8</td><td>0.3</td><td>915</td><td>20</td><td>0.95</td><td>0.2</td><td>1300</td></tr><tr><td>4</td><td>300</td><td>0.98</td><td>30</td><td>-</td><td>1·10<sup>-5</sup></td><td>100</td><td>4812</td><td>10</td><td>0.75</td><td>0.5</td><td>2378</td><td>10</td><td>0.95</td><td>0.2</td><td>2873</td></tr><tr><td>5</td><td>3000</td><td>0.995</td><td>50</td><td>-</td><td>1·10<sup>-5</sup></td><td>100</td><td>1379</td><td>15</td><td>0.9</td><td>0.3</td><td>735</td><td>20</td><td>0.95</td><td>0.2</td><td>828</td></tr><tr><td>6</td><td>5·10<sup>6</sup></td><td>0.995</td><td>100</td><td>-</td><td>1·10<sup>-5</sup></td><td>100</td><td>3581</td><td>10</td><td>0.4</td><td>0.2</td><td>834</td><td>10</td><td>0.9</td><td>0.2</td><td>1125</td></tr><tr><td>7</td><td>10</td><td>0.99</td><td>50</td><td>-</td><td>1·10<sup>-5</sup></td><td>0.1</td><td>-</td><td>30</td><td>1.</td><td>0.3</td><td>22167</td><td>20</td><td>0.99</td><td>0.2</td><td>12804</td></tr><tr><td>8</td><td>5</td><td>0.95</td><td>5</td><td>2116</td><td>1·10<sup>-6</sup></td><td colspan="10"><!-- remaining cells of this row were lost in PDF extraction --></td></tr><tr><td colspan="16"><!-- remaining rows of Table I were lost in PDF extraction --></td></tr></tbody></table>
274
+
275
+ **Table I:** Averaged number of function evaluations (nfe) required for finding the global minimum. A hyphen indicates misconvergence and n.a. stands for "not applicable".
907
+ ---PAGE_BREAK---
908
+
909
+ If the corresponding field for the number of function evaluations contains a hyphen, the global minimum could not be found. If the number is enclosed in parentheses, not all of the test runs provided the global minimum. We executed ten test runs with randomly chosen initial parameter vectors for each test function and each minimization.
910
+
911
+ When the global minimum was 0, we defined the minimization task to be completed once the final value was obtained with an accuracy better than $10^{-6}$. For $f_4(x)$, we chose a value less than 15 to indicate the global minimum and a value less than 0.998004 in the case of $f_5(x)$.
912
+
913
+ ## Conclusion
914
+
915
+ The Differential Evolution method (DE) for minimizing continuous space functions has been introduced and shown to be superior to Adaptive Simulated Annealing (ASA) [8] as well as the Annealed Nelder&Mead approach (ANM) [10]. DE was the only technique to converge for all of the functions in our test function suite. For those problems where ASA or ANM could find the minimum, DE usually converged faster, especially in the more difficult cases. Since DE is inherently parallel, a further significant speedup can be obtained if the algorithm is executed on a parallel machine or a network of computers. This is especially true for real world problems where computing the objective function requires a significant amount of time.
916
+
917
+ Despite these already promising results, DE is still in its infancy and can most probably be improved. Further research might include a mathematical convergence proof like the one that exists for Simulated Annealing. A theoretically sound analysis to determine why DE converges so well would also be of great interest. Whether or not an annealed version of DE, or the combination of DE with other optimization approaches is of practical use, is still unanswered. Finally, it is important for practical applications to gain more knowledge on how to choose the control variables for DE.
918
+ ---PAGE_BREAK---
919
+
920
+ References
921
+
922
+ 1. Brayton, H., Hachtel, G. and Sangiovanni-Vincentelli, A., A Survey of Optimization Techniques for Integrated Circuit Design, Proc. IEEE 69, 1981, pp. 1334 - 1362.
923
+
924
+ 2. Lueder, E., Optimization of Circuits with a Large Number of Parameters, Archiv f. Elektr. u. Uebertr., Band 44, Heft 2, 1990, pp 131 - 138.
925
+
926
+ 3. Storn, R., Constrained Optimization, Dr. Dobb's Journal, May 1995, pp. 119 - 123.
927
+
928
+ 4. Bunday, B.D. and Garside G.R., Optimisation Methods in Pascal, Edward Arnold Publ., 1987.
929
+
930
+ 5. Goldberg, D.E., Genetic Algorithms in Search, Optimization & Machine Learning, Addison-Wesley, 1989.
931
+
932
+ 6. Rechenberg, I., Evolutionsstrategie: Optimierung technischer Systeme nach Prinzipien der biologischen Evolution. Frommann-Holzboog, Stuttgart, 1973.
933
+
934
+ 7. Voigt, H. M., Fuzzy Evolutionary Algorithms, Technical Report TR-92-038 at ICSI, ftp.icsi.berkeley.edu, 1992.
935
+
936
+ 8. Ingber, L., Simulated Annealing: Practice Versus Theory, J. Mathl. Comput. Modelling, Vol. 18, No. 11, 1993, pp. 29 - 57.
937
+
938
+ 9. Ingber, L. and Rosen, B., Genetic Algorithms and Very Fast Simulated Annealing: A Comparison, J. Mathl. Comput. Modelling, Vol. 16, No. 11, 1992, pp. 87 - 100.
939
+
940
+ 10. Press, W.H., Teukolsky, S.A., Vetterling, W.T. and Flannery, B.P., Numerical Recipes in C, Cambridge University Press, 1992.
941
+
942
+ 11. Price, K., Genetic Annealing, Dr. Dobb's Journal, Oct. 1994, pp. 127 - 132.
943
+
944
+ 12. Moebus, D., Algorithmen zur Optimierung von Schaltungen und zur Loesung nichtlinearer Differentialgleichungen, Diss. am Inst. fuer Netzwerk- und Systemtheorie der Univ. Stuttgart, 1990.
945
+
946
+ 13. Corana, A., Marchesi, M., Martini, C. and Ridella, S., Minimizing Multimodal Functions of Continuous Variables with the "Simulated Annealing Algorithm", ACM Trans. Mathl. Software, March 1987, pp. 272 - 280.
947
+
948
+ 14. Griewangk, A.O., Generalized Descent for Global Optimization, JOTA, vol. 34, 1981, pp. 11 - 39.
949
+
950
+ 15. Zimmermann, W., Operations Research, Oldenbourg, 1990.
951
+
952
+ 16. Rabiner, L.R. and Gold, B., Theory and Applications of Digital Signal Processing, Prentice-Hall, Englewood Cliffs, N.J., 1975.
samples/texts_merged/4579765.md ADDED
@@ -0,0 +1,623 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ On the Sensitivity Conjecture
5
+
6
+ Avishay Tal *
7
+
8
+ April 18, 2016
9
+
10
+ Abstract
11
+
12
+ The sensitivity of a Boolean function $f: \{0,1\}^n \to \{0,1\}$ is the maximal number of neighbors a point in the Boolean hypercube has with different $f$-value. Roughly speaking, the block sensitivity allows to flip a set of bits (called a block) rather than just one bit, in order to change the value of $f$. The sensitivity conjecture, posed by Nisan and Szegedy (CC, 1994), states that the block sensitivity, $bs(f)$, is at most polynomial in the sensitivity, $s(f)$, for any Boolean function $f$. A positive answer to the conjecture will have many consequences, as the block sensitivity is polynomially related to many other complexity measures such as the certificate complexity, the decision tree complexity and the degree. The conjecture is far from being understood, as there is an exponential gap between the known upper and lower bounds relating $bs(f)$ and $s(f)$.
13
+
14
+ We continue a line of work started by Kenyon and Kutin (Inf. Comput., 2004), studying the $\ell$-block sensitivity, $bs_\ell(f)$, where $\ell$ bounds the size of sensitive blocks. While for $bs_2(f)$ the picture is well understood with almost matching upper and lower bounds, for $bs_3(f)$ it is not. We show that any development in understanding $bs_3(f)$ in terms of $s(f)$ will have great implications on the original question. Namely, we show that either $bs(f)$ is at most sub-exponential in $s(f)$ (which improves the state of the art upper bounds) or that $bs_3(f) \ge s(f)^{3-\epsilon}$ for some Boolean functions (which improves the state of the art separations).
15
+
16
+ We generalize the question of $bs(f)$ versus $s(f)$ to bounded functions $f: \{0,1\}^n \to [0,1]$ and show an analog result to that of Kenyon and Kutin: $bs_\ell(f) = O(s(f))^\ell$. Surprisingly, in this case, the bounds are close to being tight. In particular, we construct a bounded function $f: \{0,1\}^n \to [0,1]$ with $bs(f) \ge n/\log n$ and $s(f) = O(\log n)$, a clear counterexample to the sensitivity conjecture for bounded functions.
17
+
18
+ Finally, we give a new super-quadratic separation between sensitivity and decision tree complexity by constructing Boolean functions with $\mathrm{DT}(f) \ge s(f)^{2.115}$. Prior to this work, only quadratic separations, $\mathrm{DT}(f) = s(f)^2$, were known.
19
+
20
+ *Institute for Advanced Study, Princeton, NJ. Email: avishay.tal@gmail.com. Research supported by the Simons Foundation, and by the National Science Foundation grant No. CCF-1412958. Any opinions, findings and conclusions or recommendations expressed in this material are those of the author and do not necessarily reflect the views of the National Science Foundation.
21
+ ---PAGE_BREAK---
22
+
23
+ # 1 Introduction
24
+
25
+ A long-standing open problem in complexity and combinatorics asks what is the relationship between two complexity measures of Boolean functions: the sensitivity and block-sensitivity. We first recall the definition of the two complexity measures.
26
+
27
+ **Definition 1.1.** Let $f : \{0,1\}^n \to \{0,1\}$ be a Boolean function and $x \in \{0,1\}^n$ be a point. The sensitivity of $f$ at $x$ is the number of neighbors $y$ of $x$ in the Hamming cube such that $f(y) \neq f(x)$, i.e., $s(f,x) \triangleq | \{i \in [n] : f(x) \neq f(x \oplus e_i) \} |$.¹ The (maximal) sensitivity of $f$ is defined as $s(f) \triangleq \max_{x \in \{0,1\}^n} s(f,x)$.
28
+
29
+ **Definition 1.2.** Let $f : \{0,1\}^n \to \{0,1\}$ be a Boolean function and $x \in \{0,1\}^n$ be a point. For a block $B \subseteq [n]$, denote by $\mathbb{1}_B \in \{0,1\}^n$ its characteristic vector, i.e., $(\mathbb{1}_B)_i = 1$ iff $i \in B$. We say that a block $B$ is sensitive for $f$ on $x$ if $f(x) \neq f(x \oplus \mathbb{1}_B)$. The block-sensitivity of $f$ at $x$ is the maximal number of disjoint sensitive blocks for $f$ at $x$, i.e.,
30
+
31
+ $$bs(f, x) = \max\{r : \exists \text{ disjoint } B_1, B_2, \dots, B_r \subseteq [n], f(x) \neq f(x \oplus \mathbb{1}_{B_i})\}.$$
32
+
33
+ The (maximal) block-sensitivity of $f$ is defined as $bs(f) \triangleq \max_{x \in \{0,1\}^n} bs(f,x)$.
34
+
35
+ For shorthand, we will denote $(x \oplus e_i)$ and $(x \oplus \mathbb{1}_B)$ by $(x + e_i)$ and $(x + B)$ respectively. By definition, the block-sensitivity is at least the sensitivity by considering only blocks of size 1. The sensitivity conjecture, posed by Nisan and Szegedy [NS94], asks if a relation in the other direction holds as well.
36
+
37
+ **Conjecture 1.3 (The Sensitivity Conjecture).** $\exists d \; \forall f : bs(f) \leq s(f)^d$.
38
+
39
+ A stronger variant of the conjecture states that $d$ can be taken to be 2. Despite much work on the problem [Nis89, NS94, Rub95, KK04, Cha11, Vir11, AS11, HKP11, Bop12, ABG$^+$14, AP14, AV15, APV15, GKS15, Sze15, GNS$^+$16] there is still an exponential gap between the best known separations and the best known relations connecting the two complexity measures.
40
+
41
+ **Known Separations.** An interesting example due to Rubinstein [Rub95] shows a quadratic separation between the two measures: $bs(f) = \frac{1}{2} \cdot s(f)^2$. This example was improved by [Vir11] and then by [AS11] to $bs(f) = \frac{2}{3} \cdot s(f)^2 \cdot (1 - o(1))$ which is current state of the art.
42
+
43
+ **Known Relations.** Simon [Sim83] proved (implicitly) that $bs(f)$ is at most $4^{s(f)} \cdot s(f)$. The upper bound was improved by Kenyon and Kutin [KK04] who showed that $bs(f) \le O(e^{s(f)} \cdot \sqrt{s(f)})$. Recently, Ambainis et al. [ABG$^+$14] improved this bound to $bs(f) \le 2^{s(f)-1} \cdot s(f)$. Even more recently, Ambainis et al. [APV15] improved this bound slightly to $bs(f) \le 2^{s(f)-1} (s(f) - 1/3)$.
44
+
45
+ To sum up, while the best known upper bound on the block-sensitivity in terms of sensitivity is exponential, the best known lower bound is quadratic. Indeed, we seem far from understanding the right relation between the two complexity measures.
46
+
47
+ ## 1.1 $\ell$-block sensitivity
48
+
49
+ All mentioned examples that exhibit quadratic separations between the sensitivity and block sensitivity ([Rub95, Vir11, AS11]) have the property that the maximal block sensitivity is achieved on blocks of size at most 2. For this special case, Kenyon and Kutin [KK04] showed that the block sensitivity is at most $2 \cdot s(f)^2$. Hence, these examples are essentially tight for this subcase.
50
+
51
+ ¹$e_i$ is the vector whose $i$-th entry equals 1 and all other entries equal 0.
52
+ ---PAGE_BREAK---
53
+
54
+ Kenyon and Kutin introduced the notion of $\ell$-block sensitivity (denoted $bs_\ell(f)$): the maximal number of disjoint sensitive blocks where each block is of size at most $\ell$. Note that without loss of generality we may consider only sensitive blocks that are minimal with respect to set-inclusion (since otherwise we could have picked smaller blocks that are still disjoint). A well-known fact (cf. [BdW02, Lemma 3]) asserts that any minimal sensitive block for $f$ is of size at most $s(f)$, thus $bs(f) = bs_{s(f)}(f)$. Kenyon and Kutin proved the following inequalities relating the $\ell$-block sensitivity of different $\ell$-s:
55
+
56
+ $$bs_{\ell}(f) \leq \frac{4}{\ell} \cdot s(f) \cdot bs_{\ell-1}(f) \quad (1)$$
57
+
58
+ $$bs_{\ell}(f) \leq \frac{e}{(\ell - 1)!} \cdot s(f)^{\ell} \quad (2)$$
59
+
60
+ for all $2 \leq \ell \leq s(f)$. Plugging $\ell = s(f)$ gives the aforementioned bound $bs(f) \leq O(e^{s(f)} \cdot \sqrt{s(f)})$.
61
+
62
+ ## 1.2 Our Results
63
+
64
+ 1. In Section 2, we refine the argument of Kenyon and Kutin giving a better upper bound on the $\ell$-block sensitivity in terms of the $(\ell - 1)$-block sensitivity. We show that
65
+
66
+ $$bs_{\ell}(f) \leq \frac{e}{\ell} \cdot s(f) \cdot bs_{\ell-1}(f) \quad (3)$$
67
+
68
+ improving the bound in Eq. (1). On the other hand, Kenyon and Kutin gave examples with $bs_\ell(f) \geq \frac{1}{\ell} \cdot s(f) \cdot bs_{\ell-1}(f)$. Hence, Eq. (3) (and in fact, also Eq. (1)) is tight up to a constant. Interestingly, our analysis uses (a very simple) ordinary differential equation.
69
+
70
+ 2. In Section 3, we put focus on understanding $bs_3(f)$ in terms of the sensitivity. We show that an upper bound of the form $bs_3(f) \leq s(f)^{3-\epsilon}$ for some constant $\epsilon$ implies a sub-exponential upper bound for the sensitivity conjecture: $\forall f : bs(f) \leq 2^{s(f)^{1-\delta}}$, for $\delta > 0$. On the other hand, the best known separation (i.e., the aforementioned example by [AS11]) gives examples with $bs_3(f) \geq bs_2(f) \geq \Omega(s(f)^2)$. Thus, improving either the upper or lower bound for $bs_3(f)$ in terms of $s(f)$ will imply a breakthrough in our understanding of the sensitivity conjecture.
71
+
72
+ 3. In Section 4, we consider an extension of the sensitivity conjecture to bounded functions $f: \{0,1\}^n \to [0,1]$. We show that while Kenyon and Kutin's approach works in this model, it is almost tight, i.e., we give functions for which $bs_\ell(f) = \Omega((s(f)/\ell)^\ell)$. In particular, we give a function with sensitivity $O(\log n)$ and block sensitivity $\Omega(n/\log n)$ – a clear counterexample for the sensitivity conjecture in this model.
73
+
74
+ 4. In Section 5, we find better-than-quadratic separations between the sensitivity and the decision tree complexity. We construct functions based on minterm cyclic functions (as coined by Chakraborty [Cha11]), that were found using computer search. In particular, we give an infinite family of functions $\{f_n\}_{n \in I}$ with $\mathrm{DT}(f_n) = n$ and $s(f_n) = O(n^{0.48})$. In addition, we give an infinite family of functions $\{g_n\}_{n \in I}$ with $s(g_n) = O(\mathrm{DT}(g_n)^{0.473})$.
75
+
76
+ # 2 Improving The Bound on $bs_\ell$
77
+
78
+ In this section, we improve the bound on $bs_\ell(f)$ as a function of $bs_{\ell-1}(f)$ and $s(f)$. We start by recalling the analysis of [KK04], and then improve it using new ideas.
79
+ ---PAGE_BREAK---
80
+
81
+ ## 2.1 Kenyon-Kutin Argument
82
+
83
+ Let $x \in \{0, 1\}^n$ be a point in the Boolean hypercube and $\mathcal{B}$ a collection of disjoint minimal blocks such that $f(x) \neq f(x + B)$ for any $B \in \mathcal{B}$. We assign weights $w_1 \ge \dots \ge w_\ell \ge 1$ to sets of size 1, 2, ..., $\ell$ respectively, and we seek to maximize $t(x, \mathcal{B}) = \sum_{B \in \mathcal{B}} w_{|B|}$. Since all weights are at least 1, we have $t(x, \mathcal{B}) \ge |\mathcal{B}|$. Thus, upper bounding the value of $t$ yields an upper bound on the $\ell$-block sensitivity.
84
+
85
+ We choose $w_1 = w_2 = \dots = w_{\ell-1} = w$ and $w_\ell = 1$ for some parameter $w \ge 1$. Let $(x, \mathcal{B})$ be a point and a collection of disjoint minimal sensitive blocks maximizing $t(\cdot, \cdot)$ w.r.t. the parameter $w$. Let $m_1, \dots, m_\ell$ be the number of blocks of size 1, $\dots$, $\ell$ respectively in $\mathcal{B}$. We have $t(x, \mathcal{B}) = w \cdot (m_1 + \dots + m_{\ell-1}) + m_\ell$.
86
+
87
+ **Lemma 2.1.** Suppose $(x, \mathcal{B})$ maximize $t(\cdot, \cdot)$ w.r.t. $w \ge 1$ and let $m_1, \dots, m_\ell$ be the number of blocks of size $1, \dots, \ell$ in $\mathcal{B}$ respectively. Then,
88
+
89
+ $$m_{\ell} \cdot (\ell w - s(f)) \le (m_1 + \dots + m_{\ell-1}) \cdot w \cdot s(f).$$
90
+
91
+ *Proof.* We would derive the above inequality by examining the value of $t(\cdot, \cdot)$ on neighbors of $x$, and using the fact that all of these values are smaller or equal to $t(x, \mathcal{B})$.
92
+
93
+ Let $B \in \mathcal{B}$ be a block of size $\ell$. By the minimality of the block $B$, it means that any subset of $B$ does not flip the value of $f$ on $x$. Thus, for each $i \in B$, we have $f(x+e_i) = f(x)$. In addition, the block $B' = B \setminus \{i\}$ is a sensitive block (of size $\ell-1$) for $x+e_i$, but is not a sensitive block for $x$. Consider all such $\ell \cdot m_\ell$ neighbors $y = x+e_i$ where $i \in B$, $B \in \mathcal{B}$ and $|B| = \ell$. Denote by $\mathcal{A}_i$ the collection of all blocks $B''$ in $\mathcal{B}$ such that $f(y) = f(y+B'')$ (i.e., we are only considering disjoint blocks that were sensitive on $x$ and minimal). Looking at a specific block $B'' \in \mathcal{B}$, we count for how many $y$'s it is not a sensitive block, i.e., $f(y) = f(y+B'')$. Since $f(x) = f(y)$ and $f(x) \neq f(x+B'')$ the block $B''$ is not sensitive for $y = x+e_i$ if and only if $f(x+B'') \neq f(x+B''+e_i)$. In other words, for $B''$ to be non-sensitive on $y = x+e_i$, $i$ must be a sensitive coordinate of $x+B''$. Hence, each block $B'' \in \mathcal{B}$ may appear in at most $s(f)$ of the sets $\mathcal{A}_i$.
94
+
95
+ By our design for $y = x+e_i$ the block $B' = B \setminus \{i\}$ and the blocks in $B'' \in \mathcal{B} \setminus \mathcal{A}_i$ are sensitive. In order to show that they are disjoint it is enough to show that $B \in \mathcal{A}_i$. This is indeed the case since $x+e_i+B = x+B'$ and by the minimality of $B$, we have $f(x+e_i+B) = f(x+B') = f(x) = f(x+e_i)$, hence $B$ is not a sensitive block for $x+e_i$. We got that $\{B'\} \cup (\mathcal{B} \setminus \mathcal{A}_i)$ is a family of disjoint sensitive blocks for $x+e_i$.
96
+
97
+ Using the fact that $t(x, \mathcal{B})$ is maximal, and summing over all neighbors of $x$ considered above, we get
98
+
99
+ $$
100
+ \begin{aligned}
101
+ \ell \cdot m_{\ell} \cdot t(x, \mathcal{B}) &\geq \sum_{i \in B, |B|=\ell} t(x+e_i, \{B \setminus \{i\}\} \cup (\mathcal{B} \setminus \mathcal{A}_i)) \\
102
+ &\geq \sum_{i \in B, |B|=\ell} \left( w_{\ell-1} + t(x, \mathcal{B}) - \sum_{B'' \in \mathcal{A}_i} w_{|B''|} \right).
103
+ \end{aligned}
104
+ $$
105
+
106
+ Rearranging we get
107
+
108
+ $$\ell \cdot m_{\ell} \cdot w_{\ell-1} \leq \sum_{i \in B, |B|=\ell} \sum_{B'' \in A_i} w_{|B''|} = \sum_{B''} w_{|B''|} \cdot |\{(i, B) : i \in B, |B| = \ell, B'' \in A_i\}| \leq \sum_{B''} w_{|B''|} \cdot s(f).$$
109
+
110
+ Substituting $w_1, \dots, w_{\ell-1}$ with $w$ and $w_\ell$ with 1 and rearranging gives
111
+
112
+ $$m_{\ell} \cdot (\ell w - s(f)) \le (m_1 + \dots + m_{\ell-1}) w s(f)$$
113
+
114
+ which completes the proof. □
115
+ ---PAGE_BREAK---
116
+
117
+ In order to get something meaningful from Lemma 2.1 we need $\ell \cdot w - s(f)$ to be greater than 0. Writing $w$ as $\alpha \cdot s(f)/\ell$, this means that $\alpha > 1$. So we can choose any $\alpha > 1$ and get that the optimal $(m_1, \dots, m_\ell)$ for that $\alpha$ fulfills the following inequality:
118
+
119
$$m_{\ell} \le (m_1 + \dots + m_{\ell-1}) \cdot \frac{\alpha \cdot s^2/\ell}{(\alpha - 1) \cdot s} = (m_1 + \dots + m_{\ell-1}) \cdot \frac{s}{\ell} \cdot \frac{\alpha}{\alpha-1}.$$
120
+
121
+ Overall we got that the maximal value of $t(\cdot, \cdot)$ with respect to $w = \frac{\alpha}{\ell} \cdot s(f)$ is at most the value of
122
+ following linear program:
123
+
124
+ $$
125
+ \begin{array}{ll}
126
+ \text{maximize} & \frac{\alpha \cdot s(f)}{\ell} \cdot (m_1 + \dots + m_{\ell-1}) + m_{\ell} \\
127
+ \text{subject to} & m_{\ell} \le \frac{\alpha}{\alpha-1} \cdot \frac{s(f)}{\ell} \cdot (m_1 + \dots + m_{\ell-1}) \\
128
+ & (m_1 + \dots + m_{\ell-1}) \le b s_{\ell-1}(f) \\
129
+ & m_i \ge 0 & \text{for } i = 1, \dots, \ell
130
+ \end{array}
131
+ $$
132
+
133
+ Substituting $x_1 \triangleq (m_1+\ldots+m_{\ell-1})/bs_{\ell-1}$ and $x_2 \triangleq m_\ell/(bs_{\ell-1}\cdot s(f)/\ell)$ gives the following equivalent linear program:
134
+
135
+ $$
136
+ \begin{array}{ll}
137
+ \text{maximize} & \displaystyle \frac{s(f)}{\ell} \cdot bs_{\ell-1}(f) \cdot (\alpha x_1 + x_2) \\
138
+ \text{subject to} & x_2 \le \frac{\alpha}{\alpha-1} x_1 \\
139
+ & x_1 \le 1 \\
140
+ & x_i \ge 0 & \text{for } i=1,2
141
+ \end{array}
142
+ $$
143
+
144
+ The value of this linear program is $\frac{s(f)}{\ell} \cdot bs_{\ell-1}(f) \cdot (\alpha + \frac{\alpha}{\alpha-1})$ (achieved at $x_1 = 1$ and $x_2 = \frac{\alpha}{\alpha-1}$).
145
+ This value attains its minimum at $\alpha = 2$, which gives a value of $\frac{s(f)}{\ell} \cdot bs_{\ell-1}(f) \cdot 4$ to the LP.
146
+
147
+ What does that mean? It means that $(m_1 + \dots + m_{\ell-1}) \cdot s(f) \cdot 2/\ell + m_\ell \le \frac{s(f)}{\ell} \cdot bs_{\ell-1}(f) \cdot 4$
148
+ for any numbers $(m_1, \dots, m_\ell)$ of disjoint sensitive blocks of size $1, \dots, \ell$ respectively. In particular, since
149
+ $s(f) \cdot 2/\ell \ge 1$ (because $\ell \le s(f)$ WLOG) this inequality bounds $bs_\ell(f)$ from above by $\frac{s(f)}{\ell} \cdot bs_{\ell-1}(f) \cdot 4$.
150
+
151
+ ## 2.2 Improved Bounds
152
+
153
+ Kenyon-Kutin [KK04] stopped at this point, seemingly getting the best bound this analysis could offer. This is indeed true if we use only one choice of $\alpha$, however, one can consider using several different $\alpha$'s to get a better bound, as we do next.
154
+
155
+ For starters, we show that using two different weights $\alpha_1, \alpha_2$ gives better bounds on $bs_\ell(f)$ in terms of the $bs_{\ell-1}(f)$ and $s(f)$. The idea is that the solution for the linear program for a certain $\alpha_1$ implies a new equation for the feasible region of the linear program for $\alpha_2$.
156
+
157
+ Recall that choosing $\alpha_1 = 2$ implies that $2 \cdot x_1 + x_2 \le 4$. We now rewrite the linear program for an arbitrary $\alpha$ adding this constraint.
158
+
159
+ $$
160
+ \begin{array}{ll}
161
+ \text{maximize} & \displaystyle \frac{s(f)}{\ell} \cdot b s_{\ell-1}(f) \cdot (\alpha x_1 + x_2) \\
162
+ \text{subject to} & x_2 \leq \frac{\alpha}{\alpha-1} x_1 \\
163
+ & 2 \cdot x_1 + x_2 \leq 4 \\
164
+ & x_1 \leq 1 \\
165
+ & x_i \geq 0 & \text{for } i=1,2
166
+ \end{array}
167
+ $$
168
+
169
+ One can check that for $\alpha_2 = \frac{4}{3}$ the optimal value for the LP is $\frac{32}{9} \cdot \frac{s(f)}{\ell} \cdot b s_{\ell-1}(f)$. One can now get a new constraint from the linear program for $\alpha_2$ and continue repeating this process by choosing a sequence of $\alpha$'s. Instead of defining a sequence of $\alpha$'s we will use a continuous strategy.
170
+ ---PAGE_BREAK---
171
+
172
+ **Theorem 2.2.** $\forall f: bs_\ell(f) \le \frac{e}{\ell} \cdot s(f) \cdot bs_{\ell-1}(f)$.
173
+
174
+ *Proof.* We calculate the optimal value for $\alpha$ given an optimal value for $\alpha + \delta$, for an infinitely small $\delta > 0$. Let $\text{OPT}(\alpha)$ be the optimal value of $t(\cdot, \cdot)$ for parameter $\alpha$, and in order to avoid carrying the multiplicative factor of $bs_{\ell-1}(f) \cdot \frac{s(f)}{\ell}$ let $F(\alpha) = \frac{\text{OPT}(\alpha)}{bs_{\ell-1}(f) \cdot s(f)/\ell}$. The value of the next linear program upper bounds $F(\alpha)$:
175
+
176
+ $$
177
+ \begin{array}{ll}
178
+ \text{maximize} & \alpha \cdot x_1 + x_2 \\
179
+ \text{subject to} & x_2 \le \frac{\alpha}{\alpha-1} \cdot x_1 \\
180
+ & x_1 \le 1 \\
181
+ & x_i \ge 0 & \text{for } i = 1, 2
182
+ \end{array}
183
+ \qquad (7)
184
+ $$
185
+
186
+ By the definition of $F(\alpha)$ as the normalized optimal value of $t(\cdot, \cdot)$ w.r.t. $\alpha$ we get a new linear equation $\alpha \cdot x_1 + x_2 \le F(\alpha)$ for all feasible $(x_1, x_2)$. We wish to invoke the equation given by $\alpha + \delta$ on the linear program upper-bounding $F(\alpha)$, for an infinitely small $\delta > 0$.
187
+
188
+ $$
189
+ F(\alpha) \le \begin{cases}
190
+ \alpha \cdot x_1 + x_2 & \\
191
+ \text{subject to} & x_2 \le \frac{\alpha}{\alpha-1} \cdot x_1 \\
192
+ & (\alpha + \delta) \cdot x_1 + x_2 \le F(\alpha + \delta) \\
193
+ & x_1 \le 1 \\
194
+ & x_i \ge 0 & \text{for } i=1,2
195
+ \end{cases} \tag{8}
196
+ $$
197
+
198
+ Let $(x_1^{\text{OPT}}, x_2^{\text{OPT}})$ be the optimal point for the above LP. In the above LP, $x_2$ is upper bounded by two linear functions on $x_1$:
199
+
200
+ $$
201
+ x_2 \le \frac{\alpha}{\alpha - 1} \cdot x_1 \quad \text{and} \quad x_2 \le F(\alpha + \delta) - (\alpha + \delta) \cdot x_1.
202
+ $$
203
+
204
+ Since one linear function is increasing and the other is decreasing, the optimal value is achieved either at the intersection of these two lines or at $x_1 = 1$. The intersection point of the two lines, denoted by $x_1^{\text{int}}$ is given by
205
+
206
+ $$
207
+ x_1^{\text{int}} = \frac{F(\alpha + \delta)}{\frac{\alpha}{\alpha-1} + \alpha + \delta}.
208
+ $$
209
+
210
+ $x_1^{\text{int}}$ is smaller than 1 for $\alpha > 1$ since $F(\alpha + \delta) \le \frac{\alpha+\delta}{(\alpha+\delta)-1} + \alpha + \delta$ and $\frac{x}{x-1}$ is decreasing for $x > 1$. After the intersection, $x_2$ decreases faster than $\alpha \cdot x_1$ increases, hence the optimal value of the LP is achieved at the intersection, $x_1^{\text{OPT}} = x_1^{\text{int}}$. The optimal value of $x_2$ is given by $x_2^{\text{OPT}} = \frac{\alpha}{\alpha-1} \cdot x_1^{\text{OPT}}$, which yields
211
+
212
+ $$
213
+ \begin{align*}
214
+ F(\alpha) &\le x_1^{\text{OPT}} \cdot \alpha + x_2^{\text{OPT}} = x_1^{\text{OPT}} \cdot \left( \frac{\alpha}{\alpha-1} + \alpha \right) \\
215
+ &= \frac{F(\alpha + \delta)}{\frac{\alpha}{\alpha-1} + \alpha + \delta} \cdot \left( \frac{\alpha}{\alpha-1} + \alpha \right) \\
216
+ &= F(\alpha + \delta) \cdot \left( 1 - \frac{\delta}{\frac{\alpha}{\alpha-1} + \alpha + \delta} \right)
217
+ \end{align*}
218
+ $$
219
+
220
+ Rearranging the equation gives
221
+
222
+ $$
223
+ \frac{F(\alpha + \delta) - F(\alpha)}{\delta} \leq \frac{F(\alpha + \delta)}{\frac{\alpha}{\alpha-1} + \alpha + \delta},
224
+ $$
225
+ ---PAGE_BREAK---
226
+
227
+ and as $\delta$ tends to 0 we get $F'(\alpha) \le \frac{F(\alpha)}{\frac{\alpha}{\alpha-1}+\alpha} = F(\alpha) \cdot \frac{\alpha-1}{\alpha^2}$. The solution for this ODE is $F(\alpha) \le \alpha \cdot e^{\frac{1}{\alpha}} \cdot c$
228
+ for some constant $c > 0$. Taking an initial condition on $\alpha \gg 1$: $F(\alpha) \le \alpha + \frac{\alpha}{\alpha-1}$ gives
229
+
230
+ $$c \le \frac{F(\alpha)}{\alpha \cdot e^{\frac{1}{\alpha}}} \le \frac{\alpha \cdot \left(1 + \frac{1}{\alpha-1}\right)}{\alpha \cdot e^{\frac{1}{\alpha}}} \xrightarrow{\alpha \to \infty} 1.$$
231
+
232
+ Hence, $F(\alpha) \le \alpha \cdot e^{\frac{1}{\alpha}}$. When $\alpha$ approaches 1 we get $\lim_{\alpha \to 1^+} F(\alpha) \le e$, thus $bs_\ell(f) \le \frac{e}{\ell} \cdot s(f) \cdot bs_{\ell-1}(f)$ completing the proof. $\square$
233
+
234
+ As a special case, Theorem 2.2 implies that $bs_2(f) \le \frac{e}{2} \cdot s(f)^2$, which leads us to the following open problem.
235
+
236
+ **Open Problem 1.** *What is the smallest constant $c > 0$ such that $bs_2(f) \le c \cdot s(f)^2$ for all Boolean functions?*
237
+
238
+ An example with $bs_2(f) = \frac{2}{3} \cdot s(f)^2 \cdot (1 - o(1))$ is given in [AS11], thus $\frac{2}{3} \le c \le \frac{e}{2}$.
239
+
240
+ # 3 Understanding $bs_3(f)$ is Important
241
+
242
+ As the upper and lower bounds for $bs_2(f)$ are almost matching, it seems that the next challenge is
243
+ understanding the asymptotic behavior of $bs_3(f)$. A more modest challenge is the following.
244
+
245
+ **Open Problem 2.** *Improve either the upper or lower bound on $bs_3(f)$.*
246
+
247
+ Recall that the upper bound on $bs_3(f)$ is $O(s(f)^3)$ (see Eq.(2)) and the lower bound is $(2/3) \cdot s(f)^2 \cdot (1-o(1))$. It is somewhat surprising that any slight improvement on either the lower or upper bound on $bs_3$ would be a significant step forward in our understanding of the general question. The following claim shows that a slightly better than quadratic gap on a single example implies a better than quadratic gap on an infinite family of examples.
248
+
249
+ **Claim 3.1.** *If there exists a function such that $bs_3(f) > s(f)^2$ then there exists a family of functions $\{f_n\}_{n \in \mathbb{N}}$ with $bs(f_n) > s(f_n)^{2+\epsilon}$ for some constant $\epsilon > 0$ (dependent on $f$).*
250
+
251
+
252
+
253
+ This family is simply $f_1 = f$, $f_n = f \circ f_{n-1}$ where $\circ$ stands for Boolean function composition as in [Tal13]. Next, we prove a theorem exhibiting the self-reducibility nature of the problem.
254
+
255
+ **Theorem 3.2.** Let $k, \ell, a \in \mathbb{N}$ such that $\ell > k$ and let $T: \mathbb{N} \to \mathbb{R}$ be a monotone function.
256
+
257
+ *If $\forall f : bs_\ell(f) \le T(bs_k(f))$, then $\forall f' : bs_{\ell a}(f') \le T(bs_{ka}(f'))$.*
258
+
259
+ *Proof.* Assume by contradiction that there exists a function $f'$ such that $bs_{\ell a}(f') > T(bs_{ka}(f'))$. We will show that there exists a function $f$ such that $bs_\ell(f) > T(bs_k(f))$. We shall assume WLOG that the maximal $bs_{\ell a}$ of $f'$ is achieved on $\vec{0}$. Let $B_1, B_2, \dots, B_m$ be a family of disjoint sensitive blocks for $f'$ at $\vec{0}$, each $B_i$ of size at most $\ell a$. Split every block $B_i$ to $\ell$ sets $B_{i,1}, \dots, B_{i,\ell}$ of size at most $a$. The function $f$ will have a variable $x_{i,j}$ corresponding to every set $B_{i,j}$ of size at most $a$. The value of $f(x_{1,1}, \dots, x_{m,\ell})$ is defined to be the value of $f'$ where the variables in each $B_{i,j}$ equal $x_{i,j}$, and all other variables equal 0. $bs_\ell(f, \vec{0}) \ge bs_{\ell a}(f', \vec{0})$, since for the sensitive blocks $B_1, \dots, B_m$ for $f'$, there exist corresponding sensitive blocks $B'_1, \dots, B'_m$ for $f$, each of size at most $\ell$, where $B'_i = \{x_{i,j} : j \in [\ell]\}$.
260
+
261
+ On the other hand, any set of disjoint sensitive blocks of size at most *k* for *f* corresponds to a disjoint set of sensitive blocks of size at most *ka* for *f'*. Thus $bs_k(f) \le bs_{ka}(f')$, giving
262
+
263
+ $$T(bs_k(f)) \le T(bs_{ka}(f')) < bs_{\ell a}(f') \le bs_\ell(f),$$
264
+
265
+ where we used the monotonicity of *T* in the first inequality. $\square$
266
+ ---PAGE_BREAK---
267
+
268
+ Using Theorem 3.2 we get that any upper bound of the form $bs_\ell(f) \le s(f)^{\ell-\epsilon}$ implies a subexponential upper bound on $bs(f)$ in terms of $s(f)$.
269
+
270
+ **Theorem 3.3.** Let $k \in \mathbb{N}, \varepsilon > 0$ be constants. If for all Boolean functions $bs_k(f) \le s(f)^{k-\varepsilon}$, then for the constant $\gamma = \frac{\log(k-\varepsilon)}{\log(k)} < 1$ it holds that $bs(f) \le 2^{O(s(f)^{\gamma} \cdot \log s(f))}$ for all $f$.
271
+
272
+ For example, Theorem 3.3 shows that if $\forall f : bs_3(f) \le s(f)^2$, then $\forall f : bs(f) \le 2^{O(s^{0.631} \cdot \log(s))}$.
273
+
274
+ *Proof.* Using the hypothesis and Theorem 3.2 one can show by induction on $t$ that
275
+
276
$$ \forall f : bs_{k^t}(f) \le s(f)^{(k-\epsilon)^t}. \qquad (9) $$
277
+
278
+ The base case $t=1$ is simply the hypothesis. We assume the claim is true for $1,..., t-1$, and show the claim is true for $t$. Using Theorem 3.2 with $T(x) = x^{k-\epsilon}$ and $a = k^{t-1}$ we get
279
+
280
$$bs_{k^t}(f) \le T(bs_{k^{t-1}}(f)) = (bs_{k^{t-1}}(f))^{k-\epsilon}.$$
281
+
282
+ By induction $bs_{k^{t-1}}(f) \le s(f)^{(k-\epsilon)^{t-1}}$. Hence, we get $bs_{k^t}(f) \le s(f)^{(k-\epsilon)^t}$, which finishes the induction proof.
283
+
284
+ Fix $f$ and let $s = s(f)$. Recall that $bs(f) = bs_s(f)$ since each minimal block that flips the value of $f$ is of size at most $s$. Hence,
285
+
286
$$bs(f) = bs_s(f) = bs_{k^{\lceil \log_k(s) \rceil}}(f) \\ \le s^{(k-\epsilon)^{\lceil \log_k(s) \rceil}} \le s^{(k-\epsilon)^{\log_k(s)+1}} = 2^{(k-\epsilon) \cdot \log(s) \cdot s^{\log(k-\epsilon)/\log(k)}} = 2^{O(s^\gamma \log(s))}. \quad \square$$
287
+
288
+ # 4 The Sensitivity Conjecture for Bounded Functions
289
+
290
+ In this section, we generalize the definitions of sensitivity and block sensitivity to bounded functions $f: \{0,1\}^n \to [0,1]$, extending the definitions for Boolean functions. We generalize the result of Kenyon and Kutin to this setting (after removing some trivial obstacles). Given that, one may hope that the sensitivity conjecture holds also for bounded functions, i.e., that the block-sensitivity is at most polynomial in the sensitivity. However, we give a counterexample to this question, by constructing functions on $n$ variables with sensitivity $O(\log n)$ and block sensitivity $n/\log(n)$. In fact, we show that the result of Kenyon and Kutin is essentially tight by giving examples for which $bs_\ell(f) = n/\ell$ and $s(f) = O(\ell \cdot n^{1/\ell})$ for any $\ell \le \log n$.
291
+
292
+ We begin by generalizing the definitions of sensitivity and block-sensitivity. For $f: \{0,1\}^n \to [0,1]$ and $x \in \{0,1\}^n$, we denote the sensitivity of $f$ at a point $x$ by
293
+
294
+ $$ s(f,x) = \sum_{i=1}^{n} |f(x) - f(x \oplus e_i)|. \qquad (10) $$
295
+
296
+ Similarly we define the block sensitivity and $\ell$-block sensitivity as
297
+
298
+ $$ bs(f,x) = \max \left\{ \sum_i |f(x) - f(x + B_i)| : B_1, \dots, B_k \subseteq [n] \text{ are disjoint} \right\}. \qquad (11) $$
299
+
300
+ and
301
+
302
+ $$ bs_\ell(f,x) = \max \left\{ \sum_i |f(x) - f(x + B_i)| : B_1, \dots, B_k \subseteq [n] \text{ are disjoint and } \forall i, |B_i| \le \ell \right\}. $$
303
+ ---PAGE_BREAK---
304
+
305
+ Naturally we denote by $s(f) = \max_x s(f, x)$, by $bs(f) = \max_x bs(f, x)$ and by $bs_\ell(f) = \max_x bs_\ell(f, x)$.
306
+ It is easy to see that for a Boolean function these definitions match the standard definitions of sen-
307
+ sitivity, block sensitivity and $\ell$-block sensitivity.
308
+
309
+ We wish to prove an analog of Kenyon-Kutin result, showing that $bs_{\ell}(f) \le c_{\ell} \cdot s(f)^{\ell}$. However,
310
+ stated as is the claim is false for a “silly” reason. Take any Boolean function $f$ with a gap between
311
+ the sensitivity and the $\ell$-block sensitivity and take $g(x) = f(x)/s(f)$. Then, we get $s(g) = 1$ and
312
+ $bs_{\ell}(g) = bs_{\ell}(f)/s(f)$. As there are examples with $bs_2(f) = n/2$ and $s(f) = \sqrt{n}$, we get that
313
+ $bs_2(g) = \sqrt{n}/2$ while $s(g) = 1$, where $n$ grows to infinity. This seems to rule out any relation
314
+ between the sensitivity and block sensitivity (and even 2-block sensitivity) in the case of bounded
315
+ functions. To overcome this triviality, we insist that the block sensitivity is close to $n$, or alterna-
316
+ tively that changing each block dramatically changes the value of the function. Surprisingly, under
317
+ this requirement we are able to retrieve known relations between sensitivity and block sensitivity
318
+ that were established in the Boolean setting by Kenyon and Kutin [KK04].
319
+
320
+ **Theorem 4.1.** Let $c > 0$ and $f : \{0,1\}^n \to [0,1]$. Assume that there exists a point $x_0 \in \{0,1\}^n$
321
+ and disjoint blocks $B_1, \dots, B_k$ of size at most $\ell$ such that $|f(x_0) - f(x_0 + B_i)| \ge c$ for all $i \in [k]$.
322
+ Furthermore, assume that $2 \le \ell \le \log(k)$. Then, $s(f) \ge \Omega(k^{1/\ell} \cdot c)$.
323
+
324
+ We get the following corollary, whose proof is deferred to Appendix A.
325
+
326
+ **Corollary 4.2.** Let $f: \{0,1\}^n \to [0,1]$ with $bs(f) \ge n/\ell$. Then, $s(f) \ge \Omega(n^{1/(2\ell)}/\ell)$.
327
+
328
+ Unlike in the Boolean case, we are able to show that Theorem 4.1 is essentially tight! That is,
329
+ for any $\ell$ and $n$ we have a construction with $bs_\ell(f) \ge n/\ell$ and $s(f) = O(\ell \cdot n^{1/\ell})$. In particular,
330
+ picking $\ell = \log(n)$ gives an exponential separation between block sensitivity (which is at least
331
+ $n/\log n$) and sensitivity (which is $O(\log n)$).
332
+
333
+ **Theorem 4.3.** Let $\ell, n \in \mathbb{N}$ with $2 \le \ell \le n$. Then, there exists a function $h : \{0,1\}^n \to [0,1]$ with $bs_\ell(h) \ge \lfloor n/\ell \rfloor$ and $s(h) \le 3 \cdot \ell \cdot n^{1/\ell}$.
334
+
335
+ ## 4.1 Proof of Kenyon-Kutin Result for Bounded Functions
336
+
337
+ **Proof Overview.** We start by giving a new proof for Kenyon-Kutin result, based on random walks on the hypercube. We assume by contradiction that $f(x_0) = 0$ and $f(x_0 + B_i) = 1$ for all $i \in [k]$ and that the sensitivity is $o(k^{1/\ell})$. Taking a random walk of length $r = n/k^{1/\ell}$ starting from $x_0$ will end up in point $y$ where with high probability $f(y) = f(x_0)$. This is true since in each step with probability at least $1-s(f)/n$ we are maintaining the value of $f$, hence by union bound with probability at least $1-r \cdot s(f)/n$ we maintain the value of $f$ in the entire walk. On the contrast, choosing a random $i \in [k]$ and starting a random walk of length $r - |B_i|$ starting from $(x_0 + B_i)$ will lead to a point $y'$ where with high probability $f(y') = f(x_0 + B_i) = 1$. However, as we show in the proof below, the distributions of $y$ and $y'$ are similar (close in statistical distance). This leads to a contradiction as $f(y)$ tends to be equal to 0 and $f(y')$ tends to be equal to 1.
338
+
339
+ A simple observation, which allows us to generalize the argument above to bounded function,
340
+ is that for a given point $x \in \{0,1\}^n$ and a random neighbor in the hypercube, $y \sim x$, the expected
341
+ value of $f(y)$ is close to $f(x)$. This follows from Eq. (10). Thus, the only difference in the argument
342
+ for bounded functions will be that $\mathbf{E}[f(y)]$ is close to 0 and $\mathbf{E}[f(y')]$ is close to 1, leading to a
343
+ contradiction as well.
344
+
345
+ *Proof of Theorem 4.1.* First, we make a few assumptions that are without loss of generality, in order to make the argument later clearer. We assume $x_0 = 0^n$ and $f(x_0) = 0$. We assume $n = k \cdot \ell$
346
+ ---PAGE_BREAK---
347
+
348
+ and that the blocks are given by $B_i = \{(i-1)\ell + 1, \dots, i\ell\}$ for $i \in [k]$. We assume that $c=1$, since for $c<1$ one can take $f'(x) = \min\{f(x)/c, 1\}$, and note that $f'$ is a bounded function with $f'(x_0 + B_i) = 1$. Proving the theorem for $f'$ gives $s(f) \ge s(f') \cdot c \ge \Omega(c \cdot k^{1/\ell})$.
349
+
350
+ Let $r = \lfloor \frac{n}{(2k)^{1/\ell}} \rfloor$, by the assumption $2 \le \ell \le \log(k)$ we have $\sqrt{n} \le r \le n/2$. Assume by contradiction that $s(f) \le \varepsilon \cdot k^{1/\ell}$ for some sufficiently small constant $\varepsilon > 0$ to be determined later. Consider the following two random processes.
351
+
352
+ **Algorithm 1 Process A**
353
+
354
+ 1: $X_0 \leftarrow 0^n$
355
+ 2: **for** $t = 1, \dots, r$ **do**
356
+ 3: &nbsp;&nbsp;&nbsp;&nbsp;Select a random $i \in [n]$ among the coordinates for which $X_{t-1}$ is 0 and let $X_t \leftarrow X_{t-1} + e_i$.
357
+ 4: **end for**
358
+
359
+ **Algorithm 2 Process B**
360
+
361
+ 1: Select uniformly $i \in [k]$ and let $Y_0 \leftarrow B_i$
362
+ 2: **for** $t = 1, \dots, r - \ell$ **do**
363
+ 3: &nbsp;&nbsp;&nbsp;&nbsp;Select a random $i \in [n]$ among the coordinates for which $Y_{t-1}$ is 0 and let $Y_t \leftarrow Y_{t-1} + e_i$.
364
+ 4: **end for**
365
+
366
+ For each $t \in \{0, \dots, r-1\}$, we claim that
367
+
368
+ $$
369
+ \begin{align*}
370
+ \mathbf{E}[f(X_{t+1}) - f(X_t)] &= \mathbf{E} \left[ \frac{1}{n-t} \cdot \sum_{i:(X_t)_i=0} (f(X_t + e_i) - f(X_t)) \right] \\
371
+ &\le \frac{1}{n-t} \cdot \mathbf{E}[s(f, X_t)] \le \frac{s(f)}{n-t}.
372
+ \end{align*}
373
+ $$
374
+
375
+ By telescoping this implies that
376
+
377
+ $$
378
+ \mathbf{E}[f(X_r)] = \mathbf{E}[f(X_0)] + \sum_{t=0}^{r-1} \mathbf{E}[f(X_{t+1}) - f(X_t)] \le 0 + \frac{r \cdot s(f)}{n-r} \le O(\varepsilon).
379
+ $$
380
+
381
+ In a symmetric fashion, for each $t \in \{0, \dots, r-\ell-1\}$ we have $\mathbf{E}[f(Y_{t+1}) - f(Y_t)] \ge -\frac{s(f)}{n-t-\ell}$. Again, telescoping implies that
382
+
383
+ $$
384
+ \mathbf{E}[f(Y_{r-\ell})] \geq \mathbf{E}[f(Y_0)] - \frac{(r-\ell) \cdot s(f)}{n-r} \geq 1 - \frac{r \cdot s(f)}{n-r} \geq 1 - O(\varepsilon).
385
+ $$
386
+
387
+ So it seems that the distribution of $X_r$ and $Y_{r-\ell}$ are very different from one another. However, we shall show that conditioned on a probable event, $X_r$ and $Y_{r-\ell}$ are identically distributed. To define the event, consider the sets
388
+
389
+ $$
390
+ U_i = \{\mathbb{1}_A \mid A \subseteq [n], |A| = r, B_i \subseteq A, \forall j \neq i : B_j \not\subseteq A\}
391
+ $$
392
+
393
+ for $i \in [k]$ and their union
394
+
395
+ $$
396
+ U = \bigcup_{i=1}^{k} U_i = \{\mathbb{1}_A \mid A \subseteq [n], |A| = r, \exists! i \in [k] : B_i \subseteq A\}.
397
+ $$
398
+
399
+ Let $E_X$ be the event that $X_r \in U$, and $E_Y$ be the event that $Y_{r-\ell} \in U$. We show that
400
+ ---PAGE_BREAK---
401
+
402
+ **Claim 4.4.** The following hold:
403
+
404
+ 1. $X_r|E_X$ is identically distributed as $Y_{r-\ell}|E_Y$.
405
+
406
+ 2. $\mathbf{Pr}[E_Y] = \Omega(1)$
407
+
408
+ 3. $\mathbf{Pr}[E_X] = \Omega(1)$
409
+
410
+ We defer the proof of Claim 4.4 for later. We derive a contradiction from all of the above by showing that $\mathbf{E}[f(X_r)|E_X] < \mathbf{E}[f(Y_{r-\ell})|E_Y]$ (this is indeed a contradiction because by the claim $X_r|E_X$ and $Y_{r-\ell}|E_Y$ should be identically distributed and hence the expected values of $f(\cdot)$ on each of them should be the same). To show this, we note that
411
+
412
+ $$
413
+ \begin{align*}
414
+ \mathbf{E}[f(X_r)|E_X] &= \mathbf{E}[f(X_r) \cdot 1_{E_X}]/\mathbf{Pr}[E_X] \\
415
+ &\leq \mathbf{E}[f(X_r)]/\mathbf{Pr}[E_X] = O(\mathbf{E}[f(X_r)]) = O(\varepsilon).
416
+ \end{align*}
417
+ $$
418
+
419
+ On the other hand
420
+
421
+ $$
422
+ \begin{align*}
423
+ \mathbf{E}[f(Y_{r-\ell})|E_Y] &= 1 - \mathbf{E}[1 - f(Y_{r-\ell})|E_Y] \\
424
+ &\geq 1 - \mathbf{E}[1 - f(Y_{r-\ell})]/\mathbf{Pr}[E_Y] = 1 - O(\mathbf{E}[1 - f(Y_{r-\ell})]) = 1 - O(\varepsilon).
425
+ \end{align*}
426
+ $$
427
+
428
+ Choosing $\varepsilon$ to be a small enough constant implies that $\mathbf{E}[f(X_r)|E_X] < \mathbf{E}[f(Y_{r-\ell})|E_Y]$, which completes the proof. $\square$
429
+
430
+ *Proof of Claim 4.4.* We shall use in the proof of Items 2 and 3 the fact that $1/3 \le r^\ell k / n^\ell \le 1/2$ which follows from the choice of $r = \lfloor n/(2k)^{1/\ell} \rfloor$ (for large enough $n$ and $k$).
431
+
432
+ 1. First note that $X_r$ is distributed uniformly over the set of vectors in $\{0,1\}^n$ with hamming weight $r$. In particular, conditioning that $X_r$ is in a set $U$ of such vectors, makes it uniform over $U$. We are left to show that $Y_{r-\ell}|E_Y$ is distributed uniformly over $U$. Given that $Y_0 = B_i$, we have that $Y_{r-\ell}$ is the OR of $1_{B_i}$ with a random vector of weight $r-\ell$ on $[n] \setminus B_i$. Conditioned on $E_Y$ the only way to reach $U_i$ is if $Y_0 = B_i$, hence, by the above, all points in $U_i$ are attained with the same probability. Using symmetry, all points in $U = \cup_i U_i$ are attained with the same probability.
433
+
434
+ 2. Let $B_i$ be the block selected in the first step of Process B. We analyze the probability that all indices in $B_j$ for some $j \neq i$ are chosen in the $r-\ell$ iterations of Process B.
435
+
436
+ $$
437
+ \begin{align*}
438
+ \text{\textbf{Pr}}[B_j \text{ is selected}] &= \frac{(\# \text{ of sequences where } B_j \text{ is selected})}{(\# \text{ of sequences})} \\
439
+ &= \frac{(r-\ell)^{\underline{\ell}} \cdot (n-2\ell)^{\underline{r-2\ell}}}{(n-\ell)^{\underline{r-\ell}}} = \frac{(r-\ell)!(n-2\ell)!(n-r)!}{(r-2\ell)!(n-r)!(n-\ell)!} \\
440
+ &= \frac{(r-\ell)!(n-2\ell)!}{(r-2\ell)!(n-\ell)!} = \frac{(r-\ell)\cdots(r-2\ell+1)}{(n-\ell)\cdots(n-2\ell+1)} \le \left(\frac{r}{n}\right)^\ell
441
+ \end{align*}
442
+ $$
443
+
444
+ (recall that $n^{\underline{k}} \triangleq \frac{n!}{(n-k)!}$ denotes the falling factorial). Hence, $\mathrm{Pr}[\exists j \neq i : B_j \text{ is selected}] \leq k \cdot (r/n)^{\ell} \leq 1/2$ and we have $\mathrm{Pr}[E_Y] \geq 1/2$.
445
+
446
+ 3. Let $\pi_1, \dots, \pi_r \in [n]$ be the sequence of choices made by Process A. For $i \in [k]$, let $E_{X,i}$ be the event that $X_r \in U_i$. By the uniqueness of the block contained in $X_r$ the events $E_{X,i}$ are disjoint, hence $\mathrm{Pr}[E_X] = \sum_{i=1}^k \mathrm{Pr}[E_{X,i}]$. By symmetry, $\mathrm{Pr}[E_X] = k \cdot \mathrm{Pr}[E_{X,1}]$. The event
447
+ ---PAGE_BREAK---
448
+
449
+ $E_{X,1}$ is simply the event that there exists a set $S \subseteq [r]$ of size $\ell$ such that $\{\pi_j\}_{j \in S} = B_1$ and the sequence $\{\pi_j : j \in [r] \setminus S\}$ is a sequence of choices for which $E_Y$ holds, when starting Process B from $Y_0 = B_1$. This shows that $\Pr[E_{X,1}] = \Pr[E_Y|Y_0 = B_1] \cdot \Pr[B_1 \subseteq \{\pi_1, \dots, \pi_r\}]$. By Symmetry, $\Pr[E_Y|Y_0 = B_i] = \Pr[E_Y] = \Omega(1)$ from the previous item. In addition,
450
+
451
+ $$
452
+ \begin{align*}
453
+ \mathbf{Pr}[B_1 \subseteq \{\pi_1, \dots, \pi_r\}] &= \frac{r^{\underline{\ell}} \cdot (n-\ell)^{\underline{r-\ell}}}{n^{\underline{r}}} = \frac{r!(n-\ell)!(n-r)!}{(r-\ell)!(n-r)!n!} \\
454
+ &= \frac{r!(n-\ell)!}{(r-\ell)!n!} = \frac{r \cdots (r-\ell+1)}{n \cdots (n-\ell+1)} \geq \left(\frac{r-\ell}{n}\right)^{\ell} \\
455
+ &= \left(\frac{r}{n}\right)^{\ell} \cdot (1-\ell/r)^{\ell} = \left(\frac{r}{n}\right)^{\ell} \cdot (1-o(1))
456
+ \end{align*}
457
+ $$
458
+
459
+ where $(1 - \ell/r)^{\ell} = 1 - o(1)$ follows from $\ell \le \log(k)$ and $r \ge \sqrt{n} \ge \sqrt{k}$. Thus,
460
+
461
+ $$
462
+ \begin{align*}
463
+ \mathbf{Pr}[E_X] &= k \cdot \mathbf{Pr}[E_{X,1}] = k \cdot \mathbf{Pr}[B_1 \subseteq \{\pi_1, \dots, \pi_r\}] \cdot \mathbf{Pr}[E_Y | Y_0 = B_1] \\
464
+ &\geq k \cdot \left(\frac{r}{n}\right)^{\ell} \cdot (1-o(1)) \cdot \frac{1}{2} \geq \frac{1}{3} \cdot (1-o(1)) \cdot \frac{1}{2} = \Omega(1). \quad \square
465
+ \end{align*}
466
+ $$
467
+
468
+ ## 4.2 Separating Sensitivity and Block Sensitivity of Bounded Functions
469
+
470
+ **The Lattice Variant of The Sensitivity Conjecture** The proof of Theorem 4.3 is more natural in the lattice-variant of the sensitivity conjecture as suggested by Aaronson (see [Bop12]). In this variant, instead of talking about functions over $\{0, 1\}^n$ we are considering functions over $\{0, 1, \dots, \ell\}^k$ for $\ell, k \in \mathbb{N}$. Given a function $g: \{0, 1, \dots, \ell\}^k \to \mathbb{R}$ one can define a Boolean function $f: \{0, 1\}^{\ell \cdot k} \to \mathbb{R}$ by the following equation:
471
+
472
+ $$ f(x_{1,1}, \dots, x_{k,\ell}) = g\left(\sum_{i=1}^{\ell} x_{1,i}, \dots, \sum_{i=1}^{\ell} x_{k,i}\right). \qquad (12) $$
473
+
474
+ For a point $y \in \{0, 1, ..., \ell\}^k$ and function $g: \{0, ..., \ell\}^k \to \mathbb{R}$ one can define the sensitivity of $g$ at $y$ as
475
+
476
+ $$ s(g, y) = \sum_{y' \sim y} |g(y') - g(y)| $$
477
+
478
+ where $y' \sim y$ if $y' \in \{0, ..., \ell\}^k$ is a neighbor of $y$ in the grid $\{0, ..., \ell\}^k$, i.e., if $y$ and $y'$ agree on all coordinates except for one coordinate, say $j \in [k]$, on which $|y_j - y'_j| = 1$. The following claim relates the sensitivity of $f$ to that of $g$.
479
+
480
+ **Claim 4.5.** Let $g: \{0, ..., \ell\}^k \to \mathbb{R}$ and let $f$ be the function defined by Eq. (12). Then $s(f) \leq \ell \cdot s(g)$.
481
+
482
+ *Proof.* Let $x = (x_{1,1},\dots,x_{k,\ell}) \in \{0,1\}^{k\ell}$ and let $x' \in \{0,1\}^{k\ell}$ be a neighbor of $x$, obtained by flipping the $(i,j)$-th coordinate. Let $y = (\sum_{t=1}^{\ell} x_{1,t},\dots, \sum_{t=1}^{\ell} x_{k,t})$ and similarly let $y' = (\sum_{t=1}^{\ell} x'_{1,t},\dots, \sum_{t=1}^{\ell} x'_{k,t})$. Then $y$ and $y'$ differ only on the $i$-th coordinate, and on this coordinate they differ by $\pm 1$. If $y'_i = y_i + 1$, then the number of neighbors $x' \sim x$ that are mapped to $y'$ by $y' = (\sum_t x'_{1,t},\dots,\sum_t x'_{k,t})$ equals the number of zeros in the $i$-th block of $x$, i.e., it equals $\ell - y_i$. Similarly, in the case $y'_i = y_i - 1$ the number of $x' \sim x$ that are mapped to $y'$ equals $y_i$. In both cases, there are between 1 to $\ell$ points $x' \sim x$ that are mapped to each neighbor $y' \sim y$. Thus,
483
+
484
+ $$
485
+ \sum_{x' \sim x} |f(x') - f(x)| = \sum_{x' \sim x} |g(y') - g(y)| \leq \ell \cdot \sum_{y' \sim y} |g(y') - g(y)|.
486
+ \quad \square
487
+ $$
488
+ ---PAGE_BREAK---
489
+
490
+ **Construction of a Separation.** Let $k, \ell$ be integers. We construct $f : \{0, 1, \dots, \ell\}^k \to [0, 1]$ such that $f(0) = 0$, $f(e_i \cdot \ell) = 1$ for all $i \in [k]$ and $s(f) \le O(k^{1/\ell})$.
491
+
492
+ Define a weight function $w : \{0, 1, \dots, \ell\} \to [0, 1]$ as follows: $w(a) = k^{a/\ell}/k$ for $a \in \{1, \dots, \ell\}$ and $w(0) = 0$. Take $g : \{0, \dots, \ell\}^k \to \mathbb{R}^+$ to be the function $g(x_1, \dots, x_k) = \sum_{i=1}^k w(x_i)$ and take $f : \{0, \dots, \ell\}^k \to [0, 1]$ to be $f(x) = \min\{1, g(x)\}$. Then $f(0^k) = 0$ and $f(\ell \cdot e_i) = 1$ for all $i \in [k]$.
493
+
494
+ **Theorem 4.6.** $s(f) \le 3 \cdot k^{1/\ell}$.
495
+
496
+ *Proof.* Let $x \in \{0, 1, \dots, \ell\}^k$ be a point in the lattice. We distinguish between two cases $g(x) \ge 2$ and $g(x) < 2$. In the first case, all neighbors $x' \sim x$ have $g(x') \ge 1$ since the sums $\sum_i w(x_i)$ and $\sum_i w(x'_i)$ differ by at most 1. Since both $g(x)$ and $g(x')$ are at least 1 we get that $f(x) = f(x') = 1$ and the sensitivity of $f$ at $x$ is 0.
497
+
498
+ In the latter case, $g(x) < 2$, we bound the sensitivity as well. For ease of notation we extend $w$ to be defined over $\{-1, \dots, \ell+1\}$ by taking $w(\ell+1) = w(\ell)$ and $w(-1) = w(0)$. We extend also $g$ to $\{-1, 0, \dots, \ell+1\}^k \to \mathbb{R}^+$ by taking $g(x_1, \dots, x_k) = \sum_i w(x_i)$. We have
499
+
500
+ $$
501
+ \begin{align*}
502
+ s(f,x) \le s(g,x) &= \sum_{i=1}^{k} |g(x+e_i) - g(x)| + |g(x) - g(x-e_i)| \\
503
+ &= \sum_{i=1}^{k} |w(x_i+1) - w(x_i)| + |w(x_i) - w(x_i-1)| \\
504
+ &= \sum_{i=1}^{k} w(x_i+1) - w(x_i-1) && (w \text{ is monotone}) \\
505
+ &\le \sum_{i=1}^{k} w(x_i+1) && (w \text{ is non-negative}) \\
506
+ &\le \sum_{i:x_i=0} w(1) + \sum_{i:x_i>0} w(x_i) \cdot k^{1/\ell} \\
507
+ &\le k \cdot \frac{k^{1/\ell}}{k} + \sum_{i} w(x_i) \cdot k^{1/\ell} \\
508
+ &= k^{1/\ell} + g(x) \cdot k^{1/\ell} \le 3k^{1/\ell}. && \square
509
+ \end{align*}
510
+ $$
511
+
512
+ We show that Theorem **4.3** is a corollary of Theorem **4.6**.
513
+
514
+ *Proof of Theorem 4.3.* Let $k = n/\ell$. Let $f : \{0, 1, \dots, \ell\}^k \to [0, 1]$ be the function in Theorem **4.6**. Take $h(x_{1,1}, \dots, x_{k,\ell}) = f(\sum_{i=1}^\ell x_{1,i}, \dots, \sum_{i=1}^\ell x_{k,i})$. For $x = 0^n$, there are $k$ disjoint blocks $B_1, \dots, B_k$ of size $\ell$ each such that $h(x+B_i) = 1$. Hence, $bs_\ell(h) \ge k = n/\ell$. By Claim **4.5**, the sensitivity of $h$ is at most $s(f) \cdot \ell \le 3 \cdot k^{1/\ell} \cdot \ell \le 3 \cdot n^{1/\ell} \cdot \ell$ which completes the proof. $\square$
515
+
516
+ # 5 New Separations between Decision Tree Complexity and Sensitivity
517
+
518
+ We report a new separation between the decision tree complexity and the sensitivity of Boolean functions. We construct an infinite family of Boolean functions with
519
+
520
+ $$ DT(f_n) \geq s(f_n)^{1+\log_{14}(19)} \geq s(f_n)^{2.115}. $$
521
+ ---PAGE_BREAK---
522
+
523
+ Our functions are transitive functions, and are inspired by the work of Chakraborty [Cha11].
524
+
525
+ Our construction is based on finding a “gadget” Boolean function $f$, defined over a constant number of variables, such that $s^0(f) = 1$, $s^1(f) = k$ and $\text{DT}(f) = \ell$ for $\ell > k$ (recall that $s^0(f) = \max_{x:f(x)=0} s(f,x)$ and similarly $s^1(f) = \max_{x:f(x)=1} s(f,x)$). Given the gadget $f$, we construct an infinite family of functions with super-quadratic gap between the sensitivity and the decision tree complexity using compositions (which is a well-used trick in query complexity separations, cf. [Tal13]).
526
+
527
+ **Lemma 5.1.** Let $f: \{0,1\}^c \to \{0,1\}$ such that $s^0(f) = 1$, $s^1(f) = k$ and $\text{DT}(f) = \ell > k$. Then, there exists an infinite family of functions $\{g_i\}_{i \in \mathbb{N}}$ such that $s(g_i) \le k^i$ and $\text{DT}(g_i) = (k\ell)^i \ge s(g_i)^{1+\log(\ell)/\log(k)}$.
528
+
529
+ *Proof.* Take $g = \text{OR}_k \circ f$. It is easy to verify that $s(g) = k$, and that $\text{DT}(g) = \text{DT}(\text{OR}_k) \cdot \text{DT}(f) = k\ell$ (for the latter, one can use [Tal13, Lemma 3.1]). For $i \in \mathbb{N}$, we take $g_i = g^i$. It is well-known (cf. [Tal13, Lemma 3.1]) that $s(g^i) \le s(g)^i$ and that $\text{DT}(g^i) = \text{DT}(g)^i$, which completes the proof. $\square$
530
+
531
+ ## 5.1 Finding a Good Gadget
532
+
533
+ The gadget $f$ will be a minterm-cyclic function. Roughly speaking, a function $f: \{0,1\}^n \to \{0,1\}$ is minterm-cyclic if there exists pattern $p \in \{0,1,*\}^n$ such that the function $f$ simply checks if $x$ matches one of the cyclic shifts of $p$. The formal definition follows
534
+
535
+ **Definition 5.2.** A pattern $p \in \{0, 1, *\}^n$ is a partial assignment to the variables $x_1, \dots, x_n$. We say that a point $x \in \{0, 1\}^n$ matches the pattern $p$, denoted by $p \subseteq x$, if for all $i \in [n]$ such that $p_i \in \{0, 1\}$ we have $p_i = x_i$. Given a pattern $p$, let $\text{CS}(p) = \{p^1, \dots, p^n\}$ be the set of cyclic shifts of $p$, where the $i$-th cyclic shift of $p$ is given by $p^i = (p_i, p_{i+1}, \dots, p_n, p_1, \dots, p_{i-1})$. For a pattern $p \in \{0, 1, *\}^n$ we denote by $f_p: \{0, 1\}^n \to \{0, 1\}$ the function defined by
536
+
537
+ $$f_p(x) = 1 \iff \exists p^i \in \text{CS}(p) : p^i \subseteq x$$
538
+
539
+ and call $f_p$ the minterm cyclic function defined by $p$.
540
+
541
+ For example, the pattern $p = 0011**$ defines a function $f_p$ that checks if there's a sequence of two zeros followed by two ones in $x$, when $x$ is viewed as a cyclic string. We say that two patterns $p, q \in \{0, 1, *\}^n$ disagree on a coordinate $i$ if both $p_i$ and $q_i$ are in $\{0, 1\}$ and $p_i \neq q_i$.
542
+
543
+ **Claim 5.3.** Let $p \in \{0, 1, *\}^n$ be a pattern defining $f_p: \{0, 1\}^n \to \{0, 1\}$. Assume that any two different cyclic-shifts of $p$ disagree on at least 3 coordinates. Then, $s^0(f_p) = 1$.
544
+
545
+ *Proof.* Let $x \in \{0,1\}^n$ with $f_p(x) = 0$ and assume by contradiction that $s(f_p,x) \ge 2$. In such a case, there are two indices $i$ and $j$ such that $f_p(x+e_i) = 1$ and $f_p(x+e_j) = 1$. Let $q$ and $q'$ be the patterns among $\text{CS}(p)$ that $x+e_i$ and $x+e_j$ satisfy, respectively. If $q=q'$, then since both $x+e_i$ and $x+e_j$ satisfy $q$ and they differ on coordinates $i$ and $j$, it must be the case that $q_i=q_j=*$. However, this implies that $x$ satisfies $q$ as well, which is a contradiction. If $q \ne q'$, then we get that $q$ and $q'$ may disagree on at most the two coordinates $i$ and $j$, contradicting the assumption that any two different cyclic shifts of $p$ disagree on at least 3 coordinates. $\square$
546
+
547
+ The following fact is easy to verify.
548
+
549
+ **Fact 5.4.** Let $p \in \{0, 1, *\}^n$ be a pattern defining $f_p: \{0, 1\}^n \to \{0, 1\}$. Then, $s^1(f_p) \le c^1(f_p) \le |\{i \in [n] : p_i \in \{0, 1\}\}|$.
550
+ ---PAGE_BREAK---
551
+
552
+ Next, we demonstrate a simple example with better-than-quadratic separation between $\text{DT}(f)$ and $s(f)$. Take the pattern $p = *001011$. Denote by $p^1, \dots, p^7$ all the cyclic shifts of $p$, where in $p^i$ the $i$-th coordinate equals *. It is easy to verify that any $p^i$ and $p^j$ for $i \neq j$ disagree on at least 3 coordinates. Hence, $s^0(f_p) = 1$ and $s^1(f_p) \le 6$. We wish to show that any decision tree $T$ for $f_p$ is of depth 7. Let $x_i$ be the first coordinate read by a decision tree $T$ for $f_p$. Our adversary will answer 0, and will continue to answer as if $x$ matches $p^i$. Assume the decision tree made a decision before reading the entire input. The decision tree must decide 1 since the adversary answered according to $x$ which satisfies $p^i$. However, if the decision tree hasn't read the entire input, there is still an unread coordinate $j$, where $j \neq i$. Let $x' = x + e_j$. Then, the decision tree answers 1 on $x'$ as well. However $x'$ does not match pattern $p^i$ as $(p^i)_j \in \{0,1\}$ and it must be the case that $x_j = (p^i)_j \neq x'_j$.
553
+
554
+ We also need to rule out that $x'$ matches some other pattern. Indeed, if $x'$ matches some other pattern $p^k$ it means that $p^k$ and $p^i$ disagree only on at most one coordinate, which as discussed above cannot happen.
555
+
556
+ Using Lemma 5.1 the function $f_p$ can be turned into an infinite family of functions $g_i$ with $\text{DT}(g_i) = (6 \cdot 7)^i$ and $s(g_i) \le 6^i$. This gives a super-quadratic separation since
557
+
558
+ $$ \text{DT}(g_i) \ge s(g_i)^{1+\log(7)/\log(6)} \ge s(g_i)^{2.086}. $$
559
+
560
+ In a similar fashion, one can show that for the pattern $p = **0*10000*101$ after reading any two input bits from the input there exists a cyclic shift $p^i$ of the pattern from which no $\{0,1\}$ coordinate has been read yet. However, to verify that the input $x$ matches $p^i$ we must read all $\{0,1\}$ positions in $p^i$, which gives $\text{DT}(f_p) \ge 9+2$ where 9 is the number of $\{0,1\}$-s in the pattern $p$.
561
+
562
+ The decision tree complexity analysis for the other patterns written below is more involved. We computed it using a computer program written to calculate the decision tree complexity in this special case. In the list below, we report several patterns yielding super-quadratic separations. For each pattern $p$ we report its length $n$, the decision tree complexity of $f_p$, the maximal sensitivity of $f_p$ (which equals the number of $\{0,1\}$ entries in $p$) and the resulting exponent one gets by using Lemma 5.1 (i.e., $1 + \frac{\log \text{DT}(f_p)}{\log s(f_p)}$).
563
+
564
+ <table><tr><td>p = *001011,</td><td>n = 7,</td><td>DT = 7,</td><td>s = 6,</td><td>exp = 2.086</td></tr><tr><td>p = **0*10000*101,</td><td>n = 13,</td><td>DT = 11,</td><td>s = 9,</td><td>exp = 2.091</td></tr><tr><td>p = *****01*1*01100000,</td><td>n = 19,</td><td>DT = 14,</td><td>s = 11,</td><td>exp = 2.100</td></tr><tr><td>p = *****00*0*0010**1*00*011,</td><td>n = 25,</td><td>DT = 17,</td><td>s = 13,</td><td>exp = 2.104</td></tr><tr><td>p = *****1**0**0**1**0**0**0*0*10*1011,</td><td>n = 33,</td><td>DT = 19,</td><td>s = 14,</td><td>exp = 2.115</td></tr></table>
565
+
566
+ **Acknowledgements.** I wish to thank my PhD advisor, Ran Raz, for lots of stimulating and helpful discussions about this problem. I wish to thank Scott Aaronson for his encouragement.
567
+
568
+ ## References
569
+
570
+ [ABG+14] A. Ambainis, M. Bavarian, Y. Gao, J. Mao, X. Sun, and S. Zuo. Tighter relations between sensitivity and other complexity measures. In *ICALP (1)*, pages 101–113, 2014.
571
+
572
+ [AP14] A. Ambainis and K. Prusis. A tight lower bound on certificate complexity in terms of block sensitivity and sensitivity. In *MFCS*, pages 33–44, 2014.
573
+ ---PAGE_BREAK---
574
+
575
+ [APV15] A. Ambainis, K. Prusis, and J. Vihrovs. Sensitivity versus certificate complexity of boolean functions. *CoRR*, abs/1503.07691, 2015.
576
+
577
+ [AS11] A. Ambainis and X. Sun. New separation between $s(f)$ and $bs(f)$. *Electronic Colloquium on Computational Complexity (ECCC)*, 18:116, 2011.
578
+
579
+ [AV15] A. Ambainis and J. Vihrovs. Size of sets with small sensitivity: A generalization of simon's lemma. In *Theory and Applications of Models of Computation - 12th Annual Conference, TAMC 2015, Singapore, May 18-20, 2015, Proceedings*, pages 122-133, 2015.
580
+
581
+ [BdW02] H. Buhrman and R. de Wolf. Complexity measures and decision tree complexity: a survey. *Theor. Comput. Sci.*, 288(1):21-43, 2002.
582
+
583
+ [Bop12] M. Boppana. Lattice variant of the sensitivity conjecture. *Electronic Colloquium on Computational Complexity (ECCC)*, 19:89, 2012.
584
+
585
+ [Cha11] S. Chakraborty. On the sensitivity of cyclically-invariant boolean functions. *Discrete Mathematics & Theoretical Computer Science*, 13(4):51-60, 2011.
586
+
587
+ [GKS15] J. Gilmer, M. Koucký, and M. E. Saks. A new approach to the sensitivity conjecture. In *Proceedings of the 2015 Conference on Innovations in Theoretical Computer Science, ITCS 2015, Rehovot, Israel, January 11-13, 2015*, pages 247-254, 2015.
588
+
589
+ [GNS$^{+}$16] P. Gopalan, N. Nisan, R. A. Servedio, K. Talwar, and A. Wigderson. Smooth boolean functions are easy: Efficient algorithms for low-sensitivity functions. In *ITCS*, pages 59-70, 2016.
590
+
591
+ [HKP11] P. Hatami, R. Kulkarni, and D. Pankratov. Variations on the sensitivity conjecture. *Theory of Computing, Graduate Surveys*, 2:1-27, 2011.
592
+
593
+ [KK04] C. Kenyon and S. Kutin. Sensitivity, block sensitivity, and l-block sensitivity of boolean functions. *Inf. Comput.*, 189(1):43-53, 2004.
594
+
595
+ [Nis89] N. Nisan. Crew prams and decision trees. In *STOC*, pages 327-335, 1989.
596
+
597
+ [NS94] N. Nisan and M. Szegedy. On the degree of Boolean functions as real polynomials. *Computational Complexity*, 4:301-313, 1994.
598
+
599
+ [Rub95] D. Rubinstein. Sensitivity vs. block sensitivity of boolean functions. *Combinatorica*, 15(2):297-299, 1995.
600
+
601
+ [Sim83] H. U. Simon. A tight $\Omega(\log \log n)$-bound on the time for parallel ram's to compute nondegenerated boolean functions. In *Foundations of computation theory*, pages 439-444. Springer, 1983.
602
+
603
+ [Sze15] M. Szegedy. An $O(n^{0.4732})$ upper bound on the complexity of the GKS communication game. *Electronic Colloquium on Computational Complexity (ECCC)*, 22:102, 2015.
604
+
605
+ [Tal13] A. Tal. Properties and applications of boolean function composition. In *ITCS*, pages 441-454, 2013.
606
+
607
+ [Vir11] M. Virza. Sensitivity versus block sensitivity of boolean functions. *Inf. Process. Lett.*, 111(9):433-435, 2011.
608
+ ---PAGE_BREAK---
609
+
610
+ # A Proof of Corollary 4.2
611
+
612
+ *Proof.* Let $x \in \{0,1\}^n$ and $B_1, \dots, B_m$ be the blocks that achieve $bs(f)$. Assume without loss of generality that $B_1, \dots, B_{m'}$ are of size at most $2\ell$ and that $B_{m'+1}, \dots, B_m$ are of size larger than $2\ell$. Then, by the disjointness of $B_{m'+1}, \dots, B_m$ we have that $m-m' \le \frac{n}{2\ell}$. Thus,
613
+
614
+ $$
615
+ \begin{align*}
616
+ bs_\ell(f,x) &\geq \sum_{i=1}^{m'} |f(x)-f(x+B_i)| \\
617
+ &= \sum_{i=1}^{m} |f(x)-f(x+B_i)| - \sum_{i=m'+1}^{m} |f(x)-f(x+B_i)| \\
618
+ &\geq bs(f,x) - (m-m') \geq bs(f,x) - \frac{n}{2\ell} \geq \frac{n}{2\ell}.
619
+ \end{align*}
620
+ $$
621
+
622
+ Assume without loss of generality that $B_1, \dots, B_{m''}$ are blocks such that $|f(x) - f(x + B_i)| \ge \frac{1}{4\ell}$ and that $B_{m''+1}, \dots, B_{m'}$ are not. Then, $\sum_{i=m''+1}^{m'} |f(x) - f(x + B_i)| \le \frac{m'-m''}{4\ell} \le \frac{n}{4\ell}$. This implies that $\sum_{i=1}^{m''} |f(x) - f(x + B_i)| \ge \frac{n}{4\ell}$, and in particular that $m'' \ge \frac{n}{4\ell}$. Thus, there are $m'' \ge n/4\ell$ disjoint blocks of size at most $2\ell$ which change the value of $f$ by at least $\frac{1}{4\ell}$. Theorem 4.1 gives that $s(f) \ge \Omega((m'')^{1/2\ell}/\ell) \ge \Omega(n^{1/2\ell}/\ell)$. $\square$
623
+
samples/texts_merged/4753802.md ADDED
@@ -0,0 +1,646 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ On the irreducibility of certain polynomials
5
+ with coefficients as products of terms in an
6
+ arithmetic progression
7
+
8
+ by
9
+
10
+ CARRIE E. FINCH (Lexington, VA) and N. SARADHA (Mumbai)
11
+
12
+ 1. Introduction. In 1929, Schur [10] used prime ideals in algebraic number fields to prove that the Taylor polynomials for the exponential function, with some possible variations in the coefficients, are irreducible.
13
+
14
+ THEOREM 1. Let $m$ be a positive integer and let $a_0, \dots, a_m$ be arbitrary integers with $|a_0| = |a_m| = 1$. Then
15
+
16
+ $$ (1.1) \qquad a_m \frac{x^m}{m!} + a_{m-1} \frac{x^{m-1}}{(m-1)!} + \dots + a_1 x + a_0 $$
17
+
18
+ is irreducible over the rationals.
19
+
20
+ Filaseta [7] used Newton polygons to obtain Schur's result, and also strengthened the result by allowing more possible values for the leading coefficient than just those of absolute value 1.
21
+
22
+ THEOREM 2. Let $m$ be a positive integer and let $a_0, \dots, a_m$ be arbitrary integers with $|a_0| = 1$ and $0 < |a_m| < m$. Then the polynomial given in (1.1) is irreducible over the rationals except when
23
+
24
+ $$ a_m = \pm 5 \text{ and } m = 6 \quad \text{or} \quad a_m = \pm 7 \text{ and } m = 10. $$
25
+
26
+ Filaseta's use of Newton polygons to demonstrate the irreducibility of the polynomials in (1.1) is based on the theorem of Dumas [3] regarding the construction of the Newton polygon of the product of two polynomials. In particular, Filaseta employs the following useful lemma, from [6].
27
+
28
+ LEMMA 1. Let $k$ and $l$ be integers with $k > l \ge 0$. Suppose $g(x) = \sum_{j=0}^{n} b_j x^j \in \mathbb{Z}[x]$ and $p$ is a prime such that $p \nmid b_n$, $p \mid b_j$ for all $j \in \{0, 1, \dots, n-l-1\}$, and the rightmost edge of the Newton polygon for $g(x)$
29
+
30
+ 2010 Mathematics Subject Classification: Primary 12E05; Secondary 12D05.
31
+ Key words and phrases: arithmetic progressions, greatest prime factor, irreducibility of polynomials, Newton polygons.
32
+ ---PAGE_BREAK---
33
+
34
+ with respect to $p$ has slope $< 1/k$. Then for any integers $a_0, a_1, \ldots, a_n$ with
35
+ $|a_0| = |a_n| = 1$, the polynomial $f(x) = \sum_{j=0}^n a_j b_j x^j$ cannot have a factor
36
+ with degree in the interval $[l+1, k]$.
37
+
38
+ From this lemma, we see that using Newton polygons to eliminate the
39
+ possibility of factors of a particular degree hinges on finding primes that
40
+ divide certain coefficients of the polynomial. To obtain Theorem 2, Filaseta
41
+ appeals to a result of Ecklund, Eggleton, Erdős and Selfridge [5] on prime
42
+ divisors of binomial coefficients. We refer to [7] for details. Moreover, using
43
+ the same tools, Allen and Filaseta [1], [2] proved the following result.
44
+
45
+ **THEOREM 3.** Let $m > 1$ and $a_0, \dots, a_m$ denote arbitrary integers with $|a_0| = 1$.
46
+
47
+ (i) Suppose $m + 1 = k'2^u$ with $k'$ odd and $(m + 1)m = k''2^v3^w$ with $\text{gcd}(k'', 6) = 1$. Let $0 < |a_m| < \min\{k', k''\}$. Then
48
+
49
+ $$
50
+ (1.2) \quad a_m \frac{x^m}{(m+1)!} + a_{m-1} \frac{x^{m-1}}{m!} + \cdots + a_2 \frac{x^2}{2!} + a_1 x + a_0
51
+ $$
52
+
53
+ is irreducible over the rationals.
54
+
55
+ (ii) Suppose $0 < |a_m| < 2m - 1$. Then
56
+
57
+ $$
58
+ (1.3) \quad a_m \frac{x^{2m}}{1 \cdot 3 \cdots (2m-1)} + a_{m-1} \frac{x^{2m-2}}{1 \cdot 3 \cdots (2m-3)} + \dots \\
59
+ \phantom{(1.3) \quad a_m \frac{x^{2m}}{1 \cdot 3 \cdots (2m-1)} + } + a_2 \frac{x^4}{1 \cdot 3} + a_1 \frac{x^2}{1} + a_0
60
+ $$
61
+
62
+ is irreducible over the rationals.
63
+
64
+ We observe that the common thread among the polynomials in (1.1),
65
+ (1.2), and (1.3) is that the denominators of the coefficients are products
66
+ of integers in an arithmetic progression; in the case of (1.1) and (1.2), we
67
+ see an arithmetic progression with initial term 1 and common difference 1,
68
+ and in (1.3) we see an arithmetic progression with initial term 1 and com-
69
+ mon difference 2. In this paper, we prove analogous results by considering
70
+ denominators which are again products of integers in an arithmetic progres-
71
+ sion with initial term an odd integer *a* and common difference 2. Let
72
+
73
+ $$
74
+ (1.4) \quad f(x) = \frac{x^m}{a(a+2)\cdots(a+2(m-1))} + \cdots + \frac{x^2}{a(a+2)} + \frac{x}{a} + 1,
75
+ $$
76
+
77
+ $$
78
+ (1.5) \quad g(x) = a_m \frac{x^m}{a(a+2)\cdots(a+2(m-1))} \\
79
+ \qquad + a_{m-1} \frac{x^{m-1}}{a(a+2)\cdots(a+2(m-2))} + \cdots + a_2 \frac{x^2}{a(a+2)} + a_1 \frac{x}{a} + a_0.
80
+ $$
81
+
82
+ Letting $P(n)$ denote the greatest prime factor of the positive integer $n$
83
+ (putting $P(1) = 1$), we prove the following results.
84
+ ---PAGE_BREAK---
85
+
86
+ **THEOREM 4.** Let $a \ge 1$ be an odd integer, $\max\{a, 110\} \le k \le m/2$ and $a_0, \dots, a_m$ be arbitrary integers with $P(a_0a_m) \le 2k+a$. Then $f(x)$ and $g(x)$ do not have a factor of degree $k$.
87
+
88
+ Now we restrict to $1 \le a < 29$. We have
89
+
90
+ **THEOREM 5.** Let $a$ be an odd integer with $1 \le a < 29$, and let $m > 1$ and $a_0, \dots, a_m$ be arbitrary integers with $P(a_0a_m) \le a + 4$. Then
91
+
92
+ (i) $f(x)$ has no factor of degree $\ge 2$,
93
+
94
+ (ii) $g(x)$ has no factor of degree $\ge 3$, and
95
+
96
+ (iii) $g(x)$ has no factor of degree 2 except perhaps when
97
+
98
+ $$ (a,m) \in \{(21,4), (19,59), (5,121), (19,114), (21,113), (21,163), (21,554)\}. $$
99
+
100
+ Further if there exists a prime $p \ge a+2$ dividing $a+2(m-1)$, then $f(x)$ has no linear factor. Also if such a prime $p$ does not divide $a_0a_m$, then $g(x)$ has no linear factor.
101
+
102
+ When $(a, m) = (21, 4)$, by choosing $a_0 = a_4 = 1$, $a_1 = -15$, $a_2 = -140$, $a_3 = 35$, we see that
103
+
104
+ $$ g(x) = \frac{x^4}{21 \cdot 23 \cdot 25 \cdot 27} + \frac{35x^3}{21 \cdot 23 \cdot 25} - \frac{140x^2}{21 \cdot 23} - \frac{15x}{21} + 1 $$
105
+
106
+ $$ = \frac{1}{326025} (x^2 - 90x - 315)(x^2 + 1035x - 1035). $$
107
+
108
+ We thank the referee for providing this example. As in the proofs of Theorems 2 and 3, our method also depends on the use of Dumas' theorem on Newton polygons for the irreducibility of polynomials. On the other hand, we do not use results from Ecklund, Eggleton, Erdős, and Selfridge [5]. Instead, we establish a Sylvester type result on the greatest prime factor of a product of several consecutive terms in an arithmetic progression. This result is of interest independent of its application to establish Theorems 4 and 5. We show for instance that for any $k \ge 2$,
109
+
110
+ $$ P(n(n+2)\cdots(n+2(k-1))) > 2k+a $$
111
+
112
+ if $n \ge 2k+a$, where $n$ is odd and $a$ is a positive odd integer less than 29, except for an explicitly given set of values of $(n,k,a)$. This result depends on a result of Lehmer [8] and several computations. The above assertion is also true for any odd $a$ provided $k$ is large; see Lemma 5. As an application of Theorem 5 we give another criterion for the irreducibility of (1.3).
113
+
114
+ **COROLLARY 6.** Let $P(a_0a_m) \le 5$. Suppose there exists a prime $p$ such that
115
+
116
+ $$ p \mid (2m-1) \quad \text{and} \quad p \nmid a_0a_m. $$
117
+
118
+ Then the polynomial given in (1.3) is irreducible over the rationals.
119
+ ---PAGE_BREAK---
120
+
121
+ Let $a$ be an even integer equal to $2b$, say. Then $f(x)$ and $g(x)$ can be
122
+ transformed into a polynomial of the form
123
+
124
+ $$
125
+ a'_{m} \frac{x^m}{b(b+1)\cdots(b+m-1)} + \cdots + a'_{1} \frac{x}{b} + a'_{0}
126
+ $$
127
+
128
+ with $a'_m = 1, a'_0, \dots, a'_{m-1}$ integers. The case $b=1$ and $|a'_0| = 1$ is Schur's
129
+ polynomial given in (1.1). For some results on the factors of such polynomials,
130
+ we refer to [11]. We will not deal with this case in the present paper.
131
+
132
+ The remainder of this paper is organized as follows. In Section 2, we prove the result about the greatest prime factor of a product of consecutive terms in arithmetic progression. In Section 3, we use Newton polygons to exclude some factors of the polynomials in question. In particular cases, all factors of degree ≥ 2 are excluded. Theorem 4 comes out as a consequence of Lemmas 5 and 11. In Section 4 we discuss linear factors and prove Theorem 5 from Theorems 8 and 9.
133
+
134
+ **2. Greatest prime factor of a product of integers in arithmetic progression.** The letters $n, d, k$ denote positive integers with $\gcd(n, d) = 1$. Set $\Delta = n(n+d)\cdots(n+(k-1)d)$. Let $\pi(n)$ denote the number of primes $\le n$ and $\pi_d(n)$ be the number of primes $\le n$ that are co-prime to $d$. Let $\nu_p(n)$ denote the power of the prime $p$ in $n$ and $p_i$ denote the $i$th prime. In this section, we obtain lower bounds on $P(\Delta)$, and conclude the section with Theorem 8, a particular result for $P(\Delta)$ when $d=2$.
135
+
136
+ We state without proof our first lemma. See [9] for details.
137
+
138
+ LEMMA 2. For $0 \le i < k$, suppose $P(n + id) \le c_0k$. Let $S = \{n, n + d, \dots, n + (k-1)d\}$. For every prime $p \le c_0k$ with $p \nmid d$, choose $n + ipd \in S$ such that $p$ does not appear to a higher power in the factorization of any other element of $S$. Let $S_1$ be the subset of $S$ obtained by deleting from $S$ all $n + ipd$ with $p \le c_0k$ and $p \nmid d$. Then
139
+
140
+ $$
141
+ \prod_{n+id \in S_1} (n+id) \le (k-1)! \prod_{p|d} p^{-\nu_p((k-1)!)}.
142
+ $$
143
+
144
+ When $d=1$, the product on the right hand side is taken as 1. In the next
145
+ lemma, inequality (i) is an easy consequence of the formula of Legendre on
146
+ the $\nu_p((k-1)!)$. The estimate for $\pi(x)$ in (ii) is due to Dusart [4].
147
+
148
+ LEMMA 3.
149
+
150
+ (i) $\nu_p((k-1)!) \ge \frac{k-p}{p-1} - \frac{\log(k-1)}{\log p}$.
151
+
152
+ (ii) $\pi(x) \le \frac{x}{\log x} \left( 1 + \frac{1.2762}{\log x} \right)$ for $x > 1$.
153
+ ---PAGE_BREAK---
154
+
155
+ LEMMA 4. Let $k \ge 2$, $c_0 > 1$, $c_1 > 0$, $d \ge 1$ and $k - \pi_d(c_0 k) \ge 1$. Suppose $n \ge c_1 k d$ and $P(\Delta) \le c_0 k$. Then
156
+
157
+ $$ (2.1) \qquad (c_1 d)^{k-\pi_d(c_0 k)} \le k^{\pi_d(c_0 k)} \prod_{p|d} p^{-\nu_p((k-1)!)}. $$
158
+
159
+ *Proof.* Observe that $\Delta$ is not divisible by primes dividing $d$ and that every prime $> k$ may divide only one term of $\Delta$. Hence there are at least $k - \pi_d(c_0k) + \pi_d(k)$ terms which are divisible only by primes $\le k$. By deleting a term in which a prime $p \le k$, $p\nmid d$ appears to the maximum power, using the notation from Lemma 2, we see that $|S_1| \ge k - \pi_d(c_0k) \ge 1$. We set $t := |S_1| - 1$. We arrange the elements of $S_1$ as
160
+
161
+ $$ n + i_0 d < n + i_1 d < \dots < n + i_t d. $$
162
+
163
+ Then by Lemma 2,
164
+
165
+ $$ \prod_{v=0}^{t} (n + i_v d) \le (k-1)! \prod_{p|d} p^{-\nu_p((k-1)!)}. $$
166
+
167
+ This gives
168
+
169
+ $$ n^{k-\pi_d(c_0k)} \le n(n+d)\cdots(n+(k-\pi_d(c_0k)-1)d) \le (k-1)! \prod_{p|d} p^{-\nu_p((k-1)!)}. $$
170
+
171
+ Since $n \ge c_1kd$, we get
172
+
173
+ $$ (c_1 d)^{k-\pi_d(c_0 k)} k^{k-\pi_d(c_0 k)} \le k^k \prod_{p|d} p^{-\nu_p((k-1)!)}, $$
174
+
175
+ which gives the assertion of the lemma. ■
176
+
177
+ Putting together the inequalities from Lemma 3 with the result in Lemma 4 and observing that $\pi_d(c_0k) \le \pi(c_0k)$, we obtain the following result.
178
+
179
+ COROLLARY 7. Let $k \ge 2$, $c_0 > 1$, $c_1 > 0$, $d = p$ prime and $k - \pi_p(c_0k) \ge 1$. Suppose that $n \ge c_1kp$ and $P(\Delta) \le c_0k$. Let
180
+
181
+ $$ f(k,p) = \begin{cases} 0 & \text{if } p \ge k, \\ \displaystyle \frac{1}{p-1} - \frac{p}{k(p-1)} - \frac{\log(k-1)}{k \log p} & \text{otherwise.} \end{cases} $$
182
+
183
+ Then
184
+
185
+ $$ p \le \exp \left[ \frac{c_0 + \frac{1.2762c_0}{\log c_0 k} - \left(1 - \frac{c_0}{\log c_0 k} - \frac{1.2762c_0}{(\log c_0 k)^2}\right) \log c_1}{1 - \frac{c_0}{\log c_0 k} - \frac{1.2762c_0}{(\log c_0 k)^2} + f(k,p)} \right]. $$
186
+ ---PAGE_BREAK---
187
+
188
+ For the rest of this section, we restrict our attention to arithmetic progressions with common difference $d = 2$. We denote
189
+
190
+ $$ \Delta_2 = n(n+2)\cdots(n+2(k-1)) \quad \text{with } n \text{ odd.} $$
191
+
192
+ Before we state the next lemma, we note that Allen and Filaseta [1] showed that for every $n \ge 213$, there exists a prime $p \in (n, 1.05n]$. We will use this result in the next lemma.
193
+
194
+ **LEMMA 5.** Let $a \ge 1$ be an odd integer, $k \ge \max\{a, 110\}$ and $n \ge 2k+a$. Then $P(\Delta_2) > 2k+a$.
195
+
196
+ *Proof.* Suppose $P(\Delta_2) \le 2k+a$. First assume that $n \ge 40(k-1)$. Note that $2+a/k \le 3$ since $a \le k$. We apply Corollary 7 with $c_0 = 3$ and $c_1 = 19.5$. Note that $\pi(c_0k) < k$ since $k \ge 110$. We find that the right hand side of the inequality in Corollary 7 is a decreasing function of $k$ since each term involving $k$ is a decreasing function of $k$. Hence if the inequality is not valid for some $k=k_0$, then it is not valid for any $k > k_0$. We check that the inequality is not valid for $k_0=110$. This proves the assertion of the lemma for $n \ge 40(k-1)$.
197
+
198
+ Next we assume that $n < 40(k-1)$. Note that $n \ge 213$. Then there exists a prime $p$ in $\{n+2, \dots, n+2(k-1)\}$ since the interval $(n, 1.05n]$ is contained in $(n, n+2(k-1)]$ as $n < 40(k-1)$. Further this prime exceeds $n \ge 2k+a$, by assumption. Thus $P(\Delta_2) > 2k+a$. $\blacksquare$
199
+
200
+ Now we restrict to odd $a < 29$.
201
+
202
+ **LEMMA 6.** Let $1 \le a < 29$, *a odd*, $k \ge 31$ and $n \ge 2k+a$. Then $P(\Delta_2) > 2k + 29$.
203
+
204
+ *Proof.* Suppose $P(\Delta_2) \le 2k+29$. We follow the argument as in Lemma 5. First let $n \ge 40(k-1)$. We apply Corollary 7 with $c_0 = 2+29/k$ and $c_1 = 19.5$. We check that the inequality in Corollary 7 is not valid for $k_0 = 100$. Thus we may assume that $k \le 99$. Now we check that the inequality (2.1) with actual values of the $\pi$-function is invalid for all $31 \le k \le 99$.
205
+
206
+ Next we assume that $213 \le n < 40(k-1)$. Since now $(n, 1.05n] \subset (n, n+2(k-1)]$, there exists a prime $\ge 2k+a$ dividing $\Delta_2$. Hence we may assume that $n < 213$. Then we need only consider $2k+a \le n < 213$ with $n$ odd. For these finitely many values of $n$ and $k$, we check directly that the assertion of the lemma is true. $\blacksquare$
207
+
208
+ Let $T$ be the set of all integers $M \ge 1$ with $P(M(M+2)) \le 31$. Table 1 below shows 101 such integers put in groups according to the largest prime factor of $M(M+2)$. It follows from Lehmer's work [8] that if $n > 1$ is an
209
+ ---PAGE_BREAK---
210
+
211
+ integer, then $P(n(n+2)) \ge 37$ except when $n = M$ with $M$ given by Table 1.
212
+ Thus Table 1 gives all the integers of $T$.
213
+
214
+ **Table 1**
215
+
216
+ <table><thead><tr><th>p</th><th>Integers M with P(M(M + 2)) = p</th></tr></thead><tbody><tr><td>3</td><td>1</td></tr><tr><td>5</td><td>3, 25</td></tr><tr><td>7</td><td>5, 7, 243</td></tr><tr><td>11</td><td>9, 33, 75</td></tr><tr><td>13</td><td>11, 13, 63, 273, 845, 1573</td></tr><tr><td>17</td><td>15, 49, 117, 119, 187, 1375</td></tr><tr><td>19</td><td>17, 19, 55, 133, 169, 245, 323, 361, 625, 663, 1615, 3211, 3969</td></tr><tr><td>23</td><td>21, 23, 115, 207, 253, 297, 343, 1125, 1309, 2185, 2275, 2873, 3703, 6875, 8073, 9315, 18513, 41743, 57475, 1128125, 1447873</td></tr><tr><td>29</td><td>27, 85, 143, 145, 375, 435, 493, 665, 2871, 8379, 9945, 12673, 14875, 16443, 24563, 41325, 45617, 87723, 184875</td></tr><tr><td>31</td><td>29, 31, 91, 93, 153, 341, 403, 525, 527, 713, 897, 1083, 1519, 1953, 2695, 3625, 4123, 5423, 7161, 19435, 22475, 86273, 130975, 203203, 2509045, 3322053, 287080365</td></tr></tbody></table>
217
+
218
+ The next three lemmas deal with the complementary case of Lemma 6 when $k \le 30$.
219
+
220
+ LEMMA 7. Let $2 \le k \le 30$. Suppose no $M \in T$ is of the form $n + 4j$ for any $j$ with $0 \le j \le (k-2)/2$. Then $P(\Delta_2) \ge 2k + 29$.
221
+
222
+ *Proof.* We divide the integers $n, n + 2, \dots, n + 2(k-1)$ into pairs
223
+
224
+ $$ (2.2) \qquad (n, n+2), (n+4, n+6), \dots $$
225
+
226
+ Note that there are at least $[k/2]$ pairs. By hypothesis, none of these pairs coincides with $(M, M+2)$ for any $M \in T$. Then the product of integers in each pair in (2.2) has a prime factor $\ge 37$. Since these integers are in a block of length at most 30, we see that each pair in (2.2) must have a distinct prime $\ge 37$ dividing their product. Thus $\Delta_2$ is divisible by at least $[k/2]$ primes $\ge 37$. Hence
227
+
228
+ $$ P(\Delta_2) \ge p_{\lfloor k/2 \rfloor+11}. $$
229
+
230
+ We check that $p_{\lfloor k/2 \rfloor+11} \ge 2k+29$ for $2 \le k \le 30$, which completes the proof of the lemma. $\blacksquare$
231
+
232
+ LEMMA 8. Let $3 \le k \le 30$ and $n \ge 2k+29$. Then $P(\Delta_2) \ge 2k+29$ except when $(n,k)$ is one of the following ten pairs:
233
+
234
+ (91, 3), (115, 3), (115, 4), (117, 3), (143, 3), (243, 3), (341, 3),
235
+ (525, 3), (663, 3), (2871, 3).
236
+ ---PAGE_BREAK---
237
+
238
+ *Proof.* By Lemma 7, we need only consider $n$ such that
239
+
240
+ $$M = n + 4j \quad \text{for some } M \in T \text{ and some } j \text{ with } 0 \le j \le (k-2)/2.$$
241
+
242
+ Then we find that
243
+
244
+ $$ (2.3) \qquad 2k+29 \le n \le M \le n+2(k-2). $$
245
+
246
+ Let $p$ be the largest prime $\le M$ and $q$ the smallest prime $\ge \max\{2k+29, M\}$. If $p \ge n$, then (2.3) implies that $p$ divides $\Delta_2$. If $p < n$ and $q \le p+2k$, then $q$ divides $\Delta_2$. Thus for any $k \ge (q-p)/2$, the product $\Delta_2$ is divisible by either $p$ or $q$. So the assertion of the lemma is true provided $p \ge 2k+29$. Thus we may assume that either
247
+
248
+ $$ k < \frac{q-p}{2} \quad \text{or} \quad k > \frac{p-29}{2}. $$
249
+
250
+ Combining with (2.3), we have
251
+
252
+ $$ (2.4) \qquad \begin{array}{l} \max\{2k+29, M-2(k-2)\} \le n \le M, \\ k < \min\left\{31, \frac{q-p}{2}\right\} \quad \text{or} \quad \frac{p-29}{2} < k \le 30. \end{array} $$
253
+
254
+ Thus for each $M \in T$, we check for the finitely many values of $(n,k)$ in (2.4) whether $P(\Delta) \ge 2k+29$. We illustrate the above procedure with an example. Let $M = 243$. Then $p=241$ and $q=251$. Hence
255
+
256
+ $$ k < 5 \quad \text{and} \quad 239 \le n \le 243, \quad n \text{ odd.} $$
257
+
258
+ In these cases we check directly that $P(\Delta) \ge 2k+29$, the only exception being $(n,k) = (243,3)$. By the above procedure we find only the 10 exceptions listed in the statement of the lemma. $\blacksquare$
259
+
260
+ Finally we show
261
+
262
+ LEMMA 9. Let $3 \le k \le 30$ and $n \ge 2k+a$ with $1 \le a < 29$, $a$ odd.
263
+ Assume that $\Delta_2$ is not equal to any of the ten products in Lemma 8. Then
264
+
265
+ $$ P(\Delta_2) > 2k + a $$
266
+
267
+ except when $(n,k,a) \in \{(23,3,17), (31,3,25)\}$.
268
+
269
+ *Proof.* By Lemmas 7 and 8, we need to check the assertion only when
270
+
271
+ $$ 2k + a \le n < 2k + 29, \quad 1 \le a < 29, \quad a \text{ odd}, \quad 3 \le k \le 30, $$
272
+
273
+ which is done by direct computation. $\blacksquare$
274
+
275
+ For each odd $a \in [1,27]$, let $T(a)$ be the set of $M \in T$ for which
276
+ $P(M(M+2)) \le a + 4 \le M$. For example, when $a = 1$, $T(a) = \{25\}$;
277
+ when $a = 3$, $T(a) = \{7,25,243\}$. Let $k$ be given and $n \ge 2k + a$ with
278
+ ---PAGE_BREAK---
279
+
280
+ $1 \le a < 29$, $a$ odd. We denote by $a^*$ the smallest $a$ such that
281
+
282
+ $$P(\Delta_2) \le 2k + a.$$
283
+
284
+ With the above notation, we combine Lemmas 6 and 9 to obtain the following theorem.
285
+
286
+ **THEOREM 8.** Let $k \ge 2$ and $n \ge 2k+a$ with $1 \le a < 29$, $a$ odd. Then
287
+
288
+ $$ (2.5) \qquad P(\Delta_2) > 2k + a $$
289
+
290
+ except for the following values of $n, k$ and $a$:
291
+
292
+ $$ k = 2, 1 \le a < 29 \text{ with } n \in T(a), $$
293
+
294
+ $$ k = 3, (n, a) = (23, 17), (31, 25), $$
295
+
296
+ $$ (2.6) \qquad \begin{aligned} k = 3, (n, a^*) &= (91, 25), (115, 17), (117, 11), (143, 23), (243, 13), \\ & \phantom{k=3,} (341, 25), (525, 25), (663, 23), (2871, 23), \end{aligned} $$
297
+
298
+ $$ k = 4, (n, a^*) = (115, 15). $$
299
+
300
+ *Proof.* Let $k \ge 3$. Suppose $(n, k, a) \in \{(23, 3, 17), (31, 3, 25)\}$. Then $P(\Delta_2) \le 2k+a$ and these exceptions are listed in (2.6). Now assume that $(n, k, a) \notin \{(23, 3, 17), (31, 3, 25)\}$. Then by Lemmas 6 and 9 we find that $P(\Delta_2) > 2k+a$ except possibly when $(n,k)$ equals any of the ten pairs in Lemma 8. Let us take $(n,k) = (91,3)$. Then $P(\Delta_2) = 31 > 2k+a$ except when $a=25,27$. Thus $a^*=25$. Similarly $a^*$ for other pairs in Lemma 8 are found and listed in (2.6).
301
+
302
+ Now we take $k=2$. Then
303
+
304
+ $$ P(n(n+2)) \ge 37 > 2k+a $$
305
+
306
+ for all $n$ except those $n=M$ listed in $T$. For any given odd $a$, $1 \le a < 29$, by our notation $T(a)$ denotes the values of $n \ge 2k+a$ for which (2.5) does not hold. Hence $T(a)$ gives the set of exceptional values of $n$. This proves the theorem. $\blacksquare$
307
+
308
+ **3. Newton polygons.** As mentioned in the Introduction, a result of Dumas [3], from 1906, led Filaseta [6] to Lemma 1. Filaseta also remarks in [6] that this lemma may be strengthened by only requiring that $p$ not divide $a_0a_m$ in place of the condition that $|a_0| = |a_m| = 1$; we make use of this stronger version of the lemma here. In [11], Shorey and Tijdeman gave a refinement of Lemma 1 using the notion of Newton function. Let $f$ be any polynomial of degree $n$ in $\mathbb{Z}[x]$. The *Newton function* $Nf_p(x)$ with respect to a prime $p$ is a real valued function on the interval $[0,n]$ which has the Newton polygon of $f$ with respect to $p$ as its graph. We shall give below a slightly modified version of their lemma.
309
+ ---PAGE_BREAK---
310
+
311
+ LEMMA 10. Let $k$ and $l$ be integers with $k > l \ge 0$. Suppose $u(x) = \sum_{j=0}^n b_j x^j \in \mathbb{Z}[x]$ and $p$ is a prime such that $p \nmid b_n$ and $p | b_j$ for all $j \in \{0, 1, \dots, n-l-1\}$. Let $a_0, a_1, \dots, a_n$ be integers with $p \nmid a_0 a_n$. Put $v(x) = \sum_{j=0}^n a_j b_j x^j$. Then for any factor $h(x)$ of $v(x)$ having degree $k > l$, we have
312
+
313
+ $$Nu_p(y) \le Nh_p(y), \quad Nu_p(n) - Nu_p(n-k+y) \ge Nh_p(k) - Nh_p(y)$$
314
+
315
+ for any $y \in [0, k]$ where $Nh_p(k)$ is a positive integer.
316
+
317
+ *Proof.* Suppose
318
+
319
+ $$v(x) = h(x)w(x)$$
320
+
321
+ with $\deg h(x) = k > l$. If the leftmost edge of the Newton polygon of $v(x)$ is of slope 0, then its $x$-length is $\le l$. Since $h(x)$ is of degree $k > l$, the Newton polygon of $h(x)$ has at least one edge of non-zero slope. Thus $Nh_p(k)$ is a positive integer.
322
+
323
+ From the hypothesis it is clear that
324
+
325
+ $$ (3.1) \qquad \begin{cases} Nu_p(0) = Nv_p(0) = 0, & Nu_p(n) = Nv_p(n), \\ Nu_p(x) \le Nv_p(x) & \text{for } x \in (0, n). \end{cases} $$
326
+
327
+ Further, by Dumas' theorem, we have
328
+
329
+ $$ (3.2) \qquad \begin{cases} Nv_p(y) \le Nh_p(y) & \text{for } y \in [0, k], \\ Nv_p(n) - Nv_p(n-k+y) \le Nu_p(n) - Nu_p(n-k+y) & \text{for } y \in [0, k]. \end{cases} $$
330
+
331
+ Let us translate parallel to *xy*-axes the Newton polygon of *h* defined on the interval $[0, k]$ so that the point $(k, Nh_p(k))$ coincides with $(n, Nv_p(n))$. Then the origin is shifted to $(n-k, Nv_p(n)-Nh_p(k))$ and any $(x, Nh_p(x))$ for $x \in [0, k]$ goes to $(n-k+x, Nh_p(x)+Nv_p(n)-Nh_p(k))$. Thus the shifted Newton polygon of *h* goes from $(n-k, Nv_p(n)-Nh_p(k))$ to $(n, Nv_p(n))$ and it lies on or above the Newton polygon of *v* in the interval $[n-k, n]$, by Dumas' theorem. Hence for any $x \in [0, k]$, we have
332
+
333
+ $$Nh_p(x) + Nv_p(n) - Nh_p(k) \ge Nv_p(n-k+x)$$
334
+
335
+ or
336
+
337
+ $$Nv_p(n) - Nv_p(n-k+x) \ge Nh_p(k) - Nh_p(x).$$
338
+
339
+ Thus from (3.1) and (3.2) we get the assertion of the lemma. $\blacksquare$
340
+
341
+ Note that Lemma 1 follows from the above lemma, since, when the last edge of the Newton polygon of *g* has slope $< 1/k$, then $Ng_p(n) - Ng_p(n-k) < 1$, by taking $u = g$ in the above lemma, from which we get $Nh_p(k) = 0$, a contradiction.
342
+
343
+ LEMMA 11. Let $a, m$ and $k$ be positive integers with $a$ odd and $k \le m/2$. Assume that there exists a prime $p > 2k + a$ dividing
344
+
345
+ $$(a+2(m-k))\cdots(a+2(m-1)).$$
346
+ ---PAGE_BREAK---
347
+
348
+ Then the polynomial
349
+
350
+ $$F(x) = x^m + (a + 2(m-1))x^{m-1} + \cdots + (a + 2(m-1))(a+2(m-2))\cdots a$$
351
+
352
+ has no factor of degree $k$. Also the polynomial
353
+
354
+ $$G(x) = a_m x^m + a_{m-1}(a+2(m-1))x^{m-1} + \cdots + a_0(a+2(m-1))(a+2(m-2))\cdots a$$
355
+
356
+ has no factor of degree $k$, where $a_0, \dots, a_m$ are integers with $p \nmid a_0 a_m$.
357
+
358
+ *Proof.* Write $F(x) = x^m + c_{m-1}x^{m-1} + \cdots + c_1x + c_0$ and $G(x) = a_m x^m + a_{m-1}c_{m-1}x^{m-1} + \cdots + a_1c_1x + a_0c_0$, where
359
+
360
+ (3.3) $\quad c_j = (a+2j)\cdots(a+2(m-1))$ for $0 \le j < m$ and $c_m = 1$.
361
+
362
+ By assumption, there exists a prime $p > 2k + a$ dividing $c_0, c_1, \ldots, c_{m-k}$.
363
+ From Lemma 1 we see that it suffices to show that the slope of the rightmost
364
+ edge of the Newton polygon of $F(x)$ with respect to the prime $p$ is $< 1/k$.
365
+ The slope of the rightmost edge is
366
+
367
+ $$ (3.4) \qquad L = \max_{1 \le j \le m} \left\{ \frac{\nu_p(c_0) - \nu_p(c_j)}{j} \right\}. $$
368
+
369
+ Therefore $L$ equals
370
+
371
+ $$ \nu_p(a(a+2)\cdots(a+2(j-1)))/j \quad \text{for some } 1 \le j \le m. $$
372
+
373
+ Let $j \le k$. Since $p > 2k + a$, we see that
374
+
375
+ $$ \nu_p(a(a+2)\cdots(a+2(j-1)))/j = 0 < 1/k. $$
376
+
377
+ Let $j > k$. Since $p \ge 2k + a + 2$, we have
378
+
379
+ $$ \nu_p(a(a+2)\cdots(a+2(j-1))) \le \nu_p((a+2j)!) < \frac{a+2j}{p-1} \le \frac{a+2j}{a+1+2k} < \frac{j}{k}. $$
380
+
381
+ Thus, $L < 1/k$, as desired, completing the proof. $\blacksquare$
382
+
383
+ *Proof of Theorem 4.* The assertion is a direct consequence of Lemmas 5 and 11. $\blacksquare$
384
+
385
+ We combine Theorem 8 and Lemma 11 to obtain the following theorem.
386
+
387
+ **THEOREM 9.** Let $a \ge 1$ be an odd integer less than 29, and let $m$ be any integer $\ge 2$. Assume that $2m \ne n + 2k - a$ for any $(n,k,a)$ given by (2.6). Then the polynomial $F(x)$ in Lemma 11 has no factor of degree $\ge 2$. Further the polynomial $G(x)$ also has no factor of degree $\ge 2$ provided $a_0$ and $a_m$ are composed of primes $\le a + 4$.
388
+
389
+ **4. Linear factors of $F(x)$ and $G(x)$.** In this section we deal with the linear factors of $F(x)$ and $G(x)$. Again using Lemma 11, we show
390
+ ---PAGE_BREAK---
391
+
392
+ LEMMA 12. Let $a \ge 1$ be an odd integer and let $m \ge 2$. Assume that there exists a prime $p$ such that
393
+
394
+ $$p \nmid a, \quad p \mid (a+2(m-1)), \quad p^{1-1/(p-1)} \ge (a+2)^{1/2}.$$
395
+
396
+ Then the polynomial $F(x)$ has no linear factor. Also the polynomial $G(x)$
397
+ has no linear factor if such a prime $p$ does not divide $a_0a_m$.
398
+
399
+ *Proof.* Since $p$ divides $a + 2(m-1)$, we see that $p \mid c_j$ for $0 \le j < m$ where $c_j$ is given by (3.3). Further we require that $L < 1$ where $L$ is given by (3.4). Thus we need
400
+
401
+ $$
402
+ (4.1) \qquad \nu_p(a(a+2)\cdots(a+2(j-1))) < j \quad \text{for } 1 \le j < m.
403
+ $$
404
+
405
+ Note that $\nu_p(a) = 0$. Hence we may assume that $j > 1$. Also we may suppose
406
+ that $p \le a + 2(j-1)$, otherwise (4.1) holds since the left hand side of the
407
+ inequality is 0. Suppose $p^\alpha \le a + 2(j-1) < p^{\alpha+1}$. Then by taking blocks of
408
+ $p, p^2, \dots, p^\alpha$ successive terms we see that
409
+
410
+ $$
411
+ \begin{align*}
412
+ \nu_p(a(a+2)\cdots(a+2(j-1))) &\le \left(\left\lfloor \frac{j}{p} \right\rfloor + 1\right) + \cdots + \left(\left\lfloor \frac{j}{p^\alpha} \right\rfloor + 1\right) \\
413
+ &< \frac{j}{p-1} + \alpha \le \frac{j}{p-1} + \frac{\log(a+2(j-1))}{\log p}.
414
+ \end{align*}
415
+ $$
416
+
417
+ Thus (4.1) is valid if
418
+
419
+ $$
420
+ p^{j(1-\frac{1}{p-1})} \geq a + 2(j-1),
421
+ $$
422
+
423
+ which is true for $j \ge 2$ by the assumption on $p$. Now the result follows by
424
+ Lemma 1. $\blacksquare$
425
+
426
+ **5. Proof of Theorem 5.** Suppose $g(x)$ has a factor of degree $k$ with $2 \le k \le m/2$. By Theorem 9 we need only consider the exceptional values given in (2.6) with $n = M = a+2(m-k) \ge a+2k$. To exclude these cases, we use Lemma 1. This requires finding a suitable prime so that
427
+
428
+ $$
429
+ (5.1) \qquad p \mid c_0, \dots, c_{m-k} \quad \text{and} \quad L < 1/k
430
+ $$
431
+
432
+ where *L* is given by (3.4). For this, we use the following procedure.
433
+
434
+ Let $p$ be a prime such that
435
+
436
+ $$
437
+ (5.2) \qquad p | c_{m-k}
438
+ $$
439
+
440
+ and $p \mid (a + 2(m - \delta))$ with $\delta \ge 1$ chosen as small as possible.
441
+
442
+ Suppose $a+2h$ is the least integer in $\{a, a+2, \dots, a+2(m-1)\}$ divisible by $p$. Let $\nu_p(a+2(h+ip)) = \theta_i$ for $0 \le i \le r = (m-\delta-h)/p$. Let
443
+
444
+ $$
445
+ L' = \max \left\{ \frac{\theta_0}{h+1}, \frac{\theta_0 + \theta_1}{h+p+1}, \dots, \frac{\theta_0 + \dots + \theta_r}{h+rp+1} \right\}.
446
+ $$
447
+ ---PAGE_BREAK---
448
+
449
+ Then we see that
450
+
451
+ $$
452
+ \begin{align*}
453
+ \nu_p(c_0) &= \dots = \nu_p(c_h), \\
454
+ \nu_p(c_{h+1}) &= \dots = \nu_p(c_{h+p}) = \nu_p(c_0) - \theta_0, \\
455
+ &\vdots \\
456
+ \nu_p(c_{h+(r-1)p+1}) &= \dots = \nu_p(c_{h+rp}) = \nu_p(c_0) - \theta_0 - \dots - \theta_{r-1}, \\
457
+ \nu_p(c_{h+rp+1}) &= \dots = \nu_p(c_{m-1}) = \nu_p(c_0) - \theta_0 - \dots - \theta_r.
458
+ \end{align*}
459
+ $$
460
+
461
+ Since $\theta_r > 0$, we have $L' > 0$. Thus by definition, $L = L'$.
462
+
463
+ Assume that
464
+
465
+ $$
466
+ (5.3) \quad \frac{\theta_0}{h+1} < \frac{1}{k} \quad \text{and} \quad \theta_i < \frac{p}{k} \quad \text{for } 1 \le i \le r.
467
+ $$
468
+
469
+ Then
470
+
471
+ $$
472
+ \begin{align*}
473
+ \frac{\theta_0 + \theta_1}{h+p+1} &= \frac{\theta_0}{h+1} \frac{h+1}{h+p+1} + \frac{\theta_1}{h+p+1} \\
474
+ &< \frac{h+1}{k(h+p+1)} + \frac{p}{k(h+p+1)} = \frac{1}{k}
475
+ \end{align*}
476
+ $$
477
+
478
+ and by induction, we see that
479
+
480
+ $$
481
+ \frac{\theta_0 + \cdots + \theta_s}{h + sp + 1} < \frac{1}{k} \quad \text{for } 1 < s \le r.
482
+ $$
483
+
484
+ Thus $L < 1/k$, which is required in (5.1). Thus we need only satisfy (5.3).
485
+ Since
486
+
487
+ $$
488
+ p^{\theta_i} \le a + 2(h + ip) \le a + 2(m - 1) = M + 2(k - 1),
489
+ $$
490
+
491
+ condition (5.3) is satisfied if
492
+
493
+ $$
494
+ (5.4) \qquad \frac{\theta_0}{h+1} < \frac{1}{k} \quad \text{and} \quad M+2(k-1) < p^{p/k}.
495
+ $$
496
+
497
+ Thus we need only choose a prime $p$ satisfying (5.2) and (5.4). In Tables 2 and 3, we give a choice of $p$ for most of the values of $(M; a)$ listed in (2.6). Note that the choice of $p$ is not unique. When $k = 4$, we have $(M; a) = (115; 15\text{--}27)$. We exclude this case by taking $p = 13$ so that $a + 2h = 39$, giving $6 \le h \le 12$ and $\theta_0 = 1$, which satisfy (5.2) and (5.4).
498
+
499
+ Table 2 (k = 3)
500
+
501
+ <table>
502
+ <thead>
503
+ <tr>
504
+ <td>
505
+ p
506
+ </td>
507
+ <td>
508
+ (M; a)
509
+ </td>
510
+ </tr>
511
+ </thead>
512
+ <tbody>
513
+ <tr>
514
+ <td>
515
+ 7
516
+ </td>
517
+ <td>
518
+ (31; 25)
519
+ </td>
520
+ </tr>
521
+ <tr>
522
+ <td>
523
+ 11
524
+ </td>
525
+ <td>
526
+ (117; 13), (143; 23–27), (341; 25–27), (2871; 23–27)
527
+ </td>
528
+ </tr>
529
+ <tr>
530
+ <td>
531
+ 13
532
+ </td>
533
+ <td>
534
+ (91; 25–27), (115; 17–27), (117; 15–27), (243; 15–27), (663; 23–27)
535
+ </td>
536
+ </tr>
537
+ <tr>
538
+ <td>
539
+ 17
540
+ </td>
541
+ <td>
542
+ (117; 11), (525; 25–27)
543
+ </td>
544
+ </tr>
545
+ <tr>
546
+ <td>
547
+ 19
548
+ </td>
549
+ <td>
550
+ (243; 13)
551
+ </td>
552
+ </tr>
553
+ <tr>
554
+ <td>
555
+ 23
556
+ </td>
557
+ <td>
558
+ (23; 17)
559
+ </td>
560
+ </tr>
561
+ </tbody>
562
+ </table>
563
+ ---PAGE_BREAK---
564
+
565
+ **Table 3** ($k=2$)
566
+
567
+ <table><thead><tr><th>p</th><th>(M; a)</th></tr></thead><tbody><tr><td>5</td><td>(5; 1), (13; 9), (25; 1, 7, 9, 11, 17)</td></tr><tr><td>7</td><td>(7; 3), (19; 15), (33; 9–17), (49; 13–17), (63; 9–17), (75; 9–17), (117; 13–17), (133; 15–17, 23–27), (145; 25–27), (243; 3, 9–17, 23–27), (273; 9–17), (343; 23–27), (845; 9–17), (1125; 23–27)</td></tr><tr><td>11</td><td>(31; 27), (33; 7, 19–27), (55; 15–27), (75; 7, 19–27), (119; 13–27), (143; 25–27), (187; 13–27), (207; 19–27), (253; 19–27), (273; 19–27), (297; 19–27), (341; 27), (361; 15–27), (493; 25–27), (625; 15–27), (713; 27), (845; 19–27), (1309; 19–27), (1375; 13–27), (1573; 13–27), (1615; 15–27), (2275; 19–27), (2695; 27), (2871; 25–27), (3969; 15–27), (4123; 27), (5423; 27), (6875; 19–27), (7161; 27), (9315; 19–27), (16443; 25–27), (18513; 19–27), (19435; 27), (24563; 25–27), (41325; 25–27), (41743; 19–27), (45617; 25–27), (57475; 19–27), (86273; 27), (87723; 25–27), (130975; 27), (184875; 25–27), (203203; 27)</td></tr><tr><td>13</td><td>(63; 19–27), (91; 27), (115; 19–27), (117; 19–27), (169; 15–27), (245; 15–27), (323; 15–27), (375; 25–27), (403; 27), (663; 15–27), (897; 27), (1519; 27), (1573; 9), (2873; 19–27), (3211; 15–27), (3625; 27), (3703; 19–27), (8073; 19–27), (9945; 25–27), (12673; 25–27), (22475; 27), (1128125; 19–27), (1447873; 19–27)</td></tr><tr><td>17</td><td>(49; 19–27), (85; 25–27), (153; 27), (525; 27), (527; 27), (1953; 27), (8379; 25–27), (14875; 25–27), (2509045; 27), (3322053; 27)</td></tr><tr><td>19</td><td>(93; 27), (133; 21), (435; 25–27), (665; 25–27), (1083; 27), (2185; 21–27), (287080365; 27)</td></tr><tr><td>23</td><td>(23; 19), (343; 19), (1125; 19), (2185; 19)</td></tr></tbody></table>
568
+
569
+ For the choices of $p$ given in Tables 2 and 3, conditions (5.2) and (5.4) are satisfied and thus all these values are excluded. When $k=2$ and $(M;a) = (243; 7)$, $(1573; 11)$, we take $p=5, 7$, respectively, and compute $L'$ to get $L < 1/k$. Hence these cases are also excluded. Thus all values of $(M;a)$ given in (2.6) are excluded except when
570
+
571
+ $$
572
+ (5.5) \quad (M;a) \in \{(25;\, 3, 5, 13, 15, 19, 21),\ (133;\, 19),\ (243;\, 5, 19, 21),\\
573
+ \phantom{(5.5) \quad} (343;\, 21),\ (1125;\, 21)\}.
574
+ $$
575
+
576
+ Next we illustrate the application of Lemma 10 with an example. Let $(M;a) = (25\text{;}3)$. Then $m=13$ and $u(x) = x^{13} + 27x^{12} + \dots + 3 \cdot 5 \cdot \dots \cdot 27$. The vertices of the Newton polygon of $u(x)$ with respect to the prime $p=3$ are
577
+
578
+ $$
579
+ (0,0) - (9,5) - (12,7) - (13,8).
580
+ $$
581
+
582
+ By Lemma 10, any quadratic factor $h(x)$ satisfies $Nh_3(2) \ge Nu_3(2) = 10/9$
583
+ ---PAGE_BREAK---
584
+
585
+ implying $Nh_3(2) \ge 2$. Also we have
586
+
587
+ $$Nu_3(13) - Nu_3(11) = 8 - 19/3 = 5/3 \ge Nh_3(2),$$
588
+
589
+ which is a contradiction. Thus the case $(M;a) = (25;3)$ is excluded. Below we give the values of $(M;a, p)$ together with the vertices of the corresponding Newton polygon which are excluded by Lemma 10.
590
+
591
+ $$ (25; 5, 3): \quad (0,0) - (9,5) - (12,7), $$
592
+
593
+ $$ (25; 13, 3): \quad (0,0) - (8,5), $$
594
+
595
+ $$ (25; 15, 3): \quad (0,0) - (6,4) - (7,5), $$
596
+
597
+ $$ (25; 19, 3): \quad (0,0) - (5,4). $$
598
+
599
+ As already noted, in the case $(M;a) = (25;21)$ there are reducible polynomials. Thus we are left with six undecided cases in (5.5).
600
+
601
+ In these cases including $(M;a) = (25;21)$ we check directly with MATHEMATICA that the resulting polynomials $f(x)$ do not factor. Thus $f(x)$ has no factors of degree $\ge 2$. This completes the proof of the theorem. $\blacksquare$
602
+
603
+ **Acknowledgments.** The authors would like to thank Professor Michael Filaseta for many helpful discussions. The second author also wishes to thank him for his kind hospitality during her visit to the University of South Carolina in May-June, 2007. We thank Professors T. N. Shorey and R. Tijdeman for providing us with the preprint of their paper [11]. We also owe our sincere thanks to the referee for his/her helpful comments and for pointing out a rectifiable error in the earlier version of the paper.
604
+
605
+ **References**
606
+
607
+ [1] M. Allen and M. Filaseta, *A generalization of a second irreducibility theorem of I. Schur*, Acta Arith. 109 (2003), 65–79.
608
+
609
+ [2] —, —, *A generalization of a third irreducibility theorem of I. Schur*, ibid. 114 (2004), 183–197.
610
+
611
+ [3] G. Dumas, *Sur quelques cas d'irréductibilité des polynômes à coefficients rationnels*, J. Math. Pures Appl. 2 (1906), 191–258.
612
+
613
+ [4] P. Dusart, *Autour de la fonction qui compte le nombre de nombres premiers*, Ph.D. thesis, Université de Limoges, 1998.
614
+
615
+ [5] E. F. Ecklund, Jr., R. B. Eggleton, P. Erdős and J. L. Selfridge, *On the prime factorization of binomial coefficients*, J. Austral. Math. Soc. Ser. A 26 (1978), 257–269.
616
+
617
+ [6] M. Filaseta, *The irreducibility of all but finitely many Bessel polynomials*, Acta Math. 174 (1995), 383–397.
618
+
619
+ [7] —, *A generalization of an irreducibility theorem of I. Schur*, in: Analytic Number Theory, Proc. Internat. Conf. in Honor of Heini Halberstam, Vol. 1, B. C. Berndt, H. G. Diamond and A. J. Hildebrand (eds.), Birkhäuser, Boston, 1996, 371–395.
620
+
621
+ [8] D. H. Lehmer, *On a problem of Störmer*, Illinois J. Math. 8 (1964), 57–79.
622
+ ---PAGE_BREAK---
623
+
624
+ [9] N. Saradha and T. N. Shorey, *Almost perfect powers in arithmetic progression*, Acta Arith. 99 (2001), 363–388.
625
+
626
+ [10] I. Schur, *Einige Sätze über Primzahlen mit Anwendungen auf Irreduzibilitätsfragen, I, II*, Sitzungsber. Preuss. Akad. Wiss. Berlin Phys.-Math. Kl. 1929, 125–136, 370–391.
627
+
628
+ [11] T. N. Shorey and R. Tijdeman, *Generalizations of some irreducibility results by Schur*, preprint.
629
+
630
+ Carrie E. Finch
631
+ Mathematics Department
632
+ Washington and Lee University
633
+ Lexington, VA 24450, U.S.A.
634
+ E-mail: finchc@wlu.edu
635
+
636
+ N. Saradha
637
+ School of Mathematics
638
+ Tata Institute of Fundamental Research
639
+ Homi Bhabha Road
640
+ Mumbai, 400 005, India
641
+ E-mail: saradha@math.tifr.res.in
642
+
643
+ *Received on 24.5.2008*
644
+ *and in revised form on 13.1.2010*
645
+
646
+ (5712)
samples/texts_merged/4971236.md ADDED
@@ -0,0 +1,562 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Parameter Estimation of Bernoulli Distribution using Maximum Likelihood and Bayesian Methods
5
+
6
+ Nurmaita Hamsyiah¹), Khoirin Nisa¹), & Warsono¹)
7
+
8
+ ¹) Department of Mathematics, Faculty of Mathematics and Science, University of Lampung
9
+ Jl. Prof. Dr. Sumantri Brojonegoro No. 1 Bandar Lampung
10
+ Phone Number +62 721 701609 Fax +62 721 702767
11
+ E-mail: itamath98@gmail.com
12
+
13
+ ## ABSTRACT
14
+
15
+ The term parameter estimation refers to the process of using sample data to estimate the parameters of the selected distribution. There are several methods that can be used to estimate distribution parameter(s). In this paper, the maximum likelihood and Bayesian methods are used for estimating the parameter of the Bernoulli distribution, i.e. θ, which is defined as the probability of the success event for two possible outcomes. The maximum likelihood and Bayesian estimators of the Bernoulli parameter are derived; for the Bayesian estimator the beta prior is used. The analytical calculation shows that the maximum likelihood estimator is unbiased while the Bayesian estimator is asymptotically unbiased. However, empirical analysis by Monte Carlo simulation shows that the mean square errors (MSE) of the Bayesian estimator are smaller than those of the maximum likelihood estimator for large sample sizes.
16
+
17
+ **Keywords:** Bernoulli distribution, beta distribution, conjugate prior, parameter estimation.
18
+
19
+ ## 1. INTRODUCTION
20
+
21
+ Parameter estimation is a way to predict the characteristics of a population based on the sample taken. In general, parameter estimation is classified into two types, namely point estimation and interval estimation. The point estimation of a parameter is a value obtained from the sample and is used as a parameter estimator whose value is unknown.
22
+
23
+ Several point estimation methods are used to calculate the estimator, such as moment method, maximum likelihood method, and Bayesian method. The moment method predicts the parameters by equating the values of sample moments to the population moment and solving the resulting equation system [1]. The maximum likelihood (ML) method uses differential calculus to determine the maximum of the likelihood function to obtain the parameters estimates. The Bayesian method differs from the traditional methods by introducing a frequency function for the parameter being estimated namely prior distribution. The Bayesian method combines the prior distribution and sample distribution. The prior distribution is the initial distribution that provides information about the parameters. The sample distribution combined with the prior distribution provides a new distribution i.e. the posterior distribution that expresses a degree of confidence regarding the location of the parameters after the sample is observed [2].
24
+
25
+ Researches on parameter estimation using various methods of various distributions have been done, for example: Bayesian estimation of exponential distribution [3], [4], ML and Bayesian estimations of Poisson distribution [5], Bayesian estimation of Poisson-Exponential distribution [6], and Bayesian estimation of Rayleigh distribution [7].
26
+
27
+ The difference between the ML and the Bayesian methods is that the ML method considers that the parameter is
28
+ ---PAGE_BREAK---
29
+
30
+ an unknown quantity of fixed value and the inference is based only on the information in the sample; while the Bayesian method considers the parameter as a variable that describes the initial knowledge of the parameters before the observation is performed and expressed in a distribution called the prior distribution. After the observation is performed, the information in the prior distribution is combined with the sample data information through Bayesian theorem, and the result is expressed in a distribution form called the posterior distribution, which further becomes the basis for inference in the Bayesian method [8].
31
+
32
+ The Bayesian method has advantages over other methods, one of which is the Bayesian method can be used for drawing conclusions in complicated or extreme cases that cannot be handled by other methods, such as in complex hierarchical models. In addition, if the prior information does not indicate complete and clear information about the distribution of the prior, appropriate assumptions may be given to its distribution characteristics. Thus, if the prior distribution can be determined, then a posterior distribution can be obtained which may require mathematical computation [8].
33
+
34
+ This paper examines the parameter estimation of Bernoulli distribution using ML and Bayesian methods. A review of Bernoulli distribution and Beta distribution is presented in Section 2. The research methodology is described in Section 3. Section 4 provides the results and discussion. Finally, the conclusion is given in Section 5.
35
+
36
+ # 2. THEORETICAL FRAMEWORK
37
+
38
+ ## 2.1 Bernoulli Distribution
39
+
40
+ Bernoulli distribution was introduced by Swiss mathematician Jacob Bernoulli (1654-1705). It is the probability distribution resulting from two outcomes or events in a given experiment, i.e. success ($X = 1$) and fail ($X = 0$), with the probability of the success is $\theta$ and the probability of failure is $1 - \theta$.
41
+
42
+ *Definition*
43
+
44
+ A random variable X is called a Bernoulli random variable (or X is Bernoulli distributed) if and only if its probability distribution is given by
45
+
46
+ $$f(x; \theta) = \theta^x (1-\theta)^{1-x}, \text{ for } x = 0,1.$$
47
+
48
+ *Proposition 1*
49
+
50
+ Bernoulli distribution $f(x; \theta)$ has mean and variance as follows:
51
+
52
+ $$\mu = \theta \text{ and } \sigma^2 = \theta(1 - \theta).$$
53
+
54
+ *Proof:*
55
+
56
+ The mean of Bernoulli random variable X is
57
+ ---PAGE_BREAK---
58
+
59
+ $$
60
+ \begin{align*}
61
+ \mu &= E(X) \\
62
+ &= \sum_{x=0}^{1} x f(x; \theta) \\
63
+ &= \sum_{x=0}^{x=1} x \theta^x (1 - \theta)^{1-x} \\
64
+ &= 0 \cdot \theta^0 (1 - \theta)^{1-0} + 1 \cdot \theta^1 (1 - \theta)^{1-1} = \theta.
65
+ \end{align*}
66
+ $$
67
+
68
+ The variance, i.e. $\sigma^2 = E(X - \mu)^2 = E(X^2) - [E(X)]^2$, of Bernoulli distribution is obtained as follows:
69
+
70
+ $$
71
+ \begin{align*}
72
+ E(X^2) &= \sum_{x=0}^{1} x^2 f(x; \theta) \\
73
+ &= \sum_{x=0}^{1} x^2 f(x; \theta) \\
74
+ &= \sum_{x=0}^{1} x^2 \theta^x (1 - \theta)^{1-x} \\
75
+ &= 0^2 \cdot \theta^0 (1 - \theta)^{1-0} + 1^2 \cdot \theta^1 (1 - \theta)^{1-1} = \theta.
76
+ \end{align*}
77
+ $$
78
+
79
+ Then,
80
+
81
+ $$
82
+ \sigma^2 = E(X - \mu)^2 = \theta - \theta^2 = \theta(1 - \theta).
83
+ $$
84
+
85
+ ## 2.2. Beta Distribution
86
+
87
+ ### Definition
88
+
89
+ A random variable X is called a beta random variable with parameters a and b if the density function of X is given by
90
+
91
+ $$
92
+ f(x) = \begin{cases} \frac{1}{B(a,b)} x^{a-1} (1-x)^{b-1}, & 0 < x < 1 \\ 0, & \text{otherwise} \end{cases}
93
+ $$
94
+
95
+ where $B(a, b)$ is the beta function defined as
96
+
97
+ $$
98
+ B(a, b) = \int_{0}^{1} x^{a-1} (1-x)^{b-1} dx ; a > 0, b > 0. \quad (1)
99
+ $$
100
+
101
+ ### Proposition 2
102
+
103
+ The beta function and the gamma function are connected by
104
+
105
+ $$
106
+ B(a, b) = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}. \qquad (2)
107
+ $$
108
+
109
+ Proof :
110
+
111
+ $$
112
+ \Gamma(a)\Gamma(b) = \int_{x=0}^{\infty} x^{a-1} e^{-x} dx \cdot \int_{y=0}^{\infty} y^{b-1} e^{-y} dy
113
+ $$
114
+ ---PAGE_BREAK---
115
+
116
+ $$ = \int_{y=0}^{\infty} \int_{x=0}^{\infty} x^{a-1} y^{b-1} e^{-x-y} dxdy. $$
117
+
118
+ Let $x = f(z,t) = zt$ and $y = g(z,t) = z(1-t)$, whose Jacobian is $|J(z,t)| = z$; then
119
+
120
+ $$
121
+ \begin{aligned}
122
+ \Gamma(a)\Gamma(b) &= \int_{z=0}^{\infty} \int_{t=0}^{1} (zt)^{a-1}[z(1-t)]^{b-1}e^{-z} |J(z,t)| dt dz \\
123
+ &= \int_{z=0}^{\infty} \int_{t=0}^{1} (zt)^{a-1}[z(1-t)]^{b-1}e^{-z} zdtdz \\
124
+ &= \int_{z=0}^{\infty} \int_{t=0}^{1} z^{a-1+b-1+1}e^{-z} t^{a-1}(1-t)^{b-1} dt dz \\
125
+ &= \int_{z=0}^{\infty} z^{a+b-1}e^{-z} dz \cdot \int_{t=0}^{1} t^{a-1}(1-t)^{b-1} dt \\
126
+ &= \Gamma(a+b)B(a,b).
127
+ \end{aligned}
128
+ $$
129
+
130
+ Then,
131
+
132
+ $$ B(a, b) = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}. $$
133
+
134
+ ### Proposition 3
135
+
136
+ The mean and variance of beta distribution with parameters *a* and *b* are
137
+
138
+ $$ \mu = \frac{a}{a+b} \quad \text{and} \quad \sigma^2 = \frac{ab}{(a+b+1)(a+b)^2}. $$
139
+
140
+ Proof:
141
+
142
+ The proposition can be proved by using the moments of the beta distribution as follows:
143
+
144
+ $$
145
+ \begin{aligned}
146
+ E(X^n) &= \frac{1}{B(a, b)} \int_0^1 x^n x^{a-1} (1-x)^{b-1} dx \\
147
+ &= \frac{1}{B(a, b)} \int_0^1 x^{(a+n)-1} (1-x)^{b-1} dx.
148
+ \end{aligned}
149
+ $$
150
+
151
+ From equations (1) and (2) we obtain
152
+
153
+ $$
154
+ \begin{aligned}
155
+ E(X^n) &= \frac{B(a + n, b)}{B(a, b)} \\
156
+ &= \frac{\displaystyle\frac{\Gamma(a+n)\Gamma(b)}{\Gamma(a+b+n)}}{\displaystyle\frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}}
157
+ \end{aligned}
158
+ $$
159
+ ---PAGE_BREAK---
160
+
161
+ $$
162
+ \begin{align*}
163
+ &= \frac{\Gamma(a+n)\Gamma(b)}{\Gamma(a+b+n)} \times \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \\
164
+ &= \frac{\Gamma(a+n)\Gamma(a+b)}{\Gamma(a+b+n)\Gamma(a)}. \tag{3}
165
+ \end{align*}
166
+ $$
167
+
168
+ Thus the mean and variance of beta distribution will be obtained by substituting $n = 1$ and $n = 2$ to equation (3), then
169
+
170
+ $$
171
+ \begin{align*}
172
+ \text{Mean}(X) &= E(X^1) = \frac{\Gamma(a+1)\Gamma(a+b)}{\Gamma(a+b+1)\Gamma(a)} \\
173
+ &= \frac{a\Gamma(a)\Gamma(a+b)}{(a+b)\Gamma(a+b)\Gamma(a)} \\
174
+ &= \frac{a}{a+b}
175
+ \end{align*}
176
+ $$
177
+
178
+ and $Var(X) = \sigma^2 = E(X^2) - [E(X)]^2$.
179
+
180
+ Since
181
+
182
+ $$
183
+ \begin{align*}
184
+ E(X^2) &= \frac{\Gamma(a+2)\Gamma(a+b)}{\Gamma(a+b+2)\Gamma(a)} \\
185
+ &= \frac{(a+1)\Gamma(a+1)\Gamma(a+b)}{(a+b+1)\Gamma(a+b+1)\Gamma(a)} \\
186
+ &= \frac{(a+1)a\Gamma(a)\Gamma(a+b)}{(a+b+1)(a+b)\Gamma(a+b)\Gamma(a)} \\
187
+ &= \frac{(a+1)a}{(a+b+1)(a+b)},
188
+ \end{align*}
189
+ $$
190
+
191
+ then
192
+
193
+ $$
194
+ \begin{align*}
195
+ \mathrm{Var}(X) &= \frac{(a+1)a}{(a+b+1)(a+b)} - \left(\frac{a}{a+b}\right)^2 \\
196
+ &= \frac{(a+1)a}{(a+b+1)(a+b)} - \frac{a^2}{(a+b)^2} \\
197
+ &= \frac{(a+b)(a^2+a) - a^2(a+b+1)}{(a+b)^2(a+b+1)} \\
198
+ &= \frac{a^3 + a^2b + a^2 + ab - a^3 - a^2b - a^2}{(a+b)^2(a+b+1)} \\
199
+ &= \frac{ab}{(a+b)^2(a+b+1)}.
200
+ \end{align*}
201
+ $$
202
+
203
+ # 3. RESEARCH METHOD
204
+
205
+ The research method for estimating the parameter of Bernoulli distribution in this paper can be described as follows. For ML estimation, the parameter estimation is done by differentiating partially the log of the likelihood
206
+ ---PAGE_BREAK---
207
+
208
+ function and equating it to zero,
209
+
210
+ $$ \frac{\partial \ln L(\theta)}{\partial \theta} = 0 $$
211
+
212
+ to obtain the ML estimator ($\hat{\theta}_{ML}$). The second derivative is then assessed to show that the resulting $\hat{\theta}$ truly maximizes the likelihood function. For the Bayesian method, the parameter estimation is done through the following steps:
213
+
214
+ 1. Form the likelihood function of Bernoulli distribution as follows:
215
+
216
+ $$ L(x_1, x_2, \dots, x_n | \theta) = \prod_{i=1}^{n} f((x_i)|\theta). $$
217
+
218
+ 2. Calculate the joint probability distribution, which is obtained by multiplying the likelihood function and the prior distribution,
219
+
220
+ $$ H(x_1, x_2, \dots, x_n; \theta) = L(x_1, x_2, \dots, x_n | \theta) \cdot \pi(\theta). $$
221
+
222
+ 3. Calculate the marginal probability distribution function,
223
+
224
+ $$ p(x_1, x_2, \dots, x_n) = \int H(x_1, x_2, \dots, x_n; \theta) d\theta. $$
225
+
226
+ 4. Calculate the posterior distribution by dividing the joint probability distribution function by the marginal function,
227
+
228
+ $$ \pi(\theta|x_1, x_2, \dots, x_n) = \frac{H(x_1, x_2, \dots, x_n; \theta)}{p(x_1, x_2, \dots, x_n)} $$
229
+
230
+ The Bayesian parameter estimate of $\theta$ is then produced as the mean of the posterior distribution.
231
+
232
+ After the parameter estimate of $\theta$ is obtained by MLE and Bayesian methods, the evaluation of the estimators is performed by assessing their bias, variance, and mean square error.
233
+
234
+ # 4. RESULT AND DISCUSSION
235
+
236
+ ## 4.1. The ML Estimator of the Bernoulli Distribution Parameter ($\theta$)
237
+
238
+ Let $X_1, X_2, \dots, X_n$ be Bernoulli distributed random sample with $X_i \sim Bernoulli(\theta)$, where $\theta \in \Omega = (0,1)$. The probability function of $X_i$ is
239
+
240
+ $$ f(x_i; \theta) = \theta^{x_i} (1 - \theta)^{1-x_i} \text{ with } x_i \in \{0,1\}. $$
241
+
242
+ The likelihood function of Bernoulli distribution is given by
243
+
244
+ $$
245
+ \begin{align*}
246
+ L(\theta) &= f(x_1, x_2, \dots, x_n; \theta) \\
247
+ &= \prod_{i=1}^{n} f(x_i; \theta) \\
248
+ &= \prod_{i=1}^{n} \theta^{x_i} (1 - \theta)^{1-x_i}
249
+ \end{align*}
250
+ $$
251
+ ---PAGE_BREAK---
252
+
253
+ $$
254
+ \begin{align*}
255
+ &= \theta^{x_1}(1-\theta)^{1-x_1} \cdot \theta^{x_2}(1-\theta)^{1-x_2} \cdots \theta^{x_n}(1-\theta)^{1-x_n} \\
256
+ &= \theta^{\sum_{i=1}^{n} x_i} (1-\theta)^{n-\sum_{i=1}^{n} x_i}. \tag{4}
257
+ \end{align*}
258
+ $$
259
+
260
+ The natural logarithm of the likelihood function is then
261
+
262
+ $$
263
+ \begin{align*}
264
+ \ln L(\theta) &= \ln[\theta^{\sum_{i=1}^{n} x_i} (1-\theta)^{n - \sum_{i=1}^{n} x_i}] \\
265
+ &= \ln \theta^{\sum_{i=1}^{n} x_i} + \ln(1-\theta)^{n - \sum_{i=1}^{n} x_i} \\
266
+ &= \sum_{i=1}^{n} x_i \ln \theta + (n - \sum_{i=1}^{n} x_i) \ln(1-\theta). \tag{5}
267
+ \end{align*}
268
+ $$
269
+
270
+ The ML estimate value of θ is obtained by differentiating equation (5) with respect to θ and equating the differential result to zero, i.e.
271
+
272
+ $$
273
+ \begin{align*}
274
+ \frac{\partial}{\partial \theta} \ln L(\theta) &= \frac{\partial}{\partial \theta} \left[ \sum_{i=1}^{n} x_i \ln \theta + \left( n - \sum_{i=1}^{n} x_i \right) \ln(1-\theta) \right] = 0 \\
275
+ &= \frac{\sum_{i=1}^{n} x_i}{\theta} - \frac{n - \sum_{i=1}^{n} x_i}{1-\theta} = 0 \\
276
+ (1-\theta) \sum_{i=1}^{n} x_i - \theta \left( n - \sum_{i=1}^{n} x_i \right) &= 0 \\
277
+ \sum_{i=1}^{n} x_i - \theta \sum_{i=1}^{n} x_i - n\theta + \theta \sum_{i=1}^{n} x_i &= 0 \\
278
+ \sum_{i=1}^{n} x_i &= n\theta,
279
+ \end{align*}
280
+ $$
281
+
282
+ then we obtain
283
+
284
+ $$
285
+ \hat{\theta} = \frac{1}{n} \sum_{i=1}^{n} x_i.
286
+ $$
287
+
288
+ To show that $\hat{\theta}$ is the value that maximizes the likelihood function $L(\theta)$, it must be confirmed that the second derivative of the likelihood function for $\theta = \hat{\theta}$ is negative:
289
+
290
+ $$
291
+ \begin{align*}
292
+ \frac{\partial^2}{\partial \theta^2} \ln L(\theta) &= \frac{\partial^2}{\partial \theta^2} \left[ \sum_{i=1}^{n} x_i \ln \theta + \left( n - \sum_{i=1}^{n} x_i \right) \ln(1-\theta) \right] \\
293
+ &= -\frac{\sum_{i=1}^{n} x_i}{\theta^2} - \frac{n - \sum_{i=1}^{n} x_i}{(1-\theta)^2} \\
294
+ &= \frac{-(1-\theta)^2 \sum_{i=1}^{n} x_i - \theta^2 (n - \sum_{i=1}^{n} x_i)}{\theta^2 (1-\theta)^2} \\
295
+ &= \frac{-\sum_{i=1}^{n} x_i + 2\theta \sum_{i=1}^{n} x_i - \theta^2 \sum_{i=1}^{n} x_i - n\theta^2 + \theta^2 \sum_{i=1}^{n} x_i}{\theta^2 (1-\theta)^2}
296
+ \end{align*}
297
+ $$
298
+ ---PAGE_BREAK---
299
+
300
+ $$ = \frac{-n\theta^2 + 2\theta \sum_{i=1}^{n} x_i - \sum_{i=1}^{n} x_i}{\theta^2(1-\theta)^2} < 0. $$
301
+
302
+ Since $\hat{\theta}$ maximizes the likelihood function, we conclude that the ML estimator of $\theta$ is given by
303
+
304
+ $$ \hat{\theta}_{ML} = \frac{1}{n} \sum_{i=1}^{n} x_i. $$
305
+
306
+ ## 4.2. The Bayesian Estimator of the Bernoulli Distribution Parameter($\theta$)
307
+
308
+ To estimate $\theta$ using Bayesian method, it is necessary to choose the initial information of a parameter called the prior distribution, denoted by $\pi(\theta)$, to be applied to the basis of the method namely the conditional probability. In this paper, the prior selection for Bernoulli distribution refers to the formation of its likelihood function. From equation (4) we have
309
+
310
+ $$ \pi(\theta) \propto \theta^{\sum_{i=1}^{n} x_i} (1 - \theta)^{n - \sum_{i=1}^{n} x_i}. $$
311
+
312
+ A distribution having probability function in the same form as the above expression is the beta distribution with density function
313
+
314
+ $$ f(\theta; a, b) = \frac{1}{B(a,b)} \theta^{a-1} (1-\theta)^{b-1}, 0 < \theta < 1 $$
315
+
316
+ where $a-1 = \sum_{i=1}^{n} x_i$, $b-1 = n - \sum_{i=1}^{n} x_i$, and $\frac{1}{B(a,b)}$ is the normalizing factor required for the density function to integrate to one.
317
+
318
+ The prior distribution is combined with the sample distribution to produce a new distribution called posterior distribution and denoted by $\pi(\theta|x_1, x_2, \dots, x_n)$. Posterior distribution is obtained by dividing the joint density distribution by the marginal distribution.
319
+
320
+ Joint probability density function of $(x_1, x_2, \dots, x_n)$ is given by:
321
+
322
+ $$
323
+ \begin{align*}
324
+ H(x_1, x_2, \dots, x_n; \theta) &= L(x_1, x_2, \dots, x_n | \theta) \cdot \pi(\theta) \\
325
+ &= \theta^{\sum_{i=1}^{n} x_i} (1-\theta)^{n-\sum_{i=1}^{n} x_i} \cdot \frac{1}{B(a,b)} \theta^{a-1} (1-\theta)^{b-1} \\
326
+ &= \frac{1}{B(a,b)} \theta^{a+\sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1} \tag{6}
327
+ \end{align*}
328
+ $$
329
+
330
+ and the marginal function of $(x_1, x_2, \dots, x_n)$ is obtained as follows:
331
+
332
+ $$ p(x_1, x_2, \dots, x_n) = \int_0^1 H(x_1, x_2, \dots, x_n; \theta) d\theta. $$
333
+
334
+ Using equation (6) we have
335
+
336
+ $$ p(x_1, x_2, \dots, x_n) = \int_0^1 \frac{1}{B(a,b)} \theta^{a+\sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1} d\theta $$
337
+ ---PAGE_BREAK---
338
+
339
+ $$
340
+ \begin{aligned}
341
+ &= \frac{1}{B(a, b)} \int_{0}^{1} \theta^{a + \sum_{i=1}^{n} x_i - 1} (1 - \theta)^{b + n - \sum_{i=1}^{n} x_i - 1} d\theta \\
342
+ &= \frac{1}{B(a,b)} B(a + \sum_{i=1}^{n} x_i, b + n - \sum_{i=1}^{n} x_i).
343
+ \end{aligned}
344
+ \quad (7) $$
345
+
346
+ Then from equation (6) and (7) the posterior distribution can be written as follows:
347
+
348
+ $$
349
+ \begin{aligned}
350
+ \pi(\theta | x_1, x_2, \dots, x_n) &= \frac{H(x_1, x_2, \dots, x_n; \theta)}{p(x_1, x_2, \dots, x_n)} \\
351
+ &= \frac{\frac{1}{B(a,b)} \theta^{a + \sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1}}{\frac{1}{B(a,b)} B(a + \sum_{i=1}^{n} x_i, b+n-\sum_{i=1}^{n} x_i)} \\
352
+ &= \frac{\theta^{a+\sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1}}{B(a+\sum_{i=1}^{n} x_i, b+n-\sum_{i=1}^{n} x_i)}.
353
+ \end{aligned}
354
+ \quad (8) $$
355
+
356
+ The posterior distribution expressed in equation (8) clearly follows a beta distribution as well, with parameters ($a + \Sigma_{i=1}^n x_i$) and ($b + n - \Sigma_{i=1}^n x_i$), i.e.
357
+
358
+ $$ \hat{\theta} \sim \text{Beta}(a + \sum_{i=1}^{n} x_i, b + n - \sum_{i=1}^{n} x_i). $$
359
+
360
+ Since the prior and the posterior distribution of the Bernoulli parameter belong to the same family, i.e. the beta distribution, the beta distribution is called the conjugate prior of the Bernoulli distribution. The posterior mean is used as the parameter estimate of $\theta$ in the Bayesian method. Using Proposition 3, the Bayesian estimator of parameter $\theta$ is obtained as follows:
361
+
362
+ $$
363
+ \begin{aligned}
364
+ \hat{\theta}_B &= \frac{a + \sum_{i=1}^{n} x_i}{a + \sum_{i=1}^{n} x_i + b + n - \sum_{i=1}^{n} x_i} \\
365
+ &= \frac{a + \sum_{i=1}^{n} x_i}{a + b + n}.
366
+ \end{aligned}
367
+ $$
368
+
369
+ ### 4.3. Evaluation of the Estimators Properties
370
+
371
+ The parameter estimation of the Bernoulli distribution is obtained by the MLE and Bayesian methods yields different estimates. The best estimator has to meet the following properties:
372
+
373
+ #### 1. Unbiased
374
+
375
+ An estimator is said to be unbiased if its expected value is equal to the estimated parameter, i.e. $\hat{\theta}$ is an unbiased estimator of $\theta$ if $E(\hat{\theta}) = \theta$. The bias of an estimator is then given by:
376
+
377
+ $$ \operatorname{Bias}(\hat{\theta}) = E(\hat{\theta}) - \theta. \quad (9) $$
378
+
379
+ Let $X_1, X_2, ..., X_n$ be a random sample of Bernoulli($\theta$) observations. Since $\hat{\theta}_{ML} = \frac{1}{n}\sum_{i=1}^n x_i$ is the ML estimator of $\theta$, its expected value is as follows:
380
+ ---PAGE_BREAK---
381
+
382
+ $$
383
+ \begin{align*}
384
+ E(\hat{\theta}_{ML}) &= E\left(\frac{1}{n}\sum_{i=1}^{n} x_i\right) \\
385
+ &= \frac{1}{n}E\left(\sum_{i=1}^{n} x_i\right) \\
386
+ &= \frac{1}{n}\sum_{i=1}^{n} E(x_i) \\
387
+ &= \frac{1}{n} \cdot n\theta = \theta. \tag{10}
388
+ \end{align*}
389
+ $$
390
+
391
+ Since $E(\hat{\theta}_{ML}) = \theta, \hat{\theta}_{MLE}$ is an unbiased estimator of $\theta$.
392
+
393
+ Now consider the Bayesian estimator of θ i.e. $\hat{\theta}_B = \frac{a + \sum_{i=1}^n x_i}{a+b+n}$. The expected value of Bayesian estimator is given by
394
+
395
+ $$
396
+ \begin{align*}
397
+ E(\hat{\theta}_B) &= E \left( \frac{a + \sum_{i=1}^{n} x_i}{a + b + n} \right) \\
398
+ &= \frac{1}{a + b + n} E \left( a + \sum_{i=1}^{n} x_i \right) \\
399
+ &= \frac{1}{a + b + n} \left[ E(a) + E \left( \sum_{i=1}^{n} x_i \right) \right] \\
400
+ &= \frac{1}{a + b + n} \left[ E(a) + \sum_{i=1}^{n} E(x_i) \right] \\
401
+ &= \frac{1}{a + b + n} (a + n\theta). \tag{11}
402
+ \end{align*}
403
+ $$
404
+
405
+ Since $E(\hat{\theta}_B) \neq \theta$, $\hat{\theta}_B$ is a biased estimator of $\theta$. The bias value of $\hat{\theta}_B$ is:
406
+
407
+ $$
408
+ \begin{align}
409
+ \operatorname{Bias}(\hat{\theta}_B) &= E(\hat{\theta}_B) - \theta \nonumber \\
410
+ &= \frac{a+n\theta}{a+b+n} - \theta. \tag{12}
411
+ \end{align}
412
+ $$
413
+
414
+ Although $\hat{\theta}_B$ is a biased estimator of $\theta$, it can be shown that $\hat{\theta}_B$ is asymptotically unbiased. The proof is given as follows:
415
+
416
+ $$
417
+ \begin{align*}
418
+ \lim_{n \to \infty} E(\hat{\theta}_B) &= \lim_{n \to \infty} \frac{a+n\theta}{a+b+n} \\
419
+ &= \lim_{n \to \infty} \frac{\frac{a}{n} + \frac{n\theta}{n}}{\frac{a}{n} + \frac{b}{n} + \frac{n}{n}} \\
420
+ &= \lim_{n \to \infty} \frac{\frac{a}{n} + \theta}{\frac{a}{n} + \frac{b}{n} + 1}
421
+ \end{align*}
422
+ $$
423
+ ---PAGE_BREAK---
424
+
425
+ $$ = \frac{\theta}{1} = \theta. \qquad (13) $$
426
+
427
+ Since $\lim_{n \to \infty} E(\hat{\theta}_B) = \theta$, $\hat{\theta}_B$ is an asymptotically unbiased estimator of $\theta$.
428
+
429
+ ## 2. Efficiency
430
+
431
+ The efficiency of an estimator is observed from its variance. The best parameter estimator is the one that has the smallest variance. This is because the variance of an estimator is a measure of the spread of the estimator around its mean.
432
+
433
+ The variance of ML estimator $\hat{\theta}_{ML}$ is:
434
+
435
+ $$
436
+ \begin{aligned}
437
+ Var(\hat{\theta}_{ML}) &= Var\left(\frac{1}{n}\sum_{i=1}^{n} x_i\right) \\
438
+ &= \frac{1}{n^2} Var\left(\sum_{i=1}^{n} x_i\right) \\
439
+ &= \frac{1}{n^2} \sum_{i=1}^{n} Var(x_i) \\
440
+ &= \frac{1}{n^2} n\theta(1-\theta) \\
441
+ &= \frac{1}{n}\theta(1-\theta).
442
+ \end{aligned}
443
+ \qquad (14) $$
444
+
445
+ While the variance of the Bayesian estimator $\hat{\theta}_B$ is given by:
446
+
447
+ $$
448
+ \begin{aligned}
449
+ Var(\hat{\theta}_B) &= Var\left(\frac{a + \sum_{i=1}^{n} x_i}{a + b + n}\right) \\
450
+ &= \frac{1}{(a+b+n)^2} Var\left(a + \sum_{i=1}^{n} x_i\right) \\
451
+ &= \frac{1}{(a+b+n)^2} \left[ Var(a) + \sum_{i=1}^{n} Var(x_i) \right].
452
+ \end{aligned}
453
+ $$
454
+
455
+ Since $Var(a) = 0$ and $Var(x_i) = \theta(1 - \theta)$, we obtain
456
+
457
+ $$ Var(\hat{\theta}_B) = \frac{1}{(a+b+n)^2} n\theta(1-\theta). \qquad (15) $$
458
+
459
+ From equation (10), it is shown that the ML estimator is unbiased, whereas from equations (11) and (12) it is shown that Bayesian estimator is biased. As a result, the efficiency of the two methods cannot be compared because the efficiency of estimators applies to unbiased estimators.
460
+ ---PAGE_BREAK---
461
+
462
+ ### 3. Consistency
463
+
464
+ The consistency of the estimators is evaluated from their mean square error (MSE). The MSE can be expressed as
465
+
466
+ $$MSE(\hat{\theta}) = E(\hat{\theta} - \theta)^2 = Var(\hat{\theta}) + (\text{bias}\hat{\theta})^2. \quad (16)$$
467
+
468
+ If the sample size grows infinitely, a consistent estimator will give a perfect point estimate of $\theta$. Mathematically, $\hat{\theta}$ is a consistent estimator if and only if
469
+
470
+ $$E(\hat{\theta} - \theta)^2 \to 0 \text{ when } n \to \infty,$$
471
+
472
+ which means that the bias and the variance approaches to 0 if $n \to \infty$.
473
+
474
+ Substituting equation (10) and (14) to equation (16), the MSE of ML estimator $\hat{\theta}_{MLE}$ is then
475
+
476
+ $$
477
+ \begin{aligned}
478
+ E(\hat{\theta}_{MLE} - \theta)^2 &= Var(\hat{\theta}_{MLE}) + (\text{bias}\hat{\theta}_{MLE})^2 \\
479
+ E(\hat{\theta}_{MLE} - \theta)^2 &= Var(\hat{\theta}_{MLE}) = \frac{1}{n}\theta(1-\theta).
480
+ \end{aligned}
481
+ $$
482
+
483
+ For $n \to \infty$, we have
484
+
485
+ $$\lim_{n \to \infty} E(\hat{\theta}_{MLE} - \theta)^2 = \lim_{n \to \infty} \frac{1}{n} \theta(1-\theta) = 0. \quad (17)$$
486
+
487
+ In the same manner, by substituting equation (12) and (15) the MSE of Bayesian estimator $\hat{\theta}_B$ is:
488
+
489
+ $$
490
+ \begin{aligned}
491
+ E(\hat{\theta}_B - \theta)^2 &= Var(\hat{\theta}_B) + (\text{bias}\hat{\theta}_B)^2 \\
492
+ E(\hat{\theta}_B - \theta)^2 &= \left[ \frac{1}{(a+b+n)^2} n\theta(1-\theta) \right] + \left( \frac{a+n\theta}{a+b+n} - \theta \right)^2.
493
+ \end{aligned}
494
+ $$
495
+
496
+ For $n \to \infty$, we have
497
+
498
+ $$\lim_{n \to \infty} (\hat{\theta}_B - \theta)^2 = \lim_{n \to \infty} \left[ \frac{1}{(a+b+n)^2} n\theta(1-\theta) + \left( \frac{a+n\theta}{a+b+n} - \theta \right)^2 \right] = 0. \quad (18)$$
499
+
500
+ From equation (17) and (18), we can conclude that ML and Bayesian estimators are consistent estimators of $\theta$.
501
+
502
+ ### 4.4. Empirical Comparison of the Properties of ML and Bayesian Estimators
503
+
504
+ To compare the ML and Bayesian estimators of $\theta$, a Monte Carlo simulation using R program was conducted. The simulation was performed by generating Bernoulli distributed data with $\theta = 0.1, 0.3$, and $0.5$ and eight different sample sizes, i.e. $n = 20, 50, 100, 300, 500, 1000, 5000$, and $10000$. The simulation was repeated 1000 times for each combination of $\theta$ and $n$. The generated data were used to estimate parameter $\theta$ using the two methods. Furthermore, the bias and MSE of both estimators were calculated using the formulas in equations (9) and (16) and the results are presented in Table 1.
505
+ ---PAGE_BREAK---
506
+
507
+ **Table 1. The bias and MSE of ML and Bayesian estimators of θ**
508
+
509
+ <table><thead><tr><th rowspan="2">θ</th><th rowspan="2">N</th><th colspan="2">Bias</th><th colspan="2">MSE</th></tr><tr><th>ML</th><th>Bayesian<br>(α = 1, β = 1)</th><th>ML</th><th>Bayesian<br>(α = 1, β = 1)</th></tr></thead><tbody><tr><td rowspan="8">0,1</td><td>20</td><td>0,001200</td><td>0,223084</td><td>0,031478</td><td>0,558149</td></tr><tr><td>50</td><td>0,002180</td><td>0,036602</td><td>0,015264</td><td>0,041740</td></tr><tr><td>100</td><td>0,000270</td><td>0,009328</td><td>0,007058</td><td>0,008843</td></tr><tr><td>300</td><td>0,000413</td><td>0,001075</td><td>0,001901</td><td>0,002906</td></tr><tr><td>500</td><td>0,000210</td><td>0,000364</td><td>0,001183</td><td>0,000551</td></tr><tr><td>1000</td><td>0,000195</td><td>0,000091</td><td>0,000503</td><td>0,000128</td></tr><tr><td>5000</td><td>0,000003</td><td>0,000003</td><td>0,000143</td><td>0,000004</td></tr><tr><td>10000</td><td>0,000114</td><td>0,000001</td><td>0,000184</td><td>0,000002</td></tr><tr><td rowspan="7">0,3</td><td>20</td><td>0,003100</td><td>0,536609</td><td>0,001652</td><td>0,493287</td></tr><tr><td>50</td><td>0,001300</td><td>0,090000</td><td>0,003113</td><td>0,142692</td></tr><tr><td>100</td><td>0,000630</td><td>0,021341</td><td>0,001583</td><td>0,067615</td></tr><tr><td>300</td><td>0,000003</td><td>0,002205</td><td>0,000327</td><td>0,002307</td></tr><tr><td>500</td><td>0,000312</td><td>0,000845</td><td>0,000111</td><td>0,008999</td></tr><tr><td>1000</td><td>0,000545</td><td>0,000207</td><td>0,000444</td><td>0,002177</td></tr><tr><td>5000</td><td>0,000384</td><td>0,000088</td><td>0,004443</td><td>1.453419</td></tr></tbody></table>
+
+ *(Note: the remaining rows of Table 1 — n = 10000 for θ = 0,3 and all rows for θ = 0,5 — are illegible in the source document and have been omitted.)*
510
+
511
+ Table 1 shows the bias and MSE values of the ML and Bayesian estimates for a success probability of $\theta = 0.1$, 0.3 and 0.5.
512
+
513
+ From the table it can be seen that the ML estimator produces smaller biases than the Bayesian estimator for small samples (i.e., $n < 1000$). However, when the sample size is equal to or larger than one thousand (i.e., $n$ = 1000, 5000, and 10000), the biases of the Bayesian estimator are smaller than those of the ML estimator.
514
+
515
+ Even though the bias values of ML estimates changes inconsistently throughout the sample sizes, analytically it has been proved that ML estimator is an unbiased estimator.
516
+
517
+ This appears to be different from the bias values for Bayesian estimator.
518
+
519
+ It is because for all the considered success probabilities of the bias values become smaller when the sample size increases, although analytically it is found that Bayesian estimator is a biased estimator.
520
+
521
+ As a result the efficiency of the two estimators cannot be compared.
522
+
523
+ Therefore, to compare the best estimators we use MSE of both estimators.
524
+
525
+ This is because MSE considers both the bias and variance values.
526
+
527
+ The MSE values of the ML and Bayesian estimators shown in Table 1 have a similarity, i.e., the MSE value decreases as the sample size increases and approaches 0.
528
+
529
+ Thus, both estimators are consistent estimators.
530
+
531
+ This also corresponds to the results obtained analytically.
532
+
533
+ Based on the simulation results in this study, it can be seen that for the larger sample sizes Bayesian estimator is better than ML estimator.
534
+
535
+ This is because the MSE value of Bayesian estimator is smaller than the ML estimator.
536
+
537
+ As shown in Table 1, when $\theta = 0.1$, the MSE value
538
+ ---PAGE_BREAK---
539
+
540
+ of the Bayesian estimator is smaller than the ML estimator for *n* = 500, 1000, and 10000; and when $\theta$ = 0.3 and 0.5, the MSE values of the Bayesian estimator are smaller than the ML estimator for *n* = 1000, 5000, and 10000.
541
+
542
+ # 5. CONCLUSION
543
+
544
+ In this paper, we derived the ML and Bayesian estimators (using the beta prior) of the Bernoulli distribution parameter. Analytically we showed that the ML estimator is an unbiased estimator and the Bayesian estimator is a biased estimator of parameter $\theta$. However, the Bayesian estimator is asymptotically unbiased. Based on the simulation result, both the ML and Bayesian estimators are consistent estimators of $\theta$ because the two estimators satisfy the property of consistency, i.e. $E(\hat{\theta} - \theta)^2 \to 0$ when $n \to \infty$. The simulation result also shows that the Bayesian estimator using the beta prior is better than the ML method for large sample sizes ($n \ge 1000$).
545
+
546
+ # REFERENCES
547
+
548
+ [1]. Bain, L.J. and Engelhardt, M. (1992). *Introduction to Probability and Mathematical Statistics*. Duxbury Press, California.
549
+
550
+ [2]. Walpole, R.E dan Myers, R.H. (1995). *Ilmu Peluang dan Statistika untuk Insinyur dan Ilmuwan*. ITB, Bandung.
551
+
552
+ [3]. Al-Kutubi H. S., Ibrahim N.A. (2009). Bayes Estimator for Exponential Distribution with Extension of Jeffery Prior Information. *Malaysian Journal of Mathematical Sciences*. 3(2):297-313.
553
+
554
+ [4]. Nurlaila, D., Kusnandar D.,& Sulistianingsih, E. (2013). Perbandingan Metode Maximum Likelihood Estimation (MLE) dan Metode Bayes dalam Pendugaan Parameter Distribusi Eksponensial. *Buletin Ilmiah Mat. Stat. dan Terapannya*. 2(1):51-56.
555
+
556
+ [5]. Fikhri, M., Yanuar, F., & Yudiantri A. (2014). Pendugaan Parameter dari Distribusi Poisson dengan Menggunakan Metode Maximum Likelihood Estimation (MLE) dan Metode Bayes. *Jurnal Matematika UNAND*. 3(4):152-159.
557
+
558
+ [6]. Singh S. K., Singh, U., & Kumar, M. (2014). Estimation for the Parameter of Poisson-Exponential Distribution under Bayesian Paradigm. *Journal of Data Science*.12:157-173.
559
+
560
+ [7]. Gupta, I. (2017). Bayesian and E-Bayesian Method of Estimation of Parameter of Rayleigh Distribution-A Bayesian Approach under Linex Loss Function. *International Journal of Statistics and Systems*.12(4):791-796.
561
+
562
+ [8]. Box, G.E.P& Tiao, G.C. (1973). *Bayesian Inference in Statistical Analysis*. Addision-Wesley Publishing Company, Philippines.
samples/texts_merged/500594.md ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ New Time Dependent Gravity
5
+ Displays Dark Matter and
6
+ Dark Energy Effects
7
+
8
+ Pharis E. Williams
9
+ Williams Research
10
+ 15247 W. Domingo Ln.
11
+ Sun City West AZ, 85375
12
+
13
+ It is shown that a time dependent gravitational field that is getting weaker with time will produce the effects measured for both the tangential velocity in the arms of spiral galaxies and for the high z supernovas. These results show that the effects that have led to the hypothesis of Dark Matter and Dark Energy may come from the same basic physical phenomena, namely that gravity is getting weaker as a function of time, and not from the existence of exotic matter.
14
+
15
+ *Keywords:* distances and red shifts, dark matter, dark energy, theory
16
+
17
+ **Introduction**
18
+
19
+ Much has been written, hypothesized, and calculated on the subject of Dark Matter and Dark Energy. However, none consider a time dependent gravitational field. A gravitational field that gets weaker with time will display galaxy dynamics responding to a much stronger field before sending light from space towards the Earth
20
+ ---PAGE_BREAK---
21
+
22
+ that can only be received many years later. The theoretical
23
+ basis for such a time dependent gravitational field has already been
24
+ presented [1][2][3]. Three elements of this theory apply to the
25
+ potential explanation of Dark Matter and Dark Energy. These
26
+ elements include:
27
+
28
+ 1. The theory is a five dimensional gauge theory with Weyl geometry [4]. This means that the fields within the theory are gauge fields. However, the theory is not another Kaluza-Klein type of theory in that the fifth dimension describes a real physical property, mass density, and, therefore, is not hidden or obscured by some mathematical technique. The five dimensionality of the gauge theory requires that the gravitational field be time dependent.
29
+
30
+ 2. Quantum Mechanics is required by restricting the Weyl scale factor within the gauge theory to have only a value of unity. This was noted by Schrödinger [5] before he published his wave equations and later it was shown by London [6] that this restriction required Schrödinger's wave equations. This quantization requires that the gauge potentials be non-singular [1].
31
+
32
+ 3. The fundamental Weyl geometry requires that the Poisson brackets and the unit of action be dependent upon the gauge function [1]. This variable unit of action leads to a relation determining the red shift of light coming to Earth from distant stars [12].
33
+
34
+ These three aspects of the new theory suffice to offer a different
35
+ view of the data from which the hypothesis of dark matter and dark
36
+ energy have evolved.
37
+ ---PAGE_BREAK---
38
+
39
+ # Dark Matter
40
+
41
+ Data wherein the tangential velocities of stars in the arms of a spiral galaxy differed from Newtonian predictions were first reported nearly seventy years ago [7]. A fundamental theory supporting these data has not heretofore been given, though empirical theories have been presented. The best of these theories is the Modified Newtonian Dynamics (MOND) [8][9][10]. The theory presented here had its beginning in 1974 and only recently has it been applied to the dynamics of spiral galaxies.
42
+
43
+ Newtonian uniform circular motion equates the gravitational acceleration to the centripetal acceleration so that
44
+
45
+ $$ \frac{GMm}{r^2} = \frac{mv^2}{r}. \qquad (1) $$
46
+
47
+ A time-dependent, non-singular gravitational field, such as the Dynamic Theory predicts, alters Equation (1) to
48
+
49
+ $$ \frac{GMm(1 - H_0 \tau)}{r^2} \left(1 - \frac{\lambda}{r}\right) e^{-\frac{\lambda}{r}} = \frac{mv^2}{r}, \qquad (2) $$
50
+
51
+ where $H_0$ is Hubble's constant and
52
+
53
+ $$ \lambda \equiv \frac{GM}{c^2} \qquad (3) $$
54
+
55
+ as determined by planetary orbits. For this time dependent gravitational field the gravitational acceleration acting on an arm of a galaxy feels is due to the gravitational field of the mass M at a previous time. This previous time is given by the time that it takes for the field to travel from the site of the gravitational field to the point on the arm under consideration. This means that when all the mass is considered to be at the center of the galaxy the time that enters into
56
+ ---PAGE_BREAK---
57
+
58
+ Equation (2) is $\tau = \frac{-r}{c}$ so that, when $r \gg \lambda$ the velocity of the arm of the galaxy would be given by
59
+
60
+ $$v = \sqrt{\frac{GM(1-H_0\tau)}{r}} = \sqrt{GM\left(\frac{1}{r} + \frac{H_0}{c}\right)}. \quad (4)$$
61
+
62
+ Equation (4) shows a very different character than the expression for the velocity for the time independent gravitational field. This expression shows that the velocity of the galaxy arms should not be expected to drop off as the time independent Newtonian gravitational field does.
63
+
64
+ We may look at the 5-dimensional approach of the Dynamic Theory by looking at the Lagrangian
65
+
66
+ $$L = \frac{1}{2}mc^2 (\dot{\tau})^2 + \frac{1}{2}m\dot{r}^2 + \frac{1}{2}m(r\dot{\theta})^2 + GMm(1-H_o\tau)\frac{e^{-\frac{\lambda}{r}}}{r} \quad (5)$$
67
+
68
+ where the universe time, $\tau$, is treated as another variable and t is the local time. The universe time, $\tau$, becomes a geometrical coordinate that makes the problem local-time independent in five dimensions.
69
+
70
+ The time Lagrange equation may then be written as
71
+
72
+ $$\frac{d}{dt}\left[\frac{\partial L}{\partial \dot{\tau}}\right] - \frac{\partial L}{\partial \tau} = 0 = \frac{d}{dt}[m\dot{\tau}] + H_o \lambda m \frac{e^{-\frac{\lambda}{r}}}{r}. \quad (6)$$
73
+
74
+ For a spherically symmetric field the radial equation is
75
+
76
+ $$\frac{d}{dt}\left[\frac{\partial L}{\partial \dot{r}}\right] - \frac{\partial L}{\partial r} = 0 = \frac{d}{dt}[m\dot{r}] - mr\dot{\theta}^2 + GMm(1-H_o\tau)\left(1-\frac{\lambda}{r}\right)\frac{e^{-\frac{\lambda}{r}}}{r^2}. \quad (7)$$
77
+
78
+ The third Lagrange equation becomes
79
+ ---PAGE_BREAK---
80
+
81
+ $$ \frac{d}{dt} \left[ \frac{\partial L}{\partial \dot{\theta}} \right] - \frac{\partial L}{\partial \theta} = 0 = \frac{d}{dt} \left[ m r^2 \dot{\theta} \right]. \quad (8) $$
82
+
83
+ For the problem of spiral galaxy behaviour we may assume the $\lambda << r$ and write the equations of motion as
84
+
85
+ $$ \ddot{\tau} = -\frac{H_o \lambda}{r}, \quad (9) $$
86
+
87
+ $$ \ddot{r} - r\dot{\theta}^2 = -(1-H_o\tau)\frac{GM}{r^2}\left(1-\frac{\lambda}{r}\right) \quad (10) $$
88
+
89
+ and
90
+
91
+ $$ \ddot{\theta} + \frac{2}{r}\dot{r}\dot{\theta} = 0. \quad (11) $$
92
+
93
+ If we now look at uniform circular motion we find that Equation (9) becomes
94
+
95
+ $$ \ddot{\tau} = -\frac{H_o \lambda}{r} = \text{constant} \Rightarrow \frac{d\dot{\tau}}{dt} = -\frac{H_o \lambda}{r} \quad (12) $$
96
+
97
+ so that this may be integrated to get
98
+
99
+ $$ \dot{\tau} = \dot{\tau}_o - \frac{H_o \lambda}{r} (t-t_o) \quad (13) $$
100
+
101
+ which may be integrated again to get
102
+
103
+ $$ \tau = \tau_o - \frac{H_o \lambda}{2r} t^2 + \left( \dot{\tau}_o + \frac{H_o \lambda}{r} t_o \right) t. \quad (14) $$
104
+
105
+ Also for the assumed uniform circular motion Equation (10) may be written as
106
+ ---PAGE_BREAK---
107
+
108
+ $$v^2 = (1 - H_o \tau) \frac{GM}{r} \left(1 - \frac{\lambda}{r}\right) \quad (15)$$
109
+
110
+ where $v$ is the tangential velocity of the uniform circular motion. Putting Equation (14) into Equation (15) obtains
111
+
112
+ $$v^2 = \frac{GM}{r} \left\{ 1 - H_o \tau_o + \frac{H_o^2 \lambda}{2r} t^2 - \left( H_o \dot{\tau}_o + \frac{H_o^2 \lambda}{r} t_o \right) t \right\} \left( 1 - \frac{\lambda}{r} \right). (16)$$
113
+
114
+ We must keep in mind there are two times to be considered. First there is the time it takes for the gravitational change to travel from the center of the galaxy to the point of measurement in the galaxy arm. The second time is for the light signal to travel from the galaxy to the Earth.
115
+
116
+ Let us set $\tau_o = 0$ and $t_o = 0$ at the point in time when the light left the star on its way toward Earth. Now our Equation (16) becomes
117
+
118
+ $$v^2 \approx \frac{GM}{r} \left\{ 1 + \frac{H_o^2 \lambda}{2r} t^2 - H_o \dot{\tau}_o t \right\} \left(1 - \frac{\lambda}{r}\right). \quad (17)$$
119
+
120
+ Time runs from the time the gravitational signal left the center of the galaxy at
121
+
122
+ $$t = \frac{-r}{c}, \qquad (18)$$
123
+
124
+ where $r$ is the distance from the center of the galaxy. Using Equation (18) in Equation (17) we find
125
+
126
+ $$v^2 \approx \frac{GM}{r} \left\{ 1 + \frac{H_o \dot{\tau}_o r}{c} \left( 1 - \frac{H_o^2 \lambda}{2c^2} \right) \right\} \approx \frac{GM}{r} \left\{ 1 + \frac{H_o \dot{\tau}_o r}{c} \right\}. \quad (19)$$
127
+
128
+ Now we need to establish a value for $\tau_o$. Look at the energy at time $t=0$ and $\tau=0$ with $r \gg \lambda$, or
129
+ ---PAGE_BREAK---
130
+
131
+ $$E_o = \frac{1}{2}mc^2(\dot{\tau}_o)^2 + \frac{1}{2}mv^2 - \frac{GMm}{r_o} \quad (20)$$
132
+
133
+ This may be rewritten as
134
+
135
+ $$\frac{2E_o}{mc^2} = (\dot{\tau}_o)^2 + \frac{v^2}{c^2} - \frac{\lambda}{r_o} \quad (21)$$
136
+
137
+ Since the tangential velocities are non-relativistic this requires that
138
+
139
+ $$\dot{\tau}_o = \sqrt{\frac{2E_o}{mc^2} + \frac{\lambda}{r_o}} \quad (22)$$
140
+
141
+ Equation (22) shows that the initial conditions establish the point at which the tangential velocities begin to differ from those predicted by Newtonian gravity. In the absence of a means of evaluating the initial conditions we may turn to experimental results. First, suppose we write the acceleration in the arms of the galaxy as
142
+
143
+ $$a = a_N \left\{ 1 - \frac{H_o^2 \lambda}{2r} t^2 - H_o \dot{\tau}_o t \right\} . \quad (23) \\ \approx a_N \left\{ 1 + H_o \dot{\tau}_o \frac{r}{c} \right\}$$
144
+
145
+ We now use the data that shows the acceleration begins to deviate from Newtonian when the acceleration drops to a value of $1.2 \times 10^{-10}$ m/sec² so that
146
+
147
+ $$a_N = \frac{GM}{r_c^2} \approx 1.2 \times 10^{-10} \Rightarrow r_c \approx \sqrt{\frac{GM}{1.2 \times 10^{-10}}} \quad (24)$$
148
+
149
+ Then requiring
150
+ ---PAGE_BREAK---
151
+
152
+ $$ \dot{\tau}_o = \frac{c}{r_c H_o} \qquad (25) $$
153
+
154
+ sets a value of $\dot{\tau}_o$ in keeping with the data. Equation (23) becomes
155
+
156
+ $$ a \approx a_N \left\{ 1 + \frac{r}{r_c} \right\} \qquad (26) $$
157
+
158
+ where we see the short range Newtonian acceleration and the long range acceleration predicted by MOND.
159
+
160
+ It should be noted that the approximate linearity of the tangential velocity with respect to time of Equations (17) and (19) displays an independence of the time it takes for light to travel from the galaxy to Earth. This apparent independence of time masks the fact that the gravitational strength of the galaxy, relative to the current epoch, depends upon the time of light travel to Earth.
161
+
162
+ ## Dark Energy
163
+
164
+ Data displaying evidence that provided the beginning of the hypothesized dark energy was first presented in 1998 [11]. To date no fundamental theory has had success in explaining these data.
165
+
166
+ The universe expansion factor is taken from general relativity and is
167
+
168
+ $$ \frac{\ddot{a}}{a} = -\frac{4}{3}\pi G \left( \rho + 3\frac{p}{c^2} \right). \qquad (27) $$
169
+
170
+ The mean density and pressure are currently taken to include dark energy and are taken to obey the local conservation of energy relation
171
+
172
+ $$ \dot{\rho} = -3\frac{\dot{a}}{a}\left(\rho + \frac{p}{c^2}\right). \qquad (28) $$
173
+ ---PAGE_BREAK---
174
+
175
+ The first integral of Equations (27) and (28) is the Friedman equation
176
+
177
+ $$ \dot{a}^2 = \frac{8}{3}\pi G\rho a^2 + \text{constant.} \quad (29) $$
178
+
179
+ But consider what happens if one wishes to compare this with the cosmology produced by the non-singular, time dependent, gravitational gauge potential. Then Equation (27) becomes
180
+
181
+ $$ m_g \frac{d^2 x}{dt^2} = \frac{4\pi}{3} \frac{x^3 \rho(t) G m_g}{x^2} \left(1 - \frac{\lambda}{x}\right) (1 - H_o \tau) e^{-\frac{\lambda}{x}} , \quad (30) \\ = \frac{4\pi}{3} G \rho(t) x \left(1 - \frac{\lambda}{x}\right) (1 - H_o \tau) e^{-\frac{\lambda}{x}} $$
182
+
183
+ where $\tau$ is the universe time.
184
+
185
+ Now let us replace $x$ with the co-moving coordinate $x=R(t)r$ where $R(t)$ is the scale factor of the universe and $r$ is the co-moving distance coordinate as is done in the standard model. When we also normalize the density to its value at the present epoch, $\rho_o$, by $\rho(t)=\rho_o R^{-3}(t)$ we obtain
186
+
187
+ $$ \frac{d^2 R}{dt^2} = \frac{4\pi G \rho R}{3} \left( 1 - \frac{\lambda}{rR} \right) (1 - H_o \tau) e^{-\frac{\lambda}{rR}} . \quad (31) $$
188
+
189
+ If we multiply Equation (31) by $dR/dt$ and integrate with respect to time we find
190
+
191
+ $$ \int \dot{R} \frac{d^2 R}{dt^2} dt = \frac{4\pi G \rho}{3} \int (1 - H_o \tau) R \dot{R} dt \quad (32) \\ \frac{\dot{R}^2}{2} - \frac{R_o^2}{2} = \frac{4\pi G \rho}{3} \int (1 - H_o \tau) R dR $$
192
+ ---PAGE_BREAK---
193
+
194
+ We now need to know how to integrate the right hand side of Equation (32). Suppose we consider the time it takes for light to travel from the distant star to Earth, or $t = -\frac{a}{c}$, where $a$ is the distance from the star to Earth and the minus sign comes from looking backwards in time. The radius of the universe now has two parts. The first part is the radius of the universe when the light left the star on its journey to the Earth. Let this radius be $R_0$. Thus we see that
195
+
196
+ $$R = R_0 + a \quad (33)$$
197
+
198
+ and
199
+
200
+ $$\dot{R} = \dot{a} \quad (34)$$
201
+
202
+ Further, from considerations of the dark matter it was determined that the world time was given by
203
+
204
+ $$\tau = \tau_o - \frac{H_o GM}{2c^2 R} t^2 + \left( \dot{\tau}_o + \frac{H_o GM}{c^2 R} t_o \right) t. \quad (35)$$
205
+
206
+ When we set both initial times to zero and use the value of
207
+
208
+ $$\lambda_U = \frac{GM}{c^2}, \quad (36)$$
209
+
210
+ Equation (35) becomes
211
+
212
+ $$\tau = -\frac{H_o \lambda_U}{2R} t^2 + \dot{\tau}_o t. \quad (37)$$
213
+
214
+ Now we find that Equation (32) may be written as
215
+
216
+ $$\dot{a}^2 = \frac{8\pi G\rho}{6c^2} \left\{ \begin{aligned} & 2R_0 c^2 a + (c^2 + H_0 \dot{\tau}_o c R_0) a^2 \\ & + \frac{1}{3} (H_0^2 \lambda_U + H_0 \dot{\tau}_o 2c) a^3 \end{aligned} \right\} + K. \quad (38)$$
217
+ ---PAGE_BREAK---
218
+
219
+ If we set the constant of integration, $K$, to zero, then Equation (38) becomes
220
+
221
+ $$ \dot{a}^2 = H_o^2 \Omega'_M \left\{ R_o a + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o R_o}{c} \right) a^2 + \frac{1}{6} \left( \frac{H_o^2 \lambda_U}{c^2} + \frac{2H_o \dot{\tau}_o}{c} \right) a^3 \right\} . \quad (39) $$
222
+
223
+ where we have used the definitions
224
+
225
+ $$ \rho_c = \frac{3H_o^2}{8\pi G}, \quad \text{and} \quad \Omega'_M = \frac{\rho}{\rho_c}. \qquad (40) $$
226
+
227
+ In Equation (39) we find that the mass density term splits into three terms for a time-dependent gravitational field. For a time-independent gravitational field there was only one term.
228
+
229
+ An interesting aspect of Equation (39) is that the two new mass terms both involve the same time dependence factor as the one that causes the tangential velocity of the arms of spiral galaxies to differ from Newtonian behaviour. That is to say that should the two new terms provide a basis for the current experimental evidence for dark energy it comes from the same source as the basis for dark matter. The time dependence of the gravitational field explains both phenomena.
230
+
231
+ Consider Equation (39) again and add the usual term for radiation so that we find
232
+
233
+ $$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & \Omega'_M \left[ \frac{R_o}{a} + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o R_o}{c} \right) + \frac{1}{6} \left( \frac{H_o^2 \lambda_U}{c^2} + \frac{2H_o \dot{\tau}_o}{c} \right) a \right] \\ & + \Omega_{RO} \end{aligned} \right\} \quad (41) $$
234
+
235
+ where we did not add a term for the cosmological constant. If this is to compare with the usual expression we could write
236
+ ---PAGE_BREAK---
237
+
238
+ $$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & \Omega_M' (1+z)^3 + \Omega_{DM}' (1+z)^3 + \Omega_{DE}' (1+z)^3 \\ & + \Omega_{RO} (1+z)^4 \end{aligned} \right\} \quad (42) $$
239
+
240
+ wherein the sum of the terms are taken to be unity at $z=0$ and the integration constant has been taken to be zero. Equation (41) and (42) would require
241
+
242
+ $$ \Omega_M' \left[ \frac{R_o}{a} + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o R_o}{c} \right) + \frac{1}{6} \left( \frac{H_o^2 \lambda_U}{c^2} + \frac{2H_o \dot{\tau}_o}{c} \right) a \right] + \Omega_{RO} = 1. \quad (43) $$
243
+
244
+ The relation between the red shift and a is
245
+
246
+ $$ 1+z=\left[\frac{a(t_{\text{obs}})}{a(t_{\text{em}})}\right]=\frac{R_o+a}{R_o} \quad (44) $$
247
+
248
+ By putting Equation (44) into (41) we find
249
+
250
+ $$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & \Omega_M' \left[ z + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o a}{z c} \right) + \frac{1}{6} \left( \frac{H_o^2 G M}{c^4} + \frac{2 H_o \dot{\tau}_o}{c} \right) a \right] \\ & + \Omega_{RO} \end{aligned} \right\} \quad (45) $$
251
+
252
+ The fact that these terms have expressions relating them argues that their relative values may be determined.
253
+
254
+ For example, if $\Omega_{RO}$ is taken to be small compared with the mass terms and $\Omega_M$ is set at the typical value of 0.25, then we would require
255
+
256
+ $$ \dot{\tau}_o = \left( \frac{z c \left( 21 - 6z - \frac{H_o^2 \lambda_U a}{c^2} \right)}{H_o a (3+2z)} \right). \quad (46) $$
257
+ ---PAGE_BREAK---
258
+
259
+ Since the source of the light being measured left its origin some time
260
+ after the universe completed the exponential inflationary expansion
261
+ early in universe time, we would have
262
+
263
+ $$
264
+ \dot{\tau}_o = \left( \frac{z c 3 (7 - 2z)}{H_o a (3 + 2z)} \right). \qquad (47)
265
+ $$
266
+
267
+ Putting this back into Equation (45) we find
268
+
269
+ $$
270
+ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \Omega'_M \left[ z + \frac{12-2z}{3+2z} + \frac{z(7-2z)}{3+2z} \right] + \Omega_{RO} \right\}. \quad (48)
271
+ $$
272
+
273
+ There are three terms for the mass with different functions of z.
274
+ Now we would have
275
+
276
+ $$
277
+ \Omega'_{M} = \frac{(3+2z)}{4(3+2z)} = 0.25 \tag{49}
278
+ $$
279
+
280
+ as set above and we can then evaluate each term when z=0. Let us
281
+ associate the middle term with Ω<sub>M</sub>, the first term with Ω<sub>DMO</sub> and the
282
+ remaining term with Ω<sub>DEO</sub>. We would then have the values
283
+
284
+ $$
285
+ \begin{align*}
286
+ \left.\Omega_M\right|_{z=0} &= \left.\Omega'_M \frac{12-2z}{3+2z}\right|_{z=0} = 1 \\
287
+ \left.\Omega_{DM}\right|_{z=0} &= \left.\Omega'_M z\right|_{z=0} = 0 \tag{50} \\
288
+ \left.\Omega_{DE}\right|_{z=0} &= \left.\Omega'_M \frac{z(7-2z)}{3+2z}\right|_{z=0} = 0
289
+ \end{align*}
290
+ $$
291
+
292
+ for z=0.
293
+
294
+ Our overall equation would then be
295
+ ---PAGE_BREAK---
296
+
297
+ $$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \left[ \Omega_M z + \Omega_M \frac{12-2z}{3+2z} + \Omega_M \frac{z(7-2z)}{3+2z} \right] + \Omega_{RO} \right\}, (51) $$
298
+
299
+ where $Ω_M$ varies as $(1+z)^3$.
300
+
301
+ ## Comparing with Experiment
302
+
303
+ The expansion of the universe means the distance between two distant galaxies varies with time as
304
+
305
+ $$ L(t) \propto a(t). \qquad (52) $$
306
+
307
+ The rate of change of the distance is the speed
308
+
309
+ $$ v = \frac{dl}{dt} = Hl, \quad H = \frac{\dot{a}}{a} \qquad (53) $$
310
+
311
+ where H is the time dependent Hubble parameter.
312
+
313
+ A method of measuring of the expansion of the universe comes from measuring the shift of frequencies of light, the red shift, coming from distant stars. The observed wave length, $λ_r$, of a feature in the spectrum that had wavelength $λ_e$ at emission is given by the relation
314
+
315
+ $$ 1+z = \frac{\lambda_r}{\lambda_e} = \frac{a(t_r)}{a(t_e)}. \qquad (54) $$
316
+
317
+ When the velocity is given by $cz$ then Hubble's law is written as
318
+
319
+ $$ cz = Hl \qquad (55) $$
320
+
321
+ from which we see that
322
+
323
+ $$ z = \frac{HL}{c}, \text{ or } H = \frac{cz}{L}. \qquad (56) $$
324
+ ---PAGE_BREAK---
325
+
326
+ An additional feature of the new theory presented here is the expression for the red shift of light from distant stars. This has been shown to be [12]
327
+
328
+ $$ z_{\text{exp}} = \frac{\Delta\lambda}{\lambda_e} = \exp\left\{ \left( \frac{-G}{c^2} \right) \left[ \frac{M_r e^{-\frac{\lambda_r}{R_r}}}{R_r} - \frac{M_e e^{-\frac{\lambda_e}{R_e}}}{R_e} \right] + \left( \frac{HL}{c} \right) \frac{\left( \frac{M_r}{R_r} \right)}{\left( \frac{M_E}{R_E} \right)} \right\} - 1, (57) $$
329
+
330
+ where the subscript r designates values at the time and point of reception, the subscript e represents values at the time and point of emission and the quantities, $M_E$ and $R_E$, represent the mass and mean radius of the Earth. We have also used the subscript 'exp' on the red shift to indicate that it is the experimental value of red shift measured at the receiving location.
331
+
332
+ There are two parts to the red shift. One part is due to the gravitational fields at the points and time of emission and reception and the other part is due to the travel time between emission and reception. This is the part that involves the expansion of the universe. Therefore let us rewrite Equation (57) as
333
+
334
+ $$ z_{\text{exp}} = \exp\left\{ \left( \frac{-G}{c^2} \right) \left[ \frac{M_r e^{-\frac{\lambda_r}{R_r}}}{R_r} - \frac{M_e e^{-\frac{\lambda_e}{R_e}}}{R_e} \right] \right\} \exp\left\{ \left( \frac{HL}{c} \right) \frac{\left( \frac{M_r}{R_r} \right)}{\left( \frac{M_E}{R_E} \right)} \right\} - 1, (58) $$
335
+
336
+ and then rearrange it to get
337
+ ---PAGE_BREAK---
338
+
339
+ $$ z \equiv \frac{HL}{c} = \left\{ \left( \frac{G}{c^2} \right) \left[ \frac{M_r e^{-\frac{\lambda_r}{R_r}}}{R_r} - \frac{M_e e^{-\frac{\lambda_e}{R_e}}}{R_e} \right] + \log (1 + z_{\text{exp}}) \right\} \left\{ \frac{\frac{M_E}{R_E}}{\frac{M_r}{R_r}} \right\} \quad (59) $$
340
+
341
+ which is the red shift of the universe expansion.
342
+
343
+ Two simplifications may now be made. First, in many cases the gravitational component of the experimental red shift may be ignored. Secondly, if we are only using the red shift data measured at the Earth's surface then Equation (59) reduces to
344
+
345
+ $$ z \equiv \frac{HL}{c} = \log(1 + z_{\text{exp}}). \qquad (60) $$
346
+
347
+ This is the red shift value to be used in the expansion velocity of the universe, Equation (51), so that we may write
348
+
349
+ $$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & 0.25(1+\log(1+z_{\text{exp}}))^3 \log(1+z_{\text{exp}}) \\ & +0.25(1+\log(1+z_{\text{exp}}))^3 \frac{12-2\log(1+z_{\text{exp}})}{(3+2\log(1+z_{\text{exp}}))} \\ & +0.25(1+\log(1+z_{\text{exp}}))^3 \frac{\log(1+z_{\text{exp}})(7-2\log(1+z_{\text{exp}}))}{(3+2\log(1+z_{\text{exp}}))} \\ & + \Omega_{RO} \end{aligned} \right\} \quad (61) $$
350
+
351
+ If one can simultaneously measure the red shift and the distance to the object then Equation (56) gives a value of Hubble's parameter
352
+ ---PAGE_BREAK---
353
+
354
+ that may be used in Equation (53) to get the universe expansion velocity.
355
+
356
+ Standard Candles
357
+
358
+ One reason for choosing the Type Ia supernova in the universe expansion research is the assumption that the masses of this type of supernova are all the same; roughly the Chandrasekhar Limit mass of 1.39 solar masses. However, a time dependent gravitational field causes this limit to change with time. This may be seen by considering the Newtonian equation of hydrostatic equilibrium known as the Tolman–Oppenheimer–Volkoff [TOV] equation, or
359
+
360
+ $$ \frac{dp}{dr} = \frac{-GM(r)(1 - H_o\tau)\rho}{r^2}. \qquad (62) $$
361
+
362
+ The gravitational field that is holding the star together against the internal pressure is diminishing in time. This means that the limiting mass increases in time. Supernovae found closer to Earth will have more mass, and therefore greater luminosity, than more distant supernovae. A reduction in luminosity from the assumed constancy would show up in an analysis by making the more distant supernova appear further away than it really is. The natural conclusion, based on the time-independent gravitational field that produces the constant Chandrasekhar limiting mass, would be that the expansion of the universe is accelerating.
363
+
364
+ Using the Virial Theorem development by Collins [13] who arrives at the Chandrasekhar limiting mass with the equation
365
+
366
+ $$ \frac{R_o}{\left(\frac{2GM}{c^2}\right)} > 228 \left(\frac{M_{\text{Sun}}}{M}\right)^{\frac{4}{9}} \approx 200 \qquad (63) $$
367
+ ---PAGE_BREAK---
368
+
369
+ the time dependent gravitational field requires that this relation
370
+ become
371
+
372
+ $$
373
+ \frac{R_o}{\left(\frac{2GM}{c^2}\right)(1-H_o\tau)} > 228 \left(\frac{M_{sun}}{M}\right)^{\frac{4}{9}} \approx 200. \quad (64)
374
+ $$
375
+
376
+ This gives the limiting mass as
377
+
378
+ $$
379
+ M_L = M_{Ch} (1 - H_o \tau)^{-\frac{9}{4}}, \qquad (65)
380
+ $$
381
+
382
+ where $M_{Ch}$ is the Chandrasekhar limiting mass. By differentiating
383
+ Equation (65) with respect to universe time we find the limiting mass
384
+ for the type Ia supernovae to change according to
385
+
386
+ $$
387
+ \frac{dM_L}{d\tau} = \left( \frac{9H_o}{4} \right) M_{Ch} (1 - H_o \tau)^{-\frac{13}{4}} . \quad (66)
388
+ $$
389
+
390
+ Conclusions
391
+
392
+ A time-dependent gravitational field, that gets weaker in time, shows
393
+ the physical effects of this past, stronger field in the dynamics of
394
+ spiral galaxies. This weakening gravitational field also shows up in
395
+ the analysis of the distances to, and red shift of light from,
396
+ supernovas. Here it adds terms to the universe expansion velocity
397
+ relations that are not present in the analysis of time-independent
398
+ fields. It also changes the luminosity of the supernovas that were
399
+ assumed to have constant luminosity. These effects of the time
400
+ dependent gravitational field remove the need for hypothesizing new
401
+ matter or energy to explain these effects.
402
+
403
+ There have been many attempts in the past to find different
404
+ solutions to Einstein's field equations and to show how an expanding
405
+ universe may be viewed in different ways. Portions of the above may
406
+ ---PAGE_BREAK---
407
+
408
+ be reminders of prior approaches. Therefore, it may prove useful to point out what is new in this article.
409
+
410
+ Fundamentally there are three things that are new in this article. First, the fifth dimension is considered to be a real physical entity. All five dimensional theories that I know of in the past, whether by Kaluza-Klein, Einstein with his many collaborators, and others, did not consider the fifth dimension to be real and, therefore, required several terms in the resulting gauge field equations to be zero. Here these terms are non-zero and require that the gravitational potential and field be time-dependent. Second, this article uses the Weyl Gauge Principle as its basis for quantum theory and this requires that the gravitational potential be a non-singular potential. These two things require the gravitational field to be a time-dependent, non-singular, gauge field not seen previously. The third aspect of the article is that the Weyl Gauge Principle requires that the unit of action be dependent upon the gauge function. This requires the red-shift from distant objects to have an exponential dependence upon both the time and distance between emission and reception. The new red-shift relation becomes important in both dark matter and dark energy predictions because both phenomena are witnessed by red-shifted light. The time dependence, or weakening, of the gravitational field is the major factor in predicting effects interpreted as dark matter. The time dependence of the gravitational field also provides the major factor in predictions with respect to dark energy as it is responsible for the diminishing of the luminosity of the distant supernovas used as standard candles and the expression for the expansion of the universe.
411
+
412
+ **References:**
413
+
414
+ [1] Williams, P.E., Mechanical Entropy and its Implications, Entropy, 3, 76-115.
415
+ http://www.mdpi.org/entropy/list01.htm#new
416
+ ---PAGE_BREAK---
417
+
418
+ [2] Williams, P. E., 2002, Energy and Entropy as the Fundaments of Theoretical Physics, Entropy, 4, 128-141.
419
+ http://www.mdpi.org/entropy/htm/e4040128.htm
420
+
421
+ [3] Williams, P. E., 2007, Alternate Communications for Space Travel, Space Technology and Applications International Forum (STAIF-2007), Albuquerque, NM.
422
+
423
+ [4] Weyl, H., 1918, Space Time Matter.
424
+
425
+ [5] Schrödinger, E., 1922, On a Remarkable Property of the Quantum-Orbits of a Single Electron, Zeit. F. Phys. 12.
426
+
427
+ [6] London, F., 1927, Quantum-Mechanical Interpretation of Weyl's Theory, Zeit. F. Phys. 42.
428
+
429
+ [7] Zwicky, F., 1937. On the Masses of Nebulae and of Clusters of Nebulae, Astrophysical Journal, Vol. 86, No. 3.
430
+
431
+ [8] Milgrom, M., 1983a, ApJ 270, 365.
432
+
433
+ [9] Milgrom, M., 1983b, ApJ 270, 371.
434
+
435
+ [10] Milgrom, M., 1983c, ApJ 270, 384.
436
+
437
+ [11] Riess, et. al., 1998, Observational Evidence from Supernovae for an Accelerating Universe and a Cosmological Constant, Astron.J. 116, 1009-1038.
438
+
439
+ [12] Williams, P.E., 2001b, Using the Hubble Telescope to Determine the Split of a Cosmological Object's Redshift in its Gravitational and Distance Parts, Apeiron, Vol. 8, No. 2,
440
+ http://redshift.vif.com/JournalFiles/V08NO2PDF/V08N2WIL.pdf
441
+
442
+ [13] Collins II, 2003, Virial Theorem in Stellar Astrophysics,
443
+ http://ads.harvard.edu/books/1978vtsa.book/
samples/texts_merged/6016935.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/6218816.md ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ 0020-7683(94)00077-8
5
+
6
+ FORMULAS FOR THE STIFFNESS OF COMPOSITES
7
+ WITH PERIODIC MICROSTRUCTURE
8
+
9
+ R. LUCIANO
10
+
11
+ University of Cassino, via Zamosh 43, Cassino, Italy†
12
+
13
+ and
14
+
15
+ E. J. BARBERO
16
+
17
+ West Virginia University, Morgantown, WV 26506-6101, U.S.A.
18
+
19
+ *(Received 19 October 1993; in revised form 23 April 1994)*
20
+
21
+ **Abstract**—In this paper, the mechanical behavior of composite materials with periodic microstructure is analysed. The corresponding elastic problem is solved by using the Fourier series technique and assuming the homogenization eigenstrain to be piecewise constant. Then, the coefficients of the overall stiffness tensor of the composite material are expressed analytically in terms of the elastic properties of the constituents (fibers and matrix) and as a function of nine triple series which take into account the geometry of the inclusions. In the case of composite materials reinforced by long fibers, simple formulas for evaluating these series are proposed. Close-form expressions for the elastic moduli of the fiber reinforced composite with periodic microstructure and for the equivalent transversely isotropic material are obtained. Finally, several comparisons with experimental results are presented.
22
+
23
+ # I. INTRODUCTION
24
+
25
+ Micromechanical models represent an efficient tool to estimate the overall stiffness of composite materials and a large number of results and comparisons with experimental data are available (Aboudi, 1991; Mura, 1987; Nemat-Nasser and Hori, 1993). The simplest model is the composite sphere or cylinder scheme which was proposed by Hashin (1962). In this method the composite material is modeled as a gradation of sizes of spherical or cylindrical inclusions embedded in a continuous matrix phase. Otherwise, in the self-consistent scheme (S-C), formulated by Budiansky (1965) and Hill (1965a, b), the fibers or the defects are considered as a typical micro inclusion embedded in an unbounded homogeneous elastic solid characterized by the unknown moduli of the composite. Then the overall elastic properties are computed by an iterative numerical procedure to take into account the interaction effects between the phases. For example, Budiansky and O'Connell (1976), Laws (1977), Laws et al. (1983), Laws and Brockenbrough (1987), Laws and Dvorak (1987) and Hoening (1979) used the S-C method to estimate the elastic properties of cracked composite materials. They analysed several cases such as: different geometries of cracks (ellipsoidal or cylindrical), isotropic and orthotropic matrix, two or more phase composite materials and obtained closed form solutions useful for engineering applications. Hori and Nemat-Nasser (1983) applied the S-C method for materials damaged by open and closed cracks and obtained the anisotropic response of the composite as a function of the load conditions and of loading path. Although the self-consistent method is simple to use, in the case of high volume fraction of the inclusions, it cannot be always applied for the analysis of multi-phase composite materials (Christensen, 1990). Conversely, the generalized self-consistent method, proposed by Christensen and Lo (1979, 1986), gives good results also in this case. 
They used this scheme to estimate the effective shear modulus and obtained physically realistic results for both spherical and cylindrical inclusions. On the other hand, for different geometries of the inclusions, many authors employed another micromechanical model, based on the Mori-Tanaka's theory (Mori and Tanaka, 1973).
26
+
27
+ †Presently at Department of Mechanical and Aerospace Engineering, West Virginia University, Morgantown, WV 26506-6101, U.S.A.
28
+ ---PAGE_BREAK---
29
+
30
+ Fig. 1. Geometry of the unit cell D.
31
+
32
+ They considered isotropic, transversely isotropic and orthotropic matrix and ellipsoidal, cylindrical and ribbon fibers or cracks [see for example, Taya and Chou (1981), Weng (1984), Zhao *et al.* (1989), Tandon and Weng (1984) and Taya (1981)]. Recently, Benveniste proposed a mathematical justification of the Mori-Tanaka's method and, for composites with or without cracks, obtained estimates for the overall stiffness and compliance tensor (Benveniste, 1987). Finally the behavior of the advanced composites was analysed by Aboudi (1991), who proposed the method of cells, and by Iwakuma and Nemat-Nasser (1983) who formulated the linear elastic problem of composites with periodic microstructure. In Nemat-Nasser and Taya (1981, 1985) and Nemat-Nasser *et al.* (1982) the concept of a unit cell was introduced and the Fourier series technique was applied to estimate the overall elastic properties of materials with periodic distributed voids. Several approximations to the distribution of the homogenization eigenstrains were considered to solve the problem and in the hypothesis of piecewise constant eigenstrains, Nemat-Nasser *et al.* (1993) proposed analytical expressions to evaluate the coefficients of the stiffness tensor of cracked solids. Otherwise, for composites with periodic elastic inclusions, they proposed a procedure which entails considerable numerical efforts [see Iwakuma and Nemat-Nasser (1983)]. In the present paper, close-form expressions for the coefficients of the stiffness tensor and for technical elastic moduli of composites materials with periodically distributed elastic inclusions or voids are proposed. Moreover, analytical expressions are given for the elastic moduli of the transversely isotropic material equivalent to the solid reinforced by periodic long fibers. 
Finally, comparisons with available experimental data, numerical results obtained by Aboudi (1991) and results of the generalized self-consistent method (Christensen and Lo, 1979) are presented.
33
+
34
+ ## 2. RELATION BETWEEN THE EIGENSTRAIN AND THE STRAIN INSIDE THE INCLUSION
35
+
36
+ Consider an infinitely extended linearly elastic solid represented by an assembly of unit cells. For simplicity, let each cell $D$ be a parallelepiped with dimensions $a_j$ (Fig. 1) in the direction of the coordinate axes $x_j$ where $j = 1, 2, 3$, and let $V$ be its volume. Then denote by $\Omega$ the part of $D$ occupied by the inclusions, let $D - \Omega$ denote the matrix and let $f$ be the volume fraction of $\Omega$.
37
+
38
+ Next, the relation between the eigenstrain and the strain inside the inclusion is introduced. In order to simulate the inclusions inside the body, consider the homogenization eigenstrain $\epsilon^*$ defined in all $D$, which must be periodic for the particular geometry of the problem and different to zero only in $\Omega$. Since the material is linear elastic, the actual stress tensor $\sigma$ inside the unit cell can be expressed in terms of $\epsilon^*$ and the actual strain tensor $\epsilon$ in the following way:
39
+ ---PAGE_BREAK---
40
+
41
+ $$ \sigma = C(\epsilon - \epsilon^*) \text{ in } D \quad (1) $$
42
+
43
+ where C is the elasticity tensor of the matrix. Then, assuming the body forces equal to zero, the tensor $\sigma$ must satisfy the following equilibrium conditions:
44
+
45
+ $$ \operatorname{div} \sigma = 0 \text{ in } D \quad (2) $$
46
+
47
+ where div denotes the divergence of a tensor field. Furthermore, since in a solid with periodic structure and suitable boundary conditions the displacement $\mathbf{u}$ are periodic, the following Fourier series representation of $\mathbf{u}$, $\epsilon$ and $\epsilon^*$ can be considered:
48
+
49
+ $$ \mathbf{u}(x) = \sum_{\xi}^{\pm\infty} \bar{\mathbf{u}}(\xi) \exp(i\xi x) \quad (3) $$
50
+
51
+ $$ \varepsilon(x) = \operatorname{sym} (\nabla \mathbf{u}(x)) = \sum_{\xi}^{\pm\infty} \bar{\varepsilon}(\xi) \exp(i\xi x) \quad (4) $$
52
+
53
+ $$ \varepsilon^*(x) = \sum_{\xi}^{\pm\infty} \bar{\varepsilon}^*(\xi) \exp(i\xi x) \quad (5) $$
54
+
55
+ where $\xi = (\xi_1, \xi_2, \xi_3)$ with $\xi_j = 2\pi n_j / a_j$ ($n_j = 0, \pm 1, \pm 2, \dots$, $j$ not summed, $j = 1, 2, 3$) and:
56
+
57
+ $$ \bar{\mathbf{u}}(\xi) = \int_D \mathbf{u}(x) \exp(-i\xi x) dx \quad (6) $$
58
+
59
+ $$ \bar{\varepsilon}(\xi) = \frac{i}{2} [\xi \otimes \bar{\mathbf{u}}(\xi) + \bar{\mathbf{u}}(\xi) \otimes \xi] \quad (7) $$
60
+
61
+ $$ \bar{\varepsilon}^*(\xi) = \int_D \varepsilon^*(x) \exp(-i\xi x) dx. \quad (8) $$
62
+
63
+ Combination of eqns (1) and (2) gives:
64
+
65
+ $$ \operatorname{div} (C(\epsilon - \epsilon^*)) = 0 \text{ in } D \quad (9) $$
66
+
67
+ then by using eqns (4), (7) and (5) in (9) the following expressions are obtained:
68
+
69
+ $$ -\xi \cdot C(\xi \otimes \bar{\mathbf{u}}(\xi)) = i\xi \cdot C\bar{\varepsilon}^*(\xi) \text{ for every } \xi \neq 0 \quad (10) $$
70
+
71
+ where the symbols $\otimes$ and $\cdot$ represent the outer and the inner products, respectively (Spiegel, 1959). Thus, since C represents the elastic tensor of the matrix, the coefficients $\bar{\mathbf{u}}(\xi)$ are obtained uniquely in terms of the $\bar{\varepsilon}^*(\xi)$ in the following way:
72
+
73
+ $$ \bar{\mathbf{u}}(\xi) = -i(\xi \cdot C \cdot \xi)^{-1} \cdot \xi \cdot C\bar{\varepsilon}^*(\xi) \text{ for every } \xi \neq 0 \quad (11) $$
74
+
75
+ and from eqn (7) the Fourier coefficients of the corresponding strain are:
76
+
77
+ $$ \bar{\varepsilon}(\xi) = \operatorname{sym} (\xi \otimes (\xi \cdot C \cdot \xi)^{-1} \otimes \xi): C\bar{\varepsilon}^*(\xi) \text{ for every } \xi \neq 0. \quad (12) $$
78
+
79
+ Finally denoting:
80
+ ---PAGE_BREAK---
81
+
82
+ $$P'(\xi) = \operatorname{sym} (\xi \otimes (\xi \cdot C \cdot \xi)^{-1} \otimes \xi) \quad (13)$$
83
+
84
+ obtain the actual strain inside the inclusion from eqn (12) using eqns (4) and (8) as:
85
+
86
+ $$\epsilon(x) = \frac{1}{V} \sum_{\xi}^{\pm\infty'} P'(\xi) : C \int_D \epsilon^*(x') \exp(-i\xi(x'-x)) dx' \quad (14)$$
87
+
88
+ where a prime on the sum indicates that $\xi = 0$ is excluded in the summation.
89
+
90
+ Now, note that the exact expression of the strain tensor $\epsilon(x)$ is not necessary to obtain the overall elastic tensor $C^*$ but only its volume average on $\Omega$ denoted by $(\bar{\epsilon} = \int_{\Omega} \epsilon(x) dx / V_{\Omega})$:
91
+
92
+ $$\bar{\epsilon} = \frac{1}{V} \sum_{\xi}^{\pm\infty'} P'(\xi) : C \left( \frac{g_0(\xi)}{V_{\Omega}} \right) \int_D \epsilon^*(x') \exp(-i\xi x') dx' \quad (15)$$
93
+
94
+ where $V_{\Omega}$ is the volume of the inclusion and:
95
+
96
+ $$g_0(\xi) = \int_{\Omega} \exp(i\xi x) dx. \quad (16)$$
97
+
98
+ A good approximation of eqn (15) is obtained when a constant $\epsilon^*$ is considered in $\Omega$, as shown in Nemat-Nasser *et al.* (1982). Then, replacing $\epsilon^*$ with its volume average $\bar{\epsilon}^*$, eqn (15) becomes:
99
+
100
+ $$\bar{\epsilon} = \frac{1}{V} \sum_{\xi} P'(\xi) : C \left( \frac{g_0(\xi)g_0(-\xi)}{V_{\Omega}} \right) \bar{\epsilon}^* \quad (17)$$
101
+
102
+ or
103
+
104
+ $$\bar{\epsilon} = f \sum_{\xi}^{+\infty'} \left( \frac{g_0(\xi)}{V_{\Omega}} \right) \left( \frac{g_0(-\xi)}{V_{\Omega}} \right) P'(\xi) : C : \bar{\epsilon}^* \quad (18)$$
105
+
106
+ and by denoting:
107
+
108
+ $$t(\xi) = f \left( \frac{g_0(\xi)}{V_{\Omega}} \right) \left( \frac{g_0(-\xi)}{V_{\Omega}} \right) \quad (19)$$
109
+
110
+ and
111
+
112
+ $$P = \sum_{\xi}^{+\infty'} t(\xi) P'(\xi) \quad (20)$$
113
+
114
+ the following expression holds:
115
+
116
+ $$\bar{\epsilon} = P : C : \bar{\epsilon}^* \text{ in } \Omega. \quad (21)$$
117
+
118
+ Note that eqn (21) represents the relation between the volume average of the strain inside the inclusion $\bar{\epsilon}$ and the volume average of the eigenstrain $\bar{\epsilon}^*$.
119
+
120
+ ### 3. OVERALL STIFFNESS TENSOR
121
+
122
+ In order to obtain the homogenization eigenstrain which simulates the presence of the periodic inclusions inside the body, consider an average strain tensor $\bar{\epsilon}_o$ in the unit cell, which is arbitrarily prescribed. In this hypothesis the following average consistency condition (equivalent eigenstrain method) can be used (Nemat-Nasser and Hori, 1993):
123
+ ---PAGE_BREAK---
124
+
125
+ $$C': (\bar{\varepsilon}_0 + P : C : \bar{\varepsilon}^*) = C : (\bar{\varepsilon}_0 + (P : C - I^{(4)}) : \bar{\varepsilon}^*) \quad (22)$$
126
+
127
+ where $C'$ is the elastic tensor of the inclusion and $I^{(4)}$ is the identity fourth-order tensor. Observe that the tensor $P$ takes into account the geometry of the inclusion and can be evaluated once and for all. Then from eqn (22), the equivalent average volume eigenstrain $\bar{\varepsilon}^*$ can be solved in terms of the tensors $C'$, $C$, $P$ and $\bar{\varepsilon}_0$ as:
128
+
129
+ $$\bar{\varepsilon}^* = [((C-C')^{-1}-P)C]^{-1}\bar{\varepsilon}_0. \quad (23)$$
130
+
131
+ Furthermore, since in this case the uniform overall stress $\sigma_0$ in the unit cell is:
132
+
133
+ $$C^*: \bar{\epsilon}_0 = C: (\bar{\epsilon}_0 - f \bar{\epsilon}^*) \quad (24)$$
134
+
135
+ by using eqn (23) and noting that $\bar{\epsilon}_0$ is arbitrary, the following expression of the overall stiffness tensor of the composite material is obtained:
136
+
137
+ $$C^* = C - f((C-C')^{-1}-P)^{-1}. \quad (25)$$
138
+
139
+ It is worth noting that evaluation of $C^*$ [eqn (25)] involves the inversion of a symmetric tensor since $P$, $C$ and $C'$ are all symmetric tensors. In particular if the matrix is isotropic, denoting by $\bar{\xi} = \xi/|\xi|$, the tensor $P$ is (Mura, 1987; Nemat-Nasser and Hori, 1993):
140
+
141
+ $$P = \frac{1}{\mu_0} \sum_{\xi}^{\pm\infty'} t(\xi) \left( \operatorname{sym}(\bar{\xi} \otimes I^{(2)} \otimes \bar{\xi}) - \frac{1}{2(1-\nu_0)} (\bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi}) \right) \quad (26)$$
142
+
143
+ where $\mu_0$ and $\nu_0$ are the shear modulus and the Poisson ratio of the matrix, respectively and $I^{(2)}$ is the identity second-order tensor. Hence, when the matrix and the inclusion are both isotropic, eqn (25) can be written:
144
+
145
+ $$C^* = \lambda_0 I^{(2)} \otimes I^{(2)} + 2\mu_0 I^{(4)} - f\left\{ \left[(\lambda_0 - \lambda_1)I^{(2)} \otimes I^{(2)} + 2(\mu_0 - \mu_1)I^{(4)}\right]^{-1} \\ - \frac{1}{\mu_0} \sum_{\xi}^{\pm\infty'} t(\xi) \left( \operatorname{sym}(\bar{\xi} \otimes I^{(2)} \otimes \bar{\xi}) - \frac{1}{2(1-\nu_0)} (\bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi}) \right) \right\}^{-1} . \quad (27)$$
146
+
147
+ Here $\mu_0$, $\lambda_0$, $\mu_1$ and $\lambda_1$ are the Lamé constants of the matrix and the inclusion, respectively. Then, defining the following series $S_l$ (with $l=1-9$) as:
148
+
149
+ $$S_1 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^2, \quad S_2 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_2^2, \quad S_3 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_3^2$$
150
+
151
+ $$S_4 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^4, \quad S_5 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_2^4, \quad S_6 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_3^4$$
152
+
153
+ $$S_7 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_2^2\bar{\xi}_3^2, \quad S_8 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^2\bar{\xi}_3^2, \quad S_9 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^2\bar{\xi}_2^2$$
154
+
155
+ (28)
156
+
157
+ the final expressions of the components of the tensor $C^*$ different from zero can be written in the following way:
158
+
159
+ $$C_{11}^* = \lambda_0 + 2\mu_0 - f \left( \frac{S_3 S_2}{\mu_0^2} - \frac{S_5 S_3 + S_6 S_2}{\mu_0^2 g} - \frac{a(S_2 + S_3)}{2\mu_0 c} + \frac{S_6 S_5 - S_7^2}{\mu_0^2 g^2} + \frac{a(S_5 + S_6) + 2bS_7}{2\mu_0 gc} + \frac{a^2 - b^2}{4c^2} \right) / D$$
160
+ ---PAGE_BREAK---
161
+
162
+ $$C_{12}^* = \lambda_0 + f\left(\left(-\frac{S_9}{\mu_0^2 g} + \frac{b}{2c\mu_0}\right)S_3 + \frac{S_9 S_6 - S_8 S_7}{\mu_0^2 g^2} - \frac{b(S_6 - S_7) - bS_8 - aS_9}{2c\mu_0 g} - \frac{ba+b^2}{4c^2}\right)/D$$
163
+
164
+ $$C_{13}^* = \lambda_0 - f\left(\left(\frac{S_8}{\mu_0^2 g} - \frac{b}{2c\mu_0}\right)S_2 - \frac{S_8 S_5 - S_9 S_7}{\mu_0^2 g^2} + \frac{b(S_5 - S_7) - aS_8 - bS_9}{2c\mu_0 g} + \frac{ab+b^2}{4c^2}\right)/D$$
165
+
166
+ $$C_{22}^* = \lambda_0 + 2\mu_0 - f\left(\frac{S_3 S_1}{\mu_0^2} - \frac{S_4 S_3 + S_6 S_1}{\mu_0^2 g} - \frac{a(S_1 + S_3)}{2\mu_0 c} + \frac{S_6 S_4 - S_8^2}{\mu_0^2 g^2} + \frac{a(S_4 + S_6) + 2bS_8}{2\mu_0 gc} + \frac{a^2 - b^2}{4c^2}\right)/D$$
167
+
168
+ $$C_{33}^* = \lambda_0 + 2\mu_0 - f\left(\frac{S_2 S_1}{\mu_0^2} - \frac{S_4 S_2 + S_5 S_1}{\mu_0^2 g} - \frac{a(S_1 + S_2)}{2\mu_0 c} + \frac{S_5 S_4 - S_9^2}{\mu_0^2 g^2} + \frac{a(S_5 + S_4) + 2bS_9}{2\mu_0 gc} + \frac{a^2 - b^2}{4c^2}\right)/D$$
169
+
170
+ $$C_{23}^* = \lambda_0 + f\left(\left(-\frac{S_7}{\mu_0^2 g} + \frac{b}{2c\mu_0}\right)S_1 + \frac{S_7 S_4 - S_9 S_8}{\mu_0^2 g^2} - \frac{b(S_4 - S_8 - S_9) - aS_7}{2c\mu_0 g} - \frac{ab+b^2}{4c^2}\right)/D$$
171
+
172
+ $$C_{44}^* = \mu_0 - f\left(-\frac{S_2}{\mu_0} - \frac{S_3}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_7}{\mu_0(2-2v_0)}\right)^{-1}$$
173
+
174
+ $$C_{55}^* = \mu_0 - f\left(-\frac{S_1}{\mu_0} - \frac{S_3}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_8}{\mu_0(2-2v_0)}\right)^{-1}$$
175
+
176
+ $$C_{66}^* = \mu_0 - f\left(-\frac{S_1}{\mu_0} - \frac{S_2}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_9}{\mu_0(2-2v_0)}\right)^{-1} \quad (29)$$
177
+
178
+ where:
179
+
180
+ $$
181
+ \begin{aligned}
182
+ D &= -\frac{S_{3}S_{2}S_{1}}{\mu_{0}^{3}} + \frac{(S_{6}S_{2} + S_{6}S_{2} + S_{6}S_{2})S_{1}}{\mu_{0}^{3}g} + \frac{a(S_{1}S_{2} + (S_{1}+S_{2})S_{3})}{2\mu_{0}^{2}c} \\
183
+ &\quad + \frac{(S_{5}S_{4} + S_{7}^{2})S_{1} + (S_{6}S_{4} + S_{8}^{2})S_{2} + (S_{5}S_{4} + S_{9}^{2})S_{3}}{\mu_{0}^{3}g^{2}} \\
184
+ &\quad - \frac{(aS_{5} + aS_{6} + 2bS_{7}^{2})S_{1} + (aS_{4} + aS_{6} + 2bS_{8}^{2})S_{2} + (aS_{4} + aS_{5} + 2bS_{9}^{2})S_{3}}{2\mu_{0}^{2}gc} \\
185
+ &\quad + \frac{(b^{2}-a^{2})(S_{1}+S_{2}+S_{3})}{4\mu_{0}c^{2}} + \frac{(S_{5}S_{6}-S_{7}^{2})S_{4}-S_{8}^{2}S_{5}-S_{9}^{2}S_{6}-2S_{8}S_{9}S_{7}}{\mu_{0}^{3}g^{3}} \\
186
+ &\quad + \frac{(aS_{5}+aS_{6}+2bS_{7})S_{4}-(aS_{7}+2bS_{8}+2bS_{9})S_{7}+(2bS_{5}-aS_{8}+2bS_{9})S_{8}}{-aS_{9}^{2}+(2bS_{9}+aS_{5})S_{6}} \\
187
+ &\quad + \frac{a(aS_{4}+aS_{5}+aS_{6}+2(bS_{7}+bS_{8}+bS_{9}))}{4\mu_{0}gc^{2}} + \frac{d(2(S_{7}+S_{8}+S_{9})-(S_{4}+S_{5}+S_{6}))}{4} \\
188
+ &\quad + \frac{a^{3}-3ab^{2}-2b^{3}}{8c^{3}}
189
+ \end{aligned}
190
+ $$
191
+
192
+ (30)
193
+ ---PAGE_BREAK---
194
+
195
+ and
196
+
197
+ $$
198
+ a = \mu_1 - \mu_0 - 2\mu_1 v_0 + 2\mu_0 v_1
199
+ $$
200
+
201
+ $$
202
+ b = -\mu_0 v_0 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1
203
+ $$
204
+
205
+ $$
206
+ c = (\mu_0 - \mu_1)(-\mu_0 + \mu_1 - \mu_0 v_0 - 2\mu_1 v_0 + 2\mu_0 v_1 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1)
207
+ $$
208
+
209
+ $$
210
+ d = b^2 / (\mu_0 gc^2)
211
+ $$
212
+
213
+ $$
214
+ g = (2 - 2v_0). \tag{31}
215
+ $$
216
+
217
+ Numerical values for the series $S_i$ are given by Nemat-Nasser *et al.* (1982) and Iwakuma and Nemat-Nasser (1983) for several geometries of the inclusions. It is worth noting that the stiffness values presented by Nemat-Nasser *et al.* (1982) and Iwakuma and Nemat-Nasser (1983) can be obtained by using eqns (29) to (31).
218
+
219
+ 4. UNIDIRECTIONAL COMPOSITE
220
+
221
+ In the case of composite material reinforced by long circular cylindrical fibers, five
222
+ series are different from zero and only three are independent (Nemat-Nasser et al., 1982).
223
+ For unidirectional fibers aligned with the x₁-axis, the tensor ε*(x) is constant in the x₁-
224
+ direction, therefore the Fourier series of ε*(x) in the x₁-direction reduces to a constant.
225
+ Then, for the case of fibers aligned with the x₁-axis, we have:
226
+
227
+ $$
228
+ S_1 = S_4 = S_8 = S_9 = 0 \\
229
+ S_2 = S_3, \quad S_5 = S_6. \tag{32}
230
+ $$
231
+
232
+ Therefore, the following formulas can be used to evaluate the stiffness tensor of a uni-
233
+ directional composite with periodic microstructure:
234
+
235
+ $$
236
+ C_{11}^* = \lambda_0 + 2\mu_0 - f \left[ \frac{S_3^2}{\mu_0^2} - \frac{2S_6S_3}{\mu_0^2 g} - \frac{aS_3}{\mu_0 c} + \frac{S_6^2 - S_7^2}{\mu_0^2 g^2} + \frac{aS_6 + bS_7}{\mu_0 gc} + \frac{a^2 - b^2}{4c^2} \right] / D
237
+ $$
238
+
239
+ $$
240
+ C_{12}^* = \lambda_0 + f b \left[ \frac{S_3}{2c\mu_0} - \frac{S_6 - S_7}{2c\mu_0 g} - \frac{a+b}{4c^2} \right] / D
241
+ $$
242
+
243
+ $$
244
+ C_{23}^* = \lambda_0 + f \left[ \frac{aS_7}{2\mu_0 gc} - \frac{ba+b^2}{4c^2} \right] / D
245
+ $$
246
+
247
+ $$
248
+ C_{22}^* = \lambda_0 + 2\mu_0 - f \left[ -\frac{aS_3}{2\mu_0 c} + \frac{aS_6}{2\mu_0 gc} + \frac{a^2-b^2}{4c^2} \right] / D
249
+ $$
250
+
251
+ $$
252
+ C_{44}^* = \mu_0 - f \left[ -\frac{2S_3}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_7}{\mu_0(2-2v_0)} \right]^{-1}
253
+ $$
254
+
255
+ $$
256
+ C_{66}^{*} = \mu_{0} - f \left[ -\frac{S_{3}}{\mu_{0}} + (\mu_{0} - \mu_{1})^{-1} \right]^{-1} \quad (33)
257
+ $$
258
+
259
+ where:
260
+ ---PAGE_BREAK---
261
+
262
+ $$D = \frac{aS_3^2}{2\mu_0^2 c} - \frac{aS_6 S_3}{\mu_0^2 gc} + \frac{a(S_6^2 - S_7^2)}{2\mu_0^2 g^2 c} + \frac{S_3(b^2 - a^2)}{2\mu_0 c^2} \\ + \frac{S_6(a^2 - b^2) + S_7(ab + b^2)}{2\mu_0 gc^2} + \frac{(a^3 - 2b^3 - 3ab^2)}{8c^3} \quad (34)$$
263
+
264
+ and
265
+
266
+ $$a = \mu_1 - \mu_0 - 2\mu_1 v_0 + 2\mu_0 v_1$$
267
+
268
+ $$b = -\mu_0 v_0 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1$$
269
+
270
+ $$c = (\mu_0 - \mu_1)(-\mu_0 + \mu_1 - \mu_0 v_0 - 2\mu_1 v_0 + 2\mu_0 v_1 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1)$$
271
+
272
+ $$g = (2 - 2v_0) \qquad (35)$$
273
+
274
+ where the series $S_3$, $S_6$, $S_7$ are given by Nemat-Nasser *et al.* (1982) in tabular form for several values of the volume fraction of the inclusions. However, the tabular data can be fitted with parabolic expressions using a least-square method. In the case of long fibers, the following expressions fit the data with a correlation coefficient $R = 1$:
275
+
276
+ $$S_3 = 0.49247 - 0.47603f - 0.02748f^2$$
277
+
278
+ $$S_6 = 0.36844 - 0.14944f - 0.27152f^2$$
279
+
280
+ $$S_7 = 0.12346 - 0.32035f + 0.23517f^2. \quad (36)$$
281
+
282
+ This procedure avoids the numerical evaluation of the series for each value of the fiber volume fraction, which entails significant computational effort, and also allows us to arrive at algebraic expressions for the elastic moduli.
283
+
284
+ ## 5. TRANSVERSELY ISOTROPIC MATERIAL
285
+
286
+ Because of the periodicity of the microstructure, the stiffness tensor $C^*$ for uni-directional composite represents an orthotropic material with square symmetry. In the case considered in the previous section, the directions $x_2$ and $x_3$ are equivalent and the stiffness tensor is unchanged by a rotation about $x_1$ of $n\pi/2$ ($n = 0, \pm 1, \pm 2, ...$). This implies that only six components are required to describe the tensor completely.
287
+
288
+ In order to obtain a transversely isotropic stiffness tensor, equivalent in average sense to the stiffness tensor with square symmetry, the following averaging procedure (Aboudi, 1991) is used. A rotation $\theta$ about the $x_1$-axis of the tensor $C^*$ produces
289
+
290
+ $$B(\theta) = Q(\theta) C^* Q^T(\theta) \quad (37)$$
291
+
292
+ where $Q(\theta)$ is the fourth-order orthogonal rotation tensor. Then the equivalent transversely isotropic tensor is obtained as:
293
+
294
+ $$\bar{B} = \frac{1}{\pi} \int_{0}^{\pi} B(\theta) d\theta. \quad (38)$$
295
+
296
+ Then, using the relations between the engineering constants and the components of the $\bar{B}$ tensor, the following expressions are obtained explicitly in terms of the coefficients of the tensor $C^*$ [eqns (33)-(36)]:
297
+ ---PAGE_BREAK---
298
+
299
+ Fig. 2. Comparison with experimental results of transverse modulus $E_T$ normalized with respect to the matrix modulus $E_0$.
300
+
301
+ $$
302
+ \begin{align*}
303
+ E_A &= C_{11}^* - \frac{2C_{12}^{*2}}{C_{22}^* + C_{23}^*} \\
304
+ E_T &= \frac{(2C_{11}^* C_{22}^* + 2C_{11}^* C_{23}^* - 4C_{12}^{*2})(C_{22}^* - C_{23}^* + 2C_{44}^*)}{3C_{11}^* C_{22}^* + C_{11}^* C_{23}^* + 2C_{11}^* C_{44}^* - 4C_{12}^{*2}} \\
305
+ G_A &= C_{66}^* \\
306
+ G_T &= \frac{C_{22}^*}{4} - \frac{C_{23}^*}{4} + \frac{C_{44}^*}{2} = \frac{E_T}{2(1+v_T)} \\
307
+ v_A &= \frac{C_{12}^*}{C_{22}^* + C_{23}^*}
308
+ \end{align*}
309
+ $$
310
+
311
+ $$ v_T = \frac{C_{11}^* C_{22}^* + 3C_{11}^* C_{23}^* - 2C_{11}^* C_{44}^* - 4C_{12}^{*2}}{3C_{11}^* C_{22}^* + C_{11}^* C_{23}^* + 2C_{11}^* C_{44}^* - 4C_{12}^{*2}} \quad (39) $$
312
+
313
+ In particular the transverse shear modulus $G_T$ can be written in the following way:
314
+
315
+ $$ G_T = \mu_0 - \frac{f}{4} \left[ \left( -\frac{aS_3}{2\mu_0 c} + \frac{a(S_7+S_6)}{2\mu_0 gc} - \frac{ba+2b^2-a^2}{4c^2} \right) / D + 2 \left( -\frac{2S_3}{\mu_0} + (\mu_0-\mu_1)^{-1} + \frac{4S_7}{\mu_0(2-2v_0)} \right)^{-1} \right] \quad (40) $$
316
+
317
+ where a, b, c, D and g are given in eqn (35) and $S_3$, $S_6$ and $S_7$ can be evaluated by eqn (36).
318
+
319
+ ## 6. COMPARISONS WITH EXPERIMENTAL RESULTS
320
+
321
+ Comparisons with experimental results and the expressions proposed by other authors are presented in this section. Tsai and Hahn (1980) measured the transverse Young's modulus $E_T$ and the axial shear modulus $G_A$ of glass-epoxy composite for several values of the fiber volume fraction. The properties of the constituents are $v_0 = 0.38$, $v_1 = 0.22$, and $E_1/E_0 = 21.19$. The results obtained with eqns (39) and (36) (present result) are compared to the experimental data and to predictions using the method of cells (Aboudi, 1991) in Figs 2 and 3. Predicted values of the axial and transverse Poisson ratios are shown in Fig. 4. For the same properties of the constituents, the axial modulus predicted by the first of eqns (39) coincides with the rule of mixture estimate and the axial shear modulus obtained
322
+ ---PAGE_BREAK---
323
+
324
+ Fig. 3. Comparison with experimental results of axial shear modulus $G_A$ normalized with respect to the matrix modulus $\mu_0$.
325
+
326
+ Fig. 4. Transverse and axial Poisson ratio ($v_T$ and $v_A$) as a function of the fiber volume fraction for glass-epoxy composite.
327
+
328
+ Fig. 5. Comparison with general self-consistent method of transverse shear modulus $G_T$ normalized with respect to the matrix modulus $\mu_0$.
329
+
330
+ from the third of eqns (39) gives the same values of the expressions proposed by Christensen and Lo (1979), for all values of the fiber volume fraction. Then, in Fig. 5 the transverse shear modulus obtained by eqn (40) is compared with the analytical expression proposed
331
+ ---PAGE_BREAK---
332
+
333
+ by Christensen and Lo (1979) and with the transverse shear modulus in the material with square symmetry $C_{44}^*$ [given by eqn (33)].
334
+
335
+ ## 7. CONCLUSIONS
336
+
337
+ Simple formulas for the coefficients of the stiffness tensor of composite materials with general types of elastic inclusions or voids with periodic microstructure are presented. These formulas are reduced for the particular case of long fiber composites and the engineering properties of equivalent transversely isotropic materials are proposed. Good agreement with available experimental data is obtained. The interaction effects between the constituents are fully accounted for.
338
+
339
+ *Acknowledgements*—This work was supported by the Italian National Council of Research (CNR) and the Constructed Facilities Center (CFC) at West Virginia University.
340
+
341
+ ## REFERENCES
342
+
343
+ - Aboudi, J. (1991). *Mechanics of Composite Materials*. Elsevier Science Publishers, Netherlands.
344
+ - Benveniste, Y. (1987). A new approach to the application of Mori-Tanaka's theory in composite materials. *Mech. Mater.* **6**, 147–157.
345
+ - Budiansky, B. (1965). On the elastic moduli of heterogeneous materials. *J. Mech. Phys. Solids* **13**, 213–227.
346
+ - Budiansky, B. and O'Connell, R. J. (1976). Elastic moduli of a cracked solid. *Int. J. Solids Structures* **12**, 81–97.
347
+ - Christensen, R. M. (1990). A critical evaluation for a class of micromechanics models. *J. Mech. Phys. Solids* **38**(3), 379–404.
348
+ - Christensen, R. M. and Lo, K. H. (1979). Solutions for effective shear properties in three phase sphere and cylinder models. *J. Mech. Phys. Solids* **27**, 315–330.
349
+ - Christensen, R. M. and Lo, K. H. (1986). Erratum: solutions for effective shear properties in three phase sphere and cylinder models. *J. Mech. Phys. Solids* **34**(6), 639.
350
+ - Hashin, Z. (1962). The elastic moduli of heterogeneous materials. *J. Appl. Mech.* **29**, Trans. ASME **84**(E), 143–150.
351
+ - Hill, R. (1965a). Continuum micromechanics of elasto-plastic polycrystal. *J. Mech. Phys. Solids* **13**, 89–100.
352
+ - Hill, R. (1965b). A self-consistent mechanics of composite materials. *J. Mech. Phys. Solids* **13**, 227–240.
353
+ - Hoening, A. (1979). Elastic moduli of a non-randomly cracked body. *Int. J. Solids Structures* **15**, 137–154.
354
+ - Hori, H. and Nemat-Nasser, S. (1983). Overall moduli of solids with microcracks: load-induced anisotropy. *J. Mech. Phys. Solids* **31**(2), 155–171.
355
+ - Iwakuma, T. and Nemat-Nasser, S. (1983). Composites with periodic microstructure. *Comput. Structures* **16**(1–4), 13–19.
356
+ - Laws, N. (1977). A note on interaction energies associated with cracks in anisotropic media. *Phil. Mag.* **36**, 367–372.
357
+ - Laws, N. and Brockenbrough, J. R. (1987). The effect of micro-crack system on the loss of stiffness of brittle solids. *Int. J. Solids Structures* **23**(9), 1247–1268.
358
+ - Laws, N. and Dvorak, G. J. (1987). The effect of fiber breaks and aligned penny-shaped cracks on the stiffness and energy release rates in unidirectional composites. *Int. J. Solids Structures* **23**(9), 1269–1283.
359
+ - Laws, N., Dvorak, G. J. and Hejazi, M. (1983). Stiffness changes in unidirectional composites caused by crack systems. *Mech. Mater.* **2**, 123–137.
360
+ - Mori, T. and Tanaka, K. (1973). Average stress in matrix and average elastic energy of materials with misfitting inclusions. *Acta Metall.* **21**, 571–574.
361
+ - Mura, T. (1987). *Micromechanics of Defects in Solids* (2nd edn, rev). Dordrecht, The Netherlands.
362
+ - Nemat-Nasser, S. and Hori, M. (1993). *Micromechanics: Overall Properties of Heterogeneous Solids*. Elsevier Science Publishers, Amsterdam.
363
+ - Nemat-Nasser, S., Iwakuma, T. and Hejazi, M. (1982). On composites with periodic structure. *Mech. Mater.* **1**, 239–267.
364
+ - Nemat-Nasser, S and Taya, M. (1981). On effective moduli of an elastic body containing periodically distributed voids. *Q. Appl. Math.* **39**, 43–59.
365
+ - Nemat-Nasser, S. and Taya, M. (1985). On effective moduli of an elastic body containing periodically distributed voids: comments and corrections. *Q. Appl. Math.* **43**, 187–188.
366
+ - Nemat-Nasser, S., Yu, N. and Hori, M. (1993). Solids with periodically distributed cracks. *Int. J. Solids Structures* **30**, 2071–2095.
367
+ - Spieget, M. R. (1959). *Vector Analysis*. Schum's Outline Series. McGraw-Hill, New York.
368
+ - Tandon, G. P. and Weng, G. J. (1984). The effect of aspect ratio of inclusions on the elastic properties of unidirectional aligned composites. *Polymer Compos.* **5**, 327–333.
369
+ - Taya, M. (1981). On stiffness and strength of an aligned short-fiber reinforced composite containing penny-shaped cracks in the matrix. *J. Compos. Mater.* **15**, 198–210.
370
+ - Taya, M. and Chou, T. W. (1981). On two kinds of ellipsoidal inhomogeneities in an infinite elastic body: an application to a hybrid composite. *Int. J. Solids Structures* **17**, 553–563.
371
+
372
+
373
+ ---PAGE_BREAK---
374
+
375
+ Tsai, S. W. and Hahn, H. T. (1980). *Introduction to Composite Materials*. Technomic, Lancaster, PA.
376
+
377
+ Weng, G. J. (1984). Some elastic properties of reinforced solids, with special reference to isotropic ones containing spherical inclusions. *Int. J. Engng Sci.* **22**(7), 845-856.
378
+
379
+ Zhao, Y. H., Tandon, G. P. and Weng, G. J. (1989). Elastic moduli for a class of porous materials. *Acta Mechanica* **76**, 105-130.
samples/texts_merged/6293016.md ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ On the Relation Between Primitive Recursion,
5
+ Schematization, and Divergence
6
+
7
+ Miki HERMANN*
8
+
9
+ CRIN (CNRS) and INRIA-Lorraine
10
+ Campus Scientifique, BP 239,
11
+ 54506 Vandœuvre-lès-Nancy, France
12
+
13
+ e-mail: Miki.Hermann@loria.fr
14
+
15
+ Abstract
16
+
17
+ The paper presents a new schematization of infinite families of terms called the primal grammars, based on the notion of primitive recursive rewrite systems. This schematization is presented by a generating term and a canonical rewrite system. It is proved that the class of primal grammars covers completely the class of crossed rewrite systems. This proof contains a construction of a primal grammar from a crossed rewrite system.
18
+
19
+ # 1 Introduction
20
+
21
+ Infinite sequences of terms, equations, rules or substitutions of common origin (sometimes called *infinite families*) appear frequently at different moments within equational reasoning, automated deduction, and logic programming. One of these moments is e.g. the divergent behavior of the completion procedure when it is applied to certain rewrite systems. There exist sufficient conditions, presented in the form of patterns called *crossed rewrite systems*, whose presence guarantees the divergence. Unfortunately, there exist finitely presented decidable equational theories which imply a divergent behavior of the completion procedure. Nevertheless, sometimes there is a need to use even this infinite canonical rewrite system. Therefore one may want to capture by finite means the infinite family of rules originating from a crossed system. Another possibility for the use of primal grammars is presented by equational unification, when an infinite set of (most general) unifiers is generated.
22
+
23
+ *Schematizations* present a suitable formalism to cope directly, by finite means, with infinite families. To our knowledge, so far there are four schematizations of infinite families. These are the *meta-rules* [Kir89], the *term schemes* [Gra88], the *recurrence domains* [CHK90], with their subclass *ω-terms* [CH91] called also *ρ-terms*, and the rewrite
24
+
25
+ *Partially supported by Institut National Polytechnique de Lorraine grant 910 0146 R1.
26
+ ---PAGE_BREAK---
27
+
28
+ tization of infinite families of terms, but on the contrary to other schematizations (which usually exploit a more complicated notion, such as higher order terms or some sort of constraints) they are presented by a generating term plus a canonical rewrite system. As we will see later, primal grammars correspond exactly with the class of crossed systems.
29
+
30
+ The idea of this paper originated from two different sources. On the one hand, this paper develops further the type of schematization introduced by Chen, Hsiang, and Kong [CHK90, CH91]. The second source was the paper of Sattler-Klein [SK91].
31
+
32
+ ## 2 Basic notation and definitions
33
+
34
+ It is supposed that the reader is familiar with the theory of rewrite systems. For reviews see e.g. [DJ90, Bac91]. The notation used conforms to that of [DJ91].
35
+
36
+ Denote by $\mathcal{T}(\mathcal{F}, \mathcal{X})$ the set of all terms over variables $\mathcal{X}$ and symbols $\mathcal{F}$. $\mathrm{Var}(t)$ denotes the set of all variables in the term $t$. $\mathrm{Head}(t)$ denotes the function symbol heading term $t$.
37
+
38
+ $\mathcal{Pos}(t)$ denotes the set of positions of the term $t$. The subset of variable positions of $t$ is denoted by $\mathcal{V}\mathcal{Pos}(t)$, the subset of non-variable positions of $t$ by $\mathcal{F}\mathcal{Pos}(t)$. The expression $a \leq b$ denotes a position *a* above the position *b*. The expression $a \parallel b$ denotes that the positions *a* and *b* are parallel (incomparable). A subterm of $t$ at a position $a \in \mathcal{Pos}(t)$ is denoted by $t|_a$. Denote by $s[t]_a$ a new term obtained from the term $s$ after replacing its subterm $s|_a$ by $t$. Denote by $s[\cdot]_a$ a context of $s$ with a hole at the position $a$.
39
+
40
+ Denote a substitution $\sigma: \mathcal{X} \to \mathcal{T}(\mathcal{F}, \mathcal{X})$ by $[x_1 \mapsto t_1, \ldots, x_n \mapsto t_n]$ when the terms $t_i$ are substituted for the variables $x_i$. A term $t$ instantiated by a substitution $\sigma$ is denoted by $t\sigma$. Denote by $\mathit{Dom}(\sigma)$, $\mathcal{V}\mathit{Ran}(\sigma)$, and $\mathcal{V}\mathit{ar}(\sigma)$ the variable domain, variable range, and all variables (union of variable domain and variable range) of a substitution $\sigma$, respectively.
41
+
42
+ A *rewrite rule* is an ordered pair of terms $s \to t$ such that $\mathcal{V}\mathit{ar}(t) \subseteq \mathcal{V}\mathit{ar}(s)$. A term *rewriting system* (or *rewrite system*) is a finite set of rules $R = \{s \to t \mid s, t \in \mathcal{T}(\mathcal{F}, \mathcal{X})\}$. A *rewriting relation* $\to_R$ is the smallest relation containing $R$, closed under substitution and replacement. The relation $\stackrel{*}{\to}_R$ denotes the reflexive and transitive closure of $\to_R$; the relation $\leftarrow_R$ denotes the converse of $\to_R$; the equivalence relation $\stackrel{*}{\leftrightarrow}_R$ denotes the reflexive, symmetric, and transitive closure of $\to_R$. The normal form of a term $t$ wrt a terminating rewrite relation $\to_R$ is denoted by $t\downarrow_R$.
43
+
44
+ Denote by $\vec{a}$ ambiguously either the vector of distinct objects $\langle a_1, \ldots, a_n \rangle$, or the sequence of distinct objects $a_1, \ldots, a_n$, or else the set $\{a_1, \ldots, a_n\}$. Therefore the expression $\tilde{f}(\vec{x})$ means $f_1(x_1, \ldots, x_k), \ldots, f_n(x_1, \ldots, x_k)$.
45
+
46
+ Suppose that $\succ$ is a precedence on $\mathcal{F}$. A lexicographic path ordering $\succ_{lpo}$ on $\mathcal{T}(\mathcal{F}, \mathcal{X})$ is defined by $s = f(\vec{s}) \succ_{lpo} g(\vec{t}) = t$ if one of the following holds: $\exists s_i \in \vec{s}$ such that $s_i \succeq_{lpo} t$, or $f \succ g$ and $\forall t_i \in \vec{t}$ we have $s \succ_{lpo} t_i$, or $f \equiv g$ and $\vec{s} \succ_{lpo}^\text{lex} \vec{t}$, where $\succ_{lpo}^\text{lex}$ is the lexicographic extension of the ordering $\succ_{lpo}$.
47
+
48
+ ### 2.1 Crossed systems
49
+
50
+ The sum [Her90a] of $\varphi$ and $\psi$ is the substitution $\varphi \Delta \psi$ defined as $[x \mapsto x\varphi\psi \mid x \in \mathit{Dom}(\varphi), x\varphi\psi \neq x]$. The iterative operator $T_n$ [Her90a] on $\sigma, \psi$, and $\varphi$ is defined
51
+ ---PAGE_BREAK---
52
+
53
+ Recall that the crossed rewrite systems present a sufficient pattern for description and recognition of divergent rewrite systems. For crossed systems see Examples 5.1, 5.2 and 5.3, or the paper [Her90b].
54
+
55
+ **Definition 2.1** [Kl190] The rewrite rules $s_1 \rightarrow t_1$ and $s_2 \rightarrow t_2$ (with supposed disjoint variables) form a forward [... a backward] crossed rewrite system if $\int t_1$ is not a variable, there are substitutions $\sigma_2$ [... substitutions $\sigma_1$], $\varphi_1, \varphi_2$ in own variables of $s_2$ [... of $s_1$], an idempotent substitution $\sigma_1$ [... substitution $\sigma_2$], and positions $a \in \mathcal{F}Pos(s_1)$, $b \in \mathcal{F}Pos(t_2)$ [... and a position $b \in \mathcal{F}Pos(s_1)$] such that
56
+
57
+ 1. $\langle\sigma_1, \sigma_2\rangle$ is the most general semi-unifier of $s_1|_a$ [... of $s_1|_b$] and $s_2$: $$ s_1|_a\sigma_1 = s_2\sigma_2 \text{ [...] } s_1|_b\sigma_1 = s_2\sigma_2. $$
58
+
59
+ 2. $\langle\varphi_1, \varphi_2\rangle$ is the most general semi-unifier of $t_2|_b$ and $s_2$ [... of $t_1$ and $s_1|_b$]: $$ t_2|_b\varphi_1 = s_2\varphi_2 \text{ [...] } t_1\varphi_1 = s_1|_b\varphi_2. $$
60
+
61
+ 3. $\mathrm{Dom}(\varphi_1) \cap (\mathrm{Var}(\varphi_2) \cup \mathrm{Var}(\sigma_2)) = \emptyset$ or $\mathrm{Var}(\varphi_1) \cap (\mathrm{Dom}(\varphi_2) \cup \mathrm{Dom}(\sigma_2)) = \emptyset$ \\ [... $\mathrm{Dom}(\varphi_1) \cap (\mathrm{Var}(\varphi_2) \cup \mathrm{Var}(\sigma_1)) = \emptyset$ or $\mathrm{Var}(\varphi_1) \cap (\mathrm{Dom}(\varphi_2) \cup \mathrm{Dom}(\sigma_1)) = \emptyset$.]
62
+
63
+ This definition is a simplified and cumulated version of those given in [Kl190]. The latter, more general, definitions treat the case of crossed systems consisting of more than two rules, exploiting the notion of an *overlap closure* [GKM83] $s_2 \dashv\vdash t_2$ ($s_1 \dashv\vdash t_1$) instead of a simple rewrite rule $s_2 \rightarrow t_2$ ($s_1 \rightarrow t_1$). From the formal point of view the closure is treated in the same way as the rule, therefore we use the simplified definition(s) for our purposes.
64
+
65
+ It is evident from Definition 2.1 of crossed systems that $\mathrm{Dom}(\varphi_1) \cap \mathrm{Dom}(\varphi_2) = \emptyset$.
66
+
67
+ **Theorem 2.2** [Kl190] Let $S = \{s_1 \to t_1, s_2 \to t_2\}$ form a forward (... a backward) crossed system. Assume that each nontrivial critical pair $\langle s\sigma[t'\sigma]_c, t\sigma \rangle$ computed by the completion procedure from $S$ and an ordering $\succcurlyeq$ satisfies $s\sigma[t'\sigma]_c \succcurlyeq t\sigma$ (... satisfies $t\sigma$ $\succcurlyeq$ $s\sigma[t'\sigma]_c$). A fair completion procedure without interreduction produces from $S$ the sequence of rules
68
+
69
+ $$
70
+ \begin{array}{l@{\hspace{4em}}l}
71
+ \begin{array}{rcl}
72
+ \text{forward case} & & \\
73
+ u_1 & \to & v_1 = (s_1\sigma_1[t_2\sigma_2]_a)\rho_1 \to t_1\sigma_1\rho_1 \\
74
+ u_{n+1} & \to & v_{n+1} = u_n\omega_n[t_2\omega_n]_{ab^n} \to v_n\omega_n
75
+ \end{array}
76
+ &
77
+ \begin{array}{rcl}
78
+ \text{backward case} & & \\
79
+ u_1 & \to & v_1 = t_1\sigma_1\rho_1 \to (s_1\sigma_1[t_2\sigma_2]_b)\rho_1 \\
80
+ u_{n+1} & \to & v_{n+1} = t_1\omega_n \to s_1\omega_n[v_n\omega_n]_b
81
+ \end{array}
82
+ \end{array}
83
+ $$
84
+
85
+ called the iterated family $\mathcal{I}(S)$, where
86
+
87
+ $$ \omega_n = ((\pi_n \Delta (\varphi_1 \Delta T_{n-1}(\psi, \varphi_2, \varphi_1))) \cup (\varphi_2 \Delta T_{n-1}(\psi, \varphi_2, \varphi_1)))\rho_{n+1} $$
88
+
89
+ with $\psi = \sigma_2$ in forward case and $\psi = \sigma_1$ in backward case, is the iterative substitution and
90
+
91
+ $$
92
+ \begin{array}{l@{\hspace{4em}}l}
93
+ \begin{array}{l}
94
+ \text{forward case} \\
95
+ \pi_n = [x_n \mapsto x \mid x_n \in \mathcal{V}\mathrm{ar}(u_n|_{ab^n})] \\
96
+ \rho_n = [x \mapsto x_n \mid x \in \mathcal{V}\mathrm{ar}(s_2)]
97
+ \end{array}
98
+ &
99
+ \begin{array}{l}
100
+ \text{backward case} \\
101
+ \pi_n = [x_n \mapsto x \mid x_n \in \mathcal{V}\mathrm{ar}(u_n|_b)] \\
102
+ \rho_n = [x \mapsto x_n \mid x \in \mathcal{V}\mathrm{ar}(s_1)]
103
+ \end{array}
104
+ \end{array}
105
+ $$
106
+
107
+ is a pair of fold/unfold substitutions for explicit variable renaming.
108
+ ---PAGE_BREAK---
109
+
110
+ In addition to the signature of *plain* symbols $\mathcal{F}$, we consider also another signature of auxiliary symbols $\mathcal{H}$, where $\mathcal{F} \cap \mathcal{H} = \emptyset$, plus the special symbols successor $s$ and the zero constant $0$, neither of which is included in $\mathcal{F}$ or in $\mathcal{H}$. The auxiliary symbols from $\mathcal{H}$ will be denoted by a hat to distinguish them from the 'bare headed' plain symbols from $\mathcal{F}$.
111
+
112
+ The arguments of the function symbols $\hat{f} \in \mathcal{H}$ are divided into two parts by a semi-colon. Those before the semicolon are called *counters*, or *counter variables* if they consist just of a variable. Each auxiliary symbol $\hat{f}$ has a *counter arity*, denoted by $ar_c(\hat{f})$, indicating its number of counters. The set $CPos(t) = \{a.n \mid Head(t|_a) = f \in \mathcal{H}, n \le ar_c(f)\}$ is called the set of *counter positions* in a term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$. These are the positions in $t$ immediately below an auxiliary symbol $\hat{f}$, before the semicolon. The set of counter variables of a term $t$ is denoted by $CVar(t) = \{t|_a \mid a \in CPos(t) \cap VPos(t)\}$.
113
+
114
+ The auxiliary positions of the term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ are denoted by
115
+
116
+ $$ \mathrm{Pos}_{\mathcal{H}}(t) = \{a \in \mathcal{F} \mathrm{Pos}(t) \mid \mathrm{Head}(t|_a) \in \mathcal{H}\} $$
117
+
118
+ The outermost auxiliary positions of $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ are denoted by
119
+
120
+ $$ \mathrm{OPos}_{\mathcal{H}}(t) = \{a \in \mathrm{Pos}_{\mathcal{H}}(t) \mid a \le b \text{ or } a \parallel b \text{ for all } b \in \mathrm{Pos}_{\mathcal{H}}(t)\} = \min_{\le} \mathrm{Pos}_{\mathcal{H}}(t) $$
121
+
122
+ **Definition 3.1** Suppose there exists a precedence $\succ$ on the auxiliary symbols $\mathcal{H}$. The prime rewrite system $P_\mathcal{H}$ upon $\mathcal{H}$ contains for each symbol $\hat{f} \in \mathcal{H}$ the pair of rewrite rules
123
+
124
+ $$ (\hat{f}(0, \vec{x}; \vec{y}) \rightarrow t_1) \quad (\hat{f}(s(z), \vec{x}; \vec{y}) \rightarrow t_2[\hat{f}(z, \vec{x}\delta(x); \vec{y})]_\Lambda) $$
125
+
126
+ where $\Lambda \subseteq Pos(t_2)$ is a finite set of mutually parallel positions incomparable with the auxiliary positions $Pos_H(t_2)$. $\vec{x}$ and $\vec{y}$ are variable vectors, $\delta(x)$ is the substitution $\delta(x) = [x \mapsto s(x)]$, and $t_1, t_2$ are terms from $T(\mathcal{F} \cup \mathcal{H} \cup \{s\}, \mathcal{X})$, such that for both $i=1,2$
127
+
128
+ • for all auxiliary positions $a \in Pos_H(t_i)$ there exists an auxiliary symbol $\hat{g} \in \mathcal{H}$ and a subsequence $\vec{w}$ of $\vec{x}$, such that $\hat{f} \succ \hat{g}$ and $t_i|_a = \hat{g}(\vec{w}; \vec{y})$;
129
+
130
+ • for all variable positions $a \in VPos(t_i)$, which are incomparable with all auxiliary positions $Pos_H(t_i)$, we have $t_i|_a = y$ or $t_i|_a = y_m$ where $y \in \vec{y}$ is a variable and $m$ is its mark, with either $m \in \{0\} \cup \vec{x}$ if $i=1$ or $m \in \{s(z)\} \cup \vec{x}$ if $i=2$.
131
+
132
+ Prime rewrite systems are primitive recursive rewrite systems of special type. The meaning of $\vec{x}\delta(x)$ is to transform the variable $x$ into $s(x)$ if $x$ belongs to the variable sequence $\vec{x}$. Prime rewrite systems violate the requirement $\mathrm{Var}(r) \subseteq \mathrm{Var}(l)$ for rewrite rules $l \to r$ of classic rewrite systems, because there may exist variables $\mathcal{V} \subseteq \mathrm{Var}(r) - \mathrm{Var}(l)$ for rules $l \to r \in P_\mathcal{H}$, and therefore they should be considered as production systems. If $y_m \in \mathrm{Var}(r) - \mathrm{Var}(l)$ is such a variable in a rule $l \to r \in P_\mathcal{H}$ of a prime rewrite system $P_\mathcal{H}$, then the mark $m$ is the counter subterm $l|_a$ for a counter position $a \in CPos(l)$ and the original variable is $y \in \mathrm{Var}(l) - CVar(l)$. For $y_m$ we say that the variable $y$ is marked by the counter expression $m$.
133
+ ---PAGE_BREAK---
134
+
135
+ rules are called flat.
136
+
137
+ **Example 3.2** Suppose that $\mathcal{H} = \{\hat{f}, \hat{g}, \hat{h}\}$ and $\hat{f} \succ \hat{g} \succ \hat{h}$. The rewrite system
138
+
139
+ $$
140
+ \begin{align*}
141
+ \hat{f}(0, v, w; x, y) &\rightarrow \hat{g}(v, w; x, y) \\
142
+ \hat{f}(s(u), v, w; x, y) &\rightarrow \hat{f}(u, v, w; x, y) + (\hat{f}(u, v, w; x, y) + \hat{f}(u, v, w; x, y)) \\
143
+ \hat{g}(0, w; x, y) &\rightarrow \hat{h}(w; x, y) \\
144
+ \hat{g}(s(v), w; x, y) &\rightarrow \hat{g}(v, w; x, y) * \hat{g}(v, w; x, y)
145
+ \end{align*}
146
+ \quad
147
+ \begin{align*}
148
+ \hat{h}(0; x, y) &\rightarrow A(x) \\
149
+ \hat{h}(s(w); x, y) &\rightarrow B(y_w). \hat{h}(w; x, y)
150
+ \end{align*}
151
+ $$
152
+
153
+ is prime, whereas each of the following systems contains a counterexample to the Definition 3.1:
154
+
155
+ • $\hat{f}(s(u), v, w) \to \hat{f}(u, s(v), w) * \hat{f}(u, v, s(w))$ does not match the right-hand side of prime rewrite systems because $\hat{f}(u, s(v), w)$ and $\hat{f}(u, v, s(w))$ are different.
156
+
157
+ • $\hat{f}(s(u); x) \to F(\hat{g}(u; \hat{f}(u; x)))$ is contrary to the fact that auxiliary symbols cannot be encapsulated.
158
+
159
+ • $\{\hat{f}(s(u)) \to \hat{g}(u) * \hat{f}(u), \hat{g}(s(u)) \to \hat{f}(u) + \hat{g}(u)\}$ violates the precedence requirement on the auxiliary symbols: these two rules would imply $\hat{f} \succ \hat{g} \succ \hat{f}$.
160
+
161
+ All prime rewrite systems are confluent because they are orthogonal and left-linear. Prime rewrite systems are terminating since we can construct a lexicographic path ordering $\succ_{lpo}$ for each prime system. The precedence $\succ$ on auxiliary symbols $\mathcal{H}$ can be enlarged to plain symbols $\mathcal{F}$ in the following way: $\forall \hat{f} \in \mathcal{H} \ \forall g \in \mathcal{F}$ we define $\hat{f} \succ g$. This enlarged precedence, together with the left-to-right status of all auxiliary symbols, defines the required ordering.
162
+
163
+ # 4 Generators and folded forms
164
+
165
+ If all counter positions of a term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ are occupied by variables, i.e. $CPos(t) \subseteq VPos(t)$, then the term $t$ is called a **generator**. We say also that a generator is a term with *open counters*.
166
+
167
+ Denote by $\mathcal{N} = \{s^i(0) \mid i \in \mathbb{N}\}$ the infinite set of terms representing *natural numbers*. A (partial) **enumerator** for a generator $t$ is a ground substitution $\xi: \mathcal{X} \to \mathcal{N}$ such that $Dom(\xi) = CVar(t)$ ($Dom(\xi) \subset CVar(t)$). A (partial) enumerator $\xi$ is called *basic* if for all variables $x \in Dom(\xi)$ we have $x\xi = 0$. Denote by $\Xi(t)$ ($\pi\Xi(t)$) the set of all possible (partial) enumerators for the generator $t$, called the (partial) *enumeration* of $t$.
168
+
169
+ Speaking about the normal form $t\xi\downarrow_{P_{\mathcal{H}}}$ makes sense only for *flat* prime rewrite systems $P_\mathcal{H}$. Otherwise the prime rewrite systems may introduce new variables.
170
+
171
+ ## 4.1 Production of fresh variables
172
+
173
+ A difficult problem in describing an infinite sequence of rewrite rules produced during divergence or an infinite sequence of unifiers as a solution of an equational unification problem is how to create fresh variables and how to manage properly this creation. This
174
+ ---PAGE_BREAK---
175
+
176
+ of a prime rewrite system for rewriting, not only the variables but also their marks get instantiated. This allows us to obtain richer structures as normal forms of enumerated generators using the prime rewrite systems. This is the case e.g. if the divergence makes new variables to appear originating from variable renamings during superpositions (see Theorem 2.2), or if an infinite sequence of unifiers in an equational unification problem creates new variables for the same reason.
177
+
178
+ **Example 4.1** Consider an equational unification [FH86] with the symbols $\mathcal{F}_0 = \{a, b\}$, $\mathcal{F}_1 = \{g\}$, $\mathcal{F}_2 = \{f\}$, and the set of equations $E = \{f(b, x) = x, g(f(x, y)) = g(y)\}$. The unification problem $g(x) =_E g(a)$ has the infinite sequence of unifiers
179
+
180
+ $$[x \mapsto a], [x \mapsto f(y_0, a)], [x \mapsto f(y_1, f(y_0, a))], \dots, [x \mapsto f(y_n, \dots, f(y_0, a)\dots)], \dots$$
181
+
182
+ This sequence can be produced from the generator $x \mapsto \hat{h}(z; y)$ using the prime system
183
+
184
+ $$\hat{h}(0; y) \rightarrow a \qquad \hat{h}(s(z); y) \rightarrow f(y_z, \hat{h}(z; y))$$
185
+
186
+ under the condition that we know to rename the variable $y_z$, marked by the counter expression $z$, in the term $f(y_z, \cdot)$ into the variables $y_0, y_1, \dots, y_n$.
187
+
188
+ Assume that a term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$, with all counter variables enumerated, contains variables in a redex of $t$ headed by an auxiliary symbol $\hat{h}$ and suppose that these variables, marked by a counter, appear in the right-hand side $r$ of a rewrite rule $l \to r \in P_{\mathcal{H}}$, where $\text{Head}(l) = \hat{h}$, at a position not below $\hat{h}$ (we say that these variables get unfolded by the rule $l \to r$), exactly as the variable $y$ in the Example 4.1. During a rewrite step, these variables must be renamed, which is done by "marking" them, and which means they receive a subscript created according to the rule $l \to r$ being applied. Actually, this mark is the value of one counter expression of $\hat{h}$, in Example 4.1 it is the counter variable $z$. The rewriting relation coupled with the marking process is called *marked rewriting*.
189
+
190
+ Marking a term means the application of a substitution at positions not below an auxiliary symbol $\tilde{f} \in \mathcal{H}$ and also the evaluation of the counter expressions as marks by the same substitution. Let us denote by $t \bullet_{\mathcal{H}} \sigma$ such an application of a substitution $\sigma$, formally defined as
191
+
192
+ $$
193
+ \begin{array}{lll}
194
+ f(\vec{u}) \bullet_{\mathcal{H}} \sigma & = & f(\vec{u} \bullet_{\mathcal{H}} \sigma) \\
195
+ f(\vec{u}) \bullet_{\mathcal{H}} \sigma & = & f(\vec{u}) \\
196
+ y_m \bullet_{\mathcal{H}} \sigma & = & y_{m\sigma} \\
197
+ y \bullet_{\mathcal{H}} \sigma & = & y\sigma
198
+ \end{array}
199
+ \quad
200
+ \begin{array}{l}
201
+ \text{if } f \notin \mathcal{H}, \\
202
+ \text{if } f \in \mathcal{H}, \\
203
+ \text{if } y_m \text{ is a marked variable,} \\
204
+ \text{if } y \text{ is an unmarked variable.}
205
+ \end{array}
206
+ $$
207
+
208
+ for each term vector $\vec{u}$.
209
+
210
+ **Definition 4.2 (Marked rewriting)** Let $t, t' \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ be two enumerated terms and $P_{\mathcal{H}}$ be a prime rewrite system. We write $t \implies_{P_{\mathcal{H}}} t'$ iff
211
+
212
+ * there exist an outermost position $a \in \mathrm{OPos}_{\mathcal{H}}(t)$, a rewrite rule $l \to r \in P_{\mathcal{H}}$ and a substitution $\sigma$, such that $t|_a = l\sigma$; and
213
+
214
+ * $t' = t[r \bullet_{\mathcal{H}} \sigma]_a$
215
+ ---PAGE_BREAK---
216
+
217
+ The expression $m \bullet_H \sigma$ yields the value of the mark $m$, determined by the match $\sigma$, for each marked variable $y_m \in Var(r)$. According to the choice of the mark of a variable, we get decreasing, increasing or stable markings of the variables within the marked rewriting relation $\Rightarrow_{P_{\mathcal{H}}}$.
218
+
219
+ **Example 4.3** Let us take the enumerated term $t = a + \hat{f}(s^3(0), s^2(0), 0; x)$. If we apply the prime rewrite system $P_H$ consisting of the rules
220
+
221
+ $$ \hat{f}(0, u, v; x) \rightarrow b \qquad \hat{f}(s(z), u, v; x) \rightarrow x_{s(z)} * \hat{f}(z, s(u), v; x) $$
222
+
223
+ on it, then we get $t' = a + (x_3 * \hat{f}(s^2(0), s^3(0), 0; x))$. If we change the second rule of the prime system to
224
+
225
+ $$ \hat{f}(s(z), u, v; x) \rightarrow x_u * \hat{f}(z, s(u), v; x) $$
226
+
227
+ we get $t' = a + (x_2 * \hat{f}(s^2(0), s^3(0), 0; x))$. Finally changing the second rule into
228
+
229
+ $$ \hat{f}(s(z), u, v; x) \rightarrow x_v * \hat{f}(z, s(u), v; x) $$
230
+
231
+ we get $t' = a + (x_0 * \hat{f}(s^2(0), s^3(0), 0; x))$ in the marked rewrite relation $t \Rightarrow_{P_{\mathcal{H}}} t'$. The normal form $t\downarrow_{P_{\mathcal{H}}}$ of the term $t$ will be
232
+
233
+ $$
234
+ \begin{align*}
235
+ a + (x_3 * (x_2 * (x_1 * b))) & \quad \text{for } \hat{f}(s(z), u, v; x) \to x_{s(z)} * \hat{f}(z, s(u), v; x) & (\text{decreasing}), \\
236
+ a + (x_2 * (x_3 * (x_4 * b))) & \quad \text{for } \hat{f}(s(z), u, v; x) \to x_u * \hat{f}(z, s(u), v; x) & (\text{increasing}), \\
237
+ a + (x_0 * (x_0 * (x_0 * b))) & \quad \text{for } \hat{f}(s(z), u, v; x) \to x_v * \hat{f}(z, s(u), v; x) & (\text{stable}).
238
+ \end{align*}
239
+ $$
240
+
241
+ respectively.
242
+
243
+ ## 4.2 Primal grammars
244
+
245
+ We use generators to schematize recursive sets of terms from $\mathcal{T}(\mathcal{F}, \mathcal{N})$. For this reason we introduce the *primal term grammars*.
246
+
247
+ **Definition 4.4** A primal term grammar (or primal grammar for short) $G$ is a 4-tuple $(\mathcal{F}, \mathcal{H}, P_H, t)$, where $\mathcal{F}$ is a signature of plain symbols, $\mathcal{H}$ is a signature of auxiliary symbols, $P_H$ is a prime rewrite system, and $t$ is a (partially basically enumerated) generator.
248
+
249
+ The language generated by a primal term grammar $G = (\mathcal{F}, \mathcal{H}, P_H, t)$, denoted by $L(G)$, is the set of terms $L(G) = \{t\xi\downarrow_{P_{\mathcal{H}}} | \xi \in \Xi(t)\}$. The generator $t$ is called a **folded form** of $L(G)$.
250
+
251
+ The generator $t$ in Definition 4.4 extends to equations and rules just by considering them as terms in the extended signature $\mathcal{F} \cup \{=\}$ and $\mathcal{F} \cup \{\to\}$, respectively.
252
+
253
+ The class of $\omega$-terms ($\rho$-terms) [CH91] is included in the class of primal grammars. Let $t$ be an $\omega$-term and $\vec{a}$ be the finite sequence of all positions such that $t|_{\alpha_i} = \Phi(h_i|b_i \leftarrow$
254
+ ---PAGE_BREAK---
255
+
256
+ bols $\mathcal{H} = \hat{f}$, the generator $t[\hat{f}_1(z_1; \vec{x}), \dots, \hat{f}_n(z_n; \vec{x})]_\mathfrak{A}$, and the prime system $P_\mathcal{H}$ containing
257
+ the pair of rules
258
+
259
+ $$
260
+ \hat{f}_i(0; \vec{x}) \rightarrow l_i \qquad \hat{f}_i(s(z_i); \vec{x}) \rightarrow h_i[\hat{f}_i(z_i; \vec{x})]_{b_i}
261
+ $$
262
+
263
+ for each $\hat{f}_i \in \mathcal{H}$, where $\vec{x} = \bigcup_i \mathrm{Var}(h_i[l_i]_{b_i})$, such that $\Omega(t) = L(G)$. No variable treatment is defined for $\omega$-terms, therefore there are no marks.
264
+
265
+ Like for classical terms, one may want to unify primal grammars. Since the prime
266
+ rewrite systems are canonical, the unification of two primal grammars $G_1 = (\mathcal{F}, \mathcal{H}_1, P_{\mathcal{H}_1}, t_1)$
267
+ and $G_2 = (\mathcal{F}, \mathcal{H}_2, P'_{\mathcal{H}_2}, t_2)$ by means of narrowing becomes possible, although it is undecidable in general. This unification problem can be viewed as the unification of the two
268
+ generators $t_1$ and $t_2$ modulo the equational theory presented by the canonical system
269
+ $P_{\mathcal{H}} = P_{\mathcal{H}_1} \cup P'_{\mathcal{H}_2}$, which is equivalent to the intersection of some instances of the infinite
270
+ sets $L(G_1)$ and $L(G_2)$. In this scope, it would be interesting to know which equational
271
+ theories are presentable by prime (or iterative) rewrite systems.
272
+
273
+ If the unification by narrowing is decidable, we can complete finite primal grammar
274
+ systems $\mathcal{G} = \{(\mathcal{F}, \mathcal{H}_i, P_{\mathcal{H}_i}, t_i) \mid i = 1, \dots, n\}$ just by completing the rewrite systems
275
+ $\mathcal{R}(\mathcal{G}) = \{t_i \mid (\mathcal{F}, \mathcal{H}_i, P_{\mathcal{H}_i}, t_i) \in \mathcal{G}\}$, consisting of the generators in $\mathcal{G}$ – which are usual
276
+ terms in $\mathcal{T}(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$, – modulo the rewrite system $P_{\mathcal{H}} = \bigcup_{i=1}^n P_{\mathcal{H}_i}$.
277
+
278
+ In the sequel, the partially basically enumerated generators, used in Section 5 as folded
279
+ forms for iterated families of rules, containing only one noninstantiated counter variable
280
+ are called axioms.
281
+
282
+ 5 Primal grammars for iterated families
283
+
284
+ We show how to produce a primal grammar *G*. based on a prime rewrite system *P*<sub>*H*</sub>, for
285
+ an iterated family *I*(*S*) of rules originating from a crossed system *S* during completion,
286
+ such that *L*(*G*) = *I*(*S*). The application of counters within a primal grammar *G* =
287
+ (*F*, *H*, *P*<sub>*H*</sub>, *t*) becomes evident now. The supporting counters, instantiated by zeros in the
288
+ axiom *t*, serve as interconnection mechanism between dependent auxiliary symbols inside
289
+ of the rules in the prime rewrite system *P*<sub>*H*</sub>. The main counter, namely the only one
290
+ remaining noninstantiated in the axiom *t*, serves as the index of elements in *I*(*S*). More
291
+ precisely, the instantiation of the main counter in the axiom *t* by *s*<sup>*n*</sup>(0), followed by a
292
+ reduction to normal form under the marked rewriting relation ⇌<sub>*P*<sub>*H*</sub></sub>, results in the *n*-th
293
+ element of the iterated family *I*(*S*).
294
+
295
+ Before presenting the theorem concerning this statement, let us consider some exam-
296
+ ples to explain the principles of the constructions developed in the sequel.
297
+
298
+ **Example 5.1** [Her90b] Consider the forward crossed system
299
+
300
+ $$
301
+ d(x' \ominus (x' \otimes y')) \to y' \qquad g(x) \ominus y \to g(x \ominus (x \circ y))
302
+ $$
303
+
304
+ where $a = 1, b = 1$, $\sigma_1 = [x' \mapsto g(x), y' \mapsto y]$, $\sigma_2 = [y \mapsto g(x) \otimes y]$, $\varphi_1 = [x \mapsto g(x)]$, and $\varphi_2 = [y \mapsto g(x) \circ y]$. The iterated family has the form
305
+
306
+ $$
307
+ d(g^n(x \ominus (x \circ (g(x) \circ \dots (g^n(x) \otimes y))))) \to y \quad (1)
308
+ $$
309
+ ---PAGE_BREAK---
310
+
311
+ into left-hand sides of produced rules during the observed divergence. This is captured
312
+ by the first part of the prime rewrite system:
313
+
314
+ $$
315
+ \begin{align*}
316
+ \hat{f}(0, z_y, z_x; x, y) &\rightarrow x \ominus (x \circ \hat{f}_y(z_y, z_x; x, y)) \\
317
+ \hat{f}(s(z), z_y, z_x; x, y) &\rightarrow g(\hat{f}(z, s(z_y), z_x; x, y))
318
+ \end{align*}
319
+ $$
320
+
321
+ The folded form of the iterated family (1) is the axiom $d(g(\hat{f}(z, 0, 0; x, y))) \to y$.
322
+
323
+ The auxiliary symbol $\hat{f}_y$, capturing the iterated instances of the variable $y$, will be
324
+ constructed from the substitutions $\varphi_2$, and $\sigma_2$. The second part of the prime rewrite
325
+ system will be
326
+
327
+ $$
328
+ \begin{align*}
329
+ \hat{f}_y(0, z_x; x, y) &\rightarrow g(\hat{f}_x(z_x; x)) \otimes y \\
330
+ \hat{f}_y(s(z), z_x; x, y) &\rightarrow g(\hat{f}_x(z_x; x)) \circ \hat{f}_y(z, s(z_x); x, y)
331
+ \end{align*}
332
+ $$
333
+
334
+ originating from the substitutions $\varphi_2$ and $\sigma_2$.
335
+
336
+ The same method applies on the variable $x$, producing the rewrite rules for the auxil-
337
+ iary symbol $\hat{f}_x$:
338
+
339
+ $$
340
+ \hat{f}_x(0; x) \rightarrow x \qquad \hat{f}_x(s(z); x) \rightarrow g(\hat{f}_x(z; x))
341
+ $$
342
+
343
+ We have constructed a prime rewrite system, a mark, and a folded form for the iterated
344
+ family (1).
345
+
346
+ The impact of marking can be nicely observed in the following example taken from a
347
+ specification of the reverse operation on lists.
348
+
349
+ **Example 5.2** The proof by consistency of the inductive theorem *rev(rev(x)) = x* within
350
+ the system
351
+
352
+ $$
353
+ \mathrm{rev}_1(\mathrm{nil}, y) \rightarrow y \quad \mathrm{rev}_1(xa.xb, y) \rightarrow \mathrm{rev}_1(xb, xa.y) \quad \mathrm{rev}(x) \rightarrow \mathrm{rev}_1(x, \mathrm{nil})
354
+ $$
355
+
356
+ leads to a divergent process with the iterated family
357
+
358
+ $$
359
+ \begin{align*}
360
+ \mathrm{rev}_1(\mathrm{rev}_1(xb, xa.\mathrm{nil}), \mathrm{nil}) &\rightarrow xa.xb \\
361
+ \mathrm{rev}_1(\mathrm{rev}_1(xb, xa1.(xa.\mathrm{nil})), \mathrm{nil}) &\rightarrow xa.(xa1.xb) \\
362
+ \mathrm{rev}_1(\mathrm{rev}_1(xb, xa2.(xa1.(xa.\mathrm{nil}))), \mathrm{nil}) &\rightarrow xa.(xa1.(xa2.xb))
363
+ \end{align*}
364
+ $$
365
+
366
+ originating from the forward crossed system
367
+
368
+ $$
369
+ \operatorname{rev}_1(\operatorname{rev}_1(x, \mathrm{nil}), \mathrm{nil}) \rightarrow x \qquad \operatorname{rev}_1(xa.xb, y) \rightarrow \operatorname{rev}_1(xb, xa.y)
370
+ $$
371
+
372
+ where $a = 1$, $b = \Lambda$, $\sigma_1 = [x \mapsto xa.xb]$, $\sigma_2 = [y \mapsto nil]$, $\varphi_1 = [xb \mapsto xa.xb]$, $\varphi_2 = [y \mapsto xa.y]$. The resulting prime system will be
373
+
374
+ $$
375
+ \begin{align*}
376
+ \hat{f}(0, z_y, z_a, z_b; y, xa, xb) &\rightarrow rev_1(xb, xa_{z_y}, \hat{f}_y(z_y, z_a; y, xa)) \\
377
+ \hat{f}(s(z), z_y, z_a, z_b; y, xa, xb) &\rightarrow \hat{f}(z, s(z_y), z_a, z_b; y, xa, xb) \\
378
+ \hat{f}_y(0, z_a; y, xa) &\rightarrow nil \\
379
+ \hat{f}_{xb}(0, z_a; xa, xb) &\rightarrow xb \\
380
+ \hat{f}_{xb}(s(z_b), z_a; xa, xb) &\rightarrow xa_{s(z_a)}.\hat{f}_{xb}(z_b, s(z_a); xa, xb)
381
+ \end{align*}
382
+ $$
383
+
384
+ and the axiom $\mathrm{rev}_1(\hat{f}(v, 0, 0, 0; y, xa, xb)) \to xa.\hat{f}_{xb}(v, 0; xa, xb)$.
385
+ ---PAGE_BREAK---
386
+
387
+ **Example 5.3** [Her90b] Consider the backward crossed system
388
+
389
+ $$
390
+ (x \otimes f(y)) \ominus y \rightarrow (x \ominus y) \otimes y \qquad (x' \ominus y') \otimes y' \rightarrow x'
391
+ $$
392
+
393
+ where $b = 1$, $\sigma_1 = [x \mapsto x \circ f(y)]$, $\sigma_2 = [x' \mapsto x, y' \mapsto f(y)]$, $\varphi_1 = [y \mapsto f(y)]$, and
394
+ $\varphi_2 = [x \mapsto x \ominus f(y)]$. The iterated family of rules has the form
395
+
396
+ $$
397
+ ((x \otimes f^{n+1}(y)) \ominus f^n(y) \ominus \dots \ominus f(y) \ominus y) \otimes y \rightarrow (x \ominus f^n(y)) \ominus \dots \ominus f(y) \ominus y
398
+ $$
399
+
400
+ We have $t_2\sigma_2 = x$ and $s_1[\cdot]_b = (\cdot \ominus y)$. Iterated instances of $s_1[\cdot]_b$ are pumped onto the root of right-hand sides of produced rules during the observed divergence. This will be captured by a part of the primitive recursive rewrite system as in Example 5.1, only that $t_2$ is replaced now by $s_1$:
401
+
402
+ $$
403
+ \hat{g}(0, z_y, z_x; x, y) \rightarrow x \qquad \hat{g}(s(z), z_y, z_x; x, y) \rightarrow \hat{g}(z, s(z_y), z_x; x, y) \ominus \hat{g}_y(z_y; y)
404
+ $$
405
+
406
+ Using the previous system for $\hat{g}$, we can produce a semi-product of an axiom from the
407
+ iterated family, schematizing the right-hand sides:
408
+
409
+ $$((x \otimes f^{n+1}(y)) \ominus f^n(y) \ominus \dots \ominus f(y) \ominus y) \otimes y \rightarrow \hat{g}(s^n(0), 0, 0; x \ominus f^n(y), y)$$
410
+
411
+ The auxiliary symbols $\hat{g}_x$ and $\hat{g}_y$, capturing the iterated instances of the variables $x$ and $y$ respectively, are constructed from the substitutions $\varphi_1$, $\varphi_2$, and $\sigma_1$, the same way as in the forward crossed case.
412
+
413
+ $$
414
+ \begin{array}{rcl@{\hspace{4em}}rcl}
415
+ \hat{g}_x(0, z_y; x, y) & \to & x \circ f(\hat{g}_y(z_y; y)) & & \hat{g}_y(0; y) & \to & y \\
416
+ \hat{g}_x(s(z); z_y; x, y) & \to & \hat{g}_x(z, s(z_y); x, y) \ominus f(\hat{g}_y(z_y; y)) & & \hat{g}_y(s(z); y) & \to & f(\hat{g}_y(z; y))
417
+ \end{array}
418
+ $$
419
+
420
+ After considering the previous rewrite rules for $\hat{g}_x$ and $\hat{g}_y$, the iterated family in this example can be derived from the axiom $(\hat{g}_x(z, 0; x, y) \ominus y) \otimes y \to \hat{g}(z, 0, 0; x \ominus \hat{g}_y(z; y), y)$.
421
+
422
+ We have got once more a prime rewrite system, a mark, and a folded form for the
423
+ iterated family.
424
+
425
+ **Theorem 5.4** For each iterated family *I*(S), originated from a crossed rewrite system S,
426
+ there exists a primal grammar *G* = (*F*, *H*, *P*<sub>*H*</sub>, *t*) with an axiom *t*, such that *L*(*G*) = *I*(S).
427
+
428
+ **Proof:** The basic ideas of the proof for forward crossed systems is given. The construction for backward crossed systems is similar.
429
+
430
+ First of all, let us introduce some more notation:
431
+
432
+ $$
433
+ \begin{align*}
434
+ \vec{w}_f &= \mathrm{Var}(t_2) & \vec{w}_b &= \mathrm{Var}(s_1) \\
435
+ \vec{x}_1 &= \mathrm{Dom}(\varphi_1) & \vec{y}_1 &= \mathrm{VRan}(\varphi_1) \\
436
+ \vec{x}_2 &= \mathrm{Dom}(\varphi_2) & \vec{y}_2 &= \mathrm{VRan}(\varphi_2) \\
437
+ \vec{x}_{12} &= \vec{x}_1 \cup \vec{x}_2 & \vec{y}_{12} &= \vec{y}_1 \cup \vec{y}_2 \\
438
+ \vec{c}_1 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{x}_1\} & \alpha_1(\vec{u}; \vec{v}) &= [\mathfrak{x} \mapsto \hat{f}_x(\vec{u}; \vec{v}) \mid x \in \vec{x}_1] \\
439
+ \vec{c}_2 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{x}_2\} & \alpha_2(\vec{u}; \vec{v}) &= [\mathfrak{x} \mapsto \hat{f}_x(\vec{u}; \vec{v}) \mid x \in \vec{x}_2] \\
440
+ \vec{c}_{12} &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{x}_{12}\} & \alpha_{12}(\vec{u}; \vec{v}) &= [\mathfrak{x} \mapsto \hat{f}_x(\vec{u}; \vec{v}) \mid x \in \vec{x}_{12}] \\
441
+ \vec{d}_1 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{y}_1\} & \gamma_1 &= [\mathfrak{z} \mapsto s(\mathfrak{z}) \mid z \in \vec{c}_1] \\
442
+ \vec{d}_2 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{y}_2\} & \gamma_2 &= [\mathfrak{z} \mapsto s(\mathfrak{z}) \mid z \in \vec{c}_2] \\
443
+ \vec{d}_{12} &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{y}_{12}\} & \gamma_{12} &= [\mathfrak{z} \mapsto s(\mathfrak{z}) \mid z \in \vec{c}_{12}] \\
444
+ \epsilon_1 &= [\mathfrak{z}_x \mapsto s(\mathfrak{z}_x) \mid x \in \vec{y}_1 - \vec{x}_1] & \epsilon_2 &= [\mathfrak{z}_x \mapsto s(\mathfrak{z}_x) \mid x \in \vec{y}_2 - \vec{x}_2] \\
445
+ q_1 &= (\vec{d}_1 - \vec{c}_1)\epsilon_1 & q_2 &= (\vec{d}_2 - \vec{c}_2)\epsilon_2 \\
446
+ V &= Var(t_2[\cdot]_b) - Var(t_2|b) & W &= (\vec{y}_1 - \vec{x}_1) \cap (\vec{y}_2 - \vec{x}_2)
447
+ \end{align*}
448
+ $$
449
+ ---PAGE_BREAK---
450
+
451
+ ular parameterized substitutions introducing the supporting symbols $\hat{f}_x$, and the $\gamma$-s are substitutions for advancing counters. The expression $\Theta_x$ means either $\Theta_1$ if $x \in Dom(\varphi_1)$ or $\Theta_2$ if $x \in Dom(\varphi_2)$, where $\Theta$ stands for one of the indexed symbols. All variables are considered to be global, e.g. $\tilde{c}_1 \cap \tilde{d}_2 = \{z_x \in \mathcal{X} \mid x \in \tilde{x}_1 \cap \tilde{y}_2\}$.
452
+
453
+ Moreover, let $\tau_V(z) = [u \mapsto u_z \mid u \in V]$ be the marking substitution for the variables $V$ with the counter expression $z$.
454
+
455
+ Suppose that $S = \{s_1 \to t_1, s_2 \to t_2\}$ is the forward crossed system as in Definition 2.1.
456
+
457
+ The set $\mathcal{H}$ contains the main symbol $\hat{f}$ for keeping track of the manipulations concerning the term $t_2$, together with the supporting symbols $\hat{f}_x$ for each variable $x \in \tilde{x}_{12}$.
458
+
459
+ The prime rewrite system $P_\mathcal{H}$ contains the rewrite rules
460
+
461
+ $$
462
+ \begin{aligned}
463
+ \hat{f}(0, \tilde{d}_{12}; \tilde{w}_f) &\rightarrow t_2|_b \alpha_2(\tilde{d}_2; \tilde{y}_2) \bullet_{\mathcal{H}} \tau_{V \cup W}(\tilde{c}_2 \cap \tilde{d}_{12}) \\
464
+ \hat{f}(s(z), \tilde{d}_{12}; \tilde{w}_f) &\rightarrow t_2 \alpha_1(\tilde{d}_1; \tilde{y}_1)[\hat{f}(z, \tilde{d}_{12}\gamma_2; \tilde{w}_f)]_b \bullet_{\mathcal{H}} \tau_{V \cup W}(\tilde{c}_2 \cap \tilde{d}_{12})
465
+ \end{aligned}
466
+ $$
467
+
468
+ for the main symbol $\hat{f}$ and the rewrite rules
469
+
470
+ $$
471
+ \begin{aligned}
472
+ \hat{f}_x(0, \tilde{d}_x - \{z_x\}; \tilde{y}_x) &\rightarrow x(\sigma_2 \Delta \alpha_1(\tilde{d}_1\epsilon_1; \tilde{y}_1)) \bullet_H \tau_{V \cup W}(q_x) \\
473
+ \hat{f}_x(s(z_x), \tilde{d}_x - \{z_x\}; \tilde{y}_x) &\rightarrow x(((\varphi_1 \cup \varphi_2) \Delta \alpha_1(\tilde{d}_1\epsilon_1; \tilde{y}_1)) \Delta \alpha_2(\tilde{d}_2\gamma_1; \tilde{y}_2)) \bullet_H \tau_{V \cup W}(q_x)
474
+ \end{aligned}
475
+ $$
476
+
477
+ for each variable $x \in \tilde{x}_{12}$, and subsequently also for each supporting symbol $\hat{f}_x$. The union $\varphi_1 \cup \varphi_2$ is a substitution because $Dom(\varphi_1) \cap Dom(\varphi_2) = \emptyset$ from Definition 2.1.
478
+
479
+ The axiom $t$ is the rule
480
+
481
+ $$ s_1\sigma_1\alpha_1(z, \vec{0}; \vec{y}_1)[t_2\sigma_2\alpha_2(z, \vec{0}; \vec{y}_2)]_a[\hat{f}(z, \vec{0}; \vec{w}_f)]_{ab} \rightarrow t_1\sigma_1\alpha_1(z, \vec{0}; \vec{y}_1) $$
482
+
483
+ The rest is proved by induction on $n$, proving that $t[z \mapsto s^n(0)]\downarrow_{P_\mathcal{H}}$ is the $n$-th element of $\mathcal{I}(S)$. $\square$
484
+
485
+ Using techniques similar to those of Sattler-Klein [SK91], it is possible to construct a divergent rewrite system for each primal grammar.
486
+
487
+ # 6 Conclusion
488
+
489
+ A new schematization called *primal grammars* has been introduced, which presents a generalization of *recurrence domains* [CHK90, CH91] and which has similarities with *meta-rules* [Kir89]. In the proof of Theorem 5.4 an exact method was developed on how to construct primal grammars from iterated families of rules, originating from crossed rewrite systems during completion. Such a construction was not known for the recurrence domains.
490
+
491
+ Primal grammars can be unified via their generators by narrowing. Subsequently, if the unification by narrowing is decidable, it is possible to complete primal grammar systems. Together with the meta-rules [Kir89] and to a certain extent with the rewrite systems with membership constraints (infinite sets of ground equations are considered only) [Com91], the primal grammars represent the only known formalism permitting completion of infinite sets of rules.
492
+ ---PAGE_BREAK---
493
+
494
+ I am grateful to Pierre Lescanne who contributed to the readability of the paper.
495
+
496
+ References
497
+
498
+ [Bac91] L. Bachmair. *Canonical equational proofs*. Birkhäuser, Boston, 1991.
499
+
500
+ [CH91] H. Chen and J. Hsiang. Logic programming with recurrence domains. In J. Leach Albert, B. Monien, and M. Rodríguez Artalejo, editors, *Proceedings 18th ICALP Conference, Madrid (Spain)*, volume 510 of *Lecture Notes in Computer Science*, pages 20–34. Springer-Verlag, July 1991.
501
+
502
+ [CHK90] H. Chen, J. Hsiang, and H.-C. Kong. On finite representations of infinite sequences of terms. In S. Kaplan and M. Okada, editors, *Proceedings 2nd International Workshop on Conditional and Typed Rewriting Systems (CTRS'90), Montreal (Canada)*, volume 516 of *Lecture Notes in Computer Science*, pages 100–114. Springer-Verlag, June 1990.
503
+
504
+ [Com91] H. Comon. Completion of rewrite systems with membership constraints. Research report 699, Laboratoire de Recherche en Informatique, Orsay, France, 1991.
505
+
506
+ [DJ90] N. Dershowitz and J.-P. Jouannaud. Rewrite systems. In J. van Leeuwen, editor, *Handbook of Theoretical Computer Science B: Formal Methods and Semantics*, chapter 6, pages 243–309. Elsevier, Amsterdam, 1990.
507
+
508
+ [DJ91] N. Dershowitz and J.-P. Jouannaud. Notations for rewriting. *Bulletin of the European Association for Theoretical Computer Science*, 43:162–172, February 1991.
509
+
510
+ [FH86] F. Fages and G. Huet. Complete sets of unifiers and matchers in equational theories. *Theoretical Computer Science*, 43(1):189–200, 1986.
511
+
512
+ [GKM83] J.V. Guttag, D. Kapur, and D.R. Musser. On proving uniform termination and restricted termination of rewrite systems. *SIAM Journal on Computing*, 12(1):189–214, February 1983.
513
+
514
+ [Gra88] B. Gramlich. Unification of term schemes - theory and applications. SEKI Report SR-88-18, Universität Kaiserslautern, Germany, 1988.
515
+
516
+ [Her90a] M. Hermann. Chain properties of rule closures. *Formal Aspects of Computing*, 2(3):207–225, 1990.
517
+
518
+ [Her90b] M. Hermann. Vademecum of divergent term rewriting systems. In "Avancées en Programmation" – *Journées AFCET-GROPLAN, Nice (France)*, volume 70, pages 148–164. BIGRE, January 1990.
519
+ ---PAGE_BREAK---
520
+
521
+ tems. In S. Kaplan and M. Okada, editors, *Proceedings 2nd International Workshop on Conditional and Typed Rewriting Systems (CTRS'90)*, Montreal (Canada), volume 516 of Lecture Notes in Computer Science, pages 143–154. Springer-Verlag, June 1990.
522
+
523
+ [Kir89] H. Kirchner. Schematization of infinite sets of rewrite rules generated by divergent completion process. *Theoretical Computer Science*, 67(2-3):303–332, 1989.
524
+
525
+ [SK91] A. Sattler-Klein. Divergence phenomena during completion. In R.V. Book, editor, *Proceedings 4th Conference on Rewriting Techniques and Applications (RTA'91), Como (Italy)*, volume 488 of Lecture Notes in Computer Science, pages 374–385. Springer-Verlag, April 1991.
samples/texts_merged/6426180.md ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ A TECHNIQUE FOR PROVING
5
+ INEQUALITIES IN CARDINAL
6
+ FUNCTIONS
7
+
8
+ by
9
+
10
+ R. E. HODEL
11
+
12
+ Topology Proceedings
13
+
14
+ **Web:** http://topology.auburn.edu/tp/
15
+
16
+ **Mail:** Topology Proceedings
17
+ Department of Mathematics & Statistics
18
+ Auburn University, Alabama 36849, USA
19
+
20
+ **E-mail:** topolog@auburn.edu
21
+
22
+ **ISSN:** 0146-4124
23
+
24
+ COPYRIGHT © by Topology Proceedings. All rights reserved.
25
+ ---PAGE_BREAK---
26
+
27
+ A TECHNIQUE FOR PROVING INEQUALITIES
28
+ IN CARDINAL FUNCTIONS
29
+
30
+ R. E. Hodel
31
+
32
+ **Introduction**
33
+
34
+ Let $d, L, c, s, \chi$ and $\psi$ denote the following standard cardinal functions: density, Lindelöf degree, cellularity, spread (= hereditary cellularity), character, and pseudo-character. (For definitions, see [7] or [14].) The following inequalities are basic in the theory of cardinal invariants: (1) if $X$ is Hausdorff, then $|X| \le 2^{c(X)\chi(X)}$; (2) if $X$ is $T_1$, then $|X| \le 2^{s(X)\psi(X)}$; (3) if $X$ is Hausdorff, then $d(X) \le 2^{s(X)}$; (4) if $X$ is Hausdorff, then $|X| \le 2^{2^{s(X)}}$; (5) if $X$ is Hausdorff, then $|X| \le 2^{L(X)\chi(X)}$. (See [11] and [1].) Partition calculus and ramification arguments are used in the original proofs of these five inequalities.
35
+
36
+ (See [8] and [9].) Specifically, the Erdös-Rado theorem $(2^{\kappa})^+ \rightarrow (\kappa^+)_\kappa^2$ is used in the proof of (1) and (2), the Erdös theorem $\kappa \rightarrow (\kappa, \omega)^2$ is used in the proof of (3), the Erdös-Rado theorem $(2^{2^\kappa})^+ \rightarrow (\kappa^+)_\kappa^3$ is used in the proof of (4), and in proving (5) Arhangel'skiǐ uses a difficult ramification argument to construct a free sequence of length $\kappa^+$.
37
+
38
+ In [16] Šapirovskii proved a fundamental theorem about the cardinal function s, and from this theorem one easily obtains the two inequalities $d(X) \le 2^{s(X)}$ and $|X| \le 2^{2^{s(X)}}$. Pol [15] has modified Šapirovskii's technique to give proofs of the two inequalities $|X| \le 2^{c(X)} \chi(X)$ and $|X| \le 2^{L(X)} \chi(X)$, and I have used this technique to prove the inequality
39
+ ---PAGE_BREAK---
40
+
41
+ $|X| \le 2^{s(X)\psi(X)}$. In summary, the work of Pol and Šapirovskiǐ gives an alternate, unified approach to the five inequalities stated above.
42
+
43
+ The point I would like to emphasize in this paper is that the Pol-Šapirovskiǐ technique plays a fundamental, unifying role in the theory of cardinal invariants and can be used to prove a wide variety of cardinal function inequalities. Specifically, I will illustrate their technique by proving that every $\aleph_1$-compact space with a $G_δ$-diagonal has cardinality at most $2^ω$. The generalized version of this inequality is due to Ginsburg and Woods [10]; their proof uses the Erdös-Rado theorem $(2^\kappa)^+ \rightarrow (\kappa^+)^2_\kappa$. In addition, I will survey several other inequalities in cardinal functions, each of which can be proved using the Pol-Šapirovskiǐ technique.
44
+
45
+ ## The Technique Illustrated
46
+
47
+ In order to take advantage of well known terminology, I will just prove the countable version of the Ginsburg-Woods inequality. (The proof I give can easily be extended to higher cardinality.) The following notation is used: if X is a set, $\mathcal{G}$ is a cover of X, and D is a subset of X, then $\text{st}(D, \mathcal{G}) = \bigcup\{\text{st}(x, \mathcal{G}) : x \in D\}$. Recall that a space is $\aleph_1$-compact if every uncountable subset has a limit point.
48
+
49
+ **Lemma.** Let X be a T₁-space which is $\aleph_1$-compact, let $\mathcal{G}$ be an open cover of X, let $C \subseteq X$. Then there is a countable subset D of C such that $C \subseteq \text{st}(D, \mathcal{G})$.
50
+
51
+ **Proof.** Suppose false. Construct a subset $E = \{x_\alpha : 0 \le \alpha < \omega_1\}$ of C such that for all $\alpha < \omega_1$, $x_\alpha \notin \bigcup_{\beta<\alpha} \text{st}(x_\beta, \mathcal{G})$.
52
+ ---PAGE_BREAK---
53
+
54
+ Let $p$ be a limit point of $E$, and let $G$ be a member of $\mathcal{G}$ such that $p$ belongs to $G$. Since $p$ is a limit point of $E$ and $X$ is $T_1$, there exists $\alpha$ and $\beta$, $\alpha > \beta$, such that $x_\alpha$ and $x_\beta$ belong to $G$. This contradicts $x_\alpha \notin \bigcup_{\beta<\alpha}\text{st}(x_\beta, \mathcal{G})$.
55
+
56
+ **Theorem (Ginsburg and Woods).** Let $X$ be an $\aleph_1$-compact space with a $G_\delta$-diagonal. Then $|X| \le 2^\omega$.
57
+
58
+ *Proof.* Since $X$ has a $G_\delta$-diagonal, there is a countable sequence $\mathcal{G}_1, \mathcal{G}_2, \dots$ of open covers of $X$ such that if $p$ and $q$ are any two distinct points in $X$, then for some $n < \omega$, $q \notin \text{st}(p, \mathcal{G}_n)$. (See [4].) Construct a sequence $\{E_\alpha : 0 \le \alpha < \omega_1\}$ of subsets of $X$ such that $(1) \ | E_\alpha | \le 2^\omega$, $(2)$ for $1 \le \alpha < \omega_1$, if $\{D_n : n < \omega\}$ is a countable collection of countable subsets of $\bigcup_{\beta<\alpha} E_\beta$, and $\bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n) \ne X$, then $E_\alpha - \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n) \ne \emptyset$.
59
+
60
+ Let $E = \bigcup_{\alpha<\omega_1} E_\alpha$; since $|E| \le 2^\omega$, the proof is complete if we can show that $E = X$. Suppose not, and let $p \in X - E$. For each $n < \omega$ let $C_n = \{x : x \in E, p \notin \text{st}(x, \mathcal{G}_n)\}$; clearly $E = \bigcup_{n=1}^\infty C_n$. For each $n < \omega$, apply the Lemma to $\mathcal{G}_n$ and $C_n$: there is a countable subset $D_n$ of $C_n$ such that $C_n \subseteq \text{st}(D_n, \mathcal{G}_n)$. Note that $E \subseteq \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$ and $p \notin \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$. Now choose $\alpha < \omega_1$ such that $\bigcup_{n=1}^\infty D_n \subseteq \bigcup_{\beta<\alpha} E_\beta$. By (2), there is some $q$ in $E_\alpha$ such that $q \notin \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$. This contradicts $E \subseteq \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$.
61
+
62
+ ## Survey of Other Inequalities
63
+
64
+ First we need some definitions. For a $T_1$ space $X$, the point separating weight of $X$, denoted $\text{psw}(X)$, is the smallest infinite cardinal $\kappa$ such that $X$ has a separating open cover $\mathcal{S}$ with the property that every point of $X$ is in
65
+ ---PAGE_BREAK---
66
+
67
+ at most $\kappa$ members of $\mathcal{S}$. (The cover $\mathcal{S}$ is separating if given any two distinct points $p$ and $q$ in $X$, there is some $S \in \mathcal{S}$ such that $p \in S$, $q \notin S$.) If $psw(X) = \omega$, we say that $X$ has a *point-countable separating open cover*. The extent of $X$, denoted $e(X)$, is the smallest infinite cardinal $\kappa$ such that every closed, discrete subset of $X$ has cardinality at most $\kappa$. (See [7], [13]). Note that for a $T_1$ space $X$, $e(X) = \omega$ if and only if $X$ is $\aleph_1$-compact. The weak Lindelöf number of $X$, denoted $wL(X)$, is the smallest infinite cardinal $\kappa$ such that every open cover of $X$ has a subcollection of cardinality $\le \kappa$ whose union is dense in $X$. Note that $wL(X) \le L(X)$ and $wL(X) \le c(X)$. If $wL(X) = \omega$, we say that $X$ is weakly Lindelöf.
68
+
69
+ Each of the following inequalities can be proved using the Pol-Šapirovskii technique. (1) If $X$ is $T_1$, then $|X| \le 2^{e(X)psw(X)}$. (2) If $X$ is $T_1$, then $|X| \le psw(X)^{L(X)\psi(X)}$. (3) If $X$ is normal and $T_1$, then $|X| \le 2^{wL(X)\chi(X)}$. (See [3], [5], and [2] respectively.)
70
+
71
+ The countable version of (1) states that an $\aleph_1$-compact space with a point-countable separating open cover has cardinality at most $2^\omega$. (In fact, the number of compact subsets has cardinality at most $2^\omega$.) This result should be compared with the Ginsburg-Woods inequality. Two proofs of (1) are given in [3]; the first uses an intersection theorem of Erdős and Rado while the second proof uses the Pol-Šapirovskii technique. (This second proof is also closely related to a construction due to M. E. Rudin [6].)
72
+
73
+ Arhangel'skii has asked if every Lindelöf Hausdorff
74
+ ---PAGE_BREAK---
75
+
76
+ space with countable pseudo-character has cardinality at most $2^{\omega}$, and (2) gives a partial answer to this question. Specifically, the countable version of (2) states that a Lindelöf space having countable pseudo-character and point separating weight at most $2^{\omega}$ has cardinality at most $2^{\omega}$.
77
+
78
+ The countable version of (3) states that a weakly Lindelöf first countable Hausdorff space which is also normal has cardinality at most $2^{\omega}$. Except for the normality assumption, inequality (3) unifies the two inequalities $|X| \le 2^{c(X)\chi(X)}$ and $|X| \le 2^{L(X)\chi(X)}$.
79
+
80
+ The reader is referred to [2], [5], [15], and [17] for additional inequalities in cardinal functions which can be proved using the Pol-Sapirovskii technique.
81
+
82
+ ## References
83
+
84
+ 1. A. V. Arhangel'skii, *The cardinality of first countable bicompacta*, DAN SSSR 187 (1969), 967-970.
85
+
86
+ 2. M. Bell, J. Ginsburg and G. Woods, *Cardinal inequalities for topological spaces involving the weak Lindelöf number*, Pacific J. Math. 79 (1978), 37-45.
87
+
88
+ 3. D. K. Burke and R. E. Hodel, *On the number of compact subsets of a topological space*, Proc. Amer. Math. Soc. 58 (1976), 363-368.
89
+
90
+ 4. J. G. Ceder, *Some generalizations of metric spaces*, Pacific J. Math. 11 (1961), 105-126.
91
+
92
+ 5. A. Charlesworth, *On the cardinality of topological spaces*, Proc. Amer. Math. Soc. 66 (1977), 138-142.
93
+
94
+ 6. H. H. Corson and E. Michael, *Metrizability of certain countable unions*, Illinois J. Math. 8 (1964), 351-360.
95
+
96
+ 7. R. Engelking, *General Topology*, Warsaw, 1977.
97
+
98
+ 8. P. Erdös and R. Rado, *A partition calculus in set theory*, Bull. Amer. Math. Soc. 62 (1956), 427-489.
99
+
100
+ 9. ________, *Intersection theorems for systems of sets*, J. London Math. Soc. 35 (1960), 85-90.
101
+ ---PAGE_BREAK---
102
+
103
+ 10. J. Ginsburg and G. Woods, *A cardinal inequality for topological spaces involving closed discrete sets*, Proc. Amer. Math. Soc. **64** (1977), 357-360.
104
+
105
+ 11. A. Hajnal and I. Juhász, *Discrete subspaces of topological spaces*, Indag. Math. **29** (1967), 343-356.
106
+
107
+ 12. R. E. Hodel, *New proof of a theorem of Hajnal and Juhász on the cardinality of topological spaces*, Bull. Acad. Polon. Sci. Sér. Sci. Math. Astronom. Phys. **24** (1976), 999-1000.
108
+
109
+ 13. ________, *On a theorem of Arhangel'skii concerning Lindelöf p-spaces*, Can. J. Math. **27** (1975), 459-468.
110
+
111
+ 14. I. Juhász, *Cardinal functions in topology*, Math. Centr. Amsterdam, 1971.
112
+
113
+ 15. R. Pol, *Short proofs of two theorems on cardinality of topological spaces*, Bull. Acad. Polon. Sci. Sér. Sci. Math. Astronom. Phys. **22** (1974), 1245-1249.
114
+
115
+ 16. B. Šapirovskii, *Discrete subspaces of topological spaces*. Weight, tightness and Souslin number, DAN SSSR **202** (1972), 779-782.
116
+
117
+ 17. ________, *Canonical sets and character. Density and weight in compact spaces*, Soviet Math. Dokl. **15** (1974), 1282-1287.
118
+
119
+ Duke University
120
+
121
+ Durham, North Carolina 27706
samples/texts_merged/6697438.md ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Parallel Continuation-Based Global Optimization for Molecular Conformation and Protein Folding*
5
+
6
+ Thomas F. Coleman† and Zhijun Wu‡
7
+
8
+ **Abstract.** This paper presents our recent work on developing parallel algorithms and software for solving the global minimization problem for molecular conformation, especially protein folding. Global minimization problems are difficult to solve when the objective functions have many local minimizers, such as the energy functions for protein folding. In our approach, to avoid directly minimizing a "difficult" function, a special integral transformation is introduced to transform the function into a class of gradually deformed, but "smoother" or "easier" functions. An optimization procedure is then applied to the new functions successively, to trace their solutions back to the original function. The method can be applied to a large class of nonlinear partially separable functions including energy functions for molecular conformation and protein folding. Mathematical theory for the method, as a special continuation approach to global optimization, is established. Algorithms with different solution tracing strategies are developed. Different levels of parallelism are exploited for the implementation of the algorithms on massively parallel architectures.
9
+
10
+ **Abbreviated title:** Parallel Continuation-Based Global Optimization
11
+
12
+ **Key words:** global/local minimization, numerical continuation, parallel computation, protein folding
13
+
14
+ **AMS (MOS) subject classification:** 49M37, 65Y05, 68Q22, 92-08
15
+
16
+ *To be presented at Supercomputing '94, November, 1994, Washington D.C.
17
+ †Department of Computer Science and Center for Applied Mathematics, Cornell University, Ithaca, NY 14853.
18
+ ‡Advanced Computing Research Institute, Cornell University, Ithaca, NY 14853.
19
+ ---PAGE_BREAK---
20
+
21
+ # 1 Motivation
22
+
23
+ We are developing massively parallel algorithms and software for molecular conformation, especially protein folding. This paper reports on our recent progress.
24
+
25
+ The prediction of protein native structures and the understanding of how they fold from sequences of their constituent amino acids is one of the most important and challenging computational science problems of the decade. The protein folding problem is fundamental to almost all theoretical studies of proteins and protein related life processes. It also has many applications in the biotechnology industries such as structure-based drug design for the treatment of important diseases like polio, cancer, and AIDS.
26
+
27
+ Optimization approaches to the protein folding problem are based on the hypothesis that the protein native structure corresponds to the global minimum of the protein energy. The problem can be attacked computationally by minimizing the protein energy over all possible protein structures. The structure with the lowest energy is presumed to be the most stable protein structure.
28
+
29
+ Mathematically, for a protein molecule of $n$ atoms, let $x = \{x_i \in \mathbb{R}^3, i = 1, \dots, n\}$ represent the molecular structure with each $x_i$ specifying the spatial position of atom $i$. Then the computational problem for protein folding is to globally minimize a nonlinear function $f(x)$ for all $x \in S$, i.e.,
30
+
31
+ $$ \min_{x \in S} f(x) \qquad (1) $$
32
+
33
+ where $S$ is the set of all possible molecular structures, and $f(x)$ is the energy function for the protein defined for all $x$.
34
+
35
+ The difficulty with this approach is that global optimization problems are computationally intractable in general, and especially difficult to solve when problem sizes are large and objective functions contain many local minimizers. For protein folding, the problem sizes tend to be very large with possibly thousands of variables, and the objective functions usually have exponentially many local minimizers. Therefore, to solve the optimization problems for protein folding, special algorithms must be developed which exploit the problem structure. In addition, parallel high performance computing is also essential for the solutions to be computationally feasible.
36
+
37
+ Our work focuses on establishing a new continuation-based approach to global optimization; we develop efficient parallel algorithms and software specifically for molecular conformation and protein folding.
38
+ ---PAGE_BREAK---
39
+
40
+ ## 2 The basic approach
41
+
42
+ The idea behind our approach is the following. To avoid directly minimizing a "difficult" objective function, a smoothing technique is introduced to transform the function into a class of gradually deformed, but "smoother" or "easier" functions. An optimization procedure is then applied to the new functions successively, to trace their solutions back to the original function.
43
+
44
+ To obtain our smoothing transformation, a parametrized integral transformation is introduced, transforming a given function into a class of new functions corresponding to a set of parameter values. A transformed function is in some sense a coarse approximate to the original function. After applying the transform, the original function becomes smoother with small and narrow minimizers being removed while the overall structure of the function is maintained. This allows a solution tracing procedure to skip less interesting local minimizers, and concentrate on regions with average low function values where a global minimizer is most likely to be located.
45
+
46
+ Different methods can be employed to trace the solutions. For example, a simple method is to apply a random search procedure to the transformed functions successively to locate their low local minimizers. Another possible method is to apply local optimization procedures to each transformed function and trace a set of local minimizers.
47
+
48
+ Our approach is called continuation-based, because the transformation can actually be viewed as a special continuation process by the theory described in [7]. Following this theory, our new approach can be studied in a general numerical continuation setting, and algorithms can be developed by employing standard advanced numerical methods. We will discuss these issues later in this paper.
49
+
50
+ ## 3 Transformation
51
+
52
+ We first introduce the transformation.
53
+
54
+ **Definition 1** Given a nonlinear function $f$, the transformation $\langle f \rangle_\lambda$ for $f$ is defined such that for all $x$,
55
+
56
+ $$ \langle f \rangle_{\lambda}(x) = C_{\lambda} \int f(x') e^{-\|x-x'\|^2/\lambda^2} dx', \quad (2) $$
57
+
58
+ or equivalently,
59
+
60
+ $$ \langle f \rangle_{\lambda}(x) = C_{\lambda} \int f(x-x') e^{-\|x'\|^2/\lambda^2} dx', \quad (3) $$
61
+ ---PAGE_BREAK---
62
+
63
+ where $\lambda$ is a positive number and $C_\lambda$ is a normalization constant such that
64
+
65
+ $$ C_{\lambda} \int e^{-\|x\|^2 / \lambda^2} dx = 1. \quad (4) $$
66
+
67
+ To understand this transformation, consider that given a random function $g(x')$ and a probability distribution function $p(x')$ for the random variable $x'$, the expectation of the function $g$ with respect to $p$ is
68
+
69
+ $$ \langle g \rangle_p = \int g(x') p(x') dx'. \quad (5) $$
70
+
71
+ In light of (5), the defined transformation (2) yields a function value for $\langle f \rangle_\lambda$ at any $x$ equal to the expectation for $f$ sampled by a Gaussian distribution function centered at $x$.
72
+
73
+ For example, consider the following nonlinear function:
74
+
75
+ $$ f(x) = (x - 1)^2 + 0.1 \sin(20(x - 1)) \quad (6) $$
76
+
77
+ which is a quadratic function augmented with a “noise” function. The transformation for this function can be computed:
78
+
79
+ $$ \langle f \rangle_{\lambda}(x) = (x-1)^2 + \frac{\lambda^2}{2} + 0.1e^{-(20\lambda)^2/4} \sin(20(x-1)). \quad (7) $$
80
+
81
+ The function value $\langle f \rangle_\lambda(x)$ for fixed $x$ is equal to the integration with respect to the product of two functions, the original function $f(x')$ and the Gaussian distribution function $p(x') = C_\lambda e^{-\|x-x'\|^2/\lambda^2}$ (Figure 1 (a)), where $\lambda$ determines the size of the dominant region of the Gaussian. Since the most significant part of the integration is that within the dominant region of the Gaussian, $\langle f \rangle_\lambda(x)$ can be viewed as the average value for the original function $f$ within a small $\lambda$-neighborhood around $x$. If $\lambda$ is equal to zero the transformed function is exactly the original function. Otherwise, original function variations in small regions are averaged out, and the transformed function will become “smoother” (Figure 1 (b)).
82
+
83
+ Figure 2 shows how the function $\langle f \rangle_\lambda$ in (7) behaves with increasing $\lambda$. Observe that when $\lambda = 0.0$ the function is the original function; when we increase $\lambda$ to 0.1, the function becomes “smoother”; when $\lambda$ is increased further to 0.2, the function becomes entirely “smooth”. As we will show in the following sections, what we observed here is a general property of the transformation, i.e., for any function $f$, the larger the value of $\lambda$, the “smoother” the transformed function.
84
+ ---PAGE_BREAK---
85
+
86
+ Figure 1: A 1-dimensional transformation example.
87
+ ---PAGE_BREAK---
88
+
89
+ Figure 2: A class of gradually deformed functions.
90
+ ---PAGE_BREAK---
91
+
92
+ # 4 Smoothness
93
+
94
+ Let $\hat{f}$ be the Fourier transformation for function $f$, and $\langle \tilde{f} \rangle_\lambda$ the Fourier transformation for function $\langle f \rangle_\lambda$. Recall that the transformation $\langle f \rangle_\lambda$ for $f$ is just a convolution of $f$ and $p$, where $p$ is the Gaussian distribution function
95
+
96
+ $$p(x) = C_\lambda e^{-\|x\|^2/\lambda^2}. \quad (8)$$
97
+
98
+ Therefore the Fourier transformation for $\langle f \rangle_\lambda$ is equal to the product of the Fourier transformations for $f$ and $p$. The Fourier transformation for the Gaussian distribution function is
99
+
100
+ $$\hat{p}(\omega) = e^{-\frac{\lambda^2 \|\omega\|^2}{4}}. \quad (9)$$
101
+
102
+ So, we have
103
+
104
+ $$\langle \tilde{f} \rangle_{\lambda}(\omega) = e^{-\frac{\lambda^2 \|\omega\|^2}{4}} \hat{f}(\omega). \quad (10)$$
105
+
106
+ We see from (10) that if $\lambda \to 0$, $\langle \tilde{f} \rangle_\lambda$ converges to $\hat{f}$, and $\langle f \rangle_\lambda$ converges to $f$.
107
+
108
+ Also by (10), for fixed $\lambda$, if $\omega$ is large $\langle \tilde{f} \rangle_\lambda(\omega)$ will be very small. This implies that high frequency components of the original function become very small after the transformation. This is why the transformed function is “smoother”. In addition, for larger $\lambda$ values, wider ranges of high frequency components of the original function practically vanish after the transformation. Therefore, the transformed function becomes increasingly smooth as $\lambda$ increases. We state these properties formally in the following theorem.
109
+
110
+ **Theorem 1** Let $f$, $\hat{f}$, $\langle f \rangle_\lambda$ and $\langle \tilde{f} \rangle_\lambda$ all be given and well defined. Then $\forall \varepsilon > 0$, $\exists \delta \propto 1/\lambda$ for fixed $\lambda$, such that $\forall \omega$ with $\|\omega\| > \delta$,
111
+
112
+ $$\frac{|\langle \tilde{f} \rangle_{\lambda}(\omega)|}{|\hat{f}(\omega)|} < \varepsilon. \quad (11)$$
113
+
114
+ *Proof:* See [7]. □
115
+
116
+ From this theorem we learn that the relative size of $\langle \tilde{f} \rangle_\lambda(\omega)$ can be made arbitrarily small for all $\omega$ with $\|\omega\|$ greater than a small value $\delta$. Since $\delta$ is inversely proportional to $\lambda$, high frequency components are removed when $\lambda$ is large.
117
+ ---PAGE_BREAK---
118
+
119
+ # 5 Numerical Properties
120
+
121
+ The definition of the transformation (2) involves high dimensional integration which cannot be computed in general (except perhaps by the Monte Carlo method which is not appropriate for our purposes because it is too expensive). So the transformation may not be applicable to arbitrary functions, at least numerically. However, this transformation does apply to a large class of nonlinear partially separable functions, and especially to typical molecular conformation and protein folding energy functions.
122
+
123
+ Consider a large class of nonlinear partially separable functions, called
124
+ generalized multilinear functions,
125
+
126
+ $$f = \sum_i \prod_j g_j^i, \quad (12)$$
127
+
128
+ where $g_j^i$'s are one dimensional nonlinear functions. It is easy to verify that
129
+
130
+ $$\langle f \rangle_{\lambda} = \sum_{i} \prod_{j} \langle g_{j}^{i} \rangle_{\lambda}. \qquad (13)$$
131
+
132
+ Since transformation $\langle g_j^i \rangle_\lambda$, for all $i$ and $j$, involves only one dimensional integration, the transformation for a generalized multilinear function can be numerically computed.
133
+
134
+ In particular, let us consider a typical n-atom molecular conformation energy function,
135
+
136
+ $$f(x) = \sum_{i=1, j=1}^{n} h_{ij}(\|x_i - x_j\|) \quad (14)$$
137
+
138
+ where $x = \{x_i \in \mathbb{R}^3, i = 1, \dots, n\}$ and $h_{ij}$ is the pairwise energy function determined by $\|x_i - x_j\|$, the distance between atoms $i$ and $j$. Because of the partial separability of this type of function, the transformation for $f$ is equal to the sum of the transformations for the pairwise functions $h_{ij}$. However the computation for the pairwise transformation still cannot be conducted directly, because there is still more than one variable. Nevertheless, the following theorem provides a feasible way to compute the molecular energy transformation:
139
+
140
+ **Theorem 2** Let $f$ be defined as in (14). Then the transformation (2) for $f$ can be computed using the formula
141
+
142
+ $$\langle f \rangle_{\lambda}(x) = \sum_{i=1, j=1}^{n} \langle h_{ij} \rangle_{\sqrt{2}\lambda} (\|r_{ij}\|) \quad (15)$$
143
+ ---PAGE_BREAK---
144
+
145
+ where $r_{ij} = x_i - x_j$ and
146
+
147
+ $$ \langle h_{ij} \rangle_{\sqrt{2}\lambda} (\|r_{ij}\|) = c_{\sqrt{2}\lambda} \int h_{ij}(\|r'_{ij}\|) e^{-\|r_{ij}-r'_{ij}\|^2 / 2\lambda^2} dr'_{ij}. \quad (16) $$
148
+
149
+ **Proof:** See [7]. □
150
+
151
+ Note that $\langle h_{ij} \rangle_{\sqrt{2}\lambda} (\|r_{ij}\|)$ can be computed with a standard numerical integration technique; therefore, the transformation $\langle f \rangle_\lambda (x)$ can be computed in this fashion.
152
+
153
+ # 6 Minimization
154
+
155
+ In summary, we have introduced a parametrized integral transformation to transform the objective function of a global optimization problem. Statistically, the transformation averages the function values, and provides coarse estimates for the function variation. Geometrically, the transformation deforms the function into a class of “smoother” functions with small high frequency components removed in the transformed functions. Physically, the transformation allows a physical system to have small perturbations, and the transformed function reflects the average behavior of the system dynamics. Finally, the transformation can exploit partial separability, and is particularly suitable for molecular conformation and protein folding energy functions.
156
+
157
+ With this transformation, a general global minimization procedure can immediately be constructed as illustrated in Figure 3. That is, given a global minimization problem with a nonlinear objective function $f$, we first transform the function into a class of new functions $\langle f \rangle_{\lambda_1}$, $\langle f \rangle_{\lambda_2}$, ..., $\langle f \rangle_{\lambda_m}$ for $\lambda_1 > \lambda_2 > \dots > \lambda_m = 0$ with $\langle f \rangle_{\lambda_m}$ corresponding to $f$. We then apply local optimization procedures to the transformed functions successively, to trace their solutions back to the original function. Since the transformed function with a larger $\lambda$ value is “smoother” with possibly fewer local minimizers, we can start by minimizing $\langle f \rangle_{\lambda_1}$, and next, take its solution as the initial point and minimize $\langle f \rangle_{\lambda_2}$, and so on and so forth. Since a transformed function is also a coarse approximation of the original function, its solution should also be a rough estimate for the solution of the original function. So by minimizing the transformed functions successively,
158
+ ---PAGE_BREAK---
159
+
160
+ 1 Choose
161
+
162
+ $$ \{\lambda_i : i = 1, \dots, m, \lambda_1 > \dots > \lambda_m = 0\} $$
163
+
164
+ 2 For $i = 1, \dots, m$
165
+
166
+ $$ \min_{x \in S} \langle f \rangle_{\lambda_i}(x) $$
167
+
168
+ Figure 3: A global minimization procedure.
169
+
170
+ the whole process is concentrated in regions where the solution of the original function is most likely to be located.
171
+
172
+ # 7 Tracing Solutions
173
+
174
+ The continuation-based global minimization approach contains two major components:
175
+
176
+ 1. Application and computation of the transformation (2),
177
+
178
+ 2. A solution tracing procedure.
179
+
180
+ Clearly, different algorithms can be implemented if different solution tracing procedures are employed. An efficient solution tracing method is crucial for the algorithm to be numerically effective and efficient.
181
+
182
+ In principle, tracing solutions means tracing global minimizers: the solution for a global minimization problem is sought for each transformed function. However, in a broader sense, the solutions can actually be either global or local, as long as they form a “path” that can lead to a global minimizer for the original objective function. Under some circumstances, such a “path” exists as a smooth curve, and then tracing solutions simply implies following a smooth solution curve determined by a set of transformed functions.
183
+ ---PAGE_BREAK---
184
+
185
+ A random search procedure is an example of a simple solution tracing method, e.g., the simulated annealing random search [1]. This method is easy to implement, and especially robust in the sense that the random search procedure can be designed to converge asymptotically to a global minimizer. However, convergence depends on how thoroughly the search can be conducted. Usually, an unaffordable amount of computation is required even for small problems. Another problem with this method is that the randomness introduces uncertainty.
186
+
187
+ A more deterministic and efficient alternative is to use a local minimization procedure. This method applies local minimization to the transformed functions successively, and returns a local minimizer as the candidate for the solution to the given problem. The method is relatively inexpensive, and clearly more feasible for large scale problems, e.g., the protein problems. In particular, it can take advantage of well-developed local optimization techniques [6].
188
+
189
+ The effectiveness of this method can be illustrated in the following simple experiment: Consider the function in (6), and suppose that we want to find its global minimizer. First we transform the function to obtain a class of new functions given in (7). Choose $\lambda_1 = 0.2$, $\lambda_2 = 0.1$ and $\lambda_3 = 0.0$. We then have three transformed functions as shown in Figure 2 (a), (b) and (c). The function in Figure 2 (c) is equivalent to the original function. Then we apply a local minimization procedure to the transformed functions from $\langle f \rangle_{\lambda_1}$ to $\langle f \rangle_{\lambda_3}$. Since $\langle f \rangle_{\lambda_1}$ is “smooth” with only one local minimizer, the solution can immediately be found for it. Started from this solution, a local minimizer, being also a global minimizer, for $\langle f \rangle_{\lambda_2}$ can be found subsequently. Continuing the process, the global minimizer for the original function can be located at the end.
190
+
191
+ The example shows that the local minimization skips small local minimizers at the first stages and goes directly to a region of interest, where a global minimizer is very likely to be found subsequently. In general, the method may not always be this fortunate. For example, the early transformed functions may still have more than one local minimizer; the chosen minimizer may not necessarily lead to a global minimizer for the function at the final stage.
192
+
193
+ To begin with the “right local minimizer”, either a good initial point is provided based on the known knowledge of given problem, or a set of local minimizers can be selected and traced, and one of them may lead to a good solution.
194
+ ---PAGE_BREAK---
195
+
196
+ ## 8 Numerical Continuation
197
+
198
+ Our recent work [7] shows that the parametrized integral transform in (2) defines for $f$ a homotopy on $[0, \lambda_0]$ for any $\lambda_0 < \infty$. Moreover, under appropriate assumptions, the transformed functions $\{\langle f \rangle_\lambda : \lambda \in [0, \lambda_0]\}$ determine for any given local minimizer $x_0$ of $\langle f \rangle_{\lambda_0}$ a continuous and differentiable curve $x(\lambda)$ so that for all $\lambda \in [0, \lambda_0]$, $x(\lambda)$ is a local minimizer of $\langle f \rangle_\lambda$. In this case, the deterministic trace of the solution, e.g., using local minimization, is equivalent to following a solution curve $x(\lambda)$ (or a set of such curves). This forms the theoretical basis for our method as a special continuation approach to global optimization. Therefore, an initial value problem to determine the solution curve can be derived in a simple and computable form:
199
+
200
+ $$ x' = -\frac{\lambda}{2} \langle \nabla^2 f \rangle_{\lambda}^{-1}(x) \langle \Delta g \rangle_{\lambda}(x) \quad (17) $$
201
+
202
+ $$ x_0 = x(\lambda_0) \quad (18) $$
203
+
204
+ where $\nabla^2 f$ is the Hessian of the function, and $\Delta g$ the Laplace operation applied to the components of the gradient. This result opens another direction for the effective trace of the solution - solve the initial value problem using standard numerical IVP-methods, e.g., the predictor-corrector methods [2]. One simple example is to use an Euler-Newton method as shown in Figure 4. In this method, at each iteration, an Euler predictor is computed to start a Newton's local minimization procedure to find a solution on the curve. The process is continued, and the solution curve is followed to its end.
205
+
206
+ ## 9 Parallelism
207
+
208
+ Different levels of parallelism can be exploited for continuation-based global optimization, e.g., parallel solution tracing, parallel function evaluation, and parallel linear algebra and optimization.
209
+
210
+ At the solution tracing level, parallelism can be exploited by using multiprocessors to generate multiple random searches, or trace a set of local minimizers in parallel. For the random search technique, increasing the number of processors is equivalent to increasing the number of trials. The more processors that are used, the higher the probability a solution can be found. For tracing multiple local minimizers, using multiprocessors simply reduces the total computation and increases the potential for finding the best
211
+ ---PAGE_BREAK---
212
+
213
+ $$ \lambda = \lambda_0, \quad x = x_0 $$
214
+
215
+ Repeat
216
+
217
+ $$ \text{Compute } x' = -\frac{\lambda}{2} \langle \nabla^2 f \rangle_{\lambda}^{-1}(x) \quad \langle \Delta g \rangle_{\lambda}(x) $$
218
+
219
+ $$ \lambda = \lambda + h, \quad x = x + x' h $$
220
+
221
+ Repeat
222
+
223
+ $$ \text{Compute } s = - \langle \nabla^2 f \rangle_{\lambda}^{-1}(x) \langle g \rangle_{\lambda}(x) $$
224
+
225
+ $$ x = x + \alpha s $$
226
+
227
+ End
228
+
229
+ End
230
+
231
+ Figure 4: Euler-Newton prediction and correction.
232
+ ---PAGE_BREAK---
233
+
234
+ possible local minimizer. In either case, the parallelism is coarsely grained
235
+ with little communication required among processors but intensive compu-
236
+ tation for each, which is good for massively parallel computation, especially
237
+ on the machines with high communication to computation ratios.
238
+
239
+ Parallel function evaluation is important for both local and global op-
240
+ timization. For the continuation-based global optimization method, more
241
+ than half of the total computation involves function evaluation, and each
242
+ evaluation is costly, requiring numerical integration. However, for molecular
243
+ conformation and protein folding, the energy functions to be minimized are
244
+ partially separable with typically a small number of element functions. So
245
+ for each element function, we can construct a function value look-up table.
246
+ The function evaluation can then be conducted with cubic spline interpola-
247
+ tion using the function values already calculated in the look-up tables. In
248
+ this way, the total function evaluation cost can be reduced; moreover, the
249
+ function value look-up tables, no matter how expensive they are, can be
250
+ computed in parallel with perfect parallel efficiency. In this sense, we say
251
+ that the function evaluation can be indirectly parallelized.
252
+
253
+ Finally, the continuation-based global optimization method is rich in
254
+ linear algebra which is good for high performance computing. When the
255
+ problem is large, say, the problem for a protein with ten thousand atoms,
256
+ the parallelism at this level can also be exploited by parallelizing the major
257
+ linear algebra operations, e.g., linear system solve and local minimization.
258
+ This type of parallelism has been well studied and understood, and can be
259
+ exploited using standard techniques.
260
+
261
+ # 10 Numerical Experience
262
+
263
+ The development of the continuation-based approach to global optimization
264
+ has been accompanied with a series of computational works [3, 4, 5]. The
265
+ algorithms have been implemented on parallel machines and tested with a
266
+ set of molecular conformation problems. The results we obtained support
267
+ the approach, and show that the algorithms perform much more effectively
268
+ and efficiently than conventional global optimization methods. They are also
269
+ very suitable for massively parallel computation. We illustrate in the follow-
270
+ ing some of our numerical experience with two particular algorithms. Both
271
+ methods are continuation-based, but differ in solution tracing strategies.
272
+
273
+ The first method, called the effective energy simulated annealing, uses
274
+ a random search procedure, the simulated annealing method, to trace the
275
+ ---PAGE_BREAK---
276
+
277
+ solutions. Recall that in the simulated annealing method, a temperature parameter $T$ is decreased from a positive number to zero as the iteration count increases. For each value of $T$, a number of random trials is applied to the given energy function. For the effective energy simulated annealing method, a function $\lambda = \alpha T$ first is defined, where $\alpha$ is a constant. For each value of $T$, a $\lambda$ value is determined, which, in turn, defines a transformed function, called the effective energy function. A number of random trials is then conducted on this function to locate a solution. The parameter $\lambda$ goes to zero as $T$ decreases, and the transformed function changes to the original function. The process is equivalent to tracing the solutions for a set of transformed functions using the Monte Carlo search with a different temperature $T$ for each different transformed function. Note that if $\alpha$ is set to zero, $\lambda$ is equal to zero for all $T$. In this case all transformed functions are the same original function, and the algorithm is reduced to a standard simulated annealing procedure.
278
+
279
+ The effective energy simulated annealing algorithm has been implemented on a 32-node Intel iPSC/860 at Cornell. The machine is a parallel distributed memory system with a hypercube interconnection network. Each processor has 8 Mbytes of local memory, and achieves a theoretical peak performance of 40 Mflops. The parallelization of the algorithm is straightforward: Multiple processors are used at each iteration to generate multiple sequences of trials independently. Little communication is required among processors except for calculating the global acceptance rate at the end of each iteration. The load also is well balanced: the number of trials is the same for each processor. For more implementation details, readers are referred to [3].
280
+
281
+ The algorithm is tested with a set of small Lennard-Jones microcluster conformation problems, which have been well studied, and widely used as model problems for molecular conformation. Typical results for these problems are shown in Figure 5, where three pictures for clusters of $n = 8, 12, 16$ atoms are given. The curves indicate the energy levels for the solutions obtained by the algorithm with different $\alpha$ values. We see when $\alpha$ is equal to zero, the algorithm corresponding to a standard simulated annealing procedure can only find solutions with very high energy levels. However, within the same amount of computation time, the effective energy simulated annealing algorithm with a proper choice of positive $\alpha$ value can find solutions whose energy levels are already very close to the best known values (the bottom lines of the pictures). As a matter of fact, by applying a local minimization procedure started with these solutions, we obtained
282
+ ---PAGE_BREAK---
283
+
284
+ immediately the best known solutions for all the clusters. These results just show how effective the method with the transformation scheme can be for molecular conformation, compared with a conventional global optimization technique.
285
+
286
+ The parallel performance for the algorithm is illustrated in Figure 6, where two examples are given to show how rapidly the energy levels of the solutions found by the algorithm decrease with increasing numbers of processors.
287
+
288
+ The second algorithm we want to discuss is the deterministic local tracing algorithm, which uses local minimization as a solution tracing procedure. The algorithm first requires the objective function to be transformed into a class of new functions $\langle f \rangle_{\lambda_1}$, $\langle f \rangle_{\lambda_2}$, ..., $\langle f \rangle_{\lambda_m}$ for a set of parameter values $\lambda_1 > \lambda_2 > ... > \lambda_m = 0$, with $\langle f \rangle_{\lambda_m}$ corresponding to $f$. A set of starting points are sampled randomly so that a group of local minimizers for $\langle f \rangle_{\lambda_1}$ are obtained at the beginning. Then local minimization is applied to the remaining transformed functions successively to trace the changes of these local minimizers, and the one with the lowest function value is selected at the last stage as a candidate for the solution to the given problem.
289
+
290
+ The deterministic local tracing algorithm has been implemented on a 64-node IBM SP1 at Cornell. The SP1 is a parallel distributed memory system with a high performance switch installed for better interprocessor communication. Each processor is an IBM RS/6000 with 128 Mbytes of memory and a peak performance of 125 Mflops. In this implementation, multiprocessors are used to trace multiple local minimizers in parallel with one local minimizer for each processor. Little communication is required. Each processor carries a sequence of local minimizations. Basically, the more processors used, the more local minimizers traced, and hence the higher the probability of obtaining a good solution. Also, the larger the problem sizes, the more intensive the computation for each processor. Since the problem sizes of practical interest tend to be very large, the machines with high communication to computation ratios, such as the IBM SP1, can be very suitable for the algorithm to achieve good performance in practice.
291
+
292
+ The algorithm has been tested with a set of "perturbed Lennard-Jones microcluster conformation problems". Such a problem is obtained by adding in each pairwise Lennard-Jones potential function a periodically varying term, $\rho \sin(\omega r)/r$, where $\rho$ and $\omega$ are constants, and $r$ is the distance between a given pair of atoms. The functions with properly adjusted $\rho$ and $\omega$ can generate a set of even more complicated global optimization test problems. The perturbed functions reduce to pure Lennard-Jones problems when $\rho$ is
293
+ ---PAGE_BREAK---
294
+
295
+ Figure 5: Typical numerical results obtained by the effective energy simulated annealing algorithm.
296
+ ---PAGE_BREAK---
297
+
298
+ Figure 6: The parallel performance of the effective energy simulated annealing algorithm.
299
+ ---PAGE_BREAK---
300
+
301
+ <table>
302
+ <caption>Deterministic Local Tracing</caption>
303
+ <thead>
304
+ <tr>
305
+ <th rowspan="2">p</th>
306
+ <th colspan="2">n = 16</th>
307
+ <th colspan="2">n = 20</th>
308
+ <th colspan="2">n = 24</th>
309
+ </tr>
310
+ <tr>
311
+ <th>m = 1</th>
312
+ <th>m = 40</th>
313
+ <th>m = 1</th>
314
+ <th>m = 40</th>
315
+ <th>m = 1</th>
316
+ <th>m = 40</th>
317
+ </tr>
318
+ </thead>
319
+ <tbody>
320
+ <tr>
321
+ <td>1</td>
322
+ <td>-4.2805e1</td>
323
+ <td>-5.7933e1</td>
324
+ <td>-5.2270e1</td>
325
+ <td>-7.6255e1</td>
326
+ <td>-1.0112e2</td>
327
+ <td>-1.0312e2</td>
328
+ </tr>
329
+ <tr>
330
+ <td>2</td>
331
+ <td>-5.5878e1</td>
332
+ <td>-5.6551e1</td>
333
+ <td>-7.4508e1</td>
334
+ <td>-8.0626e1</td>
335
+ <td>-1.0129e2</td>
336
+ <td>-1.0048e2</td>
337
+ </tr>
338
+ <tr>
339
+ <td>4</td>
340
+ <td>-5.8068e1</td>
341
+ <td>-6.0420e1</td>
342
+ <td>-7.6577e1</td>
343
+ <td>-7.9048e1</td>
344
+ <td>-1.0555e2</td>
345
+ <td>-1.0419e2</td>
346
+ </tr>
347
+ <tr>
348
+ <td>8</td>
349
+ <td>-5.8068e1</td>
350
+ <td>-6.1350e1</td>
351
+ <td>-7.7593e1</td>
352
+ <td>-7.9561e1</td>
353
+ <td>-1.0250e2</td>
354
+ <td>-1.0419e2</td>
355
+ </tr>
356
+ <tr>
357
+ <td>16</td>
358
+ <td>-5.8068e1</td>
359
+ <td>-6.1350e1</td>
360
+ <td>-8.0518e1</td>
361
+ <td>-8.3793e1</td>
362
+ <td>-1.0411e2</td>
363
+ <td>-1.0604e2</td>
364
+ </tr>
365
+ <tr>
366
+ <td>32</td>
367
+ <td>-6.1350e1</td>
368
+ <td>-6.1350e1</td>
369
+ <td>-8.3664e1</td>
370
+ <td>-8.3793e1</td>
371
+ <td>-1.0463e2</td>
372
+ <td>-1.0604e2</td>
373
+ </tr>
374
+ </tbody>
375
+ </table>
376
+
377
+ Table 1: Energy values obtained by the deterministic local tracing method for the perturbed Lennard-Jones problems.
378
+
379
+ set to zero. In this test, *ρ* is set to 1, and *ω* to 10.
380
+
381
+ Table 1 lists the results for some example problems (n=16,20,24), obtained by the algorithm using different numbers of processors (*p*). The data in the table are the energy values for the solutions obtained by the algorithm. To transform the function, a set of values {*λ*ᵢ: *i* = 1, ..., *m*} are used with *λ*ᵢ = (*m* − *i*)h, h = 0.01. So, *m* = 1 simply implies that no transformation is used, and the algorithm is just a local minimization sampling procedure. The comparison between the two cases, *m* = 1 and *m* = 40, shows that with transformation, the algorithm performs much more effectively than directly doing local minimization on the given function. In the table, we can also see that as the number of processors increases, the energy values for the solutions obtained by the algorithm decrease rapidly.
382
+
383
+ # 11 Software Development
384
+
385
+ Based on this work, we are currently developing a parallel continuation-based global optimization software system, called Cglop (Figure 7), for molecular conformation and protein folding. An initial version of the system has just been completed (see [5] for more details).
386
+
387
+ The system transforms the objective function into a sequence of gradually deformed functions. There are three subsystems corresponding to three different solution tracing procedures, namely, the global simulated annealing random search (GLOBAL), the Newton's local minimization method (LOCAL), and the Euler-Newton predictor-corrector method (PC). As we
388
+ ---PAGE_BREAK---
389
+
390
+ have discussed in this paper, the random search method is more robust but also costly. The deterministic local tracing is efficient, but may not guarantee a global minimizer. The predictor-corrector method provides a more accurate way to trace the solution. Overall, each of these methods has advantages and disadvantages, but the combination of them provides a robust set of numerical tools for both effective and efficient trace of the solutions. The system also provides transformation routines (TRANSFORMATION) to both transform user-supplied functions (USER FUNCTIONS) using numerical integration (INTEGRAL) and construct corresponding function values look-up tables. The function evaluations in the solution tracing process are conducted by cubic spline (SPLINE) using the function values in the look-up tables.
391
+
392
+ The system is written in C and developed on the IBM SP1 with PVM used for parallel message passing extensions. It is easy to port to a variety of parallel architectures including a cluster of local workstations. The system is meant to be used as a computational platform for basic interdisciplinary studies on molecular conformation and protein folding.
393
+
394
+ ## Acknowledgements
395
+
396
+ This research was supported partially by the Cornell Theory Center, which receives funding from members of its Corporate Research Institute, the National Science Foundation (NSF), the Advanced Research Projects Agency (ARPA), the National Institutes of Health (NIH), New York State, and IBM Corporation.
397
+
398
+ ## References
399
+
400
+ [1] Emile Aarts, and Jan Korst [1989]. *Simulated Annealing and Boltzmann Machines*. John Wiley & Sons, New York, NY.
401
+
402
+ [2] Eugene L. Allgower and Kurt Georg [1990]. *Numerical Continuation Methods*. Springer-Verlag, New York, NY.
403
+
404
+ [3] Thomas F. Coleman, David Shalloway and Zhijun Wu [1993]. *Isotropic Effective Energy Simulated Annealing Searches for Low Energy Molecular Cluster States*. Computational Optimization and Applications, 2, 145-170, 1993.
405
+ ---PAGE_BREAK---
406
+
407
+ [4] Thomas F. Coleman, David Shalloway and Zhijun Wu [1994]. *A Parallel Build-Up Algorithm for Global Energy Minimizations of Molecular Clusters Using Effective Energy Simulated Annealing*. Journal of Global Optimization, 4, 171-185, 1994.
408
+
409
+ [5] Thomas F. Coleman and Zhijun Wu [1994]. *Cglop – A Parallel Continuation-Based Global Optimization Package for Molecular Conformation*. Advanced Computing Research Institute, Cornell University, Ithaca, NY, to be submitted to ACM Transactions on Mathematical Software.
410
+
411
+ [6] J. E. Dennis, Jr. and R. B. Schnabel [1983]. *Numerical Methods for Unconstrained Optimization and Nonlinear Equations*. Prentice-Hall, Englewood Cliffs, NJ.
412
+
413
+ [7] Zhijun Wu [1993]. *The Effective Energy Transformation Scheme as a General Continuation Approach to Global Optimization with Application to Molecular Conformation*. Technical Report CTC93TR143, Advanced Computing Research Institute, Cornell University, Ithaca, NY, submitted to SIAM Journal on Optimization.
414
+ ---PAGE_BREAK---
415
+
416
+ Figure 7: The Cglop system structure.
samples/texts_merged/6813453.md ADDED
@@ -0,0 +1,826 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Multipartite entanglement and high-precision metrology
5
+
6
+ Géza Tóth*
7
+
8
+ Department of Theoretical Physics, The University of the Basque Country, P.O. Box 644, E-48080 Bilbao, Spain;
9
+
10
+ IKERBASQUE, Basque Foundation for Science, E-48011 Bilbao, Spain; and
11
+
12
+ Research Institute for Solid State Physics and Optics, Hungarian Academy of Sciences, P.O. Box 49, H-1525 Budapest, Hungary
13
+
14
+ (Received 14 October 2011; published 16 February 2012)
15
+
16
+ We present several entanglement criteria in terms of the quantum Fisher information that help to relate various forms of multipartite entanglement to the sensitivity of phase estimation. We show that genuine multipartite entanglement is necessary to reach the maximum sensitivity in some very general metrological tasks using a two-arm linear interferometer. We also show that it is needed to reach the maximum average sensitivity in a certain combination of such metrological tasks.
17
+
18
+ DOI: 10.1103/PhysRevA.85.022322
19
+
20
+ PACS number(s): 03.67.Bg, 03.65.Ud, 42.50.St
21
+
22
+ ## I. INTRODUCTION
23
+
24
+ There has been a rapid development in the technology of quantum experiments with photons [1–6], trapped ions [7,8], and cold atoms [9]. In many of the experiments the goal is to create a state with genuine multipartite entanglement [1–8]. From the operational point of view, the meaning of such an entanglement is clear [7,10]. An *N*-qubit quantum state with genuine *N*-partite entanglement cannot be prepared by mixing *N*-qubit pure states in which some groups of particles have not interacted. Thus, the experiment presents something qualitatively new compared to an (*N* − 1)-qubit experiment. There is an extensive literature on the detection of such entanglement (e.g., see Ref. [11] for a review).
25
+
26
+ One of the important applications of entangled multipartite quantum states is sub-shot-noise metrology [12]. In metrology, as can be seen in Fig. 1, one of the basic tasks is phase estimation connected to the unitary dynamics of a linear interferometer
27
+
28
+ $$ \varrho_{\text{output}} = e^{-i\theta J_{\vec{n}}} \varrho e^{+i\theta J_{\vec{n}}}, \quad (1) $$
29
+
30
+ where $\varrho$ is the input state of the interferometer, while $\varrho_{\text{output}}$ is the output state, and $J_{\vec{n}}$ is a component of the collective angular momentum in the direction $\vec{n}$. The important question is how well we can estimate the small angle $\theta$ measuring $\varrho_{\text{output}}$. For such an interferometer the phase estimation sensitivity, assuming *any* type of measurement, is limited by the quantum Cramér-Rao bound as [13,14]
31
+
32
+ $$ \Delta\theta \ge \frac{1}{\sqrt{F_Q[\varrho, J_{\vec{n}}]}}, \quad (2) $$
33
+
34
+ where $F_Q$ is the quantum Fisher information. The relationship between phase estimation sensitivity and entanglement in linear interferometers has already been examined [15], and an entanglement condition has been formulated with the sensitivity of the phase estimation, that is, with the quantum Fisher information. It has been found that some entangled states provide a better sensitivity in phase estimation than separable states. It has also been proven that not all entangled states are useful for phase estimation, at least in a linear interferometer [16]. Moreover, in another context, it has been noted that multipartite entanglement, not only simple
35
+
36
+ nonseparability, is needed for extreme spin squeezing [17]. While this finding is not directly related to the theory of the quantum Fisher information, it does show that multipartite entanglement is needed for a large sensitivity in certain concrete metrological tasks.
37
+
38
+ A question arises: Would it be possible to relate genuine multipartite entanglement or any other type of multipartite entanglement to the quantum Fisher information? Apart from the point of view of metrology, this is also interesting from the point of view of entanglement criteria. Bipartite entanglement criteria generalized for the multipartite case typically detect any, that is, not necessarily genuine, multipartite entanglement. In fact, so far conditions for genuine multipartite entanglement were mostly linear in operator expectation values (e.g., entanglement witnesses [18–21] or Bell inequalities [22–26]). There are also criteria quadratic in operator expectation values [27–29], a strong criterion based on the elements of the density matrix [30,31] and some spin squeezing inequalities [32–34]. For us, a starting point can be that existing entanglement conditions based on the Wigner-Yanase skew information [35] can also detect genuine multipartite entanglement and many properties of the skew information and the quantum Fisher information are similar.
39
+
40
+ In this paper, we examine what advantage states with various forms of multipartite entanglement offer over separable states in metrology. We show that in order to have the maximal sensitivity in certain metrological tasks, $\varrho$ must be genuinely multipartite entangled. It can also happen that an entangled state does not provide a sensitivity for phase estimation larger than the sensitivity achievable by separable states for any $J_{\vec{n}}$; however, the average sensitivity of phase estimation is still larger than for separable states. Thus, when asking about the advantage of entangled states over separable ones in phase estimation, it is not sufficient to consider the sensitivity in a single metrological task.
41
+
42
+ Now we are in a position to formulate our first main results; the proofs are given later.
43
+
44
+ *Observation 1.* For *N*-qubit separable states, the values of $F_Q[\varrho, J_l]$ for $l = x,y,z$ are bounded as
45
+
46
+ $$ \sum_{l=x,y,z} F_Q[\varrho, J_l] \le 2N. \quad (3) $$
47
+
48
+ Here $J_l = \frac{1}{2} \sum_{k=1}^N \sigma_l^{(k)}$, where $\sigma_l^{(k)}$ are the Pauli spin matrices for qubit ($k$). Later we also show that Eq. (3) is a condition
49
+
50
+ *toth@alumni.nd.edu
51
+ ---PAGE_BREAK---
52
+
53
+ FIG. 1. (Color online) The basic problem of linear interferometry. The parameter $\theta$ must be estimated by measuring $\rho_{\text{output}}$.
54
+
55
+ for the average sensitivity of the interferometer. All states violating Eq. (3) are entangled. Note that, according to Ref. [15], for separable states we have
56
+
57
+ $$F_Q[\varrho, J_l] \le N. \quad (4)$$
58
+
59
+ **Observation 2.** For quantum states, the quantum Fisher information is bounded from above as
60
+
61
+ $$\sum_{l=x,y,z} F_Q[\varrho, J_l] \le N(N+2). \quad (5)$$
62
+
63
+ Greenberger-Horne-Zeilinger states (GHZ states, [36]) and N-qubit symmetric Dicke states with $\frac{N}{2}$ excitations saturate Eq. (5). Note that the above symmetric Dicke state has been investigated recently due to its interesting entanglement properties [4,6,32]. It has also been noted that the above Dicke state gives an almost maximal phase measurement sensitivity in two orthogonal directions [16]. In general, pure symmetric states for which $\langle J_l \rangle = 0$ for $l = x, y, z$ saturate Eq. (5).
64
+
65
+ Next we consider *k*-producible states [35,37]. A pure state is *k* producible if it is a tensor product of at most *k*-qubit states [37]. A mixed state is *k* producible if it is a mixture of pure *k*-producible states.
66
+
67
+ **Observation 3.** For N-qubit *k*-producible states, the quantum Fisher information is bounded from above by
68
+
69
+ $$F_Q[\varrho, J_l] \le nk^2 + (N-nk)^2, \quad (6)$$
70
+
71
+ where *n* is the integer part of $\frac{N}{k}$. A condition similar to Eq. (6) has appeared in Ref. [35] for the Wigner-Yanase skew information.
72
+
73
+ **Observation 4.** For N-qubit *k*-producible states, the sum of three Fisher information terms is bounded from above by [38]
74
+
75
+ $$\sum_{l=x,y,z} F_Q[\varrho, J_l] \le \begin{cases} nk(k+2) + (N-nk)(N-nk+2) & \text{if } N-nk \neq 1, \\ nk(k+2)+2 & \text{if } N-nk = 1, \end{cases} \quad (7)$$
76
+
77
+ where *n* is the integer part of $\frac{N}{k}$. Any state that violates this bound is not *k* producible and contains (*k* + 1)-particle entanglement.
78
+
79
+ Next we consider criteria that show that the quantum state is not biseparable. A pure state is biseparable if it can be written as a tensor product of two multipartite states [10]. A mixed state is biseparable if it can be written as a mixture of biseparable pure states. The bounds for biseparable states for the left-hand side of Eqs. (6) and (7) can be obtained from Observations 3 and 4 after taking $n=1$ and maximizing the bounds in those
80
+
81
+ Observations over $k = [\frac{N}{2}], [\frac{N}{2}] + 1, ..., N - 1$, where $[\frac{N}{2}]$ is the smallest integer not smaller than $\frac{N}{2}$. Hence, we obtain
82
+
83
+ $$F_Q[\varrho, J_l] \le (N-1)^2 + 1, \quad (8a)$$
84
+
85
+ $$\sum_{l=x,y,z} F_Q[\varrho, J_l] \le N^2 + 1. \quad (8b)$$
86
+
87
+ Any state that violates Eqs. (8a) or (8b) is genuine multipartite entangled.
88
+
89
+ The inequalities presented in Observations 1–3 correspond to planes in the ($F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z]$) space as can be seen in Fig. 2 for $N=6$ particles. These observations show that for fully separable states only a very small fraction of the ($F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z]$) space is allowed. This is also true for states with several forms of multipartite entanglement, for example, *k*-producible states with $k \ll N$. To reach the maximal phase sensitivity, genuine multipartite entanglement is needed.
90
+
91
+ The paper is organized as follows. In Sec. II, we prove Observations 1 and 2. In Sec. III, we prove Observations 3 and 4. In Sec. IV, we examine the characteristics of the states corresponding to interesting points in the ($F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z]$) space and determine which regions correspond to quantum states of different degrees of entanglement. In Sec. V, we discuss some similarities to entanglement detection with uncertainty relations. In Appendix A, we present a unified framework to derive entanglement conditions independent from the coordinate system chosen. In Appendix B, we give some details of our calculations.
92
+
93
+ ## II. PROOF OF OBSERVATIONS 1 AND 2
94
+
95
+ First, let us review some of the central notions concerning metrology and the quantum Fisher information. For calculating many quantities, it is sufficient to know the following two relations [13–15,39] for the quantum Fisher information.
96
+
97
+ (1) For a pure state $\varrho$, we have $F[\varrho, J_l] = 4(\Delta J_l)_\varrho^2$.
98
+
99
+ (2) $F[\varrho, J_l]$ is convex in the state; that is, $F[p_1\varrho_1 + p_2\varrho_2, J_l] \le p_1F[\varrho_1, J_l] + p_2F[\varrho_2, J_l]$.
100
+
101
+ From these two statements, it also follows that $F[\varrho, J_l] \le 4(\Delta J_l)_\varrho^2$.
102
+
103
+ There is also an explicit formula for computing the quantum Fisher information for a given state $\varrho$ and a collective observable $J_\vec{n}$ for any $\vec{n}$ as [16]
104
+
105
+ $$F_Q[\varrho, J_\vec{n}] = \vec{n}^T \Gamma_C \vec{n}. \quad (9)$$
106
+
107
+ Thus, the $\Gamma_C$ matrix carries all the information needed to compute $F_Q[\varrho, J_\vec{n}]$ for any direction $\vec{n}$. It is defined as [16]
108
+
109
+ $$[\Gamma_C]_{ij} = 2 \sum_{l,m} (\lambda_l + \lambda_m) \left( \frac{\lambda_l - \lambda_m}{\lambda_l + \lambda_m} \right)^2 \langle l | J_i | m \rangle \langle m | J_j | l \rangle, \quad (10)$$
110
+
111
+ where the sum is over the terms for which $\lambda_l + \lambda_m \neq 0$, and the density matrix has the decomposition
112
+
113
+ $$\varrho = \sum_k \lambda_k |k\rangle\langle k|. \quad (11)$$
114
+ ---PAGE_BREAK---
115
+
116
+ Note that for pure states $[\Gamma_C]_{ij} = 2\langle J_i J_j + J_j J_i \rangle - 4\langle J_i \rangle \langle J_j \rangle$ [16]; that is, $\Gamma_C$ is four times the symmetrized covariance matrix, consistent with $\vec{n}^T \Gamma_C \vec{n} = F_Q[\varrho, J_{\vec{n}}] = 4(\Delta J_{\vec{n}})^2$ for pure states. Later, we present entanglement conditions with $\Gamma_C$, besides entanglement conditions with $F[\varrho, J_l]$.
117
+
118
+ *Proof of Observation 1.* First we show that Observation 1 is true for pure states. We use here the theory of entanglement detection based on uncertainty relations [40]. According to this theory, for every *N*-qubit pure product state of the form
119
+
120
+ $$|\Psi_P\rangle = \bigotimes_{n=1}^{N} |\Psi_n\rangle, \quad (12)$$
121
+
122
+ the variance of the collective observable $J_l$ is the sum of the variances of the single-qubit observables $j_l^{(n)} = \frac{1}{2}\sigma_l^{(n)}$ for the single-qubit states $|\Psi_n\rangle$. Thus, we have for the sum of the variances of the three angular momentum components [41]
123
+
124
+ $$
125
+ \begin{align*}
126
+ \sum_{l=x,y,z} (\Delta J_l)^2_{|\Psi_P\rangle} &= \frac{1}{4} \sum_{l=x,y,z} \sum_{n=1}^{N} (\Delta \sigma_l)_{|\Psi_n\rangle}^2 \\
127
+ &= \frac{1}{4} \sum_{n=1}^{N} \left(3 - \langle \sigma_x^{(n)} \rangle^2 - \langle \sigma_y^{(n)} \rangle^2 - \langle \sigma_z^{(n)} \rangle^2\right) = \frac{N}{2}.
128
+ \end{align*}
129
+ $$
130
+
131
+ For the mixture of product states, that is, for mixed separable states, Eq. (3) follows from the convexity of the Fisher information. ■
132
+
133
+ Next we show that Eq. (3) can be interpreted as a condition on the average sensitivity of the interferometer. First, note that Eq. (3) can be reformulated with the eigenvalues of $\Gamma_C$ as
134
+
135
+ $$\mathrm{Tr}(\Gamma_C) \le 2N. \quad (13)$$
136
+
137
+ Then, using Eq. (9), we obtain
138
+
139
+ $$\mathrm{avg}_{\vec{n}}(F_Q[\varrho, J_{\vec{n}}]) = \mathrm{avg}_{\vec{n}}\{\mathrm{Tr}[\Gamma_C(\vec{n}\vec{n}^T)]\} = \tfrac{1}{3}\mathrm{Tr}(\Gamma_C), \quad (14)$$
140
+
141
+ where averaging is over all three-dimensional unit vectors. Thus, Eq. (3) can be rewritten as a condition for the average sensitivity of the interferometer as
142
+
143
+ $$\mathrm{avg}_{\vec{n}}(F_Q[\varrho, J_{\vec{n}}]) \le \frac{2}{3} N. \quad (15)$$
144
+
145
+ Let us calculate now the maximum of the left-hand side of Eq. (3).
146
+
147
+ *Proof of Observation 2.* We have to use that the quantum Fisher information is never larger than four times the corresponding variance,
148
+
149
+ $$\sum_{l=x,y,z} F(\varrho, J_l) \le 4 \sum_{l=x,y,z} (\Delta J_l)^2, \quad (16)$$
150
+
151
+ and that the sum of the variances are bounded from above
152
+
153
+ $$4 \sum_{l=x,y,z} (\Delta J_l)^2 \le 4 \sum_{l=x,y,z} \langle J_l^2 \rangle \le N(N+2). \quad (17)$$
154
+
155
+ For pure states, Eq. (16) is saturated. The second inequality of Eq. (17) appears as a fundamental equation in the theory of angular momentum. For symmetric states with $\langle J_l \rangle = 0$ for $l = x, y, z$, both inequalities of Eq. (17) are saturated. Hence, GHZ states and Dicke states with $\frac{N}{2}$ excitations saturate Eq. (5). ■
156
+
157
+ ### III. BOUNDS FOR MULTIPARTITE ENTANGLEMENT
158
+
159
+ In this section we present the proof of Observations 3 and 4 and also compute some bounds for other types of multipartite entanglement. For that, we use ideas similar to the ones in the proof of Observation 1.
160
+
161
+ *Proof of Observation 3.* Let us consider pure states that are the tensor product of at most *k*-qubit entangled states,
162
+
163
+ $$|\Psi_{k-\text{producible}}\rangle = |\Psi_1^{(N_1)}\rangle \otimes |\Psi_2^{(N_2)}\rangle \otimes |\Psi_3^{(N_3)}\rangle \otimes |\Psi_4^{(N_4)}\rangle \otimes \dots, \quad (18)$$
164
+
165
+ where $N_m \le k$ is the number of qubits for the $m$th term in the product. Hence, based on using $(\Delta J_l)^2_{|\Psi_m^{(N_m)}\rangle} \le \frac{N_m^2}{4}$ for the $N_m$-qubit units, we obtain
166
+
167
+ $$(\Delta J_l)^2|_{\Psi_{k-\text{producible}}} = \sum_m (\Delta J_l)^2|_{\Psi_m^{(N_m)}} \le \sum_m \frac{N_m^2}{4}.$$
168
+
169
+ For the case when $k$ is a divisor of $N$, the largest variance can be obtained for a state for which all $N_m = k$. Hence, for the state Eq. (18) we obtain
170
+
171
+ $$(\Delta J_l)^2 \le \frac{N}{k} \times \frac{k^2}{4}. \quad (19)$$
172
+
173
+ If $k$ is not a divisor of $N$ then at least one of the states in the tensor product of Eq. (18) will have fewer than $k$ qubits. The maximum for the sum of the variances is obtained if all but a single state has $k$ qubits. Considering this, we obtain Eq. (6). The strong dependence of the bounds on $k$ in Eq. (6) indicates that for high-precision metrology states containing many-partite entanglement are needed. ■
174
+
175
+ *Proof of Observation 4.* Let us consider pure states that are the tensor product of at most *k*-qubit entangled states of the form Eq. (18). Hence, based on using Eq. (5) for the *k*-qubit units, we obtain
176
+
177
+ $$
178
+ \begin{align}
179
+ \sum_{l=x,y,z} (\Delta J_l)^2_{|\Psi_{k-\text{producible}}\rangle} &= \sum_m \sum_{l=x,y,z} (\Delta J_l)^2_{|\Psi_m^{(N_m)}\rangle} \\
180
+ &\le \sum_m \frac{N_m(N_m+2)}{4}. \tag{20}
181
+ \end{align}
182
+ $$
183
+
184
+ For the case when $k$ is a divisor of $N$, the largest variance can be obtained for a state for which all $N_m = k$. Hence, for the state Eq. (18) we obtain
185
+
186
+ $$\sum_{l=x,y,z} (\Delta J_l)^2 \leq \frac{N}{k} \times \frac{k(k+2)}{4}. \quad (21)$$
187
+
188
+ If $k$ is not a divisor of $N$, then at least one of the states in the tensor product of Eq. (18) will have fewer than $k$ qubits. The maximum for the sum of the variances is obtained if all but a single state has $k$ qubits. Considering this, we obtain Eq. (7). We have to use that for pure states of $N \ge 2$ qubits, we have $\sum_k (\Delta J_k)^2 \le \frac{N(N+2)}{4}$, while for $N=1$ we have a better bound $\sum_k (\Delta J_k)^2 \le \frac{1}{2}$. ■
189
+
190
+ *Bound for states with a given number of unentangled particles.* Next, we obtain a bound for systems that contain a given number of unentangled particles. A pure state is said to contain *M* unentangled particles if it can be written as [37,42]
191
+
192
+ $$\bigotimes_{k=1}^{M} |\Psi_k\rangle \otimes |\Psi_{M+1,\dots,N}\rangle. \quad (22)$$
193
+
194
+ We say that a mixed state contains at least *M* unentangled particles if it can be prepared by mixing pure states with *M* or more unentangled particles.
195
+ ---PAGE_BREAK---
196
+
197
+ Many interesting quantum states are highly entangled, but
198
+ still contain only two-particle entanglement. Nevertheless, it
199
+ is still important to know how large fraction of the particles
200
+ remain unentangled since the number of unentangled particles
201
+ is directly connected to metrological usefulness of the state.
202
+
203
+ *Observation 5.* For states with at least *M* unentangled particles, the quantum Fisher information is bounded from above by
204
+
205
+ $$
206
+ \sum_{l=x,y,z} F_Q[\varrho, J_l] \le 2M + (N-M)(N-M+2). \quad (23)
207
+ $$
208
+
209
+ *Proof.* For a pure state of the form Eq. (22), each unentangled qubit contributes $\frac{1}{2}$ to the sum of the variances; hence, we have
210
+
211
+ $$
212
+ \sum_{l=x,y,z} (\Delta J_l)^2 \le \frac{M}{2} + \frac{(N-M)(N-M+2)}{4}. \quad (24)
213
+ $$
214
+
215
+ Any state that violates Eq. (23) has fewer than *M* unentangled
216
+ particles. The validity of Eq. (23) for mixed states is due to the
217
+ convexity of the quantum Fisher information.
218
+
219
+
220
+ So far, we presented entanglement conditions in terms of $F_Q[\varrho, J_l]$ for $l = x, y, z$. A more general approach is constructing entanglement conditions with the $\Gamma_C$ matrix defined in Eq. (10). In Appendix A, we present a unified framework for determining entanglement conditions for $\Gamma_C$.
221
+
222
+ IV. INTERESTING POINTS IN THE
223
+ $(F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z])$ SPACE
224
+
225
+ In this section, we discuss which part of the
226
+ ($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space contains points corre-
227
+ sponding to states with different degrees of entanglement. This
228
+ is important since, apart from finding inequalities for states of
229
+ various types of entanglement, we have to show that there are
230
+ states that fulfill these inequalities.
231
+
232
+ For that, let us see first the interesting points of the
233
+ ($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space and the corresponding
234
+ quantum states, shown in Fig. 2.
235
+
236
+ (i) A completely mixed state,
237
+
238
+ $$
239
+ \rho_C = \frac{\mathbb{1}}{2^N}, \qquad (25)
240
+ $$
241
+
242
+ corresponds to the point C(0,0,0) in the ($F_Q[\rho, J_x]$, $F_Q[\rho, J_y]$, $F_Q[\rho, J_z]$) space.
243
+
244
+ (ii) Product states of the form
245
+
246
+ $$
247
+ |\Psi\rangle_{S_l} = \left|+\frac{1}{2}\right\rangle_l^{\otimes N/2} \otimes \left|-\frac{1}{2}\right\rangle_l^{\otimes N/2} \quad (26)
248
+ $$
249
+
250
+ for *l* = x, y, z correspond to the points S<sub>x</sub>(0, N, N), S<sub>y</sub>(N, 0, N),
251
+ and S<sub>z</sub>(N, N, 0), respectively.
252
+
253
+ (iii) An *N*-qubit symmetric Dicke state with $\frac{N}{2}$ excitations in the *z* basis is defined as
254
+
255
+ $$
256
+ |\mathcal{D}_N^{(N/2)}\rangle = \left(\begin{array}{c} N \\ N/2 \end{array}\right)^{-1/2} \sum_k P_k \{|0\rangle^{\otimes N/2} \otimes |1\rangle^{\otimes N/2}\}, \quad (27)
257
+ $$
258
+
259
+ where $\sum_k P_k$ denotes summation over all possible
260
+ permutations. Such a state corresponds to the point
261
+ $D_z(\frac{N(N+2)}{2}, \frac{N(N+2)}{2}, 0)$. Dicke states in the $x$ and $y$
262
+ bases correspond to the points $D_x(0, \frac{N(N+2)}{2}, \frac{N(N+2)}{2})$ and
263
+ $D_y(\frac{N(N+2)}{2}, 0, \frac{N(N+2)}{2})$, respectively.
264
+
265
+ FIG. 2. (Color online) Interesting points in the ($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space for $N=6$ particles. Points corresponding to separable states satisfy Eq. (3) and are not above the $S_x-S_y-S_z$ plane. Points corresponding to biseparable states satisfy Eq. (8b) and are not above the $G_x-G_y-G_z$ plane. All states corresponding to points above the $G_x-G_y-G_z$ plane are genuine multipartite entangled. For the coordinates of the $S_l$, $G_l$, $D_l$, and $C$ points, see Sec. IV.
266
+
267
+ (iv) An *N*-qubit GHZ state in the *z* basis is defined as
268
+
269
+ $$
270
+ |\Psi\rangle_{GHZ_z} = \frac{1}{\sqrt{2}}(|0\rangle^{\otimes N} + |1\rangle^{\otimes N}). \quad (28)
271
+ $$
272
+
273
+ It corresponds to the point (N,N,N²). GHZ states in the *x* and *y* bases correspond to points (N²,N,N) and (N,N²,N), respectively.
274
+
275
+ (v) Finally, the tensor product of a single-qubit state and a Dicke state of the form
276
+
277
+ $$
278
+ |\Psi\rangle_{GZ} = |1\rangle \otimes |\mathcal{D}_{N-1}^{N/2-1}\rangle \quad (29)
279
+ $$
280
+
281
+ corresponds to the point $G_z(\frac{N^2}{2} + \frac{1}{2}, \frac{N^2}{2} + \frac{1}{2}, 0)$ [43]. States corresponding to the points $G_x$ and $G_y$ can be obtained from $|\Psi\rangle_{GZ}$ by basis transformations. After considering individual points, we now show that there are two-dimensional objects in the ($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space such that for all of their points there is a corresponding separable or entangled quantum state.
282
+
283
+ (vi) For all points in the $S_x, S_y, S_z$ polytope, there is a corresponding pure product state for even *N*. Given $F[\varrho, J_l]$ for $l = x, y, z$, such a state is defined as
284
+
285
+ $$
286
+ \rho = \left[ \frac{\mathbb{1}}{2} + \frac{1}{2} \sum_{l=x,y,z} c_l \sigma_l \right]^{\otimes N/2} \otimes \left[ \frac{\mathbb{1}}{2} - \frac{1}{2} \sum_{l=x,y,z} c_l \sigma_l \right]^{\otimes N/2}, \quad (30)
287
+ $$
288
+
289
+ where $c_l^2 = 1 - \frac{F_Q[\varrho, J_l]}{N}$; note that $\sum_l c_l^2 = 1$.
290
+
291
+ (vii) For all points in the $D_x, D_y, D_z$ polytope, there is a corresponding quantum state if *N* is divisible by 4. To see this, let us consider the following quantum states for even *N*:
292
+
293
+ $$
294
+ |\Psi_{\text{even}}\rangle = \sum_{n=0,2,4,\dots,N/2-2} c_n \frac{1}{\sqrt{2}} (|\mathcal{D}_N^{(n)}\rangle + |\mathcal{D}_N^{(N-n)}\rangle) \\
295
+ \qquad + c_{N/2} |\mathcal{D}_N^{(N/2)}\rangle, \tag{31}
296
+ $$
297
+ ---PAGE_BREAK---
298
+
299
+ FIG. 3. (Color online) Randomly chosen points in the $(F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z])$ space corresponding to states of the form Eq. (32) for $N=8$. All the points are in the plane of $D_x$, $D_y$, and $D_z$.
300
+
301
+ where $c_n$ are complex coefficients. States Eq. (31) are special cases of symmetric states with an even parity [44]. For $|\Psi_{\text{even}}\rangle$, we have $\langle J_l\rangle = 0$ for $l=x,y,z$. Finally, $\langle J_l J_m + J_m J_l\rangle = 0$ if $l \neq m$; thus, for $|\Psi_{\text{even}}\rangle$ the matrix $\Gamma_C$ is diagonal. Let us now assume that $N$ is a multiple of 4 and consider the states of the form
302
+
303
+ $$ |\Psi(\alpha_x, \alpha_y, \alpha_z)\rangle = \alpha_x |D_N^{(N/2)}\rangle_x + \alpha_y |D_N^{(N/2)}\rangle_y + \alpha_z |D_N^{(N/2)}\rangle_z, \quad (32) $$
304
+
305
+ where $\alpha_l$ are complex coefficients. (Note that $|D_N^{(N/2)}\rangle_l$ are not pairwise orthogonal.) Simple analytical calculations show that such states are a subset of the states Eq. (31) [45]. The states (32) fill the polytope $D_x$, $D_y$, and $D_z$, which is demonstrated for $N=8$ in Fig. 3 [46] (see also Appendix B). Thus, there is a quantum state corresponding to all points of this polytope.
306
+
307
+ Next we examine, how to obtain states corresponding to three-dimensional polytopes. For that we use that when mixing two states, the points corresponding to the mixed state are on a curve in the $(F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z])$ space. In the general case, this curve is not a straight line. For the case of mixing a pure state with the completely mixed state, the curve is a straight line. Such a state is defined as
308
+
309
+ $$ \varrho^{(\text{mixed})}(p) = p\varrho + (1-p)\frac{\mathbb{1}}{2^N}. \quad (33) $$
310
+
311
+ Using Eq. (10), after simple calculations we have
312
+
313
+ $$ \Gamma_C^{(\text{mixed})}(p) = \frac{p^2}{p + (1-p)2^{-(N-1)}} \Gamma_C^{(\varrho)}. \quad (34) $$
314
+
315
+ Hence, we can state the following.
316
+
317
+ *Observation 6.* If *N* is even, then there is a separable state for each point in the $S_x, S_y, S_z, C$ polytope.
318
+
319
+ *Proof.* This is because there is a pure product state corresponding to any point in the $S_x, S_y, S_z$ polytope. When mixing any of these states with the completely mixed state, we obtain states that correspond to points on the line connecting the pure state to point C. ■
320
+
321
+ *Observation 7.* If *N* is divisible by 4, then for all the points of the $D_x, D_y, D_z, G_x, G_y, G_z$ polytope, there is a quantum state with genuine multipartite entanglement.
322
+
323
+ *Proof.* There is a quantum state for all points in the $D_x, D_y, D_z$ polytope. Mixing them with the completely mixed state, states corresponding to all points of the $C, D_x, D_y, D_z$ polytope can be obtained. Based on Observation 2, states corresponding to the points in the $D_x, D_y, D_z, G_x, G_y, G_z$ polytope are genuine multipartite entangled. ■
324
+
325
+ Finally, note that all the quantum states we presented in this section have a diagonal $\Gamma_C$ matrix. Thus, our statements remain true even if the three coordinate axes in Fig. 2 correspond to the three eigenvalues of $\Gamma_C$.
326
+
327
+ V. DISCUSSION
328
+
329
+ The criterion in Eq. (3) contains several quantum Fisher information terms. It can happen that a state does not violate the criterion Eq. (4), but it violates the criterion Eq. (3). In this case, for a single metrological task of the type we considered in this paper its entanglement does not make it possible to outperform the metrology with separable states. However, if the state is used for several metrological tasks, then it makes it possible to achieve such an *average* sensitivity that would be not possible for separable states.
330
+
331
+ A related example is the proposal of using multipartite singlets for differential magnetometry [47]. Singlets are useful for differential magnetometry because they are insensitive to homogeneous fields, that is, $F[\varrho, J_l] = 0$ for $l = x, y, z$, which is the same as for the completely mixed state. However, when considering operators other than $J_{\vec{n}}$, singlets turn out to be very sensitive, which is not the case for the completely mixed state. Thus, singlets can provide an advantage over separable states if the combination of two metrological tasks is considered.
332
+
333
+ It is instructive to compare the necessary condition for separability Eq. (3) to the condition presented in Refs. [41,48],
334
+
335
+ $$ \sum_{l=x,y,z} (\Delta J_l)^2 \le \frac{N}{2}. \quad (35) $$
336
+
337
+ Clearly, if a pure state is detected by Eq. (35), it is not detected by Eq. (3), and vice versa. In fact, Eqs. (35) and (3) together detect all entangled pure multiqubit states except for the ones for which
338
+
339
+ $$ \sum_{l=x,y,z} (\Delta J_l)^2 = \frac{N}{2}. \quad (36) $$
340
+
341
+ Of course, the two conditions also detect some mixed entangled states in the vicinity of the pure entangled states.
342
+
343
+ It is an interesting question whether multipartite states having a positive partial transpose for all bipartitions can violate any of the above entanglement criteria with the quantum Fisher information. Violating Eq. (3) would certainly mean that such bound entangled states are useful for certain metrological applications. To find such states, if they exist, might be difficult as typically bound entangled states are strongly mixed and the quantum Fisher information is convex.
344
+
345
+ Concerning multipartite entanglement, Observation 3 shows that for a single metrological task, genuine multipartite entanglement is needed to reach the maximum sensitivity.
346
+ ---PAGE_BREAK---
347
+
348
+ Observation 4 demonstrates that even for the maximum
349
+ average sensitivity for the metrological tasks considered can
350
+ be reached only by states possessing genuine multipartite
351
+ entanglement.
352
+
353
+ Finally, the definition of quantum Fisher information used in Ref. [15], while widely considered “the” quantum Fisher information, is not the only possible definition [49]. The Wigner-Yanase skew information is another possibility [50–52]. This quantity equals the variance for pure states, and it is also convex in the state. This has already been used to define entanglement criteria with the skew information [35,53]. Thus, all previous statements can easily be transformed into criteria with the skew information.
354
+
355
+ ## VI. CONCLUSIONS
356
+
357
+ In summary, we showed that genuine multipartite en-
358
+ tanglement, or in general, multipartite entanglement more
359
+ demanding than simple inseparability, is needed to achieve
360
+ a maximal accuracy using multipartite quantum states for
361
+ metrology. We also considered several relations with the
362
+ quantum Fisher information and determined the corresponding
363
+ bounds for various forms of entanglement.
364
+
365
+ *Note added in proof.* Independently from our work, another paper on the relationship between multipartite entanglement and Fisher information has been prepared [54].
366
+
367
+ ## ACKNOWLEDGMENTS
368
+
369
+ We thank O. Gühne and D. Petz for discussions. We thank the European Union (ERC Starting Grant GEDENTQOPT and CHIST-ERA QUASAR), the Spanish MICINN (Project No. FIS2009-12773-C02-02), the Basque Government (Project No. IT4720-10), and the support of the National Research Fund of Hungary OTKA (Contract No. K83858).
370
+
371
+ ## APPENDIX A: ENTANGLEMENT CONDITIONS FOR THE $\Gamma_C$ MATRIX
372
+
373
+
374
+ In this Appendix, we present a unified framework to derive entanglement conditions for the $\Gamma_C$ matrix. For that aim, we use ideas from the derivation of the covariance matrix criterion [55,56] and the entanglement criteria for Gaussian multimode states [57,58]. We recall that a separable state is a mixture of pure product states [59],
375
+
376
+ $$ \varrho_{\text{sep}} = \sum_k p_k \rho_{\text{pure product},k}. \quad (\text{A1}) $$
377
+
378
+ Due to the convexity of the quantum Fisher information [15],
379
+ we have
380
+
381
+ $$ F[\varrho_{\text{sep}}, J_n] \le \sum_k p_k F[\rho_{\text{pure product},k}, J_n]. \quad (\text{A2}) $$
382
+
383
+ Thus, for every separable state there must be a set of $p_k$
384
+ and $\rho_{\text{pure product},k}$ fulfilling Eq. (A2). Hence, we can say the
385
+
386
+ following. For every separable state, there is a set of $p_k$ and
387
+ $\rho_{\text{pure product},k}$ such that
388
+
389
+ $$ \Gamma_C^{(\text{sep})} \le \sum_k p_k \Gamma_C^{(\text{pure product},k)}. \quad (\text{A3}) $$
390
+
391
+ Any state for which there is no such set of probabilities
392
+ and pure product density matrices is entangled [60].
393
+
394
+ It is known that for N-qubit pure product states we have the following two constraints for the variances of the three angular momentum components,
395
+
396
+ $$ \sum_{l=x,y,z} (\Delta J_l)^2 = \frac{N}{2}, \quad (\text{A4a}) $$
397
+
398
+ $$ (\Delta J_m)^2 \leq \frac{N}{4}, \quad (\text{A4b}) $$
399
+
400
+ which have been used to derive entanglement criteria with the three variances [41,42,48,61]. Equation (A4a) also appeared in the proof of Observation 1. Based on Eq. (A4), the conditions for the eigenvalues of $\Gamma_C^{(\text{pure product})}$ are clearly
401
+
402
+ $$ \begin{gathered} \sum_{l=x,y,z} \Lambda_l^{(\text{pure product})} = 2N, \\ 0 \le \Lambda_m^{(\text{pure product})} \le N \end{gathered} \quad (\text{A5}) $$
403
+
404
+ for $m = x, y, z$. Using now our knowledge about $\Gamma_C^{(\text{pure product},k)}$, the condition Eq. (A3) leads to the following equations for the eigenvalues of $\Gamma_C^{(\text{sep})}$:
405
+
406
+ $$ \sum_{l=x,y,z} \Lambda_l^{(\text{sep})} \le 2N, \quad (\text{A6a}) $$
407
+
408
+ $$ 0 \le \Lambda_m^{(\text{sep})} \le N, \quad (\text{A6b}) $$
409
+
410
+ for $m = x, y, z$. Equation (A6) can be reformulated with $\Gamma_C$
411
+ as
412
+
413
+ $$ \operatorname{Tr}(\Gamma_C^{(\text{sep})}) \le 2N, \quad (\text{A7a}) $$
414
+
415
+ $$ \Lambda_{\max}(\Gamma_C^{(\text{sep})}) \le N, \quad (\text{A7b}) $$
416
+
417
+ where $\Lambda_{\max}(A)$ is the largest eigenvalue of A. Equation (A7b) has appeared in Ref. [16].
418
+
419
+ Hence, quantum states fulfilling Eq. (A3) must fulfill Eq. (A7). In Observation 1 and also for the criterion Eq. (4), the most entangled states are detected if $F[\varrho_{\text{sep}}, J_l]$ correspond to the three eigenvalues of $\Gamma_C$. For this case, Eq. (A7a) is equivalent to Observation 1 and Eq. (A7b) is equivalent to Eq. (4).
420
+
421
+ In a similar manner, conditions for multipartite entangle-
422
+ ment can also be obtained. Thus, analogously to Observation 3
423
+ and Observation 4, for *N*-qubit *k*-producible states, we
424
+ obtain
425
+
426
+ $$
427
+ \begin{aligned}
428
+ & \mathrm{Tr}(\Gamma_C^{(\mathrm{sep})}) \\
429
+ & \le \begin{cases} nk(k+2)+(N-nk)(N-nk+2) & \text{if } N-nk \ne 1, \\ nk(k+2)+2 & \text{if } N-nk = 1, \end{cases}
430
+ \end{aligned}
431
+ \quad (\text{A8a}) $$
432
+
433
+ $$ \Lambda_{\max}(\Gamma_C^{(\text{sep})}) \le nk^2 + (N - nk)^2, \quad (\text{A8b}) $$
434
+
435
+ where *n* is the largest integer such that *nk* ≤ *N*. We
436
+ can obtain the bounds for biseparability by setting *n* = 1 and
437
+ ---PAGE_BREAK---
438
+
439
+ $k = N - 1$. Any state that violates one of the criteria for $n = 1$ and $k = N - 1$ is genuine multipartite entangled. The inequalities (A8a) and (A8b) are essentially the criteria of Observations 3 and 4 rewritten in a coordinate system independent way.
440
+
441
+ ## APPENDIX B: $\Gamma_C$ MATRIX FOR THE STATE EQ. (32)
442
+
443
+ In this Appendix, we compute the $\Gamma_C$ matrix for the superposition of three Dicke states given in Eq. (32). We show that for any point in the $D_x$, $D_y$, $D_z$ triangle in Fig. 3 there is a corresponding state of this type.
444
+
445
+ First we need to know that
446
+
447
+ $$ {}_k\langle D_N^{(N/2)} | J_l^2 | D_N^{(N/2)} \rangle_m = \begin{cases} \frac{N(N+2)}{8} & \text{if } k=m \neq l, \\ Q & \text{if } k \neq m \text{ and } m \neq l \text{ and } k \neq l, \\ 0 & \text{otherwise} \end{cases} \quad (\text{B1}) $$
448
+
449
+ for $k,l,m \in \{x,y,z\}$. In the second line on the right-hand side of Eq. (B1), $Q = {}_x\langle D_N^{(N/2)} | J_y^2 | D_N^{(N/2)} \rangle_z$. Since the state vectors $|D_N^{(N/2)}\rangle_x$ and $|D_N^{(N/2)}\rangle_z$ both have only real elements, and $J_y^2$ also has only real elements for even $N$, $Q$ is also real. Its precise value is not important for proving the main statement of this section. The last line on the right-hand side of Eq. (B1) is due to the fact that $J_l|D_N^{(N/2)}\rangle_l = 0$.
450
+
451
+ Hence, the $\Gamma_C$ matrix for state Eq. (32) is a diagonal matrix, with
452
+
453
+ $$ \Gamma_{C,xx} = (|\alpha_y|^2 + |\alpha_z|^2) \frac{N(N+2)}{2} + 2 \operatorname{Re}(\alpha_y^* \alpha_z Q). \quad (\text{B2}) $$
454
+
455
+ The elements $\Gamma_{C,yy}$ and $\Gamma_{C,zz}$ can be obtained in a similar way, after relabeling the coordinates. Clearly, for $(\alpha_x, \alpha_y, \alpha_z) = (1,0,0)$, the state Eq. (32) corresponds to the $D_x$ point in Fig. 3. Similarly, $(\alpha_x, \alpha_y, \alpha_z) = (0,1,0)$ and $(0,0,1)$ correspond to the $D_y$ and $D_z$ points, respectively. With an appropriate choice of phases for $\alpha_i$, a state with $|\alpha_x| = |\alpha_y| = |\alpha_z|$ corresponds to the center of the $D_x$, $D_y$, $D_z$ triangle. Moreover, a state with $\alpha_x = i\alpha_y$ and $\alpha_z = 0$ corresponds to a point halfway between $D_x$ and $D_y$. In a similar manner, states of the form Eq. (32) can be obtained for the points halfway between $D_x$ and $D_z$, and $D_y$ and $D_z$.
456
+
457
+ Similar arguments show that with the appropriate choice of the absolute values and phases of $\alpha_k$, it is possible to get all the matrices,
458
+
459
+ $$ \begin{align} \Gamma_c ={}& \alpha'_x \operatorname{diag} \left( 0, \frac{N(N+2)}{2}, \frac{N(N+2)}{2} \right) \nonumber \\ & + \alpha'_y \operatorname{diag} \left( \frac{N(N+2)}{2}, 0, \frac{N(N+2)}{2} \right) \nonumber \\ & + \alpha'_z \operatorname{diag} \left( \frac{N(N+2)}{2}, \frac{N(N+2)}{2}, 0 \right), \tag{B3} \end{align} $$
460
+
461
+ with $0 \le \alpha'_l \le 1$ and $\alpha'_x + \alpha'_y + \alpha'_z = 1$. That is, we can get any point of the $D_x$, $D_y$, $D_z$ triangle in Fig. 3.
462
+
463
+ [1] J.-W. Pan, D. Bouwmeester, M. Daniell, H. Weinfurter, and A. Zeilinger, Nature (London) **403**, 515 (2000).
464
+
465
+ [2] M. Bourennane, M. Eibl, C. Kurtsiefer, S. Gaertner, H. Weinfurter, O. Gühne, P. Hyllus, D. Bruß, M. Lewenstein, and A. Sanpera, Phys. Rev. Lett. **92**, 087902 (2004).
466
+
467
+ [3] N. Kiesel, C. Schmid, U. Weber, G. Tóth, O. Gühne, R. Ursin, and H. Weinfurter, Phys. Rev. Lett. **95**, 210502 (2005).
468
+
469
+ [4] N. Kiesel, C. Schmid, G. Tóth, E. Solano, and H. Weinfurter, Phys. Rev. Lett. **98**, 063604 (2007).
470
+
471
+ [5] W. Wieczorek, R. Krischek, N. Kiesel, P. Michelberger, G. Tóth, and H. Weinfurter, Phys. Rev. Lett. **103**, 020504 (2009); G. Tóth, W. Wieczorek, R. Krischek, N. Kiesel, P. Michelberger, and H. Weinfurter, New J. Phys. **11**, 083002 (2009).
472
+
473
+ [6] R. Prevedel, G. Cronenberg, M. S. Tame, M. Paternostro, P. Walther, M. S. Kim, and A. Zeilinger, Phys. Rev. Lett. **103**, 020503 (2009); S. Campbell, M. S. Tame, and M. Paternostro, New J. Phys. **11**, 073039 (2009).
474
+
475
+ [7] C. A. Sackett et al., Nature (London) **404**, 256 (2000).
476
+
477
+ [8] H. Häffner et al., Nature (London) **438**, 643 (2005).
478
+
479
+ [9] O. Mandel, M. Greiner, A. Widera, T. Rom, T. W. Hänsch, and I. Bloch, Nature (London) **425**, 937 (2003).
480
+
481
+ [10] A. Acín, D. Bruß, M. Lewenstein, and A. Sanpera, Phys. Rev. Lett. **87**, 040401 (2001).
482
+
483
+ [11] O. Gühne and G. Tóth, Phys. Rep. **474**, 1 (2009).
484
+
485
+ [12] V. Giovannetti, S. Lloyd, and L. Maccone, Science **306**, 1330 (2004).
486
+
487
+ [13] A. S. Holevo, Probabilistic and Statistical Aspect of Quantum Theory (North-Holland, Amsterdam, 1982).
488
+
489
+ [14] C. W. Helstrom, *Quantum Detection and Estimation Theory* (Academic Press, New York, 1976).
490
+
491
+ [15] L. Pezzé and A. Smerzi, Phys. Rev. Lett. **102**, 100401 (2009).
492
+
493
+ [16] P. Hyllus, O. Gühne, and A. Smerzi, Phys. Rev. A **82**, 012337 (2010).
494
+
495
+ [17] A. Sørensen and K. Mølmer, Phys. Rev. Lett. **86**, 4431 (2001).
496
+
497
+ [18] For entanglement witnesses, see M. Horodecki, P. Horodecki, and R. Horodecki, Phys. Lett. A **223**, 1 (1996); B. M. Terhal, ibid. **271**, 319 (2000); M. Lewenstein, B. Kraus, J. I. Cirac, and P. Horodecki, Phys. Rev. A **62**, 052310 (2000); D. Bruß, J. I. Cirac, P. Horodecki, F. Hulpke, B. Kraus, M. Lewenstein, and A. Sanpera, J. Mod. Opt. **49**, 1399 (2002); for the detection of genuine multipartite entanglement, see M. Bourennane, M. Eibl, C. Kurtsiefer, S. Gaertner, H. Weinfurter, O. Gühne, P. Hyllus, D. Bruß, M. Lewenstein, and A. Sanpera, Phys. Rev. Lett. **92**, 087902 (2004); G. Tóth and O. Gühne, ibid. **94**, 060501 (2005); G. A. Durkin and C. Simon, ibid. **95**, 180402 (2005).
498
+
499
+ [19] It has also been worked out how to detect the genuine multipartite entanglement that can be obtained in a selected part of a very large quantum system through local operations around the boundary of that selected part. This makes it possible to study multipartite entanglement in the three-, four-, and five-particle blocks of a large quantum system and produce an entanglement map. See E. Alba, G. Tóth, and J. J. García-Ripoll, Phys. Rev. A **82**, 062321 (2010).
500
+ ---PAGE_BREAK---
501
+
502
+ [20] Recently, via semidefinite programming, it has become possible to find an entanglement witness detecting genuine multipartite entanglement for a given quantum state. See B. Jungnitsch, T. Moroder, and O. Gühne, Phys. Rev. Lett. 106, 190502 (2011).
503
+
504
+ [21] For device independent entanglement witnesses for multipartite entanglement, see J.-D. Bancal, N. Gisin, Y.-C. Liang, and S. Pironio, Phys. Rev. Lett. 106, 250404 (2011).
505
+
506
+ [22] J. S. Bell, Physics (Long Island City, NY) 1, 195 (1964).
507
+
508
+ [23] N. D. Mermin, Phys. Rev. Lett. 65, 1838 (1990).
509
+
510
+ [24] N. Gisin and H. Bechmann-Pasquinucci, Phys. Lett. A 246, 1 (1998).
511
+
512
+ [25] M. Seevinck and J. Uffink, Phys. Rev. A 65, 012107 (2001).
513
+
514
+ [26] D. Collins, N. Gisin, S. Popescu, D. Roberts, and V. Scarani, Phys. Rev. Lett. 88, 170405 (2002).
515
+
516
+ [27] K. Nagata, M. Koashi, and N. Imoto, Phys. Rev. Lett. 89, 260401 (2002).
517
+
518
+ [28] J. Uffink, Phys. Rev. Lett. 88, 230406 (2002).
519
+
520
+ [29] J. I. de Vicente and M. Huber, Phys. Rev. A 84, 062306 (2011).
521
+
522
+ [30] M. Seevinck and O. Gühne, New J. Phys. 12, 053002 (2010).
523
+
524
+ [31] M. Huber, F. Mintert, A. Gabriel, and B. C. Hiesmayr, Phys. Rev. Lett. 104, 210501 (2010).
525
+
526
+ [32] G. Tóth, J. Opt. Soc. Am. B 24, 275 (2007).
527
+
528
+ [33] G. Vitagliano, P. Hyllus, I. L. Egusquiza, and G. Tóth, Phys. Rev. Lett. 107, 240502 (2011).
529
+
530
+ [34] L.-M. Duan, Phys. Rev. Lett. 107, 180502 (2011).
531
+
532
+ [35] Z. Chen, Phys. Rev. A 71, 052302 (2005).
533
+
534
+ [36] D. M. Greenberger, M. A. Horne, A. Shimony, and A. Zeilinger, Am. J. Phys. 58, 1131 (1990).
535
+
536
+ [37] O. Gühne, G. Tóth, and H. J. Briegel, New J. Phys. 7, 229 (2005).
537
+
538
+ [38] We thank P. Hyllus for pointing out that the $N-nk = 1$ case is special.
539
+
540
+ [39] S. L. Braunstein and C. M. Caves, Phys. Rev. Lett. 72, 3439 (1994).
541
+
542
+ [40] For the general theory of entanglement detection with uncertainty relations, see H. F. Hofmann and S. Takeuchi, Phys. Rev. A 68, 032103 (2003); O. Gühne, Phys. Rev. Lett. 92, 117903 (2004).
543
+
544
+ [41] G. Tóth, Phys. Rev. A 69, 052327 (2004).
545
+
546
+ [42] G. Tóth, C. Knapp, O. Gühne, and H. J. Briegel, Phys. Rev. A 79, 042334 (2009).
547
+
548
+ [43] For the values of $(\Delta J_l)^2$ for $l=x,y,z$ for Dicke states, see Eq. (25) of Ref. [42].
549
+
550
+ [44] X. Yin, X. Wang, J. Ma, and X. Wang, J. Phys. B: At. Mol. Opt. Phys. 44, 015501 (2011).
551
+
552
+ [45] In Ref. [44], it has been shown that for states with an even parity $\langle J_z J_l + J_l J_z \rangle = 0$ for $l=x,y$. For states of the form Eq. (31), $\langle J_x J_y + J_y J_x \rangle = 0$ due to $|\Psi_{even}\rangle = \sigma_x^{\otimes N} |\Psi_{even}\rangle$. Equation (32) is of the form Eq. (31) because for this state $|\Psi(\alpha_x,\alpha_y,\alpha_z)\rangle = \sigma_x^{\otimes N} |\Psi(\alpha_x,\alpha_y,\alpha_z)\rangle$, and the overlap of this state with symmetric Dicke states with an odd number of 1's is zero, which can be seen as follows. When writing $|D_N^{(N/2)}\rangle_x$ in the x basis, we find that it is an equal superposition of several computational basis states in the x basis. If $|b_1,b_2,...,b_N\rangle_x$ appears in this superposition, so does $|\bar{b}_1,\bar{b}_2,...,\bar{b}_N\rangle_x$, where $b \in \{0,1\}$ and $\bar{b}$ denotes the logical inversion. All the terms of the superposition have $N/2$ 0's and $N/2$ 1's.
553
+
554
+ [46] The calculations have been made with QUBIT4MATLAB v3.0.
555
+
556
+ [47] G. Tóth, Comput. Phys. Commun. 179, 430 (2008).
557
+
558
+ [48] M.
559
+
560
+ [49] D.
561
+
562
+ [50] D.
563
+
564
+ [51] S.-L.
565
+
566
+ [52] E.
567
+
568
+ [53] Zh.
569
+
570
+ [54] P.
571
+
572
+ W.
573
+
574
+ W.
575
+
576
+ W.
577
+
578
+ W.
579
+
580
+ C.
581
+
582
+ Schwemmer,
583
+
584
+ Schwemmer,
585
+
586
+ Schwemmer,
587
+
588
+ Schwemmer,
589
+
590
+ Schwemmer,
591
+
592
+ Schwemmer,
593
+
594
+ Schwemmer,
595
+
596
+ Schwemmer,
597
+
598
+ Schwemmer,
599
+
600
+ Schwemmer,
601
+
602
+ Schwemmer,
603
+
604
+ Schwemmer,
605
+
606
+ Schwemmer,
607
+
608
+ Schwemmer,
609
+
610
+ Schwemmer,
611
+
612
+ Schwemmer,
613
+
614
+ Schwemmer,
615
+
616
+ Schwemmer,
617
+
618
+ Schwemmer,
619
+
620
+ Schwemmer,
621
+
622
+ Schwemmer,
623
+
624
+ Schwemmer,
625
+
626
+ Schwemmer,
627
+
628
+ Schwemmer,
629
+
630
+ Schwemmer,
631
+
632
+ Schwemmer,
633
+
634
+ Schwemmer,
635
+
636
+ Schwemmer,
637
+
638
+ Schwemmer,
639
+
640
+ Schwemmer,
641
+
642
+ Schwemmer,
643
+
644
+ Schwemmer,
645
+
646
+ Schwemmer,
647
+
648
+ Schwemmer,
649
+
650
+ Schwemmer,
651
+
652
+ Schwemmer,
653
+
654
+ Schwemmer,
655
+
656
+ Schwemmer,
657
+
658
+ Schwemmer,
659
+
660
+ Schwemmer,
661
+
662
+ Schwemmer,
663
+
664
+ Schwemmer,
665
+
666
+ Schwemmer,
667
+
668
+ Schwemmer,
669
+
670
+ Schwemmer,
671
+
672
+ Schwemmer,
673
+
674
+ Schwemmer,
675
+
676
+ Schwemmer,
677
+
678
+ Schwemmer,
679
+
680
+ Schwemmer,
681
+
682
+ Schwemmer,
683
+
684
+ Schwemmer,
685
+
686
+ Schwemmer,
687
+
688
+ Schwemmer,
689
+
690
+ Schwemmer,
691
+
692
+ Schwemmer,
693
+
694
+ Schwemmer,
695
+
696
+ Schwemmer,
697
+
698
+ Schwemmer,
699
+
700
+ Schwemmer,
701
+
702
+ Schwemmer,
703
+
704
+ Schwemmer,
705
+
706
+ Schwemmer,
707
+
708
+ Schwemmer,
709
+
710
+ Schwemmer,
711
+
712
+ Schwemmer,
713
+
714
+ Schwemmer,
715
+
716
+ Schwemmer,
717
+
718
+ Schwemmer,
719
+
720
+ Schwemmer,
721
+
722
+ Schwemmer,
723
+
724
+ Schwemmer,
725
+
726
+ Schwemmer,
727
+
728
+ Schwemmer,
729
+
730
+ Schwemmer,
731
+
732
+ Schwemmer,
733
+
734
+ Schwemmer,
735
+
736
+ Schwemmer,
737
+
738
+ Schwemmer,
739
+
740
+ Schwemmer,
741
+
742
+ Schwemmer,
743
+
744
+ Schwemmer,
745
+
746
+ Schwemmer,
747
+
748
+ Schwemmer,
749
+
750
+ Schwemmer,
751
+
752
+ Schwemmer,
753
+
754
+ Schwemmer
755
+
756
+ [55] O. Gühne, P. Hyllus, O. Gittsovich, and J. Eisert, Phys. Rev. Lett. 99, 130504 (2007).
757
+
758
+ [56] O. Gittsovich, O. Gühne, P. Hyllus, and J. Eisert, Phys. Rev. A 78, 052319 (2008).
759
+
760
+ O.
761
+
762
+ Gittsovich
763
+
764
+ [57] L.-M. Duan, G. Giedke, J. I. Cirac, and P. Zoller, Phys. Rev. Lett. 84, 2722 (2000).
765
+
766
+ G.
767
+
768
+ J.
769
+
770
+ Cirac
771
+
772
+ P.
773
+
774
+ Zoller
775
+
776
+ P.
777
+
778
+ Lett.
779
+
780
+ 84
781
+
782
+ 79
783
+
784
+ [58] R. Simon, Phys. Rev. Lett. 84, 2726 (2000).
785
+
786
+ [59] R. F. Werner, Phys. Rev. A 40, 4277 (1989).
787
+
788
+ [60] Note that this idea can also be applied for the covariance matrix defined as $[\Gamma]_{ij} = \langle J_i J_j + J_j J_i \rangle / 2 - \langle J_i J_j \rangle$. Due to the concavity of the variance, for any separable state there must be a set of $p_k$ and $\rho_{\text{pureproduct},k}$ such that $\Gamma^{(\text{sep})} \ge \sum_k p_k \Gamma^{(\text{pureproduct},k)}$.
789
+
790
+ [61] G. Tóth, C. Knapp, O. Gühne, and H. J. Briegel, Phys. Rev. Lett. 99, 250405 (2007).
791
+
792
+ O.
793
+
794
+ Gühne
795
+
796
+ H.
797
+
798
+ J.
799
+
800
+ Briegel
801
+
802
+ Phys.
803
+
804
+ 99
805
+
806
+ 99
807
+
808
+ 99
809
+
810
+ 99
811
+
812
+ 99
813
+
814
+ 99
815
+
816
+ 99
817
+
818
+ 99
819
+
820
+ 99
821
+
822
+ 99
823
+
824
+ 99
825
+
826
+ 99
samples/texts_merged/7089754.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/7334540.md ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Solution to 2018-1 Problem 1
5
+
6
+ VIERA ČERŇANOVÁ
7
+
8
+ Trnava University, Faculty of Education,
9
+ Department of Mathematics and Computer Science,
10
+ Priemyselná 4, 918 43 Trnava, Slovakia
11
+ e-mail:vieracernanova@hotmail.com
12
+
13
+ **Abstract.** We give a solution to 2018-1 Problem 1.
14
+
15
+ **Keywords.** sangaku, square, equilateral triangle, incircle.
16
+
17
+ Mathematics Subject Classification (2010). 51M04.
18
+
19
+ **Problem 1.** ABCD is a square (see Figure 1), $F$ and $E$ are the points on the sides $AB$ and $DA$, respectively, such that $CEF$ is an equilateral triangle, $G$ and $H$ are points on the segment $EF$ such that $AGH$ is an equilateral triangle. Prove or disprove that the diameter of the incircle of $CEF$ equals $AG$.
20
+
21
+ FIGURE 1.
22
+
23
+ ¹This article is distributed under the terms of the Creative Commons Attribution License which permits any use, distribution, and reproduction in any medium, provided the original author(s) and the source are credited.
24
+ ---PAGE_BREAK---
25
+
26
+ **Solution.** Denote $I$ the foot of perpendicular from $G$ to $AB$, and $\mathcal{K}$ the incircle of $CEF$. Set $\theta = \angle BCF = \angle IAG$. Notice that $IF = IG$. Then
27
+
28
+ $$CB = AI + IF + FB$$
29
+
30
+ implies
31
+
32
+ $$CF \cos \theta = AG \cos \theta + AG \sin \theta + CF \sin \theta,$$
33
+
34
+ and consequently
35
+
36
+ $$\frac{AG}{CF} = \frac{\cos \theta - \sin \theta}{\cos \theta + \sin \theta} = \frac{\cos 2\theta}{1 + \sin 2\theta} = \frac{\sqrt{3}/2}{1 + 1/2} = \frac{\sqrt{3}}{3}.$$
37
+
38
+ Finally, if $d$ is a diameter of $\mathcal{K}$, then
39
+
40
+ $$d = \frac{\sqrt{3}}{3}CF = AG.$$
41
+
42
+ **Remark.** The equilateral triangle $AGH$ is homothetic to $CEF$ through a homothety $\mathcal{H}$ with center in the common midpoint $M$ of the segments $EF$ and $GH$, and ratio $-AG/CF$.
43
+
44
+ Applying $\mathcal{H}, \mathcal{H}^2, \dots$ to the square $ABCD$, the triangle $CEF$ and the circle $\mathcal{K}$, we obtain a sequence of squares, equilateral triangles and their incircles alternating on both sides of $EF$ (see Figure 2).
45
+
46
+ FIGURE 2.
47
+
48
+ Let $O$ be the center of $\mathcal{K}$. Notice that for $n = 0, 1, 2, \dots$, $\mathcal{H}^n(O)$ coincides with $\mathcal{H}^{n+2}(C)$. To prove this, it suffices to verify $AO = AA'$, where $A' = \mathcal{H}(A) = \mathcal{H}^2(C)$.
49
+
50
+ From the squares, we obtain
51
+
52
+ $$AA' = \frac{\sqrt{3}}{3}CA = \frac{\sqrt{3}}{3}(CM + MA) = \frac{\sqrt{3}+1}{3}CM.$$
53
+
54
+ Since $C, M, A, O$ are collinear and $MO$ is inradius of $\triangle CEF$,
55
+
56
+ $$AO = AM + MO = \frac{\sqrt{3}+1}{3}CM.$$
samples/texts_merged/7342615.md ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ # Analytical Tools for Point Source Interferometry
5
+
6
+ Gregory W. Hoth, Bruno Pelle, John Kitching, and Elizabeth A. Donley
7
+
8
+ NIST, 325 Broadway, Boulder, CO 80305
9
+
10
+ ## ABSTRACT
11
+
12
+ Light pulse atom interferometry can be used to realize high-performance sensors of accelerations and rotations. In order to broaden the range of applications of these sensors, it is desirable to reduce their size and complexity. Point source interferometry (PSI) is a promising technique for accomplishing both of these goals. With PSI, rotations are measured by detecting the orientation and frequency of spatial fringe patterns in the atomic state. These spatial fringes are primarily due to a correlation between an atom's initial velocity and its final position, which is created by the expansion of a cold atom cloud. However, the fringe patterns are also influenced by the structure of the initial atomic distribution. We summarize several methods that can be used to investigate the relationship between the spatial fringe pattern and the initial atomic distribution. This relationship will need to be understood in detail to realize an accurate gyroscope based on PSI.
13
+
14
+ **Keywords:** Light pulse atom interferometer, cold atom gyroscope, point source interferometry
15
+
16
+ ## 1. INTRODUCTION
17
+
18
+ Light pulse atom interferometers (LPAIs) have achieved excellent performance as sensors of acceleration,<sup>1,2</sup> rotation,<sup>3-6</sup> gravity,<sup>7,8</sup> and gravity gradients.<sup>9</sup> So far, LPAIs have mostly been realized as large, laboratory scale experiments. In order to realize the full potential of these sensors for applications such as inertial navigation and gravimetric surveys, it is desirable to reduce the size and complexity of these systems to enable them to move outside the laboratory environment.<sup>6,10,11</sup> Towards this goal, we are investigating the Point Source Interferometry (PSI) technique introduced by Dickerson<sup>12</sup> et al. as an approach to realizing a compact, high performance LPAI gyroscope.
19
+
20
+ In PSI, a $\frac{\pi}{2}-\pi-\frac{\pi}{2}$ pulse sequence is applied to an expanding cloud of cold, two-level atoms, and the expanded cloud is imaged with state-selective detection.<sup>12</sup> With this three-pulse sequence, the initial $\frac{\pi}{2}$ pulse puts each atom into a superposition of two states with different momenta.<sup>13</sup> After the first pulse, there is a free expansion period of duration $T_R$ which allows the two parts of the superposition to separate in space. Then, the $\pi$ pulse exchanges the momentum kick between the two parts of the superposition. After a second free expansion period with duration $T_R$, the two parts of the superposition overlap again, and the final $\frac{\pi}{2}$ pulse closes the interferometer. The pulses are typically implemented with stimulated Raman transitions.<sup>13,14</sup> With this approach, the internal state of the atoms after the pulse sequence depends on the phase shift between the two paths the atoms can take through the interferometer. With the three-pulse sequence, both accelerations (a) and rotations ($\Omega$) of the apparatus produce phase shifts, which are given by
21
+
22
+ $$ \Phi_a = \vec{k}_{\text{eff}} \cdot \vec{a} T_R^2, \quad (1) $$
23
+
24
+ $$ \Phi_{\Omega} = 2\vec{k}_{\text{eff}} \cdot (\vec{\Omega} \times \vec{v}) T_R^2, \quad (2) $$
25
+
26
+ where $\vec{k}_{\text{eff}}$ is the effective wave-vector for the Raman transitions, $\vec{v}$ is the velocity of the atoms, and $T_R$ is the time between consecutive pulses.
27
+
28
+ In order to realize a gyroscope, we must be able to isolate the phase shift due to rotations. With PSI, this is accomplished by exploiting the correlation between an atom's initial velocity and its final position created by
29
+
30
+ E-mail: gregory.hoth@nist.gov
31
+ ---PAGE_BREAK---
32
+
33
+ the expansion of the cloud. In particular, we can make the approximation $\vec{r} \approx \vec{v} T_{\text{ex}}$, where $\vec{r}$ is an atom's final position and $T_{\text{ex}}$ is the total expansion time. With this approximation, Eq. 2 becomes
34
+
35
+ $$ \Phi_{\Omega} = \left( \frac{2T_{\text{R}}^2 (\vec{k}_{\text{eff}} \times \vec{\Omega})}{T_{\text{ex}}} \right) \cdot \vec{r} = \vec{k}_{\Omega} \cdot \vec{r}. \quad (3) $$
36
+
37
+ Through the cloud expansion, the velocity-dependent rotation phase shift becomes a spatial gradient in the interferometer phase described by $\vec{k}_{\Omega}$. This phase gradient will give rise to a spatial fringe pattern which can be detected by imaging the cloud. By measuring the frequency and orientation of the spatial fringes, we can infer two components of $\vec{\Omega}$.
38
+
39
+ Point source interferometry has several features which suggest it is a promising candidate for a compact LPAI gyroscope. The spatial fringe pattern makes it possible to isolate the rotation phase shift with only one atomic source. In other three-pulse LPAI gyroscopes, two counter-propagating sources are required to distinguish the effects of rotations and accelerations.<sup>3,6,15,16</sup> With PSI, the cold atom cloud does not have to be launched, which further simplifies the experimental sequence compared to other cold-atom LPAI gyroscopes. The rotational dynamic range can also be increased by the use of spatially resolved detection. Finally, PSI could enable the characterization of the wave-front aberrations of the beam used to drive the interferometer pulse sequence.<sup>12</sup> These aberrations are an important limitation to the long-term stability of state of the art LPAI sensors.<sup>15,17</sup>
40
+
41
+ However, the benefits offered by PSI come with a cost. In a real system, the correlation between the atoms' initial velocities and their final positions is not completely determined by the expansion time. It also depends on the detailed structure of the initial distribution. In previous work,<sup>18</sup> we have shown that the structure of the initial distribution can cause shifts in the gyroscope scale factor. Navigation grade gyroscopes are expected<sup>19</sup> to have a scale factor stability of a few parts-per-million (ppm), and so the initial distribution will need to be carefully controlled to realize a high-performance PSI gyroscope. If the initial distribution has a Gaussian density profile and a velocity distribution characterized by a temperature $T$, then the bias introduced by the initial distribution can be described as a scale factor shift. Here, we show that this is not the case for most initial distributions. Therefore, it is important to investigate other biases on the rotation measurement that can be introduced by structure in the initial distribution.
42
+
43
+ In this work, we describe several analytical tools that can be used to investigate the relationship between the initial distribution and the spatial fringe patterns. Section 2 derives an expression for the PSI signal in the point-source limit. Section 3 develops a model for the PSI fringes in the case of an initial cloud with an extended spatial distribution, $n_0(r)$, and a temperature, $T$, by treating the initial cloud as a collection of many point sources. The case of a cloud with a Gaussian initial density profile is considered in detail. In this case, it is possible to obtain an analytical solution for the PSI fringes that reveals several new features. Section 4 considers the spatial fringes in the Fourier domain. This picture reveals that essentially any structure in the initial distribution is expected to bias the frequency of the spatial fringes away from the point-source limit. Section 5 develops a model of the PSI fringes in phase space. This phase-space picture both provides an intuitive explanation of the effects of a finite initial cloud size and makes it possible to consider initial distributions where the velocity distribution is not described by a temperature. Finally, Section 6 summarizes the conclusions from these models.
44
+
45
+ ## 2. THE POINT-SOURCE LIMIT
46
+
47
+ It is useful to consider the case where the atoms are initially concentrated in an infinitesimally small point source. This case is analytically tractable, and it reveals the essential physics of the PSI measurement. In this section, we will derive an expression for the density distribution of the population in one of the interferometer states after the $\frac{\pi}{2} - \pi - \frac{\pi}{2}$ pulse sequence in the point-source limit. In the following sections, we will leverage this simple case to model a cloud with a spatially extended initial distribution. To begin, consider a point source with $N$ two-level atoms at temperature $T$. The atoms have a velocity distribution given by
48
+
49
+ $$ p(\vec{v}) = \frac{N}{(2\pi)^{\frac{3}{2}} \sigma_v^3} \exp(-\vec{v}^2 / 2\sigma_v^2), \quad (4) $$
50
+ ---PAGE_BREAK---
51
+
52
+ where the width of the velocity distribution is $\sigma_v = \sqrt{k_B T/m}$. At $t=0$, the cloud begins to expand. Since the initial cloud is a point source, the position of each atom after an expansion time $T_{\text{ex}}$ is given by $\vec{r} = \vec{v} T_{\text{ex}}$. The cloud expansion essentially maps the velocity distribution into a spatial distribution. Applying this principle to Eq. 4 gives
53
+
54
+ $$n(\vec{r}, T_{\text{ex}}) = \frac{N}{(2\pi)^{\frac{3}{2}} \sigma_{\text{ps}}^3} \exp(-\vec{r}^2/2\sigma_{\text{ps}}^2), \quad (5)$$
55
+
56
+ where $\sigma_{\text{ps}} = \sigma_v T_{\text{ex}}$ characterizes the width of the expanded point source.
57
+
58
+ As the cloud expands, a $\frac{\pi}{2} - \pi - \frac{\pi}{2}$ pulse sequence is applied. The effect of the interferometer pulse sequence is to change the internal state of the atoms. The probability for an atom to change its internal state can be described by
59
+
60
+ $$p = (1 + c \cos(\Phi)) / 2, \quad (6)$$
61
+
62
+ where c is the contrast and $\Phi$ is the interferometer phase shift. In the point-source limit, the rotation phase shift can be expressed as a phase gradient given by Eq. 3.
63
+
64
+ By combining Eqs. 3, 5, and 6, we obtain an expression for the final density profile of one of the interferometer states in the point-source limit. The result is
65
+
66
+ $$n_{\text{ps}}(\vec{r}, t) = \frac{N \exp(-\vec{r}^2/2\sigma_{\text{ps}}^2) \left(1 + c \cos(\vec{k}_{\Omega} \cdot \vec{r} + \phi_0)\right)}{(2\pi)^{\frac{3}{2}} \sigma_{\text{ps}}^3} \quad (7)$$
67
+
68
+ where $\phi_0$ is a phase offset due to other sources of interferometer phase shifts. The density distribution for the other output state has the same form with the sign of the contrast reversed.
69
+
70
+ In this derivation, we have neglected the effects of gravity and the Raman momentum kick on the motion of the atoms because neither of these effects alters the wave-vector of the spatial fringes. The effects of gravity and the Raman momentum kick during the $\frac{\pi}{2} - \pi - \frac{\pi}{2}$ pulse sequence are accounted for in the derivation of the interferometer phase shifts. If $\vec{g}$ is not parallel to $\vec{k}_{\text{eff}}$ and the cloud is allowed to fall before the beginning of the interferometer pulse sequence, then the change in the atoms' velocity due to gravity will produce a phase shift via Eq. 2. However, this phase shift will be the same for all the atoms in the cloud so it will not affect the phase gradient in Eq. 3. The Raman momentum kick and gravity do influence the final position of the atoms during detection. It is straightforward to include these effects in Eq. 7, but these details would only muddy the waters.
71
+
72
+ ## 3. MANY POINT SOURCES—A GAUSSIAN INITIAL CLOUD
73
+
74
+ The case of an initial cloud with a density distribution $n_0(\vec{r})$ and a temperature $T$ can be described as a collection of many point sources. The evolution of each point source during the interferometer sequence can be described by Eq. 7, and the final distribution is given by the sum of all the expanded point sources.
75
+
76
+ It is useful to begin by considering a small volume $dV$ of the initial cloud located at a position $\vec{R}$. The number of atoms contained in this region is given by $N_i = n_0(\vec{R}) dV$. After a time $T_{\text{ex}}$, the atoms from this region will have expanded into a density distribution described by $(n_0(\vec{R}) dV) n_{\text{ps}}(\vec{r}-\vec{R}, T_{\text{ex}})$ where $n_{\text{ps}}(\vec{s}, T_{\text{ex}})$ describes the profile of an expanded point source centered on $\vec{s}=0$. The final density distribution can be obtained by integrating over all possible values of $\vec{R}$, which gives
77
+
78
+ $$n(\vec{r}, T_{\text{ex}}) = \int d^3R \, n_0(\vec{R}) n_{\text{ps}}(\vec{r} - \vec{R}, T_{\text{ex}}) = n_0(\vec{r}) * n_{\text{ps}}(\vec{r}, T_{\text{ex}}), \quad (8)$$
79
+
80
+ where the $*$ operator represents convolution.
81
+
82
+ In order to assess the implications of a finite initial size, it is useful to consider a Gaussian initial density distribution. In this case, it is possible to evaluate Eq. 8 analytically. Formally, the initial density distribution can be described by Eq. 5 with a characteristic width $\sigma_0$, and the point-source solution is described by Eq. 7. After evaluating the integral, we find that the final density distribution of each of the interferometer states is described by a Gaussian modulated by a spatial fringe as was the case for the point-source solution in Eq. 7.
83
+ ---PAGE_BREAK---
84
+
85
+ However, the cloud size, the fringe frequency, and the fringe contrast are all modified. The final cloud size becomes $\sigma_f = \sqrt{\sigma_0^2 + \sigma_{\text{ps}}^2}$, which is the familiar result for an expanding cloud with a Gaussian initial density profile. The spatial fringe wave-vector becomes
86
+
87
+ $$ \vec{k}_{\Omega,g} = \vec{k}_{\Omega,\text{ps}} (1 - \sigma_0^2 / \sigma_f^2), \quad (9) $$
88
+
89
+ where $\vec{k}_{\Omega,\text{ps}}$ is the expected phase gradient in the point-source limit (Eq. 3). The fringe contrast becomes
90
+
91
+ $$ c(\Omega) = c_0 \exp(-k_{\Omega,\text{ps}}^2 \sigma_0^2 (1 - \sigma_0^2 / \sigma_f^2) / 2), \quad (10) $$
92
+
93
+ where $c_0$ is the interferometer contrast with $\Omega = 0$.
94
+
95
+ Looking at Eq. 9 and Eq. 10, we can see that a finite initial cloud size has two effects. It causes the spatial fringe contrast to decrease as a function of $\Omega$, and it leads to a shift in the spatial fringe frequency. The loss of contrast can be understood by noting that when the cloud has a finite initial size, atoms with different initial velocities will end up at the same final position. Since our detection method is only sensitive to the final position of the atoms, we must average over this distribution of phase shifts, which will wash out the spatial fringes.
96
+
97
+ The velocity spread at a point in the expanded cloud can be estimated by modeling the initial cloud as a uniform ball with diameter $d$. The maximum velocity spread will be the difference in velocities between atoms that start on opposite sides of the initial cloud, which is given by $\delta v = d/T_{\text{ex}}$. Via Eq. 2, this velocity spread corresponds to a range of phase shifts $\delta\phi = 2k_{\text{eff}}T_R^2\Omega\delta v$. The maximum observable rotation rate roughly corresponds to $\delta\phi = 2\pi$, which leads to
98
+
99
+ $$ \Omega_{\max} = \frac{\pi}{k_{\text{eff}}d} \frac{T_{\text{ex}}}{T_R^2}. \quad (11) $$
100
+
101
+ The prediction of this simple model of the fringe contrast loss can be compared to the result obtained from Eq. 8 by calculating a rotation rate that characterizes the contrast loss. A convenient choice is the rotation rate where the contrast has fallen to 50 % of its initial value. For the case of a Gaussian initial cloud, Eq. 10 leads to
102
+
103
+ $$ \Omega_{50\%} = \frac{\sqrt{\ln 4}\, T_{\text{ex}}}{2 k_{\text{eff}} \sigma_0 T_R^2} \left(1 - \left(\sigma_0 / \sigma_f\right)^2\right)^{-1/2}. \quad (12) $$
104
+
105
+ Looking at Eqs. 11 and 12, we can see that these two models make similar predictions for the rotation rate that characterizes the contrast loss. In the limit $\sigma_f \gg \sigma_0$, the two rotation rates differ by only a constant, which indicates that the simple picture of the contrast loss captures the essential physics.
106
+
107
+ Unlike the contrast loss, it is difficult to precisely identify the source of the spatial frequency shift with this picture. By evaluating the integral in Eq. 8, we added up a large number of truncated sinusoids with central frequency $k_\Omega$ and obtained a truncated sinusoid with a different central frequency $k'_\Omega$. If we were working with the more familiar case of infinite sinusoids, this would not be possible. In the case of truncated sinusoids, it is possible for interference effects to shift the dominant frequency because each sinusoid actually contains a range of frequencies.
108
+
109
+ This interference effect can be visualized by comparing the spatial fringes from different parts of the initial cloud as shown in Fig. 1. In the left panel, each expanded point-source has a width that is equal to the initial cloud width ($\sigma_{\text{ps}} = \sigma_0$). In this small expansion regime, the spatial fringes from different regions of the initial cloud are out of phase with each other. As a result, they interfere and produce a final cloud with a spatial frequency that is significantly shifted from the frequency of the individual point sources. In the right panel, the point sources have expanded to be significantly larger than the initial cloud ($\sigma_{\text{ps}} = 3\sigma_0$), and so the spatial fringes from the individual point sources are more in phase with each other. As a result, the individual point sources combine to produce a cloud with a spatial frequency that is much closer to the point-source limit.
110
+
111
+ This spatial interference effect provides an explanation for the frequency shift caused by the structure of the initial distribution, but it is difficult to generalize. We can gain more insight into the connection between the spatial fringe frequency and the structure of the initial distribution by studying the spatial fringe patterns in the Fourier domain.
112
+ ---PAGE_BREAK---
113
+
114
+ Figure 1. A visualization of the interference effect which leads to the spatial fringe frequency shift. Seven point sources (colored lines) are used to approximate the spatial fringes produced with a Gaussian initial density distribution. The number of atoms in each point source is determined by the initial cloud shape. The sum of the seven point sources (solid black line) can be compared to a single point source (dashed line). In order to emphasize the spatial fringe, we plot the difference of the density distribution for the two interferometer states, which eliminates the offset in Eq. 7. (a) The width of each point source, $\sigma_{\text{ps}}$, is equal to the initial cloud size, $\sigma_0$, which corresponds to a final cloud size $\sigma_f = \sqrt{2}\sigma_0$. In this case, the fringes from the individual point sources are out of phase with each other. When all the individual point sources are added together, the frequency of the resulting spatial fringe is clearly different than the frequency of the individual point sources. (b) $\sigma_{\text{ps}} = 3\sigma_0$, which corresponds to a final cloud size $\sigma_f = \sqrt{10}\sigma_0$. In this case, the fringes from the individual point sources are more in phase and the frequency shift is much smaller.
115
+
116
+ ## 4. PSI FRINGES IN THE FOURIER DOMAIN
117
+
118
+ So far, we have seen that the structure of the initial distribution can bias the frequency of the detected spatial fringes away from the prediction of the point-source limit. For a Gaussian initial density distribution, this bias takes the form of a shift in the scale factor connecting the spatial fringe to the detected fringe frequency, but it is not clear if this shift is somehow unique to the case of a Gaussian cloud. In this section, we will see that frequency shifts are expected to occur with essentially any initial distribution. We will also see that it is challenging to identify a general procedure that can be used to exactly determine the rotation rate from a measurement of the spatial fringe pattern. This indicates that detailed knowledge of the initial distribution will be necessary to realize an accurate PSI gyroscope. As a first step, we will take a closer look at the fringe patterns produced by a single point source.
119
+
120
+ ### 4.1 One Point Source
121
+
122
+ Consider the density distribution for a single point source (Eq. 7) again. For simplicity, we'll work in one dimension with perfect fringe contrast ($c=1$) and set the total atom number $N=1$. With these simplifications, a point source centered at position $x_c$ expands into a density distribution given by
123
+
124
+ $$n_{\text{ps}}(x) = \frac{1}{\sqrt{2\pi}\sigma_{\text{ps}}} \exp\left(-\frac{(x-x_c)^2}{2\sigma_{\text{ps}}^2}\right) \frac{(1+\cos(k_{\Omega}(x-x_c)+\phi))}{2} \quad (13)$$
125
+
126
+ The Fourier transform of the point-source solution is given by
127
+
128
+ $$\hat{n}_{\text{ps}}(k) = \frac{e^{-ikx_c}}{\sqrt{8\pi}} \left( \exp\left(-\frac{k^2\sigma_{\text{ps}}^2}{2}\right) + \frac{e^{i\phi}}{2} \exp\left(-\frac{(k+k_{\Omega})^2\sigma_{\text{ps}}^2}{2}\right) + \frac{e^{-i\phi}}{2} \exp\left(-\frac{(k-k_{\Omega})^2\sigma_{\text{ps}}^2}{2}\right) \right). \quad (14)$$
129
+
130
+ The Fourier transform of the expanded point source, $\hat{n}_{\text{ps}}(k)$, has an intuitively appealing structure. It is a sum of three Gaussian peaks centered at $k = \pm k_{\Omega}$ and $k = 0$. Each peak has a width $\sigma_k = 1/\sigma_{\text{ps}}$, determined by the spatial width of the expanded point source. This structure is illustrated in Fig. 2a.
131
+ ---PAGE_BREAK---
132
+
133
+ Figure 2. An illustration of the Fourier spectrum of the spatial fringes from a single point source in various cases. Panel a) illustrates a case where the spatial fringes are well resolved. The three peak structure is intuitive, but it only occurs if the spatial frequency $k_Ω$ is sufficiently large ($k_Ω \gg 1/σ_{ps}$). Panels b) and c) show that when $k_Ω \approx 1/σ_{ps}$, the Fourier spectrum can have only one peak or even peaks at frequencies other than $k_Ω$, depending on the interferometer phase $φ$. Panel d) illustrates the real-space density distribution for the three cases shown in panels a) to c).
134
+
135
+ It is important to consider how we can determine the rotation rate from a measurement of the density distribution in practice. One approach is to estimate the dominant frequency in the Fourier spectrum of the imaged density distribution and convert it to a rotation rate with Eq. 3. This approach will work well as long as the point-source spatial fringe frequency satisfies $k_Ω \gg 1/σ_{ps}$. Physically, this corresponds to the case where one can observe several periods of the fringe pattern across the expanded cloud. In this case, the dominant frequency in the Fourier spectrum coincides with $k_Ω$ as we intuitively expect. However, when $k_Ω \sim 1/σ_{ps}$, the Fourier peaks are not resolved. In this case, the Fourier spectrum can depend strongly on the interferometer phase $φ$ and peaks can appear at frequencies other than $k_{Ω,ps}$ as shown in Fig. 2b,c. This makes it difficult to determine the rotation rate from the Fourier spectrum for small $Ω$.
136
+
137
+ In the point-source limit, we can accurately estimate the rotation rate by combining several measurements of the density distribution with different overall phases. Each point in the cloud can be described by a fringe of the form $n(x) = y_0 + A \cos(\phi_0 + \varphi(x))$, where $y_0$ is the fringe offset, $A$ is the fringe amplitude, $\varphi(x)$ is the spatial phase shift, and $\phi_0$ is an overall phase-shift which can be controlled experimentally. The three fringe parameters, $A$, $y_0$, and $\varphi(x)$, can be determined from three separate images of the density distribution that correspond to different overall phases. A convenient choice is the set $\phi_0 = 0, \frac{\pi}{2},$ and $\pi$. In this case, the spatial phase is given
138
+ ---PAGE_BREAK---
139
+
140
+ by
141
+
142
+ $$
143
\varphi(x) = \arctan \left( \frac{I_0 - I_{\pi}}{2I_{\pi/2} - (I_0 + I_{\pi})} \right), \quad (15)
144
+ $$
145
+
146
+ where $I_\phi$ refers to the image with the corresponding value of $\phi_0$. For a single point source, $\varphi(x) = k_\Omega x$ with $k_\Omega$
147
+ given by Eq. 3 in all cases. However, things get more complicated if we allow even two point sources.
148
+
149
+ ### 4.2 Two Point Sources
150
+
151
+ Consider two point sources with the same number of atoms separated by a distance x₀. Using the Fourier shift theorem, F(n(x − x₀)) = exp(−ikx₀) n̂(k), the two point-source Fourier spectrum can be expressed as
152
+
153
+ $$
154
+ \hat{n}_{2\text{ps}}(k) = (1 + \exp(-ikx_0)) \hat{n}_{\text{ps}}(k). \tag{16}
155
+ $$
156
+
157
+ It is easy to find cases where the dominant frequency in the two point-source spectrum is not kΩ. For example,
158
+ if kΩx₀ = π, then n̂₂ps(kΩ) = 0. The Fourier spectrum for this case is shown as a blue curve in Fig. 3a. Note
159
+ that while we have complete destructive interference at kΩ, there are still two peaks in the Fourier spectrum on
160
+ either side of kΩ. If we were dealing with infinite sine waves, we would have perfect destructive interference and
161
+ these peaks would vanish. However, because we are dealing with truncated sine waves, some oscillations remain
162
+ at frequencies other than kΩ.
163
+
164
+ We can also find cases where the Fourier spectrum is nearly a three peak spectrum, like the single point-source
165
+ spectrum shown in Fig. 2a, except that the dominant frequency is not kΩ. For example, the purple curve in
166
+ Fig. 3a illustrates the case where kΩx₀ = 3.5, and the dominant peak in the Fourier spectrum is shifted to a
167
+ slightly higher frequency. A close inspection will show that there are also two small additional peaks at lower
168
+ frequencies in this case.
169
+
170
+ The spatial phase for these two examples is plotted in Fig. 3c. In the case $k_{\Omega}x_{0} = \pi$, the phase is a pure gradient with slope $k_{\Omega}$ except for a $\pi$ phase jump at $x = 0.5$. The case with $k_{\Omega}x_{0} = 3.5$ is more complicated. Looking at Fig. 3c, we can see $\varphi(x)$ is well approximated by a gradient with a slope slightly larger than $k_{\Omega}$. However, if we subtract the expected phase gradient $k_{\Omega}x$, we can see that $\varphi(x)$ is actually nonlinear as shown in Fig. 3d.
171
+
172
+ It is useful to examine how these results change as the point sources expand for a longer time before detection.
173
+ Formally, this corresponds to increasing $\sigma_{\text{ps}}$. In the limit $\sigma_{\text{ps}} \to \infty$, we are working with the familiar case of
174
+ infinite sinusoids. By adding many infinite sinusoids together, we can change the amplitude and the overall phase
175
+ of the sinusoids, but the frequency of the sinusoids remains constant. Based on this limit, we can expect that as
176
+ $\sigma_{\text{ps}}$ increases, the dominant frequency in the Fourier spectrum and the spatially resolved phase should converge
177
+ to the values that we expect for a single point source, possibly with a reduced amplitude or an overall phase shift.
178
+ Figure 4 illustrates this principle for the case of $k_{\Omega} x_0 = 3.5$. In the case of $k_{\Omega} x_0 = \pi$, the oscillations completely
179
+ vanish in the limit $\sigma_{\text{ps}} \to \infty$, which is just what we expect for two sinusoids with an equal amplitude and a
180
+ $\pi$ phase shift. In the case $k_{\Omega} x_0 = 3.5$, we see that the peak in the Fourier spectrum converges to $k_{\Omega}$, and the
181
+ difference between the spatially resolved phase and the expected phase gradient becomes a constant phase shift.
182
+
183
+ This two point-source model offers several new insights. First, we can see how two truncated sine waves with central frequency *k* can interfere to produce oscillations with a different dominant frequency, *k'*. The reason is that the truncated sinusoids each contain a range of frequencies and each frequency will have a different phase shift (represented by the exp(−ikx₀) factor in Eq. 16). Since each frequency component has a different phase shift, it is possible to suppress the oscillation at *k* while enhancing the oscillation at *k'*. Second, detailed knowledge of the source distribution is needed to accurately estimate Ω from a measurement of either the spatial phase or the dominant spatial frequency. We can see this by considering the variety of structure in the spatial phase and Fourier spectrum in just the examples considered so far. In the point-source limit, the spatial phase is a pure gradient, and the magnitude of the gradient is linearly related to the rotation rate. With only two point sources, both of these features disappear. If the spatial phase is approximated as a gradient, it will be important to ensure that any frequency biases introduced by non-linearities in the spatial phase are accounted for and acceptably small. One approach to minimizing these frequency biases is to ensure that the cloud expansion is sufficiently large. In the long expansion limit ($\sigma_{\text{ps}} \to \infty$), we must recover the dominant frequency and spatial phase gradient predicted by the point source model.
184
+ ---PAGE_BREAK---
185
+
186
+ Figure 3. Examples of interference effects with two point sources. The model parameters are $\sigma_{\text{ps}} = 3$ and $x_0 = 1$. (a) Examples of the two point-source model where the dominant spatial frequency is not $k_{\Omega}$. If $k_{\Omega}x_0 = \pi$ (blue), there is no oscillation at $k_{\Omega}$ at all. If $k_{\Omega}x_0 = 3.5$ (purple), the dominant frequency is shifted to a higher frequency. The dashed lines indicate $k_{\Omega}$ for these two cases. (b) Real-space density distribution for these two cases of the two point-source model. (c) Spatial phase for these two cases of the two point-source model (solid lines) compared to the phase gradient we would expect for a single point source (dashed lines). If $k_{\Omega}x_0 = \pi$, the spatial phase is a gradient with slope $k_{\Omega}$, except for a $\pi$ phase jump at $x = 0.5$. For $k_{\Omega}x_0 = 3.5$, the spatially resolved phase is well approximated by a gradient with a slightly higher slope than $k_{\Omega}$, but a closer look reveals that the phase is no longer a pure gradient as shown in panel d). (d) $\varphi(x) - k_{\Omega}x$ for the case $k_{\Omega}x_0 = 3.5$. By examining this difference, we can see that in this case the spatial phase is no longer a pure gradient. This is also illustrated in Fig. 4.
187
+
188
+ ### 4.3 Other Initial Density Distributions
189
+
190
+ Now we will revisit the model for an arbitrary initial density distribution. Since Eq. 8 is a convolution, its Fourier transform has the form
191
+
192
+ $$ \hat{n}(k,t) = \sqrt{2\pi} \hat{n}_0(k) \hat{n}_{\text{ps}}(k,t), \quad (17) $$
193
+
194
+ where $\hat{n}_0(k)$ is the Fourier transform of the initial distribution and $\hat{n}_{\text{ps}}$ is the Fourier transform of the point-source solution. Based on Eq. 17, we can expect that for any localized initial density distribution, there will be a decay of the spatial fringe contrast at large rotation rates and a shift in the frequency of the spatial fringes compared to the point-source limit. This is because any localized density distribution will have a Fourier transform that rolls off at large $k$ so we can think of $\hat{n}_0(k)$ as slowly decaying envelope. The decreasing amplitude of $\hat{n}_0(k)$ will lead to a decay in the contrast, and the slope of the envelope will cause a shift in the central frequency of the spatial fringes. The details of the contrast decay and the spatial fringe frequency shift will depend on the initial density profile.
195
+
196
+ These general features can be illustrated by comparing two initial density profiles: a Gaussian and a box, both with full width characterized by $2\sigma_0$. The initial density distributions and their Fourier transforms are
197
+ ---PAGE_BREAK---
198
+
199
+ Figure 4. Effect of increasing $\sigma_{\text{ps}}$ in the two point-source model with $k_{\Omega} = 3.5$ and $x_0 = 1$. (a) An illustration of the evolution of the spectrum as the point sources expand. The peak in the Fourier spectrum reduces in amplitude and converges to the expected frequency. (b) Difference between the spatial phase and the point-source phase gradient. As $\sigma_{\text{ps}}$ increases, this difference smooths out to a constant so that the spatial phase is equal to the point-source phase gradient with an overall offset.
200
+
201
+ shown in Fig. 5a)-b). By looking at the Fourier transform of the initial density distributions, we can conclude
202
+ that the contrast will decay more slowly for the box-like density distribution than the Gaussian one. We can
203
+ also predict that the shifts in the central frequency of the spatial fringes will be smaller for the uniform box than
204
+ for the Gaussian because the roll-off is slower.
205
+
206
+ These conclusions can be verified by examining the spatial fringe patterns produced with these initial density
207
+ distributions. Two cases are illustrated in Fig. 5c)-d). In panel c), the cloud has roughly tripled in size ($\sigma_{\text{ps}} = 3\sigma_0$), and one can clearly see shifts in the dominant frequency for both initial cloud shapes. As predicted, the initially Gaussian cloud leads to a larger frequency shift and a smaller fringe contrast. In panel d), $\sigma_{\text{ps}} = 15\sigma_0$ and the dominant frequency in the spectrum cannot be distinguished from $k_{\Omega}$ by eye for either initial distribution.
208
+
209
+ ## 5. PSI IN PHASE SPACE
210
+
211
+ In the previous two sections, we have modeled the PSI fringes produced by a cloud with a finite initial size by
212
+ breaking the initial distribution down into many point sources. With this approach, we've identified two main
213
+ effects of the initial distribution. First, the spatial fringe contrast will decrease as a function of Ω. Second,
214
+ the dominant frequency of the spatial fringes for a given Ω will be shifted from the frequency calculated in the
215
+ point-source limit (Eq. 3). So far, we have explained this frequency shift as a consequence of the interference
216
+ of truncated sinusoids. This picture has allowed us to build some useful mathematical formalism, but it is also
217
+ rather abstract. It is important to note that all of the models presented so far assume the velocity distribution
218
+ of the atoms can be characterized by a uniform temperature.
219
+
220
+ We can gain more insight into the origins of the frequency shift by tracking the distribution of the atoms in
221
+ both position and velocity. This distribution is often called the phase-space density $\rho(x, v)$. In this section, we
222
+ will develop an alternative perspective on the PSI fringes by studying how the phase-space density evolves as
223
+ the cloud expands. The first step is to derive an evolution equation for $\rho$. When the cloud is freely expanding,
224
+ the velocity of each individual atom is constant. In a time $dt$, each atom moves a distance $dx = vdt$. Thus, we
225
+ have $\rho(x + vdt, v, t + dt) = \rho(x, v, t)$. After expanding $\rho$ to first order, we find that
226
+
227
+ $$ \frac{\partial \rho}{\partial t} = -v \frac{\partial \rho}{\partial x}, \qquad (18) $$
228
+
229
+ which has solutions of the form $\rho(x, v, t) = f(x - vt)$. In particular, if $\rho_0(x, v)$ describes the phase-space density at $t=0$, then at future times
230
+
231
+ $$ \rho(x, v, t) = \rho_0(x - vt, v). \tag{19} $$
232
+ ---PAGE_BREAK---
233
+
234
+ Figure 5. A comparison of the spatial fringes produced with a box-shaped initial cloud and a Gaussian initial cloud with parameters $\sigma_0 = 2$ and $k_\Omega = 0.8$. Panel a) shows the initial density distributions. Both are normalized to have area 1. The box has a diameter of $d = 2\sigma_0$. Panel b) shows the Fourier spectrum of these initial distributions. According to Eq. 17, the Fourier spectrum of the initial density distribution can be thought of as a filter acting on the point source spectrum. Panels c) and d) show examples of the Fourier spectrum of the expanded cloud for these two initial density distributions. In panel c), $\sigma_{ps} = 3\sigma_0$ so the cloud has roughly tripled in size. For both initial cloud profiles, the dominant frequency is clearly shifted from $k_\Omega$, but the frequency shift is smaller for the box-like distribution. In panel d), $\sigma_{ps} = 15\sigma_0$, and the dominant frequency cannot be distinguished from $k_\Omega$ by eye.
235
+
236
+ The phase space density for one of the interferometer states can be found by multiplying $\rho$ by the probability for an atom to occupy that state. This leads to
237
+
238
+ $$ \rho_e(x, v, t) = \rho_0(x - vt, v) \frac{1 + c \cos (2k_{\text{eff}}v\Omega T_R^2 + \phi_0)}{2}, \quad (20) $$
239
+
240
+ where $\rho_e$ is the phase-space density for atoms in state $|e\rangle$. The density distribution at time $t$ can be found by integrating over all velocities
241
+
242
+ $$ n(x,t) = \int dv \rho(x,v,t). \quad (21) $$
243
+
244
+ So far, this is more mathematical abstraction. The real power of this approach comes from visualizing the geometry of phase space. This is typically done by plotting position on the horizontal axis and velocity on the vertical axis. With this approach, the expansion of the cloud can be visualized by noting that atoms in the upper half of the plane move to the right, and the atoms in the lower half of the plane move to the left. At $t=0$, a point source is represented by a vertical line. At future times, the point-source phase-space density remains a straight line described by $v = x/T_{\text{ex}}$, which corresponds to a perfect correlation between the atoms' position and their velocity. The cloud expansion causes the line to rotate in the $x-v$ plane.
245
+ ---PAGE_BREAK---
246
+
247
+ When the cloud has a finite initial size, its initial phase-space density can be visualized as a blob that is roughly symmetric around the origin. The cloud expansion stretches the initial phase-space blob horizontally. Since phase-space volume is conserved, this stretching also causes the phase-space density to thin out vertically so that it tends to become like a long thin cigar. This thinning out of the phase-space distribution corresponds to the build up of the correlation between the atoms' final position and their initial velocity. The expansion of the cloud in phase-space is illustrated for both a point source and a cloud with a Gaussian initial density distribution and a temperature T in Fig. 6. The lower half of the figure illustrates the distribution of interferometer phase shifts and the detected fringes. In these phase-space pictures, the interferometer phase shift is constant along horizontal lines because the phase shift depends only on the atoms' velocity and not their position.
248
+
249
+ By comparing the phase-space distribution for the Gaussian cloud to the distribution for a point-source, we can see two effects of a finite initial size. First, the Gaussian cloud has a range of velocities at every point. As we saw in Sec. 3, this blurring of the correlation between the atoms' position and their velocity tends to reduce the contrast of the spatial fringes. Second, the expanded Gaussian cloud is tilted at a different angle than the point-source phase-space distribution. This tilt indicates that the average velocity is lower for the Gaussian initial cloud than we would expect based on the point-source limit. With the phase-space formalism, it is straightforward to calculate the average velocity at a given position for the Gaussian cloud case. The result is
250
+
251
+ $$v_{\text{avg}}(x) = \left(1 - \frac{\sigma_0^2}{\sigma_f^2}\right) v_{\text{ps}} \quad (22)$$
252
+
253
+ where $v_{\text{ps}} = x/T_{\text{ex}}$ is the velocity the atoms would have if the cloud were a point source. For the Gaussian cloud, this reduction in the average velocity corresponds exactly to the shift in the spatial fringe frequency we first calculated in Eq. 9. With this perspective, we can see that the spatial frequency shift is fundamentally due to the imperfect correlation between the atoms' initial velocity and their final position.
254
+
255
+ ## 6. CONCLUSION
256
+
257
+ We have described three pictures that can be used to quantify the relationship between the initial atomic distribution and the detected spatial fringe patterns. In the first picture, the final density distribution is calculated as the convolution of the initial density distribution with the point-source solution. This space-domain approach can yield exact solutions for a few initial distributions, and it is a useful tool for studying the implications of particular density profiles, but it is difficult to draw general conclusions about the relationship between the initial distribution and the fringe pattern with this approach. In the second picture, we consider the detected density distribution in the Fourier domain. With this picture, the Fourier transform of the initial density distribution can be thought of as a transfer function which filters the point-source solution. This picture revealed that frequency shifts are expected for essentially any localized initial distribution. The case of a Gaussian initial cloud is somewhat special because the frequency shift takes the form of a scale factor shift, and the spatial phase is expected to be a pure gradient. This will not be the case for most initial distributions. In the third picture, we consider the phase space density. By tracking the distribution of the atoms in both position and velocity, we can see that the spatial fringe frequency shift comes about because the atom's final position is an imperfect proxy for their initial velocity.
258
+
259
+ In order to realize a high performance, PSI gyroscope, these finite size effects will have to be understood and controlled with excellent precision and stability. One approach would be to use an optical trap to control the initial atomic distribution.²⁰ Despite the challenges posed by these finite size effects, we believe the advantages offered by the PSI technique indicate that this route to a compact, cold-atom LPAI gyroscope is worth pursuing.
260
+
261
+ ## ACKNOWLEDGMENTS
262
+
263
+ This work was funded by NIST. NIST is a US government agency and this work is not subject to copyright.
264
+ ---PAGE_BREAK---
265
+
266
+ Figure 6. An illustration of the phase space picture of the PSI fringes. (Top left) The initial phase-space density for a point source (dashed line) and a Gaussian cloud. (Top right) Phase-space density for the point source and the Gaussian cloud after a time $T_{\text{ex}}$. The point-source phase-space density simply rotates, but the phase-space density for the Gaussian cloud elongates and thins out. (Bottom left) Phase-space distribution colored with the interferometer phase. Detection corresponds to averaging over the vertical velocity axis. (Bottom right) Detected spatial fringes in the point source and Gaussian cloud cases.
267
+ ---PAGE_BREAK---
268
+
269
+ REFERENCES
270
+
271
+ [1] McGuinness, H. J., Rakholia, A. V., and Biedermann, G. W., "High data-rate atom interferometer for measuring acceleration," *Applied Physics Letters* **100**, 011106 (Jan. 2012).
272
+
273
+ [2] Lautier, J., Volodimer, L., Hardin, T., Merlet, S., Lours, M., Pereira Dos Santos, F., and Landragin, A., "Hybridizing matter-wave and classical accelerometers," *Applied Physics Letters* **105**, 144102 (Oct. 2014).
274
+
275
+ [3] Gustavson, T. L., Landragin, A., and Kasevich, M. A., "Rotation sensing with a dual atom-interferometer Sagnac gyroscope," *Classical and Quantum Gravity* **17**, 2385-2398 (June 2000).
276
+
277
+ [4] Barrett, B., Geiger, R., Dutta, I., Meunier, M., Canuel, B., Gauguet, A., Bouyer, P., and Landragin, A., "The Sagnac effect: 20 years of development in matter-wave interferometry," *Comptes Rendus Physique* **15**, 875-883 (Dec. 2014).
278
+
279
+ [5] Berg, P., Abend, S., Tackmann, G., Schubert, C., Giese, E., Schleich, W., Narducci, F., Ertmer, W., and Rasel, E., "Composite-Light-Pulse Technique for High-Precision Atom Interferometry," *Physical Review Letters* **114**, 063002 (Feb. 2015).
280
+
281
+ [6] Rakholia, A. V., McGuinness, H. J., and Biedermann, G. W., "Dual-Axis High-Data-Rate Atom Interferometer via Cold Ensemble Exchange," *Physical Review Applied* **2**, 054012 (Nov. 2014).
282
+
283
+ [7] Merlet, S., Bodart, Q., Malossi, N., Landragin, A., Santos, F. P. D., Gitlein, O., and Timmen, L., "Comparison between two mobile absolute gravimeters: optical versus atomic interferometers," *Metrologia* **47**(4), L9 (2010).
284
+
285
+ [8] Hu, Z.-K., Sun, B.-L., Duan, X.-C., Zhou, M.-K., Chen, L.-L., Zhan, S., Zhang, Q.-Z., and Luo, J., "Demonstration of an ultrahigh-sensitivity atom-interferometry absolute gravimeter," *Physical Review A* **88**, 043610 (Oct. 2013).
286
+
287
+ [9] Biedermann, G. W., Wu, X., Deslauriers, L., Roy, S., Mahadeswaraswamy, C., and Kasevich, M. A., "Testing gravity with cold-atom interferometers," *Physical Review A* **91**, 033629 (Mar. 2015).
288
+
289
+ [10] Hauth, M., Freier, C., Schkolnik, V., Senger, A., Schmidt, M., and Peters, A., "First gravity measurements using the mobile atom interferometer GAIN," *Applied Physics B* **113**, 49-55 (Apr. 2013).
290
+
291
+ [11] Battelier, B., Barrett, B., Fouché, L., Chichet, L., Antoni-Micollier, L., Porte, H., Napolitano, F., Lautier, J., Landragin, A., and Bouyer, P., "Development of compact cold-atom sensors for inertial navigation," *Proceedings of SPIE, Quantum Optics* **9900**, 990004 (Apr. 2016).
292
+
293
+ [12] Dickerson, S. M., Hogan, J. M., Sugarbaker, A., Johnson, D. M. S., and Kasevich, M. A., "Multiaxis Inertial Sensing with Long-Time Point Source Atom Interferometry," *Physical Review Letters* **111**, 083001 (Aug. 2013).
294
+
295
+ [13] Kasevich, M. and Chu, S., "Atomic interferometry using stimulated Raman transitions," *Physical Review Letters* **67**, 181-184 (July 1991).
296
+
297
+ [14] Bordé, C. J., "Atomic interferometry with internal state labelling," *Physics Letters A* **140**, 10-12 (Sept. 1989).
298
+
299
+ [15] Gauguet, A., Canuel, B., Lévèque, T., Chaibi, W., and Landragin, A., "Characterization and limits of a cold-atom Sagnac interferometer," *Physical Review A* **80**, 063604 (Dec. 2009).
300
+
301
+ [16] Tackmann, G., Berg, P., Abend, S., Schubert, C., Ertmer, W., and Rasel, E. M., "Large-area Sagnac atom interferometer with robust phase read out," *Comptes Rendus Physique* **15**, 884-897 (Dec. 2014).
302
+
303
+ [17] Schkolnik, V., Leykauf, B., Hauth, M., Freier, C., and Peters, A., "The effect of wavefront aberrations in atom interferometry," *Applied Physics B* **120**, 311-316 (June 2015).
304
+
305
+ [18] Hoth, G. W., Pelle, B., Riedl, S., Kitching, J., and Donley, E. A., "Point source atom interferometry with a cloud of finite size," *Applied Physics Letters* **109**, 071113 (Aug. 2016).
306
+
307
+ [19] Durfee, D. S., Shaham, Y. K., and Kasevich, M. A., "Long-Term Stability of an Area-Reversible Atom-Interferometer Sagnac Gyroscope," *Physical Review Letters* **97**, 240801 (Dec. 2006).
308
+
309
+ [20] Grimm, R., Weidemüller, M., and Ovchinnikov, Y. B., "Optical Dipole Traps for Neutral Atoms," in [*Advances In Atomic, Molecular, and Optical Physics*], Bederson, B. and Walther, H., eds., **42**, 95-170, Academic Press (2000).
samples/texts_merged/7563909.md ADDED
The diff for this file is too large to render. See raw diff
 
samples/texts_merged/7569662.md ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ ON SINGULAR POINTS OF ELECTRICAL CIRCUITS
5
+
6
+ By
7
+ SHIGEO ICHIRAKU
8
+
9
+ (Received April 27, 1978)
10
+
11
+ # 1. Introduction.
12
+
13
+ A state of an electrical circuit with $b$ elements is specified by a current vector $i=(i_1, \cdots, i_b) \in \mathbb{R}^b$ and a voltage vector $v=(v_1, \cdots, v_b) \in \mathbb{R}^b$. Let $G$ be the oriented graph of the circuit; we can naturally regard $i$ and $v$ as a real 1-chain and a real 1-cochain of $G$, i.e., $i \in C_1(G)$, $v \in C^1(G)$. Kirchhoff laws restrict the possible states to a $b$-dimensional subspace $K=\text{Ker } \partial \times \text{Im } \partial^* \subset C_1(G) \times C^1(G)$, where $\partial: C_1(G) \to C_0(G)$ ($\partial^*: C^0(G) \to C^1(G)$) is the boundary (coboundary) operator. The characteristics of resistors (possibly with couplings) of the circuit give the restraint that $(i_R, v_R)$ be in an $n_R$-dimensional submanifold $A_R \subset C_1(G_R) \times C^1(G_R)$, where $(i_R, v_R)$ denotes the currents and voltages of resistive elements, $n_R$ the number of resistive elements in the circuit, $G_R$ the subgraph of $G$ consisting of all resistive elements.
14
+
15
+ Combining Kirchhoff laws and the restraint of the characteristics of resistors, we have a space $\Sigma=K \cap A \subset C_1(G) \times C^1(G)$, where $A=\{(i,v) = (i_R, i_L, i_C, v_R, v_L, v_C) : (i_R, v_R) \in A_R\}$, on which the dynamics of the circuit takes place. Now, we assume the transversality of $K$ and $A$, and hence $\Sigma$ is a $(b-n_R)$-dimensional submanifold of $C_1(G) \times C^1(G)$.
16
+
17
+ The dynamics is described by the following form ([6], [4]). Let
18
+
19
+ $$J = \sum C_{mn}(v_C)\, dv_{C,m} \otimes dv_{C,n} - \sum L_{mn}(i_L)\, di_{L,m} \otimes di_{L,n}$$
20
+
21
+ be a 2-tensor on $C_1(G) \times C^1(G)$, where $C_{mn}(v_C)$ ($L_{mn}(i_L)$) is incremental capacitance (inductance) matrix and is assumed symmetric and positive definite ([4]).
22
+
23
+ The vector field $X$ on $\Sigma$ which describes the dynamics satisfies the following:
24
+
25
+ $$ (\pi^* J)_{(i,v)}(X_{(i,v)}, \xi) = (t^* \eta)_{(i,v)}(\xi), \quad \text{for } \xi \in T_{(i,v)}(\Sigma), $$
26
+
27
+ where $\eta$ is a certain 1-form and $\pi$ is the projection to the components of inductor currents and capacitor voltages,
28
+
29
+ $$ \pi: C_1(G) \times C^1(G) \to C_1(G_L) \times C^1(G_C), $$
30
+
31
+ with its domain restricted to $\Sigma$, and
32
+
33
+ $$ t: \Sigma \to C_1(G) \times C^1(G) $$
34
+ ---PAGE_BREAK---
35
+
36
+ is the natural inclusion.
37
+
38
+ If $\pi: \Sigma \to C_1(G_L) \times C^1(G_C)$ is regular at $(i, v)$, i.e., the differential of $\pi$ at $(i, v)$,
39
+ $D\pi(i, v)$ has full rank ($b-n_R$), then $X_{(i,v)}$ is uniquely determined by the above
40
+ equation, for $J$ is a non-degenerate bilinear form at every point. A point $(i, v) \in \Sigma$
41
+ is called a *singular point* iff $\pi$ is not regular at $(i, v) \in \Sigma$. Since $\pi^*J$ is degenerate
42
+ at the singular point $(i, v) \in \Sigma$, $X$ is not determined at $(i, v)$. In fact, there is a
43
+ case in which we cannot define $X_{(i,v)}$ at some singular points consistently with
44
+ other regular points governed by the above equation. In most cases, however,
45
+ we can remove singular points by adding arbitrarily small capacitors and inductors
46
+ appropriately to the original circuits. This procedure is called "*regularization*" and is justified by the fact that it corresponds "*to take account of parasitic elements*
47
+ " in circuit theory ([6], [1]). But at least theoretically there is a circuit which is
48
+ not regularizable, and even in regularizable cases the regularized circuit has more
49
+ reactive elements than the original one ([3]). The purpose of this paper is to
50
+ point out that singular points are derived from conflicts of Kirchhoff laws
51
+ and the restraints of resistive characteristics, therefore in general at singular
52
+ points the solution jumps to another branch of the characteristic submanifold.
53
+ This process is just a kind of "*catastrophe*". Of course, this phenomenon is
54
+ already known by circuit theorists, for example, as "*relaxation oscillation*" or "*dis-
55
+ continuous oscillation*" ([1]).
56
+
57
+ ## 2. Statement of results.
58
+
59
+ A tree $T$ is called proper iff $T$ contains all the capacitance branches and contains no inductance branch. The complement of $T$ in $G$ is called the link of $T$ and is denoted by $L$. If the graph of the circuit has no proper tree, the map $\pi: \Sigma \to C_1(G_L) \times C^1(G_C)$ is singular at any point $(i, v) \in \Sigma$, for the projection $\pi'|_K: K \to C_1(G_L) \times C^1(G_C)$ is already singular. This situation is called "forced degeneracy" ([6], [4]). Excluding the forced degeneracy, we assume the existence of a proper tree.
60
+
61
+ Let $B$ and $Q$ be the fundamental loop matrix and the fundamental cutset matrix with respect to a proper tree. (For the definition of $B$ and $Q$, see [5], [2].)
62
+ And Kirchhoff space $K$ is the image of the following into-isomorphism:
63
+
64
+ $$ \left[ \begin{matrix} B^t & 0 \\ 0 & Q^t \end{matrix} \right] : C_1(L) \times C^1(T) \to C_1(G) \times C^1(G), $$
65
+
66
+ where $L$ is the link of $T$ in $G$. Let $K(i_L, v_C)$ be the affine subspace of $K$ determined by fixing the currents of inductors and the voltages of capacitors, this is possible because vector $(i_L, v_C)$ is subvector of $(i_L, v_T) \in C_1(L) \times C^1(T)$. Clearly the
67
+ ---PAGE_BREAK---
68
+
69
+ space $K(i_L, v_C)$ is the parallel translation in $K$ of $K(0, 0)$ to the point
70
+
71
+ $$b(i_L, v_C) = (i, v) = \begin{bmatrix} B^t & 0 \\ 0 & Q^t \end{bmatrix} \begin{bmatrix} i_L \\ 0 \\ 0 \\ v_C \end{bmatrix},$$
72
+
73
+ here we assume the numbering of the elements is appropriately arranged.
74
+
75
+ Let
76
+
77
+ $$\pi_R: C_1(G) \times C^1(G) \to C_1(G_R) \times C^1(G_R)$$
78
+
79
+ be the natural projection to the currents and voltages of resistors, and $\pi'_R(K(i_L, v_C))= K_0+(i_R, v_R)$ where $K_0=K(0,0) \subset C_1(G_R) \times C^1(G_R)$ and $(i_R, v_R)=\pi'_R(b(i_L, v_C))$.
80
+
81
+ Now we can state our result.
82
+
83
+ **Theorem.** Let $C$ be a circuit whose graph has a proper tree. Suppose $A$ and $K$ are transversal. Then, a point $(i, v) = (i_L, i_C, i_R, v_L, v_C, v_R) \in \Sigma$ is a singular point if and only if the characteristic submanifold $A_R$ and the affine subspace $K_0 + (i_R, v_R)$ are not transverse at $(i_R, v_R)$ in $C_1(G_R) \times C^1(G_R)$.
84
+
85
+ ### 3. Proof of Theorem.
86
+
87
+ Let $i(L)$, $v(T)$, $i(R(L))$, $v(R(T))$ denote the currents of link branches, voltages of tree branches, currents of link resistors, and voltages of tree resistors, respectively. For $(i_L, v_C) \in C_1(G_L) \times C^1(G_C)$, we define the map $k_{i(L),v_C}: C_1(G_{R(L)}) \times C^1(G_{R(T)}) \to K(i_L, v_C)$ by the following:
88
+
89
+ $$k_{i(L),v_C}(i_{R(L)}, v_{R(T)}) = \begin{bmatrix} B^t & 0 \\ 0 & Q^t \end{bmatrix} \begin{bmatrix} i_L \\ i_{R(L)} \\ v_C \\ v_{R(T)} \end{bmatrix}.$$
90
+
91
+ Then $k_{i(L),v_C}$ is an isomorphism with its inverse:
92
+
93
+ $$\pi'_{R(L),R(T)}|K(i_L, v_C) : K(i_L, v_C) \to C_1(G_{R(L)}) \times C^1(G_{R(T)}),$$
94
+
95
+ for $\pi'_{R(L),R(T)} \circ k_{i_L,v_C} = \mathrm{id}_{C_1(G_{R(L)}) \times C^1(G_{R(T)})}$ and $\dim K(i_L, v_C) = n_R = \dim C_1(G_{R(L)}) \times C^1(G_{R(T)})$.
96
+
97
+ And hence, the projection $\pi'_R$ with its domain and range restricted as follows:
98
+
99
+ $$\pi'_R|K(i_L, v_C): K(i_L, v_C) \to \pi'_R(K(i_L, v_C)) = K_0 + (i_R, v_R)$$
100
+
101
+ is also an isomorphism with inverse:
102
+
103
+ $$k_{(i_L,v_C)} \circ \pi'_{(i_L,i_T)}: K_0 + (i_R,v_R) \to K(i_L,v_C).$$
104
+ ---PAGE_BREAK---
105
+
106
+ Now, we prove the theorem. Suppose $\mathfrak{p}=(i, v) \in \Sigma$ is a singular point, i.e., $D\pi(\mathfrak{p}): T_p(\Sigma) \to T_{\pi(p)}(C_1(G_L) \times C^1(G_C))$ is singular. Then $\text{Ker } D\pi'(\mathfrak{p}) \cap T_p(\Sigma) \neq \{0\}$; by projecting this to the space $C_1(G_R) \times C^1(G_R)$, we obtain:
107
+
108
+ $$ \pi'_R(\text{Ker } D\pi'(\mathfrak{p}) \cap T_p(\Sigma)) \neq \{0\}. $$
109
+
110
+ Since
111
+
112
+ $$ T_p(\Sigma) = T_p(A) \cap K = (\pi'_R)^{-1}(T_{\pi(R; p)}(A_R)) \cap K, $$
113
+
114
+ the above equation implies:
115
+
116
+ $$ \pi'_R(K \cap \text{Ker } D\pi'(\mathfrak{p})) \cap T_{\pi(R; p)}(A_R) \neq 0. $$
117
+
118
+ But,
119
+
120
+ $$ \pi'_R(K \cap \text{Ker } D\pi'(\mathfrak{p})) = \pi'_R(K \cap \pi'^{-1}(\mathbf{i}_L, \mathbf{v}_C)) = \pi'_R(K(\mathbf{i}_L, \mathbf{v}_C)) = K_0|_{(\mathbf{i}_R, \mathbf{v}_R)}, $$
121
+
122
+ this shows that $K_0|_{(\mathbf{i}_R, \mathbf{v}_R)}$ and $A_R$ are not transverse at $(\mathbf{i}_R, \mathbf{v}_R)$. This proves the sufficiency of the theorem.
123
+
124
+ Conversely, if $K_0|_{(\mathbf{i}_R, \mathbf{v}_R)}$ and $A_R$ are not transverse at $(\mathbf{i}_R, \mathbf{v}_R)$ with $\mathfrak{p}= (\mathbf{i}_{R}, \mathbf{i}_{L}, \mathbf{i}_{C}, \mathbf{v}_{R}, \mathbf{v}_{L}, \mathbf{v}_{C}) \in \Sigma$, then
125
+
126
+ $$ \pi'_R(K \cap \text{Ker } D\pi'(\mathfrak{p})) \cap T_{\pi(R; p)}(A_R) \neq 0. $$
127
+
128
+ Since $\pi'_R|K(i_L, v_C)$ is an isomorphism,
129
+
130
+ $$ (K \cap \text{Ker } D\pi'(\mathfrak{p})) \cap (\pi'_R K(i_L, v_C))^{-1} (T_{\pi(R; p)}(A_R)) \neq 0, $$
131
+
132
+ this means:
133
+
134
+ $$ \text{Ker } D\pi'(\mathfrak{p}) \cap T_p(\Sigma) \neq \{0\}. $$
135
+
136
+ This proves the necessity of the theorem.
137
+
138
+ **Remark.** In terms of B and Q, the space $K_0 \subseteq C_1(G_R) \times C^1(G_R)$ is given as follows. Let us decompose the matrices B and Q into the following forms:
139
+
140
+ $$ B = \begin{bmatrix} 1 & 0 & A_{RT} & A_{RC} \\ 0 & 1 & A_{LT} & A_{LC} \end{bmatrix} \quad \text{(rows: } R(L),\ L\text{; columns: } R(L),\ L,\ R(T),\ C\text{)}, $$
141
+
142
+ $$ Q = \begin{bmatrix} -A_{RT}^t & -A_{LT}^t & 1 & 0 \\ -A_{RC}^t & -A_{LC}^t & 0 & 1 \end{bmatrix} \quad \text{(rows: } R(T),\ C\text{; columns: } R(L),\ L,\ R(T),\ C\text{)}. $$
143
+
144
+ Then, it is easily seen that
145
+
146
+ $$ K_0 = \pi'_R(K(0, 0)) $$
147
+
148
+ $$ = \{(i_R, v_R) = (i_{R(L)}, i_{R(T)}, v_{R(L)}, v_{R(T)}) | i_{R(T)} = A_{RT}^t i_{R(L)}, v_{R(L)} = -A_{RT} v_{R(T)}, (i_{R(L)}, v_{R(T)}) \in C_1(G_{R(L)}) \times C^1(G_{R(T)})\}. $$
149
+
150
+ This means that the space $K_0$ is just the Kirchhoff space of the resistive circuit obtained from the given one by open-circuiting all inductance branches and short-circuiting all capacitance branches.
151
+ ---PAGE_BREAK---
152
+
153
+ **Examples 1. (Example 5 in [6].)** Consider a circuit of Fig. 1 consisting of one non-linear resistor with characteristic of Fig. 2, one capacitor and one inductor. By Remark, $K_0$ is Kirchhoff space of Fig. 3, i.e., $K_0$ is just v-axis in Fig. 2. Therefore at $p_i$ the solution must jump into $p'_i$.
154
+
155
+ 2. (Example 6 in [6], A regularization of the above example.) To regularize the above example, we add a parasitic element $C'$ in parallel to L as in Fig. 4.
156
+
157
+ Fig. 1.
158
+
159
+ Fig. 2.
160
+
161
+ Fig. 3.
162
+
163
+ Fig. 4.
164
+
165
+ Fig. 5.
166
+
167
+ Fig. 6.
168
+ ---PAGE_BREAK---
169
+
170
+ Then, $K_0$ is the Kirchhoff space of Fig. 5, i.e., $K_0$ is just the *i*-axis. Therefore $K_0+(i_R, v_R)$ is always transverse to $A_R$, and hence the circuit of Fig. 4 has no singular point at all.
171
+
172
+ Finally, we propose an engineering problem concerning catastrophe theory.
173
+
174
+ **Problem.** Is it possible to make a device (coupled resistors) with its characteristic “cusp type singularity”, as is shown in Fig. 6?
175
+
176
+ Certainly, the Esaki diode has a characteristic of “fold type singularity”. The “cusp type singularity” is the simplest singularity next to the “fold type singularity”. The cusp type device may be very useful, as was the case with Esaki diodes.
177
+
178
+ **Addendum.** Professor H. Kawakami informed the author that the coupled resistors with “cusp type” characteristics ($y=-3bx+x^3$) could be constructed from operational amplifiers and nonlinear analog elements [7].
179
+
180
+ References
181
+
182
+ [1] A. A. Andronov, A. A. Vitt and S. E. Khaikin: *Theory of Oscillators*. Pergamon Press.
183
+
184
+ [2] S. Ichiraku: *On the transversality conditions in electrical circuits*. Yokohama Math. J. 25 (1977), 85-89.
185
+
186
+ [3] E. Ihrig: *The regularization of nonlinear electrical circuits*, Proc. of A.M.S., **47** (1975), 179-183.
187
+
188
+ [4] T. Matsumoto: *On the dynamics of electrical networks*, J. Differential Equation, **21** (1976), 179-196.
189
+
190
+ [5] R. Rohrer: *Circuit Theory*. McGraw-Hill, 1970.
191
+
192
+ [6] S. Smale: *On the mathematical foundation of electrical circuit theory*, J. Differential Geometry, 7 (1972), 193-210.
193
+
194
+ [7] H. Kawakami, Kunihiro Kobayashi and T. Matsumura, *A realization of voltage controlled nonlinear resistors*, Trans. IECE, (1977) 60-A No. 10, 990-991, (Japanese).
195
+
196
+ Department of Mathematics
197
+ Yokohama City University
198
+ Yokohama, Japan
samples/texts_merged/904681.md ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---PAGE_BREAK---
3
+
4
+ $$B_J(5840)$$
5
+
6
+ $$I(J^P) = \frac{1}{2}(??)$$
7
+
8
+ I, J, P need confirmation.
9
+
10
+ OMITTED FROM SUMMARY TABLE
11
+
12
+ Quantum numbers shown are quark-model predictions.
13
+
14
+ ## $B_J(5840)^+$ MASS
15
+
16
+ OUR FIT uses $m_{B0}$ and $m_{B_J(5840)^+} - m_{B0}$ to determine $m_{B_J(5840)^+}$.
17
+
18
+ VALUE (MeV)
19
+
20
+ DOCUMENT ID
21
+
22
+ 5851 ± 19 OUR FIT
23
+
24
+ $m_{B_J(5840)^+} - m_{B0}$
25
+
26
+ VALUE (MeV)
27
+
28
+ EVTS
29
+
30
+ DOCUMENT ID
31
+
32
+ TECN
33
+
34
+ COMMENT
35
+
36
+ 571 ± 19 OUR FIT
37
+
38
+ 571 ± 13 ± 14
39
+
40
+ 7k
41
+
42
+ $^1$AAIJ
43
+
44
+ 15AB LHCB pp at 7, 8 TeV
45
+
46
+ • • • We do not use the following data for averages, fits, limits, etc. • • •
47
+
48
+ 595 ± 26 ± 14
49
+
50
+ 7k
51
+
52
+ $^2$AAIJ
53
+
54
+ 15AB LHCB pp at 7, 8 TeV
55
+
56
+ $^1$AAIJ 15AB reports $[m_{B_J^+} - m_{B^0}] - m_{\pi^+} = 431 \pm 13 \pm 14$ MeV which we adjust by the $\pi^+$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses two relativistic Breit-Wigner functions in the fit for mass difference.
57
+
58
+ $^2$AAIJ 15AB reports $[m_{B_J^+} - m_{B^0}] - m_{\pi^+} = 455 \pm 26 \pm 14$ MeV which we adjust by the $\pi^+$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses three relativistic Breit-Wigner functions in the fit for mass difference.
59
+
60
+ $m_{B_J(5840)^+} - m_{B^*0}$
61
+
62
+ VALUE (MeV)
63
+
64
+ EVTS
65
+
66
+ DOCUMENT ID
67
+
68
+ TECN
69
+
70
+ COMMENT
71
+
72
+ • • • We do not use the following data for averages, fits, limits, etc. • • •
73
+
74
+ 565 ± 15 ± 14
75
+
76
+ 7k
77
+
78
+ $^1$AAIJ
79
+
80
+ 15AB LHCB pp at 7, 8 TeV
81
+
82
+ $^1$AAIJ 15AB reports $[m_{B_J^+} - m_{B^0}] - (m_{B^{*+}} - m_{B^+}) - m_{\pi^+} = 425 \pm 15 \pm 14$ MeV which we adjust by the $\pi^+$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = -(-1)^J$, $(m_{B^{*0}} - m_{B^0}) = (m_{B^{*+}} - m_{B^+}) = 45.01 \pm 0.30 \pm 0.23$ MeV, and uses three relativistic Breit-Wigner functions in the fit for mass difference.
83
+
84
+ ## $B_J(5840)^0$ MASS
85
+
86
+ OUR FIT uses $m_{B^+}$ and $m_{B_J(5840)^0} - m_{B^+}$ to determine $m_{B_J(5840)^0}$.
87
+
88
+ VALUE (MeV)
89
+
90
+ DOCUMENT ID
91
+
92
+ 5863 ± 9 OUR FIT
93
+
94
+ $m_{B_J(5840)^0} - m_{B^+}$
95
+
96
+ VALUE (MeV)
97
+
98
+ EVTS
99
+
100
+ DOCUMENT ID
101
+
102
+ TECN
103
+
104
+ COMMENT
105
+
106
+ 584 ± 9 OUR FIT
107
+
108
+ 584 ± 5 ± 7
109
+
110
+ 12k
111
+
112
+ $^1$AAIJ
113
+
114
+ 15AB LHCB pp at 7, 8 TeV
115
+ ---PAGE_BREAK---
116
+
117
+ • • • We do not use the following data for averages, fits, limits, etc. • • •
118
+
119
+ $$610 \pm 22 \pm 7 \qquad 12k \qquad ^{2}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$
120
+
121
+ $^{1}$AAIJ 15AB reports $[m_{B^0_j} - m_{B^+}] - m_{\pi^-} = 444 \pm 5 \pm 7$ MeV which we adjust by the $\pi^-$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses two relativistic Breit-Wigner functions in the fit for mass difference.
122
+
123
+ $^{2}$AAIJ 15AB reports $[m_{B^0_j} - m_{B^+}] - m_{\pi^-} = 471 \pm 22 \pm 7$ MeV which we adjust by the $\pi^-$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses three relativistic Breit-Wigner functions in the fit for mass difference.
124
+
125
+ ## $m_{B_j(5840)^0} - m_{B^{*+}}$
126
+
127
+ <table><thead><tr><th>VALUE (MeV)</th><th>EVTS</th><th>DOCUMENT ID</th><th>TECN</th><th>COMMENT</th></tr></thead><tbody><tr><td>584 ± 5 ± 7</td><td>12k</td><td><sup>1</sup>AAIJ</td><td>15AB LHCB</td><td>pp at 7, 8 TeV</td></tr></tbody></table>
128
+
129
+ $^{1}$AAIJ 15AB reports $[m_{B^0_j} - m_{B^+}] - (m_{B^{*+}} - m_{B^+}) - m_{\pi^-} = 444 \pm 5 \pm 7$ MeV which we adjust by the $\pi^-$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = -(-1)^J$, $(m_{B^{*+}} - m_{B^+}) = 45.01 \pm 0.30 \pm 0.23$ MeV, and uses three relativistic Breit-Wigner functions in the fit for mass difference.
130
+
131
+ ## $B_j(5840)^+$ WIDTH
132
+
133
+ <table><thead><tr><th>VALUE (MeV)</th><th>EVTS</th><th>DOCUMENT ID</th><th>TECN</th><th>COMMENT</th></tr></thead><tbody><tr><td>224 ± 24 ± 80</td><td>7k</td><td><sup>1</sup>AAIJ</td><td>15AB LHCB</td><td>pp at 7, 8 TeV</td></tr></tbody></table>
134
+
135
+ • • • We do not use the following data for averages, fits, limits, etc. • • •
136
+
137
+ $$215 \pm 27 \pm 80 \qquad 7k \qquad ^{2}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$
138
+
139
+ $$229 \pm 27 \pm 80 \qquad 7k \qquad ^{3}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$
140
+
141
+ $^{1}$Assuming $P = (-1)^J$ and using two relativistic Breit-Wigner functions in the fit for mass difference.
142
+
143
+ $^{2}$Assuming $P = (-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference.
144
+
145
+ $^{3}$Assuming $P = -(-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference.
146
+
147
+ ## $B_j(5840)^0$ WIDTH
148
+
149
+ <table><thead><tr><th>VALUE (MeV)</th><th>EVTS</th><th>DOCUMENT ID</th><th>TECN</th><th>COMMENT</th></tr></thead><tbody><tr><td>127 ± 17 ± 34</td><td>12k</td><td><sup>1</sup>AAIJ</td><td>15AB LHCB</td><td>pp at 7, 8 TeV</td></tr></tbody></table>
150
+
151
+ • • • We do not use the following data for averages, fits, limits, etc. • • •
152
+
153
+ $$107 \pm 20 \pm 34 \qquad 12k \qquad ^{2}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$
154
+
155
+ $$119 \pm 17 \pm 34 \qquad 12k \qquad ^{3}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$
156
+
157
+ $^{1}$Assuming $P = (-1)^J$ and using two relativistic Breit-Wigner functions in the fit for mass difference.
158
+
159
+ $^{2}$Assuming $P = (-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference.
160
+
161
+ $^{3}$Assuming $P = -(-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference.
162
+ ---PAGE_BREAK---
163
+
164
+ B$_J$(5840) DECAY MODES
165
+
166
+ <table>
167
+ <thead>
168
+ <tr>
169
+ <th></th>
170
+ <th>Mode</th>
171
+ <th>Fraction (Γ<sub>j</sub>/Γ)</th>
172
+ </tr>
173
+ </thead>
174
+ <tbody>
175
+ <tr>
176
+ <td>Γ<sub>1</sub></td>
177
+ <td>B* π</td>
178
+ <td>seen</td>
179
+ </tr>
180
+ <tr>
181
+ <td>Γ<sub>2</sub></td>
182
+ <td>B π</td>
183
+ <td>possibly seen</td>
184
+ </tr>
185
+ </tbody>
186
+ </table>
187
+
188
+ B$_J$(5840) BRANCHING RATIOS
189
+
190
+ $$
191
+ \begin{tabular}{lcccccc}
192
+ \hline
193
+ $\Gamma(B^*\pi)/\Gamma_{\text{total}}$ & & & & & $\Gamma_1/\Gamma$ & \\
194
+ \cline{2-7}
195
+ \multicolumn{1}{c|}{\underline{VALUE}} & \multicolumn{1}{c|}{\underline{EVTS}} & \multicolumn{1}{c}{\underline{DOCUMENT ID}} & \multicolumn{1}{c}{\underline{TECN}} & \multicolumn{1}{c}{\underline{CHG}} & \multicolumn{1}{c}{\underline{COMMENT}} & \\
196
+ \cline{2-7}
197
+ \multicolumn{1}{c|}{seen} & \multicolumn{1}{c|}{7k} & \multicolumn{1}{c}{AAIJ} & \multicolumn{1}{c}{15AB LHCB} & \multicolumn{1}{c}{$\pm$} & \multicolumn{1}{c}{pp at 7, 8 TeV} & \\
198
+ \cline{2-7}
199
+ \multicolumn{1}{c|}{seen} & \multicolumn{1}{c|}{12k} & \multicolumn{1}{c}{AAIJ} & \multicolumn{1}{c}{15AB LHCB} & \multicolumn{1}{c}{$0$} & \multicolumn{1}{c}{pp at 7, 8 TeV} & \\
200
+ \hline
201
+ \end{tabular}
202
+ $$
203
+
204
+ $$
205
+ \begin{tabular}{lcccccc}
206
+ \hline
207
+ $\Gamma(B\pi)/\Gamma_{\text{total}}$ & & & & & $\Gamma_2/\Gamma$ & \\
208
+ \cline{2-7}
209
+ \multicolumn{1}{c|}{\underline{VALUE}} & \multicolumn{1}{c|}{\underline{EVTS}} & \multicolumn{1}{c}{\underline{DOCUMENT ID}} & \multicolumn{1}{c}{\underline{TECN}} & \multicolumn{1}{c}{\underline{CHG}} & \multicolumn{1}{c}{\underline{COMMENT}} & \\
210
+ \cline{2-7}
211
+ \multicolumn{1}{c|}{\textbf{possibly seen}} & \multicolumn{1}{c|}{7k} & \multicolumn{1}{c}{\footnotesize 1 AAIJ} & \multicolumn{1}{c}{\footnotesize 15AB LHCB} & \multicolumn{1}{c}{$\pm$} & \multicolumn{1}{c}{\footnotesize pp at 7, 8 TeV} & \\
212
+ \cline{2-7}
213
+ \multicolumn{1}{c|}{\textbf{possibly seen}} & & \multicolumn{1}{c}{\footnotesize 1 AAIJ} & \multicolumn{1}{c}{\footnotesize 15AB LHCB} & \multicolumn{1}{c}{$0$} & \multicolumn{1}{c}{\footnotesize pp at 7, 8 TeV} & \\
214
+ \hline
215
+ \end{tabular}
216
+ $$
217
+
218
+ ¹A Bπ decay is forbidden from a $P = -(-1)^J$ parent, whereas B*π is allowed.
219
+
220
+ B$_J$(5840) REFERENCES
221
+
222
+ AAIJ
223
+
224
+ 15AB JHEP 1504 024
225
+
226
+ R. Aaij et al.
227
+
228
+ (LHCb Collab.)