CombinedText stringlengths 4 3.42M |
|---|
use seed::prelude::*;
use crate::Msg;
/// Renders the "Perfect Numbers" page: a heading plus a static HTML table of
/// the 50 known perfect numbers (rank, formula, decimal digit count,
/// discoverer, and TXT/ZIP download links on static.bigprimes.net).
pub fn render() -> seed::dom_types::Node<Msg> {
div![
h1!["The Perfect Numbers"],
br![],
br![],
br![],
//todo: replace with rust elements
// The whole table is injected as one raw HTML blob that Seed parses at
// runtime via El::from_html. Everything inside the string literal below is
// page content — do not edit it without changing the rendered output.
El::from_html("<table cellpadding=\"0\" cellspacing=\"0\" border=\"0\" class=\"perfecttable text\">
<tbody><tr>
<td width=\"60\"><b>Rank</b></td>
<td width=\"170\"><b>Perfect Number</b></td>
<td width=\"80\"><b>Digits</b></td>
<td width=\"290\"><b>Discoverer</b></td>
<td align=\"center\" colspan=\"2\"><b>Downloads</b></td>
</tr>
<tr>
<td>50</td>
<td>2<sup>77232916</sup> × (2<sup>77232917</sup>-1)</td>
<td>46498850</td>
<td>2017 Jonathan Pace, George Woltman, Scott Kurowski, Aaron Blosser, et al..</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/50.zip\">ZIP</a></td>
</tr>
<tr>
<td>49</td>
<td>2<sup>74207280</sup> × (2<sup>74207281</sup>-1)</td>
<td>44677235</td>
<td>2016 Cooper, Woltman, Kurowski, Blosser et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/49.zip\">ZIP</a></td>
</tr>
<tr>
<td>48</td>
<td>2<sup>57885160</sup> × (2<sup>57885161</sup>-1)</td>
<td>34850340</td>
<td>2013 Cooper, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/48.zip\">ZIP</a></td>
</tr>
<tr>
<td>47</td>
<td>2<sup>43112608</sup> × (2<sup>43112609</sup>-1)</td>
<td>25956377</td>
<td>2008 Smith, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/47.zip\">ZIP</a></td>
</tr>
<tr>
<td>46</td>
<td>2<sup>42643800</sup> × (2<sup>42643801</sup>-1)</td>
<td>42643801</td>
<td>2009 Strindmo, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/46.zip\">ZIP</a></td>
</tr>
<tr>
<td>45</td>
<td>2<sup>37156666</sup> × (2<sup>37156667</sup>-1)</td>
<td>22370543</td>
<td>2008 Elvenich, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/45.zip\">ZIP</a></td>
</tr>
<tr>
<td>44</td>
<td>2<sup>32582656</sup> × (2<sup>32582657</sup>-1)</td>
<td>19616714</td>
<td>2006 Cooper, Boone, Woltman, Kurowski, et al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/44.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/44.zip\">ZIP</a></td>
</tr>
<tr>
<td>43</td>
<td>2<sup>30402456</sup> × (2<sup>30402457</sup>-1)</td>
<td>18304103</td>
<td>2005 Cooper, Boone, Woltman, Kurowski, et al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/43.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/43.zip\">ZIP</a></td>
</tr>
<tr>
<td>42</td>
<td>2<sup>25964950</sup> × (2<sup>25964951</sup>-1)</td>
<td>15632458</td>
<td>2005 Nowak, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/42.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/42.zip\">ZIP</a></td>
</tr>
<tr>
<td>41</td>
<td>2<sup>24036582</sup> × (2<sup>24036583</sup>-1)</td>
<td>14471465</td>
<td>2004 Findley, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/41.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/41.zip\">ZIP</a></td>
</tr>
<tr>
<td>40</td>
<td>2<sup>20996010</sup> × (2<sup>20996011</sup>-1)</td>
<td>12640858</td>
<td>2003 Shafer, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/40.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/40.zip\">ZIP</a></td>
</tr>
<tr>
<td>39</td>
<td>2<sup>13466916</sup> × (2<sup>13466917</sup>-1)</td>
<td>8107892</td>
<td>2001 Cameron, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/39.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/39.zip\">ZIP</a></td>
</tr>
<tr>
<td>38</td>
<td>2<sup>6972592</sup> × (2<sup>6972593</sup>-1)</td>
<td>4197919</td>
<td>1999 Hajratwala, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/38.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/38.zip\">ZIP</a></td>
</tr>
<tr>
<td>37</td>
<td>2<sup>3021376</sup> × (2<sup>3021377</sup>-1)</td>
<td>1819050</td>
<td>1998 Clarkson, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/37.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/37.zip\">ZIP</a></td>
</tr>
<tr>
<td>36</td>
<td>2<sup>2976220</sup> × (2<sup>2976221</sup>-1)</td>
<td>1791864</td>
<td>1997 Spence, Woltman, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/36.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/36.zip\">ZIP</a></td>
</tr>
<tr>
<td>35</td>
<td>2<sup>1398268</sup> × (2<sup>1398269</sup>-1)</td>
<td>841842</td>
<td>1996 Armengaud, Woltman, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/35.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/35.zip\">ZIP</a></td>
</tr>
<tr>
<td>34</td>
<td>2<sup>1257786</sup> × (2<sup>1257787</sup>-1)</td>
<td>757263</td>
<td>1996 Slowinski&Gage</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/34.txt\">TXT</a></td>
</tr>
<tr>
<td>33</td>
<td>2<sup>859432</sup> × (2<sup>859433</sup>-1)</td>
<td>517430</td>
<td>1994 Slowinski&Gage</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/33.txt\">TXT</a></td>
</tr>
<tr>
<td>32</td>
<td>2<sup>756838</sup> × (2<sup>756839</sup>-1)</td>
<td>455663</td>
<td>1992 Slowinski&Gage</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/32.txt\">TXT</a></td>
</tr>
<tr>
<td>31</td>
<td>2<sup>216090</sup> × (2<sup>216091</sup>-1)</td>
<td>130100</td>
<td>1985 Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/31.txt\">TXT</a></td>
</tr>
<tr>
<td>30</td>
<td>2<sup>132048</sup> × (2<sup>132049</sup>-1)</td>
<td>79502</td>
<td>1983 Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/30.txt\">TXT</a></td>
</tr>
<tr>
<td>29</td>
<td>2<sup>110502</sup> × (2<sup>110503</sup>-1)</td>
<td>66530</td>
<td>1988 Colquitt&Welsh</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/29.txt\">TXT</a></td>
</tr>
<tr>
<td>28</td>
<td>2<sup>86242</sup> × (2<sup>86243</sup>-1)</td>
<td>51924</td>
<td>1982 Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/28.txt\">TXT</a></td>
</tr>
<tr>
<td>27</td>
<td>2<sup>44496</sup> × (2<sup>44497</sup>-1)</td>
<td>26790</td>
<td>1979 Nelson&Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/27.txt\">TXT</a></td>
</tr>
<tr>
<td>26</td>
<td>2<sup>23208</sup> × (2<sup>23209</sup>-1)</td>
<td>13973</td>
<td>1979 Noll</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/26.txt\">TXT</a></td>
</tr>
<tr>
<td>25</td>
<td>2<sup>21700</sup> × (2<sup>21701</sup>-1)</td>
<td>13066</td>
<td>1978 Noll&Nickel</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/25.txt\">TXT</a></td>
</tr>
<tr>
<td>24</td>
<td>2<sup>19936</sup> × (2<sup>19937</sup>-1)</td>
<td>12003</td>
<td>1971 Tuckerman</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/24.txt\">TXT</a></td>
</tr>
<tr>
<td>23</td>
<td>2<sup>11212</sup> × (2<sup>11213</sup>-1)</td>
<td>6751</td>
<td>1963 Gillies</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/23.txt\">TXT</a></td>
</tr>
<tr>
<td>22</td>
<td>2<sup>9940</sup> × (2<sup>9941</sup>-1)</td>
<td>5985</td>
<td>1963 Gillies</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/22.txt\">TXT</a></td>
</tr>
<tr>
<td>21</td>
<td>2<sup>9688</sup> × (2<sup>9689</sup>-1)</td>
<td>5834</td>
<td>1963 Gillies</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/21.txt\">TXT</a></td>
</tr>
<tr>
<td>20</td>
<td>2<sup>4422</sup> × (2<sup>4423</sup>-1)</td>
<td>2663</td>
<td>1961 Hurwitz</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/20.txt\">TXT</a></td>
</tr>
<tr>
<td>19</td>
<td>2<sup>4252</sup> × (2<sup>4253</sup>-1)</td>
<td>2561</td>
<td>1961 Hurwitz</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/19.txt\">TXT</a></td>
</tr>
<tr>
<td>18</td>
<td>2<sup>3216</sup> × (2<sup>3217</sup>-1)</td>
<td>1937</td>
<td>1957 Riesel</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/18.txt\">TXT</a></td>
</tr>
<tr>
<td>17</td>
<td>2<sup>2280</sup> × (2<sup>2281</sup>-1)</td>
<td>1373</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/17.txt\">TXT</a></td>
</tr>
<tr>
<td>16</td>
<td>2<sup>2202</sup> × (2<sup>2203</sup>-1)</td>
<td>1327</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/16.txt\">TXT</a></td>
</tr>
<tr>
<td>15</td>
<td>2<sup>1278</sup> × (2<sup>1279</sup>-1)</td>
<td>770</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/15.txt\">TXT</a></td>
</tr>
<tr>
<td>14</td>
<td>2<sup>606</sup> × (2<sup>607</sup>-1)</td>
<td>366</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/14.txt\">TXT</a></td>
</tr>
<tr>
<td>13</td>
<td>2<sup>520</sup> × (2<sup>521</sup>-1)</td>
<td>314</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/13.txt\">TXT</a></td>
</tr>
<tr>
<td>12</td>
<td>2<sup>126</sup> × (2<sup>127</sup>-1)</td>
<td>77</td>
<td>1876 Lucas</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/12.txt\">TXT</a></td>
</tr>
<tr>
<td>11</td>
<td>2<sup>106</sup> × (2<sup>107</sup>-1)</td>
<td>65</td>
<td>1914 Powers</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/11.txt\">TXT</a></td>
</tr>
<tr>
<td>10</td>
<td>2<sup>88</sup> × (2<sup>89</sup>-1)</td>
<td>54</td>
<td>1911 Powers</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/10.txt\">TXT</a></td>
</tr>
<tr>
<td>9</td>
<td>2<sup>60</sup> × (2<sup>61</sup>-1)</td>
<td>37</td>
<td>1883 Pervushin</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/9.txt\">TXT</a></td>
</tr>
<tr>
<td>8</td>
<td>2<sup>30</sup> × (2<sup>31</sup>-1)</td>
<td>19</td>
<td>1772 Euler</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/8.txt\">TXT</a></td>
</tr>
<tr>
<td>7</td>
<td>2<sup>18</sup> × (2<sup>19</sup>-1)</td>
<td>12</td>
<td>1588 Cataldi</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/7.txt\">TXT</a></td>
</tr>
<tr>
<td>6</td>
<td>2<sup>16</sup> × (2<sup>17</sup>-1)</td>
<td>10</td>
<td>1588 Cataldi</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/6.txt\">TXT</a></td>
</tr>
<tr>
<td>5</td>
<td>2<sup>12</sup> × (2<sup>13</sup>-1)</td>
<td>8</td>
<td>1456 ?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/5.txt\">TXT</a></td>
</tr>
<tr>
<td>4</td>
<td>2<sup>6</sup> × (2<sup>7</sup>-1)</td>
<td>4</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/4.txt\">TXT</a></td>
</tr>
<tr>
<td>3</td>
<td>2<sup>4</sup> × (2<sup>5</sup>-1)</td>
<td>3</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/3.txt\">TXT</a></td>
</tr>
<tr>
<td>2</td>
<td>2<sup>2</sup> × (2<sup>3</sup>-1)</td>
<td>2</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/2.txt\">TXT</a></td>
</tr>
<tr>
<td>1</td>
<td>2<sup>1</sup> × (2<sup>2</sup>-1)</td>
<td>1</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/1.txt\">TXT</a></td>
</tr>
</tbody></table>"),
]
}
Began refactoring the render() function: replacing the raw HTML table blob (El::from_html) with native Seed element macros driven by a data table in perfects_utils.
use seed::prelude::*;
use crate::Msg;
mod perfects_utils {
    /// Known perfect numbers as `[p, digits]` pairs: the n-th entry's perfect
    /// number is 2^(p-1) × (2^p − 1) and has `digits` decimal digits.
    /// Index 0 is a faux entry so that vector index == rank.
    /// NOTE(review): only rank 1 is populated so far — remaining ranks (2..=50)
    /// still need transcribing from the legacy HTML table.
    pub fn perfects() -> Vec<Vec<usize>> {
        vec![
            //vec![p,digits]
            vec![0, 0], //faux zero entry to make things easier when reading this vector
            vec![2, 1],
        ]
    }

    /// Year and discoverer of the n-th perfect number, exactly as shown on the
    /// legacy HTML page; returns "?" for unknown discoverers (ranks 1-4) and
    /// for out-of-range ranks. Added because `render()` calls it but it was
    /// never defined; data transcribed from the legacy table in this file.
    pub fn perfects_discovery_dates(n: usize) -> String {
        let entry = match n {
            1..=4 => "?",
            5 => "1456 ?",
            6 | 7 => "1588 Cataldi",
            8 => "1772 Euler",
            9 => "1883 Pervushin",
            10 => "1911 Powers",
            11 => "1914 Powers",
            12 => "1876 Lucas",
            13..=17 => "1952 Robinson",
            18 => "1957 Riesel",
            19 | 20 => "1961 Hurwitz",
            21..=23 => "1963 Gillies",
            24 => "1971 Tuckerman",
            25 => "1978 Noll&Nickel",
            26 => "1979 Noll",
            27 => "1979 Nelson&Slowinski",
            28 => "1982 Slowinski",
            29 => "1988 Colquitt&Welsh",
            30 => "1983 Slowinski",
            31 => "1985 Slowinski",
            32 => "1992 Slowinski&Gage",
            33 => "1994 Slowinski&Gage",
            34 => "1996 Slowinski&Gage",
            35 => "1996 Armengaud, Woltman, et. al.",
            36 => "1997 Spence, Woltman, et. al.",
            37 => "1998 Clarkson, Woltman, Kurowski, et. al.",
            38 => "1999 Hajratwala, Woltman, Kurowski, et. al.",
            39 => "2001 Cameron, Woltman, Kurowski, et. al.",
            40 => "2003 Shafer, Woltman, Kurowski, et. al.",
            41 => "2004 Findley, Woltman, Kurowski, et. al.",
            42 => "2005 Nowak, Woltman, Kurowski, et. al.",
            43 => "2005 Cooper, Boone, Woltman, Kurowski, et al.",
            44 => "2006 Cooper, Boone, Woltman, Kurowski, et al.",
            45 => "2008 Elvenich, Woltman, Kurowski, et al.",
            46 => "2009 Strindmo, Woltman, Kurowski, et al.",
            47 => "2008 Smith, Woltman, Kurowski, et al.",
            48 => "2013 Cooper, Woltman, Kurowski, et al.",
            49 => "2016 Cooper, Woltman, Kurowski, Blosser et al.",
            50 => "2017 Jonathan Pace, George Woltman, Scott Kurowski, Aaron Blosser, et al..",
            _ => "?",
        };
        entry.to_string()
    }
}
/// Renders the "Perfect Numbers" page from the `perfects_utils` data table
/// instead of the legacy raw-HTML blob: one row per known perfect number,
/// highest rank first, with a download link per row.
pub fn render() -> seed::dom_types::Node<Msg> {
    let mut rows = vec![];
    let perfects = perfects_utils::perfects();
    // Index 0 is the faux placeholder entry, so ranks start at 1.
    for n in 1..perfects.len() {
        let p = perfects[n][0]; // Mersenne exponent: the number is 2^(p-1) × (2^p − 1)
        // Archive files are named after the bare rank (e.g. .../perfect/44.txt);
        // the legacy page's links carry no "M" prefix.
        let download_txt = format!("https://static.bigprimes.net/archive/perfect/{}.txt", n);
        let download_zip = format!("https://static.bigprimes.net/archive/perfect/{}.zip", n);
        rows.push(tr![
            td![n.to_string()], //rank
            //perfect number as a formula: 2^(p-1) × (2^p − 1), matching the legacy table
            td!["2", sup![(p - 1).to_string()], " × (2", sup![p.to_string()], "-1)"],
            td![perfects[n][1].to_string()], //digits in length
            td![perfects_utils::perfects_discovery_dates(n)], //discovery
            // Per the legacy table, ranks 45+ only offer a ZIP; lower ranks a TXT.
            // NOTE(review): ranks 35-44 offered both links — TODO render both.
            if n >= 45 {
                a![attrs!{At::Href => download_zip}, "ZIP"]
            } else {
                a![attrs!{At::Href => download_txt}, "TXT"]
            } //downloads
        ]);
    }
    // The page lists the highest rank first.
    rows.reverse();
    div![
        // Title, table class and headers restored to match the perfect-numbers
        // page being refactored (they had been copy-pasted from a Mersenne page).
        h1!["The Perfect Numbers"],
        br![],
        br![],
        br![],
        table![
            attrs!{At::Class => "perfecttable text"},
            tbody![
                tr![
                    td![b!["Rank"]],
                    td![b!["Perfect Number"]],
                    td![b!["Digits"]],
                    td![b!["Discoverer"]],
                    td![b!["Downloads"]]
                ],
                rows
            ]
        ]
    ]
}
/// Renders the "Perfect Numbers" page: a heading plus a static HTML table of
/// the 50 known perfect numbers (rank, formula, decimal digit count,
/// discoverer, and TXT/ZIP download links on static.bigprimes.net).
pub fn render() -> seed::dom_types::Node<Msg> {
div![
h1!["The Perfect Numbers"],
br![],
br![],
br![],
//todo: replace with rust elements
// The whole table is injected as one raw HTML blob that Seed parses at
// runtime via El::from_html. Everything inside the string literal below is
// page content — do not edit it without changing the rendered output.
El::from_html("<table cellpadding=\"0\" cellspacing=\"0\" border=\"0\" class=\"perfecttable text\">
<tbody><tr>
<td width=\"60\"><b>Rank</b></td>
<td width=\"170\"><b>Perfect Number</b></td>
<td width=\"80\"><b>Digits</b></td>
<td width=\"290\"><b>Discoverer</b></td>
<td align=\"center\" colspan=\"2\"><b>Downloads</b></td>
</tr>
<tr>
<td>50</td>
<td>2<sup>77232916</sup> × (2<sup>77232917</sup>-1)</td>
<td>46498850</td>
<td>2017 Jonathan Pace, George Woltman, Scott Kurowski, Aaron Blosser, et al..</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/50.zip\">ZIP</a></td>
</tr>
<tr>
<td>49</td>
<td>2<sup>74207280</sup> × (2<sup>74207281</sup>-1)</td>
<td>44677235</td>
<td>2016 Cooper, Woltman, Kurowski, Blosser et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/49.zip\">ZIP</a></td>
</tr>
<tr>
<td>48</td>
<td>2<sup>57885160</sup> × (2<sup>57885161</sup>-1)</td>
<td>34850340</td>
<td>2013 Cooper, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/48.zip\">ZIP</a></td>
</tr>
<tr>
<td>47</td>
<td>2<sup>43112608</sup> × (2<sup>43112609</sup>-1)</td>
<td>25956377</td>
<td>2008 Smith, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/47.zip\">ZIP</a></td>
</tr>
<tr>
<td>46</td>
<td>2<sup>42643800</sup> × (2<sup>42643801</sup>-1)</td>
<td>42643801</td>
<td>2009 Strindmo, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/46.zip\">ZIP</a></td>
</tr>
<tr>
<td>45</td>
<td>2<sup>37156666</sup> × (2<sup>37156667</sup>-1)</td>
<td>22370543</td>
<td>2008 Elvenich, Woltman, Kurowski, et al.</td>
<td></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/45.zip\">ZIP</a></td>
</tr>
<tr>
<td>44</td>
<td>2<sup>32582656</sup> × (2<sup>32582657</sup>-1)</td>
<td>19616714</td>
<td>2006 Cooper, Boone, Woltman, Kurowski, et al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/44.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/44.zip\">ZIP</a></td>
</tr>
<tr>
<td>43</td>
<td>2<sup>30402456</sup> × (2<sup>30402457</sup>-1)</td>
<td>18304103</td>
<td>2005 Cooper, Boone, Woltman, Kurowski, et al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/43.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/43.zip\">ZIP</a></td>
</tr>
<tr>
<td>42</td>
<td>2<sup>25964950</sup> × (2<sup>25964951</sup>-1)</td>
<td>15632458</td>
<td>2005 Nowak, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/42.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/42.zip\">ZIP</a></td>
</tr>
<tr>
<td>41</td>
<td>2<sup>24036582</sup> × (2<sup>24036583</sup>-1)</td>
<td>14471465</td>
<td>2004 Findley, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/41.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/41.zip\">ZIP</a></td>
</tr>
<tr>
<td>40</td>
<td>2<sup>20996010</sup> × (2<sup>20996011</sup>-1)</td>
<td>12640858</td>
<td>2003 Shafer, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/40.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/40.zip\">ZIP</a></td>
</tr>
<tr>
<td>39</td>
<td>2<sup>13466916</sup> × (2<sup>13466917</sup>-1)</td>
<td>8107892</td>
<td>2001 Cameron, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/39.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/39.zip\">ZIP</a></td>
</tr>
<tr>
<td>38</td>
<td>2<sup>6972592</sup> × (2<sup>6972593</sup>-1)</td>
<td>4197919</td>
<td>1999 Hajratwala, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/38.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/38.zip\">ZIP</a></td>
</tr>
<tr>
<td>37</td>
<td>2<sup>3021376</sup> × (2<sup>3021377</sup>-1)</td>
<td>1819050</td>
<td>1998 Clarkson, Woltman, Kurowski, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/37.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/37.zip\">ZIP</a></td>
</tr>
<tr>
<td>36</td>
<td>2<sup>2976220</sup> × (2<sup>2976221</sup>-1)</td>
<td>1791864</td>
<td>1997 Spence, Woltman, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/36.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/36.zip\">ZIP</a></td>
</tr>
<tr>
<td>35</td>
<td>2<sup>1398268</sup> × (2<sup>1398269</sup>-1)</td>
<td>841842</td>
<td>1996 Armengaud, Woltman, et. al.</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/35.txt\">TXT</a></td>
<td><a href=\"//static.bigprimes.net/archive/perfect/35.zip\">ZIP</a></td>
</tr>
<tr>
<td>34</td>
<td>2<sup>1257786</sup> × (2<sup>1257787</sup>-1)</td>
<td>757263</td>
<td>1996 Slowinski&Gage</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/34.txt\">TXT</a></td>
</tr>
<tr>
<td>33</td>
<td>2<sup>859432</sup> × (2<sup>859433</sup>-1)</td>
<td>517430</td>
<td>1994 Slowinski&Gage</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/33.txt\">TXT</a></td>
</tr>
<tr>
<td>32</td>
<td>2<sup>756838</sup> × (2<sup>756839</sup>-1)</td>
<td>455663</td>
<td>1992 Slowinski&Gage</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/32.txt\">TXT</a></td>
</tr>
<tr>
<td>31</td>
<td>2<sup>216090</sup> × (2<sup>216091</sup>-1)</td>
<td>130100</td>
<td>1985 Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/31.txt\">TXT</a></td>
</tr>
<tr>
<td>30</td>
<td>2<sup>132048</sup> × (2<sup>132049</sup>-1)</td>
<td>79502</td>
<td>1983 Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/30.txt\">TXT</a></td>
</tr>
<tr>
<td>29</td>
<td>2<sup>110502</sup> × (2<sup>110503</sup>-1)</td>
<td>66530</td>
<td>1988 Colquitt&Welsh</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/29.txt\">TXT</a></td>
</tr>
<tr>
<td>28</td>
<td>2<sup>86242</sup> × (2<sup>86243</sup>-1)</td>
<td>51924</td>
<td>1982 Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/28.txt\">TXT</a></td>
</tr>
<tr>
<td>27</td>
<td>2<sup>44496</sup> × (2<sup>44497</sup>-1)</td>
<td>26790</td>
<td>1979 Nelson&Slowinski</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/27.txt\">TXT</a></td>
</tr>
<tr>
<td>26</td>
<td>2<sup>23208</sup> × (2<sup>23209</sup>-1)</td>
<td>13973</td>
<td>1979 Noll</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/26.txt\">TXT</a></td>
</tr>
<tr>
<td>25</td>
<td>2<sup>21700</sup> × (2<sup>21701</sup>-1)</td>
<td>13066</td>
<td>1978 Noll&Nickel</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/25.txt\">TXT</a></td>
</tr>
<tr>
<td>24</td>
<td>2<sup>19936</sup> × (2<sup>19937</sup>-1)</td>
<td>12003</td>
<td>1971 Tuckerman</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/24.txt\">TXT</a></td>
</tr>
<tr>
<td>23</td>
<td>2<sup>11212</sup> × (2<sup>11213</sup>-1)</td>
<td>6751</td>
<td>1963 Gillies</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/23.txt\">TXT</a></td>
</tr>
<tr>
<td>22</td>
<td>2<sup>9940</sup> × (2<sup>9941</sup>-1)</td>
<td>5985</td>
<td>1963 Gillies</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/22.txt\">TXT</a></td>
</tr>
<tr>
<td>21</td>
<td>2<sup>9688</sup> × (2<sup>9689</sup>-1)</td>
<td>5834</td>
<td>1963 Gillies</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/21.txt\">TXT</a></td>
</tr>
<tr>
<td>20</td>
<td>2<sup>4422</sup> × (2<sup>4423</sup>-1)</td>
<td>2663</td>
<td>1961 Hurwitz</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/20.txt\">TXT</a></td>
</tr>
<tr>
<td>19</td>
<td>2<sup>4252</sup> × (2<sup>4253</sup>-1)</td>
<td>2561</td>
<td>1961 Hurwitz</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/19.txt\">TXT</a></td>
</tr>
<tr>
<td>18</td>
<td>2<sup>3216</sup> × (2<sup>3217</sup>-1)</td>
<td>1937</td>
<td>1957 Riesel</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/18.txt\">TXT</a></td>
</tr>
<tr>
<td>17</td>
<td>2<sup>2280</sup> × (2<sup>2281</sup>-1)</td>
<td>1373</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/17.txt\">TXT</a></td>
</tr>
<tr>
<td>16</td>
<td>2<sup>2202</sup> × (2<sup>2203</sup>-1)</td>
<td>1327</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/16.txt\">TXT</a></td>
</tr>
<tr>
<td>15</td>
<td>2<sup>1278</sup> × (2<sup>1279</sup>-1)</td>
<td>770</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/15.txt\">TXT</a></td>
</tr>
<tr>
<td>14</td>
<td>2<sup>606</sup> × (2<sup>607</sup>-1)</td>
<td>366</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/14.txt\">TXT</a></td>
</tr>
<tr>
<td>13</td>
<td>2<sup>520</sup> × (2<sup>521</sup>-1)</td>
<td>314</td>
<td>1952 Robinson</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/13.txt\">TXT</a></td>
</tr>
<tr>
<td>12</td>
<td>2<sup>126</sup> × (2<sup>127</sup>-1)</td>
<td>77</td>
<td>1876 Lucas</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/12.txt\">TXT</a></td>
</tr>
<tr>
<td>11</td>
<td>2<sup>106</sup> × (2<sup>107</sup>-1)</td>
<td>65</td>
<td>1914 Powers</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/11.txt\">TXT</a></td>
</tr>
<tr>
<td>10</td>
<td>2<sup>88</sup> × (2<sup>89</sup>-1)</td>
<td>54</td>
<td>1911 Powers</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/10.txt\">TXT</a></td>
</tr>
<tr>
<td>9</td>
<td>2<sup>60</sup> × (2<sup>61</sup>-1)</td>
<td>37</td>
<td>1883 Pervushin</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/9.txt\">TXT</a></td>
</tr>
<tr>
<td>8</td>
<td>2<sup>30</sup> × (2<sup>31</sup>-1)</td>
<td>19</td>
<td>1772 Euler</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/8.txt\">TXT</a></td>
</tr>
<tr>
<td>7</td>
<td>2<sup>18</sup> × (2<sup>19</sup>-1)</td>
<td>12</td>
<td>1588 Cataldi</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/7.txt\">TXT</a></td>
</tr>
<tr>
<td>6</td>
<td>2<sup>16</sup> × (2<sup>17</sup>-1)</td>
<td>10</td>
<td>1588 Cataldi</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/6.txt\">TXT</a></td>
</tr>
<tr>
<td>5</td>
<td>2<sup>12</sup> × (2<sup>13</sup>-1)</td>
<td>8</td>
<td>1456 ?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/5.txt\">TXT</a></td>
</tr>
<tr>
<td>4</td>
<td>2<sup>6</sup> × (2<sup>7</sup>-1)</td>
<td>4</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/4.txt\">TXT</a></td>
</tr>
<tr>
<td>3</td>
<td>2<sup>4</sup> × (2<sup>5</sup>-1)</td>
<td>3</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/3.txt\">TXT</a></td>
</tr>
<tr>
<td>2</td>
<td>2<sup>2</sup> × (2<sup>3</sup>-1)</td>
<td>2</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/2.txt\">TXT</a></td>
</tr>
<tr>
<td>1</td>
<td>2<sup>1</sup> × (2<sup>2</sup>-1)</td>
<td>1</td>
<td>?</td>
<td><a href=\"//static.bigprimes.net/archive/perfect/1.txt\">TXT</a></td>
</tr>
</tbody></table>"),
]
} |
use std;
use parser::js::*;
use parser::js::terminals::*;
/// A JavaScript expression AST node, one variant per syntactic expression
/// form produced by the parsers in `parser::js::parsing` (grammar follows
/// qqmljs.g — see the comments in the `parsing` module below).
/// Variants borrow string slices from the source text via lifetime `'a`.
#[derive(Debug, PartialEq)]
pub enum Expression<'a> {
CallExpression(CallExpression<'a>),
ConditionalExpression(ConditionalExpression<'a>),
BinaryExpression(BinaryExpression<'a>),
// Unary prefix forms.
NotExpression(NotExpression<'a>),
TildeExpression(TildeExpression<'a>),
UnaryMinusExpression(UnaryMinusExpression<'a>),
UnaryPlusExpression(UnaryPlusExpression<'a>),
PreDecrementExpression(PreDecrementExpression<'a>),
PreIncrementExpression(PreIncrementExpression<'a>),
TypeOfExpression(TypeOfExpression<'a>),
VoidExpression(VoidExpression<'a>),
DeleteExpression(DeleteExpression<'a>),
// Unary postfix forms.
PostDecrementExpression(PostDecrementExpression<'a>),
PostIncrementExpression(PostIncrementExpression<'a>),
FunctionExpression(FunctionExpression<'a>),
// Member access / construction forms.
ArrayMemberExpression(ArrayMemberExpression<'a>),
FieldMemberExpression(FieldMemberExpression<'a>),
NewMemberExpression(NewMemberExpression<'a>),
ThisExpression(ThisExpression),
IdentifierExpression(IdentifierExpression<'a>),
NullExpression(NullExpression),
// Leaf literals.
TrueLiteral(TrueLiteral),
FalseLiteral(FalseLiteral),
NumericLiteral(NumericLiteral),
//TODO: Multiline string literal
StringLiteral(StringLiteral<'a>),
//TODO: regexp
ArrayLiteral(ArrayLiteral<'a>),
ObjectLiteral(ObjectLiteral<'a>),
NestedExpression(NestedExpression<'a>),
ExpressionList(ExpressionList<'a>),
NewExpression(NewExpression<'a>),
}
/// A call: `base(arguments)`.
/// NOTE(review): `arguments` being `None` presumably means an empty argument
/// list — confirm against the call-expression parser.
#[derive(Debug, PartialEq)]
pub struct CallExpression<'a> {
pub base: Box<Expression<'a>>,
pub arguments: Option<ArgumentList<'a>>,
}
/// Comma-separated argument expressions of a call or `new` expression.
#[derive(Debug, PartialEq)]
pub struct ArgumentList<'a>(pub std::vec::Vec<Expression<'a>>);
/// The ternary operator: `expression ? ok : ko`
/// (see `conditional_expression` in the `parsing` module).
#[derive(Debug, PartialEq)]
pub struct ConditionalExpression<'a> {
pub expression: Box<Expression<'a>>,
pub ok: Box<Expression<'a>>,
pub ko: Box<Expression<'a>>,
}
/// `left operator right`, where `operator` is the operator token as a slice
/// of the source text (also used for assignment operators — see
/// `assignment_expression` in the `parsing` module).
#[derive(Debug, PartialEq)]
pub struct BinaryExpression<'a> {
pub left: Box<Expression<'a>>,
pub operator: &'a str,
pub right: Box<Expression<'a>>,
}
/// Logical not: `!operand`.
#[derive(Debug, PartialEq)]
pub struct NotExpression<'a>(pub Box<Expression<'a>>);
/// Bitwise not: `~operand`.
#[derive(Debug, PartialEq)]
pub struct TildeExpression<'a>(pub Box<Expression<'a>>);
/// Arithmetic negation: `-operand`.
#[derive(Debug, PartialEq)]
pub struct UnaryMinusExpression<'a>(pub Box<Expression<'a>>);
/// Unary plus: `+operand`.
#[derive(Debug, PartialEq)]
pub struct UnaryPlusExpression<'a>(pub Box<Expression<'a>>);
/// Prefix decrement: `--operand`.
#[derive(Debug, PartialEq)]
pub struct PreDecrementExpression<'a>(pub Box<Expression<'a>>);
/// Prefix increment: `++operand`.
#[derive(Debug, PartialEq)]
pub struct PreIncrementExpression<'a>(pub Box<Expression<'a>>);
/// `typeof operand`.
#[derive(Debug, PartialEq)]
pub struct TypeOfExpression<'a>(pub Box<Expression<'a>>);
/// `void operand`.
#[derive(Debug, PartialEq)]
pub struct VoidExpression<'a>(pub Box<Expression<'a>>);
/// `delete operand`.
#[derive(Debug, PartialEq)]
pub struct DeleteExpression<'a>(pub Box<Expression<'a>>);
/// Postfix decrement: `operand--`.
#[derive(Debug, PartialEq)]
pub struct PostDecrementExpression<'a>(pub Box<Expression<'a>>);
/// Postfix increment: `operand++`.
#[derive(Debug, PartialEq)]
pub struct PostIncrementExpression<'a>(pub Box<Expression<'a>>);
/// A function expression: `function name(formals) { body }`, where the name,
/// parameter list, and body are each optional.
#[derive(Debug, PartialEq)]
pub struct FunctionExpression<'a> {
pub name: Option<&'a str>,
pub formals: Option<FormalParameterList<'a>>,
pub body: Option<FunctionBody<'a>>,
}
/// Computed member access: `base[expression]`.
#[derive(Debug, PartialEq)]
pub struct ArrayMemberExpression<'a> {
pub base: Box<Expression<'a>>,
pub expression: Box<Expression<'a>>,
}
/// Property access: `base.name`, with `name` a slice of the source text.
#[derive(Debug, PartialEq)]
pub struct FieldMemberExpression<'a> {
pub base: Box<Expression<'a>>,
pub name: &'a str,
}
/// Construction with an argument list: `new base(arguments)`.
#[derive(Debug, PartialEq)]
pub struct NewMemberExpression<'a> {
pub base: Box<Expression<'a>>,
pub arguments: Option<ArgumentList<'a>>,
}
/// The `this` keyword.
#[derive(Debug, PartialEq)]
pub struct ThisExpression;
/// A bare identifier, as a slice of the source text.
#[derive(Debug, PartialEq)]
pub struct IdentifierExpression<'a>(pub &'a str);
/// The `null` literal.
#[derive(Debug, PartialEq)]
pub struct NullExpression;
/// The `true` literal.
#[derive(Debug, PartialEq)]
pub struct TrueLiteral;
/// The `false` literal.
#[derive(Debug, PartialEq)]
pub struct FalseLiteral;
/// An array literal: `[elements]`, with an optional trailing elision
/// (holes after the last element).
#[derive(Debug, PartialEq)]
pub struct ArrayLiteral<'a> {
pub elements: Option<ElementList<'a>>,
pub elision: Option<Elision>,
}
/// The elements of an array literal.
#[derive(Debug, PartialEq)]
pub struct ElementList<'a>(pub std::vec::Vec<Element<'a>>);
/// One array-literal element, with any elision (holes) preceding it.
#[derive(Debug, PartialEq)]
pub struct Element<'a> {
pub elision: Option<Elision>,
pub expression: Expression<'a>,
}
/// A run of elided array slots.
/// NOTE(review): presumably the count of consecutive holes (`,,`) — confirm
/// against the elision parser in `parser::js::terminals`.
#[derive(Debug, PartialEq)]
pub struct Elision(pub usize);
/// An object literal: `{ properties }`; `None` for the empty literal `{}`.
#[derive(Debug, PartialEq)]
pub struct ObjectLiteral<'a> {
pub properties: Option<PropertyAssignmentList<'a>>,
}
/// The property assignments of an object literal.
#[derive(Debug, PartialEq)]
pub struct PropertyAssignmentList<'a>(pub std::vec::Vec<PropertyAssignment<'a>>);
/// One object-literal entry: either `name: value` or a getter/setter.
#[derive(Debug, PartialEq)]
pub enum PropertyAssignment<'a> {
PropertyNameAndValue(PropertyNameAndValue<'a>),
PropertyGetterSetter(PropertyGetterSetter<'a>),
}
/// A plain `name: value` property.
#[derive(Debug, PartialEq)]
pub struct PropertyNameAndValue<'a> {
pub name: PropertyName<'a>,
pub value: Expression<'a>,
}
/// A `get name() {...}` or `set name(v) {...}` property.
#[derive(Debug, PartialEq)]
pub struct PropertyGetterSetter<'a> {
pub name: PropertyName<'a>,
pub getter_setter_type: PropertyGetterSetterType,
pub formals: Option<FormalParameterList<'a>>,
pub function_body: Option<FunctionBody<'a>>,
}
/// Distinguishes a getter from a setter in `PropertyGetterSetter`.
#[derive(Debug, PartialEq)]
pub enum PropertyGetterSetterType {
Getter,
Setter,
}
/// An object-literal property name: identifier, string, or numeric literal.
#[derive(Debug, PartialEq)]
pub enum PropertyName<'a> {
IdentifierPropertyName(IdentifierPropertyName<'a>),
StringLiteralPropertyName(StringLiteralPropertyName<'a>),
NumericLiteralPropertyName(NumericLiteralPropertyName),
}
/// An identifier used as a property name.
#[derive(Debug, PartialEq)]
pub struct IdentifierPropertyName<'a>(pub &'a str);
/// A string literal used as a property name.
#[derive(Debug, PartialEq)]
pub struct StringLiteralPropertyName<'a>(pub StringLiteral<'a>);
/// A numeric literal used as a property name.
#[derive(Debug, PartialEq)]
pub struct NumericLiteralPropertyName(pub NumericLiteral);
/// A parenthesised expression: `(inner)`.
#[derive(Debug, PartialEq)]
pub struct NestedExpression<'a>(pub Box<Expression<'a>>);
/// The comma operator: `left, right`; chains fold left-nested
/// (see `expression_list` in the `parsing` module).
#[derive(Debug, PartialEq)]
pub struct ExpressionList<'a> {
pub left: Box<Expression<'a>>,
pub right: Box<Expression<'a>>,
}
/// Construction without an argument list: `new operand`.
#[derive(Debug, PartialEq)]
pub struct NewExpression<'a>(pub Box<Expression<'a>>);
pub mod parsing {
// Generates the parser function for one precedence level of left-associative
// binary operators.
//
// `$name` is the generated function, `$op1`/`$ops` the operator tokens
// accepted at this level, and `$next` the parser for the next-higher
// precedence level. The expansion parses `first ($op $next)*` and folds the
// tail into a left-leaning tree of `Expression::BinaryExpression` nodes,
// keeping each operator as the matched source slice.
macro_rules! binary_op {
($name:ident, [ $op1:tt $(, $ops:tt)* ], $next:ident) => (
fn $name<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> {
do_parse!(i,
first: $next >>
fold: fold_many0!(
do_parse!(
// Any one of this level's operators, then the next-level operand.
operator: alt!(
keyword!($op1)
$(
|
keyword!($ops)
)*
) >>
expr: $next >>
(operator, expr)
),
first,
// Left-fold: `a op1 b op2 c` becomes `((a op1 b) op2 c)`.
|acc: Expression<'a>, item: (&'a str, Expression<'a>)| {
Expression::BinaryExpression(BinaryExpression {
left: Box::new(acc),
operator: item.0,
right: Box::new(item.1),
})
}
) >>
(fold)
)
}
);
// Convenience form for a level with a single operator.
($name:ident, $op:tt, $next:ident) => (binary_op!($name, [$op], $next););
}
use nom::IResult;
use parser::helpers::parsing::*;
use parser::js::parsing::*;
use parser::js::terminals::parsing::*;
use super::*;
pub fn expression_list<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> { // called Expression in qqmljs.g
do_parse!(i,
first: assignment_expression >>
fold: fold_many0!(
do_parse!(
keyword!(",") >>
expr: assignment_expression >>
(expr)
),
first,
|acc: Expression<'a>, item: Expression<'a>| {
Expression::ExpressionList(ExpressionList {
left: Box::new(acc),
right: Box::new(item),
})
}
) >>
(fold)
)
}
pub fn expression_list_not_in<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> { // called ExpressionNotIn in qqmljs.g
do_parse!(i,
first: assignment_expression_not_in >>
fold: fold_many0!(
do_parse!(
keyword!(",") >>
expr: assignment_expression_not_in >>
(expr)
),
first,
|acc: Expression<'a>, item: Expression<'a>| {
Expression::ExpressionList(ExpressionList {
left: Box::new(acc),
right: Box::new(item),
})
}
) >>
(fold)
)
}
named!(pub assignment_expression<&str, Expression>, alt!(
do_parse!(
left: left_hand_side_expression >>
operator: assignment_operator >>
right: assignment_expression >>
(Expression::BinaryExpression(BinaryExpression {
left: Box::new(left),
operator: operator,
right: Box::new(right),
}))
)
|
conditional_expression
));
named!(pub assignment_expression_not_in<&str, Expression>, alt!(
do_parse!(
left: left_hand_side_expression >>
operator: assignment_operator >>
right: assignment_expression_not_in >>
(Expression::BinaryExpression(BinaryExpression {
left: Box::new(left),
operator: operator,
right: Box::new(right),
}))
)
|
conditional_expression_not_in
));
named!(conditional_expression<&str, Expression>, alt!(
do_parse!(
expression: logical_or_expression >>
keyword!("?") >>
ok: assignment_expression >>
keyword!(":") >>
ko: assignment_expression >>
(Expression::ConditionalExpression(ConditionalExpression {
expression: Box::new(expression),
ok: Box::new(ok),
ko: Box::new(ko),
}))
)
|
logical_or_expression
));
named!(conditional_expression_not_in<&str, Expression>, alt!(
do_parse!(
expression: logical_or_expression_not_in >>
keyword!("?") >>
ok: assignment_expression_not_in >>
keyword!(":") >>
ko: assignment_expression_not_in >>
(Expression::ConditionalExpression(ConditionalExpression {
expression: Box::new(expression),
ok: Box::new(ok),
ko: Box::new(ko),
}))
)
|
logical_or_expression
));
binary_op!(logical_or_expression, "||", logical_and_expression);
binary_op!(logical_or_expression_not_in, "||", logical_and_expression_not_in); // FIXME: not_in when macros are allowed as idents
binary_op!(logical_and_expression, "&&", bitwise_or_expression);
binary_op!(logical_and_expression_not_in, "&&", bitwise_or_expression_not_in);
binary_op!(bitwise_or_expression, "|", bitwise_xor_expression);
binary_op!(bitwise_or_expression_not_in, "|", bitwise_xor_expression_not_in);
binary_op!(bitwise_xor_expression, "^", bitwise_and_expression);
binary_op!(bitwise_xor_expression_not_in, "^", bitwise_and_expression_not_in);
binary_op!(bitwise_and_expression, "&", equality_expression);
binary_op!(bitwise_and_expression_not_in, "&", equality_expression_not_in);
binary_op!(equality_expression, ["===", "!==", "==", "!="], relational_expression);
binary_op!(equality_expression_not_in, ["===", "!==", "==", "!="], relational_expression_not_in);
binary_op!(relational_expression, [">=", "<=", ">", "<", "instanceof", "in"], shift_expression);
binary_op!(relational_expression_not_in, [">=", "<=", ">", "<", "instanceof"], shift_expression);
binary_op!(shift_expression, [">>>", ">>", "<<"], additive_expression);
binary_op!(additive_expression, ["+", "-"], multiplicative_expression);
binary_op!(multiplicative_expression, ["*", "/", "%"], unary_expression);
named!(unary_expression<&str, Expression>, alt!(
do_parse!(
keyword!("delete") >>
expression: unary_expression >>
(Expression::DeleteExpression(DeleteExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("void") >>
expression: unary_expression >>
(Expression::VoidExpression(VoidExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("typeof") >>
expression: unary_expression >>
(Expression::TypeOfExpression(TypeOfExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("++") >>
expression: unary_expression >>
(Expression::PreIncrementExpression(PreIncrementExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("--") >>
expression: unary_expression >>
(Expression::PreDecrementExpression(PreDecrementExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("+") >>
expression: unary_expression >>
(Expression::UnaryPlusExpression(UnaryPlusExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("-") >>
expression: unary_expression >>
(Expression::UnaryMinusExpression(UnaryMinusExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("~") >>
expression: unary_expression >>
(Expression::TildeExpression(TildeExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("!") >>
expression: unary_expression >>
(Expression::NotExpression(NotExpression(Box::new(expression))))
)
|
postfix_expression
));
named!(postfix_expression<&str, Expression>, alt!(
do_parse!(
expression: left_hand_side_expression >>
not!(line_terminator) >>
keyword!("++") >>
(Expression::PostIncrementExpression(PostIncrementExpression(Box::new(expression))))
)
|
do_parse!(
expression: left_hand_side_expression >>
not!(line_terminator) >>
keyword!("--") >>
(Expression::PostDecrementExpression(PostDecrementExpression(Box::new(expression))))
)
|
left_hand_side_expression
));
named!(pub left_hand_side_expression<&str, Expression>, alt!(
new_expression
|
call_expression
));
named!(new_expression<&str, Expression>, alt!(
do_parse!(
keyword!("new") >>
expression: new_expression >>
(Expression::NewExpression(NewExpression(Box::new(expression))))
)
|
member_expression
));
#[allow(cyclomatic_complexity)]
fn call_expression<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> {
do_parse!(i,
first: do_parse!(
base: member_expression >>
keyword!("(") >>
arguments: opt!(argument_list) >>
keyword!(")") >>
(Expression::CallExpression(CallExpression {
base: Box::new(base),
arguments: arguments,
}))
) >>
fold: fold_many0!(
alt!(
do_parse!(
keyword!("(") >>
arguments: opt!(argument_list) >>
keyword!(")") >>
(Some(arguments), None, None)
)
|
do_parse!(
keyword!("[") >>
expression: expression_list >>
keyword!("]") >>
(None, Some(expression), None)
)
|
do_parse!(
keyword!(".") >>
name: property_identifier >>
(None, None, Some(name))
)
),
first,
|acc: Expression<'a>, item: (Option<Option<ArgumentList<'a>>>, Option<Expression<'a>>, Option<&'a str>)| {
match item {
(Some(al), None, None) => Expression::CallExpression(CallExpression {
base: Box::new(acc),
arguments: al,
}),
(None, Some(e), None) => Expression::ArrayMemberExpression(ArrayMemberExpression {
base: Box::new(acc),
expression: Box::new(e),
}),
(None, None, Some(n)) => Expression::FieldMemberExpression(FieldMemberExpression {
base: Box::new(acc),
name: n,
}),
_ => panic!("call_expression is broken")
}
}
) >>
(fold)
)
}
#[allow(cyclomatic_complexity)]
pub fn member_expression<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> {
alt!(i,
do_parse!(
keyword!("new") >>
base: member_expression >>
keyword!("(") >>
arguments: opt!(argument_list) >>
keyword!(")") >>
(Expression::NewMemberExpression(NewMemberExpression {
base: Box::new(base),
arguments: arguments,
}))
)
|
do_parse!(
first: alt!(
primary_expression
|
function_expression
) >>
fold: fold_many0!(
alt!(
do_parse!(
keyword!("[") >>
expression: expression_list >>
keyword!("]") >>
(Some(expression), None)
)
|
do_parse!(
keyword!(".") >>
name: property_identifier >>
(None, Some(name))
)
),
first,
|acc: Expression<'a>, item: (Option<Expression<'a>>, Option<&'a str>)| {
match item {
(Some(e), None) => {
Expression::ArrayMemberExpression(ArrayMemberExpression {
base: Box::new(acc),
expression: Box::new(e),
})
},
(None, Some(n)) => {
Expression::FieldMemberExpression(FieldMemberExpression {
base: Box::new(acc),
name: n,
})
},
_ => panic!("member_expression is broken"),
}
}
) >>
(fold)
)
)
}
named!(argument_list<&str, ArgumentList>, conv!(ArgumentList(separated_nonempty_list!(
keyword!(","),
assignment_expression
))));
named!(primary_expression<&str, Expression>, alt!(
do_parse!(
keyword!("this") >>
(Expression::ThisExpression(ThisExpression))
)
|
do_parse!(
keyword!("null") >>
(Expression::NullExpression(NullExpression))
)
|
do_parse!(
keyword!("true") >>
(Expression::TrueLiteral(TrueLiteral))
)
|
do_parse!(
keyword!("false") >>
(Expression::FalseLiteral(FalseLiteral))
)
|
do_parse!(
keyword!("[") >>
elements: opt!(do_parse!(
element_list: element_list >>
opt!(keyword!(",")) >>
(element_list)
)) >>
elision: opt!(elision) >>
keyword!("]") >>
(Expression::ArrayLiteral(ArrayLiteral {
elements: elements,
elision: elision,
}))
)
|
do_parse!(
keyword!("{") >>
properties: opt!(do_parse!(
property_assignment_list: property_assignment_list >>
opt!(keyword!(",")) >>
(property_assignment_list)
)) >>
keyword!("}") >>
(Expression::ObjectLiteral(ObjectLiteral {
properties: properties,
}))
)
|
do_parse!(
keyword!("(") >>
expression: expression_list >>
keyword!(")") >>
(Expression::NestedExpression(NestedExpression(Box::new(expression))))
)
|
conv!(Expression::NumericLiteral(numeric_literal))
|
conv!(Expression::StringLiteral(string_literal))
|
conv!(Expression::IdentifierExpression(conv!(IdentifierExpression(js_identifier))))
));
named!(element_list<&str, ElementList>, conv!(ElementList(separated_nonempty_list!(
keyword!(","),
do_parse!(
elision_opt: opt!(elision) >>
assignment_expression: assignment_expression >>
(Element {
elision: elision_opt,
expression: assignment_expression,
})
)
))));
named!(elision<&str, Elision>, do_parse!(
vec: many1!(keyword!(",")) >>
(Elision(vec.len()))
));
named!(property_assignment_list<&str, PropertyAssignmentList>, conv!(PropertyAssignmentList(separated_nonempty_list!(
keyword!(","),
property_assignment
))));
named!(property_assignment<&str, PropertyAssignment>, alt!(
do_parse!(
keyword!("get") >>
name: property_name >>
keyword!("(") >>
keyword!(")") >>
keyword!("{") >>
function_body: opt!(function_body) >>
keyword!("}") >>
(PropertyAssignment::PropertyGetterSetter(PropertyGetterSetter {
name: name,
getter_setter_type: PropertyGetterSetterType::Getter,
formals: None,
function_body: function_body,
}))
)
|
do_parse!(
keyword!("set") >>
name: property_name >>
keyword!("(") >>
formals: opt!(formal_parameter_list) >>
keyword!(")") >>
keyword!("{") >>
function_body: opt!(function_body) >>
keyword!("}") >>
(PropertyAssignment::PropertyGetterSetter(PropertyGetterSetter {
name: name,
getter_setter_type: PropertyGetterSetterType::Setter,
formals: formals,
function_body: function_body,
}))
)
|
do_parse!(
name: property_name >>
keyword!(":") >>
value: assignment_expression >>
(PropertyAssignment::PropertyNameAndValue(PropertyNameAndValue {
name: name,
value: value,
}))
)
));
named!(property_name<&str, PropertyName>, alt!(
conv!(PropertyName::IdentifierPropertyName(conv!(IdentifierPropertyName(reserved_identifier))))
|
conv!(PropertyName::IdentifierPropertyName(conv!(IdentifierPropertyName(js_identifier))))
|
conv!(PropertyName::StringLiteralPropertyName(conv!(StringLiteralPropertyName(string_literal))))
|
conv!(PropertyName::NumericLiteralPropertyName(conv!(NumericLiteralPropertyName(numeric_literal))))
));
named!(function_expression<&str, Expression>, do_parse!(
keyword!("function") >>
name: opt!(js_identifier) >>
keyword!("(") >>
formals: opt!(formal_parameter_list) >>
keyword!(")") >>
keyword!("{") >>
body: opt!(function_body) >>
keyword!("}") >>
(Expression::FunctionExpression(FunctionExpression {
name: name,
formals: formals,
body: body,
}))
));
#[cfg(test)]
mod tests {
use nom::{ErrorKind, Needed};
use super::*;
#[test]
fn function_expression() {
assert_eq!(
super::function_expression(" function () {} "),
IResult::Done(" ", Expression::FunctionExpression(FunctionExpression {
name: None,
formals: None,
body: None,
}))
);
{
let name = "foo";
let formals = "bar, test";
let body = ";;";
let input = format!("function {}({}) {{{}}} ", name, formals, body);
assert_eq!(
super::function_expression(&input),
IResult::Done(" ", Expression::FunctionExpression(FunctionExpression {
name: Some(super::js_identifier(name).unwrap().1),
formals: Some(super::formal_parameter_list(formals).unwrap().1),
body: Some(super::function_body(body).unwrap().1),
}))
);
}
assert_eq!(super::function_expression(""), IResult::Incomplete(Needed::Size(8)));
assert_eq!(
super::function_expression("function () {{ "),
IResult::Incomplete(Needed::Size(16))
);
assert_eq!(
super::function_expression("function ( {{}} "),
IResult::Error(ErrorKind::Tag)
);
}
}
}
Treat valid expressions at the end of input as complete instead of incomplete
use std;
use parser::js::*;
use parser::js::terminals::*;
/// A parsed JavaScript expression; every variant borrows slices of the
/// original source text through the `'a` lifetime.
#[derive(Debug, PartialEq)]
pub enum Expression<'a> {
CallExpression(CallExpression<'a>),
ConditionalExpression(ConditionalExpression<'a>),
BinaryExpression(BinaryExpression<'a>),
// prefix unary operators
NotExpression(NotExpression<'a>),
TildeExpression(TildeExpression<'a>),
UnaryMinusExpression(UnaryMinusExpression<'a>),
UnaryPlusExpression(UnaryPlusExpression<'a>),
PreDecrementExpression(PreDecrementExpression<'a>),
PreIncrementExpression(PreIncrementExpression<'a>),
TypeOfExpression(TypeOfExpression<'a>),
VoidExpression(VoidExpression<'a>),
DeleteExpression(DeleteExpression<'a>),
// postfix unary operators
PostDecrementExpression(PostDecrementExpression<'a>),
PostIncrementExpression(PostIncrementExpression<'a>),
FunctionExpression(FunctionExpression<'a>),
// member access: base[expr], base.name, new Base(args)
ArrayMemberExpression(ArrayMemberExpression<'a>),
FieldMemberExpression(FieldMemberExpression<'a>),
NewMemberExpression(NewMemberExpression<'a>),
ThisExpression(ThisExpression),
IdentifierExpression(IdentifierExpression<'a>),
NullExpression(NullExpression),
TrueLiteral(TrueLiteral),
FalseLiteral(FalseLiteral),
NumericLiteral(NumericLiteral),
//TODO: Multiline string literal
StringLiteral(StringLiteral<'a>),
//TODO: regexp
ArrayLiteral(ArrayLiteral<'a>),
ObjectLiteral(ObjectLiteral<'a>),
NestedExpression(NestedExpression<'a>),
ExpressionList(ExpressionList<'a>),
NewExpression(NewExpression<'a>),
}
/// A function call `base(arguments)`; `arguments` is None for `base()`.
#[derive(Debug, PartialEq)]
pub struct CallExpression<'a> {
pub base: Box<Expression<'a>>,
pub arguments: Option<ArgumentList<'a>>,
}
/// Non-empty, comma-separated call arguments.
#[derive(Debug, PartialEq)]
pub struct ArgumentList<'a>(pub std::vec::Vec<Expression<'a>>);
/// Ternary `expression ? ok : ko`.
#[derive(Debug, PartialEq)]
pub struct ConditionalExpression<'a> {
pub expression: Box<Expression<'a>>,
pub ok: Box<Expression<'a>>,
pub ko: Box<Expression<'a>>,
}
/// `left <operator> right`; the operator is the matched source token.
#[derive(Debug, PartialEq)]
pub struct BinaryExpression<'a> {
pub left: Box<Expression<'a>>,
pub operator: &'a str,
pub right: Box<Expression<'a>>,
}
// Newtype wrappers for the prefix unary operators (`!`, `~`, `-`, `+`,
// `--`, `++`, `typeof`, `void`, `delete`), each boxing its operand.
#[derive(Debug, PartialEq)]
pub struct NotExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct TildeExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct UnaryMinusExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct UnaryPlusExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct PreDecrementExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct PreIncrementExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct TypeOfExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct VoidExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct DeleteExpression<'a>(pub Box<Expression<'a>>);
// Postfix `--` and `++` (operand precedes the operator).
#[derive(Debug, PartialEq)]
pub struct PostDecrementExpression<'a>(pub Box<Expression<'a>>);
#[derive(Debug, PartialEq)]
pub struct PostIncrementExpression<'a>(pub Box<Expression<'a>>);
/// `function [name]([formals]) { [body] }`; all three parts are optional.
#[derive(Debug, PartialEq)]
pub struct FunctionExpression<'a> {
pub name: Option<&'a str>,
pub formals: Option<FormalParameterList<'a>>,
pub body: Option<FunctionBody<'a>>,
}
/// Indexed member access `base[expression]`.
#[derive(Debug, PartialEq)]
pub struct ArrayMemberExpression<'a> {
pub base: Box<Expression<'a>>,
pub expression: Box<Expression<'a>>,
}
/// Dotted member access `base.name`.
#[derive(Debug, PartialEq)]
pub struct FieldMemberExpression<'a> {
pub base: Box<Expression<'a>>,
pub name: &'a str,
}
/// `new base(arguments)`; `arguments` is None for `new base()`.
#[derive(Debug, PartialEq)]
pub struct NewMemberExpression<'a> {
pub base: Box<Expression<'a>>,
pub arguments: Option<ArgumentList<'a>>,
}
/// The `this` keyword.
#[derive(Debug, PartialEq)]
pub struct ThisExpression;
/// A bare identifier (borrows the source slice).
#[derive(Debug, PartialEq)]
pub struct IdentifierExpression<'a>(pub &'a str);
/// The `null` keyword.
#[derive(Debug, PartialEq)]
pub struct NullExpression;
/// The `true` keyword.
#[derive(Debug, PartialEq)]
pub struct TrueLiteral;
/// The `false` keyword.
#[derive(Debug, PartialEq)]
pub struct FalseLiteral;
/// `[ elements, <elision> ]`; either part may be absent.
#[derive(Debug, PartialEq)]
pub struct ArrayLiteral<'a> {
pub elements: Option<ElementList<'a>>,
pub elision: Option<Elision>,
}
/// Non-empty, comma-separated array-literal elements.
#[derive(Debug, PartialEq)]
pub struct ElementList<'a>(pub std::vec::Vec<Element<'a>>);
/// One array-literal element, possibly preceded by elided (empty) slots.
#[derive(Debug, PartialEq)]
pub struct Element<'a> {
pub elision: Option<Elision>,
pub expression: Expression<'a>,
}
/// Count of consecutive commas standing for elided array slots.
#[derive(Debug, PartialEq)]
pub struct Elision(pub usize);
/// `{ properties }`; None for the empty object literal `{}`.
#[derive(Debug, PartialEq)]
pub struct ObjectLiteral<'a> {
pub properties: Option<PropertyAssignmentList<'a>>,
}
/// Non-empty, comma-separated object-literal entries.
#[derive(Debug, PartialEq)]
pub struct PropertyAssignmentList<'a>(pub std::vec::Vec<PropertyAssignment<'a>>);
/// One object-literal entry: `name: value` or a get/set accessor.
#[derive(Debug, PartialEq)]
pub enum PropertyAssignment<'a> {
PropertyNameAndValue(PropertyNameAndValue<'a>),
PropertyGetterSetter(PropertyGetterSetter<'a>),
}
/// An object-literal entry of the form `name: value`.
#[derive(Debug, PartialEq)]
pub struct PropertyNameAndValue<'a> {
pub name: PropertyName<'a>,
pub value: Expression<'a>,
}
/// An object-literal accessor entry: `get name() {..}` or `set name(formals) {..}`.
#[derive(Debug, PartialEq)]
pub struct PropertyGetterSetter<'a> {
pub name: PropertyName<'a>,
pub getter_setter_type: PropertyGetterSetterType,
// getters take no formals, so this is None for Getter
pub formals: Option<FormalParameterList<'a>>,
// None when the accessor body is empty (`{}`)
pub function_body: Option<FunctionBody<'a>>,
}
/// Distinguishes `get` from `set` accessors in [PropertyGetterSetter].
#[derive(Debug, PartialEq)]
pub enum PropertyGetterSetterType {
Getter,
Setter,
}
/// A property key: identifier, string literal, or numeric literal.
#[derive(Debug, PartialEq)]
pub enum PropertyName<'a> {
IdentifierPropertyName(IdentifierPropertyName<'a>),
StringLiteralPropertyName(StringLiteralPropertyName<'a>),
NumericLiteralPropertyName(NumericLiteralPropertyName),
}
/// Property key given as a bare identifier.
#[derive(Debug, PartialEq)]
pub struct IdentifierPropertyName<'a>(pub &'a str);
/// Property key given as a string literal.
#[derive(Debug, PartialEq)]
pub struct StringLiteralPropertyName<'a>(pub StringLiteral<'a>);
/// Property key given as a numeric literal.
#[derive(Debug, PartialEq)]
pub struct NumericLiteralPropertyName(pub NumericLiteral);
/// A parenthesised expression `( expr )`.
#[derive(Debug, PartialEq)]
pub struct NestedExpression<'a>(pub Box<Expression<'a>>);
/// Comma operator node `left , right`, folded left-associatively.
#[derive(Debug, PartialEq)]
pub struct ExpressionList<'a> {
pub left: Box<Expression<'a>>,
pub right: Box<Expression<'a>>,
}
/// `new expr` without an argument list (see NewMemberExpression for `new expr(..)`).
#[derive(Debug, PartialEq)]
pub struct NewExpression<'a>(pub Box<Expression<'a>>);
/// Hand-written JavaScript expression grammar (nom 3 macros), modeled on
/// Qt's qqmljs.g. The `_not_in` parser variants exclude the `in` operator
/// (used inside `for` statement headers). nom's parsers are streaming by
/// default, so the optional-suffix lookaheads are wrapped in `complete!`
/// to keep a valid expression ending at end-of-input from being reported
/// as `Incomplete`.
pub mod parsing {
// Expands into a parser for one precedence level of left-associative
// binary operators: `$name` parses `$next`, then folds any number of
// `<op> $next` pairs into nested BinaryExpression nodes.
macro_rules! binary_op {
($name:ident, [ $op1:tt $(, $ops:tt)* ], $next:ident) => (
fn $name<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> {
do_parse!(i,
first: $next >>
fold: fold_many0!(
do_parse!(
operator: alt!(
keyword!($op1)
$(
|
keyword!($ops)
)*
) >>
expr: $next >>
(operator, expr)
),
first,
|acc: Expression<'a>, item: (&'a str, Expression<'a>)| {
Expression::BinaryExpression(BinaryExpression {
left: Box::new(acc),
operator: item.0,
right: Box::new(item.1),
})
}
) >>
(fold)
)
}
);
// Single-operator convenience form.
($name:ident, $op:tt, $next:ident) => (binary_op!($name, [$op], $next););
}
use nom::IResult;
use parser::helpers::parsing::*;
use parser::js::parsing::*;
use parser::js::terminals::parsing::*;
use super::*;
// Comma-separated sequence of assignment expressions, folded
// left-associatively into ExpressionList nodes.
pub fn expression_list<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> { // called Expression in qqmljs.g
do_parse!(i,
first: assignment_expression >>
fold: fold_many0!(
do_parse!(
keyword!(",") >>
expr: assignment_expression >>
(expr)
),
first,
|acc: Expression<'a>, item: Expression<'a>| {
Expression::ExpressionList(ExpressionList {
left: Box::new(acc),
right: Box::new(item),
})
}
) >>
(fold)
)
}
// Same as expression_list, but the `in` operator is not allowed.
pub fn expression_list_not_in<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> { // called ExpressionNotIn in qqmljs.g
do_parse!(i,
first: assignment_expression_not_in >>
fold: fold_many0!(
do_parse!(
keyword!(",") >>
expr: assignment_expression_not_in >>
(expr)
),
first,
|acc: Expression<'a>, item: Expression<'a>| {
Expression::ExpressionList(ExpressionList {
left: Box::new(acc),
right: Box::new(item),
})
}
) >>
(fold)
)
}
// complete! here: input ending right after a valid left-hand side means
// "no assignment operator", not "need more input", so the alt! must be
// able to fall through to conditional_expression.
named!(pub assignment_expression<&str, Expression>, alt!(
do_parse!(
left: left_hand_side_expression >>
operator: complete!(assignment_operator) >>
right: assignment_expression >>
(Expression::BinaryExpression(BinaryExpression {
left: Box::new(left),
operator: operator,
right: Box::new(right),
}))
)
|
conditional_expression
));
named!(pub assignment_expression_not_in<&str, Expression>, alt!(
do_parse!(
left: left_hand_side_expression >>
operator: complete!(assignment_operator) >>
right: assignment_expression_not_in >>
(Expression::BinaryExpression(BinaryExpression {
left: Box::new(left),
operator: operator,
right: Box::new(right),
}))
)
|
conditional_expression_not_in
));
// Ternary `cond ? ok : ko`; complete! lets end-of-input after the
// condition fall back to a plain logical-or expression.
named!(conditional_expression<&str, Expression>, alt!(
do_parse!(
expression: logical_or_expression >>
complete!(keyword!("?")) >>
ok: assignment_expression >>
keyword!(":") >>
ko: assignment_expression >>
(Expression::ConditionalExpression(ConditionalExpression {
expression: Box::new(expression),
ok: Box::new(ok),
ko: Box::new(ko),
}))
)
|
logical_or_expression
));
named!(conditional_expression_not_in<&str, Expression>, alt!(
do_parse!(
expression: logical_or_expression_not_in >>
complete!(keyword!("?")) >>
ok: assignment_expression_not_in >>
keyword!(":") >>
ko: assignment_expression_not_in >>
(Expression::ConditionalExpression(ConditionalExpression {
expression: Box::new(expression),
ok: Box::new(ok),
ko: Box::new(ko),
}))
)
|
logical_or_expression // NOTE(review): falls back to logical_or_expression, not the _not_in variant — confirm this is intentional
));
// Precedence ladder, loosest to tightest binding.
binary_op!(logical_or_expression, "||", logical_and_expression);
binary_op!(logical_or_expression_not_in, "||", logical_and_expression_not_in); // FIXME: not_in when macros are allowed as idents
binary_op!(logical_and_expression, "&&", bitwise_or_expression);
binary_op!(logical_and_expression_not_in, "&&", bitwise_or_expression_not_in);
binary_op!(bitwise_or_expression, "|", bitwise_xor_expression);
binary_op!(bitwise_or_expression_not_in, "|", bitwise_xor_expression_not_in);
binary_op!(bitwise_xor_expression, "^", bitwise_and_expression);
binary_op!(bitwise_xor_expression_not_in, "^", bitwise_and_expression_not_in);
binary_op!(bitwise_and_expression, "&", equality_expression);
binary_op!(bitwise_and_expression_not_in, "&", equality_expression_not_in);
binary_op!(equality_expression, ["===", "!==", "==", "!="], relational_expression);
binary_op!(equality_expression_not_in, ["===", "!==", "==", "!="], relational_expression_not_in);
binary_op!(relational_expression, [">=", "<=", ">", "<", "instanceof", "in"], shift_expression);
binary_op!(relational_expression_not_in, [">=", "<=", ">", "<", "instanceof"], shift_expression);
binary_op!(shift_expression, [">>>", ">>", "<<"], additive_expression);
binary_op!(additive_expression, ["+", "-"], multiplicative_expression);
binary_op!(multiplicative_expression, ["*", "/", "%"], unary_expression);
// Prefix operators; longer tokens ("++", "--") are tried before "+"/"-".
named!(unary_expression<&str, Expression>, alt!(
do_parse!(
keyword!("delete") >>
expression: unary_expression >>
(Expression::DeleteExpression(DeleteExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("void") >>
expression: unary_expression >>
(Expression::VoidExpression(VoidExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("typeof") >>
expression: unary_expression >>
(Expression::TypeOfExpression(TypeOfExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("++") >>
expression: unary_expression >>
(Expression::PreIncrementExpression(PreIncrementExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("--") >>
expression: unary_expression >>
(Expression::PreDecrementExpression(PreDecrementExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("+") >>
expression: unary_expression >>
(Expression::UnaryPlusExpression(UnaryPlusExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("-") >>
expression: unary_expression >>
(Expression::UnaryMinusExpression(UnaryMinusExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("~") >>
expression: unary_expression >>
(Expression::TildeExpression(TildeExpression(Box::new(expression))))
)
|
do_parse!(
keyword!("!") >>
expression: unary_expression >>
(Expression::NotExpression(NotExpression(Box::new(expression))))
)
|
postfix_expression
));
// Postfix ++/-- may not be preceded by a line terminator (ECMAScript's
// restricted production); complete! keeps a bare operand at EOF from
// being reported as Incomplete.
named!(postfix_expression<&str, Expression>, alt!(
do_parse!(
expression: left_hand_side_expression >>
not!(line_terminator) >>
complete!(keyword!("++")) >>
(Expression::PostIncrementExpression(PostIncrementExpression(Box::new(expression))))
)
|
do_parse!(
expression: left_hand_side_expression >>
not!(line_terminator) >>
complete!(keyword!("--")) >>
(Expression::PostDecrementExpression(PostDecrementExpression(Box::new(expression))))
)
|
left_hand_side_expression
));
named!(pub left_hand_side_expression<&str, Expression>, alt!(
new_expression
|
call_expression
));
named!(new_expression<&str, Expression>, alt!(
do_parse!(
keyword!("new") >>
expression: new_expression >>
(Expression::NewExpression(NewExpression(Box::new(expression))))
)
|
member_expression
));
// base(args) followed by any chain of calls, [index] accesses, and .field
// accesses. Each suffix is encoded as a 3-tuple with exactly one Some;
// the fold closure matches on it to build the corresponding node.
#[allow(cyclomatic_complexity)]
fn call_expression<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> {
do_parse!(i,
first: do_parse!(
base: member_expression >>
complete!(keyword!("(")) >>
arguments: opt!(argument_list) >>
keyword!(")") >>
(Expression::CallExpression(CallExpression {
base: Box::new(base),
arguments: arguments,
}))
) >>
fold: fold_many0!(
alt!(
do_parse!(
keyword!("(") >>
arguments: opt!(argument_list) >>
keyword!(")") >>
(Some(arguments), None, None)
)
|
do_parse!(
keyword!("[") >>
expression: expression_list >>
keyword!("]") >>
(None, Some(expression), None)
)
|
do_parse!(
keyword!(".") >>
name: property_identifier >>
(None, None, Some(name))
)
),
first,
|acc: Expression<'a>, item: (Option<Option<ArgumentList<'a>>>, Option<Expression<'a>>, Option<&'a str>)| {
match item {
(Some(al), None, None) => Expression::CallExpression(CallExpression {
base: Box::new(acc),
arguments: al,
}),
(None, Some(e), None) => Expression::ArrayMemberExpression(ArrayMemberExpression {
base: Box::new(acc),
expression: Box::new(e),
}),
(None, None, Some(n)) => Expression::FieldMemberExpression(FieldMemberExpression {
base: Box::new(acc),
name: n,
}),
_ => panic!("call_expression is broken")
}
}
) >>
(fold)
)
}
// `new Base(args)`, or a primary/function expression followed by [index]
// and .field suffixes.
#[allow(cyclomatic_complexity)]
pub fn member_expression<'a>(i: &'a str) -> IResult<&'a str, Expression<'a>> {
alt!(i,
do_parse!(
keyword!("new") >>
base: member_expression >>
keyword!("(") >>
arguments: opt!(argument_list) >>
keyword!(")") >>
(Expression::NewMemberExpression(NewMemberExpression {
base: Box::new(base),
arguments: arguments,
}))
)
|
do_parse!(
first: alt!(
primary_expression
|
function_expression
) >>
fold: fold_many0!(
alt!(
do_parse!(
keyword!("[") >>
expression: expression_list >>
keyword!("]") >>
(Some(expression), None)
)
|
do_parse!(
keyword!(".") >>
name: property_identifier >>
(None, Some(name))
)
),
first,
|acc: Expression<'a>, item: (Option<Expression<'a>>, Option<&'a str>)| {
match item {
(Some(e), None) => {
Expression::ArrayMemberExpression(ArrayMemberExpression {
base: Box::new(acc),
expression: Box::new(e),
})
},
(None, Some(n)) => {
Expression::FieldMemberExpression(FieldMemberExpression {
base: Box::new(acc),
name: n,
})
},
_ => panic!("member_expression is broken"),
}
}
) >>
(fold)
)
)
}
named!(argument_list<&str, ArgumentList>, conv!(ArgumentList(separated_nonempty_list!(
keyword!(","),
assignment_expression
))));
// Keyword literals, array/object literals, parenthesised expressions,
// numeric/string literals, and plain identifiers (tried in that order).
named!(primary_expression<&str, Expression>, alt!(
do_parse!(
keyword!("this") >>
(Expression::ThisExpression(ThisExpression))
)
|
do_parse!(
keyword!("null") >>
(Expression::NullExpression(NullExpression))
)
|
do_parse!(
keyword!("true") >>
(Expression::TrueLiteral(TrueLiteral))
)
|
do_parse!(
keyword!("false") >>
(Expression::FalseLiteral(FalseLiteral))
)
|
do_parse!(
keyword!("[") >>
elements: opt!(do_parse!(
element_list: element_list >>
opt!(keyword!(",")) >>
(element_list)
)) >>
elision: opt!(elision) >>
keyword!("]") >>
(Expression::ArrayLiteral(ArrayLiteral {
elements: elements,
elision: elision,
}))
)
|
do_parse!(
keyword!("{") >>
properties: opt!(do_parse!(
property_assignment_list: property_assignment_list >>
opt!(keyword!(",")) >>
(property_assignment_list)
)) >>
keyword!("}") >>
(Expression::ObjectLiteral(ObjectLiteral {
properties: properties,
}))
)
|
do_parse!(
keyword!("(") >>
expression: expression_list >>
keyword!(")") >>
(Expression::NestedExpression(NestedExpression(Box::new(expression))))
)
|
conv!(Expression::NumericLiteral(numeric_literal))
|
conv!(Expression::StringLiteral(string_literal))
|
conv!(Expression::IdentifierExpression(conv!(IdentifierExpression(js_identifier))))
));
named!(element_list<&str, ElementList>, conv!(ElementList(separated_nonempty_list!(
keyword!(","),
do_parse!(
elision_opt: opt!(elision) >>
assignment_expression: assignment_expression >>
(Element {
elision: elision_opt,
expression: assignment_expression,
})
)
))));
// One or more consecutive commas; the count is the number of elided slots.
named!(elision<&str, Elision>, do_parse!(
vec: many1!(keyword!(",")) >>
(Elision(vec.len()))
));
named!(property_assignment_list<&str, PropertyAssignmentList>, conv!(PropertyAssignmentList(separated_nonempty_list!(
keyword!(","),
property_assignment
))));
// `get name() {..}`, `set name(formals) {..}`, or plain `name: value`.
named!(property_assignment<&str, PropertyAssignment>, alt!(
do_parse!(
keyword!("get") >>
name: property_name >>
keyword!("(") >>
keyword!(")") >>
keyword!("{") >>
function_body: opt!(function_body) >>
keyword!("}") >>
(PropertyAssignment::PropertyGetterSetter(PropertyGetterSetter {
name: name,
getter_setter_type: PropertyGetterSetterType::Getter,
formals: None,
function_body: function_body,
}))
)
|
do_parse!(
keyword!("set") >>
name: property_name >>
keyword!("(") >>
formals: opt!(formal_parameter_list) >>
keyword!(")") >>
keyword!("{") >>
function_body: opt!(function_body) >>
keyword!("}") >>
(PropertyAssignment::PropertyGetterSetter(PropertyGetterSetter {
name: name,
getter_setter_type: PropertyGetterSetterType::Setter,
formals: formals,
function_body: function_body,
}))
)
|
do_parse!(
name: property_name >>
keyword!(":") >>
value: assignment_expression >>
(PropertyAssignment::PropertyNameAndValue(PropertyNameAndValue {
name: name,
value: value,
}))
)
));
named!(property_name<&str, PropertyName>, alt!(
conv!(PropertyName::IdentifierPropertyName(conv!(IdentifierPropertyName(reserved_identifier))))
|
conv!(PropertyName::IdentifierPropertyName(conv!(IdentifierPropertyName(js_identifier))))
|
conv!(PropertyName::StringLiteralPropertyName(conv!(StringLiteralPropertyName(string_literal))))
|
conv!(PropertyName::NumericLiteralPropertyName(conv!(NumericLiteralPropertyName(numeric_literal))))
));
// `function [name](formals) { body }`; name, formals and body are all
// optional.
named!(function_expression<&str, Expression>, do_parse!(
keyword!("function") >>
name: opt!(js_identifier) >>
keyword!("(") >>
formals: opt!(formal_parameter_list) >>
keyword!(")") >>
keyword!("{") >>
body: opt!(function_body) >>
keyword!("}") >>
(Expression::FunctionExpression(FunctionExpression {
name: name,
formals: formals,
body: body,
}))
));
#[cfg(test)]
mod tests {
use nom::{ErrorKind, Needed};
use super::*;
#[test]
fn function_expression() {
assert_eq!(
super::function_expression(" function () {} "),
IResult::Done(" ", Expression::FunctionExpression(FunctionExpression {
name: None,
formals: None,
body: None,
}))
);
{
let name = "foo";
let formals = "bar, test";
let body = ";;";
let input = format!("function {}({}) {{{}}} ", name, formals, body);
assert_eq!(
super::function_expression(&input),
IResult::Done(" ", Expression::FunctionExpression(FunctionExpression {
name: Some(super::js_identifier(name).unwrap().1),
formals: Some(super::formal_parameter_list(formals).unwrap().1),
body: Some(super::function_body(body).unwrap().1),
}))
);
}
assert_eq!(super::function_expression(""), IResult::Incomplete(Needed::Size(8)));
assert_eq!(
super::function_expression("function () {{ "),
IResult::Incomplete(Needed::Size(16))
);
assert_eq!(
super::function_expression("function ( {{}} "),
IResult::Error(ErrorKind::Tag)
);
}
}
}
use gemlab::util::PI;
use gemlab::StrError;
use russell_lab::Vector;
/// Holds an optional boxed callback that fills `x` from natural
/// coordinates `ksi` using extra arguments of type `T`.
struct Processor<'a, T> {
// 'a lets the callback borrow data (e.g. a move-closure capture)
callback: Option<Box<dyn Fn(&mut Vector, &[f64], &T) -> Result<(), StrError> + 'a>>,
}
impl<'a, T> Processor<'a, T> {
    /// Installs `c` as the active callback, replacing any previous one.
    fn set_callback(&mut self, c: impl Fn(&mut Vector, &[f64], &T) -> Result<(), StrError> + 'a) {
        self.callback = Some(Box::new(c));
    }

    /// Invokes the stored callback (if any) with zeroed natural coordinates
    /// and a fresh 2D output vector, then prints the result.
    fn process_events(&self, args: &T) -> Result<(), StrError> {
        let natural = vec![0.0; 2];
        let mut x = Vector::new(2);
        if let Some(handler) = &self.callback {
            handler(&mut x, &natural, args)?;
        }
        println!("got x =\n{}", x);
        Ok(())
    }
}
/// Placeholder callback that always fails; used in `main` to demonstrate
/// error propagation through the boxed-callback machinery.
fn simple_callback<T>(_x: &mut Vector, _ksi: &[f64], _args: &T) -> Result<(), StrError> {
    Err("nothing to see here")
}
/// Bounds of a cylindrical region used to map natural coordinates
/// (each component in [-1, 1]) to real cylindrical coordinates.
pub struct NaturalToReal {
    pub rmin: f64, // minimum radius
    pub rmax: f64, // maximum radius
    pub amin: f64, // minimum angle (radians)
    pub amax: f64, // maximum angle (radians)
    pub zmin: f64, // minimum z (used only for 3-component vectors)
    pub zmax: f64, // maximum z (used only for 3-component vectors)
}
impl NaturalToReal {
    /// Returns the example region: radius 5..10, angle 30°..60°, z 0..1.
    pub fn new() -> Self {
        NaturalToReal {
            rmin: 5.0,
            rmax: 10.0,
            amin: 30.0 * PI / 180.0,
            amax: 60.0 * PI / 180.0,
            zmin: 0.0,
            zmax: 1.0,
        }
    }
}

// `new()` takes no arguments, so also provide `Default` (clippy's
// `new_without_default`); callers may now write `NaturalToReal::default()`.
impl Default for NaturalToReal {
    fn default() -> Self {
        Self::new()
    }
}
/// Maps natural coordinates `ksi` (each component in [-1, 1]) to real
/// cylindrical coordinates `x` inside the region described by `args`.
///
/// 2D fills `x = (r cos a, r sin a)`; a 3-component `x` additionally gets
/// `x[2]` interpolated between `zmin` and `zmax`.
///
/// # Errors
/// Returns an error if `x.dim() != ksi.len()`, or if fewer than two
/// coordinates are given (the mapping needs both a radius and an angle).
pub fn natural_to_real_cylindrical(x: &mut Vector, ksi: &[f64], args: &NaturalToReal) -> Result<(), StrError> {
    if x.dim() != ksi.len() {
        return Err("x.dim() must be equal to ksi.len()");
    }
    // Previously a 0- or 1-component input caused an out-of-bounds panic at
    // ksi[1] despite this function returning Result; fail cleanly instead.
    if ksi.len() < 2 {
        return Err("ksi must have at least 2 components");
    }
    const KSI_MIN: f64 = -1.0;
    const KSI_DEL: f64 = 2.0;
    // Linear interpolation from [-1, 1] to [min, max] for each coordinate.
    let r = args.rmin + (ksi[0] - KSI_MIN) * (args.rmax - args.rmin) / KSI_DEL;
    let a = args.amin + (ksi[1] - KSI_MIN) * (args.amax - args.amin) / KSI_DEL;
    x[0] = r * f64::cos(a);
    x[1] = r * f64::sin(a);
    if x.dim() == 3 {
        x[2] = args.zmin + (ksi[2] - KSI_MIN) * (args.zmax - args.zmin) / KSI_DEL;
    }
    Ok(())
}
fn main() -> Result<(), StrError> {
    // Start with the free function as the callback (it always errors).
    let mut p = Processor {
        callback: Some(Box::new(simple_callback)),
        // callback: None,
    };
    let args = NaturalToReal::new();
    // Expect Some("nothing to see here") from the failing callback.
    println!("{:?}", p.process_events(&args).err());
    let v = Vector::from(&[1.0, 2.0]);
    // Closure that moves `v` into itself — this is why Processor's boxed
    // callback needs the `'a` lifetime rather than a plain fn pointer.
    let my_fun = move |x: &mut Vector, ksi: &[f64], _: &NaturalToReal| {
        println!("using ksi = {:?}", ksi);
        x[0] = v[0];
        x[1] = v[1];
        Ok(())
    };
    // println!("v = {:?}", v);
    p.set_callback(my_fun);
    p.process_events(&args)?;
    // Plain fn items coerce to the callback type as well.
    p.set_callback(natural_to_real_cylindrical);
    p.process_events(&args)?;
    Ok(())
}
Remove temp file: study on callbacks
|
extern crate libc;
extern crate "termbox-sys" as termbox;
pub use self::running::running;
pub use self::style::{Style, RB_BOLD, RB_UNDERLINE, RB_REVERSE, RB_NORMAL};
use std::error::Error;
use std::fmt;
use std::kinds::marker;
use std::time::duration::Duration;
use termbox::RawEvent;
use libc::{c_int, c_uint};
/// Input events delivered by termbox.
#[deriving(Copy)]
pub enum Event {
    /// Raw (modifier, key, unicode char) fields from termbox's key event.
    KeyEvent(u8, u16, u32),
    /// New terminal (width, height) after a resize.
    ResizeEvent(i32, i32),
    /// Polling/peeking found nothing (e.g. peek timed out).
    NoEvent
}
/// Terminal colors understood by termbox; `Default` is the terminal's own
/// default color. Deriving `PartialEq` (free for a C-like enum) lets callers
/// compare colors directly, e.g. `fg != bg`.
#[deriving(Copy, PartialEq)]
#[repr(C,u16)]
pub enum Color {
    Default = 0x00,
    Black = 0x01,
    Red = 0x02,
    Green = 0x03,
    Yellow = 0x04,
    Blue = 0x05,
    Magenta = 0x06,
    Cyan = 0x07,
    White = 0x08,
}
/// Text attribute flags (bold/underline/reverse) packed alongside the color
/// into termbox's 16-bit fg/bg cell fields.
mod style {
    bitflags! {
        #[repr(C)]
        flags Style: u16 {
            const TB_NORMAL_COLOR = 0x000F, // low nibble carries the color index
            const RB_BOLD = 0x0100,
            const RB_UNDERLINE = 0x0200,
            const RB_REVERSE = 0x0400,
            const RB_NORMAL = 0x0000,
            // Mask of every attribute bit (everything but the color nibble).
            const TB_ATTRIB = RB_BOLD.bits | RB_UNDERLINE.bits | RB_REVERSE.bits,
        }
    }
    impl Style {
        /// Builds a Style carrying only the color bits of `color`.
        pub fn from_color(color: super::Color) -> Style {
            Style { bits: color as u16 & TB_NORMAL_COLOR.bits }
        }
    }
}
// Zeroed scratch event passed by pointer to tb_poll_event/tb_peek_event.
const NIL_RAW_EVENT: RawEvent = RawEvent { etype: 0, emod: 0, key: 0, ch: 0, w: 0, h: 0 };
// FIXME: Rust doesn't support this enum representation.
// #[deriving(Copy,FromPrimitive,Show)]
// #[repr(C,int)]
// pub enum EventErrorKind {
//     Error = -1,
// }
// pub type EventError = Option<EventErrorKind>;
// Stand-in for the enum sketched above: a unit struct inside a module so the
// path `EventErrorKind::Error` still reads like an enum variant.
#[allow(non_snake_case)]
pub mod EventErrorKind {
    #[deriving(Copy,Show)]
    pub struct Error;
}
/// `Some(Error)` — termbox returned -1; `None` — unrecognized return code.
pub type EventError = Option<EventErrorKind::Error>;
pub type EventResult<T> = Result<T, EventError>;
impl Error for EventError {
    fn description(&self) -> &str {
        match *self {
            // TODO: Check errno here
            Some(EventErrorKind::Error) => "Unknown error.",
            None => "Unexpected return code."
        }
    }
}
/// Translates a termbox return code plus the raw event struct it filled in
/// into our `Event` type.
fn unpack_event(ev_type: c_int, ev: &RawEvent) -> EventResult<Event> {
    match ev_type {
        0 => Ok(Event::NoEvent),
        1 => Ok(Event::KeyEvent(ev.emod, ev.key, ev.ch)),
        2 => Ok(Event::ResizeEvent(ev.w, ev.h)),
        // FIXME: Rust doesn't support this error representation
        // res => FromPrimitive::from_int(res as int),
        -1 => Err(Some(EventErrorKind::Error)),
        _ => Err(None)
    }
}
/// tb_init failure codes; values mirror termbox's C constants.
#[deriving(Copy,FromPrimitive,Show)]
#[repr(C,int)]
pub enum InitErrorKind {
    UnsupportedTerminal = -1,
    FailedToOpenTty = -2,
    PipeTrapError = -3,
}
/// Everything that can go wrong in `RustBox::init`.
pub enum InitError {
    /// A requested option failed; carries the option and an optional cause.
    Opt(InitOption, Option<Box<Error>>),
    AlreadyOpen,
    /// tb_init failed; `None` means an unrecognized return code.
    TermBox(Option<InitErrorKind>),
}
impl fmt::Show for InitError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}", self.description())
    }
}
impl Error for InitError {
    fn description(&self) -> &str {
        match *self {
            InitError::Opt(InitOption::BufferStderr, _) => "Could not redirect stderr.",
            InitError::AlreadyOpen => "RustBox is already open.",
            InitError::TermBox(e) => e.map_or("Unexpected TermBox return code.", |e| match e {
                InitErrorKind::UnsupportedTerminal => "Unsupported terminal.",
                InitErrorKind::FailedToOpenTty => "Failed to open TTY.",
                InitErrorKind::PipeTrapError => "Pipe trap error.",
            }),
        }
    }
    /// Underlying cause, when an option failure wrapped another error.
    fn cause(&self) -> Option<&Error> {
        match *self {
            InitError::Opt(_, Some(ref e)) => Some(&**e),
            _ => None
        }
    }
}
mod running {
    use std::sync::atomic::{mod, AtomicBool};
    // The state of the RustBox is protected by the lock. Yay, global state!
    static RUSTBOX_RUNNING: AtomicBool = atomic::INIT_ATOMIC_BOOL;
    /// true iff RustBox is currently running. Beware of races here--don't rely on this for anything
    /// critical unless you happen to know that RustBox cannot change state when it is called (a good
    /// usecase would be checking to see if it's worth risking double printing backtraces to avoid
    /// having them swallowed up by RustBox).
    pub fn running() -> bool {
        RUSTBOX_RUNNING.load(atomic::SeqCst)
    }
    // Internal RAII guard used to ensure we release the running lock whenever we acquire it.
    #[allow(missing_copy_implementations)]
    pub struct RunningGuard(());
    /// Tries to claim the global "running" flag; `None` if already claimed.
    pub fn run() -> Option<RunningGuard> {
        // Ensure that we are not already running and simultaneously set RUSTBOX_RUNNING using an
        // atomic swap. This ensures that contending threads don't trample each other.
        if RUSTBOX_RUNNING.swap(true, atomic::SeqCst) {
            // The Rustbox was already running.
            None
        } else {
            // The RustBox was not already running, and now we have the lock.
            Some(RunningGuard(()))
        }
    }
    impl Drop for RunningGuard {
        fn drop(&mut self) {
            // Indicate that we're free now. We could probably get away with lower atomicity here,
            // but there's no reason to take that chance.
            RUSTBOX_RUNNING.store(false, atomic::SeqCst);
        }
    }
}
// RAII guard for input redirection
#[cfg(unix)]
mod redirect {
    use std::error::Error;
    use libc;
    use std::io::{util, IoError, PipeStream};
    use std::io::pipe::PipePair;
    use std::os::unix::AsRawFd;
    use super::{InitError, InitOption, RustBox};
    /// Buffers a redirected stream (currently stderr) in a pipe while RustBox
    /// runs; the buffered data is replayed to the original fd on drop.
    pub struct Redirect {
        pair: PipePair,  // pipe whose write end has replaced the target fd
        fd: PipeStream,  // duplicate of the original stream being redirected
    }
    impl Drop for Redirect {
        fn drop(&mut self) {
            // We make sure that we never actually create the Redirect without also putting it in a
            // RustBox. This means that we know that this will always be dropped immediately after
            // the RustBox is destroyed. We rely on destructor order here: destructors are always
            // executed top-down, so as long as this is included above the RunningGuard in the
            // RustBox struct, we can be confident that it is destroyed while we're still holding
            // onto the lock.
            unsafe {
                let old_fd = self.pair.writer.as_raw_fd();
                let new_fd = self.fd.as_raw_fd();
                // Reopen new_fd as writer.
                // (Note that if we fail here, we can't really do anything about it, so just ignore any
                // errors).
                if libc::dup2(old_fd, new_fd) != new_fd { return }
            }
            // Copy from reader to writer.
            drop(util::copy(&mut self.pair.reader, &mut self.pair.writer));
        }
    }
    /// Swaps `new`'s fd for the write end of a fresh pipe, keeping a duplicate
    /// of the original fd so the stream can be restored (and buffered output
    /// replayed) when the returned Redirect is dropped.
    fn redirect(new: PipeStream) -> Result<Redirect, Option<Box<Error>>> {
        // Create a pipe pair.
        let mut pair = try!(PipeStream::pair().map_err( |e| Some(box e as Box<Error>)));
        unsafe {
            let new_fd = new.as_raw_fd();
            // Copy new_fd to dup_fd.
            let dup_fd = match libc::dup(new_fd) {
                -1 => return Err(Some(box IoError::last_error() as Box<Error>)),
                fd => try!(PipeStream::open(fd).map_err( |e| Some(box e as Box<Error>))),
            };
            // Reopen new_fd as writer.
            let old_fd = pair.writer.as_raw_fd();
            let fd = libc::dup2(old_fd, new_fd);
            if fd == new_fd {
                // On success, the new file descriptor should be returned. Replace the old one
                // with dup_fd, since we no longer need an explicit reference to the writer.
                pair.writer = dup_fd;
                Ok(Redirect {
                    pair: pair,
                    fd: new,
                })
            } else {
                Err(if fd == -1 { Some(box IoError::last_error() as Box<Error>) } else { None })
            }
        }
    }
    // The reason we take the rb reference is mostly to make sure we don't try to redirect before
    // the TermBox is set up. Otherwise it is too easy to leave the file handles in a bad state.
    /// Redirects stderr into the buffering pipe; errors if already redirected.
    pub fn redirect_stderr(rb: &mut RustBox) -> Result<(), InitError> {
        match rb.stderr {
            Some(_) => {
                // Can only redirect once.
                Err(InitError::Opt(InitOption::BufferStderr, None))
            },
            None => {
                rb.stderr = Some(try!(redirect(
                    try!(PipeStream::open(libc::STDERR_FILENO)
                        .map_err( |e| InitError::Opt(InitOption::BufferStderr,
                                                     Some(box e as Box<Error>)))))
                    .map_err( |e| InitError::Opt(InitOption::BufferStderr, e))));
                Ok(())
            }
        }
    }
}
#[cfg(not(unix))]
// Not sure how we'll do this on Windows, unimplemented for now.
mod redirect {
    // Uninhabited type: a non-unix Redirect can never actually be constructed.
    pub enum Redirect { }
    /// Always fails: stderr buffering is unsupported on this platform.
    pub fn redirect_stderr(_: &mut super::RustBox) -> Result<(), super::InitError> {
        Err(super::InitError::Opt(super::InitOption::BufferStderr, None))
    }
}
/// Handle to the (single) termbox session; dropping it shuts termbox down.
#[allow(missing_copy_implementations)]
pub struct RustBox {
    // Termbox is not thread safe
    no_sync: marker::NoSync,
    // We only bother to redirect stderr for the moment, since it's used for panic!
    stderr: Option<redirect::Redirect>,
    // RAII lock.
    //
    // Note that running *MUST* be the last field in the destructor, since destructors run in
    // top-down order. Otherwise it will not properly protect the above fields.
    _running: running::RunningGuard,
}
/// Options accepted by `RustBox::init`.
#[deriving(Copy,Show)]
pub enum InitOption {
    /// Use this option to automatically buffer stderr while RustBox is running. It will be
    /// written when RustBox exits.
    BufferStderr,
}
impl RustBox {
    /// Initializes termbox and returns the handle. Fails if another RustBox
    /// is already open, if tb_init fails, or an option cannot be applied.
    pub fn init(opts: &[Option<InitOption>]) -> Result<RustBox, InitError> {
        // Acquire RAII lock. This might seem like overkill, but it is easy to forget to release
        // it in the maze of error conditions below.
        let running = match running::run() {
            Some(r) => r,
            None => return Err(InitError::AlreadyOpen)
        };
        // Create the RustBox.
        let mut rb = unsafe {
            match termbox::tb_init() {
                0 => RustBox {
                    no_sync: marker::NoSync,
                    stderr: None,
                    _running: running,
                },
                res => {
                    return Err(InitError::TermBox(FromPrimitive::from_int(res as int)))
                }
            }
        };
        // Time to check our options.
        for opt in opts.iter().filter_map(|&opt| opt) {
            match opt {
                InitOption::BufferStderr => try!(redirect::redirect_stderr(&mut rb)),
            }
        }
        Ok(rb)
    }
    /// Current terminal width in cells.
    pub fn width(&self) -> uint {
        unsafe { termbox::tb_width() as uint }
    }
    /// Current terminal height in cells.
    pub fn height(&self) -> uint {
        unsafe { termbox::tb_height() as uint }
    }
    /// Clears the back buffer.
    pub fn clear(&self) {
        unsafe { termbox::tb_clear() }
    }
    /// Flushes the back buffer to the terminal.
    pub fn present(&self) {
        unsafe { termbox::tb_present() }
    }
    /// Moves the hardware cursor to cell (x, y).
    pub fn set_cursor(&self, x: int, y: int) {
        unsafe { termbox::tb_set_cursor(x as c_int, y as c_int) }
    }
    // Unsafe because u8 is not guaranteed to be a UTF-8 character
    pub unsafe fn change_cell(&self, x: uint, y: uint, ch: u32, fg: u16, bg: u16) {
        termbox::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg)
    }
    /// Writes `s` one cell per char starting at (x, y), merging the attribute
    /// bits of `sty` into the foreground.
    pub fn print(&self, x: uint, y: uint, sty: Style, fg: Color, bg: Color, s: &str) {
        let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
        let bg = Style::from_color(bg);
        for (i, ch) in s.chars().enumerate() {
            unsafe {
                self.change_cell(x+i, y, ch as u32, fg.bits(), bg.bits());
            }
        }
    }
    /// Single-character variant of `print`.
    pub fn print_char(&self, x: uint, y: uint, sty: Style, fg: Color, bg: Color, ch: char) {
        let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
        let bg = Style::from_color(bg);
        unsafe {
            self.change_cell(x, y, ch as u32, fg.bits(), bg.bits());
        }
    }
    /// Blocks until termbox delivers an event.
    pub fn poll_event(&self) -> EventResult<Event> {
        let ev = NIL_RAW_EVENT;
        let rc = unsafe {
            termbox::tb_poll_event(&ev as *const RawEvent)
        };
        unpack_event(rc, &ev)
    }
    /// Waits up to `timeout` for an event; `NoEvent` when none arrives.
    pub fn peek_event(&self, timeout: Duration) -> EventResult<Event> {
        let ev = NIL_RAW_EVENT;
        let rc = unsafe {
            termbox::tb_peek_event(&ev as *const RawEvent, timeout.num_milliseconds() as c_uint)
        };
        unpack_event(rc, &ev)
    }
}
impl Drop for RustBox {
    fn drop(&mut self) {
        // Since only one instance of the RustBox is ever accessible, we should not
        // need to do this atomically.
        // Note: we should definitely have RUSTBOX_RUNNING = true here.
        unsafe {
            // Restores the terminal; the `_running` guard is released after
            // this by field drop order.
            termbox::tb_shutdown();
        }
    }
}
Derive PartialEq for Color
Allows direct comparison of `Color` values, useful for tests like `cell.fg == cell.bg` or `oldcolor != newcolor`.
extern crate libc;
extern crate "termbox-sys" as termbox;
pub use self::running::running;
pub use self::style::{Style, RB_BOLD, RB_UNDERLINE, RB_REVERSE, RB_NORMAL};
use std::error::Error;
use std::fmt;
use std::kinds::marker;
use std::time::duration::Duration;
use termbox::RawEvent;
use libc::{c_int, c_uint};
/// Input events delivered by termbox.
#[deriving(Copy)]
pub enum Event {
    KeyEvent(u8, u16, u32),
    ResizeEvent(i32, i32),
    NoEvent
}
/// Terminal colors; `PartialEq` lets callers compare colors directly.
#[deriving(Copy, PartialEq)]
#[repr(C,u16)]
pub enum Color {
    Default = 0x00,
    Black = 0x01,
    Red = 0x02,
    Green = 0x03,
    Yellow = 0x04,
    Blue = 0x05,
    Magenta = 0x06,
    Cyan = 0x07,
    White = 0x08,
}
/// Attribute flags packed with the color into termbox's 16-bit cell fields.
mod style {
    bitflags! {
        #[repr(C)]
        flags Style: u16 {
            const TB_NORMAL_COLOR = 0x000F, // low nibble carries the color
            const RB_BOLD = 0x0100,
            const RB_UNDERLINE = 0x0200,
            const RB_REVERSE = 0x0400,
            const RB_NORMAL = 0x0000,
            const TB_ATTRIB = RB_BOLD.bits | RB_UNDERLINE.bits | RB_REVERSE.bits,
        }
    }
    impl Style {
        /// Style carrying only the color bits of `color`.
        pub fn from_color(color: super::Color) -> Style {
            Style { bits: color as u16 & TB_NORMAL_COLOR.bits }
        }
    }
}
// Zeroed scratch event passed by pointer to tb_poll_event/tb_peek_event.
const NIL_RAW_EVENT: RawEvent = RawEvent { etype: 0, emod: 0, key: 0, ch: 0, w: 0, h: 0 };
// FIXME: Rust doesn't support this enum representation.
// #[deriving(Copy,FromPrimitive,Show)]
// #[repr(C,int)]
// pub enum EventErrorKind {
//     Error = -1,
// }
// pub type EventError = Option<EventErrorKind>;
// Stand-in for the enum sketched above: a unit struct in a module so the path
// `EventErrorKind::Error` still reads like an enum variant.
#[allow(non_snake_case)]
pub mod EventErrorKind {
    #[deriving(Copy,Show)]
    pub struct Error;
}
/// `Some(Error)` — termbox returned -1; `None` — unrecognized return code.
pub type EventError = Option<EventErrorKind::Error>;
pub type EventResult<T> = Result<T, EventError>;
impl Error for EventError {
    fn description(&self) -> &str {
        match *self {
            // TODO: Check errno here
            Some(EventErrorKind::Error) => "Unknown error.",
            None => "Unexpected return code."
        }
    }
}
/// Maps a termbox return code plus raw event data onto our Event type.
fn unpack_event(ev_type: c_int, ev: &RawEvent) -> EventResult<Event> {
    match ev_type {
        0 => Ok(Event::NoEvent),
        1 => Ok(Event::KeyEvent(ev.emod, ev.key, ev.ch)),
        2 => Ok(Event::ResizeEvent(ev.w, ev.h)),
        // FIXME: Rust doesn't support this error representation
        // res => FromPrimitive::from_int(res as int),
        -1 => Err(Some(EventErrorKind::Error)),
        _ => Err(None)
    }
}
/// tb_init failure codes; values mirror termbox's C constants.
#[deriving(Copy,FromPrimitive,Show)]
#[repr(C,int)]
pub enum InitErrorKind {
    UnsupportedTerminal = -1,
    FailedToOpenTty = -2,
    PipeTrapError = -3,
}
/// Everything that can go wrong in `RustBox::init`.
pub enum InitError {
    Opt(InitOption, Option<Box<Error>>),
    AlreadyOpen,
    TermBox(Option<InitErrorKind>),
}
impl fmt::Show for InitError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "{}", self.description())
    }
}
impl Error for InitError {
    fn description(&self) -> &str {
        match *self {
            InitError::Opt(InitOption::BufferStderr, _) => "Could not redirect stderr.",
            InitError::AlreadyOpen => "RustBox is already open.",
            InitError::TermBox(e) => e.map_or("Unexpected TermBox return code.", |e| match e {
                InitErrorKind::UnsupportedTerminal => "Unsupported terminal.",
                InitErrorKind::FailedToOpenTty => "Failed to open TTY.",
                InitErrorKind::PipeTrapError => "Pipe trap error.",
            }),
        }
    }
    /// Underlying cause, when an option failure wrapped another error.
    fn cause(&self) -> Option<&Error> {
        match *self {
            InitError::Opt(_, Some(ref e)) => Some(&**e),
            _ => None
        }
    }
}
mod running {
    use std::sync::atomic::{mod, AtomicBool};
    // The state of the RustBox is protected by the lock. Yay, global state!
    static RUSTBOX_RUNNING: AtomicBool = atomic::INIT_ATOMIC_BOOL;
    /// true iff RustBox is currently running. Beware of races here--don't rely on this for anything
    /// critical unless you happen to know that RustBox cannot change state when it is called (a good
    /// usecase would be checking to see if it's worth risking double printing backtraces to avoid
    /// having them swallowed up by RustBox).
    pub fn running() -> bool {
        RUSTBOX_RUNNING.load(atomic::SeqCst)
    }
    // Internal RAII guard used to ensure we release the running lock whenever we acquire it.
    #[allow(missing_copy_implementations)]
    pub struct RunningGuard(());
    /// Tries to claim the global "running" flag; `None` if already claimed.
    pub fn run() -> Option<RunningGuard> {
        // Ensure that we are not already running and simultaneously set RUSTBOX_RUNNING using an
        // atomic swap. This ensures that contending threads don't trample each other.
        if RUSTBOX_RUNNING.swap(true, atomic::SeqCst) {
            // The Rustbox was already running.
            None
        } else {
            // The RustBox was not already running, and now we have the lock.
            Some(RunningGuard(()))
        }
    }
    impl Drop for RunningGuard {
        fn drop(&mut self) {
            // Indicate that we're free now. We could probably get away with lower atomicity here,
            // but there's no reason to take that chance.
            RUSTBOX_RUNNING.store(false, atomic::SeqCst);
        }
    }
}
// RAII guard for input redirection
#[cfg(unix)]
mod redirect {
    use std::error::Error;
    use libc;
    use std::io::{util, IoError, PipeStream};
    use std::io::pipe::PipePair;
    use std::os::unix::AsRawFd;
    use super::{InitError, InitOption, RustBox};
    /// Buffers a redirected stream (stderr) in a pipe while RustBox runs;
    /// the buffered data is replayed to the original fd on drop.
    pub struct Redirect {
        pair: PipePair,
        fd: PipeStream,
    }
    impl Drop for Redirect {
        fn drop(&mut self) {
            // We make sure that we never actually create the Redirect without also putting it in a
            // RustBox. This means that we know that this will always be dropped immediately after
            // the RustBox is destroyed. We rely on destructor order here: destructors are always
            // executed top-down, so as long as this is included above the RunningGuard in the
            // RustBox struct, we can be confident that it is destroyed while we're still holding
            // onto the lock.
            unsafe {
                let old_fd = self.pair.writer.as_raw_fd();
                let new_fd = self.fd.as_raw_fd();
                // Reopen new_fd as writer.
                // (Note that if we fail here, we can't really do anything about it, so just ignore any
                // errors).
                if libc::dup2(old_fd, new_fd) != new_fd { return }
            }
            // Copy from reader to writer.
            drop(util::copy(&mut self.pair.reader, &mut self.pair.writer));
        }
    }
    /// Swaps `new`'s fd for the write end of a fresh pipe, keeping a duplicate
    /// of the original fd so the stream can be restored later.
    fn redirect(new: PipeStream) -> Result<Redirect, Option<Box<Error>>> {
        // Create a pipe pair.
        let mut pair = try!(PipeStream::pair().map_err( |e| Some(box e as Box<Error>)));
        unsafe {
            let new_fd = new.as_raw_fd();
            // Copy new_fd to dup_fd.
            let dup_fd = match libc::dup(new_fd) {
                -1 => return Err(Some(box IoError::last_error() as Box<Error>)),
                fd => try!(PipeStream::open(fd).map_err( |e| Some(box e as Box<Error>))),
            };
            // Reopen new_fd as writer.
            let old_fd = pair.writer.as_raw_fd();
            let fd = libc::dup2(old_fd, new_fd);
            if fd == new_fd {
                // On success, the new file descriptor should be returned. Replace the old one
                // with dup_fd, since we no longer need an explicit reference to the writer.
                pair.writer = dup_fd;
                Ok(Redirect {
                    pair: pair,
                    fd: new,
                })
            } else {
                Err(if fd == -1 { Some(box IoError::last_error() as Box<Error>) } else { None })
            }
        }
    }
    // The reason we take the rb reference is mostly to make sure we don't try to redirect before
    // the TermBox is set up. Otherwise it is too easy to leave the file handles in a bad state.
    /// Redirects stderr into the buffering pipe; errors if already redirected.
    pub fn redirect_stderr(rb: &mut RustBox) -> Result<(), InitError> {
        match rb.stderr {
            Some(_) => {
                // Can only redirect once.
                Err(InitError::Opt(InitOption::BufferStderr, None))
            },
            None => {
                rb.stderr = Some(try!(redirect(
                    try!(PipeStream::open(libc::STDERR_FILENO)
                        .map_err( |e| InitError::Opt(InitOption::BufferStderr,
                                                     Some(box e as Box<Error>)))))
                    .map_err( |e| InitError::Opt(InitOption::BufferStderr, e))));
                Ok(())
            }
        }
    }
}
#[cfg(not(unix))]
// Not sure how we'll do this on Windows, unimplemented for now.
mod redirect {
    pub enum Redirect { }
    /// Always fails: stderr buffering is unsupported on this platform.
    pub fn redirect_stderr(_: &mut super::RustBox) -> Result<(), super::InitError> {
        Err(super::InitError::Opt(super::InitOption::BufferStderr, None))
    }
}
/// Handle to the (single) termbox session; dropping it shuts termbox down.
#[allow(missing_copy_implementations)]
pub struct RustBox {
    // Termbox is not thread safe
    no_sync: marker::NoSync,
    // We only bother to redirect stderr for the moment, since it's used for panic!
    stderr: Option<redirect::Redirect>,
    // RAII lock.
    //
    // Note that running *MUST* be the last field in the destructor, since destructors run in
    // top-down order. Otherwise it will not properly protect the above fields.
    _running: running::RunningGuard,
}
/// Options accepted by `RustBox::init`.
#[deriving(Copy,Show)]
pub enum InitOption {
    /// Use this option to automatically buffer stderr while RustBox is running. It will be
    /// written when RustBox exits.
    BufferStderr,
}
impl RustBox {
    /// Initializes termbox and returns the handle. Fails if another RustBox
    /// is already open, if tb_init fails, or an option cannot be applied.
    pub fn init(opts: &[Option<InitOption>]) -> Result<RustBox, InitError> {
        // Acquire RAII lock. This might seem like overkill, but it is easy to forget to release
        // it in the maze of error conditions below.
        let running = match running::run() {
            Some(r) => r,
            None => return Err(InitError::AlreadyOpen)
        };
        // Create the RustBox.
        let mut rb = unsafe {
            match termbox::tb_init() {
                0 => RustBox {
                    no_sync: marker::NoSync,
                    stderr: None,
                    _running: running,
                },
                res => {
                    return Err(InitError::TermBox(FromPrimitive::from_int(res as int)))
                }
            }
        };
        // Time to check our options.
        for opt in opts.iter().filter_map(|&opt| opt) {
            match opt {
                InitOption::BufferStderr => try!(redirect::redirect_stderr(&mut rb)),
            }
        }
        Ok(rb)
    }
    /// Current terminal width in cells.
    pub fn width(&self) -> uint {
        unsafe { termbox::tb_width() as uint }
    }
    /// Current terminal height in cells.
    pub fn height(&self) -> uint {
        unsafe { termbox::tb_height() as uint }
    }
    /// Clears the back buffer.
    pub fn clear(&self) {
        unsafe { termbox::tb_clear() }
    }
    /// Flushes the back buffer to the terminal.
    pub fn present(&self) {
        unsafe { termbox::tb_present() }
    }
    /// Moves the hardware cursor to cell (x, y).
    pub fn set_cursor(&self, x: int, y: int) {
        unsafe { termbox::tb_set_cursor(x as c_int, y as c_int) }
    }
    // Unsafe because u8 is not guaranteed to be a UTF-8 character
    pub unsafe fn change_cell(&self, x: uint, y: uint, ch: u32, fg: u16, bg: u16) {
        termbox::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg)
    }
    /// Writes `s` one cell per char starting at (x, y), merging the attribute
    /// bits of `sty` into the foreground.
    pub fn print(&self, x: uint, y: uint, sty: Style, fg: Color, bg: Color, s: &str) {
        let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
        let bg = Style::from_color(bg);
        for (i, ch) in s.chars().enumerate() {
            unsafe {
                self.change_cell(x+i, y, ch as u32, fg.bits(), bg.bits());
            }
        }
    }
    /// Single-character variant of `print`.
    pub fn print_char(&self, x: uint, y: uint, sty: Style, fg: Color, bg: Color, ch: char) {
        let fg = Style::from_color(fg) | (sty & style::TB_ATTRIB);
        let bg = Style::from_color(bg);
        unsafe {
            self.change_cell(x, y, ch as u32, fg.bits(), bg.bits());
        }
    }
    /// Blocks until termbox delivers an event.
    pub fn poll_event(&self) -> EventResult<Event> {
        let ev = NIL_RAW_EVENT;
        let rc = unsafe {
            termbox::tb_poll_event(&ev as *const RawEvent)
        };
        unpack_event(rc, &ev)
    }
    /// Waits up to `timeout` for an event; `NoEvent` when none arrives.
    pub fn peek_event(&self, timeout: Duration) -> EventResult<Event> {
        let ev = NIL_RAW_EVENT;
        let rc = unsafe {
            termbox::tb_peek_event(&ev as *const RawEvent, timeout.num_milliseconds() as c_uint)
        };
        unpack_event(rc, &ev)
    }
}
impl Drop for RustBox {
    fn drop(&mut self) {
        // Since only one instance of the RustBox is ever accessible, we should not
        // need to do this atomically.
        // Note: we should definitely have RUSTBOX_RUNNING = true here.
        unsafe {
            termbox::tb_shutdown();
        }
    }
}
|
/// Every token kind the scanner can produce (Lox grammar).
#[derive(Debug, PartialEq)]
pub enum Token {
    // Single-character punctuation and operators.
    LeftParen,
    RightParen,
    LeftBrace,
    RightBrace,
    Comma,
    Dot,
    Minus,
    Plus,
    Semicolon,
    Slash,
    Star,
    // One- or two-character operators.
    Bang,
    BangEqual,
    Equal,
    EqualEqual,
    Greater,
    GreaterEqual,
    Less,
    LessEqual,
    // Literals carry their decoded value.
    Identifier(String),
    StringLiteral(String),
    NumberLiteral(f64),
    // Keywords
    And,
    Class,
    Else,
    False,
    Fun,
    For,
    If,
    Nil,
    Or,
    Print,
    Return,
    Super,
    This,
    True,
    Var,
    While,
    Eof,
    /*
     * The book doesn't have tokens for comments and
     * whitespaces. Introducing them the scanner can
     * deal with them uniformly and in a more
     * functional way.
     */
    Comment,
    Whitespace,
}
/// A token plus the source context needed for error reporting.
#[derive(Debug)]
pub struct TokenWithContext {
    token: Token,
    // Exact source text the token was scanned from.
    lexeme: String, // TODO: make a reference
    // 1-indexed line (see add_context, which converts from 0-indexed).
    line: usize,
}
/// Cursor state for a single scan over `source`.
struct Scanner {
    start: usize,   // char index where the current lexeme begins
    current: usize, // char index of the next character to consume
    line: usize,    // 0-indexed current line
    source: String, // TODO: make a reference
}
/// True for the ASCII decimal digits '0'..='9'.
fn is_digit(c: char) -> bool {
    // Same range check as `c >= '0' && c <= '9'`, via the stdlib.
    c.is_ascii_digit()
}
/// True for identifier-start characters: ASCII letters and underscore.
fn is_alpha(c: char) -> bool {
    // Same as the manual a-z / A-Z range checks, via the stdlib.
    c.is_ascii_alphabetic() || c == '_'
}
/// True for identifier-continue characters: letters, underscore, or digits.
fn is_alphanumeric(c: char) -> bool {
    is_alpha(c) || is_digit(c)
}
impl Scanner {
    /// Creates a scanner positioned at the start of `source`.
    fn initialize(source: &String) -> Scanner {
        Scanner {
            start: 0,
            current: 0,
            line: 0,
            source: source.clone(),
        }
    }

    /// True once every character of the source has been consumed.
    /// Fixed: the cursor counts CHARS but was compared against
    /// `source.len()`, which counts BYTES; on non-ASCII input the cursor
    /// never reached that bound and `char_at` panicked.
    fn is_at_end(&self) -> bool {
        self.current >= self.source.chars().count()
    }

    /// Character at char-index `index`.
    /// NOTE(review): `chars().nth()` is O(index), making the full scan
    /// quadratic; acceptable for small inputs (see existing TODO).
    fn char_at(&self, index: usize) -> char {
        // TODO: there must be a better way
        self.source.chars().nth(index).unwrap()
    }

    /// Characters in the half-open char range [start, end).
    fn substring(&self, start: usize, end: usize) -> String {
        self.source.chars().skip(start).take(end - start).collect()
    }

    /// Current character without consuming it; '\0' at end of input.
    /// Fixed: the return type was written `char()`, which is invalid Rust
    /// (parenthesized type arguments are only allowed on `Fn` traits, E0214).
    fn peek(&self) -> char {
        if self.is_at_end() {
            '\0'
        } else {
            self.char_at(self.current)
        }
    }

    /// Character after the current one; '\0' past the end.
    /// Fixed: same `char()` return-type typo as `peek`, and the bound now
    /// counts chars, not bytes.
    fn peek_next(&self) -> char {
        if self.current + 1 >= self.source.chars().count() {
            '\0'
        } else {
            self.char_at(self.current + 1)
        }
    }

    /// Consumes and returns the current character.
    fn advance(&mut self) -> char {
        self.current += 1;
        self.char_at(self.current - 1)
    }

    /// Consumes the current character only if it equals `expected`.
    fn is_match(&mut self, expected: char) -> bool {
        if self.is_at_end() {
            return false;
        }
        if self.char_at(self.current) != expected {
            return false;
        }
        self.current += 1;
        true
    }

    /// Wraps `token` with its lexeme and a 1-indexed line number.
    fn add_context(&self, token: Token) -> TokenWithContext {
        TokenWithContext {
            token: token,
            lexeme: self.substring(self.start, self.current),
            line: self.line + 1, // Converts from 0-indexed to 1-indexed
        }
    }

    /// Scans a string literal (opening '"' already consumed). Newlines are
    /// allowed inside and bump the line counter.
    ///
    /// Returns Err for an unterminated string, naming the line it began on.
    fn string(&mut self) -> Result<Token, String> {
        let initial_line = self.line;
        while self.peek() != '"' && !self.is_at_end() {
            if self.peek() == '\n' {
                self.line += 1
            }
            self.advance();
        }
        if self.is_at_end() {
            return Err(format!("Unterminated string at line {}", initial_line));
        }
        self.advance();
        // Strip the surrounding quotes from the stored value.
        Ok(Token::StringLiteral(self.substring(self.start + 1, self.current - 1)))
    }

    /// Scans a number literal: digits with an optional fractional part. A
    /// trailing '.' with no digit after it is left unconsumed.
    fn number(&mut self) -> Token {
        while is_digit(self.peek()) {
            self.advance();
        }
        if self.peek() == '.' && is_digit(self.peek_next()) {
            self.advance(); // Consume the .
            while is_digit(self.peek()) {
                self.advance();
            }
        }
        let literal = self.substring(self.start, self.current);
        // The scanned text is digits[.digits], which always parses as f64.
        let value = literal.parse::<f64>().unwrap();
        Token::NumberLiteral(value)
    }

    /// Scans an identifier, then reclassifies it if it is a reserved word.
    fn identifier(&mut self) -> Token {
        while is_alphanumeric(self.peek()) {
            self.advance();
        }
        // TODO: take a ref in the first place
        match self.substring(self.start, self.current).as_ref() {
            "and" => Token::And,
            "class" => Token::Class,
            "else" => Token::Else,
            "false" => Token::False,
            "for" => Token::For,
            "fun" => Token::Fun,
            "if" => Token::If,
            "nil" => Token::Nil,
            "or" => Token::Or,
            "print" => Token::Print,
            "return" => Token::Return,
            "super" => Token::Super,
            "this" => Token::This,
            "true" => Token::True,
            "var" => Token::Var,
            "while" => Token::While,
            identifier => Token::Identifier(identifier.into()),
        }
    }

    /// Scans and returns the next token, including Comment/Whitespace trivia
    /// (the caller filters those out).
    fn scan_next(&mut self) -> Result<TokenWithContext, String> {
        self.start = self.current;
        let token = match self.advance() {
            '(' => Token::LeftParen,
            ')' => Token::RightParen,
            '{' => Token::LeftBrace,
            '}' => Token::RightBrace,
            ',' => Token::Comma,
            '.' => Token::Dot,
            '-' => Token::Minus,
            '+' => Token::Plus,
            ';' => Token::Semicolon,
            '*' => Token::Star,
            '!' => {
                if self.is_match('=') {
                    Token::BangEqual
                } else {
                    Token::Bang
                }
            }
            '=' => {
                if self.is_match('=') {
                    Token::EqualEqual
                } else {
                    Token::Equal
                }
            }
            '<' => {
                if self.is_match('=') {
                    Token::LessEqual
                } else {
                    Token::Less
                }
            }
            '>' => {
                if self.is_match('=') {
                    Token::GreaterEqual
                } else {
                    Token::Greater
                }
            }
            '/' => {
                if self.is_match('/') {
                    // Comments go on till the end of the line
                    while self.peek() != '\n' && !self.is_at_end() {
                        self.advance();
                    }
                    Token::Comment
                } else {
                    Token::Slash
                }
            }
            ' ' => Token::Whitespace,
            '\r' => Token::Whitespace,
            '\t' => Token::Whitespace,
            '\n' => {
                self.line += 1;
                Token::Whitespace
            }
            '"' => try!(self.string()),
            c if is_digit(c) => self.number(),
            c if is_alpha(c) => self.identifier(),
            c => {
                return Err(format!("Unexpected character {} at line {}, pos {}",
                                   c,
                                   self.line,
                                   self.current - 1));
            }
        };
        Ok(self.add_context(token))
    }
}
/// Tokenizes `source`, dropping Comment/Whitespace trivia, and appends a
/// final `Eof` token. Returns the first scanning error encountered, if any.
pub fn scan(source: &String) -> Result<Vec<TokenWithContext>, String> {
    let mut scanner = Scanner::initialize(source);
    let mut tokens = Vec::new();
    while !scanner.is_at_end() {
        let next = try!(scanner.scan_next());
        match next.token {
            // Ignoring tokens we don't care about
            Token::Comment | Token::Whitespace => (),
            _ => tokens.push(next),
        }
    }
    tokens.push(TokenWithContext {
        token: Token::Eof,
        lexeme: "".into(),
        line: scanner.line,
    });
    Ok(tokens)
}
#[cfg(test)]
mod tests {
    use scanner::*;
    #[test]
    fn single_token() {
        let tokens = scan(&"+".into()).unwrap();
        assert_eq!(tokens[0].token, Token::Plus);
    }
    #[test]
    fn expression() {
        let tokens = scan(&"1+2".into()).unwrap();
        assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[1].token, Token::Plus);
        assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64));
        assert_eq!(tokens[3].token, Token::Eof);
    }
    #[test]
    fn expression_with_whitespaces() {
        // Whitespace tokens are filtered by `scan`, so the stream matches the
        // no-spaces case exactly.
        let tokens = scan(&"1 + 2".into()).unwrap();
        assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[1].token, Token::Plus);
        assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64));
        assert_eq!(tokens[3].token, Token::Eof);
    }
    #[test]
    fn assignement_with_comment() {
        // The trailing // comment is scanned but dropped from the output.
        let tokens = scan(&"var a = 1.0; // A comment".into()).unwrap();
        assert_eq!(tokens[0].token, Token::Var);
        assert_eq!(tokens[1].token, Token::Identifier("a".into()));
        assert_eq!(tokens[2].token, Token::Equal);
        assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[4].token, Token::Semicolon);
        assert_eq!(tokens[5].token, Token::Eof);
    }
    #[test]
    fn multiline_statements(){
        let tokens = scan(&r#"var a = 1.0;
var b = "Hello";"#.into()).unwrap();
        assert_eq!(tokens[0].token, Token::Var);
        assert_eq!(tokens[1].token, Token::Identifier("a".into()));
        assert_eq!(tokens[2].token, Token::Equal);
        assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[4].token, Token::Semicolon);
        assert_eq!(tokens[5].token, Token::Var);
        assert_eq!(tokens[6].token, Token::Identifier("b".into()));
        assert_eq!(tokens[7].token, Token::Equal);
        assert_eq!(tokens[8].token, Token::StringLiteral("Hello".into()));
        assert_eq!(tokens[9].token, Token::Semicolon);
        assert_eq!(tokens[10].token, Token::Eof);
        // Reported line numbers are 1-indexed.
        assert_eq!(tokens[1].line, 1);
        assert_eq!(tokens[9].line, 2);
    }
}
Added is_whitespace and moved line count to advance
/// Every token kind the scanner can produce (Lox grammar).
#[derive(Debug, PartialEq)]
pub enum Token {
    // Single-character punctuation and operators.
    LeftParen,
    RightParen,
    LeftBrace,
    RightBrace,
    Comma,
    Dot,
    Minus,
    Plus,
    Semicolon,
    Slash,
    Star,
    // One- or two-character operators.
    Bang,
    BangEqual,
    Equal,
    EqualEqual,
    Greater,
    GreaterEqual,
    Less,
    LessEqual,
    // Literals carry their decoded value.
    Identifier(String),
    StringLiteral(String),
    NumberLiteral(f64),
    // Keywords
    And,
    Class,
    Else,
    False,
    Fun,
    For,
    If,
    Nil,
    Or,
    Print,
    Return,
    Super,
    This,
    True,
    Var,
    While,
    Eof,
    /*
     * The book doesn't have tokens for comments and
     * whitespaces. Introducing them the scanner can
     * deal with them uniformly and in a more
     * functional way.
     */
    Comment,
    Whitespace,
}
/// A token plus the source context needed for error reporting.
#[derive(Debug)]
pub struct TokenWithContext {
    token: Token,
    // Exact source text the token was scanned from.
    lexeme: String, // TODO: make a reference
    line: usize,
}
/// Cursor state for a single scan over `source`.
struct Scanner {
    start: usize,   // char index where the current lexeme begins
    current: usize, // char index of the next character to consume
    line: usize,    // 1-indexed current line (see initialize)
    source: String, // TODO: make a reference
}
/// True for the ASCII decimal digits '0'..='9'.
fn is_digit(c: char) -> bool {
    // Same range check as `c >= '0' && c <= '9'`, via the stdlib.
    c.is_ascii_digit()
}
/// True for identifier-start characters: ASCII letters and underscore.
fn is_alpha(c: char) -> bool {
    // Same as the manual a-z / A-Z range checks, via the stdlib.
    c.is_ascii_alphabetic() || c == '_'
}
/// True for identifier-continue characters: letters, underscore, or digits.
fn is_alphanumeric(c: char) -> bool {
    is_alpha(c) || is_digit(c)
}
/// True for the whitespace characters the scanner skips: space, CR, tab, LF.
fn is_whitespace(c: char) -> bool {
    // Merged the four identical `=> true` arms into one or-pattern.
    match c {
        ' ' | '\r' | '\t' | '\n' => true,
        _ => false,
    }
}
impl Scanner {
/// Creates a scanner positioned at the start of `source`.
fn initialize(source: &String) -> Scanner {
    Scanner {
        start: 0,
        current: 0,
        line: 1, // 1-indexed
        source: source.clone(),
    }
}
/// True once every character of the source has been consumed.
/// Fixed: the cursor counts CHARS but was compared against `source.len()`,
/// which counts BYTES; on non-ASCII input the cursor never reached that
/// bound and `char_at` panicked. Compare against the char count instead.
fn is_at_end(&self) -> bool {
    self.current >= self.source.chars().count()
}
/// Character at char-index `index`.
/// NOTE(review): `chars().nth()` is O(index), making the full scan
/// quadratic; acceptable for small inputs (see existing TODO).
fn char_at(&self, index: usize) -> char {
    // TODO: there must be a better way
    self.source.chars().nth(index).unwrap()
}
/// Characters in the half-open char range [start, end).
fn substring(&self, start: usize, end: usize) -> String {
    self.source.chars().skip(start).take(end - start).collect()
}
fn peek(&self) -> char() {
if self.is_at_end() {
'\0'
} else {
self.char_at(self.current)
}
}
fn peek_next(&self) -> char() {
if self.current + 1 >= self.source.len() {
'\0'
} else {
self.char_at(self.current + 1)
}
}
/// Consumes and returns the current character, bumping the line counter on
/// newlines so every caller gets line tracking for free.
fn advance(&mut self) -> char {
    self.current += 1;
    let c = self.char_at(self.current - 1);
    if c == '\n' {
        self.line += 1;
    }
    c
}
/// Consumes the current character only if it equals `expected`; reports
/// whether it did.
fn is_match(&mut self, expected: char) -> bool {
    if self.is_at_end() || self.char_at(self.current) != expected {
        false
    } else {
        self.current += 1;
        true
    }
}
fn add_context(&self, token: Token) -> TokenWithContext {
TokenWithContext {
token: token,
lexeme: self.substring(self.start, self.current),
line: self.line,
}
}
fn string(&mut self) -> Result<Token, String> {
let initial_line = self.line;
while self.peek() != '"' && !self.is_at_end() {
self.advance();
}
if self.is_at_end() {
return Err(format!("Unterminated string at line {}", initial_line));
}
self.advance();
Ok(Token::StringLiteral(self.substring(self.start + 1, self.current - 1)))
}
fn number(&mut self) -> Token {
while is_digit(self.peek()) {
self.advance();
}
if self.peek() == '.' && is_digit(self.peek_next()) {
self.advance(); // Consume the .
while is_digit(self.peek()) {
self.advance();
}
}
let literal = self.substring(self.start, self.current);
let value = literal.parse::<f64>().unwrap();
Token::NumberLiteral(value)
}
fn identifier(&mut self) -> Token {
while is_alphanumeric(self.peek()) {
self.advance();
}
// TODO: take a ref in the first place
match self.substring(self.start, self.current).as_ref() {
"and" => Token::And,
"class" => Token::Class,
"else" => Token::Else,
"false" => Token::False,
"for" => Token::For,
"fun" => Token::Fun,
"if" => Token::If,
"nil" => Token::Nil,
"or" => Token::Or,
"print" => Token::Print,
"return" => Token::Return,
"super" => Token::Super,
"this" => Token::This,
"true" => Token::True,
"var" => Token::Var,
"while" => Token::While,
identifier => Token::Identifier(identifier.into()),
}
}
fn scan_next(&mut self) -> Result<TokenWithContext, String> {
self.start = self.current;
let token = match self.advance() {
'(' => Token::LeftParen,
')' => Token::RightParen,
'{' => Token::LeftBrace,
'}' => Token::RightBrace,
',' => Token::Comma,
'.' => Token::Dot,
'-' => Token::Minus,
'+' => Token::Plus,
';' => Token::Semicolon,
'*' => Token::Star,
'!' => {
if self.is_match('=') {
Token::BangEqual
} else {
Token::Bang
}
}
'=' => {
if self.is_match('=') {
Token::EqualEqual
} else {
Token::Equal
}
}
'<' => {
if self.is_match('=') {
Token::LessEqual
} else {
Token::Less
}
}
'>' => {
if self.is_match('=') {
Token::GreaterEqual
} else {
Token::Greater
}
}
'/' => {
if self.is_match('/') {
// Comments go on till the end of the line
while self.peek() != '\n' && !self.is_at_end() {
self.advance();
}
Token::Comment
} else {
Token::Slash
}
},
'"' => try!(self.string()),
c if is_whitespace(c) => Token::Whitespace,
c if is_digit(c) => self.number(),
c if is_alpha(c) => self.identifier(),
c => {
return Err(format!("Unexpected character {} at line {}, pos {}",
c,
self.line,
self.current - 1));
}
};
Ok(self.add_context(token))
}
}
/// Tokenizes `source` into tokens with lexeme and line context.
/// Comment and whitespace tokens are dropped; a trailing `Eof` token is
/// always appended. Returns an error for malformed input such as an
/// unterminated string or an unexpected character.
pub fn scan(source: &String) -> Result<Vec<TokenWithContext>, String> {
    let mut scanner = Scanner::initialize(source);
    let mut tokens = Vec::new();
    while !scanner.is_at_end() {
        let scanned = try!(scanner.scan_next());
        // The parser never needs to see comments or whitespace.
        let keep = match scanned.token {
            Token::Comment | Token::Whitespace => false,
            _ => true,
        };
        if keep {
            tokens.push(scanned);
        }
    }
    tokens.push(TokenWithContext {
        token: Token::Eof,
        lexeme: "".into(),
        line: scanner.line,
    });
    Ok(tokens)
}
#[cfg(test)]
mod tests {
    use scanner::*;

    // A lone operator scans to its token (followed by the implicit Eof).
    #[test]
    fn single_token() {
        let tokens = scan(&"+".into()).unwrap();
        assert_eq!(tokens[0].token, Token::Plus);
    }

    #[test]
    fn expression() {
        let tokens = scan(&"1+2".into()).unwrap();
        assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[1].token, Token::Plus);
        assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64));
        assert_eq!(tokens[3].token, Token::Eof);
    }

    // Whitespace is filtered out, so indices match the test above.
    #[test]
    fn expression_with_whitespaces() {
        let tokens = scan(&"1 + 2".into()).unwrap();
        assert_eq!(tokens[0].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[1].token, Token::Plus);
        assert_eq!(tokens[2].token, Token::NumberLiteral(2.0f64));
        assert_eq!(tokens[3].token, Token::Eof);
    }

    // Comments are dropped entirely; Eof follows the semicolon.
    #[test]
    fn assignement_with_comment() {
        let tokens = scan(&"var a = 1.0; // A comment".into()).unwrap();
        assert_eq!(tokens[0].token, Token::Var);
        assert_eq!(tokens[1].token, Token::Identifier("a".into()));
        assert_eq!(tokens[2].token, Token::Equal);
        assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[4].token, Token::Semicolon);
        assert_eq!(tokens[5].token, Token::Eof);
    }

    // Line numbers are tracked across newlines.
    #[test]
    fn multiline_statements() {
        let tokens = scan(&r#"var a = 1.0;
var b = "Hello";"#.into()).unwrap();
        assert_eq!(tokens[0].token, Token::Var);
        assert_eq!(tokens[1].token, Token::Identifier("a".into()));
        assert_eq!(tokens[2].token, Token::Equal);
        assert_eq!(tokens[3].token, Token::NumberLiteral(1.0f64));
        assert_eq!(tokens[4].token, Token::Semicolon);
        assert_eq!(tokens[5].token, Token::Var);
        assert_eq!(tokens[6].token, Token::Identifier("b".into()));
        assert_eq!(tokens[7].token, Token::Equal);
        assert_eq!(tokens[8].token, Token::StringLiteral("Hello".into()));
        assert_eq!(tokens[9].token, Token::Semicolon);
        assert_eq!(tokens[10].token, Token::Eof);
        assert_eq!(tokens[1].line, 1);
        assert_eq!(tokens[9].line, 2);
    }
}
use error::Error;
use header::Header;
use relocation::Relocation;
use dynamic::Dynamic;
use symbol::Symbol;
use strtab::Strtab;
use types;
use std::io::{Read, Seek, SeekFrom, Write};
use std::io::BufWriter;
/// Parsed ELF section header (one entry of the section header table).
/// Fields mirror the ELF `sh_*` fields, sized for the 64-bit case.
#[derive(Default, Debug, Clone)]
pub struct SectionHeader {
    pub name: u32,   // sh_name: offset of the name in the section-name string table
    pub shtype: types::SectionType,
    pub flags: types::SectionFlags,
    pub addr: u64,   // sh_addr: virtual address when loaded
    pub offset: u64, // sh_offset: byte offset of the content within the file
    pub size: u64,   // sh_size: content size in bytes
    pub link: u32,   // sh_link: related section index (meaning depends on shtype)
    pub info: u32,   // sh_info: extra information (meaning depends on shtype)
    pub addralign: u64,
    pub entsize: u64, // sh_entsize: per-entry size for table-like sections
}
impl SectionHeader {
    /// On-disk size in bytes of one section header entry: two u32
    /// (name, type) + six class-sized words (flags, addr, offset, size,
    /// addralign, entsize) + two u32 (link, info).
    pub fn entsize(eh: &Header) -> usize {
        4 + 4 + match eh.ident_class {
            types::Class::Class64 => 6 * 8,
            types::Class::Class32 => 6 * 4,
        } + 4 + 4
    }

    /// Reads one section header from `io`, honoring the endianness and
    /// word size recorded in the ELF header `eh`.
    pub fn from_reader<R>(io: &mut R, eh: &Header) -> Result<SectionHeader, Error>
    where
        R: Read,
    {
        elf_dispatch_endianness!(eh => {
            let mut r = SectionHeader::default();
            r.name = read_u32(io)?;
            let reb = read_u32(io)?;
            r.shtype = types::SectionType(reb);
            elf_dispatch_uclass!(eh => {
                let reb = read_uclass(io)?;
                // Reject unknown flag bits instead of silently dropping them.
                r.flags = match types::SectionFlags::from_bits(reb) {
                    Some(v) => v,
                    None => return Err(Error::InvalidSectionFlags(reb)),
                };
                r.addr = read_uclass(io)?;
                r.offset = read_uclass(io)?;
                r.size = read_uclass(io)?;
                r.link = read_u32(io)?;
                r.info = read_u32(io)?;
                r.addralign = read_uclass(io)?;
                r.entsize = read_uclass(io)?;
                Ok(r)
            })
        })
    }

    /// Writes this section header to `io` in the exact layout read by
    /// `from_reader`.
    pub fn to_writer<R>(&self, eh: &Header, io: &mut R) -> Result<(), Error>
    where
        R: Write,
    {
        let mut w = BufWriter::new(io);
        elf_write_u32!(eh, w, self.name)?;
        elf_write_u32!(eh, w, self.shtype.to_u32())?;
        elf_write_uclass!(eh, w, self.flags.bits())?;
        elf_write_uclass!(eh, w, self.addr)?;
        elf_write_uclass!(eh, w, self.offset)?;
        elf_write_uclass!(eh, w, self.size)?;
        elf_write_u32!(eh, w, self.link)?;
        elf_write_u32!(eh, w, self.info)?;
        elf_write_uclass!(eh, w, self.addralign)?;
        elf_write_uclass!(eh, w, self.entsize)?;
        // BUGFIX: flush explicitly. BufWriter's Drop flushes too, but it
        // silently swallows any I/O error.
        w.flush()?;
        Ok(())
    }
}
/// In-memory representation of a section's payload.
#[derive(Debug, Clone)]
pub enum SectionContent {
    None,                         // no payload (NOBITS sections end up here)
    Unloaded,                     // header known, content not yet read from disk
    Raw(Vec<u8>),                 // uninterpreted bytes
    Relocations(Vec<Relocation>), // parsed RELA entries
    Symbols(Vec<Symbol>),         // parsed SYMTAB/DYNSYM entries
    Dynamic(Vec<Dynamic>),        // parsed DYNAMIC entries
    Strtab(Strtab),               // parsed string table
}
impl Default for SectionContent {
    /// Sections start out with no content.
    fn default() -> Self {
        SectionContent::None
    }
}
impl SectionContent {
    /// Mutable view of the dynamic entries, if this is `Dynamic` content.
    pub fn as_dynamic_mut(&mut self) -> Option<&mut Vec<Dynamic>> {
        match self {
            &mut SectionContent::Dynamic(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the string table, if this is `Strtab` content.
    pub fn as_strtab_mut(&mut self) -> Option<&mut Strtab> {
        match self {
            &mut SectionContent::Strtab(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the symbols, if this is `Symbols` content.
    pub fn as_symbols(&self) -> Option<&Vec<Symbol>> {
        match self {
            &SectionContent::Symbols(ref v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the symbols, if this is `Symbols` content.
    pub fn as_symbols_mut(&mut self) -> Option<&mut Vec<Symbol>> {
        match self {
            &mut SectionContent::Symbols(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the symbols if present.
    pub fn into_symbols(self) -> Option<Vec<Symbol>> {
        match self {
            SectionContent::Symbols(v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the relocations, if this is `Relocations` content.
    pub fn as_relocations(&self) -> Option<&Vec<Relocation>> {
        match self {
            &SectionContent::Relocations(ref v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the relocations, if this is `Relocations` content.
    pub fn as_relocations_mut(&mut self) -> Option<&mut Vec<Relocation>> {
        match self {
            &mut SectionContent::Relocations(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the relocations if present.
    pub fn into_relocations(self) -> Option<Vec<Relocation>> {
        match self {
            SectionContent::Relocations(v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the raw bytes, if this is `Raw` content.
    pub fn as_raw(&self) -> Option<&Vec<u8>> {
        match self {
            &SectionContent::Raw(ref v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the raw bytes, if this is `Raw` content.
    pub fn as_raw_mut(&mut self) -> Option<&mut Vec<u8>> {
        match self {
            &mut SectionContent::Raw(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the raw bytes if present.
    pub fn into_raw(self) -> Option<Vec<u8>> {
        match self {
            SectionContent::Raw(v) => Some(v),
            _ => None,
        }
    }
    /// Size in bytes this content will occupy on disk. Table-like contents
    /// multiply the entry count by the per-entry size, which depends on
    /// the ELF class recorded in `eh`.
    pub fn size(&self, eh: &Header) -> usize {
        match self {
            // Unloaded content has no known length; sizing it is a logic error.
            &SectionContent::Unloaded => panic!("cannot size unloaded section"),
            &SectionContent::None => 0,
            &SectionContent::Raw(ref v) => v.len(),
            &SectionContent::Dynamic(ref v) => v.len() * Dynamic::entsize(eh),
            &SectionContent::Strtab(ref v) => v.len(eh),
            &SectionContent::Symbols(ref v) => v.len() * Symbol::entsize(eh),
            &SectionContent::Relocations(ref v) => v.len() * Relocation::entsize(eh),
        }
    }
}
/// A complete section: header, name (raw bytes, not necessarily UTF-8),
/// typed content, and a layout flag.
#[derive(Debug, Default, Clone)]
pub struct Section {
    pub header: SectionHeader,
    pub name: Vec<u8>,
    pub content: SectionContent,
    pub addrlock: bool, // presumably pins `addr` during layout — confirm with layout code
}
impl Section {
    /// On-disk size in bytes of this section's content.
    pub fn size(&self, eh: &Header) -> usize {
        self.content.size(eh)
    }

    /// Creates a new section. Layout-dependent header fields (name offset,
    /// addr, offset, size, entsize) start at zero and are filled in later
    /// by `sync` and the layout code.
    pub fn new(
        name: Vec<u8>,
        shtype: types::SectionType,
        flags: types::SectionFlags,
        content: SectionContent,
        link: u32,
        info: u32,
    ) -> Section {
        Section {
            name: name,
            header: SectionHeader {
                name: 0,
                shtype: shtype,
                flags: flags,
                addr: 0,
                offset: 0,
                size: 0,
                link: link,
                info: info,
                addralign: 0,
                entsize: 0,
            },
            content: content,
            addrlock: false,
        }
    }

    /// Brings the header in sync with the content: recomputes `entsize`,
    /// `info` (for symbol tables) and `size`, and lets typed entries
    /// resolve references into the `linked` section (e.g. string offsets).
    pub fn sync(
        &mut self,
        eh: &Header,
        mut linked: Option<&mut SectionContent>,
    ) -> Result<(), Error> {
        match self.content {
            SectionContent::Unloaded => {
                return Err(Error::SyncingUnloadedSection);
            },
            SectionContent::Relocations(_) => {
                self.header.entsize = Relocation::entsize(eh) as u64;
            }
            SectionContent::Symbols(ref mut vv) => {
                // NOTE(review): ELF defines sh_info as the index of the first
                // non-local symbol; this records the first GLOBAL — confirm
                // locals are always sorted to the front here.
                for (i, sym) in vv.iter().enumerate() {
                    if sym.bind == types::SymbolBind::GLOBAL {
                        self.header.info = i as u32;
                        break;
                    }
                }
                for v in vv {
                    v.sync(linked.as_mut().map(|r| &mut **r), eh)?;
                }
                self.header.entsize = Symbol::entsize(eh) as u64;
            }
            SectionContent::Dynamic(ref mut vv) => {
                for v in vv {
                    v.sync(linked.as_mut().map(|r| &mut **r), eh)?;
                }
                self.header.entsize = Dynamic::entsize(eh) as u64;
            }
            SectionContent::Strtab(_) => {
                self.header.entsize = Strtab::entsize(eh) as u64;
            }
            SectionContent::None | SectionContent::Raw(_) => {}
        }
        // NOBITS (.bss-style) sections keep whatever size the header already
        // declares; everything else reports its actual content size.
        if self.header.shtype != types::SectionType::NOBITS {
            self.header.size = self.size(eh) as u64;
        }
        Ok(())
    }

    /// Loads and parses the section content from `io` according to the
    /// header's type, turning `Unloaded` into typed `SectionContent`.
    /// Does nothing when the content was already loaded.
    pub fn from_reader<T>(
        &mut self,
        mut io: T,
        linked: Option<&Section>,
        eh: &Header,
    ) -> Result<(), Error> where T: Read + Seek {
        match self.content {
            SectionContent::Unloaded => {},
            _ => return Ok(()),
        };
        if self.header.shtype == types::SectionType::NOBITS {
            // NOBITS sections occupy no file bytes.
            self.content = SectionContent::None;
            return Ok(());
        };
        io.seek(SeekFrom::Start(self.header.offset))?;
        let mut bb = vec![0; self.header.size as usize];
        io.read_exact(&mut bb)?;
        let linked = linked.map(|s|&s.content);
        self.content = match self.header.shtype {
            types::SectionType::NOBITS => {
                // Handled by the early return above.
                unreachable!();
            },
            types::SectionType::STRTAB => {
                let io = bb.as_slice();
                Strtab::from_reader(io, linked, eh)?
            }
            // NOTE(review): only RELA is parsed; plain REL sections fall
            // through to Raw below — confirm this is intentional.
            types::SectionType::RELA => {
                let io = bb.as_slice();
                Relocation::from_reader(io, linked, eh)?
            }
            types::SectionType::SYMTAB | types::SectionType::DYNSYM => {
                let io = bb.as_slice();
                Symbol::from_reader(io, linked, eh)?
            }
            types::SectionType::DYNAMIC => {
                let io = bb.as_slice();
                Dynamic::from_reader(io, linked, eh)?
            }
            _ => {
                SectionContent::Raw(bb)
            }
        };
        Ok(())
    }

    /// Serializes the content to `io` at the offset recorded in the header.
    /// Unloaded sections are skipped (their bytes are assumed to already be
    /// in place). Panics if the bytes written disagree with `content.size`.
    pub fn to_writer<R>(
        &self,
        mut io: R,
        eh: &Header,
    ) -> Result<(), Error> where R: Write + Seek {
        match self.content {
            SectionContent::Unloaded => return Ok(()),
            _ => {},
        };
        io.seek(SeekFrom::Start(self.header.offset))?;
        let rs = match &self.content {
            // Unreachable: handled by the early return above; kept for
            // exhaustiveness.
            &SectionContent::Unloaded => {
                return Err(Error::WritingUnloadedSection);
            },
            &SectionContent::Relocations(ref vv) => {
                let mut rs = 0;
                for v in vv {
                    rs += v.to_writer(&mut io, eh)?;
                }
                rs
            }
            &SectionContent::Symbols(ref vv) => {
                let mut rs = 0;
                for v in vv {
                    rs += v.to_writer(&mut io, eh)?;
                }
                rs
            }
            &SectionContent::Dynamic(ref vv) => {
                let mut rs = 0;
                for v in vv {
                    rs += v.to_writer(&mut io, eh)?;
                }
                rs
            }
            &SectionContent::Strtab(ref v) => {
                v.to_writer(&mut io, eh)?
            }
            &SectionContent::None => {
                0
            },
            &SectionContent::Raw(ref raw) => {
                // BUGFIX: `write` may write fewer bytes than requested;
                // `write_all` guarantees the whole buffer lands (or errors).
                io.write_all(raw)?;
                raw.len()
            }
        };
        // BUGFIX: the format arguments were swapped relative to the message
        // text (header.size vs content.size).
        assert_eq!(
            io.seek(SeekFrom::Current(0))?,
            self.header.offset + self.content.size(eh) as u64,
            "writing {} with header.size {} and content.size {} returned a written size {}",
            String::from_utf8_lossy(&self.name),
            self.header.size,
            self.content.size(eh),
            rs
        );
        Ok(())
    }
}
// Add more section conversions (as_dynamic / into_dynamic accessors).
use error::Error;
use header::Header;
use relocation::Relocation;
use dynamic::Dynamic;
use symbol::Symbol;
use strtab::Strtab;
use types;
use std::io::{Read, Seek, SeekFrom, Write};
use std::io::BufWriter;
/// Parsed ELF section header (one entry of the section header table).
/// Fields mirror the ELF `sh_*` fields, sized for the 64-bit case.
#[derive(Default, Debug, Clone)]
pub struct SectionHeader {
    pub name: u32,   // sh_name: offset of the name in the section-name string table
    pub shtype: types::SectionType,
    pub flags: types::SectionFlags,
    pub addr: u64,   // sh_addr: virtual address when loaded
    pub offset: u64, // sh_offset: byte offset of the content within the file
    pub size: u64,   // sh_size: content size in bytes
    pub link: u32,   // sh_link: related section index (meaning depends on shtype)
    pub info: u32,   // sh_info: extra information (meaning depends on shtype)
    pub addralign: u64,
    pub entsize: u64, // sh_entsize: per-entry size for table-like sections
}
impl SectionHeader {
    /// On-disk size in bytes of one section header entry: two u32
    /// (name, type) + six class-sized words (flags, addr, offset, size,
    /// addralign, entsize) + two u32 (link, info).
    pub fn entsize(eh: &Header) -> usize {
        4 + 4 + match eh.ident_class {
            types::Class::Class64 => 6 * 8,
            types::Class::Class32 => 6 * 4,
        } + 4 + 4
    }

    /// Reads one section header from `io`, honoring the endianness and
    /// word size recorded in the ELF header `eh`.
    pub fn from_reader<R>(io: &mut R, eh: &Header) -> Result<SectionHeader, Error>
    where
        R: Read,
    {
        elf_dispatch_endianness!(eh => {
            let mut r = SectionHeader::default();
            r.name = read_u32(io)?;
            let reb = read_u32(io)?;
            r.shtype = types::SectionType(reb);
            elf_dispatch_uclass!(eh => {
                let reb = read_uclass(io)?;
                // Reject unknown flag bits instead of silently dropping them.
                r.flags = match types::SectionFlags::from_bits(reb) {
                    Some(v) => v,
                    None => return Err(Error::InvalidSectionFlags(reb)),
                };
                r.addr = read_uclass(io)?;
                r.offset = read_uclass(io)?;
                r.size = read_uclass(io)?;
                r.link = read_u32(io)?;
                r.info = read_u32(io)?;
                r.addralign = read_uclass(io)?;
                r.entsize = read_uclass(io)?;
                Ok(r)
            })
        })
    }

    /// Writes this section header to `io` in the exact layout read by
    /// `from_reader`.
    pub fn to_writer<R>(&self, eh: &Header, io: &mut R) -> Result<(), Error>
    where
        R: Write,
    {
        let mut w = BufWriter::new(io);
        elf_write_u32!(eh, w, self.name)?;
        elf_write_u32!(eh, w, self.shtype.to_u32())?;
        elf_write_uclass!(eh, w, self.flags.bits())?;
        elf_write_uclass!(eh, w, self.addr)?;
        elf_write_uclass!(eh, w, self.offset)?;
        elf_write_uclass!(eh, w, self.size)?;
        elf_write_u32!(eh, w, self.link)?;
        elf_write_u32!(eh, w, self.info)?;
        elf_write_uclass!(eh, w, self.addralign)?;
        elf_write_uclass!(eh, w, self.entsize)?;
        // BUGFIX: flush explicitly. BufWriter's Drop flushes too, but it
        // silently swallows any I/O error.
        w.flush()?;
        Ok(())
    }
}
/// In-memory representation of a section's payload.
#[derive(Debug, Clone)]
pub enum SectionContent {
    None,                         // no payload (NOBITS sections end up here)
    Unloaded,                     // header known, content not yet read from disk
    Raw(Vec<u8>),                 // uninterpreted bytes
    Relocations(Vec<Relocation>), // parsed RELA entries
    Symbols(Vec<Symbol>),         // parsed SYMTAB/DYNSYM entries
    Dynamic(Vec<Dynamic>),        // parsed DYNAMIC entries
    Strtab(Strtab),               // parsed string table
}
impl Default for SectionContent {
    /// Sections start out with no content.
    fn default() -> Self {
        SectionContent::None
    }
}
impl SectionContent {
    /// Mutable view of the dynamic entries, if this is `Dynamic` content.
    pub fn as_dynamic_mut(&mut self) -> Option<&mut Vec<Dynamic>> {
        match self {
            &mut SectionContent::Dynamic(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the dynamic entries, if this is `Dynamic` content.
    pub fn as_dynamic(&self) -> Option<&Vec<Dynamic>> {
        match self {
            &SectionContent::Dynamic(ref v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the dynamic entries if present.
    pub fn into_dynamic(self) -> Option<Vec<Dynamic>> {
        match self {
            SectionContent::Dynamic(v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the string table, if this is `Strtab` content.
    pub fn as_strtab_mut(&mut self) -> Option<&mut Strtab> {
        match self {
            &mut SectionContent::Strtab(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the symbols, if this is `Symbols` content.
    pub fn as_symbols(&self) -> Option<&Vec<Symbol>> {
        match self {
            &SectionContent::Symbols(ref v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the symbols, if this is `Symbols` content.
    pub fn as_symbols_mut(&mut self) -> Option<&mut Vec<Symbol>> {
        match self {
            &mut SectionContent::Symbols(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the symbols if present.
    pub fn into_symbols(self) -> Option<Vec<Symbol>> {
        match self {
            SectionContent::Symbols(v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the relocations, if this is `Relocations` content.
    pub fn as_relocations(&self) -> Option<&Vec<Relocation>> {
        match self {
            &SectionContent::Relocations(ref v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the relocations, if this is `Relocations` content.
    pub fn as_relocations_mut(&mut self) -> Option<&mut Vec<Relocation>> {
        match self {
            &mut SectionContent::Relocations(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the relocations if present.
    pub fn into_relocations(self) -> Option<Vec<Relocation>> {
        match self {
            SectionContent::Relocations(v) => Some(v),
            _ => None,
        }
    }
    /// Shared view of the raw bytes, if this is `Raw` content.
    pub fn as_raw(&self) -> Option<&Vec<u8>> {
        match self {
            &SectionContent::Raw(ref v) => Some(v),
            _ => None,
        }
    }
    /// Mutable view of the raw bytes, if this is `Raw` content.
    pub fn as_raw_mut(&mut self) -> Option<&mut Vec<u8>> {
        match self {
            &mut SectionContent::Raw(ref mut v) => Some(v),
            _ => None,
        }
    }
    /// Consumes the content, yielding the raw bytes if present.
    pub fn into_raw(self) -> Option<Vec<u8>> {
        match self {
            SectionContent::Raw(v) => Some(v),
            _ => None,
        }
    }
    /// Size in bytes this content will occupy on disk. Table-like contents
    /// multiply the entry count by the per-entry size, which depends on
    /// the ELF class recorded in `eh`.
    pub fn size(&self, eh: &Header) -> usize {
        match self {
            // Unloaded content has no known length; sizing it is a logic error.
            &SectionContent::Unloaded => panic!("cannot size unloaded section"),
            &SectionContent::None => 0,
            &SectionContent::Raw(ref v) => v.len(),
            &SectionContent::Dynamic(ref v) => v.len() * Dynamic::entsize(eh),
            &SectionContent::Strtab(ref v) => v.len(eh),
            &SectionContent::Symbols(ref v) => v.len() * Symbol::entsize(eh),
            &SectionContent::Relocations(ref v) => v.len() * Relocation::entsize(eh),
        }
    }
}
/// A complete section: header, name (raw bytes, not necessarily UTF-8),
/// typed content, and a layout flag.
#[derive(Debug, Default, Clone)]
pub struct Section {
    pub header: SectionHeader,
    pub name: Vec<u8>,
    pub content: SectionContent,
    pub addrlock: bool, // presumably pins `addr` during layout — confirm with layout code
}
impl Section {
    /// On-disk size in bytes of this section's content.
    pub fn size(&self, eh: &Header) -> usize {
        self.content.size(eh)
    }

    /// Creates a new section. Layout-dependent header fields (name offset,
    /// addr, offset, size, entsize) start at zero and are filled in later
    /// by `sync` and the layout code.
    pub fn new(
        name: Vec<u8>,
        shtype: types::SectionType,
        flags: types::SectionFlags,
        content: SectionContent,
        link: u32,
        info: u32,
    ) -> Section {
        Section {
            name: name,
            header: SectionHeader {
                name: 0,
                shtype: shtype,
                flags: flags,
                addr: 0,
                offset: 0,
                size: 0,
                link: link,
                info: info,
                addralign: 0,
                entsize: 0,
            },
            content: content,
            addrlock: false,
        }
    }

    /// Brings the header in sync with the content: recomputes `entsize`,
    /// `info` (for symbol tables) and `size`, and lets typed entries
    /// resolve references into the `linked` section (e.g. string offsets).
    pub fn sync(
        &mut self,
        eh: &Header,
        mut linked: Option<&mut SectionContent>,
    ) -> Result<(), Error> {
        match self.content {
            SectionContent::Unloaded => {
                return Err(Error::SyncingUnloadedSection);
            },
            SectionContent::Relocations(_) => {
                self.header.entsize = Relocation::entsize(eh) as u64;
            }
            SectionContent::Symbols(ref mut vv) => {
                // NOTE(review): ELF defines sh_info as the index of the first
                // non-local symbol; this records the first GLOBAL — confirm
                // locals are always sorted to the front here.
                for (i, sym) in vv.iter().enumerate() {
                    if sym.bind == types::SymbolBind::GLOBAL {
                        self.header.info = i as u32;
                        break;
                    }
                }
                for v in vv {
                    v.sync(linked.as_mut().map(|r| &mut **r), eh)?;
                }
                self.header.entsize = Symbol::entsize(eh) as u64;
            }
            SectionContent::Dynamic(ref mut vv) => {
                for v in vv {
                    v.sync(linked.as_mut().map(|r| &mut **r), eh)?;
                }
                self.header.entsize = Dynamic::entsize(eh) as u64;
            }
            SectionContent::Strtab(_) => {
                self.header.entsize = Strtab::entsize(eh) as u64;
            }
            SectionContent::None | SectionContent::Raw(_) => {}
        }
        // NOBITS (.bss-style) sections keep whatever size the header already
        // declares; everything else reports its actual content size.
        if self.header.shtype != types::SectionType::NOBITS {
            self.header.size = self.size(eh) as u64;
        }
        Ok(())
    }

    /// Loads and parses the section content from `io` according to the
    /// header's type, turning `Unloaded` into typed `SectionContent`.
    /// Does nothing when the content was already loaded.
    pub fn from_reader<T>(
        &mut self,
        mut io: T,
        linked: Option<&Section>,
        eh: &Header,
    ) -> Result<(), Error> where T: Read + Seek {
        match self.content {
            SectionContent::Unloaded => {},
            _ => return Ok(()),
        };
        if self.header.shtype == types::SectionType::NOBITS {
            // NOBITS sections occupy no file bytes.
            self.content = SectionContent::None;
            return Ok(());
        };
        io.seek(SeekFrom::Start(self.header.offset))?;
        let mut bb = vec![0; self.header.size as usize];
        io.read_exact(&mut bb)?;
        let linked = linked.map(|s|&s.content);
        self.content = match self.header.shtype {
            types::SectionType::NOBITS => {
                // Handled by the early return above.
                unreachable!();
            },
            types::SectionType::STRTAB => {
                let io = bb.as_slice();
                Strtab::from_reader(io, linked, eh)?
            }
            // NOTE(review): only RELA is parsed; plain REL sections fall
            // through to Raw below — confirm this is intentional.
            types::SectionType::RELA => {
                let io = bb.as_slice();
                Relocation::from_reader(io, linked, eh)?
            }
            types::SectionType::SYMTAB | types::SectionType::DYNSYM => {
                let io = bb.as_slice();
                Symbol::from_reader(io, linked, eh)?
            }
            types::SectionType::DYNAMIC => {
                let io = bb.as_slice();
                Dynamic::from_reader(io, linked, eh)?
            }
            _ => {
                SectionContent::Raw(bb)
            }
        };
        Ok(())
    }

    /// Serializes the content to `io` at the offset recorded in the header.
    /// Unloaded sections are skipped (their bytes are assumed to already be
    /// in place). Panics if the bytes written disagree with `content.size`.
    pub fn to_writer<R>(
        &self,
        mut io: R,
        eh: &Header,
    ) -> Result<(), Error> where R: Write + Seek {
        match self.content {
            SectionContent::Unloaded => return Ok(()),
            _ => {},
        };
        io.seek(SeekFrom::Start(self.header.offset))?;
        let rs = match &self.content {
            // Unreachable: handled by the early return above; kept for
            // exhaustiveness.
            &SectionContent::Unloaded => {
                return Err(Error::WritingUnloadedSection);
            },
            &SectionContent::Relocations(ref vv) => {
                let mut rs = 0;
                for v in vv {
                    rs += v.to_writer(&mut io, eh)?;
                }
                rs
            }
            &SectionContent::Symbols(ref vv) => {
                let mut rs = 0;
                for v in vv {
                    rs += v.to_writer(&mut io, eh)?;
                }
                rs
            }
            &SectionContent::Dynamic(ref vv) => {
                let mut rs = 0;
                for v in vv {
                    rs += v.to_writer(&mut io, eh)?;
                }
                rs
            }
            &SectionContent::Strtab(ref v) => {
                v.to_writer(&mut io, eh)?
            }
            &SectionContent::None => {
                0
            },
            &SectionContent::Raw(ref raw) => {
                // BUGFIX: `write` may write fewer bytes than requested;
                // `write_all` guarantees the whole buffer lands (or errors).
                io.write_all(raw)?;
                raw.len()
            }
        };
        // BUGFIX: the format arguments were swapped relative to the message
        // text (header.size vs content.size).
        assert_eq!(
            io.seek(SeekFrom::Current(0))?,
            self.header.offset + self.content.size(eh) as u64,
            "writing {} with header.size {} and content.size {} returned a written size {}",
            String::from_utf8_lossy(&self.name),
            self.header.size,
            self.content.size(eh),
            rs
        );
        Ok(())
    }
}
|
// Copyright 2017-2018, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! Module that contains the basic infrastructure for subclassing `GObject`.
use ffi;
use gobject_ffi;
use std::marker;
use std::mem;
use std::ops;
use std::ptr;
use translate::*;
use wrapper::Wrapper;
use {IsA, IsClassFor, Object, StaticType, Type};
use super::object::ObjectImpl;
use object::ObjectExt;
/// A newly registered `glib::Type` that is currently still being initialized.
///
/// This allows running additional type-setup functions, e.g. for implementing
/// interfaces on the type.
#[derive(Debug, PartialEq, Eq)]
// The `PhantomData` ties this wrapper to the subclass being registered
// without storing a value of it.
pub struct InitializingType<T: ObjectSubclass>(Type, marker::PhantomData<T>);
/// Dereferences to the underlying `Type`, so an `InitializingType` can be
/// used wherever a plain type ID is expected during setup.
impl<T: ObjectSubclass> ops::Deref for InitializingType<T> {
    type Target = Type;
    fn deref(&self) -> &Type {
        &self.0
    }
}
/// Trait implemented by structs that implement a `GObject` C instance struct.
///
/// The struct must be `#[repr(C)]` and have the parent type's instance struct
/// as the first field.
///
/// See [`simple::InstanceStruct`] for a basic implementation of this that can
/// be used most of the time and should only not be used if additional fields are
/// required in the instance struct.
///
/// [`simple::InstanceStruct`]: ../simple/struct.InstanceStruct.html
pub unsafe trait InstanceStruct: Sized + 'static {
    /// Corresponding object subclass type for this instance struct.
    type Type: ObjectSubclass;

    /// Returns the implementation from this instance struct, that
    /// is the implementor of [`ObjectImpl`] or subtraits.
    ///
    /// [`ObjectImpl`]: ../object/trait.ObjectImpl.html
    fn get_impl(&self) -> &Self::Type {
        unsafe {
            let data = Self::Type::type_data();
            // The private struct lives `private_offset` bytes past the start
            // of the instance struct (the offset is fixed up in class_init).
            let private_offset = data.as_ref().private_offset;
            let ptr: *const u8 = self as *const _ as *const u8;
            let priv_ptr = ptr.offset(private_offset);
            let imp = priv_ptr as *const Option<Self::Type>;
            // `instance_init` stored `Some(..)` here; `None` would mean the
            // instance was never initialized (or was already finalized).
            (*imp).as_ref().expect("No private struct")
        }
    }

    /// Returns the class struct for this specific instance.
    fn get_class(&self) -> &<Self::Type as ObjectSubclass>::Class {
        // Relies on the instance struct starting with a pointer to its
        // class struct (the GTypeInstance layout).
        unsafe { &**(self as *const _ as *const *const <Self::Type as ObjectSubclass>::Class) }
    }
}
/// Trait implemented by structs that implement a `GObject` C class struct.
///
/// The struct must be `#[repr(C)]` and have the parent type's class struct
/// as the first field.
///
/// See [`simple::ClassStruct`] for a basic implementation of this that can
/// be used most of the time and should only not be used if additional fields are
/// required in the class struct, e.g. for declaring new virtual methods.
///
/// [`simple::ClassStruct`]: ../simple/struct.ClassStruct.html
pub unsafe trait ClassStruct: Sized + 'static {
    /// Corresponding object subclass type for this class struct.
    type Type: ObjectSubclass;

    /// Override the vfuncs of all parent types.
    ///
    /// This is automatically called during type initialization.
    fn override_vfuncs(&mut self)
    where
        <<Self::Type as ObjectSubclass>::ParentType as Wrapper>::RustClassType:
            IsSubclassable<Self::Type>,
    {
        unsafe {
            // Sound because the class struct is `#[repr(C)]` with the parent
            // class struct as its first field (see trait docs), so this cast
            // views the struct as the parent's class type.
            let base = &mut *(self as *mut _
                as *mut <<Self::Type as ObjectSubclass>::ParentType as Wrapper>::RustClassType);
            base.override_vfuncs();
        }
    }
}
/// Trait for subclassable class structs.
///
/// NOTE(review): presumably `unsafe` because implementations must only be
/// invoked on class structs that actually derive from this class — confirm.
pub unsafe trait IsSubclassable<T: ObjectSubclass>: IsClassFor {
    /// Override the virtual methods of this class for the given subclass.
    ///
    /// This is automatically called during type initialization.
    fn override_vfuncs(&mut self);
}
/// Type-specific data that is filled in during type creation.
pub struct TypeData {
    #[doc(hidden)]
    pub type_: Type, // registered type ID; `Type::Invalid` until registration ran
    #[doc(hidden)]
    pub parent_class: ffi::gpointer, // parent's class struct, used for chaining up
    #[doc(hidden)]
    pub interface_data: *const Vec<(ffi::GType, ffi::gpointer)>, // per-interface data, may be null
    #[doc(hidden)]
    pub private_offset: isize, // offset of the private struct within the instance
}
// SAFETY(review): TypeData holds a raw pointer (`interface_data`), but the
// struct appears to be written only during one-time type registration (guarded
// by `Once` in `glib_object_subclass!`) and read-only afterwards — TODO confirm.
unsafe impl Send for TypeData {}
unsafe impl Sync for TypeData {}
impl TypeData {
    /// Returns the type ID.
    pub fn get_type(&self) -> Type {
        self.type_
    }

    /// Returns a pointer to the native parent class.
    ///
    /// This is used for chaining up to the parent class' implementation
    /// of virtual methods.
    pub fn get_parent_class(&self) -> ffi::gpointer {
        self.parent_class
    }

    /// Returns a pointer to the interface implementation specific data.
    ///
    /// This is used for interface implementations to store additional data.
    /// Returns null when no data was registered for `type_`.
    pub fn get_interface_data(&self, type_: ffi::GType) -> ffi::gpointer {
        unsafe {
            if self.interface_data.is_null() {
                return ptr::null_mut();
            }
            // Linear scan is fine: the list of implemented interfaces is tiny.
            for &(t, p) in &(*self.interface_data) {
                if t == type_ {
                    return p;
                }
            }
            ptr::null_mut()
        }
    }

    /// Returns the offset of the private struct in bytes relative to the
    /// beginning of the instance struct.
    pub fn get_private_offset(&self) -> isize {
        self.private_offset
    }
}
#[macro_export]
/// Macro for boilerplate of [`ObjectSubclass`] implementations.
///
/// Expands to the `type_data()` and `get_type()` items of the trait.
///
/// [`ObjectSubclass`]: subclass/types/trait.ObjectSubclass.html
macro_rules! glib_object_subclass {
    () => {
        fn type_data() -> ::std::ptr::NonNull<$crate::subclass::TypeData> {
            // One static per expanding type; mutated only during registration.
            static mut DATA: $crate::subclass::TypeData = $crate::subclass::TypeData {
                type_: $crate::Type::Invalid,
                parent_class: ::std::ptr::null_mut(),
                interface_data: ::std::ptr::null_mut(),
                private_offset: 0,
            };
            unsafe { ::std::ptr::NonNull::new_unchecked(&mut DATA) }
        }
        fn get_type() -> $crate::Type {
            // `Once` guarantees registration runs exactly once, even with
            // concurrent callers.
            static ONCE: ::std::sync::Once = ::std::sync::Once::new();
            ONCE.call_once(|| {
                $crate::subclass::register_type::<Self>();
            });
            unsafe {
                let data = Self::type_data();
                let type_ = data.as_ref().get_type();
                // Registration must have filled in a valid type by now.
                assert_ne!(type_, $crate::Type::Invalid);
                type_
            }
        }
    };
}
/// The central trait for subclassing a `GObject` type.
///
/// Links together the type name, parent type and the instance and
/// class structs for type registration and allows subclasses to
/// hook into various steps of the type registration and initialization.
///
/// See [`register_type`] for registering an implementation of this trait
/// with the type system.
///
/// [`register_type`]: fn.register_type.html
pub trait ObjectSubclass: ObjectImpl + Sized + 'static {
    /// `GObject` type name.
    ///
    /// This must be unique in the whole process.
    const NAME: &'static str;

    /// Parent Rust type to inherit from.
    type ParentType: IsA<Object>
        + FromGlibPtrBorrow<*mut <Self::ParentType as Wrapper>::GlibType>
        + FromGlibPtrNone<*mut <Self::ParentType as Wrapper>::GlibType>;

    /// The C instance struct.
    ///
    /// See [`simple::InstanceStruct`] for a basic instance struct that should be
    /// used in most cases.
    ///
    /// [`simple::InstanceStruct`]: ../simple/struct.InstanceStruct.html
    // TODO: Should default to simple::InstanceStruct<Self> once associated
    // type defaults are stabilized https://github.com/rust-lang/rust/issues/29661
    type Instance: InstanceStruct<Type = Self>;

    /// The C class struct.
    ///
    /// See [`simple::ClassStruct`] for a basic class struct that should be
    /// used in most cases.
    ///
    /// [`simple::ClassStruct`]: ../simple/struct.ClassStruct.html
    // TODO: Should default to simple::ClassStruct<Self> once associated
    // type defaults are stabilized https://github.com/rust-lang/rust/issues/29661
    type Class: ClassStruct<Type = Self>;

    /// Storage for the type-specific data used during registration.
    ///
    /// This is usually generated by the [`glib_object_subclass!`] macro.
    ///
    /// [`glib_object_subclass!`]: ../../macro.glib_object_subclass.html
    fn type_data() -> ptr::NonNull<TypeData>;

    /// Returns the `glib::Type` ID of the subclass.
    ///
    /// This will register the type with the type system on the first call and is usually generated
    /// by the [`glib_object_subclass!`] macro.
    ///
    /// [`glib_object_subclass!`]: ../../macro.glib_object_subclass.html
    fn get_type() -> Type;

    /// Returns the corresponding object instance.
    fn get_instance(&self) -> Self::ParentType {
        unsafe {
            let data = Self::type_data();
            let type_ = data.as_ref().get_type();
            assert_ne!(type_, Type::Invalid);
            // The private struct sits `private_offset` bytes after the start
            // of the instance, so stepping back by that offset recovers the
            // underlying GObject pointer.
            // NOTE(review): assumes `Self` sits at offset 0 inside the stored
            // `Option<Self>` — TODO confirm this layout guarantee.
            let offset = -data.as_ref().private_offset;
            assert_ne!(offset, 0);
            let ptr = self as *const Self as *const u8;
            let ptr = ptr.offset(offset);
            let ptr = ptr as *mut u8 as *mut <Self::ParentType as Wrapper>::GlibType;
            from_glib_none(ptr)
        }
    }

    /// Returns the implementation from an instance.
    ///
    /// Panics if called on an object of the wrong type.
    fn from_instance<T: IsA<Self::ParentType> + IsA<::object::Object>>(obj: &T) -> &Self {
        unsafe {
            let data = Self::type_data();
            let type_ = data.as_ref().get_type();
            assert_ne!(type_, Type::Invalid);
            // Guard against casting an unrelated object type.
            assert!(obj.get_type().is_a(&type_));
            let ptr: *mut gobject_ffi::GObject = obj.to_glib_none().0;
            let ptr = ptr as *const Self::Instance;
            (*ptr).get_impl()
        }
    }

    /// Additional type initialization.
    ///
    /// This is called right after the type was registered and allows
    /// subclasses to do additional type-specific initialization, e.g.
    /// for implementing `GObject` interfaces.
    ///
    /// Optional
    fn type_init(_type_: &InitializingType<Self>) {}

    /// Class initialization.
    ///
    /// This is called after `type_init` and before the first instance
    /// of the subclass is created. Subclasses can use this to do class-
    /// specific initialization, e.g. for installing properties or signals
    /// on the class or calling class methods.
    ///
    /// Optional
    fn class_init(_klass: &mut Self::Class) {}

    /// Constructor.
    ///
    /// This is called during object instantiation before further subclasses
    /// are initialized, and should return a new instance of the subclass
    /// private struct.
    fn new() -> Self;

    /// Constructor.
    ///
    /// This is called during object instantiation before further subclasses
    /// are initialized, and should return a new instance of the subclass
    /// private struct.
    ///
    /// Different to `new()` above it also gets the class of this type passed
    /// to itself for providing additional context.
    ///
    /// Optional, calls `new()` by default.
    fn new_with_class(_klass: &Self::Class) -> Self {
        Self::new()
    }
}
/// `GTypeInfo.class_init` trampoline: fixes up the private-struct offset,
/// installs the `finalize` trampoline, records the parent class pointer for
/// chaining up, and finally runs the subclass' vfunc overrides and
/// `class_init`.
unsafe extern "C" fn class_init<T: ObjectSubclass>(klass: ffi::gpointer, _klass_data: ffi::gpointer)
where
    <<T as ObjectSubclass>::ParentType as Wrapper>::RustClassType: IsSubclassable<T>,
{
    let mut data = T::type_data();
    // We have to update the private struct offset once the class is actually
    // being initialized.
    {
        let mut private_offset = data.as_ref().private_offset as i32;
        gobject_ffi::g_type_class_adjust_private_offset(klass, &mut private_offset);
        (*data.as_mut()).private_offset = private_offset as isize;
    }
    // Set trampolines for the basic GObject virtual methods.
    {
        let gobject_klass = &mut *(klass as *mut gobject_ffi::GObjectClass);
        gobject_klass.finalize = Some(finalize::<T>);
    }
    // And finally peek the parent class struct (containing the parent class'
    // implementations of virtual methods for chaining up), and call the subclass'
    // class initialization function.
    {
        let klass = &mut *(klass as *mut T::Class);
        let parent_class = gobject_ffi::g_type_class_peek_parent(klass as *mut _ as ffi::gpointer)
            as *mut <T::ParentType as Wrapper>::GlibClassType;
        assert!(!parent_class.is_null());
        (*data.as_mut()).parent_class = parent_class as ffi::gpointer;
        klass.override_vfuncs();
        T::class_init(klass);
    }
}
/// `GTypeInfo.instance_init` trampoline: constructs the subclass' private
/// struct via `new_with_class` and stores it at the private-struct offset
/// inside the freshly allocated instance.
unsafe extern "C" fn instance_init<T: ObjectSubclass>(
    obj: *mut gobject_ffi::GTypeInstance,
    klass: ffi::gpointer,
) {
    glib_floating_reference_guard!(obj);
    // Get offset to the storage of our private struct, create it
    // and actually store it in that place.
    let mut data = T::type_data();
    let private_offset = (*data.as_mut()).private_offset;
    let ptr: *mut u8 = obj as *mut _ as *mut u8;
    let priv_ptr = ptr.offset(private_offset);
    let imp_storage = priv_ptr as *mut Option<T>;
    let klass = &*(klass as *const T::Class);
    let imp = T::new_with_class(klass);
    // `ptr::write` avoids running a destructor on the uninitialized storage.
    ptr::write(imp_storage, Some(imp));
}
// `GObjectClass.finalize` trampoline installed by `class_init` above:
// destroys the Rust private struct, then chains up.
unsafe extern "C" fn finalize<T: ObjectSubclass>(obj: *mut gobject_ffi::GObject) {
// Retrieve the private struct, take it out of its storage and
// drop it for freeing all associated memory.
let mut data = T::type_data();
let private_offset = (*data.as_mut()).private_offset;
let ptr: *mut u8 = obj as *mut _ as *mut u8;
let priv_ptr = ptr.offset(private_offset);
let imp_storage = priv_ptr as *mut Option<T>;
// `take()` leaves `None` behind so a double-finalize would panic loudly
// instead of double-dropping.
let imp = (*imp_storage).take().expect("No private struct");
drop(imp);
// Chain up to the parent class' finalize implementation, if any.
let parent_class = &*(data.as_ref().get_parent_class() as *const gobject_ffi::GObjectClass);
if let Some(ref func) = parent_class.finalize {
func(obj);
}
}
/// Register a `glib::Type` ID for `T`.
///
/// This must be called only once and will panic on a second call.
///
/// The [`glib_object_subclass!`] macro will create a `get_type()` function around this, which will
/// ensure that it's only ever called once.
///
/// [`glib_object_subclass!`]: ../../macro.glib_object_subclass.html
pub fn register_type<T: ObjectSubclass>() -> Type
where
<<T as ObjectSubclass>::ParentType as Wrapper>::RustClassType: IsSubclassable<T>,
{
unsafe {
use std::ffi::CString;
// NOTE(review): GTypeInfo stores sizes as guint16, so class/instance
// structs larger than 64 KiB would silently truncate here.
let type_info = gobject_ffi::GTypeInfo {
class_size: mem::size_of::<T::Class>() as u16,
base_init: None,
base_finalize: None,
class_init: Some(class_init::<T>),
class_finalize: None,
class_data: ptr::null_mut(),
instance_size: mem::size_of::<T::Instance>() as u16,
n_preallocs: 0,
instance_init: Some(instance_init::<T>),
value_table: ptr::null(),
};
let type_name = CString::new(T::NAME).unwrap();
// Panic if a type with this name already exists, i.e. on a second call.
assert_eq!(
gobject_ffi::g_type_from_name(type_name.as_ptr()),
gobject_ffi::G_TYPE_INVALID
);
let type_ = from_glib(gobject_ffi::g_type_register_static(
<T::ParentType as StaticType>::static_type().to_glib(),
type_name.as_ptr(),
&type_info,
0,
));
let mut data = T::type_data();
(*data.as_mut()).type_ = type_;
// Reserve space for the `Option<T>` private struct; the returned offset
// is re-adjusted later in `class_init`.
let private_offset =
gobject_ffi::g_type_add_instance_private(type_.to_glib(), mem::size_of::<Option<T>>());
(*data.as_mut()).private_offset = private_offset as isize;
T::type_init(&InitializingType::<T>(type_, marker::PhantomData));
type_
}
}
Implement ToGlib for InitializingType
// Copyright 2017-2018, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! Module that contains the basic infrastructure for subclassing `GObject`.
use ffi;
use gobject_ffi;
use std::marker;
use std::mem;
use std::ops;
use std::ptr;
use translate::*;
use wrapper::Wrapper;
use {IsA, IsClassFor, Object, StaticType, Type};
use super::object::ObjectImpl;
use object::ObjectExt;
/// A newly registered `glib::Type` that is currently still being initialized.
///
/// This allows running additional type-setup functions, e.g. for implementing
/// interfaces on the type.
#[derive(Debug, PartialEq, Eq)]
pub struct InitializingType<T: ObjectSubclass>(Type, marker::PhantomData<T>);
// Deref to the wrapped `Type` so all of its methods are directly usable.
impl<T: ObjectSubclass> ops::Deref for InitializingType<T> {
type Target = Type;
fn deref(&self) -> &Type {
&self.0
}
}
// Allow passing an `InitializingType` straight to FFI functions that expect
// a raw `GType`.
impl<T: ObjectSubclass> ToGlib for InitializingType<T> {
type GlibType = ffi::GType;
fn to_glib(&self) -> ffi::GType {
self.0.to_glib()
}
}
/// Trait implemented by structs that implement a `GObject` C instance struct.
///
/// The struct must be `#[repr(C)]` and have the parent type's instance struct
/// as the first field.
///
/// See [`simple::InstanceStruct`] for a basic implementation of this that can
/// be used most of the time and should only not be used if additional fields are
/// required in the instance struct.
///
/// [`simple::InstanceStruct`]: ../simple/struct.InstanceStruct.html
pub unsafe trait InstanceStruct: Sized + 'static {
/// Corresponding object subclass type for this instance struct.
type Type: ObjectSubclass;
/// Returns the implementation for from this instance struct, that
/// is the implementor of [`ObjectImpl`] or subtraits.
///
/// [`ObjectImpl`]: ../object/trait.ObjectImpl.html
fn get_impl(&self) -> &Self::Type {
unsafe {
let data = Self::Type::type_data();
// The private `Option<Type>` lives `private_offset` bytes past the
// start of the instance struct (written there by `instance_init`).
let private_offset = data.as_ref().private_offset;
let ptr: *const u8 = self as *const _ as *const u8;
let priv_ptr = ptr.offset(private_offset);
let imp = priv_ptr as *const Option<Self::Type>;
(*imp).as_ref().expect("No private struct")
}
}
/// Returns the class struct for this specific instance.
fn get_class(&self) -> &<Self::Type as ObjectSubclass>::Class {
// The first pointer-sized field of a GTypeInstance is its class pointer,
// so a double indirection through `self` yields the class struct.
unsafe { &**(self as *const _ as *const *const <Self::Type as ObjectSubclass>::Class) }
}
}
/// Trait implemented by structs that implement a `GObject` C class struct.
///
/// The struct must be `#[repr(C)]` and have the parent type's class struct
/// as the first field.
///
/// See [`simple::ClassStruct`] for a basic implementation of this that can
/// be used most of the time and should only not be used if additional fields are
/// required in the class struct, e.g. for declaring new virtual methods.
///
/// [`simple::ClassStruct`]: ../simple/struct.ClassStruct.html
pub unsafe trait ClassStruct: Sized + 'static {
/// Corresponding object subclass type for this class struct.
type Type: ObjectSubclass;
/// Override the vfuncs of all parent types.
///
/// This is automatically called during type initialization.
fn override_vfuncs(&mut self)
where
<<Self::Type as ObjectSubclass>::ParentType as Wrapper>::RustClassType:
IsSubclassable<Self::Type>,
{
unsafe {
// Reinterpret as the parent's Rust class type (valid because the
// parent class struct is required to be the first field) and let
// the parent's `IsSubclassable` impl install the overrides.
let base = &mut *(self as *mut _
as *mut <<Self::Type as ObjectSubclass>::ParentType as Wrapper>::RustClassType);
base.override_vfuncs();
}
}
}
/// Trait for subclassable class structs.
///
/// Marked `unsafe` because implementors vouch that the class struct layout
/// allows the reinterpreting casts performed in `ClassStruct::override_vfuncs`.
pub unsafe trait IsSubclassable<T: ObjectSubclass>: IsClassFor {
/// Override the virtual methods of this class for the given subclass.
///
/// This is automatically called during type initialization.
fn override_vfuncs(&mut self);
}
/// Type-specific data that is filled in during type creation.
pub struct TypeData {
#[doc(hidden)]
pub type_: Type,
#[doc(hidden)]
pub parent_class: ffi::gpointer,
#[doc(hidden)]
pub interface_data: *const Vec<(ffi::GType, ffi::gpointer)>,
#[doc(hidden)]
pub private_offset: isize,
}
// SAFETY(review): the raw pointers here appear to be written only during
// one-time type registration/class initialization and read afterwards —
// presumably that makes cross-thread sharing sound; confirm against the
// registration code paths before relying on it.
unsafe impl Send for TypeData {}
unsafe impl Sync for TypeData {}
impl TypeData {
/// Returns the type ID.
pub fn get_type(&self) -> Type {
self.type_
}
/// Returns a pointer to the native parent class.
///
/// This is used for chaining up to the parent class' implementation
/// of virtual methods.
pub fn get_parent_class(&self) -> ffi::gpointer {
self.parent_class
}
/// Returns a pointer to the interface implementation specific data.
///
/// This is used for interface implementations to store additional data.
pub fn get_interface_data(&self, type_: ffi::GType) -> ffi::gpointer {
unsafe {
if self.interface_data.is_null() {
return ptr::null_mut();
}
// Linear scan over the (GType, data) pairs; returns null when the
// requested interface type was never registered.
for &(t, p) in &(*self.interface_data) {
if t == type_ {
return p;
}
}
ptr::null_mut()
}
}
/// Returns the offset of the private struct in bytes relative to the
/// beginning of the instance struct.
pub fn get_private_offset(&self) -> isize {
self.private_offset
}
}
#[macro_export]
/// Macro for boilerplate of [`ObjectSubclass`] implementations.
///
/// Expands to `type_data()` and `get_type()` implementations inside an
/// `ObjectSubclass` impl block.
///
/// [`ObjectSubclass`]: subclass/types/trait.ObjectSubclass.html
macro_rules! glib_object_subclass {
() => {
fn type_data() -> ::std::ptr::NonNull<$crate::subclass::TypeData> {
// One static per implementor: the macro expands inside each
// subclass' impl, so every type gets its own storage.
static mut DATA: $crate::subclass::TypeData = $crate::subclass::TypeData {
type_: $crate::Type::Invalid,
parent_class: ::std::ptr::null_mut(),
interface_data: ::std::ptr::null_mut(),
private_offset: 0,
};
unsafe { ::std::ptr::NonNull::new_unchecked(&mut DATA) }
}
fn get_type() -> $crate::Type {
// `Once` guarantees `register_type` runs exactly once, which is
// what makes its "panics on second call" contract safe to rely on.
static ONCE: ::std::sync::Once = ::std::sync::Once::new();
ONCE.call_once(|| {
$crate::subclass::register_type::<Self>();
});
unsafe {
let data = Self::type_data();
let type_ = data.as_ref().get_type();
assert_ne!(type_, $crate::Type::Invalid);
type_
}
}
};
}
/// The central trait for subclassing a `GObject` type.
///
/// Links together the type name, parent type and the instance and
/// class structs for type registration and allows subclasses to
/// hook into various steps of the type registration and initialization.
///
/// See [`register_type`] for registering an implementation of this trait
/// with the type system.
///
/// [`register_type`]: fn.register_type.html
pub trait ObjectSubclass: ObjectImpl + Sized + 'static {
/// `GObject` type name.
///
/// This must be unique in the whole process.
const NAME: &'static str;
/// Parent Rust type to inherit from.
type ParentType: IsA<Object>
+ FromGlibPtrBorrow<*mut <Self::ParentType as Wrapper>::GlibType>
+ FromGlibPtrNone<*mut <Self::ParentType as Wrapper>::GlibType>;
/// The C instance struct.
///
/// See [`simple::InstanceStruct`] for an basic instance struct that should be
/// used in most cases.
///
/// [`simple::InstanceStruct`]: ../simple/struct.InstanceStruct.html
// TODO: Should default to simple::InstanceStruct<Self> once associated
// type defaults are stabilized https://github.com/rust-lang/rust/issues/29661
type Instance: InstanceStruct<Type = Self>;
/// The C class struct.
///
/// See [`simple::ClassStruct`] for an basic instance struct that should be
/// used in most cases.
///
/// [`simple::ClassStruct`]: ../simple/struct.ClassStruct.html
// TODO: Should default to simple::ClassStruct<Self> once associated
// type defaults are stabilized https://github.com/rust-lang/rust/issues/29661
type Class: ClassStruct<Type = Self>;
/// Storage for the type-specific data used during registration.
///
/// This is usually generated by the [`glib_object_subclass!`] macro.
///
/// [`glib_object_subclass!`]: ../../macro.glib_object_subclass.html
fn type_data() -> ptr::NonNull<TypeData>;
/// Returns the `glib::Type` ID of the subclass.
///
/// This will register the type with the type system on the first call and is usually generated
/// by the [`glib_object_subclass!`] macro.
///
/// [`glib_object_subclass!`]: ../../macro.glib_object_subclass.html
fn get_type() -> Type;
/// Returns the corresponding object instance.
fn get_instance(&self) -> Self::ParentType {
unsafe {
let data = Self::type_data();
let type_ = data.as_ref().get_type();
assert_ne!(type_, Type::Invalid);
// `self` is the private struct stored `private_offset` bytes past
// the start of the instance struct, so walking *backwards* by that
// offset recovers the GObject instance pointer.
let offset = -data.as_ref().private_offset;
assert_ne!(offset, 0);
let ptr = self as *const Self as *const u8;
let ptr = ptr.offset(offset);
let ptr = ptr as *mut u8 as *mut <Self::ParentType as Wrapper>::GlibType;
// Takes a new reference on the underlying object.
from_glib_none(ptr)
}
}
/// Returns the implementation from an instance.
///
/// Panics if called on an object of the wrong type.
fn from_instance<T: IsA<Self::ParentType> + IsA<::object::Object>>(obj: &T) -> &Self {
unsafe {
let data = Self::type_data();
let type_ = data.as_ref().get_type();
assert_ne!(type_, Type::Invalid);
// Runtime type check: panic instead of reinterpreting an unrelated
// object's memory.
assert!(obj.get_type().is_a(&type_));
let ptr: *mut gobject_ffi::GObject = obj.to_glib_none().0;
let ptr = ptr as *const Self::Instance;
(*ptr).get_impl()
}
}
/// Additional type initialization.
///
/// This is called right after the type was registered and allows
/// subclasses to do additional type-specific initialization, e.g.
/// for implementing `GObject` interfaces.
///
/// Optional
fn type_init(_type_: &InitializingType<Self>) {}
/// Class initialization.
///
/// This is called after `type_init` and before the first instance
/// of the subclass is created. Subclasses can use this to do class-
/// specific initialization, e.g. for installing properties or signals
/// on the class or calling class methods.
///
/// Optional
fn class_init(_klass: &mut Self::Class) {}
/// Constructor.
///
/// This is called during object instantiation before further subclasses
/// are initialized, and should return a new instance of the subclass
/// private struct.
fn new() -> Self;
/// Constructor.
///
/// This is called during object instantiation before further subclasses
/// are initialized, and should return a new instance of the subclass
/// private struct.
///
/// Different to `new()` above it also gets the class of this type passed
/// to itself for providing additional context.
///
/// Optional, calls `new()` by default.
fn new_with_class(_klass: &Self::Class) -> Self {
Self::new()
}
}
// `GTypeInfo.class_init` trampoline: runs exactly once per class, before the
// first instance of `T` is created. `klass` points at the freshly allocated
// class struct for the registered type.
unsafe extern "C" fn class_init<T: ObjectSubclass>(klass: ffi::gpointer, _klass_data: ffi::gpointer)
where
<<T as ObjectSubclass>::ParentType as Wrapper>::RustClassType: IsSubclassable<T>,
{
let mut data = T::type_data();
// We have to update the private struct offset once the class is actually
// being initialized.
{
// g_type_class_adjust_private_offset operates on a 32-bit offset in place;
// store the adjusted value back into the shared TypeData.
let mut private_offset = data.as_ref().private_offset as i32;
gobject_ffi::g_type_class_adjust_private_offset(klass, &mut private_offset);
(*data.as_mut()).private_offset = private_offset as isize;
}
// Set trampolines for the basic GObject virtual methods.
{
let gobject_klass = &mut *(klass as *mut gobject_ffi::GObjectClass);
gobject_klass.finalize = Some(finalize::<T>);
}
// And finally peek the parent class struct (containing the parent class'
// implementations of virtual methods for chaining up), and call the subclass'
// class initialization function.
{
let klass = &mut *(klass as *mut T::Class);
let parent_class = gobject_ffi::g_type_class_peek_parent(klass as *mut _ as ffi::gpointer)
as *mut <T::ParentType as Wrapper>::GlibClassType;
// A type registered with a valid parent always has a parent class here.
assert!(!parent_class.is_null());
(*data.as_mut()).parent_class = parent_class as ffi::gpointer;
klass.override_vfuncs();
T::class_init(klass);
}
}
// `GTypeInfo.instance_init` trampoline: runs for every newly allocated
// instance of `T`, constructing the Rust private struct in place.
unsafe extern "C" fn instance_init<T: ObjectSubclass>(
obj: *mut gobject_ffi::GTypeInstance,
klass: ffi::gpointer,
) {
glib_floating_reference_guard!(obj);
// Get offset to the storage of our private struct, create it
// and actually store it in that place.
let mut data = T::type_data();
let private_offset = (*data.as_mut()).private_offset;
let ptr: *mut u8 = obj as *mut _ as *mut u8;
let priv_ptr = ptr.offset(private_offset);
// The private area holds an `Option<T>` so `finalize` can `take()` it out.
let imp_storage = priv_ptr as *mut Option<T>;
let klass = &*(klass as *const T::Class);
let imp = T::new_with_class(klass);
// `ptr::write` avoids dropping the uninitialized memory currently there.
ptr::write(imp_storage, Some(imp));
}
// `GObjectClass.finalize` trampoline installed by `class_init` above:
// destroys the Rust private struct, then chains up.
unsafe extern "C" fn finalize<T: ObjectSubclass>(obj: *mut gobject_ffi::GObject) {
// Retrieve the private struct, take it out of its storage and
// drop it for freeing all associated memory.
let mut data = T::type_data();
let private_offset = (*data.as_mut()).private_offset;
let ptr: *mut u8 = obj as *mut _ as *mut u8;
let priv_ptr = ptr.offset(private_offset);
let imp_storage = priv_ptr as *mut Option<T>;
// `take()` leaves `None` behind so a double-finalize would panic loudly
// instead of double-dropping.
let imp = (*imp_storage).take().expect("No private struct");
drop(imp);
// Chain up to the parent class' finalize implementation, if any.
let parent_class = &*(data.as_ref().get_parent_class() as *const gobject_ffi::GObjectClass);
if let Some(ref func) = parent_class.finalize {
func(obj);
}
}
/// Register a `glib::Type` ID for `T`.
///
/// This must be called only once and will panic on a second call.
///
/// The [`glib_object_subclass!`] macro will create a `get_type()` function around this, which will
/// ensure that it's only ever called once.
///
/// [`glib_object_subclass!`]: ../../macro.glib_object_subclass.html
pub fn register_type<T: ObjectSubclass>() -> Type
where
<<T as ObjectSubclass>::ParentType as Wrapper>::RustClassType: IsSubclassable<T>,
{
unsafe {
use std::ffi::CString;
// NOTE(review): GTypeInfo stores sizes as guint16, so class/instance
// structs larger than 64 KiB would silently truncate here.
let type_info = gobject_ffi::GTypeInfo {
class_size: mem::size_of::<T::Class>() as u16,
base_init: None,
base_finalize: None,
class_init: Some(class_init::<T>),
class_finalize: None,
class_data: ptr::null_mut(),
instance_size: mem::size_of::<T::Instance>() as u16,
n_preallocs: 0,
instance_init: Some(instance_init::<T>),
value_table: ptr::null(),
};
let type_name = CString::new(T::NAME).unwrap();
// Panic if a type with this name already exists, i.e. on a second call.
assert_eq!(
gobject_ffi::g_type_from_name(type_name.as_ptr()),
gobject_ffi::G_TYPE_INVALID
);
let type_ = from_glib(gobject_ffi::g_type_register_static(
<T::ParentType as StaticType>::static_type().to_glib(),
type_name.as_ptr(),
&type_info,
0,
));
let mut data = T::type_data();
(*data.as_mut()).type_ = type_;
// Reserve space for the `Option<T>` private struct; the returned offset
// is re-adjusted later in `class_init`.
let private_offset =
gobject_ffi::g_type_add_instance_private(type_.to_glib(), mem::size_of::<Option<T>>());
(*data.as_mut()).private_offset = private_offset as isize;
T::type_init(&InitializingType::<T>(type_, marker::PhantomData));
type_
}
}
|
// Integration tests for podio's endian-aware read/write extension traits.
extern crate podio;

use std::io;

use podio::{LittleEndian, BigEndian};
use podio::{ReadPodExt, WritePodExt};

// Big-endian writes must emit the most-significant byte first.
#[test]
fn write_be() {
    let buf: &mut [u8] = &mut [0u8; 8];
    let mut writer = io::Cursor::new(buf);

    writer.set_position(0);
    writer.write_u64::<BigEndian>(0x01_23_45_67_89_ab_cd_ef).unwrap();
    assert_eq!(&writer.get_ref()[0..8], &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);

    writer.set_position(0);
    writer.write_u32::<BigEndian>(0x01_23_45_67).unwrap();
    assert_eq!(&writer.get_ref()[0..4], &[0x01, 0x23, 0x45, 0x67]);

    writer.set_position(0);
    writer.write_u16::<BigEndian>(0x01_23).unwrap();
    assert_eq!(&writer.get_ref()[0..2], &[0x01, 0x23]);
}

// Little-endian writes must emit the least-significant byte first.
#[test]
fn write_le() {
    let buf: &mut [u8] = &mut [0u8; 8];
    let mut writer = io::Cursor::new(buf);

    writer.set_position(0);
    writer.write_u64::<LittleEndian>(0x01_23_45_67_89_ab_cd_ef).unwrap();
    assert_eq!(&writer.get_ref()[0..8], &[0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01]);

    writer.set_position(0);
    writer.write_u32::<LittleEndian>(0x01_23_45_67).unwrap();
    assert_eq!(&writer.get_ref()[0..4], &[0x67, 0x45, 0x23, 0x01]);

    writer.set_position(0);
    writer.write_u16::<LittleEndian>(0x01_23).unwrap();
    assert_eq!(&writer.get_ref()[0..2], &[0x23, 0x01]);
}

// Single bytes have no endianness; write_u8 takes no byte-order parameter.
#[test]
fn write_octet() {
    let buf: &mut [u8] = &mut [0u8; 8];
    let mut writer = io::Cursor::new(buf);

    writer.set_position(0);
    writer.write_u8(0x01).unwrap();
    assert_eq!(&writer.get_ref()[0..1], &[0x01]);
}

// IEEE-754 round-trips: expected bytes are the standard encodings of 10.12.
#[test]
fn write_float() {
    let buf: &mut [u8] = &mut [0u8; 8];
    let mut writer = io::Cursor::new(buf);

    writer.set_position(0);
    writer.write_f32::<LittleEndian>(10.12f32).unwrap();
    assert_eq!(&writer.get_ref()[0..4], &[0x85, 0xEB, 0x21, 0x41]);

    writer.set_position(0);
    writer.write_f32::<BigEndian>(10.12f32).unwrap();
    assert_eq!(&writer.get_ref()[0..4], &[0x41, 0x21, 0xEB, 0x85]);

    writer.set_position(0);
    writer.write_f64::<LittleEndian>(10.12f64).unwrap();
    assert_eq!(&writer.get_ref()[0..8], &[0x3D, 0x0A, 0xD7, 0xA3, 0x70, 0x3D, 0x24, 0x40]);

    writer.set_position(0);
    writer.write_f64::<BigEndian>(10.12f64).unwrap();
    assert_eq!(&writer.get_ref()[0..8], &[0x40, 0x24, 0x3D, 0x70, 0xA3, 0xD7, 0x0A, 0x3D]);
}

// Big-endian reads reassemble the most-significant byte first.
#[test]
fn read_be() {
    let buf: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
    let mut reader = io::Cursor::new(buf);

    reader.set_position(0);
    assert_eq!(reader.read_u64::<BigEndian>().unwrap(), 0x0123456789abcdef);

    reader.set_position(0);
    assert_eq!(reader.read_u32::<BigEndian>().unwrap(), 0x01234567);

    reader.set_position(0);
    assert_eq!(reader.read_u16::<BigEndian>().unwrap(), 0x0123);
}

// Little-endian reads reassemble the least-significant byte first.
#[test]
fn read_le() {
    let buf: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
    let mut reader = io::Cursor::new(buf);

    reader.set_position(0);
    assert_eq!(reader.read_u64::<LittleEndian>().unwrap(), 0xefcdab8967452301);

    reader.set_position(0);
    assert_eq!(reader.read_u32::<LittleEndian>().unwrap(), 0x67452301);

    reader.set_position(0);
    assert_eq!(reader.read_u16::<LittleEndian>().unwrap(), 0x2301);
}

#[test]
fn read_octet() {
    let buf: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
    let mut reader = io::Cursor::new(buf);

    reader.set_position(0);
    assert_eq!(reader.read_u8().unwrap(), 0x01);
}

// Reading floats from their canonical IEEE-754 byte encodings.
#[test]
fn read_float() {
    let mut buf: &[u8] = &[0x41, 0x21, 0xEB, 0x85];
    assert_eq!(buf.read_f32::<BigEndian>().unwrap(), 10.12f32);
    let mut buf: &[u8] = &[0x85, 0xEB, 0x21, 0x41];
    assert_eq!(buf.read_f32::<LittleEndian>().unwrap(), 10.12f32);
    let mut buf: &[u8] = &[0x40, 0x24, 0x3D, 0x70, 0xA3, 0xD7, 0x0A, 0x3D];
    assert_eq!(buf.read_f64::<BigEndian>().unwrap(), 10.12f64);
    let mut buf: &[u8] = &[0x3D, 0x0A, 0xD7, 0xA3, 0x70, 0x3D, 0x24, 0x40];
    assert_eq!(buf.read_f64::<LittleEndian>().unwrap(), 10.12f64);
}

// `podio::ReadPodExt::read_exact` is called via UFCS to disambiguate it from
// `std::io::Read::read_exact`. Besides the sequential-consumption checks, also
// cover behavior at end of input.
#[test]
fn read_exact() {
    let mut buf: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8];
    assert_eq!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 2).unwrap(), [1, 2]);
    assert_eq!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 1).unwrap(), [3]);
    assert_eq!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 0).unwrap(), []);
    assert_eq!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 5).unwrap(), [4, 5, 6, 7, 8]);
    // A zero-length read at EOF must still succeed...
    assert_eq!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 0).unwrap(), []);
    // ...while any positive-length read past the end must fail...
    assert!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 1).is_err());
    // ...and the failed read must not break subsequent zero-length reads.
    assert_eq!(<&[u8] as ReadPodExt>::read_exact(&mut buf, 0).unwrap(), []);
}
Update the read_exact test to use method-call syntax and add coverage for zero-length and failing reads at EOF
// Integration tests for podio's endian-aware read/write extension traits.
extern crate podio;
use std::io;
use podio::{LittleEndian, BigEndian};
use podio::{ReadPodExt, WritePodExt};
// Big-endian writes must emit the most-significant byte first.
#[test]
fn write_be() {
let buf: &mut [u8] = &mut [0u8; 8];
let mut writer = io::Cursor::new(buf);
writer.set_position(0);
writer.write_u64::<BigEndian>(0x01_23_45_67_89_ab_cd_ef).unwrap();
assert_eq!(&writer.get_ref()[0..8], &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
writer.set_position(0);
writer.write_u32::<BigEndian>(0x01_23_45_67).unwrap();
assert_eq!(&writer.get_ref()[0..4], &[0x01, 0x23, 0x45, 0x67]);
writer.set_position(0);
writer.write_u16::<BigEndian>(0x01_23).unwrap();
assert_eq!(&writer.get_ref()[0..2], &[0x01, 0x23]);
}
// Little-endian writes must emit the least-significant byte first.
#[test]
fn write_le() {
let buf: &mut [u8] = &mut [0u8; 8];
let mut writer = io::Cursor::new(buf);
writer.set_position(0);
writer.write_u64::<LittleEndian>(0x01_23_45_67_89_ab_cd_ef).unwrap();
assert_eq!(&writer.get_ref()[0..8], &[0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01]);
writer.set_position(0);
writer.write_u32::<LittleEndian>(0x01_23_45_67).unwrap();
assert_eq!(&writer.get_ref()[0..4], &[0x67, 0x45, 0x23, 0x01]);
writer.set_position(0);
writer.write_u16::<LittleEndian>(0x01_23).unwrap();
assert_eq!(&writer.get_ref()[0..2], &[0x23, 0x01]);
}
// Single bytes have no endianness; write_u8 takes no byte-order parameter.
#[test]
fn write_octet() {
let buf: &mut [u8] = &mut [0u8; 8];
let mut writer = io::Cursor::new(buf);
writer.set_position(0);
writer.write_u8(0x01).unwrap();
assert_eq!(&writer.get_ref()[0..1], &[0x01]);
}
// IEEE-754 round-trips: expected bytes are the standard encodings of 10.12.
#[test]
fn write_float() {
let buf: &mut [u8] = &mut [0u8; 8];
let mut writer = io::Cursor::new(buf);
writer.set_position(0);
writer.write_f32::<LittleEndian>(10.12f32).unwrap();
assert_eq!(&writer.get_ref()[0..4], &[0x85, 0xEB, 0x21, 0x41]);
writer.set_position(0);
writer.write_f32::<BigEndian>(10.12f32).unwrap();
assert_eq!(&writer.get_ref()[0..4], &[0x41, 0x21, 0xEB, 0x85]);
writer.set_position(0);
writer.write_f64::<LittleEndian>(10.12f64).unwrap();
assert_eq!(&writer.get_ref()[0..8], &[0x3D, 0x0A, 0xD7, 0xA3, 0x70, 0x3D, 0x24, 0x40]);
writer.set_position(0);
writer.write_f64::<BigEndian>(10.12f64).unwrap();
assert_eq!(&writer.get_ref()[0..8], &[0x40, 0x24, 0x3D, 0x70, 0xA3, 0xD7, 0x0A, 0x3D]);
}
// Big-endian reads reassemble the most-significant byte first.
#[test]
fn read_be() {
let buf: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
let mut reader = io::Cursor::new(buf);
reader.set_position(0);
assert_eq!(reader.read_u64::<BigEndian>().unwrap(), 0x0123456789abcdef);
reader.set_position(0);
assert_eq!(reader.read_u32::<BigEndian>().unwrap(), 0x01234567);
reader.set_position(0);
assert_eq!(reader.read_u16::<BigEndian>().unwrap(), 0x0123);
}
// Little-endian reads reassemble the least-significant byte first.
#[test]
fn read_le() {
let buf: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
let mut reader = io::Cursor::new(buf);
reader.set_position(0);
assert_eq!(reader.read_u64::<LittleEndian>().unwrap(), 0xefcdab8967452301);
reader.set_position(0);
assert_eq!(reader.read_u32::<LittleEndian>().unwrap(), 0x67452301);
reader.set_position(0);
assert_eq!(reader.read_u16::<LittleEndian>().unwrap(), 0x2301);
}
#[test]
fn read_octet() {
let buf: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
let mut reader = io::Cursor::new(buf);
reader.set_position(0);
assert_eq!(reader.read_u8().unwrap(), 0x01);
}
// Reading floats from their canonical IEEE-754 byte encodings.
#[test]
fn read_float() {
let mut buf: &[u8] = &[0x41, 0x21, 0xEB, 0x85];
assert_eq!(buf.read_f32::<BigEndian>().unwrap(), 10.12f32);
let mut buf: &[u8] = &[0x85, 0xEB, 0x21, 0x41];
assert_eq!(buf.read_f32::<LittleEndian>().unwrap(), 10.12f32);
let mut buf: &[u8] = &[0x40, 0x24, 0x3D, 0x70, 0xA3, 0xD7, 0x0A, 0x3D];
assert_eq!(buf.read_f64::<BigEndian>().unwrap(), 10.12f64);
let mut buf: &[u8] = &[0x3D, 0x0A, 0xD7, 0xA3, 0x70, 0x3D, 0x24, 0x40];
assert_eq!(buf.read_f64::<LittleEndian>().unwrap(), 10.12f64);
}
// Sequential consumption plus behavior at EOF: zero-length reads always
// succeed, positive-length reads past the end fail.
#[test]
fn read_exact() {
let mut buf: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8];
assert_eq!(buf.read_exact(2).unwrap(), [1,2]);
assert_eq!(buf.read_exact(1).unwrap(), [3]);
assert_eq!(buf.read_exact(0).unwrap(), []);
assert_eq!(buf.read_exact(5).unwrap(), [4,5,6,7,8]);
assert_eq!(buf.read_exact(0).unwrap(), []);
assert!(buf.read_exact(1).is_err());
assert_eq!(buf.read_exact(0).unwrap(), []);
}
|
Make xelatex the default LaTeX command
|
use support::project;

// `git-clean -y` should delete every merged local branch without prompting.
#[test]
fn test_git_clean_removes_local_branches() {
    let project = project("git-clean_removes_local").build();

    project.setup_command("git branch test1");
    project.setup_command("git branch test2");

    let verify = project.setup_command("git branch");
    assert!(verify.stdout().contains("test1"), verify.failure_message("test1"));
    assert!(verify.stdout().contains("test2"), verify.failure_message("test2"));

    let result = project.git_clean_command("-y").run();
    assert!(result.is_success(), result.failure_message("command to succeed"));
    assert!(result.stdout().contains("Deleted branch test1"), result.failure_message("command to delete test1"));
    assert!(result.stdout().contains("Deleted branch test2"), result.failure_message("command to delete test2"));
}

// `-i <branch>` must exclude the named branch from deletion.
#[test]
fn test_git_clean_does_not_remove_ignored_local_branches() {
    // Unique fixture name: previously this reused "git-clean_removes_local",
    // racing with the test above when cargo runs tests in parallel.
    let project = project("git-clean_ignores_local").build();

    project.setup_command("git branch test1");
    project.setup_command("git branch test2");

    let verify = project.setup_command("git branch");
    assert!(verify.stdout().contains("test1"), verify.failure_message("test1"));
    assert!(verify.stdout().contains("test2"), verify.failure_message("test2"));

    let result = project.git_clean_command("-y -i test2").run();
    assert!(result.is_success(), result.failure_message("command to succeed"));
    assert!(result.stdout().contains("Deleted branch test1"), result.failure_message("command to delete test1"));
    // Negated assertion: the failure message must state what we actually
    // expected — that test2 is NOT deleted.
    assert!(!result.stdout().contains("Deleted branch test2"), result.failure_message("command not to delete test2"));
}
Add a test covering a list of ignored branches passed via multiple -i flags
use support::project;

// `git-clean -y` should delete every merged local branch without prompting.
#[test]
fn test_git_clean_removes_local_branches() {
    let project = project("git-clean_removes_local").build();

    project.setup_command("git branch test1");
    project.setup_command("git branch test2");

    let verify = project.setup_command("git branch");
    assert!(verify.stdout().contains("test1"), verify.failure_message("test1"));
    assert!(verify.stdout().contains("test2"), verify.failure_message("test2"));

    let result = project.git_clean_command("-y").run();
    assert!(result.is_success(), result.failure_message("command to succeed"));
    assert!(result.stdout().contains("Deleted branch test1"), result.failure_message("command to delete test1"));
    assert!(result.stdout().contains("Deleted branch test2"), result.failure_message("command to delete test2"));
}

// A single `-i <branch>` must exclude the named branch from deletion.
#[test]
fn test_git_clean_does_not_remove_ignored_local_branches() {
    // Unique fixture name: previously all tests in this file shared
    // "git-clean_removes_local", racing with each other under parallel runs.
    let project = project("git-clean_ignores_local").build();

    project.setup_command("git branch test1");
    project.setup_command("git branch test2");

    let verify = project.setup_command("git branch");
    assert!(verify.stdout().contains("test1"), verify.failure_message("test1"));
    assert!(verify.stdout().contains("test2"), verify.failure_message("test2"));

    let result = project.git_clean_command("-y -i test2").run();
    assert!(result.is_success(), result.failure_message("command to succeed"));
    assert!(result.stdout().contains("Deleted branch test1"), result.failure_message("command to delete test1"));
    // Negated assertion: message states the actual expectation (not deleted).
    assert!(!result.stdout().contains("Deleted branch test2"), result.failure_message("command not to delete test2"));
}

// Multiple `-i` flags must all be honored: only the non-ignored branch goes.
#[test]
fn test_git_clean_does_not_remove_list_of_ignored_local_branches() {
    let project = project("git-clean_ignores_branch_list").build();

    project.setup_command("git branch test1");
    project.setup_command("git branch test2");
    project.setup_command("git branch test3");

    let verify = project.setup_command("git branch");
    assert!(verify.stdout().contains("test1"), verify.failure_message("test1"));
    assert!(verify.stdout().contains("test2"), verify.failure_message("test2"));
    assert!(verify.stdout().contains("test3"), verify.failure_message("test3"));

    let result = project.git_clean_command("-y -i test1 -i test3").run();
    assert!(result.is_success(), result.failure_message("command to succeed"));
    // Negated assertions carry messages describing the non-deletion we expect.
    assert!(!result.stdout().contains("Deleted branch test1"), result.failure_message("command not to delete test1"));
    assert!(result.stdout().contains("Deleted branch test2"), result.failure_message("command to delete test2"));
    assert!(!result.stdout().contains("Deleted branch test3"), result.failure_message("command not to delete test3"));
}
|
use serde::{de, ser};
use std::fmt;
pub(crate) const NAME: &str = "$__toml_private_Spanned";
pub(crate) const START: &str = "$__toml_private_start";
pub(crate) const END: &str = "$__toml_private_end";
pub(crate) const VALUE: &str = "$__toml_private_value";
/// A spanned value, indicating the range at which it is defined in the source.
///
/// ```
/// use serde_derive::Deserialize;
/// use toml::Spanned;
///
/// #[derive(Deserialize)]
/// struct Value {
///     s: Spanned<String>,
/// }
///
/// fn main() {
///     let t = "s = \"value\"\n";
///
///     let u: Value = toml::from_str(t).unwrap();
///
///     assert_eq!(u.s.start(), 4);
///     assert_eq!(u.s.end(), 11);
///     assert_eq!(u.s.get_ref(), "value");
///     assert_eq!(u.s.into_inner(), String::from("value"));
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Spanned<T> {
    /// The start range.
    start: usize,
    /// The end range (exclusive).
    end: usize,
    /// The spanned value.
    value: T,
}

impl<T> Spanned<T> {
    /// Access the start of the span of the contained value.
    pub fn start(&self) -> usize {
        self.start
    }

    /// Access the end of the span of the contained value.
    pub fn end(&self) -> usize {
        self.end
    }

    /// Get the span of the contained value.
    pub fn span(&self) -> (usize, usize) {
        (self.start, self.end)
    }

    /// Consumes the spanned value and returns the contained value.
    pub fn into_inner(self) -> T {
        self.value
    }

    /// Returns a reference to the contained value.
    pub fn get_ref(&self) -> &T {
        &self.value
    }

    /// Returns a mutable reference to the contained value.
    ///
    /// Bug fix: the previous signature (`&self` returning `&T`) could never
    /// hand out mutable access, making `get_mut` indistinguishable from
    /// `get_ref`. It now takes `&mut self` and returns `&mut T`.
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.value
    }
}
// Deserialization relies on toml's own Deserializer recognizing the magic
// struct name `NAME` and emitting a three-entry map in the fixed order
// START, END, VALUE; any other order or key is rejected below.
impl<'de, T> de::Deserialize<'de> for Spanned<T>
where
T: de::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Spanned<T>, D::Error>
where
D: de::Deserializer<'de>,
{
struct SpannedVisitor<T>(::std::marker::PhantomData<T>);
impl<'de, T> de::Visitor<'de> for SpannedVisitor<T>
where
T: de::Deserialize<'de>,
{
type Value = Spanned<T>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a TOML spanned")
}
fn visit_map<V>(self, mut visitor: V) -> Result<Spanned<T>, V::Error>
where
V: de::MapAccess<'de>,
{
// Keys must arrive exactly in the order the Deserializer emits them.
if visitor.next_key()? != Some(START) {
return Err(de::Error::custom("spanned start key not found"));
}
let start: usize = visitor.next_value()?;
if visitor.next_key()? != Some(END) {
return Err(de::Error::custom("spanned end key not found"));
}
let end: usize = visitor.next_value()?;
if visitor.next_key()? != Some(VALUE) {
return Err(de::Error::custom("spanned value key not found"));
}
let value: T = visitor.next_value()?;
Ok(Spanned { start, end, value })
}
}
let visitor = SpannedVisitor(::std::marker::PhantomData);
static FIELDS: [&str; 3] = [START, END, VALUE];
deserializer.deserialize_struct(NAME, &FIELDS, visitor)
}
}
// Serialization is transparent: only the inner value is written, the span
// information is dropped (spans describe the *source* text, which no longer
// exists for re-serialized output).
impl<T: ser::Serialize> ser::Serialize for Spanned<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
self.value.serialize(serializer)
}
}
get_mut should return a mutable reference (#338)
use serde::{de, ser};
use std::fmt;
pub(crate) const NAME: &str = "$__toml_private_Spanned";
pub(crate) const START: &str = "$__toml_private_start";
pub(crate) const END: &str = "$__toml_private_end";
pub(crate) const VALUE: &str = "$__toml_private_value";
/// A value paired with the byte range it occupied in the parsed source.
///
/// ```
/// use serde_derive::Deserialize;
/// use toml::Spanned;
///
/// #[derive(Deserialize)]
/// struct Value {
///     s: Spanned<String>,
/// }
///
/// fn main() {
///     let t = "s = \"value\"\n";
///
///     let u: Value = toml::from_str(t).unwrap();
///
///     assert_eq!(u.s.start(), 4);
///     assert_eq!(u.s.end(), 11);
///     assert_eq!(u.s.get_ref(), "value");
///     assert_eq!(u.s.into_inner(), String::from("value"));
/// }
/// ```
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Spanned<T> {
    /// Byte offset where the value begins in the source.
    start: usize,
    /// Byte offset one past the value's last byte (exclusive bound).
    end: usize,
    /// The parsed value itself.
    value: T,
}

impl<T> Spanned<T> {
    /// Borrow the contained value immutably.
    pub fn get_ref(&self) -> &T {
        &self.value
    }

    /// Borrow the contained value mutably.
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.value
    }

    /// Unwrap the `Spanned`, discarding the span and yielding the value.
    pub fn into_inner(self) -> T {
        self.value
    }

    /// Byte offset where the span begins.
    pub fn start(&self) -> usize {
        self.start
    }

    /// Byte offset where the span ends (exclusive).
    pub fn end(&self) -> usize {
        self.end
    }

    /// Both span bounds as a `(start, end)` pair.
    pub fn span(&self) -> (usize, usize) {
        (self.start(), self.end())
    }
}
// Deserializes a `Spanned<T>` from a map carrying the crate-private marker
// keys START/END/VALUE (presumably synthesized by this crate's TOML
// deserializer — the key names are `pub(crate)` only).
impl<'de, T> de::Deserialize<'de> for Spanned<T>
where
    T: de::Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Spanned<T>, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        // Visitor expecting exactly the three marker keys, in a fixed order.
        struct SpannedVisitor<T>(::std::marker::PhantomData<T>);
        impl<'de, T> de::Visitor<'de> for SpannedVisitor<T>
        where
            T: de::Deserialize<'de>,
        {
            type Value = Spanned<T>;
            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
                formatter.write_str("a TOML spanned")
            }
            fn visit_map<V>(self, mut visitor: V) -> Result<Spanned<T>, V::Error>
            where
                V: de::MapAccess<'de>,
            {
                // Keys must appear as start, end, value — any deviation
                // (missing, reordered, or extra leading key) is an error.
                if visitor.next_key()? != Some(START) {
                    return Err(de::Error::custom("spanned start key not found"));
                }
                let start: usize = visitor.next_value()?;
                if visitor.next_key()? != Some(END) {
                    return Err(de::Error::custom("spanned end key not found"));
                }
                let end: usize = visitor.next_value()?;
                if visitor.next_key()? != Some(VALUE) {
                    return Err(de::Error::custom("spanned value key not found"));
                }
                // The inner value is deserialized with `T`'s own impl.
                let value: T = visitor.next_value()?;
                Ok(Spanned { start, end, value })
            }
        }
        let visitor = SpannedVisitor(::std::marker::PhantomData);
        // Field list advertised to the deserializer; mirrors the marker keys.
        static FIELDS: [&str; 3] = [START, END, VALUE];
        deserializer.deserialize_struct(NAME, &FIELDS, visitor)
    }
}
impl<T: ser::Serialize> ser::Serialize for Spanned<T> {
    // Spans exist only on the way in (deserialization); on the way out a
    // `Spanned<T>` is indistinguishable from the bare inner value.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        T::serialize(&self.value, serializer)
    }
}
|
//! A UTF-8 encoded, growable string.
//!
//! This module contains the [`String`] type, a trait for converting
//! [`ToString`]s, and several error types that may result from working with
//! [`String`]s.
//!
//! [`ToString`]: trait.ToString.html
//!
//! # Examples
//!
//! There are multiple ways to create a new [`String`] from a string literal:
//!
//! ```
//! let s = "Hello".to_string();
//!
//! let s = String::from("world");
//! let s: String = "also this".into();
//! ```
//!
//! You can create a new [`String`] from an existing one by concatenating with
//! `+`:
//!
//! [`String`]: struct.String.html
//!
//! ```
//! let s = "Hello".to_string();
//!
//! let message = s + " world!";
//! ```
//!
//! If you have a vector of valid UTF-8 bytes, you can make a [`String`] out of
//! it. You can do the reverse too.
//!
//! ```
//! let sparkle_heart = vec![240, 159, 146, 150];
//!
//! // We know these bytes are valid, so we'll use `unwrap()`.
//! let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
//!
//! assert_eq!("💖", sparkle_heart);
//!
//! let bytes = sparkle_heart.into_bytes();
//!
//! assert_eq!(bytes, [240, 159, 146, 150]);
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
use core::fmt;
use core::hash;
use core::iter::{FromIterator, FusedIterator};
use core::ops::Bound::{Excluded, Included, Unbounded};
use core::ops::{self, Add, AddAssign, Index, IndexMut, RangeBounds};
use core::ptr;
use core::str::{lossy, pattern::Pattern};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::str::{self, from_boxed_utf8_unchecked, Chars, FromStr, Utf8Error};
use crate::vec::Vec;
/// A UTF-8 encoded, growable string.
///
/// The `String` type is the most common string type that has ownership over the
/// contents of the string. It has a close relationship with its borrowed
/// counterpart, the primitive [`str`].
///
/// [`str`]: ../../std/primitive.str.html
///
/// # Examples
///
/// You can create a `String` from a literal string with [`String::from`]:
///
/// ```
/// let hello = String::from("Hello, world!");
/// ```
///
/// You can append a [`char`] to a `String` with the [`push`] method, and
/// append a [`&str`] with the [`push_str`] method:
///
/// ```
/// let mut hello = String::from("Hello, ");
///
/// hello.push('w');
/// hello.push_str("orld!");
/// ```
///
/// [`String::from`]: #method.from
/// [`char`]: ../../std/primitive.char.html
/// [`push`]: #method.push
/// [`push_str`]: #method.push_str
///
/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
/// the [`from_utf8`] method:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// // We know these bytes are valid, so we'll use `unwrap()`.
/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// [`from_utf8`]: #method.from_utf8
///
/// # UTF-8
///
/// `String`s are always valid UTF-8. This has a few implications, the first of
/// which is that if you need a non-UTF-8 string, consider [`OsString`]. It is
/// similar, but without the UTF-8 constraint. The second implication is that
/// you cannot index into a `String`:
///
/// ```compile_fail,E0277
/// let s = "hello";
///
/// println!("The first letter of s is {}", s[0]); // ERROR!!!
/// ```
///
/// [`OsString`]: ../../std/ffi/struct.OsString.html
///
/// Indexing is intended to be a constant-time operation, but UTF-8 encoding
/// does not allow us to do this. Furthermore, it's not clear what sort of
/// thing the index should return: a byte, a codepoint, or a grapheme cluster.
/// The [`bytes`] and [`chars`] methods return iterators over the first
/// two, respectively.
///
/// [`bytes`]: #method.bytes
/// [`chars`]: #method.chars
///
/// # Deref
///
/// `String`s implement [`Deref`]`<Target=str>`, and so inherit all of [`str`]'s
/// methods. In addition, this means that you can pass a `String` to a
/// function which takes a [`&str`] by using an ampersand (`&`):
///
/// ```
/// fn takes_str(s: &str) { }
///
/// let s = String::from("Hello");
///
/// takes_str(&s);
/// ```
///
/// This will create a [`&str`] from the `String` and pass it in. This
/// conversion is very inexpensive, and so generally, functions will accept
/// [`&str`]s as arguments unless they need a `String` for some specific
/// reason.
///
/// In certain cases Rust doesn't have enough information to make this
/// conversion, known as [`Deref`] coercion. In the following example a string
/// slice [`&'a str`][`&str`] implements the trait `TraitExample`, and the function
/// `example_func` takes anything that implements the trait. In this case Rust
/// would need to make two implicit conversions, which Rust doesn't have the
/// means to do. For that reason, the following example will not compile.
///
/// ```compile_fail,E0277
/// trait TraitExample {}
///
/// impl<'a> TraitExample for &'a str {}
///
/// fn example_func<A: TraitExample>(example_arg: A) {}
///
/// let example_string = String::from("example_string");
/// example_func(&example_string);
/// ```
///
/// There are two options that would work instead. The first would be to
/// change the line `example_func(&example_string);` to
/// `example_func(example_string.as_str());`, using the method [`as_str()`]
/// to explicitly extract the string slice containing the string. The second
/// way changes `example_func(&example_string);` to
/// `example_func(&*example_string);`. In this case we are dereferencing a
/// `String` to a [`str`][`&str`], then referencing the [`str`][`&str`] back to
/// [`&str`]. The second way is more idiomatic, however both work to do the
/// conversion explicitly rather than relying on the implicit conversion.
///
/// # Representation
///
/// A `String` is made up of three components: a pointer to some bytes, a
/// length, and a capacity. The pointer points to an internal buffer `String`
/// uses to store its data. The length is the number of bytes currently stored
/// in the buffer, and the capacity is the size of the buffer in bytes. As such,
/// the length will always be less than or equal to the capacity.
///
/// This buffer is always stored on the heap.
///
/// You can look at these with the [`as_ptr`], [`len`], and [`capacity`]
/// methods:
///
/// ```
/// use std::mem;
///
/// let story = String::from("Once upon a time...");
///
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent automatically dropping the String's data
/// let mut story = mem::ManuallyDrop::new(story);
///
/// let ptr = story.as_mut_ptr();
/// let len = story.len();
/// let capacity = story.capacity();
///
/// // story has nineteen bytes
/// assert_eq!(19, len);
///
/// // We can re-build a String out of ptr, len, and capacity. This is all
/// // unsafe because we are responsible for making sure the components are
/// // valid:
/// let s = unsafe { String::from_raw_parts(ptr, len, capacity) } ;
///
/// assert_eq!(String::from("Once upon a time..."), s);
/// ```
///
/// [`as_ptr`]: #method.as_ptr
/// [`len`]: #method.len
/// [`capacity`]: #method.capacity
///
/// If a `String` has enough capacity, adding elements to it will not
/// re-allocate. For example, consider this program:
///
/// ```
/// let mut s = String::new();
///
/// println!("{}", s.capacity());
///
/// for _ in 0..5 {
/// s.push_str("hello");
/// println!("{}", s.capacity());
/// }
/// ```
///
/// This will output the following:
///
/// ```text
/// 0
/// 5
/// 10
/// 20
/// 20
/// 40
/// ```
///
/// At first, we have no memory allocated at all, but as we append to the
/// string, it increases its capacity appropriately. If we instead use the
/// [`with_capacity`] method to allocate the correct capacity initially:
///
/// ```
/// let mut s = String::with_capacity(25);
///
/// println!("{}", s.capacity());
///
/// for _ in 0..5 {
/// s.push_str("hello");
/// println!("{}", s.capacity());
/// }
/// ```
///
/// [`with_capacity`]: #method.with_capacity
///
/// We end up with a different output:
///
/// ```text
/// 25
/// 25
/// 25
/// 25
/// 25
/// 25
/// ```
///
/// Here, there's no need to allocate more memory inside the loop.
///
/// [`&str`]: ../../std/primitive.str.html
/// [`Deref`]: ../../std/ops/trait.Deref.html
/// [`as_str()`]: struct.String.html#method.as_str
#[derive(PartialOrd, Eq, Ord)]
#[cfg_attr(not(test), rustc_diagnostic_item = "string_type")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct String {
    // INVARIANT: `vec` always contains valid UTF-8 — the rest of the
    // standard library relies on this (see `from_utf8_unchecked`'s docs).
    vec: Vec<u8>,
}
/// A possible error value when converting a `String` from a UTF-8 byte vector.
///
/// This type is the error type for the [`from_utf8`] method on [`String`]. It
/// is designed in such a way to carefully avoid reallocations: the
/// [`into_bytes`] method will give back the byte vector that was used in the
/// conversion attempt.
///
/// [`from_utf8`]: struct.String.html#method.from_utf8
/// [`String`]: struct.String.html
/// [`into_bytes`]: struct.FromUtf8Error.html#method.into_bytes
///
/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
/// through the [`utf8_error`] method.
///
/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
/// [`std::str`]: ../../std/str/index.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`&str`]: ../../std/primitive.str.html
/// [`utf8_error`]: #method.utf8_error
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
///
/// let value = String::from_utf8(bytes);
///
/// assert!(value.is_err());
/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FromUtf8Error {
    // The original byte vector, kept so `into_bytes` can return it to the
    // caller without reallocating.
    bytes: Vec<u8>,
    // Where and why UTF-8 validation failed.
    error: Utf8Error,
}
/// A possible error value when converting a `String` from a UTF-16 byte slice.
///
/// This type is the error type for the [`from_utf16`] method on [`String`].
///
/// [`from_utf16`]: struct.String.html#method.from_utf16
/// [`String`]: struct.String.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞mu<invalid>ic
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0xD800, 0x0069, 0x0063];
///
/// assert!(String::from_utf16(v).is_err());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct FromUtf16Error(()); // unit payload: no details about the failure are carried
impl String {
/// Creates a new empty `String`.
///
/// Given that the `String` is empty, this will not allocate any initial
/// buffer. While that means that this initial operation is very
/// inexpensive, it may cause excessive allocation later when you add
/// data. If you have an idea of how much data the `String` will hold,
/// consider the [`with_capacity`] method to prevent excessive
/// re-allocation.
///
/// [`with_capacity`]: #method.with_capacity
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::new();
/// ```
#[inline]
#[rustc_const_stable(feature = "const_string_new", since = "1.32.0")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn new() -> String {
    // `Vec::new` does not allocate, which also makes this usable in a
    // `const` context.
    String { vec: Vec::new() }
}
/// Creates a new empty `String` with a particular capacity.
///
/// `String`s have an internal buffer to hold their data. The capacity is
/// the length of that buffer, and can be queried with the [`capacity`]
/// method. This method creates an empty `String`, but one with an initial
/// buffer that can hold `capacity` bytes. This is useful when you may be
/// appending a bunch of data to the `String`, reducing the number of
/// reallocations it needs to do.
///
/// [`capacity`]: #method.capacity
///
/// If the given capacity is `0`, no allocation will occur, and this method
/// is identical to the [`new`] method.
///
/// [`new`]: #method.new
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::with_capacity(10);
///
/// // The String contains no chars, even though it has capacity for more
/// assert_eq!(s.len(), 0);
///
/// // These are all done without reallocating...
/// let cap = s.capacity();
/// for _ in 0..10 {
/// s.push('a');
/// }
///
/// assert_eq!(s.capacity(), cap);
///
/// // ...but this may make the string reallocate
/// s.push('a');
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(capacity: usize) -> String {
    // Capacity is measured in bytes, mirroring `Vec::with_capacity`.
    String { vec: Vec::with_capacity(capacity) }
}
// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Since we don't
// require this method for testing purposes, I'll just stub it
// NB see the slice::hack module in slice.rs for more information
#[inline]
#[cfg(test)]
pub fn from_str(_: &str) -> String {
    // Compiled only under the in-tree test configuration; never meant to run.
    panic!("not available with cfg(test)");
}
/// Converts a vector of bytes to a `String`.
///
/// A string ([`String`]) is made of bytes ([`u8`]), and a vector of bytes
/// ([`Vec<u8>`]) is made of bytes, so this function converts between the
/// two. Not all byte slices are valid `String`s, however: `String`
/// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
/// the bytes are valid UTF-8, and then does the conversion.
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the validity check, there is an unsafe version
/// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the check.
///
/// This method will take care to not copy the vector, for efficiency's
/// sake.
///
/// If you need a [`&str`] instead of a `String`, consider
/// [`str::from_utf8`].
///
/// The inverse of this method is [`into_bytes`].
///
/// # Errors
///
/// Returns [`Err`] if the slice is not UTF-8 with a description as to why the
/// provided bytes are not UTF-8. The vector you moved in is also included.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// // We know these bytes are valid, so we'll use `unwrap()`.
/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// // some invalid bytes, in a vector
/// let sparkle_heart = vec![0, 159, 146, 150];
///
/// assert!(String::from_utf8(sparkle_heart).is_err());
/// ```
///
/// See the docs for [`FromUtf8Error`] for more details on what you can do
/// with this error.
///
/// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
/// [`String`]: struct.String.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`Vec<u8>`]: ../../std/vec/struct.Vec.html
/// [`&str`]: ../../std/primitive.str.html
/// [`str::from_utf8`]: ../../std/str/fn.from_utf8.html
/// [`into_bytes`]: struct.String.html#method.into_bytes
/// [`FromUtf8Error`]: struct.FromUtf8Error.html
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
    // Validate once up front; on success the vector is adopted as-is
    // (no copy), on failure it is handed back inside the error.
    match str::from_utf8(&vec) {
        Ok(..) => Ok(String { vec }),
        Err(e) => Err(FromUtf8Error { bytes: vec, error: e }),
    }
}
/// Converts a slice of bytes to a string, including invalid characters.
///
/// Strings are made of bytes ([`u8`]), and a slice of bytes
/// ([`&[u8]`][byteslice]) is made of bytes, so this function converts
/// between the two. Not all byte slices are valid strings, however: strings
/// are required to be valid UTF-8. During this conversion,
/// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
/// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: �
///
/// [`u8`]: ../../std/primitive.u8.html
/// [byteslice]: ../../std/primitive.slice.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the conversion, there is an unsafe version
/// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the checks.
///
/// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
///
/// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
/// UTF-8, then we need to insert the replacement characters, which will
/// change the size of the string, and hence, require a `String`. But if
/// it's already valid UTF-8, we don't need a new allocation. This return
/// type allows us to handle both cases.
///
/// [`Cow<'a, str>`]: ../../std/borrow/enum.Cow.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart);
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// // some invalid bytes
/// let input = b"Hello \xF0\x90\x80World";
/// let output = String::from_utf8_lossy(input);
///
/// assert_eq!("Hello �World", output);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
    let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
    // Fast path: if the first chunk's valid prefix covers the whole input,
    // the bytes are entirely valid UTF-8 and can be borrowed, no allocation.
    let (first_valid, first_broken) = if let Some(chunk) = iter.next() {
        let lossy::Utf8LossyChunk { valid, broken } = chunk;
        if valid.len() == v.len() {
            debug_assert!(broken.is_empty());
            return Cow::Borrowed(valid);
        }
        (valid, broken)
    } else {
        // Empty input: nothing to decode.
        return Cow::Borrowed("");
    };
    const REPLACEMENT: &str = "\u{FFFD}";
    // Slow path: build an owned string, splicing one U+FFFD in place of
    // each broken byte run reported by the lossy decoder.
    let mut res = String::with_capacity(v.len());
    res.push_str(first_valid);
    if !first_broken.is_empty() {
        res.push_str(REPLACEMENT);
    }
    for lossy::Utf8LossyChunk { valid, broken } in iter {
        res.push_str(valid);
        if !broken.is_empty() {
            res.push_str(REPLACEMENT);
        }
    }
    Cow::Owned(res)
}
/// Decode a UTF-16 encoded vector `v` into a `String`, returning [`Err`]
/// if `v` contains any invalid data.
///
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞music
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0x0073, 0x0069, 0x0063];
/// assert_eq!(String::from("𝄞music"),
/// String::from_utf16(v).unwrap());
///
/// // 𝄞mu<invalid>ic
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0xD800, 0x0069, 0x0063];
/// assert!(String::from_utf16(v).is_err());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> {
    // This isn't done via collect::<Result<_, _>>() for performance reasons.
    // FIXME: the function can be simplified again when #48994 is closed.
    let mut ret = String::with_capacity(v.len());
    for c in decode_utf16(v.iter().cloned()) {
        if let Ok(c) = c {
            ret.push(c);
        } else {
            // Any decode error aborts the whole conversion.
            return Err(FromUtf16Error(()));
        }
    }
    Ok(ret)
}
/// Decode a UTF-16 encoded slice `v` into a `String`, replacing
/// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
///
/// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
/// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8
/// conversion requires a memory allocation.
///
/// [`from_utf8_lossy`]: #method.from_utf8_lossy
/// [`Cow<'a, str>`]: ../borrow/enum.Cow.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞mus<invalid>ic<invalid>
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0x0073, 0xDD1E, 0x0069, 0x0063,
/// 0xD834];
///
/// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
/// String::from_utf16_lossy(v));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf16_lossy(v: &[u16]) -> String {
    // Each decode error becomes one U+FFFD replacement character.
    decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
}
/// Decomposes a `String` into its raw components.
///
/// Returns the raw pointer to the underlying data, the length of
/// the string (in bytes), and the allocated capacity of the data
/// (in bytes). These are the same arguments in the same order as
/// the arguments to [`from_raw_parts`].
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `String`. The only way to do
/// this is to convert the raw pointer, length, and capacity back
/// into a `String` with the [`from_raw_parts`] function, allowing
/// the destructor to perform the cleanup.
///
/// [`from_raw_parts`]: #method.from_raw_parts
///
/// # Examples
///
/// ```
/// #![feature(vec_into_raw_parts)]
/// let s = String::from("hello");
///
/// let (ptr, len, cap) = s.into_raw_parts();
///
/// let rebuilt = unsafe { String::from_raw_parts(ptr, len, cap) };
/// assert_eq!(rebuilt, "hello");
/// ```
#[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
pub fn into_raw_parts(self) -> (*mut u8, usize, usize) {
    // Delegates to `Vec::into_raw_parts`; the caller now owns the buffer.
    self.vec.into_raw_parts()
}
/// Creates a new `String` from a length, capacity, and pointer.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
/// * The memory at `ptr` needs to have been previously allocated by the
/// same allocator the standard library uses, with a required alignment of exactly 1.
/// * `length` needs to be less than or equal to `capacity`.
/// * `capacity` needs to be the correct value.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures.
///
/// The ownership of `ptr` is effectively transferred to the
/// `String` which may then deallocate, reallocate or change the
/// contents of memory pointed to by the pointer at will. Ensure
/// that nothing else uses the pointer after calling this
/// function.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::mem;
///
/// unsafe {
/// let s = String::from("hello");
///
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent automatically dropping the String's data
/// let mut s = mem::ManuallyDrop::new(s);
///
/// let ptr = s.as_mut_ptr();
/// let len = s.len();
/// let capacity = s.capacity();
///
/// let s = String::from_raw_parts(ptr, len, capacity);
///
/// assert_eq!(String::from("hello"), s);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
    // SAFETY: forwarded to the caller — see this method's `# Safety` docs.
    // NOTE(review): the bytes are not validated here; callers must also
    // uphold `String`'s valid-UTF-8 invariant.
    String { vec: Vec::from_raw_parts(buf, length, capacity) }
}
/// Converts a vector of bytes to a `String` without checking that the
/// string contains valid UTF-8.
///
/// See the safe version, [`from_utf8`], for more details.
///
/// [`from_utf8`]: struct.String.html#method.from_utf8
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed
/// to it are valid UTF-8. If this constraint is violated, it may cause
/// memory unsafety issues with future users of the `String`, as the rest of
/// the standard library assumes that `String`s are valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// let sparkle_heart = unsafe {
/// String::from_utf8_unchecked(sparkle_heart)
/// };
///
/// assert_eq!("💖", sparkle_heart);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String {
    // No validation: the caller guarantees `bytes` is valid UTF-8.
    String { vec: bytes }
}
/// Converts a `String` into a byte vector.
///
/// This consumes the `String`, so we do not need to copy its contents.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("hello");
/// let bytes = s.into_bytes();
///
/// assert_eq!(&[104, 101, 108, 108, 111][..], &bytes[..]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_bytes(self) -> Vec<u8> {
    // Hands back the internal buffer directly — no copy.
    self.vec
}
/// Extracts a string slice containing the entire `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("foo");
///
/// assert_eq!("foo", s.as_str());
/// ```
#[inline]
#[stable(feature = "string_as_str", since = "1.7.0")]
pub fn as_str(&self) -> &str {
    // Relies on `String`'s `Deref<Target = str>` to coerce `&String` to `&str`.
    self
}
/// Converts a `String` into a mutable string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foobar");
/// let s_mut_str = s.as_mut_str();
///
/// s_mut_str.make_ascii_uppercase();
///
/// assert_eq!("FOOBAR", s_mut_str);
/// ```
#[inline]
#[stable(feature = "string_as_str", since = "1.7.0")]
pub fn as_mut_str(&mut self) -> &mut str {
    // Mutable counterpart of `as_str`; coerces `&mut String` to `&mut str`.
    self
}
/// Appends a given string slice onto the end of this `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// s.push_str("bar");
///
/// assert_eq!("foobar", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_str(&mut self, string: &str) {
    // `string` is already valid UTF-8, so its raw bytes can be appended
    // without re-validation.
    self.vec.extend_from_slice(string.as_bytes())
}
/// Returns this `String`'s capacity, in bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::with_capacity(10);
///
/// assert!(s.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    // Capacity in bytes, straight from the backing vector.
    self.vec.capacity()
}
/// Ensures that this `String`'s capacity is at least `additional` bytes
/// larger than its length.
///
/// The capacity may be increased by more than `additional` bytes if it
/// chooses, to prevent frequent reallocations.
///
/// If you do not want this "at least" behavior, see the [`reserve_exact`]
/// method.
///
/// # Panics
///
/// Panics if the new capacity overflows [`usize`].
///
/// [`reserve_exact`]: struct.String.html#method.reserve_exact
/// [`usize`]: ../../std/primitive.usize.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::new();
///
/// s.reserve(10);
///
/// assert!(s.capacity() >= 10);
/// ```
///
/// This may not actually increase the capacity:
///
/// ```
/// let mut s = String::with_capacity(10);
/// s.push('a');
/// s.push('b');
///
/// // s now has a length of 2 and a capacity of 10
/// assert_eq!(2, s.len());
/// assert_eq!(10, s.capacity());
///
/// // Since we already have an extra 8 capacity, calling this...
/// s.reserve(8);
///
/// // ... doesn't actually increase.
/// assert_eq!(10, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    // "At least" semantics — may over-allocate to amortize future growth.
    self.vec.reserve(additional)
}
/// Ensures that this `String`'s capacity is `additional` bytes
/// larger than its length.
///
/// Consider using the [`reserve`] method unless you absolutely know
/// better than the allocator.
///
/// [`reserve`]: #method.reserve
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::new();
///
/// s.reserve_exact(10);
///
/// assert!(s.capacity() >= 10);
/// ```
///
/// This may not actually increase the capacity:
///
/// ```
/// let mut s = String::with_capacity(10);
/// s.push('a');
/// s.push('b');
///
/// // s now has a length of 2 and a capacity of 10
/// assert_eq!(2, s.len());
/// assert_eq!(10, s.capacity());
///
/// // Since we already have an extra 8 capacity, calling this...
/// s.reserve_exact(8);
///
/// // ... doesn't actually increase.
/// assert_eq!(10, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
    // Requests exactly `additional` spare bytes (though the allocator may
    // still give back more space than requested).
    self.vec.reserve_exact(additional)
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
/// in the given `String`. The collection may reserve more space to avoid
/// frequent reallocations. After calling `reserve`, capacity will be
/// greater than or equal to `self.len() + additional`. Does nothing if
/// capacity is already sufficient.
///
/// # Errors
///
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::TryReserveError;
///
/// fn process_data(data: &str) -> Result<String, TryReserveError> {
/// let mut output = String::new();
///
/// // Pre-reserve the memory, exiting if we can't
/// output.try_reserve(data.len())?;
///
/// // Now we know this can't OOM in the middle of our complex work
/// output.push_str(data);
///
/// Ok(output)
/// }
/// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
    // Fallible variant of `reserve`: reports capacity overflow or
    // allocation failure as an error instead of panicking.
    self.vec.try_reserve(additional)
}
/// Tries to reserve the minimum capacity for exactly `additional` more elements to
/// be inserted in the given `String`. After calling `try_reserve_exact`,
/// capacity will be greater than or equal to `self.len() + additional`.
/// Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it
/// requests. Therefore, capacity can not be relied upon to be precisely
/// minimal. Prefer `try_reserve` if future insertions are expected.
///
/// # Errors
///
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::TryReserveError;
///
/// fn process_data(data: &str) -> Result<String, TryReserveError> {
///     let mut output = String::new();
///
///     // Pre-reserve the memory, exiting if we can't
///     output.try_reserve_exact(data.len())?;
///
///     // Now we know this can't OOM in the middle of our complex work
///     output.push_str(data);
///
///     Ok(output)
/// }
/// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
    // Delegates to the underlying byte vector.
    self.vec.try_reserve_exact(additional)
}
/// Shrinks the capacity of this `String` to match its length.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// s.reserve(100);
/// assert!(s.capacity() >= 100);
///
/// s.shrink_to_fit();
/// assert_eq!(3, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
    // Delegates to `Vec::shrink_to_fit` on the underlying byte buffer.
    self.vec.shrink_to_fit()
}
/// Shrinks the capacity of this `String` with a lower bound.
///
/// The capacity will remain at least as large as both the length
/// and the supplied value.
///
/// Panics if the current capacity is smaller than the supplied
/// minimum capacity.
///
/// # Examples
///
/// ```
/// #![feature(shrink_to)]
/// let mut s = String::from("foo");
///
/// s.reserve(100);
/// assert!(s.capacity() >= 100);
///
/// s.shrink_to(10);
/// assert!(s.capacity() >= 10);
/// s.shrink_to(0);
/// assert!(s.capacity() >= 3);
/// ```
#[inline]
#[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
pub fn shrink_to(&mut self, min_capacity: usize) {
    // The documented panic comes from `Vec::shrink_to` itself.
    self.vec.shrink_to(min_capacity)
}
/// Appends the given [`char`] to the end of this `String`.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("abc");
///
/// s.push('1');
/// s.push('2');
/// s.push('3');
///
/// assert_eq!("abc123", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push(&mut self, ch: char) {
    // Fast path: a one-byte (ASCII) char is appended directly as a byte.
    if ch.len_utf8() == 1 {
        self.vec.push(ch as u8);
    } else {
        // Multi-byte char: encode into a 4-byte stack buffer, then append
        // the UTF-8 bytes to the underlying vector.
        let mut buf = [0; 4];
        self.vec.extend_from_slice(ch.encode_utf8(&mut buf).as_bytes());
    }
}
/// Returns a byte slice of this `String`'s contents.
///
/// The inverse of this method is [`from_utf8`].
///
/// [`from_utf8`]: #method.from_utf8
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("hello");
///
/// assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_bytes(&self) -> &[u8] {
    // `&Vec<u8>` coerces to `&[u8]`; exposing the bytes immutably cannot
    // violate the UTF-8 invariant.
    &self.vec
}
/// Shortens this `String` to the specified length.
///
/// If `new_len` is greater than the string's current length, this has no
/// effect.
///
/// Note that this method has no effect on the allocated capacity
/// of the string
///
/// # Panics
///
/// Panics if `new_len` does not lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("hello");
///
/// s.truncate(2);
///
/// assert_eq!("he", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn truncate(&mut self, new_len: usize) {
    // Requests longer than the current contents are a documented no-op.
    if new_len > self.len() {
        return;
    }
    // Cutting inside a multi-byte char would break the UTF-8 invariant.
    assert!(self.is_char_boundary(new_len));
    self.vec.truncate(new_len)
}
/// Removes the last character from the string buffer and returns it.
///
/// Returns [`None`] if this `String` is empty.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('f'));
///
/// assert_eq!(s.pop(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop(&mut self) -> Option<char> {
    // `?` returns None for the empty string.
    let ch = self.chars().rev().next()?;
    let newlen = self.len() - ch.len_utf8();
    // SAFETY: `newlen` is the byte offset of the last char's start, which
    // is a char boundary, so the shortened buffer remains valid UTF-8.
    unsafe {
        self.vec.set_len(newlen);
    }
    Some(ch)
}
/// Removes a [`char`] from this `String` at a byte position and returns it.
///
/// This is an `O(n)` operation, as it requires copying every element in the
/// buffer.
///
/// # Panics
///
/// Panics if `idx` is larger than or equal to the `String`'s length,
/// or if it does not lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// assert_eq!(s.remove(0), 'f');
/// assert_eq!(s.remove(1), 'o');
/// assert_eq!(s.remove(0), 'o');
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, idx: usize) -> char {
    // Slicing `self[idx..]` performs the bounds and char-boundary checks,
    // panicking on violation; an empty tail means `idx == len`.
    let ch = match self[idx..].chars().next() {
        Some(ch) => ch,
        None => panic!("cannot remove a char from the end of a string"),
    };
    let next = idx + ch.len_utf8();
    let len = self.len();
    // SAFETY: `idx` and `next` are both char boundaries within the buffer,
    // so shifting `[next, len)` down over the removed char and shrinking
    // the length leaves valid UTF-8.
    unsafe {
        ptr::copy(self.vec.as_ptr().add(next), self.vec.as_mut_ptr().add(idx), len - next);
        self.vec.set_len(len - (next - idx));
    }
    ch
}
/// Retains only the characters specified by the predicate.
///
/// In other words, remove all characters `c` such that `f(c)` returns `false`.
/// This method operates in place, visiting each character exactly once in the
/// original order, and preserves the order of the retained characters.
///
/// # Examples
///
/// ```
/// let mut s = String::from("f_o_ob_ar");
///
/// s.retain(|c| c != '_');
///
/// assert_eq!(s, "foobar");
/// ```
///
/// The exact order may be useful for tracking external state, like an index.
///
/// ```
/// let mut s = String::from("abcde");
/// let keep = [false, true, true, false, true];
/// let mut i = 0;
/// s.retain(|_| (keep[i], i += 1).0);
/// assert_eq!(s, "bce");
/// ```
#[inline]
#[stable(feature = "string_retain", since = "1.26.0")]
pub fn retain<F>(&mut self, mut f: F)
where
    F: FnMut(char) -> bool,
{
    let len = self.len();
    // Number of bytes dropped so far; kept chars are shifted down by this amount.
    let mut del_bytes = 0;
    let mut idx = 0;
    while idx < len {
        // SAFETY: `idx` always lands on a char boundary (it only advances by
        // whole chars) and `idx < len`, so the tail is non-empty valid UTF-8.
        let ch = unsafe { self.get_unchecked(idx..len).chars().next().unwrap() };
        let ch_len = ch.len_utf8();
        if !f(ch) {
            del_bytes += ch_len;
        } else if del_bytes > 0 {
            // SAFETY: source and destination ranges are within the buffer;
            // the destination precedes the source so `ptr::copy` handles overlap.
            // NOTE(review): if `f` panics mid-iteration, bytes already shifted
            // down coexist with not-yet-visited bytes while `len` is unchanged,
            // which may leave the buffer non-UTF-8 if the panic is caught —
            // worth confirming against the upstream panic-safety fix.
            unsafe {
                ptr::copy(
                    self.vec.as_ptr().add(idx),
                    self.vec.as_mut_ptr().add(idx - del_bytes),
                    ch_len,
                );
            }
        }
        // Point idx to the next char
        idx += ch_len;
    }
    if del_bytes > 0 {
        // SAFETY: the first `len - del_bytes` bytes are the retained chars,
        // compacted in order, hence valid UTF-8.
        unsafe {
            self.vec.set_len(len - del_bytes);
        }
    }
}
/// Inserts a character into this `String` at a byte position.
///
/// This is an `O(n)` operation as it requires copying every element in the
/// buffer.
///
/// # Panics
///
/// Panics if `idx` is larger than the `String`'s length, or if it does not
/// lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::with_capacity(3);
///
/// s.insert(0, 'f');
/// s.insert(1, 'o');
/// s.insert(2, 'o');
///
/// assert_eq!("foo", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, idx: usize, ch: char) {
    assert!(self.is_char_boundary(idx));
    // Encode the char into a 4-byte stack buffer (max UTF-8 length).
    let mut bits = [0; 4];
    let bits = ch.encode_utf8(&mut bits).as_bytes();
    // SAFETY: `idx` was checked to be a char boundary and `bits` is the
    // valid UTF-8 encoding of `ch`.
    unsafe {
        self.insert_bytes(idx, bits);
    }
}
/// Inserts `bytes` at byte offset `idx`, shifting the tail to the right.
///
/// # Safety
///
/// The caller must ensure that `idx <= self.len()`, that `idx` lies on a
/// char boundary, and that `bytes` is valid UTF-8; otherwise the `String`'s
/// UTF-8 invariant is broken.
unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) {
    let len = self.len();
    let amt = bytes.len();
    // Make room for `amt` extra bytes (may reallocate).
    self.vec.reserve(amt);
    // Shift the tail `[idx, len)` right by `amt`, then copy `bytes` into the gap.
    ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
    ptr::copy(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
    self.vec.set_len(len + amt);
}
/// Inserts a string slice into this `String` at a byte position.
///
/// This is an `O(n)` operation as it requires copying every element in the
/// buffer.
///
/// # Panics
///
/// Panics if `idx` is larger than the `String`'s length, or if it does not
/// lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("bar");
///
/// s.insert_str(0, "foo");
///
/// assert_eq!("foobar", s);
/// ```
#[inline]
#[stable(feature = "insert_str", since = "1.16.0")]
pub fn insert_str(&mut self, idx: usize, string: &str) {
    assert!(self.is_char_boundary(idx));
    // SAFETY: `idx` was checked to be a char boundary, and a `&str` is
    // valid UTF-8 by construction.
    unsafe {
        self.insert_bytes(idx, string.as_bytes());
    }
}
/// Returns a mutable reference to the contents of this `String`.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed
/// to it are valid UTF-8. If this constraint is violated, it may cause
/// memory unsafety issues with future users of the `String`, as the rest of
/// the standard library assumes that `String`s are valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("hello");
///
/// unsafe {
///     let vec = s.as_mut_vec();
///     assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
///
///     vec.reverse();
/// }
/// assert_eq!(s, "olleh");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
    // Caller takes responsibility for keeping the bytes valid UTF-8.
    &mut self.vec
}
/// Returns the length of this `String`, in bytes, not [`char`]s or
/// graphemes. In other words, it may not be what a human considers the
/// length of the string.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = String::from("foo");
/// assert_eq!(a.len(), 3);
///
/// let fancy_f = String::from("ƒoo");
/// assert_eq!(fancy_f.len(), 4);
/// assert_eq!(fancy_f.chars().count(), 3);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
    // Byte length of the underlying vector.
    self.vec.len()
}
/// Returns `true` if this `String` has a length of zero, and `false` otherwise.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut v = String::new();
/// assert!(v.is_empty());
///
/// v.push('a');
/// assert!(!v.is_empty());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    // Equivalent to `self.len() == 0`; delegate to the byte vector.
    self.vec.is_empty()
}
/// Splits the string into two at the given index.
///
/// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and
/// the returned `String` contains bytes `[at, len)`. `at` must be on the
/// boundary of a UTF-8 code point.
///
/// Note that the capacity of `self` does not change.
///
/// # Panics
///
/// Panics if `at` is not on a `UTF-8` code point boundary, or if it is beyond the last
/// code point of the string.
///
/// # Examples
///
/// ```
/// # fn main() {
/// let mut hello = String::from("Hello, World!");
/// let world = hello.split_off(7);
/// assert_eq!(hello, "Hello, ");
/// assert_eq!(world, "World!");
/// # }
/// ```
#[inline]
#[stable(feature = "string_split_off", since = "1.16.0")]
#[must_use = "use `.truncate()` if you don't need the other half"]
pub fn split_off(&mut self, at: usize) -> String {
    assert!(self.is_char_boundary(at));
    let other = self.vec.split_off(at);
    // SAFETY: `at` is a char boundary, so both halves are valid UTF-8.
    unsafe { String::from_utf8_unchecked(other) }
}
/// Truncates this `String`, removing all contents.
///
/// While this means the `String` will have a length of zero, it does not
/// touch its capacity.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// s.clear();
///
/// assert!(s.is_empty());
/// assert_eq!(0, s.len());
/// assert_eq!(3, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
    // `Vec::clear` sets len to 0 and keeps the allocation.
    self.vec.clear()
}
/// Creates a draining iterator that removes the specified range in the `String`
/// and yields the removed `chars`.
///
/// Note: The element range is removed even if the iterator is not
/// consumed until the end.
///
/// # Panics
///
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
///
/// // Remove the range up until the β from the string
/// let t: String = s.drain(..beta_offset).collect();
/// assert_eq!(t, "α is alpha, ");
/// assert_eq!(s, "β is beta");
///
/// // A full range clears the string
/// s.drain(..);
/// assert_eq!(s, "");
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain<R>(&mut self, range: R) -> Drain<'_>
where
    R: RangeBounds<usize>,
{
    // Memory safety
    //
    // The String version of Drain does not have the memory safety issues
    // of the vector version. The data is just plain bytes.
    // Because the range removal happens in Drop, if the Drain iterator is leaked,
    // the removal will not happen.
    let len = self.len();
    // Normalize the generic bound pairs into concrete [start, end) offsets.
    let start = match range.start_bound() {
        Included(&n) => n,
        Excluded(&n) => n + 1,
        Unbounded => 0,
    };
    let end = match range.end_bound() {
        Included(&n) => n + 1,
        Excluded(&n) => n,
        Unbounded => len,
    };
    // Take out two simultaneous borrows. The &mut String won't be accessed
    // until iteration is over, in Drop.
    let self_ptr = self as *mut _;
    // slicing does the appropriate bounds checks
    let chars_iter = self[start..end].chars();
    Drain { start, end, iter: chars_iter, string: self_ptr }
}
/// Removes the specified range in the string,
/// and replaces it with the given string.
/// The given string doesn't need to be the same length as the range.
///
/// # Panics
///
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// [`char`]: ../../std/primitive.char.html
/// [`Vec::splice`]: ../../std/vec/struct.Vec.html#method.splice
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
///
/// // Replace the range up until the β from the string
/// s.replace_range(..beta_offset, "Α is capital alpha; ");
/// assert_eq!(s, "Α is capital alpha; β is beta");
/// ```
#[stable(feature = "splice", since = "1.27.0")]
pub fn replace_range<R>(&mut self, range: R, replace_with: &str)
where
    R: RangeBounds<usize>,
{
    // Memory safety
    //
    // `replace_range` does not have the memory safety issues of the vector
    // version's `Splice`. The data is just plain bytes.
    // Check that both effective endpoints land on char boundaries before
    // splicing raw bytes, so the UTF-8 invariant is preserved.
    match range.start_bound() {
        Included(&n) => assert!(self.is_char_boundary(n)),
        Excluded(&n) => assert!(self.is_char_boundary(n + 1)),
        Unbounded => {}
    };
    match range.end_bound() {
        Included(&n) => assert!(self.is_char_boundary(n + 1)),
        Excluded(&n) => assert!(self.is_char_boundary(n)),
        Unbounded => {}
    };
    // `replace_with` is valid UTF-8, so the spliced result remains valid.
    unsafe { self.as_mut_vec() }.splice(range, replace_with.bytes());
}
/// Converts this `String` into a [`Box`]`<`[`str`]`>`.
///
/// This will drop any excess capacity.
///
/// [`Box`]: ../../std/boxed/struct.Box.html
/// [`str`]: ../../std/primitive.str.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("hello");
///
/// let b = s.into_boxed_str();
/// ```
#[stable(feature = "box_str", since = "1.4.0")]
#[inline]
pub fn into_boxed_str(self) -> Box<str> {
    // `into_boxed_slice` shrinks to fit, dropping excess capacity.
    let slice = self.vec.into_boxed_slice();
    // SAFETY: the bytes came from a `String`, hence are valid UTF-8.
    unsafe { from_boxed_utf8_unchecked(slice) }
}
}
impl FromUtf8Error {
    /// Returns a slice of the bytes that were attempted to convert to a `String`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let bytes = vec![0, 159];
    ///
    /// let value = String::from_utf8(bytes);
    ///
    /// assert_eq!(&[0, 159], value.unwrap_err().as_bytes());
    /// ```
    #[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")]
    pub fn as_bytes(&self) -> &[u8] {
        &self.bytes[..]
    }
    /// Returns the bytes that were attempted to convert to a `String`.
    ///
    /// This method is carefully constructed to avoid allocation. It will
    /// consume the error, moving out the bytes, so that a copy of the bytes
    /// does not need to be made.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let bytes = vec![0, 159];
    ///
    /// let value = String::from_utf8(bytes);
    ///
    /// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
    /// Fetch a `Utf8Error` to get more details about the conversion failure.
    ///
    /// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
    /// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
    /// an analogue to `FromUtf8Error`. See its documentation for more details
    /// on using it.
    ///
    /// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
    /// [`std::str`]: ../../std/str/index.html
    /// [`u8`]: ../../std/primitive.u8.html
    /// [`&str`]: ../../std/primitive.str.html
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let bytes = vec![0, 159];
    ///
    /// let error = String::from_utf8(bytes).unwrap_err().utf8_error();
    ///
    /// // the first byte is invalid here
    /// assert_eq!(1, error.valid_up_to());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn utf8_error(&self) -> Utf8Error {
        // `Utf8Error` is `Copy`, so this returns it by value.
        self.error
    }
}
// Error display: delegate to the inner `Utf8Error` / use a fixed message.
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for FromUtf8Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.error, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for FromUtf16Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt("invalid utf-16: lone surrogate found", f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
    fn clone(&self) -> Self {
        String { vec: self.vec.clone() }
    }
    // `clone_from` is overridden so the destination's existing allocation
    // can be reused instead of always reallocating.
    fn clone_from(&mut self, source: &Self) {
        self.vec.clone_from(&source.vec);
    }
}
// Collecting chars / string slices into a `String`: start empty and extend,
// which reserves from the iterator's size hint where possible.
#[stable(feature = "rust1", since = "1.0.0")]
impl FromIterator<char> for String {
    fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
        let mut buf = String::new();
        buf.extend(iter);
        buf
    }
}
#[stable(feature = "string_from_iter_by_ref", since = "1.17.0")]
impl<'a> FromIterator<&'a char> for String {
    fn from_iter<I: IntoIterator<Item = &'a char>>(iter: I) -> String {
        let mut buf = String::new();
        buf.extend(iter);
        buf
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> FromIterator<&'a str> for String {
    fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
        let mut buf = String::new();
        buf.extend(iter);
        buf
    }
}
#[stable(feature = "extend_string", since = "1.4.0")]
impl FromIterator<String> for String {
    fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
        let mut iterator = iter.into_iter();
        // Because we're iterating over `String`s, we can avoid at least
        // one allocation by getting the first string from the iterator
        // and appending to it all the subsequent strings.
        match iterator.next() {
            None => String::new(),
            Some(mut buf) => {
                buf.extend(iterator);
                buf
            }
        }
    }
}
#[stable(feature = "herd_cows", since = "1.19.0")]
impl<'a> FromIterator<Cow<'a, str>> for String {
    fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
        let mut iterator = iter.into_iter();
        // Because we're iterating over CoWs, we can (potentially) avoid at least
        // one allocation by getting the first item and appending to it all the
        // subsequent items.
        match iterator.next() {
            None => String::new(),
            Some(cow) => {
                // `into_owned` is free if the Cow is already owned.
                let mut buf = cow.into_owned();
                buf.extend(iterator);
                buf
            }
        }
    }
}
// Extend impls: append items from an iterator onto an existing `String`.
#[stable(feature = "rust1", since = "1.0.0")]
impl Extend<char> for String {
    fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
        let iterator = iter.into_iter();
        // Reserve from the size hint's lower bound to limit reallocations.
        let (lower_bound, _) = iterator.size_hint();
        self.reserve(lower_bound);
        iterator.for_each(move |c| self.push(c));
    }
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a> Extend<&'a char> for String {
    fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
        // `char` is `Copy`; delegate to the by-value impl.
        self.extend(iter.into_iter().cloned());
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Extend<&'a str> for String {
    fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
        iter.into_iter().for_each(move |s| self.push_str(s));
    }
}
#[stable(feature = "extend_string", since = "1.4.0")]
impl Extend<String> for String {
    fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
        iter.into_iter().for_each(move |s| self.push_str(&s));
    }
}
#[stable(feature = "herd_cows", since = "1.19.0")]
impl<'a> Extend<Cow<'a, str>> for String {
    fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
        iter.into_iter().for_each(move |s| self.push_str(&s));
    }
}
/// A convenience impl that delegates to the impl for `&str`.
///
/// # Examples
///
/// ```
/// assert_eq!(String::from("Hello world").find("world"), Some(6));
/// ```
#[unstable(
    feature = "pattern",
    reason = "API not fully fleshed out and ready to be stabilized",
    issue = "27721"
)]
impl<'a, 'b> Pattern<'a> for &'b String {
    type Searcher = <&'b str as Pattern<'a>>::Searcher;
    // Every method below forwards through `self[..]` (deref to `&str`).
    fn into_searcher(self, haystack: &'a str) -> <&'b str as Pattern<'a>>::Searcher {
        self[..].into_searcher(haystack)
    }
    #[inline]
    fn is_contained_in(self, haystack: &'a str) -> bool {
        self[..].is_contained_in(haystack)
    }
    #[inline]
    fn is_prefix_of(self, haystack: &'a str) -> bool {
        self[..].is_prefix_of(haystack)
    }
    #[inline]
    fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
        self[..].strip_prefix_of(haystack)
    }
    #[inline]
    fn is_suffix_of(self, haystack: &'a str) -> bool {
        self[..].is_suffix_of(haystack)
    }
    #[inline]
    fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
        self[..].strip_suffix_of(haystack)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for String {
    // Compare as string slices; `ne` is also forwarded explicitly to keep
    // both paths going through the `str` implementation.
    #[inline]
    fn eq(&self, other: &String) -> bool {
        PartialEq::eq(&self[..], &other[..])
    }
    #[inline]
    fn ne(&self, other: &String) -> bool {
        PartialEq::ne(&self[..], &other[..])
    }
}
// Generates symmetric `PartialEq` impls between two string-like types by
// comparing their `str` views (`&self[..]`).
macro_rules! impl_eq {
    ($lhs:ty, $rhs: ty) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        #[allow(unused_lifetimes)]
        impl<'a, 'b> PartialEq<$rhs> for $lhs {
            #[inline]
            fn eq(&self, other: &$rhs) -> bool {
                PartialEq::eq(&self[..], &other[..])
            }
            #[inline]
            fn ne(&self, other: &$rhs) -> bool {
                PartialEq::ne(&self[..], &other[..])
            }
        }
        #[stable(feature = "rust1", since = "1.0.0")]
        #[allow(unused_lifetimes)]
        impl<'a, 'b> PartialEq<$lhs> for $rhs {
            #[inline]
            fn eq(&self, other: &$lhs) -> bool {
                PartialEq::eq(&self[..], &other[..])
            }
            #[inline]
            fn ne(&self, other: &$lhs) -> bool {
                PartialEq::ne(&self[..], &other[..])
            }
        }
    };
}
impl_eq! { String, str }
impl_eq! { String, &'a str }
impl_eq! { Cow<'a, str>, str }
impl_eq! { Cow<'a, str>, &'b str }
impl_eq! { Cow<'a, str>, String }
// Basic trait impls: all forward to the `str` view via deref (`&**self`).
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for String {
    /// Creates an empty `String`.
    #[inline]
    fn default() -> String {
        String::new()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for String {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for String {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for String {
    // Hashes identically to the equivalent `&str`, as required for
    // borrowed-key lookups in hash maps.
    #[inline]
    fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
        (**self).hash(hasher)
    }
}
/// Implements the `+` operator for concatenating two strings.
///
/// This consumes the `String` on the left-hand side and re-uses its buffer (growing it if
/// necessary). This is done to avoid allocating a new `String` and copying the entire contents on
/// every operation, which would lead to `O(n^2)` running time when building an `n`-byte string by
/// repeated concatenation.
///
/// The string on the right-hand side is only borrowed; its contents are copied into the returned
/// `String`.
///
/// # Examples
///
/// Concatenating two `String`s takes the first by value and borrows the second:
///
/// ```
/// let a = String::from("hello");
/// let b = String::from(" world");
/// let c = a + &b;
/// // `a` is moved and can no longer be used here.
/// ```
///
/// If you want to keep using the first `String`, you can clone it and append to the clone instead:
///
/// ```
/// let a = String::from("hello");
/// let b = String::from(" world");
/// let c = a.clone() + &b;
/// // `a` is still valid here.
/// ```
///
/// Concatenating `&str` slices can be done by converting the first to a `String`:
///
/// ```
/// let a = "hello";
/// let b = " world";
/// let c = a.to_string() + b;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
impl Add<&str> for String {
    type Output = String;
    #[inline]
    fn add(mut self, other: &str) -> String {
        // Append in place and return the (possibly grown) left-hand buffer.
        self.push_str(other);
        self
    }
}
/// Implements the `+=` operator for appending to a `String`.
///
/// This has the same behavior as the [`push_str`][String::push_str] method.
#[stable(feature = "stringaddassign", since = "1.12.0")]
impl AddAssign<&str> for String {
    #[inline]
    fn add_assign(&mut self, other: &str) {
        self.push_str(other);
    }
}
// Range indexing: each impl re-slices through the full `str` view
// (`&self[..][index]`), so panics on non-boundary or out-of-range indices
// come from `str`'s own indexing.
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::Range<usize>> for String {
    type Output = str;
    #[inline]
    fn index(&self, index: ops::Range<usize>) -> &str {
        &self[..][index]
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeTo<usize>> for String {
    type Output = str;
    #[inline]
    fn index(&self, index: ops::RangeTo<usize>) -> &str {
        &self[..][index]
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFrom<usize>> for String {
    type Output = str;
    #[inline]
    fn index(&self, index: ops::RangeFrom<usize>) -> &str {
        &self[..][index]
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFull> for String {
    type Output = str;
    #[inline]
    fn index(&self, _index: ops::RangeFull) -> &str {
        // SAFETY: the `String` invariant guarantees the bytes are valid UTF-8.
        unsafe { str::from_utf8_unchecked(&self.vec) }
    }
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::Index<ops::RangeInclusive<usize>> for String {
    type Output = str;
    #[inline]
    fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
        Index::index(&**self, index)
    }
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::Index<ops::RangeToInclusive<usize>> for String {
    type Output = str;
    #[inline]
    fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
        Index::index(&**self, index)
    }
}
// Mutable range indexing: mirrors the immutable impls above, yielding
// `&mut str` so callers can mutate in place without breaking UTF-8.
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::Range<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
        &mut self[..][index]
    }
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::RangeTo<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
        &mut self[..][index]
    }
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::RangeFrom<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
        &mut self[..][index]
    }
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::RangeFull> for String {
    #[inline]
    fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
        // SAFETY: the buffer is valid UTF-8; `&mut str` forbids callers
        // from making it otherwise through safe code.
        unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
    }
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
        IndexMut::index_mut(&mut **self, index)
    }
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
    #[inline]
    fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
        IndexMut::index_mut(&mut **self, index)
    }
}
// Deref to `str`: the core mechanism that lets `String` use every `str` method.
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Deref for String {
    type Target = str;
    #[inline]
    fn deref(&self) -> &str {
        // SAFETY: the `String` invariant guarantees the bytes are valid UTF-8.
        unsafe { str::from_utf8_unchecked(&self.vec) }
    }
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::DerefMut for String {
    #[inline]
    fn deref_mut(&mut self) -> &mut str {
        // SAFETY: as above; `&mut str` keeps safe callers within UTF-8.
        unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
    }
}
/// A type alias for [`Infallible`].
///
/// This alias exists for backwards compatibility, and may be eventually deprecated.
///
/// [`Infallible`]: ../../core/convert/enum.Infallible.html
#[stable(feature = "str_parse_error", since = "1.5.0")]
pub type ParseError = core::convert::Infallible;
#[stable(feature = "rust1", since = "1.0.0")]
impl FromStr for String {
    // Parsing a `&str` into a `String` is just a copy; it cannot fail.
    type Err = core::convert::Infallible;
    #[inline]
    fn from_str(s: &str) -> Result<String, Self::Err> {
        Ok(String::from(s))
    }
}
/// A trait for converting a value to a `String`.
///
/// This trait is automatically implemented for any type which implements the
/// [`Display`] trait. As such, `ToString` shouldn't be implemented directly:
/// [`Display`] should be implemented instead, and you get the `ToString`
/// implementation for free.
///
/// [`Display`]: ../../std/fmt/trait.Display.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToString {
    /// Converts the given value to a `String`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let i = 5;
    /// let five = String::from("5");
    ///
    /// assert_eq!(five, i.to_string());
    /// ```
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn to_string(&self) -> String;
}
/// # Panics
///
/// In this implementation, the `to_string` method panics
/// if the `Display` implementation returns an error.
/// This indicates an incorrect `Display` implementation
/// since `fmt::Write for String` never returns an error itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> ToString for T {
    // `default fn`: specializable blanket impl; the impls below override it
    // for types where formatting machinery can be bypassed entirely.
    #[inline]
    default fn to_string(&self) -> String {
        use fmt::Write;
        let mut buf = String::new();
        buf.write_fmt(format_args!("{}", self))
            .expect("a Display implementation returned an error unexpectedly");
        buf.shrink_to_fit();
        buf
    }
}
#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
impl ToString for str {
    #[inline]
    fn to_string(&self) -> String {
        String::from(self)
    }
}
#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
impl ToString for Cow<'_, str> {
    #[inline]
    fn to_string(&self) -> String {
        self[..].to_owned()
    }
}
#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
impl ToString for String {
    #[inline]
    fn to_string(&self) -> String {
        self.to_owned()
    }
}
// Cheap borrow conversions: all rely on deref coercion, no allocation.
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<str> for String {
    #[inline]
    fn as_ref(&self) -> &str {
        self
    }
}
#[stable(feature = "string_as_mut", since = "1.43.0")]
impl AsMut<str> for String {
    #[inline]
    fn as_mut(&mut self) -> &mut str {
        self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<[u8]> for String {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.as_bytes()
    }
}
// Owned conversions from borrowed string data; each allocates a fresh buffer.
#[stable(feature = "rust1", since = "1.0.0")]
impl From<&str> for String {
    #[inline]
    fn from(s: &str) -> String {
        s.to_owned()
    }
}
#[stable(feature = "from_mut_str_for_string", since = "1.44.0")]
impl From<&mut str> for String {
    /// Converts a `&mut str` into a `String`.
    ///
    /// The result is allocated on the heap.
    #[inline]
    fn from(s: &mut str) -> String {
        s.to_owned()
    }
}
#[stable(feature = "from_ref_string", since = "1.35.0")]
impl From<&String> for String {
    #[inline]
    fn from(s: &String) -> String {
        s.clone()
    }
}
// note: test pulls in libstd, which causes errors here
#[cfg(not(test))]
#[stable(feature = "string_from_box", since = "1.18.0")]
impl From<Box<str>> for String {
    /// Converts the given boxed `str` slice to a `String`.
    /// It is notable that the `str` slice is owned.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s1: String = String::from("hello world");
    /// let s2: Box<str> = s1.into_boxed_str();
    /// let s3: String = String::from(s2);
    ///
    /// assert_eq!("hello world", s3)
    /// ```
    fn from(s: Box<str>) -> String {
        // Reuses the boxed allocation; no copy is made.
        s.into_string()
    }
}
#[stable(feature = "box_from_str", since = "1.20.0")]
impl From<String> for Box<str> {
    /// Converts the given `String` to a boxed `str` slice that is owned.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s1: String = String::from("hello world");
    /// let s2: Box<str> = Box::from(s1);
    /// let s3: String = String::from(s2);
    ///
    /// assert_eq!("hello world", s3)
    /// ```
    fn from(s: String) -> Box<str> {
        // Shrinks to fit, dropping any excess capacity.
        s.into_boxed_str()
    }
}
#[stable(feature = "string_from_cow_str", since = "1.14.0")]
impl<'a> From<Cow<'a, str>> for String {
fn from(s: Cow<'a, str>) -> String {
s.into_owned()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> From<&'a str> for Cow<'a, str> {
    /// Wraps the string slice without copying; the `Cow` borrows `s`.
    #[inline]
    fn from(s: &'a str) -> Cow<'a, str> {
        Cow::Borrowed(s)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> From<String> for Cow<'a, str> {
    /// Takes ownership of the `String`; no allocation or copy occurs.
    #[inline]
    fn from(s: String) -> Cow<'a, str> {
        Cow::Owned(s)
    }
}
#[stable(feature = "cow_from_string_ref", since = "1.28.0")]
impl<'a> From<&'a String> for Cow<'a, str> {
    /// Borrows the `String`'s contents as a `&str`; no copy occurs.
    #[inline]
    fn from(s: &'a String) -> Cow<'a, str> {
        Cow::Borrowed(s.as_str())
    }
}
#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
impl<'a> FromIterator<char> for Cow<'a, str> {
    /// Collects the characters into an owned `String` wrapped in `Cow::Owned`.
    fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> {
        Cow::Owned(it.into_iter().collect())
    }
}
#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> {
    /// Concatenates the string slices into an owned `String` wrapped in
    /// `Cow::Owned`.
    fn from_iter<I: IntoIterator<Item = &'b str>>(it: I) -> Cow<'a, str> {
        Cow::Owned(it.into_iter().collect())
    }
}
#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
impl<'a> FromIterator<String> for Cow<'a, str> {
    /// Concatenates the strings into one owned `String` wrapped in
    /// `Cow::Owned`.
    fn from_iter<I: IntoIterator<Item = String>>(it: I) -> Cow<'a, str> {
        Cow::Owned(it.into_iter().collect())
    }
}
#[stable(feature = "from_string_for_vec_u8", since = "1.14.0")]
impl From<String> for Vec<u8> {
    /// Converts the given `String` to a vector `Vec` that holds values of type `u8`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s1 = String::from("hello world");
    /// let v1 = Vec::from(s1);
    ///
    /// for b in v1 {
    ///     println!("{}", b);
    /// }
    /// ```
    fn from(string: String) -> Vec<u8> {
        // Moves the backing buffer out of the `String`; no copy is made.
        string.into_bytes()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Write for String {
    /// Appends `s` to this `String`; infallible, always returns `Ok(())`.
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.push_str(s);
        Ok(())
    }
    /// Appends a single `char`; infallible, always returns `Ok(())`.
    #[inline]
    fn write_char(&mut self, c: char) -> fmt::Result {
        self.push(c);
        Ok(())
    }
}
/// A draining iterator for `String`.
///
/// This struct is created by the [`drain`] method on [`String`]. See its
/// documentation for more.
///
/// [`drain`]: struct.String.html#method.drain
/// [`String`]: struct.String.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a> {
    /// Will be used as &'a mut String in the destructor; stored as a raw
    /// pointer so the `Chars` below can borrow from the same string.
    string: *mut String,
    /// Start of part to remove (a byte index into the string's buffer)
    start: usize,
    /// End of part to remove (a byte index into the string's buffer)
    end: usize,
    /// Current remaining range to remove, iterated as `char`s
    iter: Chars<'a>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl fmt::Debug for Drain<'_> {
    /// Opaque debug representation; the drained contents are not shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `pad` (rather than `write_str`) honors width/alignment flags.
        f.pad("Drain { .. }")
    }
}
// The raw `*mut String` field suppresses the auto traits, so they are
// reinstated manually. SAFETY: semantically `Drain` is a `&mut String`
// (see the field comment on `Drain::string`) plus a `Chars` borrow of it,
// and `&mut String` is both Send and Sync.
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl Sync for Drain<'_> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl Send for Drain<'_> {}
#[stable(feature = "drain", since = "1.6.0")]
impl Drop for Drain<'_> {
    /// Removes the drained byte range from the underlying `String`,
    /// whether or not the iterator was fully consumed.
    fn drop(&mut self) {
        // SAFETY: `string` originates from a valid `&mut String` (see the
        // field comment on `Drain::string`), and the bounds are re-checked
        // below before draining.
        unsafe {
            // Use Vec::drain. "Reaffirm" the bounds checks to avoid
            // panic code being inserted again.
            let self_vec = (*self.string).as_mut_vec();
            if self.start <= self.end && self.end <= self_vec.len() {
                self_vec.drain(self.start..self.end);
            }
        }
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl Iterator for Drain<'_> {
    type Item = char;
    /// Yields the next `char` of the drained range.
    #[inline]
    fn next(&mut self) -> Option<char> {
        self.iter.next()
    }
    // Delegates to the underlying `Chars` iterator's hint.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
    /// Returns the last `char` by stepping from the back instead of
    /// walking the whole range forward.
    #[inline]
    fn last(mut self) -> Option<char> {
        self.next_back()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl DoubleEndedIterator for Drain<'_> {
    /// Yields the next `char` from the back of the drained range.
    #[inline]
    fn next_back(&mut self) -> Option<char> {
        self.iter.next_back()
    }
}
// Marker impl: once `next` returns `None`, it keeps returning `None`.
#[stable(feature = "fused", since = "1.26.0")]
impl FusedIterator for Drain<'_> {}
// NOTE(review): stray non-code line (file-concatenation artifact) turned
// into a comment so the file parses. Original text: "added .collect()
// into String from Box<str> with fake feature/stability annotation".
//! A UTF-8 encoded, growable string.
//!
//! This module contains the [`String`] type, a trait for converting
//! [`ToString`]s, and several error types that may result from working with
//! [`String`]s.
//!
//! [`ToString`]: trait.ToString.html
//!
//! # Examples
//!
//! There are multiple ways to create a new [`String`] from a string literal:
//!
//! ```
//! let s = "Hello".to_string();
//!
//! let s = String::from("world");
//! let s: String = "also this".into();
//! ```
//!
//! You can create a new [`String`] from an existing one by concatenating with
//! `+`:
//!
//! [`String`]: struct.String.html
//!
//! ```
//! let s = "Hello".to_string();
//!
//! let message = s + " world!";
//! ```
//!
//! If you have a vector of valid UTF-8 bytes, you can make a [`String`] out of
//! it. You can do the reverse too.
//!
//! ```
//! let sparkle_heart = vec![240, 159, 146, 150];
//!
//! // We know these bytes are valid, so we'll use `unwrap()`.
//! let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
//!
//! assert_eq!("💖", sparkle_heart);
//!
//! let bytes = sparkle_heart.into_bytes();
//!
//! assert_eq!(bytes, [240, 159, 146, 150]);
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
use core::fmt;
use core::hash;
use core::iter::{FromIterator, FusedIterator};
use core::ops::Bound::{Excluded, Included, Unbounded};
use core::ops::{self, Add, AddAssign, Index, IndexMut, RangeBounds};
use core::ptr;
use core::str::{lossy, pattern::Pattern};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::str::{self, from_boxed_utf8_unchecked, Chars, FromStr, Utf8Error};
use crate::vec::Vec;
/// A UTF-8 encoded, growable string.
///
/// The `String` type is the most common string type that has ownership over the
/// contents of the string. It has a close relationship with its borrowed
/// counterpart, the primitive [`str`].
///
/// [`str`]: ../../std/primitive.str.html
///
/// # Examples
///
/// You can create a `String` from a literal string with [`String::from`]:
///
/// ```
/// let hello = String::from("Hello, world!");
/// ```
///
/// You can append a [`char`] to a `String` with the [`push`] method, and
/// append a [`&str`] with the [`push_str`] method:
///
/// ```
/// let mut hello = String::from("Hello, ");
///
/// hello.push('w');
/// hello.push_str("orld!");
/// ```
///
/// [`String::from`]: #method.from
/// [`char`]: ../../std/primitive.char.html
/// [`push`]: #method.push
/// [`push_str`]: #method.push_str
///
/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
/// the [`from_utf8`] method:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// // We know these bytes are valid, so we'll use `unwrap()`.
/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// [`from_utf8`]: #method.from_utf8
///
/// # UTF-8
///
/// `String`s are always valid UTF-8. This has a few implications, the first of
/// which is that if you need a non-UTF-8 string, consider [`OsString`]. It is
/// similar, but without the UTF-8 constraint. The second implication is that
/// you cannot index into a `String`:
///
/// ```compile_fail,E0277
/// let s = "hello";
///
/// println!("The first letter of s is {}", s[0]); // ERROR!!!
/// ```
///
/// [`OsString`]: ../../std/ffi/struct.OsString.html
///
/// Indexing is intended to be a constant-time operation, but UTF-8 encoding
/// does not allow us to do this. Furthermore, it's not clear what sort of
/// thing the index should return: a byte, a codepoint, or a grapheme cluster.
/// The [`bytes`] and [`chars`] methods return iterators over the first
/// two, respectively.
///
/// [`bytes`]: #method.bytes
/// [`chars`]: #method.chars
///
/// # Deref
///
/// `String`s implement [`Deref`]`<Target=str>`, and so inherit all of [`str`]'s
/// methods. In addition, this means that you can pass a `String` to a
/// function which takes a [`&str`] by using an ampersand (`&`):
///
/// ```
/// fn takes_str(s: &str) { }
///
/// let s = String::from("Hello");
///
/// takes_str(&s);
/// ```
///
/// This will create a [`&str`] from the `String` and pass it in. This
/// conversion is very inexpensive, and so generally, functions will accept
/// [`&str`]s as arguments unless they need a `String` for some specific
/// reason.
///
/// In certain cases Rust doesn't have enough information to make this
/// conversion, known as [`Deref`] coercion. In the following example a string
/// slice [`&'a str`][`&str`] implements the trait `TraitExample`, and the function
/// `example_func` takes anything that implements the trait. In this case Rust
/// would need to make two implicit conversions, which Rust doesn't have the
/// means to do. For that reason, the following example will not compile.
///
/// ```compile_fail,E0277
/// trait TraitExample {}
///
/// impl<'a> TraitExample for &'a str {}
///
/// fn example_func<A: TraitExample>(example_arg: A) {}
///
/// let example_string = String::from("example_string");
/// example_func(&example_string);
/// ```
///
/// There are two options that would work instead. The first would be to
/// change the line `example_func(&example_string);` to
/// `example_func(example_string.as_str());`, using the method [`as_str()`]
/// to explicitly extract the string slice containing the string. The second
/// way changes `example_func(&example_string);` to
/// `example_func(&*example_string);`. In this case we are dereferencing a
/// `String` to a [`str`][`&str`], then referencing the [`str`][`&str`] back to
/// [`&str`]. The second way is more idiomatic, however both work to do the
/// conversion explicitly rather than relying on the implicit conversion.
///
/// # Representation
///
/// A `String` is made up of three components: a pointer to some bytes, a
/// length, and a capacity. The pointer points to an internal buffer `String`
/// uses to store its data. The length is the number of bytes currently stored
/// in the buffer, and the capacity is the size of the buffer in bytes. As such,
/// the length will always be less than or equal to the capacity.
///
/// This buffer is always stored on the heap.
///
/// You can look at these with the [`as_ptr`], [`len`], and [`capacity`]
/// methods:
///
/// ```
/// use std::mem;
///
/// let story = String::from("Once upon a time...");
///
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent automatically dropping the String's data
/// let mut story = mem::ManuallyDrop::new(story);
///
/// let ptr = story.as_mut_ptr();
/// let len = story.len();
/// let capacity = story.capacity();
///
/// // story has nineteen bytes
/// assert_eq!(19, len);
///
/// // We can re-build a String out of ptr, len, and capacity. This is all
/// // unsafe because we are responsible for making sure the components are
/// // valid:
/// let s = unsafe { String::from_raw_parts(ptr, len, capacity) } ;
///
/// assert_eq!(String::from("Once upon a time..."), s);
/// ```
///
/// [`as_ptr`]: #method.as_ptr
/// [`len`]: #method.len
/// [`capacity`]: #method.capacity
///
/// If a `String` has enough capacity, adding elements to it will not
/// re-allocate. For example, consider this program:
///
/// ```
/// let mut s = String::new();
///
/// println!("{}", s.capacity());
///
/// for _ in 0..5 {
/// s.push_str("hello");
/// println!("{}", s.capacity());
/// }
/// ```
///
/// This will output the following:
///
/// ```text
/// 0
/// 5
/// 10
/// 20
/// 20
/// 40
/// ```
///
/// At first, we have no memory allocated at all, but as we append to the
/// string, it increases its capacity appropriately. If we instead use the
/// [`with_capacity`] method to allocate the correct capacity initially:
///
/// ```
/// let mut s = String::with_capacity(25);
///
/// println!("{}", s.capacity());
///
/// for _ in 0..5 {
/// s.push_str("hello");
/// println!("{}", s.capacity());
/// }
/// ```
///
/// [`with_capacity`]: #method.with_capacity
///
/// We end up with a different output:
///
/// ```text
/// 25
/// 25
/// 25
/// 25
/// 25
/// 25
/// ```
///
/// Here, there's no need to allocate more memory inside the loop.
///
/// [`&str`]: ../../std/primitive.str.html
/// [`Deref`]: ../../std/ops/trait.Deref.html
/// [`as_str()`]: struct.String.html#method.as_str
#[derive(PartialOrd, Eq, Ord)]
#[cfg_attr(not(test), rustc_diagnostic_item = "string_type")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct String {
    // Invariant: `vec` always holds valid UTF-8 — `from_utf8` validates
    // before constructing, and `from_utf8_unchecked` makes the caller
    // guarantee it.
    vec: Vec<u8>,
}
/// A possible error value when converting a `String` from a UTF-8 byte vector.
///
/// This type is the error type for the [`from_utf8`] method on [`String`]. It
/// is designed in such a way to carefully avoid reallocations: the
/// [`into_bytes`] method will give back the byte vector that was used in the
/// conversion attempt.
///
/// [`from_utf8`]: struct.String.html#method.from_utf8
/// [`String`]: struct.String.html
/// [`into_bytes`]: struct.FromUtf8Error.html#method.into_bytes
///
/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
/// through the [`utf8_error`] method.
///
/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
/// [`std::str`]: ../../std/str/index.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`&str`]: ../../std/primitive.str.html
/// [`utf8_error`]: #method.utf8_error
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
///
/// let value = String::from_utf8(bytes);
///
/// assert!(value.is_err());
/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FromUtf8Error {
    // The exact byte vector passed to `from_utf8`, kept so it can be
    // handed back to the caller without reallocation.
    bytes: Vec<u8>,
    // Where and why UTF-8 validation failed.
    error: Utf8Error,
}
/// A possible error value when converting a `String` from a UTF-16 byte slice.
///
/// This type is the error type for the [`from_utf16`] method on [`String`].
///
/// [`from_utf16`]: struct.String.html#method.from_utf16
/// [`String`]: struct.String.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞mu<invalid>ic
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0xD800, 0x0069, 0x0063];
///
/// assert!(String::from_utf16(v).is_err());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
// Unit payload: the error carries no detail beyond "invalid UTF-16".
pub struct FromUtf16Error(());
impl String {
/// Creates a new empty `String`.
///
/// Given that the `String` is empty, this will not allocate any initial
/// buffer. While that means that this initial operation is very
/// inexpensive, it may cause excessive allocation later when you add
/// data. If you have an idea of how much data the `String` will hold,
/// consider the [`with_capacity`] method to prevent excessive
/// re-allocation.
///
/// [`with_capacity`]: #method.with_capacity
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::new();
/// ```
#[inline]
#[rustc_const_stable(feature = "const_string_new", since = "1.32.0")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn new() -> String {
    // Allocation-free (see the doc comment above) and usable in `const`
    // contexts, since `Vec::new` allocates no buffer.
    String { vec: Vec::new() }
}
/// Creates a new empty `String` with a particular capacity.
///
/// `String`s have an internal buffer to hold their data. The capacity is
/// the length of that buffer, and can be queried with the [`capacity`]
/// method. This method creates an empty `String`, but one with an initial
/// buffer that can hold `capacity` bytes. This is useful when you may be
/// appending a bunch of data to the `String`, reducing the number of
/// reallocations it needs to do.
///
/// [`capacity`]: #method.capacity
///
/// If the given capacity is `0`, no allocation will occur, and this method
/// is identical to the [`new`] method.
///
/// [`new`]: #method.new
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::with_capacity(10);
///
/// // The String contains no chars, even though it has capacity for more
/// assert_eq!(s.len(), 0);
///
/// // These are all done without reallocating...
/// let cap = s.capacity();
/// for _ in 0..10 {
/// s.push('a');
/// }
///
/// assert_eq!(s.capacity(), cap);
///
/// // ...but this may make the string reallocate
/// s.push('a');
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(capacity: usize) -> String {
    // The allocation strategy is delegated entirely to `Vec`.
    let vec = Vec::with_capacity(capacity);
    String { vec }
}
// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Since we don't
// require this method for testing purposes, I'll just stub it
// NB see the slice::hack module in slice.rs for more information
#[inline]
#[cfg(test)]
pub fn from_str(_: &str) -> String {
    // Deliberately unusable under cfg(test); see the HACK note above.
    panic!("not available with cfg(test)");
}
/// Converts a vector of bytes to a `String`.
///
/// A string ([`String`]) is made of bytes ([`u8`]), and a vector of bytes
/// ([`Vec<u8>`]) is made of bytes, so this function converts between the
/// two. Not all byte slices are valid `String`s, however: `String`
/// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
/// the bytes are valid UTF-8, and then does the conversion.
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the validity check, there is an unsafe version
/// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the check.
///
/// This method will take care to not copy the vector, for efficiency's
/// sake.
///
/// If you need a [`&str`] instead of a `String`, consider
/// [`str::from_utf8`].
///
/// The inverse of this method is [`into_bytes`].
///
/// # Errors
///
/// Returns [`Err`] if the slice is not UTF-8 with a description as to why the
/// provided bytes are not UTF-8. The vector you moved in is also included.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// // We know these bytes are valid, so we'll use `unwrap()`.
/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// // some invalid bytes, in a vector
/// let sparkle_heart = vec![0, 159, 146, 150];
///
/// assert!(String::from_utf8(sparkle_heart).is_err());
/// ```
///
/// See the docs for [`FromUtf8Error`] for more details on what you can do
/// with this error.
///
/// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
/// [`String`]: struct.String.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`Vec<u8>`]: ../../std/vec/struct.Vec.html
/// [`&str`]: ../../std/primitive.str.html
/// [`str::from_utf8`]: ../../std/str/fn.from_utf8.html
/// [`into_bytes`]: struct.String.html#method.into_bytes
/// [`FromUtf8Error`]: struct.FromUtf8Error.html
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
match str::from_utf8(&vec) {
Ok(..) => Ok(String { vec }),
Err(e) => Err(FromUtf8Error { bytes: vec, error: e }),
}
}
/// Converts a slice of bytes to a string, including invalid characters.
///
/// Strings are made of bytes ([`u8`]), and a slice of bytes
/// ([`&[u8]`][byteslice]) is made of bytes, so this function converts
/// between the two. Not all byte slices are valid strings, however: strings
/// are required to be valid UTF-8. During this conversion,
/// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
/// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: �
///
/// [`u8`]: ../../std/primitive.u8.html
/// [byteslice]: ../../std/primitive.slice.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the conversion, there is an unsafe version
/// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the checks.
///
/// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
///
/// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
/// UTF-8, then we need to insert the replacement characters, which will
/// change the size of the string, and hence, require a `String`. But if
/// it's already valid UTF-8, we don't need a new allocation. This return
/// type allows us to handle both cases.
///
/// [`Cow<'a, str>`]: ../../std/borrow/enum.Cow.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart);
///
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// // some invalid bytes
/// let input = b"Hello \xF0\x90\x80World";
/// let output = String::from_utf8_lossy(input);
///
/// assert_eq!("Hello �World", output);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
    // Walk the input as alternating (valid, broken) chunks.
    let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
    // Fast path: if the first chunk's valid part covers the whole input,
    // the bytes are entirely valid UTF-8 and we can borrow instead of
    // allocating.
    let (first_valid, first_broken) = if let Some(chunk) = iter.next() {
        let lossy::Utf8LossyChunk { valid, broken } = chunk;
        if valid.len() == v.len() {
            debug_assert!(broken.is_empty());
            return Cow::Borrowed(valid);
        }
        (valid, broken)
    } else {
        // Empty input: borrow the static empty string.
        return Cow::Borrowed("");
    };
    const REPLACEMENT: &str = "\u{FFFD}";
    // Slow path: rebuild the string, substituting U+FFFD for each broken
    // sequence. `v.len()` is a lower-bound capacity guess.
    let mut res = String::with_capacity(v.len());
    res.push_str(first_valid);
    if !first_broken.is_empty() {
        res.push_str(REPLACEMENT);
    }
    for lossy::Utf8LossyChunk { valid, broken } in iter {
        res.push_str(valid);
        if !broken.is_empty() {
            res.push_str(REPLACEMENT);
        }
    }
    Cow::Owned(res)
}
/// Decode a UTF-16 encoded slice `v` into a `String`, returning [`Err`]
/// if `v` contains any invalid data.
///
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞music
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0x0073, 0x0069, 0x0063];
/// assert_eq!(String::from("𝄞music"),
/// String::from_utf16(v).unwrap());
///
/// // 𝄞mu<invalid>ic
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0xD800, 0x0069, 0x0063];
/// assert!(String::from_utf16(v).is_err());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> {
    // This isn't done via collect::<Result<_, _>>() for performance reasons.
    // FIXME: the function can be simplified again when #48994 is closed.
    let mut out = String::with_capacity(v.len());
    for decoded in decode_utf16(v.iter().cloned()) {
        match decoded {
            Ok(c) => out.push(c),
            // Bail out on the first unpaired surrogate.
            Err(_) => return Err(FromUtf16Error(())),
        }
    }
    Ok(out)
}
/// Decode a UTF-16 encoded slice `v` into a `String`, replacing
/// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
///
/// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
/// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8
/// conversion requires a memory allocation.
///
/// [`from_utf8_lossy`]: #method.from_utf8_lossy
/// [`Cow<'a, str>`]: ../borrow/enum.Cow.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // 𝄞mus<invalid>ic<invalid>
/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0x0073, 0xDD1E, 0x0069, 0x0063,
/// 0xD834];
///
/// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
/// String::from_utf16_lossy(v));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf16_lossy(v: &[u16]) -> String {
    // Substitute U+FFFD for every unpaired surrogate while collecting.
    decode_utf16(v.iter().cloned())
        .map(|decoded| match decoded {
            Ok(c) => c,
            Err(_) => REPLACEMENT_CHARACTER,
        })
        .collect()
}
/// Decomposes a `String` into its raw components.
///
/// Returns the raw pointer to the underlying data, the length of
/// the string (in bytes), and the allocated capacity of the data
/// (in bytes). These are the same arguments in the same order as
/// the arguments to [`from_raw_parts`].
///
/// After calling this function, the caller is responsible for the
/// memory previously managed by the `String`. The only way to do
/// this is to convert the raw pointer, length, and capacity back
/// into a `String` with the [`from_raw_parts`] function, allowing
/// the destructor to perform the cleanup.
///
/// [`from_raw_parts`]: #method.from_raw_parts
///
/// # Examples
///
/// ```
/// #![feature(vec_into_raw_parts)]
/// let s = String::from("hello");
///
/// let (ptr, len, cap) = s.into_raw_parts();
///
/// let rebuilt = unsafe { String::from_raw_parts(ptr, len, cap) };
/// assert_eq!(rebuilt, "hello");
/// ```
#[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
pub fn into_raw_parts(self) -> (*mut u8, usize, usize) {
    // Delegates to `Vec::into_raw_parts`; after this call the caller owns
    // the buffer (see the doc comment above).
    self.vec.into_raw_parts()
}
/// Creates a new `String` from a length, capacity, and pointer.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
/// * The memory at `ptr` needs to have been previously allocated by the
/// same allocator the standard library uses, with a required alignment of exactly 1.
/// * `length` needs to be less than or equal to `capacity`.
/// * `capacity` needs to be the correct value.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures.
///
/// The ownership of `ptr` is effectively transferred to the
/// `String` which may then deallocate, reallocate or change the
/// contents of memory pointed to by the pointer at will. Ensure
/// that nothing else uses the pointer after calling this
/// function.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::mem;
///
/// unsafe {
/// let s = String::from("hello");
///
// FIXME Update this when vec_into_raw_parts is stabilized
/// // Prevent automatically dropping the String's data
/// let mut s = mem::ManuallyDrop::new(s);
///
/// let ptr = s.as_mut_ptr();
/// let len = s.len();
/// let capacity = s.capacity();
///
/// let s = String::from_raw_parts(ptr, len, capacity);
///
/// assert_eq!(String::from("hello"), s);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
    // SAFETY: caller upholds `Vec::from_raw_parts`' contract per the
    // `# Safety` section above. NOTE(review): the `String` invariant also
    // requires the first `length` bytes to be valid UTF-8, which the
    // `# Safety` list above does not state explicitly — worth confirming
    // and documenting.
    String { vec: Vec::from_raw_parts(buf, length, capacity) }
}
/// Converts a vector of bytes to a `String` without checking that the
/// string contains valid UTF-8.
///
/// See the safe version, [`from_utf8`], for more details.
///
/// [`from_utf8`]: struct.String.html#method.from_utf8
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed
/// to it are valid UTF-8. If this constraint is violated, it may cause
/// memory unsafety issues with future users of the `String`, as the rest of
/// the standard library assumes that `String`s are valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// let sparkle_heart = unsafe {
/// String::from_utf8_unchecked(sparkle_heart)
/// };
///
/// assert_eq!("💖", sparkle_heart);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String {
    // No validation performed: the caller guarantees `bytes` is valid
    // UTF-8 (see the `# Safety` section above).
    String { vec: bytes }
}
/// Converts a `String` into a byte vector.
///
/// This consumes the `String`, so we do not need to copy its contents.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("hello");
/// let bytes = s.into_bytes();
///
/// assert_eq!(&[104, 101, 108, 108, 111][..], &bytes[..]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_bytes(self) -> Vec<u8> {
    // Moves the buffer out directly; no copy is made.
    self.vec
}
/// Extracts a string slice containing the entire `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("foo");
///
/// assert_eq!("foo", s.as_str());
/// ```
#[inline]
#[stable(feature = "string_as_str", since = "1.7.0")]
pub fn as_str(&self) -> &str {
    // `&String` coerces to `&str` via `String`'s `Deref<Target = str>`.
    self
}
/// Converts a `String` into a mutable string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foobar");
/// let s_mut_str = s.as_mut_str();
///
/// s_mut_str.make_ascii_uppercase();
///
/// assert_eq!("FOOBAR", s_mut_str);
/// ```
#[inline]
#[stable(feature = "string_as_str", since = "1.7.0")]
pub fn as_mut_str(&mut self) -> &mut str {
    // `&mut String` coerces to `&mut str` via `DerefMut`.
    self
}
/// Appends a given string slice onto the end of this `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// s.push_str("bar");
///
/// assert_eq!("foobar", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_str(&mut self, string: &str) {
    // Appending the bytes of a whole, valid `&str` preserves `vec`'s
    // UTF-8 invariant.
    self.vec.extend_from_slice(string.as_bytes())
}
/// Returns this `String`'s capacity, in bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::with_capacity(10);
///
/// assert!(s.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    // The string's capacity is exactly the byte buffer's capacity.
    self.vec.capacity()
}
/// Ensures that this `String`'s capacity is at least `additional` bytes
/// larger than its length.
///
/// The capacity may be increased by more than `additional` bytes if it
/// chooses, to prevent frequent reallocations.
///
/// If you do not want this "at least" behavior, see the [`reserve_exact`]
/// method.
///
/// # Panics
///
/// Panics if the new capacity overflows [`usize`].
///
/// [`reserve_exact`]: struct.String.html#method.reserve_exact
/// [`usize`]: ../../std/primitive.usize.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::new();
///
/// s.reserve(10);
///
/// assert!(s.capacity() >= 10);
/// ```
///
/// This may not actually increase the capacity:
///
/// ```
/// let mut s = String::with_capacity(10);
/// s.push('a');
/// s.push('b');
///
/// // s now has a length of 2 and a capacity of 10
/// assert_eq!(2, s.len());
/// assert_eq!(10, s.capacity());
///
/// // Since we already have an extra 8 capacity, calling this...
/// s.reserve(8);
///
/// // ... doesn't actually increase.
/// assert_eq!(10, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    // Growth policy is delegated to `Vec::reserve` ("at least" semantics).
    self.vec.reserve(additional)
}
/// Ensures that this `String`'s capacity is `additional` bytes
/// larger than its length.
///
/// Consider using the [`reserve`] method unless you absolutely know
/// better than the allocator.
///
/// [`reserve`]: #method.reserve
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::new();
///
/// s.reserve_exact(10);
///
/// assert!(s.capacity() >= 10);
/// ```
///
/// This may not actually increase the capacity:
///
/// ```
/// let mut s = String::with_capacity(10);
/// s.push('a');
/// s.push('b');
///
/// // s now has a length of 2 and a capacity of 10
/// assert_eq!(2, s.len());
/// assert_eq!(10, s.capacity());
///
/// // Since we already have an extra 8 capacity, calling this...
/// s.reserve_exact(8);
///
/// // ... doesn't actually increase.
/// assert_eq!(10, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
    // Delegates to `Vec::reserve_exact` (no extra over-allocation requested).
    self.vec.reserve_exact(additional)
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
/// in the given `String`. The collection may reserve more space to avoid
/// frequent reallocations. After calling `reserve`, capacity will be
/// greater than or equal to `self.len() + additional`. Does nothing if
/// capacity is already sufficient.
///
/// # Errors
///
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::TryReserveError;
///
/// fn process_data(data: &str) -> Result<String, TryReserveError> {
/// let mut output = String::new();
///
/// // Pre-reserve the memory, exiting if we can't
/// output.try_reserve(data.len())?;
///
/// // Now we know this can't OOM in the middle of our complex work
/// output.push_str(data);
///
/// Ok(output)
/// }
/// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
    // Fallible counterpart of `reserve`; errors instead of aborting on
    // allocation failure or capacity overflow.
    self.vec.try_reserve(additional)
}
/// Tries to reserve the minimum capacity for exactly `additional` more elements to
/// be inserted in the given `String`. After calling `reserve_exact`,
/// capacity will be greater than or equal to `self.len() + additional`.
/// Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it
/// requests. Therefore, capacity can not be relied upon to be precisely
/// minimal. Prefer `reserve` if future insertions are expected.
///
/// # Errors
///
/// If the capacity overflows, or the allocator reports a failure, then an error
/// is returned.
///
/// # Examples
///
/// ```
/// #![feature(try_reserve)]
/// use std::collections::TryReserveError;
///
/// fn process_data(data: &str) -> Result<String, TryReserveError> {
/// let mut output = String::new();
///
/// // Pre-reserve the memory, exiting if we can't
/// output.try_reserve(data.len())?;
///
/// // Now we know this can't OOM in the middle of our complex work
/// output.push_str(data);
///
/// Ok(output)
/// }
/// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
/// ```
#[unstable(feature = "try_reserve", reason = "new API", issue = "48043")]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
self.vec.try_reserve_exact(additional)
}
/// Shrinks the capacity of this `String` to match its length.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// s.reserve(100);
/// assert!(s.capacity() >= 100);
///
/// s.shrink_to_fit();
/// assert_eq!(3, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
// Delegates to the byte buffer; UTF-8 validity is unaffected by
// capacity changes.
self.vec.shrink_to_fit()
}
/// Shrinks the capacity of this `String` with a lower bound.
///
/// The capacity will remain at least as large as both the length
/// and the supplied value.
///
/// # Panics
///
/// Panics if the current capacity is smaller than the supplied
/// minimum capacity.
///
/// # Examples
///
/// ```
/// #![feature(shrink_to)]
/// let mut s = String::from("foo");
///
/// s.reserve(100);
/// assert!(s.capacity() >= 100);
///
/// s.shrink_to(10);
/// assert!(s.capacity() >= 10);
/// s.shrink_to(0);
/// assert!(s.capacity() >= 3);
/// ```
#[inline]
#[unstable(feature = "shrink_to", reason = "new API", issue = "56431")]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.vec.shrink_to(min_capacity)
}
/// Appends the given [`char`] to the end of this `String`.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("abc");
///
/// s.push('1');
/// s.push('2');
/// s.push('3');
///
/// assert_eq!("abc123", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push(&mut self, ch: char) {
    // A one-byte (ASCII) char can be appended directly as a single byte;
    // anything wider is UTF-8 encoded into a 4-byte stack buffer first and
    // appended as a byte slice.
    if ch.len_utf8() == 1 {
        self.vec.push(ch as u8);
    } else {
        self.vec.extend_from_slice(ch.encode_utf8(&mut [0; 4]).as_bytes());
    }
}
/// Returns a byte slice of this `String`'s contents.
///
/// The inverse of this method is [`from_utf8`].
///
/// [`from_utf8`]: #method.from_utf8
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("hello");
///
/// assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_bytes(&self) -> &[u8] {
// `vec` is the UTF-8 byte buffer, so a shared slice of it is exactly
// the string's bytes.
&self.vec
}
/// Shortens this `String` to the specified length.
///
/// If `new_len` is greater than the string's current length, this has no
/// effect.
///
/// Note that this method has no effect on the allocated capacity
/// of the string.
///
/// # Panics
///
/// Panics if `new_len` does not lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("hello");
///
/// s.truncate(2);
///
/// assert_eq!("he", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn truncate(&mut self, new_len: usize) {
if new_len <= self.len() {
// Only check the boundary when actually shortening; a `new_len` past
// the end is documented to be a no-op.
assert!(self.is_char_boundary(new_len));
self.vec.truncate(new_len)
}
}
/// Removes the last character from the string buffer and returns it.
///
/// Returns [`None`] if this `String` is empty.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('f'));
///
/// assert_eq!(s.pop(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop(&mut self) -> Option<char> {
// `?` returns `None` for the empty string.
let ch = self.chars().rev().next()?;
let newlen = self.len() - ch.len_utf8();
// SAFETY: `newlen` is the byte offset where the final char starts, so it
// lies on a char boundary and is <= the current length; truncating there
// leaves the buffer valid UTF-8.
unsafe {
self.vec.set_len(newlen);
}
Some(ch)
}
/// Removes a [`char`] from this `String` at a byte position and returns it.
///
/// This is an `O(n)` operation, as it requires copying every element in the
/// buffer.
///
/// # Panics
///
/// Panics if `idx` is larger than or equal to the `String`'s length,
/// or if it does not lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// assert_eq!(s.remove(0), 'f');
/// assert_eq!(s.remove(1), 'o');
/// assert_eq!(s.remove(0), 'o');
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, idx: usize) -> char {
// The slicing below panics if `idx` is out of bounds or not on a char
// boundary, which provides the documented panic behavior.
let ch = match self[idx..].chars().next() {
Some(ch) => ch,
None => panic!("cannot remove a char from the end of a string"),
};
let next = idx + ch.len_utf8();
let len = self.len();
// SAFETY: `idx` and `next` are char boundaries within the buffer (checked
// by the slicing above), so shifting the tail down over the removed char
// and shrinking the length keeps the contents valid UTF-8.
unsafe {
ptr::copy(self.vec.as_ptr().add(next), self.vec.as_mut_ptr().add(idx), len - next);
self.vec.set_len(len - (next - idx));
}
ch
}
/// Retains only the characters specified by the predicate.
///
/// In other words, remove all characters `c` such that `f(c)` returns `false`.
/// This method operates in place, visiting each character exactly once in the
/// original order, and preserves the order of the retained characters.
///
/// # Examples
///
/// ```
/// let mut s = String::from("f_o_ob_ar");
///
/// s.retain(|c| c != '_');
///
/// assert_eq!(s, "foobar");
/// ```
///
/// The exact order may be useful for tracking external state, like an index.
///
/// ```
/// let mut s = String::from("abcde");
/// let keep = [false, true, true, false, true];
/// let mut i = 0;
/// s.retain(|_| (keep[i], i += 1).0);
/// assert_eq!(s, "bce");
/// ```
#[inline]
#[stable(feature = "string_retain", since = "1.26.0")]
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(char) -> bool,
{
let len = self.len();
// Number of bytes dropped so far; retained chars are compacted left by
// exactly this amount.
let mut del_bytes = 0;
let mut idx = 0;
while idx < len {
// SAFETY: `idx` always lies on a char boundary — it only advances by
// whole `len_utf8` amounts below.
let ch = unsafe { self.get_unchecked(idx..len).chars().next().unwrap() };
let ch_len = ch.len_utf8();
if !f(ch) {
del_bytes += ch_len;
} else if del_bytes > 0 {
// Shift the retained char left over the gap left by deleted bytes.
unsafe {
ptr::copy(
self.vec.as_ptr().add(idx),
self.vec.as_mut_ptr().add(idx - del_bytes),
ch_len,
);
}
}
// Point idx to the next char
idx += ch_len;
}
if del_bytes > 0 {
// SAFETY: all retained chars were moved to the front of the buffer,
// so the first `len - del_bytes` bytes are valid UTF-8.
unsafe {
self.vec.set_len(len - del_bytes);
}
}
}
/// Inserts a character into this `String` at a byte position.
///
/// This is an `O(n)` operation as it requires copying every element in the
/// buffer.
///
/// # Panics
///
/// Panics if `idx` is larger than the `String`'s length, or if it does not
/// lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::with_capacity(3);
///
/// s.insert(0, 'f');
/// s.insert(1, 'o');
/// s.insert(2, 'o');
///
/// assert_eq!("foo", s);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, idx: usize, ch: char) {
    assert!(self.is_char_boundary(idx));
    // Encode the char into a small stack buffer, then splice those UTF-8
    // bytes into the string at `idx`.
    let mut buf = [0; 4];
    let encoded = ch.encode_utf8(&mut buf).as_bytes();
    unsafe {
        self.insert_bytes(idx, encoded);
    }
}
/// Splices `bytes` into the buffer at byte offset `idx`, shifting the
/// existing tail to the right.
///
/// # Safety
///
/// `idx` must be `<= self.len()` and lie on a char boundary, and `bytes`
/// must be valid UTF-8 — otherwise the `String`'s UTF-8 invariant is
/// broken (callers `insert`/`insert_str` uphold both).
unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) {
let len = self.len();
let amt = bytes.len();
self.vec.reserve(amt);
// Open a gap of `amt` bytes at `idx`, then copy the new bytes into it.
ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
ptr::copy(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
self.vec.set_len(len + amt);
}
/// Inserts a string slice into this `String` at a byte position.
///
/// This is an `O(n)` operation as it requires copying every element in the
/// buffer.
///
/// # Panics
///
/// Panics if `idx` is larger than the `String`'s length, or if it does not
/// lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("bar");
///
/// s.insert_str(0, "foo");
///
/// assert_eq!("foobar", s);
/// ```
#[inline]
#[stable(feature = "insert_str", since = "1.16.0")]
pub fn insert_str(&mut self, idx: usize, string: &str) {
assert!(self.is_char_boundary(idx));
// SAFETY: `idx` is a char boundary (asserted) and a `&str` is always
// valid UTF-8, satisfying `insert_bytes`' contract.
unsafe {
self.insert_bytes(idx, string.as_bytes());
}
}
/// Returns a mutable reference to the contents of this `String`.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed
/// to it are valid UTF-8. If this constraint is violated, it may cause
/// memory unsafety issues with future users of the `String`, as the rest of
/// the standard library assumes that `String`s are valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("hello");
///
/// unsafe {
/// let vec = s.as_mut_vec();
/// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
///
/// vec.reverse();
/// }
/// assert_eq!(s, "olleh");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
// The caller is responsible for keeping the buffer valid UTF-8 (see
// Safety above).
&mut self.vec
}
/// Returns the length of this `String`, in bytes, not [`char`]s or
/// graphemes. In other words, it may not be what a human considers the
/// length of the string.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = String::from("foo");
/// assert_eq!(a.len(), 3);
///
/// let fancy_f = String::from("ƒoo");
/// assert_eq!(fancy_f.len(), 4);
/// assert_eq!(fancy_f.chars().count(), 3);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
// The byte buffer's length is exactly the string's UTF-8 length.
self.vec.len()
}
/// Returns `true` if this `String` has a length of zero, and `false` otherwise.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut v = String::new();
/// assert!(v.is_empty());
///
/// v.push('a');
/// assert!(!v.is_empty());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    // A `String` is empty exactly when its byte buffer is.
    self.vec.is_empty()
}
/// Splits the string into two at the given index.
///
/// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and
/// the returned `String` contains bytes `[at, len)`. `at` must be on the
/// boundary of a UTF-8 code point.
///
/// Note that the capacity of `self` does not change.
///
/// # Panics
///
/// Panics if `at` is not on a `UTF-8` code point boundary, or if it is beyond the last
/// code point of the string.
///
/// # Examples
///
/// ```
/// # fn main() {
/// let mut hello = String::from("Hello, World!");
/// let world = hello.split_off(7);
/// assert_eq!(hello, "Hello, ");
/// assert_eq!(world, "World!");
/// # }
/// ```
#[inline]
#[stable(feature = "string_split_off", since = "1.16.0")]
#[must_use = "use `.truncate()` if you don't need the other half"]
pub fn split_off(&mut self, at: usize) -> String {
assert!(self.is_char_boundary(at));
let other = self.vec.split_off(at);
// SAFETY: `at` is a char boundary (asserted above), so both halves of
// the original buffer remain valid UTF-8.
unsafe { String::from_utf8_unchecked(other) }
}
/// Truncates this `String`, removing all contents.
///
/// While this means the `String` will have a length of zero, it does not
/// touch its capacity.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("foo");
///
/// s.clear();
///
/// assert!(s.is_empty());
/// assert_eq!(0, s.len());
/// assert_eq!(3, s.capacity());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
// Drops the bytes but keeps the allocation, per the docs above.
self.vec.clear()
}
/// Creates a draining iterator that removes the specified range in the `String`
/// and yields the removed `chars`.
///
/// Note: The element range is removed even if the iterator is not
/// consumed until the end.
///
/// # Panics
///
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
///
/// // Remove the range up until the β from the string
/// let t: String = s.drain(..beta_offset).collect();
/// assert_eq!(t, "α is alpha, ");
/// assert_eq!(s, "β is beta");
///
/// // A full range clears the string
/// s.drain(..);
/// assert_eq!(s, "");
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain<R>(&mut self, range: R) -> Drain<'_>
where
R: RangeBounds<usize>,
{
// Memory safety
//
// The String version of Drain does not have the memory safety issues
// of the vector version. The data is just plain bytes.
// Because the range removal happens in Drop, if the Drain iterator is leaked,
// the removal will not happen.
let len = self.len();
// Convert the generic bounds into concrete half-open byte indices.
let start = match range.start_bound() {
Included(&n) => n,
Excluded(&n) => n + 1,
Unbounded => 0,
};
let end = match range.end_bound() {
Included(&n) => n + 1,
Excluded(&n) => n,
Unbounded => len,
};
// Take out two simultaneous borrows. The &mut String won't be accessed
// until iteration is over, in Drop.
let self_ptr = self as *mut _;
// slicing does the appropriate bounds checks
let chars_iter = self[start..end].chars();
Drain { start, end, iter: chars_iter, string: self_ptr }
}
/// Removes the specified range in the string,
/// and replaces it with the given string.
/// The given string doesn't need to be the same length as the range.
///
/// # Panics
///
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// [`char`]: ../../std/primitive.char.html
/// [`Vec::splice`]: ../../std/vec/struct.Vec.html#method.splice
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut s = String::from("α is alpha, β is beta");
/// let beta_offset = s.find('β').unwrap_or(s.len());
///
/// // Replace the range up until the β from the string
/// s.replace_range(..beta_offset, "Α is capital alpha; ");
/// assert_eq!(s, "Α is capital alpha; β is beta");
/// ```
#[stable(feature = "splice", since = "1.27.0")]
pub fn replace_range<R>(&mut self, range: R, replace_with: &str)
where
R: RangeBounds<usize>,
{
// Memory safety
//
// Replace_range does not have the memory safety issues of the vector
// version of Splice. The data is just plain bytes.
// Check that both effective endpoints lie on char boundaries before
// splicing raw bytes; an excluded start/included end shift by one byte.
match range.start_bound() {
Included(&n) => assert!(self.is_char_boundary(n)),
Excluded(&n) => assert!(self.is_char_boundary(n + 1)),
Unbounded => {}
};
match range.end_bound() {
Included(&n) => assert!(self.is_char_boundary(n + 1)),
Excluded(&n) => assert!(self.is_char_boundary(n)),
Unbounded => {}
};
unsafe { self.as_mut_vec() }.splice(range, replace_with.bytes());
}
/// Converts this `String` into a [`Box`]`<`[`str`]`>`.
///
/// This will drop any excess capacity.
///
/// [`Box`]: ../../std/boxed/struct.Box.html
/// [`str`]: ../../std/primitive.str.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = String::from("hello");
///
/// let b = s.into_boxed_str();
/// ```
#[stable(feature = "box_str", since = "1.4.0")]
#[inline]
pub fn into_boxed_str(self) -> Box<str> {
let slice = self.vec.into_boxed_slice();
// SAFETY: the bytes came from a `String`, so they are valid UTF-8.
unsafe { from_boxed_utf8_unchecked(slice) }
}
}
impl FromUtf8Error {
/// Returns a slice of the bytes that were attempted to convert to a `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
///
/// let value = String::from_utf8(bytes);
///
/// assert_eq!(&[0, 159], value.unwrap_err().as_bytes());
/// ```
#[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")]
pub fn as_bytes(&self) -> &[u8] {
&self.bytes[..]
}
/// Returns the bytes that were attempted to convert to a `String`.
///
/// This method is carefully constructed to avoid allocation. It will
/// consume the error, moving out the bytes, so that a copy of the bytes
/// does not need to be made.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
///
/// let value = String::from_utf8(bytes);
///
/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_bytes(self) -> Vec<u8> {
self.bytes
}
/// Fetch a `Utf8Error` to get more details about the conversion failure.
///
/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
/// an analogue to `FromUtf8Error`. See its documentation for more details
/// on using it.
///
/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
/// [`std::str`]: ../../std/str/index.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`&str`]: ../../std/primitive.str.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
///
/// let error = String::from_utf8(bytes).unwrap_err().utf8_error();
///
/// // the first byte is invalid here
/// assert_eq!(1, error.valid_up_to());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn utf8_error(&self) -> Utf8Error {
// `Utf8Error` is `Copy` here; return it by value.
self.error
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for FromUtf8Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Defer to the wrapped `Utf8Error`'s message.
fmt::Display::fmt(&self.error, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for FromUtf16Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// No error details are used here, so the message is a constant string.
fmt::Display::fmt("invalid utf-16: lone surrogate found", f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
    fn clone(&self) -> Self {
        // A `String` is just its UTF-8 byte buffer, so cloning the buffer
        // clones the string.
        Self { vec: self.vec.clone() }
    }

    fn clone_from(&mut self, source: &Self) {
        // Reuse the existing allocation where possible instead of always
        // allocating a fresh buffer.
        self.vec.clone_from(&source.vec);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl FromIterator<char> for String {
    fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
        // Delegate to `Extend<char>`, which reserves from the iterator's
        // size hint before pushing each char.
        let mut out = String::new();
        out.extend(iter);
        out
    }
}
#[stable(feature = "string_from_iter_by_ref", since = "1.17.0")]
impl<'a> FromIterator<&'a char> for String {
fn from_iter<I: IntoIterator<Item = &'a char>>(iter: I) -> String {
// Delegates to `Extend<&char>`, which copies each char out of the
// reference.
let mut buf = String::new();
buf.extend(iter);
buf
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> FromIterator<&'a str> for String {
fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
// Delegates to `Extend<&str>`, appending each slice in turn.
let mut buf = String::new();
buf.extend(iter);
buf
}
}
#[stable(feature = "extend_string", since = "1.4.0")]
impl FromIterator<String> for String {
fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
let mut iterator = iter.into_iter();
// Because we're iterating over `String`s, we can avoid at least
// one allocation by getting the first string from the iterator
// and appending to it all the subsequent strings.
match iterator.next() {
None => String::new(),
Some(mut buf) => {
buf.extend(iterator);
buf
}
}
}
}
#[stable(feature = "box_str2", since = "1.45.0")]
impl FromIterator<Box<str>> for String {
fn from_iter<I: IntoIterator<Item = Box<str>>>(iter: I) -> String {
// Each boxed slice is appended via `Extend<Box<str>>`.
let mut buf = String::new();
buf.extend(iter);
buf
}
}
#[stable(feature = "herd_cows", since = "1.19.0")]
impl<'a> FromIterator<Cow<'a, str>> for String {
fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
let mut iterator = iter.into_iter();
// Because we're iterating over CoWs, we can (potentially) avoid at least
// one allocation by getting the first item and appending to it all the
// subsequent items.
match iterator.next() {
None => String::new(),
Some(cow) => {
let mut buf = cow.into_owned();
buf.extend(iterator);
buf
}
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Extend<char> for String {
    fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
        let it = iter.into_iter();
        // Reserve at least the size hint's lower bound up front so most of
        // the pushes below do not have to grow the buffer.
        let (low, _) = it.size_hint();
        self.reserve(low);
        for ch in it {
            self.push(ch);
        }
    }
}
// Each of the following `Extend` impls forwards to `push`/`push_str` on the
// borrowed or owned string data.
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a> Extend<&'a char> for String {
fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Extend<&'a str> for String {
fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
iter.into_iter().for_each(move |s| self.push_str(s));
}
}
#[stable(feature = "box_str2", since = "1.45.0")]
impl Extend<Box<str>> for String {
fn extend<I: IntoIterator<Item = Box<str>>>(&mut self, iter: I) {
iter.into_iter().for_each(move |s| self.push_str(&s));
}
}
#[stable(feature = "extend_string", since = "1.4.0")]
impl Extend<String> for String {
fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
iter.into_iter().for_each(move |s| self.push_str(&s));
}
}
#[stable(feature = "herd_cows", since = "1.19.0")]
impl<'a> Extend<Cow<'a, str>> for String {
fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
iter.into_iter().for_each(move |s| self.push_str(&s));
}
}
/// A convenience impl that delegates to the impl for `&str`.
///
/// # Examples
///
/// ```
/// assert_eq!(String::from("Hello world").find("world"), Some(6));
/// ```
#[unstable(
feature = "pattern",
reason = "API not fully fleshed out and ready to be stabilized",
issue = "27721"
)]
impl<'a, 'b> Pattern<'a> for &'b String {
// Every method below forwards to the `&str` pattern via the `self[..]`
// full-range index.
type Searcher = <&'b str as Pattern<'a>>::Searcher;
fn into_searcher(self, haystack: &'a str) -> <&'b str as Pattern<'a>>::Searcher {
self[..].into_searcher(haystack)
}
#[inline]
fn is_contained_in(self, haystack: &'a str) -> bool {
self[..].is_contained_in(haystack)
}
#[inline]
fn is_prefix_of(self, haystack: &'a str) -> bool {
self[..].is_prefix_of(haystack)
}
#[inline]
fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
self[..].strip_prefix_of(haystack)
}
#[inline]
fn is_suffix_of(self, haystack: &'a str) -> bool {
self[..].is_suffix_of(haystack)
}
#[inline]
fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
self[..].strip_suffix_of(haystack)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for String {
// Both methods compare the strings as `str` slices via full-range
// indexing.
#[inline]
fn eq(&self, other: &String) -> bool {
PartialEq::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String) -> bool {
PartialEq::ne(&self[..], &other[..])
}
}
// Generates a symmetric pair of `PartialEq` impls between two string-like
// types, comparing both sides as `str` slices.
macro_rules! impl_eq {
($lhs:ty, $rhs: ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(unused_lifetimes)]
impl<'a, 'b> PartialEq<$rhs> for $lhs {
#[inline]
fn eq(&self, other: &$rhs) -> bool {
PartialEq::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &$rhs) -> bool {
PartialEq::ne(&self[..], &other[..])
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(unused_lifetimes)]
impl<'a, 'b> PartialEq<$lhs> for $rhs {
#[inline]
fn eq(&self, other: &$lhs) -> bool {
PartialEq::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &$lhs) -> bool {
PartialEq::ne(&self[..], &other[..])
}
}
};
}
// Cross-type equality between `String`, `str`, `&str` and `Cow<str>`.
impl_eq! { String, str }
impl_eq! { String, &'a str }
impl_eq! { Cow<'a, str>, str }
impl_eq! { Cow<'a, str>, &'b str }
impl_eq! { Cow<'a, str>, String }
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for String {
    /// Creates an empty `String`.
    #[inline]
    fn default() -> String {
        // Same as `String::new`: an empty string performs no allocation.
        Self::new()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for String {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Delegate to `str` via `Deref` (`&**self`).
fmt::Display::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for String {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Delegate to `str` via `Deref`, so `String` and `str` debug-print
// identically.
fmt::Debug::fmt(&**self, f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl hash::Hash for String {
#[inline]
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
// Hash as a `str` so `String` and `str` hash identically.
(**self).hash(hasher)
}
}
/// Implements the `+` operator for concatenating two strings.
///
/// This consumes the `String` on the left-hand side and re-uses its buffer (growing it if
/// necessary). This is done to avoid allocating a new `String` and copying the entire contents on
/// every operation, which would lead to `O(n^2)` running time when building an `n`-byte string by
/// repeated concatenation.
///
/// The string on the right-hand side is only borrowed; its contents are copied into the returned
/// `String`.
///
/// # Examples
///
/// Concatenating two `String`s takes the first by value and borrows the second:
///
/// ```
/// let a = String::from("hello");
/// let b = String::from(" world");
/// let c = a + &b;
/// // `a` is moved and can no longer be used here.
/// ```
///
/// If you want to keep using the first `String`, you can clone it and append to the clone instead:
///
/// ```
/// let a = String::from("hello");
/// let b = String::from(" world");
/// let c = a.clone() + &b;
/// // `a` is still valid here.
/// ```
///
/// Concatenating `&str` slices can be done by converting the first to a `String`:
///
/// ```
/// let a = "hello";
/// let b = " world";
/// let c = a.to_string() + b;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
impl Add<&str> for String {
type Output = String;
#[inline]
fn add(mut self, other: &str) -> String {
// Append in place and return `self`, reusing its buffer.
self.push_str(other);
self
}
}
/// Implements the `+=` operator for appending to a `String`.
///
/// This has the same behavior as the [`push_str`][String::push_str] method.
#[stable(feature = "stringaddassign", since = "1.12.0")]
impl AddAssign<&str> for String {
#[inline]
fn add_assign(&mut self, other: &str) {
// Identical to calling `push_str` directly.
self.push_str(other);
}
}
// Range indexing delegates to `str` indexing through `Deref` (`&self[..]` /
// `&**self`); `str` performs the char-boundary and bounds checks.
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::Range<usize>> for String {
type Output = str;
#[inline]
fn index(&self, index: ops::Range<usize>) -> &str {
&self[..][index]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeTo<usize>> for String {
type Output = str;
#[inline]
fn index(&self, index: ops::RangeTo<usize>) -> &str {
&self[..][index]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFrom<usize>> for String {
type Output = str;
#[inline]
fn index(&self, index: ops::RangeFrom<usize>) -> &str {
&self[..][index]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Index<ops::RangeFull> for String {
type Output = str;
#[inline]
fn index(&self, _index: ops::RangeFull) -> &str {
// SAFETY: a `String`'s buffer is always valid UTF-8.
unsafe { str::from_utf8_unchecked(&self.vec) }
}
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::Index<ops::RangeInclusive<usize>> for String {
type Output = str;
#[inline]
fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
Index::index(&**self, index)
}
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::Index<ops::RangeToInclusive<usize>> for String {
type Output = str;
#[inline]
fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
Index::index(&**self, index)
}
}
// Mutable range indexing mirrors the `Index` impls above, delegating to
// `str` through `DerefMut`.
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::Range<usize>> for String {
#[inline]
fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
&mut self[..][index]
}
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::RangeTo<usize>> for String {
#[inline]
fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
&mut self[..][index]
}
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::RangeFrom<usize>> for String {
#[inline]
fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
&mut self[..][index]
}
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::IndexMut<ops::RangeFull> for String {
#[inline]
fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
// SAFETY: a `String`'s buffer is always valid UTF-8; the `&mut str`
// API preserves that invariant.
unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
}
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
#[inline]
fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
IndexMut::index_mut(&mut **self, index)
}
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
#[inline]
fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
IndexMut::index_mut(&mut **self, index)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ops::Deref for String {
type Target = str;
#[inline]
fn deref(&self) -> &str {
// SAFETY: a `String`'s buffer is always valid UTF-8.
unsafe { str::from_utf8_unchecked(&self.vec) }
}
}
#[stable(feature = "derefmut_for_string", since = "1.3.0")]
impl ops::DerefMut for String {
#[inline]
fn deref_mut(&mut self) -> &mut str {
// SAFETY: a `String`'s buffer is always valid UTF-8; `&mut str` only
// permits UTF-8-preserving mutation.
unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
}
}
/// A type alias for [`Infallible`].
///
/// This alias exists for backwards compatibility, and may be eventually deprecated.
///
/// [`Infallible`]: ../../core/convert/enum.Infallible.html
#[stable(feature = "str_parse_error", since = "1.5.0")]
pub type ParseError = core::convert::Infallible;
#[stable(feature = "rust1", since = "1.0.0")]
impl FromStr for String {
// Infallible: every `str` is already a valid `String`.
type Err = core::convert::Infallible;
#[inline]
fn from_str(s: &str) -> Result<String, Self::Err> {
Ok(String::from(s))
}
}
/// A trait for converting a value to a `String`.
///
/// This trait is automatically implemented for any type which implements the
/// [`Display`] trait. As such, `ToString` shouldn't be implemented directly:
/// [`Display`] should be implemented instead, and you get the `ToString`
/// implementation for free.
///
/// [`Display`]: ../../std/fmt/trait.Display.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToString {
/// Converts the given value to a `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let i = 5;
/// let five = String::from("5");
///
/// assert_eq!(five, i.to_string());
/// ```
// NOTE(review): this attribute presumably lets the compiler suggest
// `.to_string()` in conversion diagnostics — confirm against rustc docs.
#[rustc_conversion_suggestion]
#[stable(feature = "rust1", since = "1.0.0")]
fn to_string(&self) -> String;
}
/// # Panics
///
/// In this implementation, the `to_string` method panics
/// if the `Display` implementation returns an error.
/// This indicates an incorrect `Display` implementation
/// since `fmt::Write for String` never returns an error itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> ToString for T {
#[inline]
// `default fn` allows the more specific impls below (`str`, `Cow<str>`,
// `String`) to override this blanket impl via specialization.
default fn to_string(&self) -> String {
use fmt::Write;
let mut buf = String::new();
buf.write_fmt(format_args!("{}", self))
.expect("a Display implementation returned an error unexpectedly");
buf.shrink_to_fit();
buf
}
}
// Specializations for types that already hold string data: these bypass the
// `Display` formatting machinery of the blanket impl and copy directly.
#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
impl ToString for str {
#[inline]
fn to_string(&self) -> String {
String::from(self)
}
}
#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
impl ToString for Cow<'_, str> {
#[inline]
fn to_string(&self) -> String {
self[..].to_owned()
}
}
#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
impl ToString for String {
#[inline]
fn to_string(&self) -> String {
self.to_owned()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<str> for String {
#[inline]
fn as_ref(&self) -> &str {
// Deref coercion produces the `&str` view.
self
}
}
#[stable(feature = "string_as_mut", since = "1.43.0")]
impl AsMut<str> for String {
#[inline]
fn as_mut(&mut self) -> &mut str {
// Deref coercion produces the `&mut str` view.
self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<[u8]> for String {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl From<&str> for String {
/// Converts a `&str` into a `String`.
///
/// The result is allocated on the heap.
#[inline]
fn from(s: &str) -> String {
s.to_owned()
}
}
#[stable(feature = "from_mut_str_for_string", since = "1.44.0")]
impl From<&mut str> for String {
/// Converts a `&mut str` into a `String`.
///
/// The result is allocated on the heap.
#[inline]
fn from(s: &mut str) -> String {
s.to_owned()
}
}
#[stable(feature = "from_ref_string", since = "1.35.0")]
impl From<&String> for String {
/// Clones the referenced `String` into a new one.
#[inline]
fn from(s: &String) -> String {
s.clone()
}
}
// note: test pulls in libstd, which causes errors here
#[cfg(not(test))]
#[stable(feature = "string_from_box", since = "1.18.0")]
impl From<Box<str>> for String {
/// Converts the given boxed `str` slice to a `String`.
/// Note that the `str` slice is owned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s1: String = String::from("hello world");
/// let s2: Box<str> = s1.into_boxed_str();
/// let s3: String = String::from(s2);
///
/// assert_eq!("hello world", s3)
/// ```
fn from(s: Box<str>) -> String {
s.into_string()
}
}
#[stable(feature = "box_from_str", since = "1.20.0")]
impl From<String> for Box<str> {
/// Converts the given `String` to a boxed `str` slice that is owned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s1: String = String::from("hello world");
/// let s2: Box<str> = Box::from(s1);
/// let s3: String = String::from(s2);
///
/// assert_eq!("hello world", s3)
/// ```
fn from(s: String) -> Box<str> {
s.into_boxed_str()
}
}
#[stable(feature = "string_from_cow_str", since = "1.14.0")]
// `Cow` -> `String`: clones only when the cow is borrowed; an owned cow
// is returned as-is without reallocating.
impl<'a> From<Cow<'a, str>> for String {
fn from(s: Cow<'a, str>) -> String {
s.into_owned()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// `&str` -> `Cow`: zero-cost borrow.
impl<'a> From<&'a str> for Cow<'a, str> {
#[inline]
fn from(s: &'a str) -> Cow<'a, str> {
Cow::Borrowed(s)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// `String` -> `Cow`: takes ownership, no copy.
impl<'a> From<String> for Cow<'a, str> {
#[inline]
fn from(s: String) -> Cow<'a, str> {
Cow::Owned(s)
}
}
#[stable(feature = "cow_from_string_ref", since = "1.28.0")]
// `&String` -> `Cow`: borrows the string's slice for its lifetime.
impl<'a> From<&'a String> for Cow<'a, str> {
#[inline]
fn from(s: &'a String) -> Cow<'a, str> {
Cow::Borrowed(s.as_str())
}
}
// The three `FromIterator` impls below always produce an owned cow:
// collecting requires building a fresh `String`, so `Cow::Borrowed` is
// never an option here.
#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
impl<'a> FromIterator<char> for Cow<'a, str> {
fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> {
Cow::Owned(FromIterator::from_iter(it))
}
}
#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> {
fn from_iter<I: IntoIterator<Item = &'b str>>(it: I) -> Cow<'a, str> {
Cow::Owned(FromIterator::from_iter(it))
}
}
#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
impl<'a> FromIterator<String> for Cow<'a, str> {
fn from_iter<I: IntoIterator<Item = String>>(it: I) -> Cow<'a, str> {
Cow::Owned(FromIterator::from_iter(it))
}
}
#[stable(feature = "from_string_for_vec_u8", since = "1.14.0")]
impl From<String> for Vec<u8> {
    /// Converts the given `String` to a vector `Vec` that holds values of type `u8`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s1 = String::from("hello world");
    /// let v1 = Vec::from(s1);
    ///
    /// for b in v1 {
    ///     println!("{}", b);
    /// }
    /// ```
    // Reuses the string's buffer; no copy is made.
    fn from(string: String) -> Vec<u8> {
        string.into_bytes()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Lets `write!`/`writeln!` target a `String` directly. Appending to a
// `String` cannot fail, so both methods always return `Ok(())`.
impl fmt::Write for String {
    #[inline]
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.push_str(s);
        Ok(())
    }
    #[inline]
    fn write_char(&mut self, c: char) -> fmt::Result {
        self.push(c);
        Ok(())
    }
}
/// A draining iterator for `String`.
///
/// This struct is created by the [`drain`] method on [`String`]. See its
/// documentation for more.
///
/// [`drain`]: struct.String.html#method.drain
/// [`String`]: struct.String.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a> {
/// Will be used as &'a mut String in the destructor
/// (kept as a raw pointer because `iter` below already borrows the
/// string's buffer for `'a`).
string: *mut String,
/// Start of part to remove
start: usize,
/// End of part to remove
end: usize,
/// Current remaining range to remove
iter: Chars<'a>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl fmt::Debug for Drain<'_> {
// Opaque formatting: deliberately does not expose the drained contents.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("Drain { .. }")
}
}
// The raw `*mut String` field suppresses the auto `Send`/`Sync` impls, so
// they are reasserted manually: `Drain` is semantically a `&mut String`
// plus a `Chars` iterator, both of which are `Send` and `Sync`.
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl Sync for Drain<'_> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl Send for Drain<'_> {}
#[stable(feature = "drain", since = "1.6.0")]
impl Drop for Drain<'_> {
// On drop, actually removes the drained range from the backing string.
// SAFETY of the deref: assumes `self.string` still points to the
// `String` that created this `Drain` — upheld by `String::drain`
// (constructor not visible in this chunk).
fn drop(&mut self) {
unsafe {
// Use Vec::drain. "Reaffirm" the bounds checks to avoid
// panic code being inserted again.
// (The `if` re-establishes `start <= end <= len` for the
// optimizer; an out-of-range record simply drains nothing.)
let self_vec = (*self.string).as_mut_vec();
if self.start <= self.end && self.end <= self_vec.len() {
self_vec.drain(self.start..self.end);
}
}
}
}
#[stable(feature = "drain", since = "1.6.0")]
// Iteration simply walks the `Chars` of the to-be-removed range; the
// actual removal happens in `Drop` above.
impl Iterator for Drain<'_> {
type Item = char;
#[inline]
fn next(&mut self) -> Option<char> {
self.iter.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
#[inline]
fn last(mut self) -> Option<char> {
// The last element is reached in O(1) from the back instead of
// walking the whole range forward.
self.next_back()
}
}
#[stable(feature = "drain", since = "1.6.0")]
impl DoubleEndedIterator for Drain<'_> {
#[inline]
fn next_back(&mut self) -> Option<char> {
self.iter.next_back()
}
}
// `Chars` never yields `Some` after `None`, so `Drain` is fused too.
#[stable(feature = "fused", since = "1.26.0")]
impl FusedIterator for Drain<'_> {}
|
use ndarray::*;
use ndarray_linalg::*;
/// Solve `A * x = b` for a random row-major 3x3 system and check that the
/// recovered solution matches the vector used to build `b`.
#[test]
fn solve_random() {
    let coeffs: Array2<f64> = random((3, 3));
    let expected: Array1<f64> = random(3);
    let rhs = coeffs.dot(&expected);
    let solved = coeffs.solve_into(rhs).unwrap();
    assert_close_l2!(&expected, &solved, 1e-7);
}
/// Same as `solve_random`, but with a column-major (Fortran-layout)
/// coefficient matrix to exercise the transposed LAPACK path.
#[test]
fn solve_random_t() {
    let coeffs: Array2<f64> = random((3, 3).f());
    let expected: Array1<f64> = random(3);
    let rhs = coeffs.dot(&expected);
    let solved = coeffs.solve_into(rhs).unwrap();
    assert_close_l2!(&expected, &solved, 1e-7);
}
#[test]
// Checks `rcond` against the definition computed explicitly:
// 1 / (||A||_1 * ||A^-1||_1), using one-norms of the matrix and its inverse.
fn rcond() {
macro_rules! rcond {
($elem:ty, $rows:expr, $atol:expr) => {
let a: Array2<$elem> = random(($rows, $rows));
// Reference value: reciprocal condition number in the 1-norm.
let rcond = 1. / (a.opnorm_one().unwrap() * a.inv().unwrap().opnorm_one().unwrap());
assert_aclose!(a.rcond().unwrap(), rcond, $atol);
assert_aclose!(a.rcond_into().unwrap(), rcond, $atol);
};
}
// Loose tolerances (0.2 / 0.5) — `rcond` apparently returns an estimate
// rather than the exact value; verify against its documentation.
for rows in 1..6 {
rcond!(f64, rows, 0.2);
rcond!(f32, rows, 0.5);
rcond!(c64, rows, 0.2);
rcond!(c32, rows, 0.5);
}
}
/// `rcond` of a badly conditioned matrix should be indistinguishable from 0.
///
/// The Hilbert matrix is the canonical ill-conditioned test case: for
/// n = 10 its reciprocal condition number is far below both tolerances
/// used here.
#[test]
fn rcond_hilbert() {
    macro_rules! rcond_hilbert {
        ($elem:ty, $rows:expr, $atol:expr) => {
            // `from_shape_fn` supplies 0-based indices, so the classical
            // 1-based Hilbert definition H[i][j] = 1/(i+j-1) translates to
            // 1/(i+j+1) here. The previous `- 1.` produced a division by
            // zero (infinite entries) at (0,1)/(1,0) and -1 at (0,0),
            // which is not a Hilbert matrix at all.
            let a = Array2::<$elem>::from_shape_fn(($rows, $rows), |(i, j)| {
                1. / (i as $elem + j as $elem + 1.)
            });
            assert_aclose!(a.rcond().unwrap(), 0., $atol);
            assert_aclose!(a.rcond_into().unwrap(), 0., $atol);
        };
    }
    rcond_hilbert!(f64, 10, 1e-9);
    rcond_hilbert!(f32, 10, 1e-3);
}
/// The identity matrix is perfectly conditioned, so its reciprocal
/// condition number must be 1 (within the tolerance of each element type).
#[test]
fn rcond_identity() {
    macro_rules! rcond_identity {
        ($ty:ty, $n:expr, $tol:expr) => {
            let eye = Array2::<$ty>::eye($n);
            assert_aclose!(eye.rcond().unwrap(), 1., $tol);
            assert_aclose!(eye.rcond_into().unwrap(), 1., $tol);
        };
    }
    for n in 1..6 {
        rcond_identity!(f64, n, 1e-9);
        rcond_identity!(f32, n, 1e-3);
        rcond_identity!(c64, n, 1e-9);
        rcond_identity!(c32, n, 1e-3);
    }
}
Use random_hpd for rcond test
use ndarray::*;
use ndarray_linalg::*;
#[test]
// Round-trip: build b = A·x from a random C-order 3x3 system, solve for x,
// and compare against the original vector.
fn solve_random() {
let a: Array2<f64> = random((3, 3));
let x: Array1<f64> = random(3);
let b = a.dot(&x);
let y = a.solve_into(b).unwrap();
assert_close_l2!(&x, &y, 1e-7);
}
#[test]
// Same round-trip as `solve_random`, but with a Fortran-layout (column-major)
// matrix to exercise the transposed storage path.
fn solve_random_t() {
let a: Array2<f64> = random((3, 3).f());
let x: Array1<f64> = random(3);
let b = a.dot(&x);
let y = a.solve_into(b).unwrap();
assert_close_l2!(&x, &y, 1e-7);
}
#[test]
// Checks `rcond` against the explicit definition
// 1 / (||A||_1 * ||A^-1||_1), computed from the one-norms.
fn rcond() {
macro_rules! rcond {
($elem:ty, $rows:expr, $atol:expr) => {
// Hermitian positive-definite input — presumably chosen so the
// random matrix is never near-singular; confirm against commit intent.
let a: Array2<$elem> = random_hpd($rows);
let rcond = 1. / (a.opnorm_one().unwrap() * a.inv().unwrap().opnorm_one().unwrap());
assert_aclose!(a.rcond().unwrap(), rcond, $atol);
assert_aclose!(a.rcond_into().unwrap(), rcond, $atol);
};
}
// Loose tolerances — `rcond` apparently returns an estimate, not the
// exact value.
for rows in 1..6 {
rcond!(f64, rows, 0.2);
rcond!(f32, rows, 0.5);
rcond!(c64, rows, 0.2);
rcond!(c32, rows, 0.5);
}
}
/// `rcond` of a badly conditioned matrix should be indistinguishable from 0.
///
/// Uses the Hilbert matrix, whose reciprocal condition number at n = 10 is
/// far below both tolerances used here.
#[test]
fn rcond_hilbert() {
    macro_rules! rcond_hilbert {
        ($elem:ty, $rows:expr, $atol:expr) => {
            // `from_shape_fn` supplies 0-based indices, so the classical
            // 1-based Hilbert definition H[i][j] = 1/(i+j-1) translates to
            // 1/(i+j+1) here. The previous `- 1.` produced a division by
            // zero (infinite entries) at (0,1)/(1,0) and -1 at (0,0),
            // which is not a Hilbert matrix at all.
            let a = Array2::<$elem>::from_shape_fn(($rows, $rows), |(i, j)| {
                1. / (i as $elem + j as $elem + 1.)
            });
            assert_aclose!(a.rcond().unwrap(), 0., $atol);
            assert_aclose!(a.rcond_into().unwrap(), 0., $atol);
        };
    }
    rcond_hilbert!(f64, 10, 1e-9);
    rcond_hilbert!(f32, 10, 1e-3);
}
#[test]
// The identity matrix is perfectly conditioned: rcond must equal 1
// (within each element type's tolerance).
fn rcond_identity() {
macro_rules! rcond_identity {
($elem:ty, $rows:expr, $atol:expr) => {
let a = Array2::<$elem>::eye($rows);
assert_aclose!(a.rcond().unwrap(), 1., $atol);
assert_aclose!(a.rcond_into().unwrap(), 1., $atol);
};
}
for rows in 1..6 {
rcond_identity!(f64, rows, 1e-9);
rcond_identity!(f32, rows, 1e-3);
rcond_identity!(c64, rows, 1e-9);
rcond_identity!(c32, rows, 1e-3);
}
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `bitflags!` macro generates a `struct` that holds a set of C-style
//! bitmask flags. It is useful for creating typesafe wrappers for C APIs.
//!
//! The flags should only be defined for integer types, otherwise unexpected
//! type errors may occur at compile time.
//!
//! # Example
//!
//! ~~~rust
//! bitflags!(
//! flags Flags: u32 {
//! static FlagA = 0x00000001,
//! static FlagB = 0x00000010,
//! static FlagC = 0x00000100,
//! static FlagABC = FlagA.bits
//! | FlagB.bits
//! | FlagC.bits
//! }
//! )
//!
//! fn main() {
//! let e1 = FlagA | FlagC;
//! let e2 = FlagB | FlagC;
//! assert!((e1 | e2) == FlagABC); // union
//! assert!((e1 & e2) == FlagC); // intersection
//! assert!((e1 - e2) == FlagA); // set difference
//! assert!(!e2 == FlagA); // set complement
//! }
//! ~~~
//!
//! The generated `struct`s can also be extended with type and trait implementations:
//!
//! ~~~rust
//! use std::fmt;
//!
//! bitflags!(
//! flags Flags: u32 {
//! static FlagA = 0x00000001,
//! static FlagB = 0x00000010
//! }
//! )
//!
//! impl Flags {
//! pub fn clear(&mut self) {
//! self.bits = 0; // The `bits` field can be accessed from within the
//! // same module where the `bitflags!` macro was invoked.
//! }
//! }
//!
//! impl fmt::Show for Flags {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! write!(f, "hi!")
//! }
//! }
//!
//! fn main() {
//! let mut flags = FlagA | FlagB;
//! flags.clear();
//! assert!(flags.is_empty());
//! assert_eq!(format!("{}", flags).as_slice(), "hi!");
//! }
//! ~~~
//!
//! # Attributes
//!
//! Attributes can be attached to the generated `struct` by placing them
//! before the `flags` keyword.
//!
//! # Derived traits
//!
//! The `PartialEq` and `Clone` traits are automatically derived for the `struct` using
//! the `deriving` attribute. Additional traits can be derived by providing an
//! explicit `deriving` attribute on `flags`.
//!
//! # Operators
//!
//! The following operator traits are implemented for the generated `struct`:
//!
//! - `BitOr`: union
//! - `BitAnd`: intersection
//! - `Sub`: set difference
//! - `Not`: set complement
//!
//! # Methods
//!
//! The following methods are defined for the generated `struct`:
//!
//! - `empty`: an empty set of flags
//! - `all`: the set of all flags
//! - `bits`: the raw value of the flags currently stored
//! - `is_empty`: `true` if no flags are currently stored
//! - `is_all`: `true` if all flags are currently set
//! - `intersects`: `true` if there are flags common to both `self` and `other`
//! - `contains`: `true` if all of the flags in `other` are contained within `self`
//! - `insert`: inserts the specified flags in-place
//! - `remove`: removes the specified flags in-place
#![experimental]
#![macro_escape]
#[macro_export]
// Generates a typed bitmask wrapper around an integer type `$T`, plus one
// `pub static` constant per flag and set-algebra operator impls.
// (Pre-1.0 Rust: note `deriving`, `static` constants, and two-parameter
// operator traits.) This arm does NOT accept a trailing comma after the
// last flag.
macro_rules! bitflags(
($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
$($(#[$Flag_attr:meta])* static $Flag:ident = $value:expr),+
}) => (
#[deriving(PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
$(#[$attr])*
pub struct $BitFlags {
bits: $T,
}
$($(#[$Flag_attr])* pub static $Flag: $BitFlags = $BitFlags { bits: $value };)+
impl $BitFlags {
/// Returns an empty set of flags.
pub fn empty() -> $BitFlags {
$BitFlags { bits: 0 }
}
/// Returns the set containing all flags.
pub fn all() -> $BitFlags {
// OR of every declared flag value.
$BitFlags { bits: $($value)|+ }
}
/// Returns the raw value of the flags currently stored.
pub fn bits(&self) -> $T {
self.bits
}
/// Convert from underlying bit representation, unless that
/// representation contains bits that do not correspond to a flag.
pub fn from_bits(bits: $T) -> ::std::option::Option<$BitFlags> {
if (bits & !$BitFlags::all().bits()) != 0 {
::std::option::None
} else {
::std::option::Some($BitFlags { bits: bits })
}
}
/// Convert from underlying bit representation, dropping any bits
/// that do not correspond to flags.
pub fn from_bits_truncate(bits: $T) -> $BitFlags {
$BitFlags { bits: bits } & $BitFlags::all()
}
/// Returns `true` if no flags are currently stored.
pub fn is_empty(&self) -> bool {
*self == $BitFlags::empty()
}
/// Returns `true` if all flags are currently set.
pub fn is_all(&self) -> bool {
*self == $BitFlags::all()
}
/// Returns `true` if there are flags common to both `self` and `other`.
pub fn intersects(&self, other: $BitFlags) -> bool {
!(self & other).is_empty()
}
/// Returns `true` if all of the flags in `other` are contained within `self`.
pub fn contains(&self, other: $BitFlags) -> bool {
(self & other) == other
}
/// Inserts the specified flags in-place.
pub fn insert(&mut self, other: $BitFlags) {
self.bits |= other.bits;
}
/// Removes the specified flags in-place.
pub fn remove(&mut self, other: $BitFlags) {
self.bits &= !other.bits;
}
}
impl BitOr<$BitFlags, $BitFlags> for $BitFlags {
/// Returns the union of the two sets of flags.
#[inline]
fn bitor(&self, other: &$BitFlags) -> $BitFlags {
$BitFlags { bits: self.bits | other.bits }
}
}
impl BitAnd<$BitFlags, $BitFlags> for $BitFlags {
/// Returns the intersection between the two sets of flags.
#[inline]
fn bitand(&self, other: &$BitFlags) -> $BitFlags {
$BitFlags { bits: self.bits & other.bits }
}
}
impl Sub<$BitFlags, $BitFlags> for $BitFlags {
/// Returns the set difference of the two sets of flags.
#[inline]
fn sub(&self, other: &$BitFlags) -> $BitFlags {
$BitFlags { bits: self.bits & !other.bits }
}
}
impl Not<$BitFlags> for $BitFlags {
/// Returns the complement of this set of flags.
#[inline]
fn not(&self) -> $BitFlags {
// Mask with `all()` so undefined bits never become set.
$BitFlags { bits: !self.bits } & $BitFlags::all()
}
}
)
)
#[cfg(test)]
// End-to-end tests of the macro-generated API: raw bit accessors,
// conversions, set predicates, in-place mutation, operators, ordering
// and hashing.
mod tests {
use hash;
use option::{Some, None};
use ops::{BitOr, BitAnd, Sub, Not};
// Fixture: three disjoint single-bit flags plus a compound flag equal to
// their union (0x111).
bitflags!(
flags Flags: u32 {
static FlagA = 0x00000001,
static FlagB = 0x00000010,
static FlagC = 0x00000100,
static FlagABC = FlagA.bits
| FlagB.bits
| FlagC.bits
}
)
#[test]
fn test_bits(){
assert_eq!(Flags::empty().bits(), 0x00000000);
assert_eq!(FlagA.bits(), 0x00000001);
assert_eq!(FlagABC.bits(), 0x00000111);
}
#[test]
// `from_bits` accepts only exact combinations of defined flags.
fn test_from_bits() {
assert!(Flags::from_bits(0) == Some(Flags::empty()));
assert!(Flags::from_bits(0x1) == Some(FlagA));
assert!(Flags::from_bits(0x10) == Some(FlagB));
assert!(Flags::from_bits(0x11) == Some(FlagA | FlagB));
assert!(Flags::from_bits(0x1000) == None)
}
#[test]
// `from_bits_truncate` silently drops undefined bits instead of failing.
fn test_from_bits_truncate() {
assert!(Flags::from_bits_truncate(0) == Flags::empty());
assert!(Flags::from_bits_truncate(0x1) == FlagA);
assert!(Flags::from_bits_truncate(0x10) == FlagB);
assert!(Flags::from_bits_truncate(0x11) == (FlagA | FlagB));
assert!(Flags::from_bits_truncate(0x1000) == Flags::empty());
assert!(Flags::from_bits_truncate(0x1001) == FlagA);
}
#[test]
fn test_is_empty(){
assert!(Flags::empty().is_empty());
assert!(!FlagA.is_empty());
assert!(!FlagABC.is_empty());
}
#[test]
fn test_is_all() {
assert!(Flags::all().is_all());
assert!(!FlagA.is_all());
assert!(FlagABC.is_all());
}
#[test]
fn test_two_empties_do_not_intersect() {
let e1 = Flags::empty();
let e2 = Flags::empty();
assert!(!e1.intersects(e2));
}
#[test]
fn test_empty_does_not_intersect_with_full() {
let e1 = Flags::empty();
let e2 = FlagABC;
assert!(!e1.intersects(e2));
}
#[test]
fn test_disjoint_intersects() {
let e1 = FlagA;
let e2 = FlagB;
assert!(!e1.intersects(e2));
}
#[test]
fn test_overlapping_intersects() {
let e1 = FlagA;
let e2 = FlagA | FlagB;
assert!(e1.intersects(e2));
}
#[test]
// `contains` is subset inclusion, not intersection: asymmetric.
fn test_contains() {
let e1 = FlagA;
let e2 = FlagA | FlagB;
assert!(!e1.contains(e2));
assert!(e2.contains(e1));
assert!(FlagABC.contains(e2));
}
#[test]
fn test_insert(){
let mut e1 = FlagA;
let e2 = FlagA | FlagB;
e1.insert(e2);
assert!(e1 == e2);
}
#[test]
// Removing flags not present (FlagC) is a no-op; only the overlap goes.
fn test_remove(){
let mut e1 = FlagA | FlagB;
let e2 = FlagA | FlagC;
e1.remove(e2);
assert!(e1 == FlagB);
}
#[test]
fn test_operators() {
let e1 = FlagA | FlagC;
let e2 = FlagB | FlagC;
assert!((e1 | e2) == FlagABC); // union
assert!((e1 & e2) == FlagC); // intersection
assert!((e1 - e2) == FlagA); // set difference
assert!(!e2 == FlagA); // set complement
}
#[test]
// Ordering is the derived lexicographic ordering on the `bits` field,
// i.e. plain numeric comparison of the raw values.
fn test_lt() {
let mut a = Flags::empty();
let mut b = Flags::empty();
assert!(!(a < b) && !(b < a));
b = FlagB;
assert!(a < b);
a = FlagC;
assert!(!(a < b) && b < a);
b = FlagC | FlagB;
assert!(a < b);
}
#[test]
fn test_ord() {
let mut a = Flags::empty();
let mut b = Flags::empty();
assert!(a <= b && a >= b);
a = FlagA;
assert!(a > b && a >= b);
assert!(b < a && b <= a);
b = FlagB;
assert!(b > a && b >= a);
assert!(a < b && a <= b);
}
#[test]
// Equal values (`all()` vs the explicit union) must hash identically.
fn test_hash() {
let mut x = Flags::empty();
let mut y = Flags::empty();
assert!(hash::hash(&x) == hash::hash(&y));
x = Flags::all();
y = FlagABC;
assert!(hash::hash(&x) == hash::hash(&y));
}
}
Allow trailing commas in bitflags! macro
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `bitflags!` macro generates a `struct` that holds a set of C-style
//! bitmask flags. It is useful for creating typesafe wrappers for C APIs.
//!
//! The flags should only be defined for integer types, otherwise unexpected
//! type errors may occur at compile time.
//!
//! # Example
//!
//! ~~~rust
//! bitflags!(
//! flags Flags: u32 {
//! static FlagA = 0x00000001,
//! static FlagB = 0x00000010,
//! static FlagC = 0x00000100,
//! static FlagABC = FlagA.bits
//! | FlagB.bits
//! | FlagC.bits,
//! }
//! )
//!
//! fn main() {
//! let e1 = FlagA | FlagC;
//! let e2 = FlagB | FlagC;
//! assert!((e1 | e2) == FlagABC); // union
//! assert!((e1 & e2) == FlagC); // intersection
//! assert!((e1 - e2) == FlagA); // set difference
//! assert!(!e2 == FlagA); // set complement
//! }
//! ~~~
//!
//! The generated `struct`s can also be extended with type and trait implementations:
//!
//! ~~~rust
//! use std::fmt;
//!
//! bitflags!(
//! flags Flags: u32 {
//! static FlagA = 0x00000001,
//! static FlagB = 0x00000010,
//! }
//! )
//!
//! impl Flags {
//! pub fn clear(&mut self) {
//! self.bits = 0; // The `bits` field can be accessed from within the
//! // same module where the `bitflags!` macro was invoked.
//! }
//! }
//!
//! impl fmt::Show for Flags {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! write!(f, "hi!")
//! }
//! }
//!
//! fn main() {
//! let mut flags = FlagA | FlagB;
//! flags.clear();
//! assert!(flags.is_empty());
//! assert_eq!(format!("{}", flags).as_slice(), "hi!");
//! }
//! ~~~
//!
//! # Attributes
//!
//! Attributes can be attached to the generated `struct` by placing them
//! before the `flags` keyword.
//!
//! # Derived traits
//!
//! The `PartialEq` and `Clone` traits are automatically derived for the `struct` using
//! the `deriving` attribute. Additional traits can be derived by providing an
//! explicit `deriving` attribute on `flags`.
//!
//! # Operators
//!
//! The following operator traits are implemented for the generated `struct`:
//!
//! - `BitOr`: union
//! - `BitAnd`: intersection
//! - `Sub`: set difference
//! - `Not`: set complement
//!
//! # Methods
//!
//! The following methods are defined for the generated `struct`:
//!
//! - `empty`: an empty set of flags
//! - `all`: the set of all flags
//! - `bits`: the raw value of the flags currently stored
//! - `is_empty`: `true` if no flags are currently stored
//! - `is_all`: `true` if all flags are currently set
//! - `intersects`: `true` if there are flags common to both `self` and `other`
//! - `contains`: `true` if all of the flags in `other` are contained within `self`
//! - `insert`: inserts the specified flags in-place
//! - `remove`: removes the specified flags in-place
#![experimental]
#![macro_escape]
#[macro_export]
// Generates a typed bitmask wrapper around an integer type `$T`, plus one
// `pub static` constant per flag and set-algebra operator impls.
// (Pre-1.0 Rust: note `deriving`, `static` constants, and two-parameter
// operator traits.)
macro_rules! bitflags(
    // Primary arm: `flags Name: Type { static Flag = value, ... }` with no
    // trailing comma after the last flag.
    ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* static $Flag:ident = $value:expr),+
    }) => (
        #[deriving(PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
        $(#[$attr])*
        pub struct $BitFlags {
            bits: $T,
        }
        $($(#[$Flag_attr])* pub static $Flag: $BitFlags = $BitFlags { bits: $value };)+
        impl $BitFlags {
            /// Returns an empty set of flags.
            pub fn empty() -> $BitFlags {
                $BitFlags { bits: 0 }
            }
            /// Returns the set containing all flags.
            pub fn all() -> $BitFlags {
                // OR of every declared flag value.
                $BitFlags { bits: $($value)|+ }
            }
            /// Returns the raw value of the flags currently stored.
            pub fn bits(&self) -> $T {
                self.bits
            }
            /// Convert from underlying bit representation, unless that
            /// representation contains bits that do not correspond to a flag.
            pub fn from_bits(bits: $T) -> ::std::option::Option<$BitFlags> {
                if (bits & !$BitFlags::all().bits()) != 0 {
                    ::std::option::None
                } else {
                    ::std::option::Some($BitFlags { bits: bits })
                }
            }
            /// Convert from underlying bit representation, dropping any bits
            /// that do not correspond to flags.
            pub fn from_bits_truncate(bits: $T) -> $BitFlags {
                $BitFlags { bits: bits } & $BitFlags::all()
            }
            /// Returns `true` if no flags are currently stored.
            pub fn is_empty(&self) -> bool {
                *self == $BitFlags::empty()
            }
            /// Returns `true` if all flags are currently set.
            pub fn is_all(&self) -> bool {
                *self == $BitFlags::all()
            }
            /// Returns `true` if there are flags common to both `self` and `other`.
            pub fn intersects(&self, other: $BitFlags) -> bool {
                !(self & other).is_empty()
            }
            /// Returns `true` if all of the flags in `other` are contained within `self`.
            pub fn contains(&self, other: $BitFlags) -> bool {
                (self & other) == other
            }
            /// Inserts the specified flags in-place.
            pub fn insert(&mut self, other: $BitFlags) {
                self.bits |= other.bits;
            }
            /// Removes the specified flags in-place.
            pub fn remove(&mut self, other: $BitFlags) {
                self.bits &= !other.bits;
            }
        }
        impl BitOr<$BitFlags, $BitFlags> for $BitFlags {
            /// Returns the union of the two sets of flags.
            #[inline]
            fn bitor(&self, other: &$BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits | other.bits }
            }
        }
        impl BitAnd<$BitFlags, $BitFlags> for $BitFlags {
            /// Returns the intersection between the two sets of flags.
            #[inline]
            fn bitand(&self, other: &$BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits & other.bits }
            }
        }
        impl Sub<$BitFlags, $BitFlags> for $BitFlags {
            /// Returns the set difference of the two sets of flags.
            #[inline]
            fn sub(&self, other: &$BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits & !other.bits }
            }
        }
        impl Not<$BitFlags> for $BitFlags {
            /// Returns the complement of this set of flags.
            #[inline]
            fn not(&self) -> $BitFlags {
                // Mask with `all()` so undefined bits never become set.
                $BitFlags { bits: !self.bits } & $BitFlags::all()
            }
        }
    );
    // Trailing-comma arm: strips the comma and delegates to the primary arm.
    //
    // BUG FIX: this arm previously re-expanded with a hard-coded `u32`
    // backing type, silently discarding the user-supplied `$T` whenever a
    // trailing comma was present (e.g. a `u64` flag set would be generated
    // as `u32`). It now forwards `$T` unchanged.
    ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* static $Flag:ident = $value:expr),+,
    }) => (
        bitflags!(
            $(#[$attr])*
            flags $BitFlags: $T {
                $($(#[$Flag_attr])* static $Flag = $value),+
            }
        )
    );
)
#[cfg(test)]
// Same test suite as before; the fixture now ends with a trailing comma to
// exercise the macro's trailing-comma arm.
mod tests {
use hash;
use option::{Some, None};
use ops::{BitOr, BitAnd, Sub, Not};
// Fixture: three disjoint single-bit flags plus a compound flag equal to
// their union (0x111). Note the trailing comma after the last flag.
bitflags!(
flags Flags: u32 {
static FlagA = 0x00000001,
static FlagB = 0x00000010,
static FlagC = 0x00000100,
static FlagABC = FlagA.bits
| FlagB.bits
| FlagC.bits,
}
)
#[test]
fn test_bits(){
assert_eq!(Flags::empty().bits(), 0x00000000);
assert_eq!(FlagA.bits(), 0x00000001);
assert_eq!(FlagABC.bits(), 0x00000111);
}
#[test]
// `from_bits` accepts only exact combinations of defined flags.
fn test_from_bits() {
assert!(Flags::from_bits(0) == Some(Flags::empty()));
assert!(Flags::from_bits(0x1) == Some(FlagA));
assert!(Flags::from_bits(0x10) == Some(FlagB));
assert!(Flags::from_bits(0x11) == Some(FlagA | FlagB));
assert!(Flags::from_bits(0x1000) == None);
}
#[test]
// `from_bits_truncate` silently drops undefined bits instead of failing.
fn test_from_bits_truncate() {
assert!(Flags::from_bits_truncate(0) == Flags::empty());
assert!(Flags::from_bits_truncate(0x1) == FlagA);
assert!(Flags::from_bits_truncate(0x10) == FlagB);
assert!(Flags::from_bits_truncate(0x11) == (FlagA | FlagB));
assert!(Flags::from_bits_truncate(0x1000) == Flags::empty());
assert!(Flags::from_bits_truncate(0x1001) == FlagA);
}
#[test]
fn test_is_empty(){
assert!(Flags::empty().is_empty());
assert!(!FlagA.is_empty());
assert!(!FlagABC.is_empty());
}
#[test]
fn test_is_all() {
assert!(Flags::all().is_all());
assert!(!FlagA.is_all());
assert!(FlagABC.is_all());
}
#[test]
fn test_two_empties_do_not_intersect() {
let e1 = Flags::empty();
let e2 = Flags::empty();
assert!(!e1.intersects(e2));
}
#[test]
fn test_empty_does_not_intersect_with_full() {
let e1 = Flags::empty();
let e2 = FlagABC;
assert!(!e1.intersects(e2));
}
#[test]
fn test_disjoint_intersects() {
let e1 = FlagA;
let e2 = FlagB;
assert!(!e1.intersects(e2));
}
#[test]
fn test_overlapping_intersects() {
let e1 = FlagA;
let e2 = FlagA | FlagB;
assert!(e1.intersects(e2));
}
#[test]
// `contains` is subset inclusion, not intersection: asymmetric.
fn test_contains() {
let e1 = FlagA;
let e2 = FlagA | FlagB;
assert!(!e1.contains(e2));
assert!(e2.contains(e1));
assert!(FlagABC.contains(e2));
}
#[test]
fn test_insert(){
let mut e1 = FlagA;
let e2 = FlagA | FlagB;
e1.insert(e2);
assert!(e1 == e2);
}
#[test]
// Removing flags not present (FlagC) is a no-op; only the overlap goes.
fn test_remove(){
let mut e1 = FlagA | FlagB;
let e2 = FlagA | FlagC;
e1.remove(e2);
assert!(e1 == FlagB);
}
#[test]
fn test_operators() {
let e1 = FlagA | FlagC;
let e2 = FlagB | FlagC;
assert!((e1 | e2) == FlagABC); // union
assert!((e1 & e2) == FlagC); // intersection
assert!((e1 - e2) == FlagA); // set difference
assert!(!e2 == FlagA); // set complement
}
#[test]
// Ordering is the derived ordering on the `bits` field, i.e. plain
// numeric comparison of the raw values.
fn test_lt() {
let mut a = Flags::empty();
let mut b = Flags::empty();
assert!(!(a < b) && !(b < a));
b = FlagB;
assert!(a < b);
a = FlagC;
assert!(!(a < b) && b < a);
b = FlagC | FlagB;
assert!(a < b);
}
#[test]
fn test_ord() {
let mut a = Flags::empty();
let mut b = Flags::empty();
assert!(a <= b && a >= b);
a = FlagA;
assert!(a > b && a >= b);
assert!(b < a && b <= a);
b = FlagB;
assert!(b > a && b >= a);
assert!(a < b && a <= b);
}
#[test]
// Equal values (`all()` vs the explicit union) must hash identically.
fn test_hash() {
let mut x = Flags::empty();
let mut y = Flags::empty();
assert!(hash::hash(&x) == hash::hash(&y));
x = Flags::all();
y = FlagABC;
assert!(hash::hash(&x) == hash::hash(&y));
}
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io::prelude::*;
use cell::RefCell;
use cmp;
use fmt;
use io::lazy::Lazy;
use io::{self, BufReader, LineWriter};
use sync::{Arc, Mutex, MutexGuard};
use sys::stdio;
/// Stdout override used by the print! and println! macros: when set (e.g.
/// by the test harness to capture output), it replaces the process stdout
/// for the current thread.
thread_local! {
static LOCAL_STDOUT: RefCell<Option<Box<Write + Send>>> = {
RefCell::new(None)
}
}
/// A handle to a raw instance of the standard input stream of this process.
///
/// This handle is not synchronized or buffered in any fashion. Constructed via
/// the `std::io::stdio::stdin_raw` function.
struct StdinRaw(stdio::Stdin);
/// A handle to a raw instance of the standard output stream of this process.
///
/// This handle is not synchronized or buffered in any fashion. Constructed via
/// the `std::io::stdio::stdout_raw` function.
struct StdoutRaw(stdio::Stdout);
/// A handle to a raw instance of the standard error stream of this process.
///
/// This handle is not synchronized or buffered in any fashion. Constructed via
/// the `std::io::stdio::stderr_raw` function.
struct StderrRaw(stdio::Stderr);
/// Construct a new raw handle to the standard input of this process.
///
/// The returned handle does not interact with any other handles created nor
/// handles returned by `std::io::stdin`. Data buffered by the `std::io::stdin`
/// handles is **not** available to raw handles returned from this function.
///
/// The returned handle has no external synchronization or buffering.
fn stdin_raw() -> StdinRaw { StdinRaw(stdio::Stdin::new()) }
/// Construct a new raw handle to the standard output stream of this process.
///
/// The returned handle does not interact with any other handles created nor
/// handles returned by `std::io::stdout`. Note that data is buffered by the
/// `std::io::stdout` handles so writes which happen via this raw handle may
/// appear before previous writes.
///
/// The returned handle has no external synchronization or buffering layered on
/// top.
fn stdout_raw() -> StdoutRaw { StdoutRaw(stdio::Stdout::new()) }
/// Construct a new raw handle to the standard error stream of this process.
///
/// The returned handle does not interact with any other handles created nor
/// handles returned by `std::io::stderr`.
///
/// The returned handle has no external synchronization or buffering layered on
/// top.
fn stderr_raw() -> StderrRaw { StderrRaw(stdio::Stderr::new()) }
// Raw handles forward straight to the OS-level streams: no buffering and
// no locking at this layer.
impl Read for StdinRaw {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) }
}
impl Write for StdoutRaw {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) }
// Nothing is buffered here, so flushing is a no-op.
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
impl Write for StderrRaw {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) }
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
/// A handle to the standard input stream of a process.
///
/// Each handle is a shared reference to a global buffer of input data to this
/// process. A handle can be `lock`'d to gain full access to `BufRead` methods
/// (e.g. `.lines()`). Writes to this handle are otherwise locked with respect
/// to other writes.
///
/// This handle implements the `Read` trait, but beware that concurrent reads
/// of `Stdin` must be executed with care.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stdin {
// Shared, mutex-guarded global buffer — see `stdin()` below.
inner: Arc<Mutex<BufReader<StdinRaw>>>,
}
/// A locked reference to a `Stdin` handle.
///
/// This handle implements both the `Read` and `BufRead` traits and is
/// constructed via the `lock` method on `Stdin`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StdinLock<'a> {
// Holding the guard keeps the stream locked for the lifetime `'a`.
inner: MutexGuard<'a, BufReader<StdinRaw>>,
}
/// Create a new handle to the global standard input stream of this process.
///
/// The handle returned refers to a globally shared buffer between all threads.
/// Access is synchronized and can be explicitly controlled with the `lock()`
/// method.
///
/// The `Read` trait is implemented for the returned value but the `BufRead`
/// trait is not due to the global nature of the standard input stream. The
/// locked version, `StdinLock`, implements both `Read` and `BufRead`, however.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdin() -> Stdin {
// Lazily-initialized process-global buffer; every `Stdin` handle is a
// clone of the same `Arc`.
static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = lazy_init!(stdin_init);
return Stdin {
inner: INSTANCE.get().expect("cannot access stdin during shutdown"),
};
fn stdin_init() -> Arc<Mutex<BufReader<StdinRaw>>> {
// The default buffer capacity is 64k, but apparently windows
// doesn't like 64k reads on stdin. See #13304 for details, but the
// idea is that on windows we use a slightly smaller buffer that's
// been seen to be acceptable.
Arc::new(Mutex::new(if cfg!(windows) {
BufReader::with_capacity(8 * 1024, stdin_raw())
} else {
BufReader::new(stdin_raw())
}))
}
}
impl Stdin {
/// Lock this handle to the standard input stream, returning a readable
/// guard.
///
/// The lock is released when the returned lock goes out of scope. The
/// returned guard also implements the `Read` and `BufRead` traits for
/// accessing the underlying data.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> StdinLock {
StdinLock { inner: self.inner.lock().unwrap() }
}
/// Locks this handle and reads a line of input into the specified buffer.
///
/// For detailed semantics of this method, see the documentation on
/// `BufRead::read_line`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
// The lock is held only for the duration of this single call.
self.lock().read_line(buf)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for Stdin {
    // Every method acquires the global lock and delegates to the guard.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut guard = self.lock();
        guard.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        let mut guard = self.lock();
        guard.read_to_end(buf)
    }
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        let mut guard = self.lock();
        guard.read_to_string(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for StdinLock<'a> {
    // Reads come straight from the locked `BufReader`.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let reader = &mut self.inner;
        reader.read(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> BufRead for StdinLock<'a> {
    // Both buffered-read operations forward to the underlying reader.
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        self.inner.fill_buf()
    }
    fn consume(&mut self, n: usize) {
        self.inner.consume(n)
    }
}
// As with stdin on windows, stdout often can't handle writes of large
// sizes. For an example, see #14940. For this reason, don't try to
// write the entire output buffer on windows. On unix we can just
// write the whole buffer all at once.
//
// For some other references, it appears that this problem has been
// encountered by others [1] [2]. We choose the number 8KB just because
// libuv does the same.
//
// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
// [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
#[cfg(windows)]
const OUT_MAX: usize = 8192;
// On unix there is no cap: a single write may cover the whole buffer.
#[cfg(unix)]
const OUT_MAX: usize = ::usize::MAX;
/// A handle to the global standard output stream of the current process.
///
/// Each handle shares a global buffer of data to be written to the standard
/// output stream. Access is also synchronized via a lock and explicit control
/// over locking is available via the `lock` method.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stdout {
    // FIXME: this should be LineWriter or BufWriter depending on the state of
    //        stdout (tty or not). Note that if this is not line buffered it
    //        should also flush-on-panic or some form of flush-on-abort.
    // Shared, lock-protected line-buffered writer over the raw stream.
    inner: Arc<Mutex<LineWriter<StdoutRaw>>>,
}
/// A locked reference to a `Stdout` handle.
///
/// This handle implements the `Write` trait and is constructed via the `lock`
/// method on `Stdout`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StdoutLock<'a> {
    // Guard over the shared line writer; released on drop.
    inner: MutexGuard<'a, LineWriter<StdoutRaw>>,
}
/// Constructs a new reference to the standard output of the current process.
///
/// Each handle returned is a reference to a shared global buffer whose access
/// is synchronized via a mutex. Explicit control over synchronization is
/// provided via the `lock` method.
///
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
    // One lazily-initialized, process-wide line writer shared by all handles.
    static INSTANCE: Lazy<Mutex<LineWriter<StdoutRaw>>> = lazy_init!(stdout_init);
    return Stdout {
        // `get()` yields `None` once the lazy slot has been torn down at
        // runtime shutdown.
        inner: INSTANCE.get().expect("cannot access stdout during shutdown"),
    };
    fn stdout_init() -> Arc<Mutex<LineWriter<StdoutRaw>>> {
        Arc::new(Mutex::new(LineWriter::new(stdout_raw())))
    }
}
impl Stdout {
    /// Lock this handle to the standard output stream, returning a writable
    /// guard.
    ///
    /// The lock is released when the returned lock goes out of scope. The
    /// returned guard also implements the `Write` trait for writing data.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn lock(&self) -> StdoutLock {
        // Recover from lock poisoning instead of panicking: nothing executed
        // while this lock is held can leave the writer in an inconsistent
        // state, so poison left by a panicking thread is spurious.
        // (Previously this was `.unwrap()`, which panicked on poison.)
        StdoutLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Stdout {
    // Every method acquires the global lock and delegates to the guard.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let mut guard = self.lock();
        guard.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        let mut guard = self.lock();
        guard.flush()
    }
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        let mut guard = self.lock();
        guard.write_all(buf)
    }
    fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> {
        let mut guard = self.lock();
        guard.write_fmt(fmt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for StdoutLock<'a> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Cap each write at OUT_MAX (see the comment on the constant) so that
        // platforms that cannot handle huge writes are not overwhelmed.
        let limit = cmp::min(buf.len(), OUT_MAX);
        self.inner.write(&buf[..limit])
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
/// A handle to the standard error stream of a process.
///
/// For more information, see `stderr`
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stderr {
    // Lock-protected raw stream; stderr is deliberately unbuffered.
    inner: Arc<Mutex<StderrRaw>>,
}
/// A locked reference to a `Stderr` handle.
///
/// This handle implements the `Write` trait and is constructed via the `lock`
/// method on `Stderr`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StderrLock<'a> {
    // Guard over the raw stderr stream; released on drop.
    inner: MutexGuard<'a, StderrRaw>,
}
/// Constructs a new reference to the standard error stream of a process.
///
/// Each returned handle is synchronized amongst all other handles created from
/// this function. No handles are buffered, however.
///
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
    // One lazily-initialized, process-wide raw stderr shared by all handles.
    static INSTANCE: Lazy<Mutex<StderrRaw>> = lazy_init!(stderr_init);
    return Stderr {
        // `get()` yields `None` once the lazy slot has been torn down at
        // runtime shutdown.
        inner: INSTANCE.get().expect("cannot access stderr during shutdown"),
    };
    fn stderr_init() -> Arc<Mutex<StderrRaw>> {
        Arc::new(Mutex::new(stderr_raw()))
    }
}
impl Stderr {
    /// Lock this handle to the standard error stream, returning a writable
    /// guard.
    ///
    /// The lock is released when the returned lock goes out of scope. The
    /// returned guard also implements the `Write` trait for writing data.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn lock(&self) -> StderrLock {
        // Recover from lock poisoning instead of panicking: nothing executed
        // while this lock is held can leave the stream in an inconsistent
        // state, so poison left by a panicking thread is spurious.
        // (Previously this was `.unwrap()`, which panicked on poison.)
        StderrLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Stderr {
    // Every method acquires the global lock and delegates to the guard.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let mut guard = self.lock();
        guard.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        let mut guard = self.lock();
        guard.flush()
    }
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        let mut guard = self.lock();
        guard.write_all(buf)
    }
    fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> {
        let mut guard = self.lock();
        guard.write_fmt(fmt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for StderrLock<'a> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Cap each write at OUT_MAX (see the comment on the constant) so that
        // platforms that cannot handle huge writes are not overwhelmed.
        let limit = cmp::min(buf.len(), OUT_MAX);
        self.inner.write(&buf[..limit])
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
/// Resets the task-local stderr handle to the specified writer
///
/// This will replace the current task's stderr handle, returning the old
/// handle. All future calls to `panic!` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stderr stream.
#[unstable(feature = "set_stdio",
           reason = "this function may disappear completely or be replaced \
                     with a more general mechanism")]
#[doc(hidden)]
pub fn set_panic(sink: Box<Write + Send>) -> Option<Box<Write + Send>> {
    use panicking::LOCAL_STDERR;
    use mem;
    // Swap the new sink in, then flush whatever writer it displaced before
    // handing it back. Flush failures are deliberately ignored.
    let previous = LOCAL_STDERR.with(move |slot| {
        mem::replace(&mut *slot.borrow_mut(), Some(sink))
    });
    previous.map(|mut old| {
        let _ = old.flush();
        old
    })
}
/// Resets the task-local stdout handle to the specified writer
///
/// This will replace the current task's stdout handle, returning the old
/// handle. All future calls to `print!` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stdout stream.
#[unstable(feature = "set_stdio",
           reason = "this function may disappear completely or be replaced \
                     with a more general mechanism")]
#[doc(hidden)]
pub fn set_print(sink: Box<Write + Send>) -> Option<Box<Write + Send>> {
    use mem;
    // Swap the new sink in, then flush whatever writer it displaced before
    // handing it back. Flush failures are deliberately ignored.
    let previous = LOCAL_STDOUT.with(move |slot| {
        mem::replace(&mut *slot.borrow_mut(), Some(sink))
    });
    previous.map(|mut old| {
        let _ = old.flush();
        old
    })
}
#[unstable(feature = "print",
           reason = "implementation detail which may disappear or be replaced at any time")]
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    // Route output through the thread-local override when one is installed,
    // otherwise fall back to the process-global stdout handle.
    let result = LOCAL_STDOUT.with(|s| match s.borrow_mut().as_mut() {
        Some(w) => w.write_fmt(args),
        None => stdout().write_fmt(args),
    });
    if let Err(e) = result {
        panic!("failed printing to stdout: {}", e);
    }
}
Rollup merge of #23468 - sfackler:stdio-panic, r=alexcrichton
Nothing inside of the read/write interface itself can panic, so any
poison must have been the result of user code that the lock isn't
protecting.
This seems safe to me, but if we don't want to go this route we should update the docs to indicate that these methods can panic.
r? @alexcrichton
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use io::prelude::*;
use cell::RefCell;
use cmp;
use fmt;
use io::lazy::Lazy;
use io::{self, BufReader, LineWriter};
use sync::{Arc, Mutex, MutexGuard};
use sys::stdio;
/// Stdout used by print! and println! macros
thread_local! {
    // Per-thread optional override for stdout; `None` means "use the
    // process-global handle" (see `_print` and `set_print`).
    static LOCAL_STDOUT: RefCell<Option<Box<Write + Send>>> = {
        RefCell::new(None)
    }
}
/// A handle to a raw instance of the standard input stream of this process.
///
/// This handle is not synchronized or buffered in any fashion. Constructed via
/// the `std::io::stdio::stdin_raw` function.
struct StdinRaw(stdio::Stdin);
/// A handle to a raw instance of the standard output stream of this process.
///
/// This handle is not synchronized or buffered in any fashion. Constructed via
/// the `std::io::stdio::stdout_raw` function.
struct StdoutRaw(stdio::Stdout);
/// A handle to a raw instance of the standard error stream of this process.
///
/// This handle is not synchronized or buffered in any fashion. Constructed via
/// the `std::io::stdio::stderr_raw` function.
struct StderrRaw(stdio::Stderr);
/// Construct a new raw handle to the standard input of this process.
///
/// The returned handle does not interact with any other handles created nor
/// handles returned by `std::io::stdin`. Data buffered by the `std::io::stdin`
/// handles is **not** available to raw handles returned from this function.
///
/// The returned handle has no external synchronization or buffering.
fn stdin_raw() -> StdinRaw { StdinRaw(stdio::Stdin::new()) }
/// Construct a new raw handle to the standard output stream of this process.
///
/// The returned handle does not interact with any other handles created nor
/// handles returned by `std::io::stdout`. Note that data is buffered by the
/// `std::io::stdout` handles so writes which happen via this raw handle may
/// appear before previous writes.
///
/// The returned handle has no external synchronization or buffering layered on
/// top.
fn stdout_raw() -> StdoutRaw { StdoutRaw(stdio::Stdout::new()) }
/// Construct a new raw handle to the standard error stream of this process.
///
/// The returned handle does not interact with any other handles created nor
/// handles returned by `std::io::stderr`.
///
/// The returned handle has no external synchronization or buffering layered on
/// top.
fn stderr_raw() -> StderrRaw { StderrRaw(stdio::Stderr::new()) }
impl Read for StdinRaw {
    // Unbuffered read straight through to the platform stream.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
}
impl Write for StdoutRaw {
    // Unbuffered write straight through to the platform stream.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    // Nothing is buffered at this layer, so flushing is a no-op.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl Write for StderrRaw {
    // Unbuffered write straight through to the platform stream.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.write(buf)
    }
    // Nothing is buffered at this layer, so flushing is a no-op.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
/// A handle to the standard input stream of a process.
///
/// Each handle is a shared reference to a global buffer of input data to this
/// process. A handle can be `lock`'d to gain full access to `BufRead` methods
/// (e.g. `.lines()`). Writes to this handle are otherwise locked with respect
/// to other writes.
///
/// This handle implements the `Read` trait, but beware that concurrent reads
/// of `Stdin` must be executed with care.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stdin {
    // Shared, lock-protected buffered reader over the raw stream.
    inner: Arc<Mutex<BufReader<StdinRaw>>>,
}
/// A locked reference to a `Stdin` handle.
///
/// This handle implements both the `Read` and `BufRead` traits and is
/// constructed via the `lock` method on `Stdin`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StdinLock<'a> {
    // Guard over the shared buffered reader; released on drop.
    inner: MutexGuard<'a, BufReader<StdinRaw>>,
}
/// Create a new handle to the global standard input stream of this process.
///
/// The handle returned refers to a globally shared buffer between all threads.
/// Access is synchronized and can be explicitly controlled with the `lock()`
/// method.
///
/// The `Read` trait is implemented for the returned value but the `BufRead`
/// trait is not due to the global nature of the standard input stream. The
/// locked version, `StdinLock`, implements both `Read` and `BufRead`, however.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdin() -> Stdin {
    // One lazily-initialized, process-wide buffered reader shared by every
    // `Stdin` handle returned from this function.
    static INSTANCE: Lazy<Mutex<BufReader<StdinRaw>>> = lazy_init!(stdin_init);
    return Stdin {
        // `get()` yields `None` once the lazy slot has been torn down at
        // runtime shutdown.
        inner: INSTANCE.get().expect("cannot access stdin during shutdown"),
    };
    fn stdin_init() -> Arc<Mutex<BufReader<StdinRaw>>> {
        // The default buffer capacity is 64k, but apparently windows
        // doesn't like 64k reads on stdin. See #13304 for details, but the
        // idea is that on windows we use a slightly smaller buffer that's
        // been seen to be acceptable.
        Arc::new(Mutex::new(if cfg!(windows) {
            BufReader::with_capacity(8 * 1024, stdin_raw())
        } else {
            BufReader::new(stdin_raw())
        }))
    }
}
impl Stdin {
    /// Lock this handle to the standard input stream, returning a readable
    /// guard.
    ///
    /// The lock is released when the returned lock goes out of scope. The
    /// returned guard also implements the `Read` and `BufRead` traits for
    /// accessing the underlying data.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn lock(&self) -> StdinLock {
        // A poisoned lock is still usable here: a panicking holder cannot
        // leave the reader in a broken state, so take the guard regardless.
        let guard = match self.inner.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        StdinLock { inner: guard }
    }
    /// Locks this handle and reads a line of input into the specified buffer.
    ///
    /// For detailed semantics of this method, see the documentation on
    /// `BufRead::read_line`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
        self.lock().read_line(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for Stdin {
    // Each call takes the global lock and forwards to the locked handle.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut locked = self.lock();
        locked.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        let mut locked = self.lock();
        locked.read_to_end(buf)
    }
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        let mut locked = self.lock();
        locked.read_to_string(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Read for StdinLock<'a> {
    // Reads come straight from the locked `BufReader`.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let reader = &mut self.inner;
        reader.read(buf)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> BufRead for StdinLock<'a> {
    // Buffered-read operations forward to the underlying reader.
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        self.inner.fill_buf()
    }
    fn consume(&mut self, n: usize) {
        self.inner.consume(n)
    }
}
// As with stdin on windows, stdout often can't handle writes of large
// sizes. For an example, see #14940. For this reason, don't try to
// write the entire output buffer on windows. On unix we can just
// write the whole buffer all at once.
//
// For some other references, it appears that this problem has been
// encountered by others [1] [2]. We choose the number 8KB just because
// libuv does the same.
//
// [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
// [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
#[cfg(windows)]
const OUT_MAX: usize = 8192;
// On unix there is no cap: a single write may cover the whole buffer.
#[cfg(unix)]
const OUT_MAX: usize = ::usize::MAX;
/// A handle to the global standard output stream of the current process.
///
/// Each handle shares a global buffer of data to be written to the standard
/// output stream. Access is also synchronized via a lock and explicit control
/// over locking is available via the `lock` method.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stdout {
    // FIXME: this should be LineWriter or BufWriter depending on the state of
    //        stdout (tty or not). Note that if this is not line buffered it
    //        should also flush-on-panic or some form of flush-on-abort.
    // Shared, lock-protected line-buffered writer over the raw stream.
    inner: Arc<Mutex<LineWriter<StdoutRaw>>>,
}
/// A locked reference to a `Stdout` handle.
///
/// This handle implements the `Write` trait and is constructed via the `lock`
/// method on `Stdout`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StdoutLock<'a> {
    // Guard over the shared line writer; released on drop.
    inner: MutexGuard<'a, LineWriter<StdoutRaw>>,
}
/// Constructs a new reference to the standard output of the current process.
///
/// Each handle returned is a reference to a shared global buffer whose access
/// is synchronized via a mutex. Explicit control over synchronization is
/// provided via the `lock` method.
///
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stdout() -> Stdout {
    // One lazily-initialized, process-wide line writer shared by all handles.
    static INSTANCE: Lazy<Mutex<LineWriter<StdoutRaw>>> = lazy_init!(stdout_init);
    return Stdout {
        // `get()` yields `None` once the lazy slot has been torn down at
        // runtime shutdown.
        inner: INSTANCE.get().expect("cannot access stdout during shutdown"),
    };
    fn stdout_init() -> Arc<Mutex<LineWriter<StdoutRaw>>> {
        Arc::new(Mutex::new(LineWriter::new(stdout_raw())))
    }
}
impl Stdout {
    /// Lock this handle to the standard output stream, returning a writable
    /// guard.
    ///
    /// The lock is released when the returned lock goes out of scope. The
    /// returned guard also implements the `Write` trait for writing data.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn lock(&self) -> StdoutLock {
        // A poisoned lock is still usable here: a panicking holder cannot
        // leave the writer in a broken state, so take the guard regardless.
        let guard = match self.inner.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        StdoutLock { inner: guard }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Stdout {
    // Each call takes the global lock and forwards to the locked handle.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let mut locked = self.lock();
        locked.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        let mut locked = self.lock();
        locked.flush()
    }
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        let mut locked = self.lock();
        locked.write_all(buf)
    }
    fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> {
        let mut locked = self.lock();
        locked.write_fmt(fmt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for StdoutLock<'a> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Cap each write at OUT_MAX (see the comment on the constant) so that
        // platforms that cannot handle huge writes are not overwhelmed.
        let limit = cmp::min(buf.len(), OUT_MAX);
        self.inner.write(&buf[..limit])
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
/// A handle to the standard error stream of a process.
///
/// For more information, see `stderr`
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stderr {
    // Lock-protected raw stream; stderr is deliberately unbuffered.
    inner: Arc<Mutex<StderrRaw>>,
}
/// A locked reference to a `Stderr` handle.
///
/// This handle implements the `Write` trait and is constructed via the `lock`
/// method on `Stderr`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StderrLock<'a> {
    // Guard over the raw stderr stream; released on drop.
    inner: MutexGuard<'a, StderrRaw>,
}
/// Constructs a new reference to the standard error stream of a process.
///
/// Each returned handle is synchronized amongst all other handles created from
/// this function. No handles are buffered, however.
///
/// The returned handle implements the `Write` trait.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
    // One lazily-initialized, process-wide raw stderr shared by all handles.
    static INSTANCE: Lazy<Mutex<StderrRaw>> = lazy_init!(stderr_init);
    return Stderr {
        // `get()` yields `None` once the lazy slot has been torn down at
        // runtime shutdown.
        inner: INSTANCE.get().expect("cannot access stderr during shutdown"),
    };
    fn stderr_init() -> Arc<Mutex<StderrRaw>> {
        Arc::new(Mutex::new(stderr_raw()))
    }
}
impl Stderr {
    /// Lock this handle to the standard error stream, returning a writable
    /// guard.
    ///
    /// The lock is released when the returned lock goes out of scope. The
    /// returned guard also implements the `Write` trait for writing data.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn lock(&self) -> StderrLock {
        // A poisoned lock is still usable here: a panicking holder cannot
        // leave the stream in a broken state, so take the guard regardless.
        let guard = match self.inner.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        StderrLock { inner: guard }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Stderr {
    // Each call takes the global lock and forwards to the locked handle.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let mut locked = self.lock();
        locked.write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        let mut locked = self.lock();
        locked.flush()
    }
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        let mut locked = self.lock();
        locked.write_all(buf)
    }
    fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> {
        let mut locked = self.lock();
        locked.write_fmt(fmt)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Write for StderrLock<'a> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Cap each write at OUT_MAX (see the comment on the constant) so that
        // platforms that cannot handle huge writes are not overwhelmed.
        let limit = cmp::min(buf.len(), OUT_MAX);
        self.inner.write(&buf[..limit])
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
/// Resets the task-local stderr handle to the specified writer
///
/// This will replace the current task's stderr handle, returning the old
/// handle. All future calls to `panic!` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stderr stream.
#[unstable(feature = "set_stdio",
           reason = "this function may disappear completely or be replaced \
                     with a more general mechanism")]
#[doc(hidden)]
pub fn set_panic(sink: Box<Write + Send>) -> Option<Box<Write + Send>> {
    use panicking::LOCAL_STDERR;
    use mem;
    // Swap in the new sink, then flush whatever writer it displaced before
    // handing it back. Flush failures are deliberately ignored.
    let previous = LOCAL_STDERR.with(move |slot| {
        mem::replace(&mut *slot.borrow_mut(), Some(sink))
    });
    previous.map(|mut old| {
        let _ = old.flush();
        old
    })
}
/// Resets the task-local stdout handle to the specified writer
///
/// This will replace the current task's stdout handle, returning the old
/// handle. All future calls to `print!` and friends will emit their output to
/// this specified handle.
///
/// Note that this does not need to be called for all new tasks; the default
/// output handle is to the process's stdout stream.
#[unstable(feature = "set_stdio",
           reason = "this function may disappear completely or be replaced \
                     with a more general mechanism")]
#[doc(hidden)]
pub fn set_print(sink: Box<Write + Send>) -> Option<Box<Write + Send>> {
    use mem;
    // Swap in the new sink, then flush whatever writer it displaced before
    // handing it back. Flush failures are deliberately ignored.
    let previous = LOCAL_STDOUT.with(move |slot| {
        mem::replace(&mut *slot.borrow_mut(), Some(sink))
    });
    previous.map(|mut old| {
        let _ = old.flush();
        old
    })
}
#[unstable(feature = "print",
           reason = "implementation detail which may disappear or be replaced at any time")]
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    // Route output through the thread-local override when one is installed,
    // otherwise fall back to the process-global stdout handle.
    let result = LOCAL_STDOUT.with(|s| match s.borrow_mut().as_mut() {
        Some(w) => w.write_fmt(args),
        None => stdout().write_fmt(args),
    });
    if let Err(e) = result {
        panic!("failed printing to stdout: {}", e);
    }
}
#[cfg(test)]
mod test {
    use thread;
    use super::*;
    // Regression test: a thread that panics while holding the stdio locks
    // must not leave them unusable (poisoned) for the rest of the process.
    #[test]
    fn panic_doesnt_poison() {
        thread::spawn(|| {
            // Hold all three locks, then panic while they are held.
            let _a = stdin();
            let _a = _a.lock();
            let _a = stdout();
            let _a = _a.lock();
            let _a = stderr();
            let _a = _a.lock();
            panic!();
        }).join().unwrap_err();
        // Locking again must succeed despite the earlier panic.
        let _a = stdin();
        let _a = _a.lock();
        let _a = stdout();
        let _a = _a.lock();
        let _a = stderr();
        let _a = _a.lock();
    }
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Defines a [`BufferBuilder`](crate::array::BufferBuilder) capable
//! of creating a [`Buffer`](crate::buffer::Buffer) which can be used
//! as an internal buffer in an [`ArrayData`](crate::array::ArrayData)
//! object.
use std::any::Any;
use std::collections::HashMap;
use std::io::Write;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use crate::array::*;
use crate::buffer::{Buffer, MutableBuffer};
use crate::datatypes::*;
use crate::error::{ArrowError, Result};
use crate::util::bit_util;
/// Converts a `MutableBuffer` to a `BufferBuilder<T>`.
///
/// `slots` is the number of array slots currently represented in the `MutableBuffer`.
pub(crate) fn mutable_buffer_to_builder<T: ArrowPrimitiveType>(
    mutable_buffer: MutableBuffer,
    slots: usize,
) -> BufferBuilder<T> {
    // Wrap the existing buffer directly; the caller is responsible for
    // `slots` matching the buffer's actual contents.
    BufferBuilder::<T> {
        buffer: mutable_buffer,
        len: slots,
        _marker: PhantomData,
    }
}
/// Converts a `BufferBuilder<T>` into it's underlying `MutableBuffer`.
///
/// `From` is not implemented because associated type bounds are unstable.
pub(crate) fn builder_to_mutable_buffer<T: ArrowPrimitiveType>(
builder: BufferBuilder<T>,
) -> MutableBuffer {
builder.buffer
}
/// Builder for creating a [`Buffer`](crate::buffer::Buffer) object.
///
/// This builder is implemented for primitive types and creates a
/// buffer with a zero-copy `build()` method.
///
/// See trait [`BufferBuilderTrait`](crate::array::BufferBuilderTrait)
/// for further documentation and examples.
///
/// A [`Buffer`](crate::buffer::Buffer) is the underlying data
/// structure of Arrow's [`Arrays`](crate::array::Array).
///
/// For all supported types, there are type definitions for the
/// generic version of `BufferBuilder<T>`, e.g. `UInt8BufferBuilder`.
///
/// # Example:
///
/// ```
/// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
///
/// # fn main() -> arrow::error::Result<()> {
/// let mut builder = UInt8BufferBuilder::new(100);
/// builder.append_slice(&[42, 43, 44]);
/// builder.append(45);
/// let buffer = builder.finish();
///
/// assert_eq!(unsafe { buffer.typed_data::<u8>() }, &[42, 43, 44, 45]);
/// # Ok(())
/// # }
/// ```
pub struct BufferBuilder<T: ArrowPrimitiveType> {
    // Growable byte storage the builder appends into.
    buffer: MutableBuffer,
    // Number of array slots (elements, not bytes) written so far.
    len: usize,
    // Ties the builder to its element type without storing a `T`.
    _marker: PhantomData<T>,
}
/// Trait for simplifying the construction of [`Buffers`](crate::buffer::Buffer).
///
/// This trait is used mainly to offer separate implementations for
/// numeric types and boolean types, while still being able to call methods on a buffer
/// builder with a generic primitive type.
/// Separate implementations of this trait allow adding implementation details,
/// e.g. the implementation for boolean types uses bit-packing.
pub trait BufferBuilderTrait<T: ArrowPrimitiveType> {
    /// Creates a new builder with initial capacity for _at least_ `capacity`
    /// elements of type `T`.
    ///
    /// The capacity can later be manually adjusted with the
    /// [`reserve()`](BufferBuilderTrait::reserve) method.
    /// Also the
    /// [`append()`](BufferBuilderTrait::append),
    /// [`append_slice()`](BufferBuilderTrait::append_slice) and
    /// [`advance()`](BufferBuilderTrait::advance)
    /// methods automatically increase the capacity if needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    ///
    /// assert!(builder.capacity() >= 10);
    /// ```
    fn new(capacity: usize) -> Self;
    /// Returns the current number of array elements in the internal buffer.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append(42);
    ///
    /// assert_eq!(builder.len(), 1);
    /// ```
    fn len(&self) -> usize;
    /// Returns the actual capacity (number of elements) of the internal buffer.
    ///
    /// Note: the internal capacity returned by this method might be larger than
    /// what you'd expect after setting the capacity in the `new()` or `reserve()`
    /// functions.
    fn capacity(&self) -> usize;
    /// Increases the number of elements in the internal buffer by `n`
    /// and resizes the buffer as needed.
    ///
    /// The values of the newly added elements are undefined.
    /// This method is usually used when appending `NULL` values to the buffer
    /// as they still require physical memory space.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.advance(2);
    ///
    /// assert_eq!(builder.len(), 2);
    /// ```
    fn advance(&mut self, n: usize) -> Result<()>;
    /// Reserves memory for _at least_ `n` more elements of type `T`.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.reserve(10);
    ///
    /// assert!(builder.capacity() >= 20);
    /// ```
    fn reserve(&mut self, n: usize) -> Result<()>;
    /// Appends a value of type `T` into the builder,
    /// growing the internal buffer as needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append(42);
    ///
    /// assert_eq!(builder.len(), 1);
    /// ```
    fn append(&mut self, value: T::Native) -> Result<()>;
    /// Appends a value of type `T` into the builder N times,
    /// growing the internal buffer as needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append_n(10, 42);
    ///
    /// assert_eq!(builder.len(), 10);
    /// ```
    fn append_n(&mut self, n: usize, value: T::Native) -> Result<()>;
    /// Appends a slice of type `T`, growing the internal buffer as needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append_slice(&[42, 44, 46]);
    ///
    /// assert_eq!(builder.len(), 3);
    /// ```
    fn append_slice(&mut self, slice: &[T::Native]) -> Result<()>;
    /// Resets this builder and returns an immutable [`Buffer`](crate::buffer::Buffer).
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append_slice(&[42, 44, 46]);
    ///
    /// let buffer = builder.finish();
    ///
    /// assert_eq!(unsafe { buffer.typed_data::<u8>() }, &[42, 44, 46]);
    /// ```
    fn finish(&mut self) -> Buffer;
}
// Generic implementation for all primitive types. The `default fn` markers
// make these methods specializable; the `BooleanType` impl below overrides
// them with a bit-packed representation.
impl<T: ArrowPrimitiveType> BufferBuilderTrait<T> for BufferBuilder<T> {
    default fn new(capacity: usize) -> Self {
        // Capacity is requested in elements; the buffer works in bytes.
        let buffer = MutableBuffer::new(capacity * mem::size_of::<T::Native>());
        Self {
            buffer,
            len: 0,
            _marker: PhantomData,
        }
    }
    fn len(&self) -> usize {
        self.len
    }
    fn capacity(&self) -> usize {
        // Convert the byte capacity into an element count via the type's
        // bit width (this also works for the bit-packed boolean case).
        let bit_capacity = self.buffer.capacity() * 8;
        bit_capacity / T::get_bit_width()
    }
    default fn advance(&mut self, i: usize) -> Result<()> {
        // Grow the byte buffer to cover `i` more (uninitialized) slots.
        let new_buffer_len = (self.len + i) * mem::size_of::<T::Native>();
        self.buffer.resize(new_buffer_len)?;
        self.len += i;
        Ok(())
    }
    default fn reserve(&mut self, n: usize) -> Result<()> {
        // Reserve enough bytes for the total element count after `n` appends.
        let new_capacity = self.len + n;
        let byte_capacity = mem::size_of::<T::Native>() * new_capacity;
        self.buffer.reserve(byte_capacity)?;
        Ok(())
    }
    default fn append(&mut self, v: T::Native) -> Result<()> {
        self.reserve(1)?;
        self.write_bytes(v.to_byte_slice(), 1)
    }
    default fn append_n(&mut self, n: usize, v: T::Native) -> Result<()> {
        self.reserve(n)?;
        for _ in 0..n {
            self.write_bytes(v.to_byte_slice(), 1)?;
        }
        Ok(())
    }
    default fn append_slice(&mut self, slice: &[T::Native]) -> Result<()> {
        let array_slots = slice.len();
        self.reserve(array_slots)?;
        self.write_bytes(slice.to_byte_slice(), array_slots)
    }
    default fn finish(&mut self) -> Buffer {
        // Swap in an empty buffer so the builder can be reused after `finish`.
        let buf = std::mem::replace(&mut self.buffer, MutableBuffer::new(0));
        self.len = 0;
        buf.freeze()
    }
}
impl<T: ArrowPrimitiveType> BufferBuilder<T> {
    /// Writes a byte slice to the underlying buffer, bumping `len` (the
    /// number of array slots) by `len_added` on success. Translates the
    /// `io::Result` produced by the `Write` impl into an Arrow `Result`.
    fn write_bytes(&mut self, bytes: &[u8], len_added: usize) -> Result<()> {
        match self.buffer.write(bytes) {
            Ok(_) => {
                self.len += len_added;
                Ok(())
            }
            // Any write failure is surfaced uniformly as a memory error.
            Err(_) => Err(ArrowError::MemoryError(
                "Could not write to Buffer, not big enough".to_string(),
            )),
        }
    }
}
// Specialized, bit-packed implementation for booleans: each element occupies
// one bit, and freshly reserved storage is zeroed so only `true` bits need to
// be set.
impl BufferBuilderTrait<BooleanType> for BufferBuilder<BooleanType> {
    fn new(capacity: usize) -> Self {
        // `capacity` is in elements (bits); round up to whole bytes, then to
        // a multiple of 64 bytes, and zero the storage so unset bits read as
        // `false`.
        let byte_capacity = bit_util::ceil(capacity, 8);
        let actual_capacity = bit_util::round_upto_multiple_of_64(byte_capacity);
        let mut buffer = MutableBuffer::new(actual_capacity);
        buffer.set_null_bits(0, actual_capacity);
        Self {
            buffer,
            len: 0,
            _marker: PhantomData,
        }
    }
    fn advance(&mut self, i: usize) -> Result<()> {
        // Grow to the number of whole bytes needed for `len + i` bits.
        let new_buffer_len = bit_util::ceil(self.len + i, 8);
        self.buffer.resize(new_buffer_len)?;
        self.len += i;
        Ok(())
    }
    fn append(&mut self, v: bool) -> Result<()> {
        self.reserve(1)?;
        if v {
            // For performance the `len` of the buffer is not updated on each append but
            // is updated in the `freeze` method instead.
            // SAFETY: `reserve(1)` above guarantees bit `self.len` is within
            // the allocated (and zeroed) storage.
            unsafe {
                bit_util::set_bit_raw(self.buffer.raw_data_mut(), self.len);
            }
        }
        self.len += 1;
        Ok(())
    }
    fn append_n(&mut self, n: usize, v: bool) -> Result<()> {
        self.reserve(n)?;
        if v {
            // SAFETY: `reserve(n)` above guarantees bits `self.len..self.len + n`
            // are within the allocated (and zeroed) storage.
            unsafe {
                bit_util::set_bits_raw(self.buffer.raw_data_mut(), self.len, self.len + n)
            }
        }
        self.len += n;
        Ok(())
    }
    fn append_slice(&mut self, slice: &[bool]) -> Result<()> {
        self.reserve(slice.len())?;
        for v in slice {
            if *v {
                // For performance the `len` of the buffer is not
                // updated on each append but is updated in the
                // `freeze` method instead.
                // SAFETY: the `reserve` above covers every bit set in this loop.
                unsafe {
                    bit_util::set_bit_raw(self.buffer.raw_data_mut(), self.len);
                }
            }
            self.len += 1;
        }
        Ok(())
    }
    fn reserve(&mut self, n: usize) -> Result<()> {
        let new_capacity = self.len + n;
        if new_capacity > self.capacity() {
            let new_byte_capacity = bit_util::ceil(new_capacity, 8);
            let existing_capacity = self.buffer.capacity();
            let new_capacity = self.buffer.reserve(new_byte_capacity)?;
            // Zero the newly acquired bytes so unset bits read as `false`.
            self.buffer
                .set_null_bits(existing_capacity, new_capacity - existing_capacity);
        }
        Ok(())
    }
    fn finish(&mut self) -> Buffer {
        // `append` does not update the buffer's `len` so do it before `freeze` is called.
        let new_buffer_len = bit_util::ceil(self.len, 8);
        debug_assert!(new_buffer_len >= self.buffer.len());
        let mut buf = std::mem::replace(&mut self.buffer, MutableBuffer::new(0));
        self.len = 0;
        buf.resize(new_buffer_len).unwrap();
        buf.freeze()
    }
}
/// Trait for dealing with different array builders at runtime
pub trait ArrayBuilder: Any {
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize;
    /// Builds the array
    fn finish(&mut self) -> ArrayRef;
    /// Returns the builder as a non-mutable `Any` reference.
    ///
    /// This is most useful when one wants to call non-mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_ref` to get a reference on the specific builder.
    fn as_any(&self) -> &Any;
    /// Returns the builder as a mutable `Any` reference.
    ///
    /// This is most useful when one wants to call mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_mut` to get a reference on the specific builder.
    fn as_any_mut(&mut self) -> &mut Any;
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any>;
}
/// Array builder for fixed-width primitive types
pub struct PrimitiveBuilder<T: ArrowPrimitiveType> {
    // Raw element values, one native value per slot (including null slots).
    values_builder: BufferBuilder<T>,
    // Validity bitmap: one bit per slot, set = non-null.
    bitmap_builder: BooleanBufferBuilder,
}
impl<T: ArrowPrimitiveType> ArrayBuilder for PrimitiveBuilder<T> {
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.values_builder.len
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // `self.finish()` resolves to the inherent `PrimitiveBuilder::finish`
        // (inherent methods take precedence over trait methods), so this
        // builds the typed array and then erases its type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl<T: ArrowPrimitiveType> PrimitiveBuilder<T> {
    /// Creates a new primitive array builder
    pub fn new(capacity: usize) -> Self {
        Self {
            values_builder: BufferBuilder::<T>::new(capacity),
            bitmap_builder: BooleanBufferBuilder::new(capacity),
        }
    }
    /// Returns the capacity of this builder measured in slots of type `T`
    pub fn capacity(&self) -> usize {
        self.values_builder.capacity()
    }
    /// Appends a value of type `T` into the builder
    pub fn append_value(&mut self, v: T::Native) -> Result<()> {
        self.bitmap_builder.append(true)?;
        self.values_builder.append(v)?;
        Ok(())
    }
    /// Appends a null slot into the builder
    pub fn append_null(&mut self) -> Result<()> {
        self.bitmap_builder.append(false)?;
        // Still advance the values buffer so values and validity bits stay
        // aligned slot-for-slot.
        self.values_builder.advance(1)?;
        Ok(())
    }
    /// Appends an `Option<T>` into the builder
    pub fn append_option(&mut self, v: Option<T::Native>) -> Result<()> {
        match v {
            None => self.append_null()?,
            Some(v) => self.append_value(v)?,
        };
        Ok(())
    }
    /// Appends a slice of type `T` into the builder
    pub fn append_slice(&mut self, v: &[T::Native]) -> Result<()> {
        self.bitmap_builder.append_n(v.len(), true)?;
        self.values_builder.append_slice(v)?;
        Ok(())
    }
    /// Builds the `PrimitiveArray` and reset this builder.
    pub fn finish(&mut self) -> PrimitiveArray<T> {
        let len = self.len();
        let null_bit_buffer = self.bitmap_builder.finish();
        // Nulls are exactly the slots whose validity bit is unset.
        let null_count = len - bit_util::count_set_bits(null_bit_buffer.data());
        let mut builder = ArrayData::builder(T::get_data_type())
            .len(len)
            .add_buffer(self.values_builder.finish());
        if null_count > 0 {
            // Only attach the validity bitmap when at least one slot is null.
            builder = builder
                .null_count(null_count)
                .null_bit_buffer(null_bit_buffer);
        }
        let data = builder.build();
        PrimitiveArray::<T>::from(data)
    }
    /// Builds the `DictionaryArray` and reset this builder.
    pub fn finish_dict(&mut self, values: ArrayRef) -> DictionaryArray<T> {
        let len = self.len();
        let null_bit_buffer = self.bitmap_builder.finish();
        let null_count = len - bit_util::count_set_bits(null_bit_buffer.data());
        // This builder's own values become the dictionary keys; `values` is
        // the dictionary itself, attached below as child data.
        let data_type = DataType::Dictionary(
            Box::new(T::get_data_type()),
            Box::new(values.data_type().clone()),
        );
        let mut builder = ArrayData::builder(data_type)
            .len(len)
            .add_buffer(self.values_builder.finish());
        if null_count > 0 {
            builder = builder
                .null_count(null_count)
                .null_bit_buffer(null_bit_buffer);
        }
        builder = builder.add_child_data(values.data());
        DictionaryArray::<T>::from(builder.build())
    }
}
/// Array builder for `ListArray`
pub struct ListBuilder<T: ArrayBuilder> {
    // End offset of each list slot in the flattened child values; always
    // seeded with a leading 0 so slot `i` spans offsets[i]..offsets[i + 1].
    offsets_builder: Int32BufferBuilder,
    // Validity bitmap, one bit per list slot.
    bitmap_builder: BooleanBufferBuilder,
    // Builder for the flattened child values.
    values_builder: T,
    // Number of list slots appended so far.
    len: usize,
}
impl<T: ArrayBuilder> ListBuilder<T> {
    /// Creates a new `ListArrayBuilder` from a given values array builder
    pub fn new(values_builder: T) -> Self {
        let initial_capacity = values_builder.len();
        Self::with_capacity(values_builder, initial_capacity)
    }

    /// Creates a new `ListArrayBuilder` from a given values array builder
    /// `capacity` is the number of items to pre-allocate space for in this builder
    pub fn with_capacity(values_builder: T, capacity: usize) -> Self {
        // One extra slot for the leading 0: slot `i` spans
        // offsets[i]..offsets[i + 1] in the flattened child values.
        let mut offsets = Int32BufferBuilder::new(capacity + 1);
        offsets.append(0).unwrap();
        Self {
            offsets_builder: offsets,
            bitmap_builder: BooleanBufferBuilder::new(capacity),
            values_builder,
            len: 0,
        }
    }
}
impl<T: ArrayBuilder> ArrayBuilder for ListBuilder<T>
where
    T: 'static,
{
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.len
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `ListBuilder::finish`, then erases the
        // concrete array type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl<T: ArrayBuilder> ListBuilder<T>
where
    T: 'static,
{
    /// Returns the child array builder as a mutable reference.
    ///
    /// This mutable reference can be used to append values into the child array builder,
    /// but you must call `append` to delimit each distinct list value.
    pub fn values(&mut self) -> &mut T {
        &mut self.values_builder
    }
    /// Finish the current variable-length list array slot
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        // Record where the current list ends in the flattened child values.
        self.offsets_builder
            .append(self.values_builder.len() as i32)?;
        self.bitmap_builder.append(is_valid)?;
        self.len += 1;
        Ok(())
    }
    /// Builds the `ListArray` and reset this builder.
    pub fn finish(&mut self) -> ListArray {
        let len = self.len();
        self.len = 0;
        // Round-trip through `Any` back to the concrete builder type `T`
        // before calling its `finish`.
        let values_arr = self
            .values_builder
            .as_any_mut()
            .downcast_mut::<T>()
            .unwrap()
            .finish();
        let values_data = values_arr.data();
        let offset_buffer = self.offsets_builder.finish();
        let null_bit_buffer = self.bitmap_builder.finish();
        // Re-seed the offsets with the leading 0 so this builder can be
        // reused after `finish`.
        self.offsets_builder.append(0).unwrap();
        let data =
            ArrayData::builder(DataType::List(Box::new(values_data.data_type().clone())))
                .len(len)
                .null_count(len - bit_util::count_set_bits(null_bit_buffer.data()))
                .add_buffer(offset_buffer)
                .add_child_data(values_data)
                .null_bit_buffer(null_bit_buffer)
                .build();
        ListArray::from(data)
    }
}
/// Array builder for `FixedSizeListArray`
pub struct FixedSizeListBuilder<T: ArrayBuilder> {
    // Validity bitmap, one bit per list slot.
    bitmap_builder: BooleanBufferBuilder,
    // Builder for the flattened child values.
    values_builder: T,
    // Number of list slots appended so far.
    len: usize,
    // Fixed number of child values in every list slot.
    list_len: i32,
}
impl<T: ArrayBuilder> FixedSizeListBuilder<T> {
    /// Creates a new `FixedSizeListBuilder` from a given values array builder
    /// `length` is the number of values within each array
    pub fn new(values_builder: T, length: i32) -> Self {
        let capacity = values_builder.len();
        Self::with_capacity(values_builder, length, capacity)
    }

    /// Creates a new `FixedSizeListBuilder` from a given values array builder
    /// `length` is the number of values within each array
    /// `capacity` is the number of items to pre-allocate space for in this builder
    pub fn with_capacity(values_builder: T, length: i32, capacity: usize) -> Self {
        // Unlike `ListBuilder`, a fixed-size list needs no offsets buffer —
        // every slot has exactly `length` child values. (A previous version
        // allocated an unused `Int32BufferBuilder` here; it has been removed.)
        Self {
            bitmap_builder: BooleanBufferBuilder::new(capacity),
            values_builder,
            len: 0,
            list_len: length,
        }
    }
}
impl<T: ArrayBuilder> ArrayBuilder for FixedSizeListBuilder<T>
where
    T: 'static,
{
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.len
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `FixedSizeListBuilder::finish`, then
        // erases the concrete array type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl<T: ArrayBuilder> FixedSizeListBuilder<T>
where
    T: 'static,
{
    /// Returns the child array builder as a mutable reference.
    ///
    /// This mutable reference can be used to append values into the child array builder,
    /// but you must call `append` to delimit each distinct list value.
    pub fn values(&mut self) -> &mut T {
        &mut self.values_builder
    }
    /// Returns the fixed number of child values in every list slot.
    pub fn value_length(&self) -> i32 {
        self.list_len
    }
    /// Finish the current fixed-size list array slot
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.bitmap_builder.append(is_valid)?;
        self.len += 1;
        Ok(())
    }
    /// Builds the `FixedSizeListArray` and reset this builder.
    pub fn finish(&mut self) -> FixedSizeListArray {
        let len = self.len();
        self.len = 0;
        // Round-trip through `Any` back to the concrete builder type `T`
        // before calling its `finish`.
        let values_arr = self
            .values_builder
            .as_any_mut()
            .downcast_mut::<T>()
            .unwrap()
            .finish();
        let values_data = values_arr.data();
        // check that values_data length is multiple of len if we have data
        if len != 0 {
            // NOTE(review): the message's second placeholder prints the
            // number of lists (`len`), not the expected per-list length
            // (`self.list_len`) — potentially confusing when it fires.
            assert!(
                values_data.len() / len == self.list_len as usize,
                "Values of FixedSizeList must have equal lengths, values have length {} and list has {}",
                values_data.len(),
                len
            );
        }
        let null_bit_buffer = self.bitmap_builder.finish();
        let data = ArrayData::builder(DataType::FixedSizeList(
            Box::new(values_data.data_type().clone()),
            self.list_len,
        ))
        .len(len)
        .null_count(len - bit_util::count_set_bits(null_bit_buffer.data()))
        .add_child_data(values_data)
        .null_bit_buffer(null_bit_buffer)
        .build();
        FixedSizeListArray::from(data)
    }
}
/// Array builder for `BinaryArray`
pub struct BinaryBuilder {
    // Each binary value is stored as one variable-length list of bytes.
    builder: ListBuilder<UInt8Builder>,
}
/// Array builder for `StringArray`: each string is stored as a
/// variable-length list of its UTF-8 bytes.
pub struct StringBuilder {
    builder: ListBuilder<UInt8Builder>,
}
/// Array builder for `FixedSizeBinaryArray`: every value occupies the same
/// fixed number of bytes.
pub struct FixedSizeBinaryBuilder {
    builder: FixedSizeListBuilder<UInt8Builder>,
}
/// Marker trait shared by all builders that produce binary-like arrays.
pub trait BinaryArrayBuilder: ArrayBuilder {}
impl BinaryArrayBuilder for BinaryBuilder {}
impl BinaryArrayBuilder for StringBuilder {}
impl BinaryArrayBuilder for FixedSizeBinaryBuilder {}
impl ArrayBuilder for BinaryBuilder {
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.builder.len()
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `BinaryBuilder::finish`, then erases the
        // concrete array type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl ArrayBuilder for StringBuilder {
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.builder.len()
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `StringBuilder::finish`, then erases the
        // concrete array type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl ArrayBuilder for FixedSizeBinaryBuilder {
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.builder.len()
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `FixedSizeBinaryBuilder::finish`, then
        // erases the concrete array type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl BinaryBuilder {
    /// Creates a new `BinaryBuilder`, `capacity` is the number of bytes in the values
    /// array
    pub fn new(capacity: usize) -> Self {
        let values_builder = UInt8Builder::new(capacity);
        Self {
            builder: ListBuilder::new(values_builder),
        }
    }
    /// Appends a single byte value into the builder's values array.
    ///
    /// Note, when appending individual byte values you must call `append` to delimit each
    /// distinct list value.
    pub fn append_byte(&mut self, value: u8) -> Result<()> {
        self.builder.values().append_value(value)?;
        Ok(())
    }
    /// Appends a byte slice into the builder.
    ///
    /// Automatically calls the `append` method to delimit the slice appended in as a
    /// distinct array element.
    pub fn append_value(&mut self, value: &[u8]) -> Result<()> {
        self.builder.values().append_slice(value)?;
        self.builder.append(true)?;
        Ok(())
    }
    /// Finish the current variable-length list array slot.
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.builder.append(is_valid)
    }
    /// Append a null value to the array.
    pub fn append_null(&mut self) -> Result<()> {
        self.append(false)
    }
    /// Builds the `BinaryArray` and reset this builder.
    pub fn finish(&mut self) -> BinaryArray {
        // The inner list-of-u8 array is reinterpreted as a binary array.
        BinaryArray::from(self.builder.finish())
    }
}
impl StringBuilder {
    /// Creates a new `StringBuilder`,
    /// `capacity` is the number of bytes of string data to pre-allocate space for in this builder
    pub fn new(capacity: usize) -> Self {
        let values_builder = UInt8Builder::new(capacity);
        Self {
            builder: ListBuilder::new(values_builder),
        }
    }
    /// Creates a new `StringBuilder`,
    /// `data_capacity` is the number of bytes of string data to pre-allocate space for in this builder
    /// `item_capacity` is the number of items to pre-allocate space for in this builder
    pub fn with_capacity(item_capacity: usize, data_capacity: usize) -> Self {
        let values_builder = UInt8Builder::new(data_capacity);
        Self {
            builder: ListBuilder::with_capacity(values_builder, item_capacity),
        }
    }
    /// Appends a string into the builder.
    ///
    /// Automatically calls the `append` method to delimit the string appended in as a
    /// distinct array element.
    pub fn append_value(&mut self, value: &str) -> Result<()> {
        // Strings are stored as their raw UTF-8 bytes in the child list.
        self.builder.values().append_slice(value.as_bytes())?;
        self.builder.append(true)?;
        Ok(())
    }
    /// Finish the current variable-length list array slot.
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.builder.append(is_valid)
    }
    /// Append a null value to the array.
    pub fn append_null(&mut self) -> Result<()> {
        self.append(false)
    }
    /// Builds the `StringArray` and reset this builder.
    pub fn finish(&mut self) -> StringArray {
        // The inner list-of-u8 array is reinterpreted as a UTF-8 string array.
        StringArray::from(self.builder.finish())
    }
}
impl FixedSizeBinaryBuilder {
    /// Creates a new `FixedSizeBinaryBuilder`, `capacity` is the number of bytes in the
    /// values array and `byte_width` the fixed length of every value
    pub fn new(capacity: usize, byte_width: i32) -> Self {
        let values_builder = UInt8Builder::new(capacity);
        Self {
            builder: FixedSizeListBuilder::new(values_builder, byte_width),
        }
    }
    /// Appends a byte slice into the builder.
    ///
    /// Automatically calls the `append` method to delimit the slice appended in as a
    /// distinct array element.
    ///
    /// # Panics
    ///
    /// Panics if `value` does not contain exactly `byte_width` bytes.
    pub fn append_value(&mut self, value: &[u8]) -> Result<()> {
        assert_eq!(
            self.builder.value_length(),
            value.len() as i32,
            "Byte slice does not have the same length as FixedSizeBinaryBuilder value lengths"
        );
        self.builder.values().append_slice(value)?;
        self.builder.append(true)
    }
    /// Append a null value to the array.
    pub fn append_null(&mut self) -> Result<()> {
        // A null slot still occupies `byte_width` bytes; pad the child values
        // with zeros so all slots stay the same width.
        let length: usize = self.builder.value_length() as usize;
        self.builder.values().append_slice(&vec![0u8; length][..])?;
        self.builder.append(false)
    }
    /// Builds the `FixedSizeBinaryArray` and reset this builder.
    pub fn finish(&mut self) -> FixedSizeBinaryArray {
        FixedSizeBinaryArray::from(self.builder.finish())
    }
}
/// Array builder for Struct types.
///
/// Note that callers should make sure that methods of all the child field builders are
/// properly called to maintain the consistency of the data structure.
pub struct StructBuilder {
    // Schema fields, one per child builder, in the same order.
    fields: Vec<Field>,
    // `Any`-typed aliases of the same boxed builders as `field_builders`,
    // used for `downcast_mut` in `field_builder()`. Both vectors point at the
    // same allocations — see `new` and the `Drop` impl below.
    field_anys: Vec<Box<Any>>,
    field_builders: Vec<Box<ArrayBuilder>>,
    // Validity bitmap, one bit per struct slot.
    bitmap_builder: BooleanBufferBuilder,
    // Number of struct slots appended so far.
    len: usize,
}
impl ArrayBuilder for StructBuilder {
    /// Returns the number of array slots in the builder.
    ///
    /// This is the number of `append` calls made on this builder; it is the
    /// caller's responsibility to keep every child field builder at an equal
    /// number of elements.
    fn len(&self) -> usize {
        self.len
    }
    /// Builds the array.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `StructBuilder::finish`, then erases the
        // concrete array type behind `ArrayRef`.
        Arc::new(self.finish())
    }
    /// Returns the builder as a non-mutable `Any` reference.
    ///
    /// This is most useful when one wants to call non-mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_ref` to get a reference on the specific builder.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    ///
    /// This is most useful when one wants to call mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_mut` to get a reference on the specific builder.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
}
impl StructBuilder {
    /// Creates a new `StructBuilder` from field definitions and one child
    /// builder per field, in matching order.
    pub fn new(fields: Vec<Field>, builders: Vec<Box<ArrayBuilder>>) -> Self {
        let mut field_anys = Vec::with_capacity(builders.len());
        let mut field_builders = Vec::with_capacity(builders.len());
        // Create and maintain two references for each of the input builder. We need the
        // extra `Any` reference because we need to cast the builder to a specific type
        // in `field_builder()` by calling `downcast_mut`.
        for f in builders.into_iter() {
            let raw_f = Box::into_raw(f);
            let raw_f_copy = raw_f;
            // NOTE(review): the two boxes below alias the same allocation.
            // Only one of them may ever run its destructor — the `Drop` impl
            // for `StructBuilder` forgets `field_builders` so the memory is
            // freed exactly once via `field_anys`. Fragile; handle with care.
            unsafe {
                field_anys.push(Box::from_raw(raw_f).into_box_any());
                field_builders.push(Box::from_raw(raw_f_copy));
            }
        }
        Self {
            fields,
            field_anys,
            field_builders,
            bitmap_builder: BooleanBufferBuilder::new(0),
            len: 0,
        }
    }
    /// Creates a `StructBuilder` with one child builder per field of
    /// `schema`, each pre-allocated for `capacity` elements.
    pub fn from_schema(schema: Schema, capacity: usize) -> Self {
        let fields = schema.fields();
        let mut builders = Vec::with_capacity(fields.len());
        for f in schema.fields() {
            builders.push(Self::from_field(f.clone(), capacity));
        }
        Self::new(schema.fields, builders)
    }
    /// Maps a field's data type to a freshly created builder for that type.
    /// Panics (or hits `unimplemented!`) for unsupported data types.
    fn from_field(f: Field, capacity: usize) -> Box<ArrayBuilder> {
        match f.data_type() {
            DataType::Null => unimplemented!(),
            DataType::Boolean => Box::new(BooleanBuilder::new(capacity)),
            DataType::Int8 => Box::new(Int8Builder::new(capacity)),
            DataType::Int16 => Box::new(Int16Builder::new(capacity)),
            DataType::Int32 => Box::new(Int32Builder::new(capacity)),
            DataType::Int64 => Box::new(Int64Builder::new(capacity)),
            DataType::UInt8 => Box::new(UInt8Builder::new(capacity)),
            DataType::UInt16 => Box::new(UInt16Builder::new(capacity)),
            DataType::UInt32 => Box::new(UInt32Builder::new(capacity)),
            DataType::UInt64 => Box::new(UInt64Builder::new(capacity)),
            DataType::Float32 => Box::new(Float32Builder::new(capacity)),
            DataType::Float64 => Box::new(Float64Builder::new(capacity)),
            DataType::Binary => Box::new(BinaryBuilder::new(capacity)),
            DataType::FixedSizeBinary(len) => {
                Box::new(FixedSizeBinaryBuilder::new(capacity, *len))
            }
            DataType::Utf8 => Box::new(StringBuilder::new(capacity)),
            DataType::Date32(DateUnit::Day) => Box::new(Date32Builder::new(capacity)),
            DataType::Date64(DateUnit::Millisecond) => {
                Box::new(Date64Builder::new(capacity))
            }
            DataType::Time32(TimeUnit::Second) => {
                Box::new(Time32SecondBuilder::new(capacity))
            }
            DataType::Time32(TimeUnit::Millisecond) => {
                Box::new(Time32MillisecondBuilder::new(capacity))
            }
            DataType::Time64(TimeUnit::Microsecond) => {
                Box::new(Time64MicrosecondBuilder::new(capacity))
            }
            DataType::Time64(TimeUnit::Nanosecond) => {
                Box::new(Time64NanosecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Second, _) => {
                Box::new(TimestampSecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Millisecond, _) => {
                Box::new(TimestampMillisecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Microsecond, _) => {
                Box::new(TimestampMicrosecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Nanosecond, _) => {
                Box::new(TimestampNanosecondBuilder::new(capacity))
            }
            DataType::Interval(IntervalUnit::YearMonth) => {
                Box::new(IntervalYearMonthBuilder::new(capacity))
            }
            DataType::Interval(IntervalUnit::DayTime) => {
                Box::new(IntervalDayTimeBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Second) => {
                Box::new(DurationSecondBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Millisecond) => {
                Box::new(DurationMillisecondBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Microsecond) => {
                Box::new(DurationMicrosecondBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Nanosecond) => {
                Box::new(DurationNanosecondBuilder::new(capacity))
            }
            DataType::Struct(fields) => {
                // Nested structs recurse into a nested StructBuilder.
                let schema = Schema::new(fields.clone());
                Box::new(Self::from_schema(schema, capacity))
            }
            t => panic!("Data type {:?} is not currently supported", t),
        }
    }
    /// Returns a mutable reference to the child field builder at index `i`.
    /// Result will be `None` if the input type `T` provided doesn't match the actual
    /// field builder's type.
    pub fn field_builder<T: ArrayBuilder>(&mut self, i: usize) -> Option<&mut T> {
        self.field_anys[i].downcast_mut::<T>()
    }
    /// Returns the number of fields for the struct this builder is building.
    pub fn num_fields(&self) -> usize {
        self.field_builders.len()
    }
    /// Appends an element (either null or non-null) to the struct. The actual elements
    /// should be appended for each child sub-array in a consistent way.
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.bitmap_builder.append(is_valid)?;
        self.len += 1;
        Ok(())
    }
    /// Appends a null element to the struct.
    pub fn append_null(&mut self) -> Result<()> {
        self.append(false)
    }
    /// Builds the `StructArray` and reset this builder.
    pub fn finish(&mut self) -> StructArray {
        // Finish every child builder and collect their data as children.
        let mut child_data = Vec::with_capacity(self.field_builders.len());
        for f in &mut self.field_builders {
            let arr = f.finish();
            child_data.push(arr.data());
        }
        let null_bit_buffer = self.bitmap_builder.finish();
        let null_count = self.len - bit_util::count_set_bits(null_bit_buffer.data());
        let mut builder = ArrayData::builder(DataType::Struct(self.fields.clone()))
            .len(self.len)
            .child_data(child_data);
        if null_count > 0 {
            // Only attach the validity bitmap when at least one slot is null.
            builder = builder
                .null_count(null_count)
                .null_bit_buffer(null_bit_buffer);
        }
        self.len = 0;
        StructArray::from(builder.build())
    }
}
impl Drop for StructBuilder {
    fn drop(&mut self) {
        // To avoid double drop on the field array builders.
        // `field_anys` and `field_builders` alias the same allocations (see
        // `StructBuilder::new`), so only one vector may run destructors:
        // the boxes in `field_builders` are leaked here and the memory is
        // released once via `field_anys`.
        let builders = std::mem::replace(&mut self.field_builders, Vec::new());
        std::mem::forget(builders);
    }
}
/// Array builder for `DictionaryArray`. For example to map a set of byte indices
/// to f32 values. Note that the use of a `HashMap` here will not scale to very large
/// arrays or result in an ordered dictionary.
pub struct PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    // Keys (indices into the dictionary), one per logical slot.
    keys_builder: PrimitiveBuilder<K>,
    // Distinct dictionary values, in first-seen order.
    values_builder: PrimitiveBuilder<V>,
    // Maps a value's byte representation to its key in `values_builder`.
    map: HashMap<Box<[u8]>, K::Native>,
}
impl<K, V> PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    /// Creates a new `PrimitiveDictionaryBuilder` from a keys builder and a value builder.
    pub fn new(
        keys_builder: PrimitiveBuilder<K>,
        values_builder: PrimitiveBuilder<V>,
    ) -> Self {
        Self {
            keys_builder,
            values_builder,
            // Starts empty; populated lazily as values are appended.
            map: HashMap::new(),
        }
    }
}
impl<K, V> ArrayBuilder for PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        // One key is appended per logical slot, so the keys builder drives
        // the overall length.
        self.keys_builder.len()
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `finish`, then erases the concrete array
        // type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl<K, V> PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    /// Append a primitive value to the array. Return an existing index
    /// if already present in the values array or a new index if the
    /// value is appended to the values array.
    pub fn append(&mut self, value: V::Native) -> Result<K::Native> {
        if let Some(&key) = self.map.get(value.to_byte_slice()) {
            // Value seen before: emit its existing key.
            self.keys_builder.append_value(key)?;
            Ok(key)
        } else {
            // First occurrence: the new key is the current length of the
            // dictionary values array; fail if it overflows the key type.
            let key = K::Native::from_usize(self.values_builder.len())
                .ok_or(ArrowError::DictionaryKeyOverflowError)?;
            self.values_builder.append_value(value)?;
            // `key` already has type `K::Native`; the former `as K::Native`
            // cast was redundant and has been removed.
            self.keys_builder.append_value(key)?;
            self.map.insert(value.to_byte_slice().into(), key);
            Ok(key)
        }
    }
    /// Appends a null slot to the keys array.
    pub fn append_null(&mut self) -> Result<()> {
        self.keys_builder.append_null()
    }
    /// Builds the `DictionaryArray` and reset this builder.
    pub fn finish(&mut self) -> DictionaryArray<K> {
        // The lookup map is only meaningful while building; clear it so a
        // reused builder starts with a fresh dictionary.
        self.map.clear();
        let value_ref: ArrayRef = Arc::new(self.values_builder.finish());
        self.keys_builder.finish_dict(value_ref)
    }
}
/// Array builder for `DictionaryArray` of strings: maps each distinct string
/// to an integer key. Note that the use of a `HashMap` here will not scale to
/// very large arrays or result in an ordered dictionary.
pub struct StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    // Keys (indices into the dictionary), one per logical slot.
    keys_builder: PrimitiveBuilder<K>,
    // Distinct dictionary strings, in insertion order.
    values_builder: StringBuilder,
    // Maps a string's UTF-8 bytes to its key in `values_builder`.
    map: HashMap<Box<[u8]>, K::Native>,
}
impl<K> StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    /// Creates a new `StringDictionaryBuilder` from a keys builder and a value builder.
    pub fn new(keys_builder: PrimitiveBuilder<K>, values_builder: StringBuilder) -> Self {
        Self {
            keys_builder,
            values_builder,
            map: HashMap::new(),
        }
    }
    /// Creates a new `StringDictionaryBuilder` from a keys builder and a dictionary
    /// which is initialized with the given values.
    /// The indices of those dictionary values are used as keys.
    ///
    /// # Example
    ///
    /// ```
    /// use arrow::datatypes::Int16Type;
    /// use arrow::array::{StringArray, StringDictionaryBuilder, PrimitiveBuilder};
    /// use std::convert::TryFrom;
    ///
    /// let dictionary_values = StringArray::try_from(vec![None, Some("abc"), Some("def")]).unwrap();
    ///
    /// let mut builder = StringDictionaryBuilder::new_with_dictionary(PrimitiveBuilder::<Int16Type>::new(3), &dictionary_values).unwrap();
    /// builder.append("def").unwrap();
    /// builder.append_null().unwrap();
    /// builder.append("abc").unwrap();
    ///
    /// let dictionary_array = builder.finish();
    ///
    /// let keys: Vec<Option<i16>> = dictionary_array.keys().collect();
    ///
    /// assert_eq!(keys, vec![Some(2), None, Some(1)]);
    /// ```
    pub fn new_with_dictionary(
        keys_builder: PrimitiveBuilder<K>,
        dictionary_values: &StringArray,
    ) -> Result<Self> {
        let dict_len = dictionary_values.len();
        let mut values_builder =
            StringBuilder::with_capacity(dict_len, dictionary_values.value_data().len());
        let mut map: HashMap<Box<[u8]>, K::Native> = HashMap::with_capacity(dict_len);
        for i in 0..dict_len {
            if dictionary_values.is_valid(i) {
                let value = dictionary_values.value(i);
                // Pre-seed the lookup map: the value's position in the
                // dictionary becomes its key.
                map.insert(
                    value.as_bytes().into(),
                    K::Native::from_usize(i)
                        .ok_or(ArrowError::DictionaryKeyOverflowError)?,
                );
                values_builder.append_value(value)?;
            } else {
                // Keep null dictionary entries so indices stay aligned.
                values_builder.append_null()?;
            }
        }
        Ok(Self {
            keys_builder,
            values_builder,
            map,
        })
    }
}
impl<K> ArrayBuilder for StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    /// Returns the builder as a non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as a mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        // One key is appended per logical slot, so the keys builder drives
        // the overall length.
        self.keys_builder.len()
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Resolves to the inherent `finish`, then erases the concrete array
        // type behind `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl<K> StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    /// Append a string to the array. Return an existing index
    /// if already present in the values array or a new index if the
    /// value is appended to the values array.
    pub fn append(&mut self, value: &str) -> Result<K::Native> {
        if let Some(&key) = self.map.get(value.as_bytes()) {
            // Value seen before: emit its existing key.
            self.keys_builder.append_value(key)?;
            Ok(key)
        } else {
            // First occurrence: the new key is the current length of the
            // dictionary values array; fail if it overflows the key type.
            let key = K::Native::from_usize(self.values_builder.len())
                .ok_or(ArrowError::DictionaryKeyOverflowError)?;
            self.values_builder.append_value(value)?;
            // `key` already has type `K::Native`; the former `as K::Native`
            // cast was redundant and has been removed.
            self.keys_builder.append_value(key)?;
            self.map.insert(value.as_bytes().into(), key);
            Ok(key)
        }
    }
    /// Appends a null slot to the keys array.
    pub fn append_null(&mut self) -> Result<()> {
        self.keys_builder.append_null()
    }
    /// Builds the `DictionaryArray` and reset this builder.
    pub fn finish(&mut self) -> DictionaryArray<K> {
        // The lookup map is only meaningful while building; clear it so a
        // reused builder starts with a fresh dictionary.
        self.map.clear();
        let value_ref: ArrayRef = Arc::new(self.values_builder.finish());
        self.keys_builder.finish_dict(value_ref)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::array::Array;
use crate::bitmap::Bitmap;
use std::convert::TryFrom;
// A new builder holds no elements; `capacity` reflects the rounded-up
// pre-allocation (16 i32 slots = 64 bytes).
#[test]
fn test_builder_i32_empty() {
    let mut b = Int32BufferBuilder::new(5);
    assert_eq!(0, b.len());
    assert_eq!(16, b.capacity());
    let a = b.finish();
    assert_eq!(0, a.len());
}
// Appending to a zero-capacity builder allocates on demand; the finished
// buffer length is in bytes (one i32 = 4 bytes).
#[test]
fn test_builder_i32_alloc_zero_bytes() {
    let mut b = Int32BufferBuilder::new(0);
    b.append(123).unwrap();
    let a = b.finish();
    assert_eq!(4, a.len());
}
// Five appended i32 values fit within the initial capacity and produce a
// 20-byte buffer.
#[test]
fn test_builder_i32() {
    let mut b = Int32BufferBuilder::new(5);
    for i in 0..5 {
        b.append(i).unwrap();
    }
    assert_eq!(16, b.capacity());
    let a = b.finish();
    assert_eq!(20, a.len());
}
// Appending past the initial capacity grows the underlying buffer.
#[test]
fn test_builder_i32_grow_buffer() {
    let mut b = Int32BufferBuilder::new(2);
    assert_eq!(16, b.capacity());
    for i in 0..20 {
        b.append(i).unwrap();
    }
    assert_eq!(32, b.capacity());
    let a = b.finish();
    assert_eq!(80, a.len());
}
// `finish` hands back the accumulated buffer and resets the builder so it
// can be reused to build another buffer.
#[test]
fn test_builder_finish() {
    let mut b = Int32BufferBuilder::new(5);
    assert_eq!(16, b.capacity());
    for i in 0..10 {
        b.append(i).unwrap();
    }
    let mut a = b.finish();
    assert_eq!(40, a.len());
    assert_eq!(0, b.len());
    assert_eq!(0, b.capacity());
    // Try build another buffer after cleaning up.
    for i in 0..20 {
        b.append(i).unwrap()
    }
    assert_eq!(32, b.capacity());
    a = b.finish();
    assert_eq!(80, a.len());
}
#[test]
fn test_reserve() {
let mut b = UInt8BufferBuilder::new(2);
assert_eq!(64, b.capacity());
b.reserve(64).unwrap();
assert_eq!(64, b.capacity());
b.reserve(65).unwrap();
assert_eq!(128, b.capacity());
let mut b = Int32BufferBuilder::new(2);
assert_eq!(16, b.capacity());
b.reserve(16).unwrap();
assert_eq!(16, b.capacity());
b.reserve(17).unwrap();
assert_eq!(32, b.capacity());
}
#[test]
fn test_append_slice() {
let mut b = UInt8BufferBuilder::new(0);
b.append_slice("Hello, ".as_bytes()).unwrap();
b.append_slice("World!".as_bytes()).unwrap();
let buffer = b.finish();
assert_eq!(13, buffer.len());
let mut b = Int32BufferBuilder::new(0);
b.append_slice(&[32, 54]).unwrap();
let buffer = b.finish();
assert_eq!(8, buffer.len());
}
#[test]
fn test_write_bytes() {
let mut b = BooleanBufferBuilder::new(4);
b.append(false).unwrap();
b.append(true).unwrap();
b.append(false).unwrap();
b.append(true).unwrap();
assert_eq!(4, b.len());
assert_eq!(512, b.capacity());
let buffer = b.finish();
assert_eq!(1, buffer.len());
let mut b = BooleanBufferBuilder::new(4);
b.append_slice(&[false, true, false, true]).unwrap();
assert_eq!(4, b.len());
assert_eq!(512, b.capacity());
let buffer = b.finish();
assert_eq!(1, buffer.len());
}
#[test]
fn test_write_bytes_i32() {
let mut b = Int32BufferBuilder::new(4);
let bytes = [8, 16, 32, 64].to_byte_slice();
b.write_bytes(bytes, 4).unwrap();
assert_eq!(4, b.len());
assert_eq!(16, b.capacity());
let buffer = b.finish();
assert_eq!(16, buffer.len());
}
#[test]
#[should_panic(expected = "Could not write to Buffer, not big enough")]
fn test_write_too_many_bytes() {
let mut b = Int32BufferBuilder::new(0);
let bytes = [8, 16, 32, 64].to_byte_slice();
b.write_bytes(bytes, 4).unwrap();
}
#[test]
fn test_boolean_array_builder_append_slice() {
let arr1 =
BooleanArray::from(vec![Some(true), Some(false), None, None, Some(false)]);
let mut builder = BooleanArray::builder(0);
builder.append_slice(&[true, false]).unwrap();
builder.append_null().unwrap();
builder.append_null().unwrap();
builder.append_value(false).unwrap();
let arr2 = builder.finish();
assert_eq!(arr1.len(), arr2.len());
assert_eq!(arr1.offset(), arr2.offset());
assert_eq!(arr1.null_count(), arr2.null_count());
for i in 0..5 {
assert_eq!(arr1.is_null(i), arr2.is_null(i));
assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
if arr1.is_valid(i) {
assert_eq!(arr1.value(i), arr2.value(i));
}
}
}
#[test]
fn test_boolean_builder_increases_buffer_len() {
// 00000010 01001000
let buf = Buffer::from([72_u8, 2_u8]);
let mut builder = BooleanBufferBuilder::new(8);
for i in 0..10 {
if i == 3 || i == 6 || i == 9 {
builder.append(true).unwrap();
} else {
builder.append(false).unwrap();
}
}
let buf2 = builder.finish();
assert_eq!(buf.len(), buf2.len());
assert_eq!(buf.data(), buf2.data());
}
#[test]
fn test_primitive_array_builder_i32() {
let mut builder = Int32Array::builder(5);
for i in 0..5 {
builder.append_value(i).unwrap();
}
let arr = builder.finish();
assert_eq!(5, arr.len());
assert_eq!(0, arr.offset());
assert_eq!(0, arr.null_count());
for i in 0..5 {
assert!(!arr.is_null(i));
assert!(arr.is_valid(i));
assert_eq!(i as i32, arr.value(i));
}
}
#[test]
fn test_primitive_array_builder_date32() {
let mut builder = Date32Array::builder(5);
for i in 0..5 {
builder.append_value(i).unwrap();
}
let arr = builder.finish();
assert_eq!(5, arr.len());
assert_eq!(0, arr.offset());
assert_eq!(0, arr.null_count());
for i in 0..5 {
assert!(!arr.is_null(i));
assert!(arr.is_valid(i));
assert_eq!(i as i32, arr.value(i));
}
}
#[test]
fn test_primitive_array_builder_timestamp_second() {
let mut builder = TimestampSecondArray::builder(5);
for i in 0..5 {
builder.append_value(i).unwrap();
}
let arr = builder.finish();
assert_eq!(5, arr.len());
assert_eq!(0, arr.offset());
assert_eq!(0, arr.null_count());
for i in 0..5 {
assert!(!arr.is_null(i));
assert!(arr.is_valid(i));
assert_eq!(i as i64, arr.value(i));
}
}
#[test]
fn test_primitive_array_builder_bool() {
// 00000010 01001000
let buf = Buffer::from([72_u8, 2_u8]);
let mut builder = BooleanArray::builder(10);
for i in 0..10 {
if i == 3 || i == 6 || i == 9 {
builder.append_value(true).unwrap();
} else {
builder.append_value(false).unwrap();
}
}
let arr = builder.finish();
assert_eq!(buf, arr.values());
assert_eq!(10, arr.len());
assert_eq!(0, arr.offset());
assert_eq!(0, arr.null_count());
for i in 0..10 {
assert!(!arr.is_null(i));
assert!(arr.is_valid(i));
assert_eq!(i == 3 || i == 6 || i == 9, arr.value(i), "failed at {}", i)
}
}
#[test]
fn test_primitive_array_builder_append_option() {
let arr1 = Int32Array::from(vec![Some(0), None, Some(2), None, Some(4)]);
let mut builder = Int32Array::builder(5);
builder.append_option(Some(0)).unwrap();
builder.append_option(None).unwrap();
builder.append_option(Some(2)).unwrap();
builder.append_option(None).unwrap();
builder.append_option(Some(4)).unwrap();
let arr2 = builder.finish();
assert_eq!(arr1.len(), arr2.len());
assert_eq!(arr1.offset(), arr2.offset());
assert_eq!(arr1.null_count(), arr2.null_count());
for i in 0..5 {
assert_eq!(arr1.is_null(i), arr2.is_null(i));
assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
if arr1.is_valid(i) {
assert_eq!(arr1.value(i), arr2.value(i));
}
}
}
#[test]
fn test_primitive_array_builder_append_null() {
let arr1 = Int32Array::from(vec![Some(0), Some(2), None, None, Some(4)]);
let mut builder = Int32Array::builder(5);
builder.append_value(0).unwrap();
builder.append_value(2).unwrap();
builder.append_null().unwrap();
builder.append_null().unwrap();
builder.append_value(4).unwrap();
let arr2 = builder.finish();
assert_eq!(arr1.len(), arr2.len());
assert_eq!(arr1.offset(), arr2.offset());
assert_eq!(arr1.null_count(), arr2.null_count());
for i in 0..5 {
assert_eq!(arr1.is_null(i), arr2.is_null(i));
assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
if arr1.is_valid(i) {
assert_eq!(arr1.value(i), arr2.value(i));
}
}
}
#[test]
fn test_primitive_array_builder_append_slice() {
let arr1 = Int32Array::from(vec![Some(0), Some(2), None, None, Some(4)]);
let mut builder = Int32Array::builder(5);
builder.append_slice(&[0, 2]).unwrap();
builder.append_null().unwrap();
builder.append_null().unwrap();
builder.append_value(4).unwrap();
let arr2 = builder.finish();
assert_eq!(arr1.len(), arr2.len());
assert_eq!(arr1.offset(), arr2.offset());
assert_eq!(arr1.null_count(), arr2.null_count());
for i in 0..5 {
assert_eq!(arr1.is_null(i), arr2.is_null(i));
assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
if arr1.is_valid(i) {
assert_eq!(arr1.value(i), arr2.value(i));
}
}
}
#[test]
fn test_primitive_array_builder_finish() {
let mut builder = Int32Builder::new(5);
builder.append_slice(&[2, 4, 6, 8]).unwrap();
let mut arr = builder.finish();
assert_eq!(4, arr.len());
assert_eq!(0, builder.len());
builder.append_slice(&[1, 3, 5, 7, 9]).unwrap();
arr = builder.finish();
assert_eq!(5, arr.len());
assert_eq!(0, builder.len());
}
#[test]
fn test_list_array_builder() {
let values_builder = Int32Builder::new(10);
let mut builder = ListBuilder::new(values_builder);
// [[0, 1, 2], [3, 4, 5], [6, 7]]
builder.values().append_value(0).unwrap();
builder.values().append_value(1).unwrap();
builder.values().append_value(2).unwrap();
builder.append(true).unwrap();
builder.values().append_value(3).unwrap();
builder.values().append_value(4).unwrap();
builder.values().append_value(5).unwrap();
builder.append(true).unwrap();
builder.values().append_value(6).unwrap();
builder.values().append_value(7).unwrap();
builder.append(true).unwrap();
let list_array = builder.finish();
let values = list_array.values().data().buffers()[0].clone();
assert_eq!(
Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()),
values
);
assert_eq!(
Buffer::from(&[0, 3, 6, 8].to_byte_slice()),
list_array.data().buffers()[0].clone()
);
assert_eq!(DataType::Int32, list_array.value_type());
assert_eq!(3, list_array.len());
assert_eq!(0, list_array.null_count());
assert_eq!(6, list_array.value_offset(2));
assert_eq!(2, list_array.value_length(2));
for i in 0..3 {
assert!(list_array.is_valid(i));
assert!(!list_array.is_null(i));
}
}
#[test]
fn test_list_array_builder_nulls() {
let values_builder = Int32Builder::new(10);
let mut builder = ListBuilder::new(values_builder);
// [[0, 1, 2], null, [3, null, 5], [6, 7]]
builder.values().append_value(0).unwrap();
builder.values().append_value(1).unwrap();
builder.values().append_value(2).unwrap();
builder.append(true).unwrap();
builder.append(false).unwrap();
builder.values().append_value(3).unwrap();
builder.values().append_null().unwrap();
builder.values().append_value(5).unwrap();
builder.append(true).unwrap();
builder.values().append_value(6).unwrap();
builder.values().append_value(7).unwrap();
builder.append(true).unwrap();
let list_array = builder.finish();
assert_eq!(DataType::Int32, list_array.value_type());
assert_eq!(4, list_array.len());
assert_eq!(1, list_array.null_count());
assert_eq!(3, list_array.value_offset(2));
assert_eq!(3, list_array.value_length(2));
}
#[test]
fn test_fixed_size_list_array_builder() {
let values_builder = Int32Builder::new(10);
let mut builder = FixedSizeListBuilder::new(values_builder, 3);
// [[0, 1, 2], null, [3, null, 5], [6, 7, null]]
builder.values().append_value(0).unwrap();
builder.values().append_value(1).unwrap();
builder.values().append_value(2).unwrap();
builder.append(true).unwrap();
builder.values().append_null().unwrap();
builder.values().append_null().unwrap();
builder.values().append_null().unwrap();
builder.append(false).unwrap();
builder.values().append_value(3).unwrap();
builder.values().append_null().unwrap();
builder.values().append_value(5).unwrap();
builder.append(true).unwrap();
builder.values().append_value(6).unwrap();
builder.values().append_value(7).unwrap();
builder.values().append_null().unwrap();
builder.append(true).unwrap();
let list_array = builder.finish();
assert_eq!(DataType::Int32, list_array.value_type());
assert_eq!(4, list_array.len());
assert_eq!(1, list_array.null_count());
assert_eq!(6, list_array.value_offset(2));
assert_eq!(3, list_array.value_length());
}
#[test]
fn test_list_array_builder_finish() {
let values_builder = Int32Array::builder(5);
let mut builder = ListBuilder::new(values_builder);
builder.values().append_slice(&[1, 2, 3]).unwrap();
builder.append(true).unwrap();
builder.values().append_slice(&[4, 5, 6]).unwrap();
builder.append(true).unwrap();
let mut arr = builder.finish();
assert_eq!(2, arr.len());
assert_eq!(0, builder.len());
builder.values().append_slice(&[7, 8, 9]).unwrap();
builder.append(true).unwrap();
arr = builder.finish();
assert_eq!(1, arr.len());
assert_eq!(0, builder.len());
}
#[test]
fn test_fixed_size_list_array_builder_empty() {
let values_builder = Int32Array::builder(5);
let mut builder = FixedSizeListBuilder::new(values_builder, 3);
let arr = builder.finish();
assert_eq!(0, arr.len());
assert_eq!(0, builder.len());
}
#[test]
fn test_fixed_size_list_array_builder_finish() {
let values_builder = Int32Array::builder(5);
let mut builder = FixedSizeListBuilder::new(values_builder, 3);
builder.values().append_slice(&[1, 2, 3]).unwrap();
builder.append(true).unwrap();
builder.values().append_slice(&[4, 5, 6]).unwrap();
builder.append(true).unwrap();
let mut arr = builder.finish();
assert_eq!(2, arr.len());
assert_eq!(0, builder.len());
builder.values().append_slice(&[7, 8, 9]).unwrap();
builder.append(true).unwrap();
arr = builder.finish();
assert_eq!(1, arr.len());
assert_eq!(0, builder.len());
}
#[test]
fn test_list_list_array_builder() {
let primitive_builder = Int32Builder::new(10);
let values_builder = ListBuilder::new(primitive_builder);
let mut builder = ListBuilder::new(values_builder);
// [[[1, 2], [3, 4]], [[5, 6, 7], null, [8]], null, [[9, 10]]]
builder.values().values().append_value(1).unwrap();
builder.values().values().append_value(2).unwrap();
builder.values().append(true).unwrap();
builder.values().values().append_value(3).unwrap();
builder.values().values().append_value(4).unwrap();
builder.values().append(true).unwrap();
builder.append(true).unwrap();
builder.values().values().append_value(5).unwrap();
builder.values().values().append_value(6).unwrap();
builder.values().values().append_value(7).unwrap();
builder.values().append(true).unwrap();
builder.values().append(false).unwrap();
builder.values().values().append_value(8).unwrap();
builder.values().append(true).unwrap();
builder.append(true).unwrap();
builder.append(false).unwrap();
builder.values().values().append_value(9).unwrap();
builder.values().values().append_value(10).unwrap();
builder.values().append(true).unwrap();
builder.append(true).unwrap();
let list_array = builder.finish();
assert_eq!(4, list_array.len());
assert_eq!(1, list_array.null_count());
assert_eq!(
Buffer::from(&[0, 2, 5, 5, 6].to_byte_slice()),
list_array.data().buffers()[0].clone()
);
assert_eq!(6, list_array.values().data().len());
assert_eq!(1, list_array.values().data().null_count());
assert_eq!(
Buffer::from(&[0, 2, 4, 7, 7, 8, 10].to_byte_slice()),
list_array.values().data().buffers()[0].clone()
);
assert_eq!(10, list_array.values().data().child_data()[0].len());
assert_eq!(0, list_array.values().data().child_data()[0].null_count());
assert_eq!(
Buffer::from(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10].to_byte_slice()),
list_array.values().data().child_data()[0].buffers()[0].clone()
);
}
#[test]
fn test_binary_array_builder() {
let mut builder = BinaryBuilder::new(20);
builder.append_byte(b'h').unwrap();
builder.append_byte(b'e').unwrap();
builder.append_byte(b'l').unwrap();
builder.append_byte(b'l').unwrap();
builder.append_byte(b'o').unwrap();
builder.append(true).unwrap();
builder.append(true).unwrap();
builder.append_byte(b'w').unwrap();
builder.append_byte(b'o').unwrap();
builder.append_byte(b'r').unwrap();
builder.append_byte(b'l').unwrap();
builder.append_byte(b'd').unwrap();
builder.append(true).unwrap();
let array = builder.finish();
let binary_array = BinaryArray::from(array);
assert_eq!(3, binary_array.len());
assert_eq!(0, binary_array.null_count());
assert_eq!([b'h', b'e', b'l', b'l', b'o'], binary_array.value(0));
assert_eq!([] as [u8; 0], binary_array.value(1));
assert_eq!([b'w', b'o', b'r', b'l', b'd'], binary_array.value(2));
assert_eq!(5, binary_array.value_offset(2));
assert_eq!(5, binary_array.value_length(2));
}
#[test]
fn test_string_array_builder() {
let mut builder = StringBuilder::new(20);
builder.append_value("hello").unwrap();
builder.append(true).unwrap();
builder.append_value("world").unwrap();
let array = builder.finish();
let string_array = StringArray::from(array);
assert_eq!(3, string_array.len());
assert_eq!(0, string_array.null_count());
assert_eq!("hello", string_array.value(0));
assert_eq!("", string_array.value(1));
assert_eq!("world", string_array.value(2));
assert_eq!(5, string_array.value_offset(2));
assert_eq!(5, string_array.value_length(2));
}
#[test]
fn test_fixed_size_binary_builder() {
let mut builder = FixedSizeBinaryBuilder::new(15, 5);
// [b"hello", null, "arrow"]
builder.append_value(b"hello").unwrap();
builder.append_null().unwrap();
builder.append_value(b"arrow").unwrap();
let fixed_size_binary_array: FixedSizeBinaryArray = builder.finish();
assert_eq!(
&DataType::FixedSizeBinary(5),
fixed_size_binary_array.data_type()
);
assert_eq!(3, fixed_size_binary_array.len());
assert_eq!(1, fixed_size_binary_array.null_count());
assert_eq!(10, fixed_size_binary_array.value_offset(2));
assert_eq!(5, fixed_size_binary_array.value_length());
}
#[test]
fn test_string_array_builder_finish() {
let mut builder = StringBuilder::new(10);
builder.append_value("hello").unwrap();
builder.append_value("world").unwrap();
let mut arr = builder.finish();
assert_eq!(2, arr.len());
assert_eq!(0, builder.len());
builder.append_value("arrow").unwrap();
arr = builder.finish();
assert_eq!(1, arr.len());
assert_eq!(0, builder.len());
}
#[test]
fn test_string_array_builder_append_string() {
let mut builder = StringBuilder::new(20);
let var = "hello".to_owned();
builder.append_value(&var).unwrap();
builder.append(true).unwrap();
builder.append_value("world").unwrap();
let array = builder.finish();
let string_array = StringArray::from(array);
assert_eq!(3, string_array.len());
assert_eq!(0, string_array.null_count());
assert_eq!("hello", string_array.value(0));
assert_eq!("", string_array.value(1));
assert_eq!("world", string_array.value(2));
assert_eq!(5, string_array.value_offset(2));
assert_eq!(5, string_array.value_length(2));
}
#[test]
fn test_struct_array_builder() {
let string_builder = StringBuilder::new(4);
let int_builder = Int32Builder::new(4);
let mut fields = Vec::new();
let mut field_builders = Vec::new();
fields.push(Field::new("f1", DataType::Utf8, false));
field_builders.push(Box::new(string_builder) as Box<ArrayBuilder>);
fields.push(Field::new("f2", DataType::Int32, false));
field_builders.push(Box::new(int_builder) as Box<ArrayBuilder>);
let mut builder = StructBuilder::new(fields, field_builders);
assert_eq!(2, builder.num_fields());
let string_builder = builder
.field_builder::<StringBuilder>(0)
.expect("builder at field 0 should be string builder");
string_builder.append_value("joe").unwrap();
string_builder.append_null().unwrap();
string_builder.append_null().unwrap();
string_builder.append_value("mark").unwrap();
let int_builder = builder
.field_builder::<Int32Builder>(1)
.expect("builder at field 1 should be int builder");
int_builder.append_value(1).unwrap();
int_builder.append_value(2).unwrap();
int_builder.append_null().unwrap();
int_builder.append_value(4).unwrap();
builder.append(true).unwrap();
builder.append(true).unwrap();
builder.append_null().unwrap();
builder.append(true).unwrap();
let arr = builder.finish();
let struct_data = arr.data();
assert_eq!(4, struct_data.len());
assert_eq!(1, struct_data.null_count());
assert_eq!(
&Some(Bitmap::from(Buffer::from(&[11_u8]))),
struct_data.null_bitmap()
);
let expected_string_data = ArrayData::builder(DataType::Utf8)
.len(4)
.null_count(2)
.null_bit_buffer(Buffer::from(&[9_u8]))
.add_buffer(Buffer::from(&[0, 3, 3, 3, 7].to_byte_slice()))
.add_buffer(Buffer::from("joemark".as_bytes()))
.build();
let expected_int_data = ArrayData::builder(DataType::Int32)
.len(4)
.null_count(1)
.null_bit_buffer(Buffer::from(&[11_u8]))
.add_buffer(Buffer::from(&[1, 2, 0, 4].to_byte_slice()))
.build();
assert_eq!(expected_string_data, arr.column(0).data());
// TODO: implement equality for ArrayData
assert_eq!(expected_int_data.len(), arr.column(1).data().len());
assert_eq!(
expected_int_data.null_count(),
arr.column(1).data().null_count()
);
assert_eq!(
expected_int_data.null_bitmap(),
arr.column(1).data().null_bitmap()
);
let expected_value_buf = expected_int_data.buffers()[0].clone();
let actual_value_buf = arr.column(1).data().buffers()[0].clone();
for i in 0..expected_int_data.len() {
if !expected_int_data.is_null(i) {
assert_eq!(
expected_value_buf.data()[i * 4..(i + 1) * 4],
actual_value_buf.data()[i * 4..(i + 1) * 4]
);
}
}
}
#[test]
fn test_struct_array_builder_finish() {
let int_builder = Int32Builder::new(10);
let bool_builder = BooleanBuilder::new(10);
let mut fields = Vec::new();
let mut field_builders = Vec::new();
fields.push(Field::new("f1", DataType::Int32, false));
field_builders.push(Box::new(int_builder) as Box<ArrayBuilder>);
fields.push(Field::new("f2", DataType::Boolean, false));
field_builders.push(Box::new(bool_builder) as Box<ArrayBuilder>);
let mut builder = StructBuilder::new(fields, field_builders);
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_slice(&[
false, true, false, true, false, true, false, true, false, true,
])
.unwrap();
// Append slot values - all are valid.
for _ in 0..10 {
assert!(builder.append(true).is_ok())
}
assert_eq!(10, builder.len());
let arr = builder.finish();
assert_eq!(10, arr.len());
assert_eq!(0, builder.len());
builder
.field_builder::<Int32Builder>(0)
.unwrap()
.append_slice(&[1, 3, 5, 7, 9])
.unwrap();
builder
.field_builder::<BooleanBuilder>(1)
.unwrap()
.append_slice(&[false, true, false, true, false])
.unwrap();
// Append slot values - all are valid.
for _ in 0..5 {
assert!(builder.append(true).is_ok())
}
assert_eq!(5, builder.len());
let arr = builder.finish();
assert_eq!(5, arr.len());
assert_eq!(0, builder.len());
}
#[test]
fn test_struct_array_builder_from_schema() {
let mut fields = Vec::new();
fields.push(Field::new("f1", DataType::Float32, false));
fields.push(Field::new("f2", DataType::Utf8, false));
let mut sub_fields = Vec::new();
sub_fields.push(Field::new("g1", DataType::Int32, false));
sub_fields.push(Field::new("g2", DataType::Boolean, false));
let struct_type = DataType::Struct(sub_fields);
fields.push(Field::new("f3", struct_type, false));
let mut builder = StructBuilder::from_schema(Schema::new(fields), 5);
assert_eq!(3, builder.num_fields());
assert!(builder.field_builder::<Float32Builder>(0).is_some());
assert!(builder.field_builder::<StringBuilder>(1).is_some());
assert!(builder.field_builder::<StructBuilder>(2).is_some());
}
#[test]
#[should_panic(expected = "Data type List(Int64) is not currently supported")]
fn test_struct_array_builder_from_schema_unsupported_type() {
let mut fields = Vec::new();
fields.push(Field::new("f1", DataType::Int16, false));
let list_type = DataType::List(Box::new(DataType::Int64));
fields.push(Field::new("f2", list_type, false));
let _ = StructBuilder::from_schema(Schema::new(fields), 5);
}
#[test]
fn test_struct_array_builder_field_builder_type_mismatch() {
let int_builder = Int32Builder::new(10);
let mut fields = Vec::new();
let mut field_builders = Vec::new();
fields.push(Field::new("f1", DataType::Int32, false));
field_builders.push(Box::new(int_builder) as Box<ArrayBuilder>);
let mut builder = StructBuilder::new(fields, field_builders);
assert!(builder.field_builder::<BinaryBuilder>(0).is_none());
}
#[test]
fn test_primitive_dictionary_builder() {
let key_builder = PrimitiveBuilder::<UInt8Type>::new(3);
let value_builder = PrimitiveBuilder::<UInt32Type>::new(2);
let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
builder.append(12345678).unwrap();
builder.append_null().unwrap();
builder.append(22345678).unwrap();
let array = builder.finish();
// Keys are strongly typed.
let aks: Vec<_> = array.keys().collect();
// Values are polymorphic and so require a downcast.
let av = array.values();
let ava: &UInt32Array = av.as_any().downcast_ref::<UInt32Array>().unwrap();
let avs: &[u32] = ava.value_slice(0, array.values().len());
assert_eq!(array.is_null(0), false);
assert_eq!(array.is_null(1), true);
assert_eq!(array.is_null(2), false);
assert_eq!(aks, vec![Some(0), None, Some(1)]);
assert_eq!(avs, &[12345678, 22345678]);
}
#[test]
fn test_string_dictionary_builder() {
let key_builder = PrimitiveBuilder::<Int8Type>::new(5);
let value_builder = StringBuilder::new(2);
let mut builder = StringDictionaryBuilder::new(key_builder, value_builder);
builder.append("abc").unwrap();
builder.append_null().unwrap();
builder.append("def").unwrap();
builder.append("def").unwrap();
builder.append("abc").unwrap();
let array = builder.finish();
// Keys are strongly typed.
let aks: Vec<_> = array.keys().collect();
// Values are polymorphic and so require a downcast.
let av = array.values();
let ava: &StringArray = av.as_any().downcast_ref::<StringArray>().unwrap();
assert_eq!(aks, vec![Some(0), None, Some(1), Some(1), Some(0)]);
assert_eq!(ava.value(0), "abc");
assert_eq!(ava.value(1), "def");
}
#[test]
fn test_string_dictionary_builder_with_existing_dictionary() {
let dictionary =
StringArray::try_from(vec![None, Some("def"), Some("abc")]).unwrap();
let key_builder = PrimitiveBuilder::<Int8Type>::new(6);
let mut builder =
StringDictionaryBuilder::new_with_dictionary(key_builder, &dictionary)
.unwrap();
builder.append("abc").unwrap();
builder.append_null().unwrap();
builder.append("def").unwrap();
builder.append("def").unwrap();
builder.append("abc").unwrap();
builder.append("ghi").unwrap();
let array = builder.finish();
// Keys are strongly typed.
let aks: Vec<_> = array.keys().collect();
// Values are polymorphic and so require a downcast.
let av = array.values();
let ava: &StringArray = av.as_any().downcast_ref::<StringArray>().unwrap();
assert_eq!(aks, vec![Some(2), None, Some(1), Some(1), Some(2), Some(3)]);
assert_eq!(ava.is_valid(0), false);
assert_eq!(ava.value(1), "def");
assert_eq!(ava.value(2), "abc");
assert_eq!(ava.value(3), "ghi");
}
#[test]
fn test_string_dictionary_builder_with_reserved_null_value() {
let dictionary = StringArray::try_from(vec![None]).unwrap();
let key_builder = PrimitiveBuilder::<Int16Type>::new(4);
let mut builder =
StringDictionaryBuilder::new_with_dictionary(key_builder, &dictionary)
.unwrap();
builder.append("abc").unwrap();
builder.append_null().unwrap();
builder.append("def").unwrap();
builder.append("abc").unwrap();
let array = builder.finish();
assert_eq!(array.is_null(1), true);
assert_eq!(array.is_valid(1), false);
let keys: Int16Array = array.data().into();
assert_eq!(keys.value(0), 1);
assert_eq!(keys.is_null(1), true);
// zero initialization is currently guaranteed by Buffer allocation and resizing
assert_eq!(keys.value(1), 0);
assert_eq!(keys.value(2), 2);
assert_eq!(keys.value(3), 1);
}
#[test]
fn test_primitive_dictionary_overflow() {
let key_builder = PrimitiveBuilder::<UInt8Type>::new(257);
let value_builder = PrimitiveBuilder::<UInt32Type>::new(257);
let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
// 256 unique keys.
for i in 0..256 {
builder.append(i + 1000).unwrap();
}
// Special error if the key overflows (256th entry)
assert_eq!(
builder.append(1257),
Err(ArrowError::DictionaryKeyOverflowError)
);
}
}
ARROW-9047: [Rust] Fix a segfault when setting zero bits in a zero-length bitset.
If the mutable bitset is allocated with zero elements, it will have an
address that segfaults when accessed. Even if the number of bits to set
(n) is zero, the code still reads from this invalid address and crashes.
Closes #7360 from maxburke/rust_empty_bitset_fix
Authored-by: Max Burke <0706025b2bbcec1ed8d64822f4eccd96314938d0@urbanlogiq.com>
Signed-off-by: Neville Dipale <4e1ab218b24f06847d563ff3773c7517c139f863@gmail.com>
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Defines a [`BufferBuilder`](crate::array::BufferBuilder) capable
//! of creating a [`Buffer`](crate::buffer::Buffer) which can be used
//! as an internal buffer in an [`ArrayData`](crate::array::ArrayData)
//! object.
use std::any::Any;
use std::collections::HashMap;
use std::io::Write;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use crate::array::*;
use crate::buffer::{Buffer, MutableBuffer};
use crate::datatypes::*;
use crate::error::{ArrowError, Result};
use crate::util::bit_util;
/// Converts a `MutableBuffer` to a `BufferBuilder<T>`.
///
/// `slots` is the number of array slots currently represented in the `MutableBuffer`.
///
/// NOTE(review): the buffer's byte length is presumably expected to equal
/// `slots * size_of::<T::Native>()`; this is not checked here — callers
/// are responsible for passing a consistent pair.
pub(crate) fn mutable_buffer_to_builder<T: ArrowPrimitiveType>(
    mutable_buffer: MutableBuffer,
    slots: usize,
) -> BufferBuilder<T> {
    // Wraps the buffer directly — no copy is made.
    BufferBuilder::<T> {
        buffer: mutable_buffer,
        len: slots,
        _marker: PhantomData,
    }
}
/// Converts a `BufferBuilder<T>` into its underlying `MutableBuffer`.
///
/// `From` is not implemented because associated type bounds are unstable.
///
/// The builder's element count (`len`) is discarded; only the raw buffer
/// is returned.
pub(crate) fn builder_to_mutable_buffer<T: ArrowPrimitiveType>(
    builder: BufferBuilder<T>,
) -> MutableBuffer {
    builder.buffer
}
/// Builder for creating a [`Buffer`](crate::buffer::Buffer) object.
///
/// This builder is implemented for primitive types and creates a
/// buffer with a zero-copy `finish()` method.
///
/// See trait [`BufferBuilderTrait`](crate::array::BufferBuilderTrait)
/// for further documentation and examples.
///
/// A [`Buffer`](crate::buffer::Buffer) is the underlying data
/// structure of Arrow's [`Arrays`](crate::array::Array).
///
/// For all supported types, there are type definitions for the
/// generic version of `BufferBuilder<T>`, e.g. `UInt8BufferBuilder`.
///
/// # Example:
///
/// ```
/// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
///
/// # fn main() -> arrow::error::Result<()> {
/// let mut builder = UInt8BufferBuilder::new(100);
/// builder.append_slice(&[42, 43, 44]);
/// builder.append(45);
/// let buffer = builder.finish();
///
/// assert_eq!(unsafe { buffer.typed_data::<u8>() }, &[42, 43, 44, 45]);
/// # Ok(())
/// # }
/// ```
pub struct BufferBuilder<T: ArrowPrimitiveType> {
    /// Raw byte storage for the elements appended so far.
    buffer: MutableBuffer,
    /// Number of array elements (not bytes) currently in the buffer.
    len: usize,
    /// Zero-sized marker tying the builder to its element type `T`.
    _marker: PhantomData<T>,
}
/// Trait for simplifying the construction of [`Buffers`](crate::buffer::Buffer).
///
/// This trait is used mainly to offer separate implementations for
/// numeric types and boolean types, while still being able to call methods on
/// a buffer builder with a generic primitive type.
/// Separate implementations of this trait allow implementation details to be
/// added, e.g. the implementation for boolean types uses bit-packing.
pub trait BufferBuilderTrait<T: ArrowPrimitiveType> {
    /// Creates a new builder with initial capacity for _at least_ `capacity`
    /// elements of type `T`.
    ///
    /// The capacity can later be manually adjusted with the
    /// [`reserve()`](BufferBuilderTrait::reserve) method.
    /// Also the
    /// [`append()`](BufferBuilderTrait::append),
    /// [`append_slice()`](BufferBuilderTrait::append_slice) and
    /// [`advance()`](BufferBuilderTrait::advance)
    /// methods automatically increase the capacity if needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    ///
    /// assert!(builder.capacity() >= 10);
    /// ```
    fn new(capacity: usize) -> Self;
    /// Returns the current number of array elements in the internal buffer.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append(42);
    ///
    /// assert_eq!(builder.len(), 1);
    /// ```
    fn len(&self) -> usize;
    /// Returns the actual capacity (number of elements) of the internal buffer.
    ///
    /// Note: the internal capacity returned by this method might be larger than
    /// what you'd expect after setting the capacity in the `new()` or `reserve()`
    /// functions.
    fn capacity(&self) -> usize;
    /// Increases the number of elements in the internal buffer by `n`
    /// and resizes the buffer as needed.
    ///
    /// The values of the newly added elements are undefined.
    /// This method is usually used when appending `NULL` values to the buffer
    /// as they still require physical memory space.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.advance(2);
    ///
    /// assert_eq!(builder.len(), 2);
    /// ```
    fn advance(&mut self, n: usize) -> Result<()>;
    /// Reserves memory for _at least_ `n` more elements of type `T`.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.reserve(10);
    ///
    /// assert!(builder.capacity() >= 20);
    /// ```
    fn reserve(&mut self, n: usize) -> Result<()>;
    /// Appends a value of type `T` into the builder,
    /// growing the internal buffer as needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append(42);
    ///
    /// assert_eq!(builder.len(), 1);
    /// ```
    fn append(&mut self, value: T::Native) -> Result<()>;
    /// Appends a value of type `T` into the builder N times,
    /// growing the internal buffer as needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append_n(10, 42);
    ///
    /// assert_eq!(builder.len(), 10);
    /// ```
    fn append_n(&mut self, n: usize, value: T::Native) -> Result<()>;
    /// Appends a slice of type `T`, growing the internal buffer as needed.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append_slice(&[42, 44, 46]);
    ///
    /// assert_eq!(builder.len(), 3);
    /// ```
    fn append_slice(&mut self, slice: &[T::Native]) -> Result<()>;
    /// Resets this builder and returns an immutable [`Buffer`](crate::buffer::Buffer).
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{UInt8BufferBuilder, BufferBuilderTrait};
    ///
    /// let mut builder = UInt8BufferBuilder::new(10);
    /// builder.append_slice(&[42, 44, 46]);
    ///
    /// let buffer = builder.finish();
    ///
    /// assert_eq!(unsafe { buffer.typed_data::<u8>() }, &[42, 44, 46]);
    /// ```
    fn finish(&mut self) -> Buffer;
}
/// Generic implementation of the builder operations for all primitive types.
///
/// The `default fn` qualifiers rely on specialization (a nightly feature): the
/// bit-packed boolean `impl` below overrides these defaults for `BooleanType`.
impl<T: ArrowPrimitiveType> BufferBuilderTrait<T> for BufferBuilder<T> {
    default fn new(capacity: usize) -> Self {
        // `capacity` counts elements of `T`; the underlying buffer is sized in bytes.
        let buffer = MutableBuffer::new(capacity * mem::size_of::<T::Native>());
        Self {
            buffer,
            len: 0,
            _marker: PhantomData,
        }
    }

    // Not `default`: cannot be overridden, so the boolean specialization
    // below inherits this implementation as well.
    fn len(&self) -> usize {
        self.len
    }

    // Not `default`: shared with the boolean specialization. Converts the
    // buffer's byte capacity into a count of `T` elements via bit widths.
    fn capacity(&self) -> usize {
        let bit_capacity = self.buffer.capacity() * 8;
        bit_capacity / T::get_bit_width()
    }

    default fn advance(&mut self, i: usize) -> Result<()> {
        // Grow the buffer to cover `i` more elements; their contents are undefined.
        let new_buffer_len = (self.len + i) * mem::size_of::<T::Native>();
        self.buffer.resize(new_buffer_len)?;
        self.len += i;
        Ok(())
    }

    default fn reserve(&mut self, n: usize) -> Result<()> {
        // Ensure byte capacity for `len + n` elements in total.
        let new_capacity = self.len + n;
        let byte_capacity = mem::size_of::<T::Native>() * new_capacity;
        self.buffer.reserve(byte_capacity)?;
        Ok(())
    }

    default fn append(&mut self, v: T::Native) -> Result<()> {
        self.reserve(1)?;
        self.write_bytes(v.to_byte_slice(), 1)
    }

    default fn append_n(&mut self, n: usize, v: T::Native) -> Result<()> {
        // Reserve once up front, then write the value `n` times.
        self.reserve(n)?;
        for _ in 0..n {
            self.write_bytes(v.to_byte_slice(), 1)?;
        }
        Ok(())
    }

    default fn append_slice(&mut self, slice: &[T::Native]) -> Result<()> {
        let array_slots = slice.len();
        self.reserve(array_slots)?;
        self.write_bytes(slice.to_byte_slice(), array_slots)
    }

    default fn finish(&mut self) -> Buffer {
        // Swap in an empty buffer, reset the element count, and freeze the old
        // buffer into an immutable `Buffer`, leaving the builder reusable.
        let buf = std::mem::replace(&mut self.buffer, MutableBuffer::new(0));
        self.len = 0;
        buf.freeze()
    }
}
impl<T: ArrowPrimitiveType> BufferBuilder<T> {
    /// Writes `bytes` into the underlying buffer and advances `len` by
    /// `len_added` array elements on success. Any `io::Error` coming from the
    /// `Write` impl is translated into an Arrow `MemoryError`.
    fn write_bytes(&mut self, bytes: &[u8], len_added: usize) -> Result<()> {
        self.buffer.write(bytes).map_err(|_| {
            ArrowError::MemoryError(
                "Could not write to Buffer, not big enough".to_string(),
            )
        })?;
        self.len += len_added;
        Ok(())
    }
}
/// Bit-packed specialization for booleans: each value occupies a single bit
/// rather than a whole byte. Overrides the generic `default fn` impl above;
/// `len()` and `capacity()` are inherited from it.
impl BufferBuilderTrait<BooleanType> for BufferBuilder<BooleanType> {
    fn new(capacity: usize) -> Self {
        // `capacity` counts bits; round up to bytes, then to a 64-byte multiple,
        // and zero everything so unset bits read as `false`.
        let byte_capacity = bit_util::ceil(capacity, 8);
        let actual_capacity = bit_util::round_upto_multiple_of_64(byte_capacity);
        let mut buffer = MutableBuffer::new(actual_capacity);
        buffer.set_null_bits(0, actual_capacity);
        Self {
            buffer,
            len: 0,
            _marker: PhantomData,
        }
    }

    fn advance(&mut self, i: usize) -> Result<()> {
        // Resize to the number of whole bytes needed to hold `len + i` bits.
        let new_buffer_len = bit_util::ceil(self.len + i, 8);
        self.buffer.resize(new_buffer_len)?;
        self.len += i;
        Ok(())
    }

    fn append(&mut self, v: bool) -> Result<()> {
        self.reserve(1)?;
        if v {
            // Only `true` needs a write: `new`/`reserve` zero newly acquired
            // bytes, so a cleared bit already encodes `false`.
            // For performance the `len` of the buffer is not updated on each append but
            // is updated in the `freeze` method instead.
            unsafe {
                bit_util::set_bit_raw(self.buffer.raw_data_mut(), self.len);
            }
        }
        self.len += 1;
        Ok(())
    }

    fn append_n(&mut self, n: usize, v: bool) -> Result<()> {
        self.reserve(n)?;
        if n != 0 && v {
            // SAFETY: `reserve(n)` above guaranteed capacity for bits
            // [self.len, self.len + n).
            unsafe {
                bit_util::set_bits_raw(self.buffer.raw_data_mut(), self.len, self.len + n)
            }
        }
        self.len += n;
        Ok(())
    }

    fn append_slice(&mut self, slice: &[bool]) -> Result<()> {
        self.reserve(slice.len())?;
        for v in slice {
            if *v {
                // For performance the `len` of the buffer is not
                // updated on each append but is updated in the
                // `freeze` method instead.
                unsafe {
                    bit_util::set_bit_raw(self.buffer.raw_data_mut(), self.len);
                }
            }
            self.len += 1;
        }
        Ok(())
    }

    fn reserve(&mut self, n: usize) -> Result<()> {
        // The outer `new_capacity` is in bits (elements); the buffer works in bytes.
        let new_capacity = self.len + n;
        if new_capacity > self.capacity() {
            let new_byte_capacity = bit_util::ceil(new_capacity, 8);
            let existing_capacity = self.buffer.capacity();
            // NOTE: shadows the bit-count above with the *byte* capacity
            // returned by the buffer's `reserve`.
            let new_capacity = self.buffer.reserve(new_byte_capacity)?;
            // Zero the newly acquired bytes so fresh bits read as `false`.
            self.buffer
                .set_null_bits(existing_capacity, new_capacity - existing_capacity);
        }
        Ok(())
    }

    fn finish(&mut self) -> Buffer {
        // `append` does not update the buffer's `len` so do it before `freeze` is called.
        let new_buffer_len = bit_util::ceil(self.len, 8);
        debug_assert!(new_buffer_len >= self.buffer.len());
        let mut buf = std::mem::replace(&mut self.buffer, MutableBuffer::new(0));
        self.len = 0;
        buf.resize(new_buffer_len).unwrap();
        buf.freeze()
    }
}
/// Trait for dealing with different array builders at runtime, e.g. inside a
/// heterogeneous collection such as `Vec<Box<ArrayBuilder>>`.
pub trait ArrayBuilder: Any {
    /// Returns the number of array slots in the builder.
    fn len(&self) -> usize;

    /// Builds the array (implementations in this module also reset the
    /// builder for reuse).
    fn finish(&mut self) -> ArrayRef;

    /// Returns the builder as a non-mutable `Any` reference.
    ///
    /// This is most useful when one wants to call non-mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_ref` to get a reference on the specific builder.
    fn as_any(&self) -> &Any;

    /// Returns the builder as a mutable `Any` reference.
    ///
    /// This is most useful when one wants to call mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_mut` to get a reference on the specific builder.
    fn as_any_mut(&mut self) -> &mut Any;

    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any>;
}
/// Array builder for fixed-width primitive types.
pub struct PrimitiveBuilder<T: ArrowPrimitiveType> {
    // Raw values, one slot per appended element (nulls still occupy a slot,
    // see `append_null`, which advances this builder by one).
    values_builder: BufferBuilder<T>,
    // Validity bitmap: bit set = non-null.
    bitmap_builder: BooleanBufferBuilder,
}
impl<T: ArrowPrimitiveType> ArrayBuilder for PrimitiveBuilder<T> {
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of slots appended so far (valid values plus nulls).
    fn len(&self) -> usize {
        self.values_builder.len
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl<T: ArrowPrimitiveType> PrimitiveBuilder<T> {
    /// Creates a new primitive array builder with room for `capacity` slots.
    pub fn new(capacity: usize) -> Self {
        Self {
            values_builder: BufferBuilder::<T>::new(capacity),
            bitmap_builder: BooleanBufferBuilder::new(capacity),
        }
    }

    /// Returns the capacity of this builder measured in slots of type `T`.
    pub fn capacity(&self) -> usize {
        self.values_builder.capacity()
    }

    /// Appends a non-null value of type `T` into the builder.
    pub fn append_value(&mut self, v: T::Native) -> Result<()> {
        self.bitmap_builder.append(true)?;
        self.values_builder.append(v)
    }

    /// Appends a null slot: the validity bit is cleared, but the values
    /// buffer is still advanced so slot indices stay aligned.
    pub fn append_null(&mut self) -> Result<()> {
        self.bitmap_builder.append(false)?;
        self.values_builder.advance(1)
    }

    /// Appends either a value or a null, depending on `v`.
    pub fn append_option(&mut self, v: Option<T::Native>) -> Result<()> {
        match v {
            Some(value) => self.append_value(value),
            None => self.append_null(),
        }
    }

    /// Appends every element of `v` as a non-null slot.
    pub fn append_slice(&mut self, v: &[T::Native]) -> Result<()> {
        self.bitmap_builder.append_n(v.len(), true)?;
        self.values_builder.append_slice(v)
    }

    /// Builds the `PrimitiveArray` and resets this builder for reuse.
    pub fn finish(&mut self) -> PrimitiveArray<T> {
        // Capture the logical length before the child builders are drained.
        let len = self.len();
        let null_bit_buffer = self.bitmap_builder.finish();
        let null_count = len - bit_util::count_set_bits(null_bit_buffer.data());
        let mut builder = ArrayData::builder(T::get_data_type())
            .len(len)
            .add_buffer(self.values_builder.finish());
        // Only attach the validity bitmap when at least one slot is null.
        if null_count > 0 {
            builder = builder
                .null_count(null_count)
                .null_bit_buffer(null_bit_buffer);
        }
        PrimitiveArray::<T>::from(builder.build())
    }

    /// Builds a `DictionaryArray` keyed by `T` with the given dictionary
    /// `values`, resetting this builder for reuse.
    pub fn finish_dict(&mut self, values: ArrayRef) -> DictionaryArray<T> {
        let len = self.len();
        let null_bit_buffer = self.bitmap_builder.finish();
        let null_count = len - bit_util::count_set_bits(null_bit_buffer.data());
        let data_type = DataType::Dictionary(
            Box::new(T::get_data_type()),
            Box::new(values.data_type().clone()),
        );
        let mut builder = ArrayData::builder(data_type)
            .len(len)
            .add_buffer(self.values_builder.finish());
        if null_count > 0 {
            builder = builder
                .null_count(null_count)
                .null_bit_buffer(null_bit_buffer);
        }
        let data = builder.add_child_data(values.data()).build();
        DictionaryArray::<T>::from(data)
    }
}
/// Array builder for `ListArray`.
pub struct ListBuilder<T: ArrayBuilder> {
    // End offset of each list slot into the child values, preceded by a 0
    // sentinel (appended in `with_capacity` and re-seeded in `finish`).
    offsets_builder: Int32BufferBuilder,
    // Validity bitmap: bit set = non-null list slot.
    bitmap_builder: BooleanBufferBuilder,
    // Builder for the flattened child values of all lists.
    values_builder: T,
    // Number of list slots appended so far.
    len: usize,
}
impl<T: ArrayBuilder> ListBuilder<T> {
    /// Creates a new `ListArrayBuilder` from a given values array builder,
    /// using the current length of that builder as the initial capacity.
    pub fn new(values_builder: T) -> Self {
        let capacity = values_builder.len();
        Self::with_capacity(values_builder, capacity)
    }

    /// Creates a new `ListArrayBuilder` from a given values array builder.
    /// `capacity` is the number of list items to pre-allocate space for.
    pub fn with_capacity(values_builder: T, capacity: usize) -> Self {
        // The offsets buffer needs one extra slot for the leading 0 sentinel.
        let mut offsets_builder = Int32BufferBuilder::new(capacity + 1);
        offsets_builder.append(0).unwrap();
        let bitmap_builder = BooleanBufferBuilder::new(capacity);
        Self {
            offsets_builder,
            bitmap_builder,
            values_builder,
            len: 0,
        }
    }
}
impl<T: ArrayBuilder> ArrayBuilder for ListBuilder<T>
where
    T: 'static,
{
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of list slots appended so far.
    fn len(&self) -> usize {
        self.len
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl<T: ArrayBuilder> ListBuilder<T>
where
    T: 'static,
{
    /// Returns the child array builder as a mutable reference.
    ///
    /// This mutable reference can be used to append values into the child array builder,
    /// but you must call `append` to delimit each distinct list value.
    pub fn values(&mut self) -> &mut T {
        &mut self.values_builder
    }

    /// Finish the current variable-length list array slot: records the current
    /// child length as this slot's end offset and marks the slot valid
    /// (`true`) or null (`false`).
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.offsets_builder
            .append(self.values_builder.len() as i32)?;
        self.bitmap_builder.append(is_valid)?;
        self.len += 1;
        Ok(())
    }

    /// Builds the `ListArray` and reset this builder.
    pub fn finish(&mut self) -> ListArray {
        let len = self.len();
        self.len = 0;
        let values_arr = self
            .values_builder
            .as_any_mut()
            .downcast_mut::<T>()
            .unwrap()
            .finish();
        let values_data = values_arr.data();
        let offset_buffer = self.offsets_builder.finish();
        let null_bit_buffer = self.bitmap_builder.finish();
        // Re-seed the (now empty) offsets builder with the leading 0 sentinel
        // so this builder can be reused after `finish`.
        self.offsets_builder.append(0).unwrap();
        let data =
            ArrayData::builder(DataType::List(Box::new(values_data.data_type().clone())))
                .len(len)
                .null_count(len - bit_util::count_set_bits(null_bit_buffer.data()))
                .add_buffer(offset_buffer)
                .add_child_data(values_data)
                .null_bit_buffer(null_bit_buffer)
                .build();
        ListArray::from(data)
    }
}
/// Array builder for `FixedSizeListArray`, where every list slot holds exactly
/// `list_len` child values.
pub struct FixedSizeListBuilder<T: ArrayBuilder> {
    // Validity bitmap: bit set = non-null list slot.
    bitmap_builder: BooleanBufferBuilder,
    // Builder for the flattened child values of all lists.
    values_builder: T,
    // Number of list slots appended so far.
    len: usize,
    // Fixed number of child values per list slot.
    list_len: i32,
}
impl<T: ArrayBuilder> FixedSizeListBuilder<T> {
    /// Creates a new `FixedSizeListBuilder` from a given values array builder.
    /// `length` is the number of child values within each list slot.
    pub fn new(values_builder: T, length: i32) -> Self {
        let capacity = values_builder.len();
        Self::with_capacity(values_builder, length, capacity)
    }

    /// Creates a new `FixedSizeListBuilder` from a given values array builder.
    /// `length` is the number of child values within each list slot.
    /// `capacity` is the number of list slots to pre-allocate space for.
    pub fn with_capacity(values_builder: T, length: i32, capacity: usize) -> Self {
        // Unlike `ListBuilder`, a fixed-size list needs no offsets buffer: each
        // slot's start is implied by `length`. (A previous version allocated an
        // `Int32BufferBuilder` here that was never stored or read — dead code,
        // now removed.)
        Self {
            bitmap_builder: BooleanBufferBuilder::new(capacity),
            values_builder,
            len: 0,
            list_len: length,
        }
    }
}
impl<T: ArrayBuilder> ArrayBuilder for FixedSizeListBuilder<T>
where
    T: 'static,
{
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of list slots appended so far.
    fn len(&self) -> usize {
        self.len
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl<T: ArrayBuilder> FixedSizeListBuilder<T>
where
    T: 'static,
{
    /// Returns the child array builder as a mutable reference.
    ///
    /// This mutable reference can be used to append values into the child array
    /// builder, but you must call `append` to delimit each distinct list value.
    pub fn values(&mut self) -> &mut T {
        &mut self.values_builder
    }

    /// Returns the fixed number of child values per list slot.
    pub fn value_length(&self) -> i32 {
        self.list_len
    }

    /// Finishes the current fixed-size list array slot, marking it valid
    /// (`true`) or null (`false`).
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.bitmap_builder.append(is_valid)?;
        self.len += 1;
        Ok(())
    }

    /// Builds the `FixedSizeListArray` and resets this builder.
    ///
    /// # Panics
    ///
    /// Panics if any slot was appended and the number of child values is not
    /// exactly `len * list_len`.
    pub fn finish(&mut self) -> FixedSizeListArray {
        let len = self.len();
        self.len = 0;
        let values_arr = self
            .values_builder
            .as_any_mut()
            .downcast_mut::<T>()
            .unwrap()
            .finish();
        let values_data = values_arr.data();
        // Require an exact multiple. The previous check used integer division
        // (`values_data.len() / len == list_len`), which silently accepted
        // ragged child lengths, e.g. 7 values for 2 slots of width 3.
        if len != 0 {
            assert!(
                values_data.len() == len * self.list_len as usize,
                "Values of FixedSizeList must have equal lengths, values have length {} and list has {}",
                values_data.len(),
                len
            );
        }
        let null_bit_buffer = self.bitmap_builder.finish();
        let data = ArrayData::builder(DataType::FixedSizeList(
            Box::new(values_data.data_type().clone()),
            self.list_len,
        ))
        .len(len)
        .null_count(len - bit_util::count_set_bits(null_bit_buffer.data()))
        .add_child_data(values_data)
        .null_bit_buffer(null_bit_buffer)
        .build();
        FixedSizeListArray::from(data)
    }
}
/// Array builder for `BinaryArray`: variable-length byte sequences,
/// implemented as a list of `u8` values.
pub struct BinaryBuilder {
    builder: ListBuilder<UInt8Builder>,
}
/// Array builder for `StringArray`: variable-length UTF-8 strings,
/// implemented as a list of `u8` values.
pub struct StringBuilder {
    builder: ListBuilder<UInt8Builder>,
}
/// Array builder for `FixedSizeBinaryArray`: byte sequences that all share a
/// single fixed length.
pub struct FixedSizeBinaryBuilder {
    builder: FixedSizeListBuilder<UInt8Builder>,
}
/// Marker trait uniting the binary-like builders so they can be handled
/// generically at runtime.
pub trait BinaryArrayBuilder: ArrayBuilder {}

impl BinaryArrayBuilder for BinaryBuilder {}
impl BinaryArrayBuilder for StringBuilder {}
impl BinaryArrayBuilder for FixedSizeBinaryBuilder {}
impl ArrayBuilder for BinaryBuilder {
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of array slots appended so far, delegated to the inner list builder.
    fn len(&self) -> usize {
        self.builder.len()
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl ArrayBuilder for StringBuilder {
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of array slots appended so far, delegated to the inner list builder.
    fn len(&self) -> usize {
        self.builder.len()
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl ArrayBuilder for FixedSizeBinaryBuilder {
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of array slots appended so far, delegated to the inner list builder.
    fn len(&self) -> usize {
        self.builder.len()
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl BinaryBuilder {
    /// Creates a new `BinaryBuilder`; `capacity` is the number of bytes to
    /// pre-allocate for the values array.
    pub fn new(capacity: usize) -> Self {
        Self {
            builder: ListBuilder::new(UInt8Builder::new(capacity)),
        }
    }

    /// Appends a single byte into the current values slot.
    ///
    /// When appending individual bytes you must call `append` yourself to
    /// delimit each distinct binary value.
    pub fn append_byte(&mut self, value: u8) -> Result<()> {
        self.builder.values().append_value(value)
    }

    /// Appends an entire byte slice as one array element, calling `append`
    /// automatically to delimit it.
    pub fn append_value(&mut self, value: &[u8]) -> Result<()> {
        self.builder.values().append_slice(value)?;
        self.builder.append(true)
    }

    /// Finish the current variable-length list array slot, marking it valid
    /// (`true`) or null (`false`).
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.builder.append(is_valid)
    }

    /// Append a null value to the array.
    pub fn append_null(&mut self) -> Result<()> {
        self.append(false)
    }

    /// Builds the `BinaryArray` and resets this builder.
    pub fn finish(&mut self) -> BinaryArray {
        let list = self.builder.finish();
        BinaryArray::from(list)
    }
}
impl StringBuilder {
    /// Creates a new `StringBuilder`;
    /// `capacity` is the number of bytes of string data to pre-allocate space for in this builder.
    pub fn new(capacity: usize) -> Self {
        Self {
            builder: ListBuilder::new(UInt8Builder::new(capacity)),
        }
    }

    /// Creates a new `StringBuilder`;
    /// `item_capacity` is the number of items to pre-allocate space for in this builder,
    /// `data_capacity` is the number of bytes of string data to pre-allocate space for.
    pub fn with_capacity(item_capacity: usize, data_capacity: usize) -> Self {
        Self {
            builder: ListBuilder::with_capacity(
                UInt8Builder::new(data_capacity),
                item_capacity,
            ),
        }
    }

    /// Appends a string as one array element, calling `append` automatically
    /// to delimit it.
    pub fn append_value(&mut self, value: &str) -> Result<()> {
        self.builder.values().append_slice(value.as_bytes())?;
        self.builder.append(true)
    }

    /// Finish the current variable-length list array slot, marking it valid
    /// (`true`) or null (`false`).
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.builder.append(is_valid)
    }

    /// Append a null value to the array.
    pub fn append_null(&mut self) -> Result<()> {
        self.append(false)
    }

    /// Builds the `StringArray` and resets this builder.
    pub fn finish(&mut self) -> StringArray {
        let list = self.builder.finish();
        StringArray::from(list)
    }
}
impl FixedSizeBinaryBuilder {
    /// Creates a new `FixedSizeBinaryBuilder`; `capacity` is the number of
    /// bytes to pre-allocate for the values array, and `byte_width` is the
    /// fixed length of every value.
    pub fn new(capacity: usize, byte_width: i32) -> Self {
        Self {
            builder: FixedSizeListBuilder::new(UInt8Builder::new(capacity), byte_width),
        }
    }

    /// Appends a byte slice as one array element, calling `append`
    /// automatically to delimit it.
    ///
    /// # Panics
    ///
    /// Panics if `value` does not match this builder's fixed byte width.
    pub fn append_value(&mut self, value: &[u8]) -> Result<()> {
        assert_eq!(
            self.builder.value_length(),
            value.len() as i32,
            "Byte slice does not have the same length as FixedSizeBinaryBuilder value lengths"
        );
        self.builder.values().append_slice(value)?;
        self.builder.append(true)
    }

    /// Appends a null value, padding the child values with zero bytes so the
    /// fixed-width layout stays consistent.
    pub fn append_null(&mut self) -> Result<()> {
        let length = self.builder.value_length() as usize;
        self.builder.values().append_slice(&vec![0u8; length])?;
        self.builder.append(false)
    }

    /// Builds the `FixedSizeBinaryArray` and resets this builder.
    pub fn finish(&mut self) -> FixedSizeBinaryArray {
        let list = self.builder.finish();
        FixedSizeBinaryArray::from(list)
    }
}
/// Array builder for Struct types.
///
/// Note that callers should make sure that methods of all the child field builders are
/// properly called to maintain the consistency of the data structure.
pub struct StructBuilder {
    // One `Field` per child builder, in order.
    fields: Vec<Field>,
    // `Any`-typed aliases of the same boxed builders held in `field_builders`,
    // used for downcasting in `field_builder()`. See `new` and `Drop`.
    field_anys: Vec<Box<Any>>,
    // The child builders (each box aliases the matching entry of `field_anys`).
    field_builders: Vec<Box<ArrayBuilder>>,
    // Validity bitmap for the struct slots.
    bitmap_builder: BooleanBufferBuilder,
    // Number of struct slots appended so far via `append`.
    len: usize,
}
impl ArrayBuilder for StructBuilder {
    /// Returns the number of array slots in the builder.
    ///
    /// Note: this returns the struct builder's own slot count (incremented by
    /// `append`), not a child builder's length; it is the caller's
    /// responsibility to keep every child field builder at this same length.
    fn len(&self) -> usize {
        self.len
    }

    /// Builds the array (delegates to the inherent `finish`, which also
    /// resets this builder).
    fn finish(&mut self) -> ArrayRef {
        Arc::new(self.finish())
    }

    /// Returns the builder as a non-mutable `Any` reference.
    ///
    /// This is most useful when one wants to call non-mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_ref` to get a reference on the specific builder.
    fn as_any(&self) -> &Any {
        self
    }

    /// Returns the builder as a mutable `Any` reference.
    ///
    /// This is most useful when one wants to call mutable APIs on a specific builder
    /// type. In this case, one can first cast this into an `Any`, and then use
    /// `downcast_mut` to get a reference on the specific builder.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
}
impl StructBuilder {
    /// Creates a new `StructBuilder` from the struct's fields and one child
    /// builder per field (in matching order).
    pub fn new(fields: Vec<Field>, builders: Vec<Box<ArrayBuilder>>) -> Self {
        let mut field_anys = Vec::with_capacity(builders.len());
        let mut field_builders = Vec::with_capacity(builders.len());
        // Create and maintain two references for each of the input builder. We need the
        // extra `Any` reference because we need to cast the builder to a specific type
        // in `field_builder()` by calling `downcast_mut`.
        for f in builders.into_iter() {
            let raw_f = Box::into_raw(f);
            let raw_f_copy = raw_f;
            // SAFETY: both boxes alias the same allocation. The `Drop` impl
            // below `mem::forget`s `field_builders`, so each builder is freed
            // exactly once (through `field_anys`).
            unsafe {
                field_anys.push(Box::from_raw(raw_f).into_box_any());
                field_builders.push(Box::from_raw(raw_f_copy));
            }
        }
        Self {
            fields,
            field_anys,
            field_builders,
            bitmap_builder: BooleanBufferBuilder::new(0),
            len: 0,
        }
    }

    /// Creates a `StructBuilder` with one child builder per field of `schema`,
    /// each pre-allocated for `capacity` elements.
    pub fn from_schema(schema: Schema, capacity: usize) -> Self {
        let fields = schema.fields();
        let mut builders = Vec::with_capacity(fields.len());
        for f in schema.fields() {
            builders.push(Self::from_field(f.clone(), capacity));
        }
        Self::new(schema.fields, builders)
    }

    /// Maps a field's data type to a concrete boxed builder pre-allocated for
    /// `capacity` elements. Panics on data types with no builder support yet.
    fn from_field(f: Field, capacity: usize) -> Box<ArrayBuilder> {
        match f.data_type() {
            DataType::Null => unimplemented!(),
            DataType::Boolean => Box::new(BooleanBuilder::new(capacity)),
            DataType::Int8 => Box::new(Int8Builder::new(capacity)),
            DataType::Int16 => Box::new(Int16Builder::new(capacity)),
            DataType::Int32 => Box::new(Int32Builder::new(capacity)),
            DataType::Int64 => Box::new(Int64Builder::new(capacity)),
            DataType::UInt8 => Box::new(UInt8Builder::new(capacity)),
            DataType::UInt16 => Box::new(UInt16Builder::new(capacity)),
            DataType::UInt32 => Box::new(UInt32Builder::new(capacity)),
            DataType::UInt64 => Box::new(UInt64Builder::new(capacity)),
            DataType::Float32 => Box::new(Float32Builder::new(capacity)),
            DataType::Float64 => Box::new(Float64Builder::new(capacity)),
            DataType::Binary => Box::new(BinaryBuilder::new(capacity)),
            DataType::FixedSizeBinary(len) => {
                Box::new(FixedSizeBinaryBuilder::new(capacity, *len))
            }
            DataType::Utf8 => Box::new(StringBuilder::new(capacity)),
            DataType::Date32(DateUnit::Day) => Box::new(Date32Builder::new(capacity)),
            DataType::Date64(DateUnit::Millisecond) => {
                Box::new(Date64Builder::new(capacity))
            }
            DataType::Time32(TimeUnit::Second) => {
                Box::new(Time32SecondBuilder::new(capacity))
            }
            DataType::Time32(TimeUnit::Millisecond) => {
                Box::new(Time32MillisecondBuilder::new(capacity))
            }
            DataType::Time64(TimeUnit::Microsecond) => {
                Box::new(Time64MicrosecondBuilder::new(capacity))
            }
            DataType::Time64(TimeUnit::Nanosecond) => {
                Box::new(Time64NanosecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Second, _) => {
                Box::new(TimestampSecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Millisecond, _) => {
                Box::new(TimestampMillisecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Microsecond, _) => {
                Box::new(TimestampMicrosecondBuilder::new(capacity))
            }
            DataType::Timestamp(TimeUnit::Nanosecond, _) => {
                Box::new(TimestampNanosecondBuilder::new(capacity))
            }
            DataType::Interval(IntervalUnit::YearMonth) => {
                Box::new(IntervalYearMonthBuilder::new(capacity))
            }
            DataType::Interval(IntervalUnit::DayTime) => {
                Box::new(IntervalDayTimeBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Second) => {
                Box::new(DurationSecondBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Millisecond) => {
                Box::new(DurationMillisecondBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Microsecond) => {
                Box::new(DurationMicrosecondBuilder::new(capacity))
            }
            DataType::Duration(TimeUnit::Nanosecond) => {
                Box::new(DurationNanosecondBuilder::new(capacity))
            }
            // Nested structs recurse through `from_schema`.
            DataType::Struct(fields) => {
                let schema = Schema::new(fields.clone());
                Box::new(Self::from_schema(schema, capacity))
            }
            t => panic!("Data type {:?} is not currently supported", t),
        }
    }

    /// Returns a mutable reference to the child field builder at index `i`.
    /// Result will be `None` if the input type `T` provided doesn't match the actual
    /// field builder's type.
    pub fn field_builder<T: ArrayBuilder>(&mut self, i: usize) -> Option<&mut T> {
        self.field_anys[i].downcast_mut::<T>()
    }

    /// Returns the number of fields for the struct this builder is building.
    pub fn num_fields(&self) -> usize {
        self.field_builders.len()
    }

    /// Appends an element (either null or non-null) to the struct. The actual elements
    /// should be appended for each child sub-array in a consistent way.
    pub fn append(&mut self, is_valid: bool) -> Result<()> {
        self.bitmap_builder.append(is_valid)?;
        self.len += 1;
        Ok(())
    }

    /// Appends a null element to the struct.
    pub fn append_null(&mut self) -> Result<()> {
        self.append(false)
    }

    /// Builds the `StructArray` and reset this builder.
    pub fn finish(&mut self) -> StructArray {
        let mut child_data = Vec::with_capacity(self.field_builders.len());
        // Finish every child builder; their data become the struct's children.
        for f in &mut self.field_builders {
            let arr = f.finish();
            child_data.push(arr.data());
        }
        let null_bit_buffer = self.bitmap_builder.finish();
        let null_count = self.len - bit_util::count_set_bits(null_bit_buffer.data());
        let mut builder = ArrayData::builder(DataType::Struct(self.fields.clone()))
            .len(self.len)
            .child_data(child_data);
        // Attach the validity bitmap only when at least one slot is null.
        if null_count > 0 {
            builder = builder
                .null_count(null_count)
                .null_bit_buffer(null_bit_buffer);
        }
        self.len = 0;
        StructArray::from(builder.build())
    }
}
impl Drop for StructBuilder {
    fn drop(&mut self) {
        // To avoid double drop on the field array builders: `field_anys` and
        // `field_builders` hold boxes aliasing the same allocations (see
        // `StructBuilder::new`), so leak the `field_builders` copies and let
        // `field_anys` perform the single real drop.
        let builders = std::mem::replace(&mut self.field_builders, Vec::new());
        std::mem::forget(builders);
    }
}
/// Array builder for `DictionaryArray`. For example to map a set of byte indices
/// to f32 values. Note that the use of a `HashMap` here will not scale to very large
/// arrays or result in an ordered dictionary.
pub struct PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    // Dictionary keys, one per appended slot.
    keys_builder: PrimitiveBuilder<K>,
    // Distinct dictionary values, in first-seen order.
    values_builder: PrimitiveBuilder<V>,
    // Reverse index from value bytes to an existing key, for deduplication.
    map: HashMap<Box<[u8]>, K::Native>,
}
impl<K, V> PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    /// Creates a new `PrimitiveDictionaryBuilder` from a keys builder and a
    /// values builder, starting with an empty deduplication map.
    pub fn new(
        keys_builder: PrimitiveBuilder<K>,
        values_builder: PrimitiveBuilder<V>,
    ) -> Self {
        let map = HashMap::new();
        Self {
            keys_builder,
            values_builder,
            map,
        }
    }
}
impl<K, V> ArrayBuilder for PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    /// Exposes this builder as an immutable `Any`, enabling `downcast_ref`.
    fn as_any(&self) -> &Any {
        self
    }

    /// Exposes this builder as a mutable `Any`, enabling `downcast_mut`.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }

    /// Consumes the boxed builder, yielding it as a `Box<Any>`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }

    /// Number of slots appended so far (one key per slot).
    fn len(&self) -> usize {
        self.keys_builder.len()
    }

    /// Builds the array via the inherent `finish`, resetting this builder,
    /// and wraps the result in an `ArrayRef`.
    fn finish(&mut self) -> ArrayRef {
        let array = self.finish();
        Arc::new(array)
    }
}
impl<K, V> PrimitiveDictionaryBuilder<K, V>
where
    K: ArrowPrimitiveType,
    V: ArrowPrimitiveType,
{
    /// Append a primitive value to the array. Return an existing index
    /// if already present in the values array or a new index if the
    /// value is appended to the values array.
    pub fn append(&mut self, value: V::Native) -> Result<K::Native> {
        // Fast path: the value was seen before, reuse its key.
        if let Some(&key) = self.map.get(value.to_byte_slice()) {
            self.keys_builder.append_value(key)?;
            return Ok(key);
        }
        // Slow path: assign the next dictionary index as this value's key.
        let key = K::Native::from_usize(self.values_builder.len())
            .ok_or(ArrowError::DictionaryKeyOverflowError)?;
        self.values_builder.append_value(value)?;
        self.keys_builder.append_value(key as K::Native)?;
        self.map.insert(value.to_byte_slice().into(), key);
        Ok(key)
    }

    /// Appends a null slot (a null key; no dictionary value is added).
    pub fn append_null(&mut self) -> Result<()> {
        self.keys_builder.append_null()
    }

    /// Builds the `DictionaryArray` and resets this builder.
    pub fn finish(&mut self) -> DictionaryArray<K> {
        self.map.clear();
        let value_ref: ArrayRef = Arc::new(self.values_builder.finish());
        self.keys_builder.finish_dict(value_ref)
    }
}
/// Array builder for `DictionaryArray`. For example to map a set of byte indices
/// to f32 values. Note that the use of a `HashMap` here will not scale to very large
/// arrays or result in an ordered dictionary.
pub struct StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    // Dictionary keys, one per appended slot.
    keys_builder: PrimitiveBuilder<K>,
    // Distinct dictionary string values, in insertion order.
    values_builder: StringBuilder,
    // Reverse index from UTF-8 bytes to an existing key, for deduplication.
    map: HashMap<Box<[u8]>, K::Native>,
}
impl<K> StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    /// Creates a new `StringDictionaryBuilder` from a keys builder and a value
    /// builder, starting with an empty deduplication map.
    pub fn new(keys_builder: PrimitiveBuilder<K>, values_builder: StringBuilder) -> Self {
        let map = HashMap::new();
        Self {
            keys_builder,
            values_builder,
            map,
        }
    }

    /// Creates a new `StringDictionaryBuilder` from a keys builder and a dictionary
    /// which is initialized with the given values.
    /// The indices of those dictionary values are used as keys.
    ///
    /// # Example
    ///
    /// ```
    /// use arrow::datatypes::Int16Type;
    /// use arrow::array::{StringArray, StringDictionaryBuilder, PrimitiveBuilder};
    /// use std::convert::TryFrom;
    ///
    /// let dictionary_values = StringArray::try_from(vec![None, Some("abc"), Some("def")]).unwrap();
    ///
    /// let mut builder = StringDictionaryBuilder::new_with_dictionary(PrimitiveBuilder::<Int16Type>::new(3), &dictionary_values).unwrap();
    /// builder.append("def").unwrap();
    /// builder.append_null().unwrap();
    /// builder.append("abc").unwrap();
    ///
    /// let dictionary_array = builder.finish();
    ///
    /// let keys: Vec<Option<i16>> = dictionary_array.keys().collect();
    ///
    /// assert_eq!(keys, vec![Some(2), None, Some(1)]);
    /// ```
    pub fn new_with_dictionary(
        keys_builder: PrimitiveBuilder<K>,
        dictionary_values: &StringArray,
    ) -> Result<Self> {
        let dict_len = dictionary_values.len();
        let mut values_builder =
            StringBuilder::with_capacity(dict_len, dictionary_values.value_data().len());
        let mut map: HashMap<Box<[u8]>, K::Native> = HashMap::with_capacity(dict_len);
        for idx in 0..dict_len {
            // Null dictionary entries occupy a slot but get no reverse mapping.
            if !dictionary_values.is_valid(idx) {
                values_builder.append_null()?;
                continue;
            }
            let value = dictionary_values.value(idx);
            let key = K::Native::from_usize(idx)
                .ok_or(ArrowError::DictionaryKeyOverflowError)?;
            map.insert(value.as_bytes().into(), key);
            values_builder.append_value(value)?;
        }
        Ok(Self {
            keys_builder,
            values_builder,
            map,
        })
    }
}
impl<K> ArrayBuilder for StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    /// Returns the builder as an non-mutable `Any` reference.
    fn as_any(&self) -> &Any {
        self
    }
    /// Returns the builder as an mutable `Any` reference.
    fn as_any_mut(&mut self) -> &mut Any {
        self
    }
    /// Returns the boxed builder as a box of `Any`.
    fn into_box_any(self: Box<Self>) -> Box<Any> {
        self
    }
    /// Returns the number of array slots in the builder
    fn len(&self) -> usize {
        self.keys_builder.len()
    }
    /// Builds the array and reset this builder.
    fn finish(&mut self) -> ArrayRef {
        // Method-call syntax resolves to the *inherent* `finish` (returning
        // `DictionaryArray<K>`), not recursively to this trait method; the
        // result is then type-erased behind an `ArrayRef`.
        Arc::new(self.finish())
    }
}
impl<K> StringDictionaryBuilder<K>
where
    K: ArrowDictionaryKeyType,
{
    /// Append a string to the array. Return an existing index
    /// if already present in the values array or a new index if the
    /// value is appended to the values array.
    ///
    /// # Errors
    /// Returns `ArrowError::DictionaryKeyOverflowError` when the number of
    /// distinct values exceeds the capacity of the key type `K`.
    pub fn append(&mut self, value: &str) -> Result<K::Native> {
        if let Some(&key) = self.map.get(value.as_bytes()) {
            // Value already interned: emit only its existing key.
            self.keys_builder.append_value(key)?;
            Ok(key)
        } else {
            // New value: its key is the current length of the values array.
            let key = K::Native::from_usize(self.values_builder.len())
                .ok_or(ArrowError::DictionaryKeyOverflowError)?;
            self.values_builder.append_value(value)?;
            // `key` is already of type `K::Native`; the redundant
            // `as K::Native` cast was removed.
            self.keys_builder.append_value(key)?;
            self.map.insert(value.as_bytes().into(), key);
            Ok(key)
        }
    }

    /// Appends a null slot to the keys array; no entry is added to the
    /// dictionary values.
    pub fn append_null(&mut self) -> Result<()> {
        self.keys_builder.append_null()
    }

    /// Builds the `DictionaryArray` and reset this builder.
    ///
    /// The interning map is cleared, so a subsequent batch starts assigning
    /// keys from a fresh (empty) dictionary.
    pub fn finish(&mut self) -> DictionaryArray<K> {
        self.map.clear();
        let value_ref: ArrayRef = Arc::new(self.values_builder.finish());
        self.keys_builder.finish_dict(value_ref)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::array::Array;
use crate::bitmap::Bitmap;
use std::convert::TryFrom;
// --- BufferBuilder tests -------------------------------------------------
// Note: `capacity()` is expressed in items; allocations are rounded up to
// 64 bytes, hence a request for 5 i32s yields a 16-item (64-byte) capacity.

// An empty builder still pre-allocates, and `finish` yields an empty buffer.
#[test]
fn test_builder_i32_empty() {
    let mut b = Int32BufferBuilder::new(5);
    assert_eq!(0, b.len());
    assert_eq!(16, b.capacity());
    let a = b.finish();
    assert_eq!(0, a.len());
}
// A zero-capacity builder grows on demand when a value is appended.
#[test]
fn test_builder_i32_alloc_zero_bytes() {
    let mut b = Int32BufferBuilder::new(0);
    b.append(123).unwrap();
    let a = b.finish();
    assert_eq!(4, a.len());
}
// Buffer length is in bytes: 5 i32 values -> 20 bytes.
#[test]
fn test_builder_i32() {
    let mut b = Int32BufferBuilder::new(5);
    for i in 0..5 {
        b.append(i).unwrap();
    }
    assert_eq!(16, b.capacity());
    let a = b.finish();
    assert_eq!(20, a.len());
}
// Appending past the initial capacity doubles the backing allocation.
#[test]
fn test_builder_i32_grow_buffer() {
    let mut b = Int32BufferBuilder::new(2);
    assert_eq!(16, b.capacity());
    for i in 0..20 {
        b.append(i).unwrap();
    }
    assert_eq!(32, b.capacity());
    let a = b.finish();
    assert_eq!(80, a.len());
}
// `finish` resets the builder (len and capacity drop to 0) and the same
// builder can be reused to produce another buffer afterwards.
#[test]
fn test_builder_finish() {
    let mut b = Int32BufferBuilder::new(5);
    assert_eq!(16, b.capacity());
    for i in 0..10 {
        b.append(i).unwrap();
    }
    let mut a = b.finish();
    assert_eq!(40, a.len());
    assert_eq!(0, b.len());
    assert_eq!(0, b.capacity());
    // Try build another buffer after cleaning up.
    for i in 0..20 {
        b.append(i).unwrap()
    }
    assert_eq!(32, b.capacity());
    a = b.finish();
    assert_eq!(80, a.len());
}
// `reserve` is a no-op while within capacity and doubles beyond it.
#[test]
fn test_reserve() {
    let mut b = UInt8BufferBuilder::new(2);
    assert_eq!(64, b.capacity());
    b.reserve(64).unwrap();
    assert_eq!(64, b.capacity());
    b.reserve(65).unwrap();
    assert_eq!(128, b.capacity());
    let mut b = Int32BufferBuilder::new(2);
    assert_eq!(16, b.capacity());
    b.reserve(16).unwrap();
    assert_eq!(16, b.capacity());
    b.reserve(17).unwrap();
    assert_eq!(32, b.capacity());
}
// Slices append contiguously; resulting lengths are in bytes.
#[test]
fn test_append_slice() {
    let mut b = UInt8BufferBuilder::new(0);
    b.append_slice("Hello, ".as_bytes()).unwrap();
    b.append_slice("World!".as_bytes()).unwrap();
    let buffer = b.finish();
    assert_eq!(13, buffer.len());
    let mut b = Int32BufferBuilder::new(0);
    b.append_slice(&[32, 54]).unwrap();
    let buffer = b.finish();
    assert_eq!(8, buffer.len());
}
// Boolean builders are bit-packed: 4 bits fit in a single output byte, and
// capacity is counted in bits (64 bytes -> 512 bits).
#[test]
fn test_write_bytes() {
    let mut b = BooleanBufferBuilder::new(4);
    b.append(false).unwrap();
    b.append(true).unwrap();
    b.append(false).unwrap();
    b.append(true).unwrap();
    assert_eq!(4, b.len());
    assert_eq!(512, b.capacity());
    let buffer = b.finish();
    assert_eq!(1, buffer.len());
    let mut b = BooleanBufferBuilder::new(4);
    b.append_slice(&[false, true, false, true]).unwrap();
    assert_eq!(4, b.len());
    assert_eq!(512, b.capacity());
    let buffer = b.finish();
    assert_eq!(1, buffer.len());
}
// `write_bytes` copies raw bytes and advances the item count explicitly.
#[test]
fn test_write_bytes_i32() {
    let mut b = Int32BufferBuilder::new(4);
    let bytes = [8, 16, 32, 64].to_byte_slice();
    b.write_bytes(bytes, 4).unwrap();
    assert_eq!(4, b.len());
    assert_eq!(16, b.capacity());
    let buffer = b.finish();
    assert_eq!(16, buffer.len());
}
// Writing beyond the (zero) reserved capacity must panic rather than grow.
#[test]
#[should_panic(expected = "Could not write to Buffer, not big enough")]
fn test_write_too_many_bytes() {
    let mut b = Int32BufferBuilder::new(0);
    let bytes = [8, 16, 32, 64].to_byte_slice();
    b.write_bytes(bytes, 4).unwrap();
}
// --- Boolean / primitive array builder tests ------------------------------

// Mixing `append_slice` with `append_null` must match a literal-built array.
#[test]
fn test_boolean_array_builder_append_slice() {
    let arr1 =
        BooleanArray::from(vec![Some(true), Some(false), None, None, Some(false)]);
    let mut builder = BooleanArray::builder(0);
    builder.append_slice(&[true, false]).unwrap();
    builder.append_null().unwrap();
    builder.append_null().unwrap();
    builder.append_value(false).unwrap();
    let arr2 = builder.finish();
    assert_eq!(arr1.len(), arr2.len());
    assert_eq!(arr1.offset(), arr2.offset());
    assert_eq!(arr1.null_count(), arr2.null_count());
    for i in 0..5 {
        assert_eq!(arr1.is_null(i), arr2.is_null(i));
        assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
        if arr1.is_valid(i) {
            assert_eq!(arr1.value(i), arr2.value(i));
        }
    }
}
// 10 bits must produce a 2-byte buffer with the expected bit pattern.
#[test]
fn test_boolean_builder_increases_buffer_len() {
    // 00000010 01001000
    let buf = Buffer::from([72_u8, 2_u8]);
    let mut builder = BooleanBufferBuilder::new(8);
    for i in 0..10 {
        if i == 3 || i == 6 || i == 9 {
            builder.append(true).unwrap();
        } else {
            builder.append(false).unwrap();
        }
    }
    let buf2 = builder.finish();
    assert_eq!(buf.len(), buf2.len());
    assert_eq!(buf.data(), buf2.data());
}
// Plain Int32 round-trip through the typed builder.
#[test]
fn test_primitive_array_builder_i32() {
    let mut builder = Int32Array::builder(5);
    for i in 0..5 {
        builder.append_value(i).unwrap();
    }
    let arr = builder.finish();
    assert_eq!(5, arr.len());
    assert_eq!(0, arr.offset());
    assert_eq!(0, arr.null_count());
    for i in 0..5 {
        assert!(!arr.is_null(i));
        assert!(arr.is_valid(i));
        assert_eq!(i as i32, arr.value(i));
    }
}
// Date32 uses the same i32-backed builder machinery.
#[test]
fn test_primitive_array_builder_date32() {
    let mut builder = Date32Array::builder(5);
    for i in 0..5 {
        builder.append_value(i).unwrap();
    }
    let arr = builder.finish();
    assert_eq!(5, arr.len());
    assert_eq!(0, arr.offset());
    assert_eq!(0, arr.null_count());
    for i in 0..5 {
        assert!(!arr.is_null(i));
        assert!(arr.is_valid(i));
        assert_eq!(i as i32, arr.value(i));
    }
}
// TimestampSecond values are i64-backed.
#[test]
fn test_primitive_array_builder_timestamp_second() {
    let mut builder = TimestampSecondArray::builder(5);
    for i in 0..5 {
        builder.append_value(i).unwrap();
    }
    let arr = builder.finish();
    assert_eq!(5, arr.len());
    assert_eq!(0, arr.offset());
    assert_eq!(0, arr.null_count());
    for i in 0..5 {
        assert!(!arr.is_null(i));
        assert!(arr.is_valid(i));
        assert_eq!(i as i64, arr.value(i));
    }
}
// Boolean arrays bit-pack their values buffer; compare against a literal one.
#[test]
fn test_primitive_array_builder_bool() {
    // 00000010 01001000
    let buf = Buffer::from([72_u8, 2_u8]);
    let mut builder = BooleanArray::builder(10);
    for i in 0..10 {
        if i == 3 || i == 6 || i == 9 {
            builder.append_value(true).unwrap();
        } else {
            builder.append_value(false).unwrap();
        }
    }
    let arr = builder.finish();
    assert_eq!(buf, arr.values());
    assert_eq!(10, arr.len());
    assert_eq!(0, arr.offset());
    assert_eq!(0, arr.null_count());
    for i in 0..10 {
        assert!(!arr.is_null(i));
        assert!(arr.is_valid(i));
        assert_eq!(i == 3 || i == 6 || i == 9, arr.value(i), "failed at {}", i)
    }
}
// `append_option` handles `Some`/`None` in one call.
#[test]
fn test_primitive_array_builder_append_option() {
    let arr1 = Int32Array::from(vec![Some(0), None, Some(2), None, Some(4)]);
    let mut builder = Int32Array::builder(5);
    builder.append_option(Some(0)).unwrap();
    builder.append_option(None).unwrap();
    builder.append_option(Some(2)).unwrap();
    builder.append_option(None).unwrap();
    builder.append_option(Some(4)).unwrap();
    let arr2 = builder.finish();
    assert_eq!(arr1.len(), arr2.len());
    assert_eq!(arr1.offset(), arr2.offset());
    assert_eq!(arr1.null_count(), arr2.null_count());
    for i in 0..5 {
        assert_eq!(arr1.is_null(i), arr2.is_null(i));
        assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
        if arr1.is_valid(i) {
            assert_eq!(arr1.value(i), arr2.value(i));
        }
    }
}
// Explicit `append_null` calls interleaved with values.
#[test]
fn test_primitive_array_builder_append_null() {
    let arr1 = Int32Array::from(vec![Some(0), Some(2), None, None, Some(4)]);
    let mut builder = Int32Array::builder(5);
    builder.append_value(0).unwrap();
    builder.append_value(2).unwrap();
    builder.append_null().unwrap();
    builder.append_null().unwrap();
    builder.append_value(4).unwrap();
    let arr2 = builder.finish();
    assert_eq!(arr1.len(), arr2.len());
    assert_eq!(arr1.offset(), arr2.offset());
    assert_eq!(arr1.null_count(), arr2.null_count());
    for i in 0..5 {
        assert_eq!(arr1.is_null(i), arr2.is_null(i));
        assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
        if arr1.is_valid(i) {
            assert_eq!(arr1.value(i), arr2.value(i));
        }
    }
}
// `append_slice` mixed with nulls and single values.
#[test]
fn test_primitive_array_builder_append_slice() {
    let arr1 = Int32Array::from(vec![Some(0), Some(2), None, None, Some(4)]);
    let mut builder = Int32Array::builder(5);
    builder.append_slice(&[0, 2]).unwrap();
    builder.append_null().unwrap();
    builder.append_null().unwrap();
    builder.append_value(4).unwrap();
    let arr2 = builder.finish();
    assert_eq!(arr1.len(), arr2.len());
    assert_eq!(arr1.offset(), arr2.offset());
    assert_eq!(arr1.null_count(), arr2.null_count());
    for i in 0..5 {
        assert_eq!(arr1.is_null(i), arr2.is_null(i));
        assert_eq!(arr1.is_valid(i), arr2.is_valid(i));
        if arr1.is_valid(i) {
            assert_eq!(arr1.value(i), arr2.value(i));
        }
    }
}
// `finish` resets the builder so it can be reused for a second array.
#[test]
fn test_primitive_array_builder_finish() {
    let mut builder = Int32Builder::new(5);
    builder.append_slice(&[2, 4, 6, 8]).unwrap();
    let mut arr = builder.finish();
    assert_eq!(4, arr.len());
    assert_eq!(0, builder.len());
    builder.append_slice(&[1, 3, 5, 7, 9]).unwrap();
    arr = builder.finish();
    assert_eq!(5, arr.len());
    assert_eq!(0, builder.len());
}
// --- List / FixedSizeList builder tests -----------------------------------

// Values go through `builder.values()`; `append(true)` closes one list slot.
#[test]
fn test_list_array_builder() {
    let values_builder = Int32Builder::new(10);
    let mut builder = ListBuilder::new(values_builder);
    // [[0, 1, 2], [3, 4, 5], [6, 7]]
    builder.values().append_value(0).unwrap();
    builder.values().append_value(1).unwrap();
    builder.values().append_value(2).unwrap();
    builder.append(true).unwrap();
    builder.values().append_value(3).unwrap();
    builder.values().append_value(4).unwrap();
    builder.values().append_value(5).unwrap();
    builder.append(true).unwrap();
    builder.values().append_value(6).unwrap();
    builder.values().append_value(7).unwrap();
    builder.append(true).unwrap();
    let list_array = builder.finish();
    // Child values buffer holds the flattened element data.
    let values = list_array.values().data().buffers()[0].clone();
    assert_eq!(
        Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()),
        values
    );
    // Top-level buffer holds the list offsets.
    assert_eq!(
        Buffer::from(&[0, 3, 6, 8].to_byte_slice()),
        list_array.data().buffers()[0].clone()
    );
    assert_eq!(DataType::Int32, list_array.value_type());
    assert_eq!(3, list_array.len());
    assert_eq!(0, list_array.null_count());
    assert_eq!(6, list_array.value_offset(2));
    assert_eq!(2, list_array.value_length(2));
    for i in 0..3 {
        assert!(list_array.is_valid(i));
        assert!(!list_array.is_null(i));
    }
}
// `append(false)` produces a null list slot; inner nulls are independent.
#[test]
fn test_list_array_builder_nulls() {
    let values_builder = Int32Builder::new(10);
    let mut builder = ListBuilder::new(values_builder);
    // [[0, 1, 2], null, [3, null, 5], [6, 7]]
    builder.values().append_value(0).unwrap();
    builder.values().append_value(1).unwrap();
    builder.values().append_value(2).unwrap();
    builder.append(true).unwrap();
    builder.append(false).unwrap();
    builder.values().append_value(3).unwrap();
    builder.values().append_null().unwrap();
    builder.values().append_value(5).unwrap();
    builder.append(true).unwrap();
    builder.values().append_value(6).unwrap();
    builder.values().append_value(7).unwrap();
    builder.append(true).unwrap();
    let list_array = builder.finish();
    assert_eq!(DataType::Int32, list_array.value_type());
    assert_eq!(4, list_array.len());
    assert_eq!(1, list_array.null_count());
    assert_eq!(3, list_array.value_offset(2));
    assert_eq!(3, list_array.value_length(2));
}
// Fixed-size lists still require a full set of child values per slot,
// even for null slots.
#[test]
fn test_fixed_size_list_array_builder() {
    let values_builder = Int32Builder::new(10);
    let mut builder = FixedSizeListBuilder::new(values_builder, 3);
    // [[0, 1, 2], null, [3, null, 5], [6, 7, null]]
    builder.values().append_value(0).unwrap();
    builder.values().append_value(1).unwrap();
    builder.values().append_value(2).unwrap();
    builder.append(true).unwrap();
    builder.values().append_null().unwrap();
    builder.values().append_null().unwrap();
    builder.values().append_null().unwrap();
    builder.append(false).unwrap();
    builder.values().append_value(3).unwrap();
    builder.values().append_null().unwrap();
    builder.values().append_value(5).unwrap();
    builder.append(true).unwrap();
    builder.values().append_value(6).unwrap();
    builder.values().append_value(7).unwrap();
    builder.values().append_null().unwrap();
    builder.append(true).unwrap();
    let list_array = builder.finish();
    assert_eq!(DataType::Int32, list_array.value_type());
    assert_eq!(4, list_array.len());
    assert_eq!(1, list_array.null_count());
    assert_eq!(6, list_array.value_offset(2));
    assert_eq!(3, list_array.value_length());
}
// `finish` resets a list builder for reuse.
#[test]
fn test_list_array_builder_finish() {
    let values_builder = Int32Array::builder(5);
    let mut builder = ListBuilder::new(values_builder);
    builder.values().append_slice(&[1, 2, 3]).unwrap();
    builder.append(true).unwrap();
    builder.values().append_slice(&[4, 5, 6]).unwrap();
    builder.append(true).unwrap();
    let mut arr = builder.finish();
    assert_eq!(2, arr.len());
    assert_eq!(0, builder.len());
    builder.values().append_slice(&[7, 8, 9]).unwrap();
    builder.append(true).unwrap();
    arr = builder.finish();
    assert_eq!(1, arr.len());
    assert_eq!(0, builder.len());
}
// Finishing an untouched fixed-size-list builder yields an empty array.
#[test]
fn test_fixed_size_list_array_builder_empty() {
    let values_builder = Int32Array::builder(5);
    let mut builder = FixedSizeListBuilder::new(values_builder, 3);
    let arr = builder.finish();
    assert_eq!(0, arr.len());
    assert_eq!(0, builder.len());
}
// `finish` resets a fixed-size-list builder for reuse.
#[test]
fn test_fixed_size_list_array_builder_finish() {
    let values_builder = Int32Array::builder(5);
    let mut builder = FixedSizeListBuilder::new(values_builder, 3);
    builder.values().append_slice(&[1, 2, 3]).unwrap();
    builder.append(true).unwrap();
    builder.values().append_slice(&[4, 5, 6]).unwrap();
    builder.append(true).unwrap();
    let mut arr = builder.finish();
    assert_eq!(2, arr.len());
    assert_eq!(0, builder.len());
    builder.values().append_slice(&[7, 8, 9]).unwrap();
    builder.append(true).unwrap();
    arr = builder.finish();
    assert_eq!(1, arr.len());
    assert_eq!(0, builder.len());
}
// Nested list-of-list: offsets exist at both levels, nulls at either level.
#[test]
fn test_list_list_array_builder() {
    let primitive_builder = Int32Builder::new(10);
    let values_builder = ListBuilder::new(primitive_builder);
    let mut builder = ListBuilder::new(values_builder);
    // [[[1, 2], [3, 4]], [[5, 6, 7], null, [8]], null, [[9, 10]]]
    builder.values().values().append_value(1).unwrap();
    builder.values().values().append_value(2).unwrap();
    builder.values().append(true).unwrap();
    builder.values().values().append_value(3).unwrap();
    builder.values().values().append_value(4).unwrap();
    builder.values().append(true).unwrap();
    builder.append(true).unwrap();
    builder.values().values().append_value(5).unwrap();
    builder.values().values().append_value(6).unwrap();
    builder.values().values().append_value(7).unwrap();
    builder.values().append(true).unwrap();
    builder.values().append(false).unwrap();
    builder.values().values().append_value(8).unwrap();
    builder.values().append(true).unwrap();
    builder.append(true).unwrap();
    builder.append(false).unwrap();
    builder.values().values().append_value(9).unwrap();
    builder.values().values().append_value(10).unwrap();
    builder.values().append(true).unwrap();
    builder.append(true).unwrap();
    let list_array = builder.finish();
    assert_eq!(4, list_array.len());
    assert_eq!(1, list_array.null_count());
    // Outer offsets: the null outer slot contributes a zero-length range.
    assert_eq!(
        Buffer::from(&[0, 2, 5, 5, 6].to_byte_slice()),
        list_array.data().buffers()[0].clone()
    );
    assert_eq!(6, list_array.values().data().len());
    assert_eq!(1, list_array.values().data().null_count());
    // Inner offsets: the null inner list likewise spans no elements.
    assert_eq!(
        Buffer::from(&[0, 2, 4, 7, 7, 8, 10].to_byte_slice()),
        list_array.values().data().buffers()[0].clone()
    );
    assert_eq!(10, list_array.values().data().child_data()[0].len());
    assert_eq!(0, list_array.values().data().child_data()[0].null_count());
    assert_eq!(
        Buffer::from(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10].to_byte_slice()),
        list_array.values().data().child_data()[0].buffers()[0].clone()
    );
}
// --- Binary / String / FixedSizeBinary builder tests ----------------------

// Bytes accumulate via `append_byte`; `append(true)` closes one binary slot
// (so two consecutive appends yield an empty value in between).
#[test]
fn test_binary_array_builder() {
    let mut builder = BinaryBuilder::new(20);
    builder.append_byte(b'h').unwrap();
    builder.append_byte(b'e').unwrap();
    builder.append_byte(b'l').unwrap();
    builder.append_byte(b'l').unwrap();
    builder.append_byte(b'o').unwrap();
    builder.append(true).unwrap();
    builder.append(true).unwrap();
    builder.append_byte(b'w').unwrap();
    builder.append_byte(b'o').unwrap();
    builder.append_byte(b'r').unwrap();
    builder.append_byte(b'l').unwrap();
    builder.append_byte(b'd').unwrap();
    builder.append(true).unwrap();
    let array = builder.finish();
    let binary_array = BinaryArray::from(array);
    assert_eq!(3, binary_array.len());
    assert_eq!(0, binary_array.null_count());
    assert_eq!([b'h', b'e', b'l', b'l', b'o'], binary_array.value(0));
    assert_eq!([] as [u8; 0], binary_array.value(1));
    assert_eq!([b'w', b'o', b'r', b'l', b'd'], binary_array.value(2));
    assert_eq!(5, binary_array.value_offset(2));
    assert_eq!(5, binary_array.value_length(2));
}
// Same slot semantics for strings: a bare `append(true)` yields "".
#[test]
fn test_string_array_builder() {
    let mut builder = StringBuilder::new(20);
    builder.append_value("hello").unwrap();
    builder.append(true).unwrap();
    builder.append_value("world").unwrap();
    let array = builder.finish();
    let string_array = StringArray::from(array);
    assert_eq!(3, string_array.len());
    assert_eq!(0, string_array.null_count());
    assert_eq!("hello", string_array.value(0));
    assert_eq!("", string_array.value(1));
    assert_eq!("world", string_array.value(2));
    assert_eq!(5, string_array.value_offset(2));
    assert_eq!(5, string_array.value_length(2));
}
// Fixed-size binary: every slot (including nulls) occupies 5 bytes.
#[test]
fn test_fixed_size_binary_builder() {
    let mut builder = FixedSizeBinaryBuilder::new(15, 5);
    // [b"hello", null, "arrow"]
    builder.append_value(b"hello").unwrap();
    builder.append_null().unwrap();
    builder.append_value(b"arrow").unwrap();
    let fixed_size_binary_array: FixedSizeBinaryArray = builder.finish();
    assert_eq!(
        &DataType::FixedSizeBinary(5),
        fixed_size_binary_array.data_type()
    );
    assert_eq!(3, fixed_size_binary_array.len());
    assert_eq!(1, fixed_size_binary_array.null_count());
    assert_eq!(10, fixed_size_binary_array.value_offset(2));
    assert_eq!(5, fixed_size_binary_array.value_length());
}
// `finish` resets a string builder for reuse.
#[test]
fn test_string_array_builder_finish() {
    let mut builder = StringBuilder::new(10);
    builder.append_value("hello").unwrap();
    builder.append_value("world").unwrap();
    let mut arr = builder.finish();
    assert_eq!(2, arr.len());
    assert_eq!(0, builder.len());
    builder.append_value("arrow").unwrap();
    arr = builder.finish();
    assert_eq!(1, arr.len());
    assert_eq!(0, builder.len());
}
// `append_value` accepts borrowed owned strings (&String derefs to &str).
#[test]
fn test_string_array_builder_append_string() {
    let mut builder = StringBuilder::new(20);
    let var = "hello".to_owned();
    builder.append_value(&var).unwrap();
    builder.append(true).unwrap();
    builder.append_value("world").unwrap();
    let array = builder.finish();
    let string_array = StringArray::from(array);
    assert_eq!(3, string_array.len());
    assert_eq!(0, string_array.null_count());
    assert_eq!("hello", string_array.value(0));
    assert_eq!("", string_array.value(1));
    assert_eq!("world", string_array.value(2));
    assert_eq!(5, string_array.value_offset(2));
    assert_eq!(5, string_array.value_length(2));
}
// --- StructBuilder tests ---------------------------------------------------

// Field builders are filled independently and slot validity is appended
// separately on the struct builder; verifies both child buffers and the
// struct-level null bitmap.
#[test]
fn test_struct_array_builder() {
    let string_builder = StringBuilder::new(4);
    let int_builder = Int32Builder::new(4);
    let mut fields = Vec::new();
    let mut field_builders = Vec::new();
    fields.push(Field::new("f1", DataType::Utf8, false));
    field_builders.push(Box::new(string_builder) as Box<ArrayBuilder>);
    fields.push(Field::new("f2", DataType::Int32, false));
    field_builders.push(Box::new(int_builder) as Box<ArrayBuilder>);
    let mut builder = StructBuilder::new(fields, field_builders);
    assert_eq!(2, builder.num_fields());
    let string_builder = builder
        .field_builder::<StringBuilder>(0)
        .expect("builder at field 0 should be string builder");
    string_builder.append_value("joe").unwrap();
    string_builder.append_null().unwrap();
    string_builder.append_null().unwrap();
    string_builder.append_value("mark").unwrap();
    let int_builder = builder
        .field_builder::<Int32Builder>(1)
        .expect("builder at field 1 should be int builder");
    int_builder.append_value(1).unwrap();
    int_builder.append_value(2).unwrap();
    int_builder.append_null().unwrap();
    int_builder.append_value(4).unwrap();
    builder.append(true).unwrap();
    builder.append(true).unwrap();
    builder.append_null().unwrap();
    builder.append(true).unwrap();
    let arr = builder.finish();
    let struct_data = arr.data();
    assert_eq!(4, struct_data.len());
    assert_eq!(1, struct_data.null_count());
    // 0b1011: slots 0, 1 and 3 valid, slot 2 null.
    assert_eq!(
        &Some(Bitmap::from(Buffer::from(&[11_u8]))),
        struct_data.null_bitmap()
    );
    // Offsets [0,3,3,3,7] show the two null string slots are zero-length.
    let expected_string_data = ArrayData::builder(DataType::Utf8)
        .len(4)
        .null_count(2)
        .null_bit_buffer(Buffer::from(&[9_u8]))
        .add_buffer(Buffer::from(&[0, 3, 3, 3, 7].to_byte_slice()))
        .add_buffer(Buffer::from("joemark".as_bytes()))
        .build();
    let expected_int_data = ArrayData::builder(DataType::Int32)
        .len(4)
        .null_count(1)
        .null_bit_buffer(Buffer::from(&[11_u8]))
        .add_buffer(Buffer::from(&[1, 2, 0, 4].to_byte_slice()))
        .build();
    assert_eq!(expected_string_data, arr.column(0).data());
    // TODO: implement equality for ArrayData
    assert_eq!(expected_int_data.len(), arr.column(1).data().len());
    assert_eq!(
        expected_int_data.null_count(),
        arr.column(1).data().null_count()
    );
    assert_eq!(
        expected_int_data.null_bitmap(),
        arr.column(1).data().null_bitmap()
    );
    let expected_value_buf = expected_int_data.buffers()[0].clone();
    let actual_value_buf = arr.column(1).data().buffers()[0].clone();
    // Only compare bytes of valid slots; null slots may hold arbitrary data.
    for i in 0..expected_int_data.len() {
        if !expected_int_data.is_null(i) {
            assert_eq!(
                expected_value_buf.data()[i * 4..(i + 1) * 4],
                actual_value_buf.data()[i * 4..(i + 1) * 4]
            );
        }
    }
}
// `finish` resets the struct builder and all its field builders for reuse.
#[test]
fn test_struct_array_builder_finish() {
    let int_builder = Int32Builder::new(10);
    let bool_builder = BooleanBuilder::new(10);
    let mut fields = Vec::new();
    let mut field_builders = Vec::new();
    fields.push(Field::new("f1", DataType::Int32, false));
    field_builders.push(Box::new(int_builder) as Box<ArrayBuilder>);
    fields.push(Field::new("f2", DataType::Boolean, false));
    field_builders.push(Box::new(bool_builder) as Box<ArrayBuilder>);
    let mut builder = StructBuilder::new(fields, field_builders);
    builder
        .field_builder::<Int32Builder>(0)
        .unwrap()
        .append_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        .unwrap();
    builder
        .field_builder::<BooleanBuilder>(1)
        .unwrap()
        .append_slice(&[
            false, true, false, true, false, true, false, true, false, true,
        ])
        .unwrap();
    // Append slot values - all are valid.
    for _ in 0..10 {
        assert!(builder.append(true).is_ok())
    }
    assert_eq!(10, builder.len());
    let arr = builder.finish();
    assert_eq!(10, arr.len());
    assert_eq!(0, builder.len());
    builder
        .field_builder::<Int32Builder>(0)
        .unwrap()
        .append_slice(&[1, 3, 5, 7, 9])
        .unwrap();
    builder
        .field_builder::<BooleanBuilder>(1)
        .unwrap()
        .append_slice(&[false, true, false, true, false])
        .unwrap();
    // Append slot values - all are valid.
    for _ in 0..5 {
        assert!(builder.append(true).is_ok())
    }
    assert_eq!(5, builder.len());
    let arr = builder.finish();
    assert_eq!(5, arr.len());
    assert_eq!(0, builder.len());
}
// Building from a schema creates correctly typed (including nested)
// field builders.
#[test]
fn test_struct_array_builder_from_schema() {
    let mut fields = Vec::new();
    fields.push(Field::new("f1", DataType::Float32, false));
    fields.push(Field::new("f2", DataType::Utf8, false));
    let mut sub_fields = Vec::new();
    sub_fields.push(Field::new("g1", DataType::Int32, false));
    sub_fields.push(Field::new("g2", DataType::Boolean, false));
    let struct_type = DataType::Struct(sub_fields);
    fields.push(Field::new("f3", struct_type, false));
    let mut builder = StructBuilder::from_schema(Schema::new(fields), 5);
    assert_eq!(3, builder.num_fields());
    assert!(builder.field_builder::<Float32Builder>(0).is_some());
    assert!(builder.field_builder::<StringBuilder>(1).is_some());
    assert!(builder.field_builder::<StructBuilder>(2).is_some());
}
// List fields are not yet supported by `from_schema` and must panic.
#[test]
#[should_panic(expected = "Data type List(Int64) is not currently supported")]
fn test_struct_array_builder_from_schema_unsupported_type() {
    let mut fields = Vec::new();
    fields.push(Field::new("f1", DataType::Int16, false));
    let list_type = DataType::List(Box::new(DataType::Int64));
    fields.push(Field::new("f2", list_type, false));
    let _ = StructBuilder::from_schema(Schema::new(fields), 5);
}
// Downcasting a field builder to the wrong type returns None, not a panic.
#[test]
fn test_struct_array_builder_field_builder_type_mismatch() {
    let int_builder = Int32Builder::new(10);
    let mut fields = Vec::new();
    let mut field_builders = Vec::new();
    fields.push(Field::new("f1", DataType::Int32, false));
    field_builders.push(Box::new(int_builder) as Box<ArrayBuilder>);
    let mut builder = StructBuilder::new(fields, field_builders);
    assert!(builder.field_builder::<BinaryBuilder>(0).is_none());
}
// --- Dictionary builder tests ----------------------------------------------

// Distinct values get sequential keys; nulls produce a null key slot.
#[test]
fn test_primitive_dictionary_builder() {
    let key_builder = PrimitiveBuilder::<UInt8Type>::new(3);
    let value_builder = PrimitiveBuilder::<UInt32Type>::new(2);
    let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
    builder.append(12345678).unwrap();
    builder.append_null().unwrap();
    builder.append(22345678).unwrap();
    let array = builder.finish();
    // Keys are strongly typed.
    let aks: Vec<_> = array.keys().collect();
    // Values are polymorphic and so require a downcast.
    let av = array.values();
    let ava: &UInt32Array = av.as_any().downcast_ref::<UInt32Array>().unwrap();
    let avs: &[u32] = ava.value_slice(0, array.values().len());
    assert_eq!(array.is_null(0), false);
    assert_eq!(array.is_null(1), true);
    assert_eq!(array.is_null(2), false);
    assert_eq!(aks, vec![Some(0), None, Some(1)]);
    assert_eq!(avs, &[12345678, 22345678]);
}
// Repeated strings are interned: the same key is reused for "def"/"abc".
#[test]
fn test_string_dictionary_builder() {
    let key_builder = PrimitiveBuilder::<Int8Type>::new(5);
    let value_builder = StringBuilder::new(2);
    let mut builder = StringDictionaryBuilder::new(key_builder, value_builder);
    builder.append("abc").unwrap();
    builder.append_null().unwrap();
    builder.append("def").unwrap();
    builder.append("def").unwrap();
    builder.append("abc").unwrap();
    let array = builder.finish();
    // Keys are strongly typed.
    let aks: Vec<_> = array.keys().collect();
    // Values are polymorphic and so require a downcast.
    let av = array.values();
    let ava: &StringArray = av.as_any().downcast_ref::<StringArray>().unwrap();
    assert_eq!(aks, vec![Some(0), None, Some(1), Some(1), Some(0)]);
    assert_eq!(ava.value(0), "abc");
    assert_eq!(ava.value(1), "def");
}
// Seeded dictionary indices are reused; unseen values ("ghi") extend it.
#[test]
fn test_string_dictionary_builder_with_existing_dictionary() {
    let dictionary =
        StringArray::try_from(vec![None, Some("def"), Some("abc")]).unwrap();
    let key_builder = PrimitiveBuilder::<Int8Type>::new(6);
    let mut builder =
        StringDictionaryBuilder::new_with_dictionary(key_builder, &dictionary)
            .unwrap();
    builder.append("abc").unwrap();
    builder.append_null().unwrap();
    builder.append("def").unwrap();
    builder.append("def").unwrap();
    builder.append("abc").unwrap();
    builder.append("ghi").unwrap();
    let array = builder.finish();
    // Keys are strongly typed.
    let aks: Vec<_> = array.keys().collect();
    // Values are polymorphic and so require a downcast.
    let av = array.values();
    let ava: &StringArray = av.as_any().downcast_ref::<StringArray>().unwrap();
    assert_eq!(aks, vec![Some(2), None, Some(1), Some(1), Some(2), Some(3)]);
    assert_eq!(ava.is_valid(0), false);
    assert_eq!(ava.value(1), "def");
    assert_eq!(ava.value(2), "abc");
    assert_eq!(ava.value(3), "ghi");
}
// A leading null in the seed dictionary reserves index 0 (never assigned),
// so the first appended value gets key 1.
#[test]
fn test_string_dictionary_builder_with_reserved_null_value() {
    let dictionary = StringArray::try_from(vec![None]).unwrap();
    let key_builder = PrimitiveBuilder::<Int16Type>::new(4);
    let mut builder =
        StringDictionaryBuilder::new_with_dictionary(key_builder, &dictionary)
            .unwrap();
    builder.append("abc").unwrap();
    builder.append_null().unwrap();
    builder.append("def").unwrap();
    builder.append("abc").unwrap();
    let array = builder.finish();
    assert_eq!(array.is_null(1), true);
    assert_eq!(array.is_valid(1), false);
    let keys: Int16Array = array.data().into();
    assert_eq!(keys.value(0), 1);
    assert_eq!(keys.is_null(1), true);
    // zero initialization is currently guaranteed by Buffer allocation and resizing
    assert_eq!(keys.value(1), 0);
    assert_eq!(keys.value(2), 2);
    assert_eq!(keys.value(3), 1);
}
// u8 keys cover values 0..=255, so 256 distinct entries fit exactly.
#[test]
fn test_primitive_dictionary_overflow() {
    let key_builder = PrimitiveBuilder::<UInt8Type>::new(257);
    let value_builder = PrimitiveBuilder::<UInt32Type>::new(257);
    let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
    // 256 unique keys.
    for i in 0..256 {
        builder.append(i + 1000).unwrap();
    }
    // The 257th distinct value would need key 256, which does not fit in a
    // u8, so a DictionaryKeyOverflowError is returned.
    assert_eq!(
        builder.append(1257),
        Err(ArrowError::DictionaryKeyOverflowError)
    );
}
}
|
//! Create textures and build texture atlas.
use std::collections::HashMap;
use std::collections::hash_map::Entry::{ Occupied, Vacant };
use std::fmt::{ Debug, Formatter, Error };
use std::path::{ Path, PathBuf };
use std::mem;
use gfx;
use image::{
self,
ImageBuffer,
RgbaImage,
DynamicImage,
ImageResult,
SubImage,
ImageError,
GenericImage,
Pixel
};
pub use gfx_texture::{ Texture, ImageSize, Settings };
/// Loads the image at `path` and returns it as an RGBA8 buffer,
/// converting from other pixel formats when necessary.
fn load_rgba8(path: &Path) -> ImageResult<RgbaImage> {
    image::open(path).map(|decoded| match decoded {
        // Already RGBA8: hand back the buffer unchanged.
        DynamicImage::ImageRgba8(rgba) => rgba,
        // Anything else (RGB, grayscale, ...) gets converted.
        other => other.to_rgba()
    })
}
/// An enumeration of ColorMap errors.
pub enum ColorMapError {
    /// The image opening error.
    Img(ImageError),
    /// The image size error: actual width, actual height and the path of
    /// the offending image (a ColorMap must be exactly 256x256).
    Size(u32, u32, String)
}
impl Debug for ColorMapError {
    /// Formats the error for diagnostics: image errors delegate to the
    /// underlying `ImageError`, size errors report the offending dimensions
    /// and path.
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        match *self {
            ColorMapError::Img(ref e) => e.fmt(f),
            // Write directly into the formatter instead of allocating an
            // intermediate String via `format!(...)` and formatting that.
            ColorMapError::Size(w, h, ref path) =>
                write!(f, "ColorMap expected 256x256, found {}x{} in '{}'", w, h, path)
        }
    }
}
impl From<ImageError> for ColorMapError {
    /// Wraps an image open/decode error, enabling `try!` propagation.
    fn from(img_err: ImageError) -> Self {
        ColorMapError::Img(img_err)
    }
}
/// A 256x256 image that stores colors.
// The 256x256 invariant is enforced by `ColorMap::from_path`.
pub struct ColorMap(RgbaImage);
impl ColorMap {
    /// Creates a new `ColorMap` from path.
    ///
    /// Fails with `ColorMapError::Img` if the image cannot be opened and
    /// with `ColorMapError::Size` if it is not exactly 256x256.
    pub fn from_path<P>(path: P) -> Result<Self, ColorMapError>
        where P: AsRef<Path>
    {
        let img = try!(load_rgba8(path.as_ref()));
        match img.dimensions() {
            (256, 256) => Ok(ColorMap(img)),
            (w, h) => Err(ColorMapError::Size(w, h, path.as_ref().display().to_string()))
        }
    }
    /// Gets RGB color from the color map.
    ///
    /// `x` and `y` are normalized coordinates; out-of-range inputs are
    /// clamped rather than rejected. Returns the RGB channels of the
    /// sampled pixel (alpha is discarded).
    pub fn get(&self, x: f32, y: f32) -> [u8; 3] {
        // Clamp to [0.0, 1.0].
        let x = x.max(0.0).min(1.0);
        let y = y.max(0.0).min(1.0);
        // Scale y from [0.0, 1.0] to [0.0, x], forming a triangle.
        let y = x * y;
        // Origin is in the bottom-right corner.
        let x = ((1.0 - x) * 255.0) as u8;
        let y = ((1.0 - y) * 255.0) as u8;
        // Take only the RGB channels of the RGBA pixel.
        let col = self.0.get_pixel(x as u32, y as u32).channels();
        [col[0], col[1], col[2]]
    }
}
/// Builds an atlas of textures.
pub struct AtlasBuilder {
image: RgbaImage,
// Base path for loading tiles.
path: PathBuf,
// Size of an individual tile.
unit_width: u32,
unit_height: u32,
// Size of the entirely occupied square, in tiles.
completed_tiles_size: u32,
// Position in the current strip.
position: u32,
// Position cache for loaded tiles (in pixels).
tile_positions: HashMap<String, (u32, u32)>,
// Lowest-alpha cache for rectangles in the atlas.
min_alpha_cache: HashMap<(u32, u32, u32, u32), u8>
}
impl AtlasBuilder {
    /// Creates a new `AtlasBuilder`.
    ///
    /// `path` is the base directory tiles are loaded from; `unit_width`
    /// and `unit_height` give the size of one tile in pixels. The backing
    /// image starts at 4x4 tiles and is doubled on demand by `load`.
    pub fn new<P>(path: P, unit_width: u32, unit_height: u32) -> Self
        where P: Into<PathBuf>
    {
        AtlasBuilder {
            image: ImageBuffer::new(unit_width * 4, unit_height * 4),
            path: path.into(),
            unit_width: unit_width,
            unit_height: unit_height,
            completed_tiles_size: 0,
            position: 0,
            tile_positions: HashMap::new(),
            min_alpha_cache: HashMap::new()
        }
    }

    /// Loads a file into the texture atlas.
    /// Checks if the file is loaded and returns position within the atlas.
    /// The name should be specified without file extension.
    /// PNG is the only supported format.
    ///
    /// # Panics
    ///
    /// Panics if the image cannot be loaded, if its width differs from
    /// `unit_width`, or if its height is not a multiple of `unit_height`.
    pub fn load(&mut self, name: &str) -> (u32, u32) {
        // Fast path: the tile was placed by an earlier call.
        if let Some(&pos) = self.tile_positions.get(name) {
            return pos
        }
        let mut path = self.path.join(name);
        path.set_extension("png");
        let img = load_rgba8(&path).unwrap();
        let (iw, ih) = img.dimensions();
        assert!(iw == self.unit_width);
        assert!((ih % self.unit_height) == 0);
        // Taller images hold extra frames stacked vertically; only the
        // first frame (top `unit_height` rows) is copied into the atlas.
        if ih > self.unit_height {
            println!("ignoring {} extra frames in '{}'", (ih / self.unit_height) - 1, name);
        }
        let (uw, uh) = (self.unit_width, self.unit_height);
        let (w, h) = self.image.dimensions();
        let size = self.completed_tiles_size;
        // Expand the image buffer if necessary. Tiles are placed in
        // L-shaped strips around the completed `size`x`size` square, so
        // growth is only needed at the start of a strip (position == 0).
        if self.position == 0 && (uw * size >= w || uh * size >= h) {
            // Double both dimensions and copy the old contents over.
            let old = mem::replace(&mut self.image, ImageBuffer::new(w * 2, h * 2));
            for ix in 0 .. w {
                for iy in 0 .. h {
                    *self.image.get_pixel_mut(ix, iy) = old[(ix, iy)];
                }
            }
            /*
            let mut dest = SubImage::new(&mut self.image, 0, 0, w, h);
            for ((_, _, a), b) in dest.pixels_mut().zip(old.pixels()) {
                *a = *b;
            }
            */
        }
        // Walk the strip: first along the bottom edge of the completed
        // square (y == size), then down its right edge (x == size),
        // ending on the corner tile.
        let (x, y) = if self.position < size {
            (self.position, size)
        } else {
            (size, self.position - size)
        };
        self.position += 1;
        // A strip around a `size` square holds 2 * size + 1 tiles; once
        // full, the completed square grows by one tile per side.
        if self.position >= size * 2 + 1 {
            self.position = 0;
            self.completed_tiles_size += 1;
        }
        {
            // Blit the first frame of the tile into its slot
            // (coordinates now in pixels, not tiles).
            let (x, y, w, h) = (x * uw, y * uh, uw, uh);
            for ix in 0 .. w {
                for iy in 0 .. h {
                    *self.image.get_pixel_mut(ix + x, iy + y) = img[(ix, iy)];
                }
            }
        }
        /*
        let mut dest = SubImage::new(&mut self.image, x * uw, y * uh, uw, uh);
        for ((_, _, a), b) in dest.pixels_mut().zip(img.pixels()) {
            *a = *b;
        }
        */
        // Record and return the pixel position of the new tile.
        *match self.tile_positions.entry(name.to_string()) {
            Occupied(entry) => entry.into_mut(),
            Vacant(entry) => entry.insert((x * uw, y * uh))
        }
    }

    /// Finds the minimum alpha value in a given sub texture of the image.
    ///
    /// Results are memoized per rectangle in `min_alpha_cache`.
    /// Returns 0 for an empty rectangle.
    pub fn min_alpha(&mut self, rect: [u32; 4]) -> u8 {
        let x = rect[0];
        let y = rect[1];
        let w = rect[2];
        let h = rect[3];
        if let Some(&alpha) = self.min_alpha_cache.get(&(x, y, w, h)) {
            return alpha
        }
        let tile = SubImage::new(&mut self.image, x, y, w, h);
        // Channel index 3 is alpha in RGBA.
        let min_alpha = tile.pixels().map(|(_, _, p)| p[3]).min().unwrap_or(0);
        self.min_alpha_cache.insert((x, y, w, h), min_alpha);
        min_alpha
    }

    /// Returns the complete texture atlas as a texture.
    pub fn complete<R, F>(self, factory: &mut F) -> Texture<R>
        where R: gfx::Resources, F: gfx::Factory<R>
    {
        Texture::from_image(factory, &self.image, false, true)
    }
}
Update to latest gfx_texture
[ci skip]
//! Create textures and build texture atlas.
use std::collections::HashMap;
use std::collections::hash_map::Entry::{ Occupied, Vacant };
use std::fmt::{ Debug, Formatter, Error };
use std::path::{ Path, PathBuf };
use std::mem;
use gfx;
use image::{
self,
ImageBuffer,
RgbaImage,
DynamicImage,
ImageResult,
SubImage,
ImageError,
GenericImage,
Pixel
};
pub use gfx_texture::{ Texture, ImageSize, Settings };
// Loads the image at `path` and normalizes it to an 8-bit RGBA buffer.
fn load_rgba8(path: &Path) -> ImageResult<RgbaImage> {
    image::open(path).map(|img| match img {
        // Already RGBA8: reuse the buffer without conversion.
        DynamicImage::ImageRgba8(img) => img,
        // Any other pixel format: convert (allocates a new buffer).
        img => img.to_rgba()
    })
}
/// An enumeration of ColorMap errors.
pub enum ColorMapError {
/// The image opening error.
Img(ImageError),
/// The image size error.
Size(u32, u32, String)
}
impl Debug for ColorMapError {
    // Delegates to the wrapped image error, or formats the size mismatch.
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        match *self {
            ColorMapError::Img(ref e) => e.fmt(f),
            // Builds the message in a String, then writes it out via
            // the String's Display impl.
            ColorMapError::Size(w, h, ref path) =>
                format!("ColorMap expected 256x256, found {}x{} in '{}'", w, h, path).fmt(f)
        }
    }
}
impl From<ImageError> for ColorMapError {
    /// Wraps an image opening/decoding error so `try!` can convert it.
    fn from(img_err: ImageError) -> Self {
        ColorMapError::Img(img_err)
    }
}
/// A 256x256 image that stores colors.
pub struct ColorMap(RgbaImage);
impl ColorMap {
    /// Creates a new `ColorMap` from path.
    ///
    /// # Errors
    ///
    /// Returns `ColorMapError::Img` if the image cannot be opened or
    /// decoded, and `ColorMapError::Size` if the decoded image is not
    /// exactly 256x256 pixels.
    pub fn from_path<P>(path: P) -> Result<Self, ColorMapError>
        where P: AsRef<Path>
    {
        let img = try!(load_rgba8(path.as_ref()));
        match img.dimensions() {
            (256, 256) => Ok(ColorMap(img)),
            (w, h) => Err(ColorMapError::Size(w, h, path.as_ref().display().to_string()))
        }
    }

    /// Gets RGB color from the color map.
    ///
    /// `x` and `y` are normalized coordinates; both are clamped to
    /// [0.0, 1.0] before sampling, and the alpha channel is dropped.
    pub fn get(&self, x: f32, y: f32) -> [u8; 3] {
        // Clamp to [0.0, 1.0].
        let x = x.max(0.0).min(1.0);
        let y = y.max(0.0).min(1.0);
        // Scale y from [0.0, 1.0] to [0.0, x], forming a triangle.
        let y = x * y;
        // Origin is in the bottom-right corner.
        let x = ((1.0 - x) * 255.0) as u8;
        let y = ((1.0 - y) * 255.0) as u8;
        // Casting back to u32 is lossless: both values fit in a u8.
        let col = self.0.get_pixel(x as u32, y as u32).channels();
        // Keep R, G, B; drop alpha.
        [col[0], col[1], col[2]]
    }
}
/// Builds an atlas of textures.
pub struct AtlasBuilder {
image: RgbaImage,
// Base path for loading tiles.
path: PathBuf,
// Size of an individual tile.
unit_width: u32,
unit_height: u32,
// Size of the entirely occupied square, in tiles.
completed_tiles_size: u32,
// Position in the current strip.
position: u32,
// Position cache for loaded tiles (in pixels).
tile_positions: HashMap<String, (u32, u32)>,
// Lowest-alpha cache for rectangles in the atlas.
min_alpha_cache: HashMap<(u32, u32, u32, u32), u8>
}
impl AtlasBuilder {
    /// Creates a new `AtlasBuilder`.
    ///
    /// `path` is the base directory tiles are loaded from; `unit_width`
    /// and `unit_height` give the size of one tile in pixels. The backing
    /// image starts at 4x4 tiles and is doubled on demand by `load`.
    pub fn new<P>(path: P, unit_width: u32, unit_height: u32) -> Self
        where P: Into<PathBuf>
    {
        AtlasBuilder {
            image: ImageBuffer::new(unit_width * 4, unit_height * 4),
            path: path.into(),
            unit_width: unit_width,
            unit_height: unit_height,
            completed_tiles_size: 0,
            position: 0,
            tile_positions: HashMap::new(),
            min_alpha_cache: HashMap::new()
        }
    }

    /// Loads a file into the texture atlas.
    /// Checks if the file is loaded and returns position within the atlas.
    /// The name should be specified without file extension.
    /// PNG is the only supported format.
    ///
    /// # Panics
    ///
    /// Panics if the image cannot be loaded, if its width differs from
    /// `unit_width`, or if its height is not a multiple of `unit_height`.
    pub fn load(&mut self, name: &str) -> (u32, u32) {
        // Fast path: the tile was placed by an earlier call.
        if let Some(&pos) = self.tile_positions.get(name) {
            return pos
        }
        let mut path = self.path.join(name);
        path.set_extension("png");
        let img = load_rgba8(&path).unwrap();
        let (iw, ih) = img.dimensions();
        assert!(iw == self.unit_width);
        assert!((ih % self.unit_height) == 0);
        // Taller images hold extra frames stacked vertically; only the
        // first frame (top `unit_height` rows) is copied into the atlas.
        if ih > self.unit_height {
            println!("ignoring {} extra frames in '{}'", (ih / self.unit_height) - 1, name);
        }
        let (uw, uh) = (self.unit_width, self.unit_height);
        let (w, h) = self.image.dimensions();
        let size = self.completed_tiles_size;
        // Expand the image buffer if necessary. Tiles are placed in
        // L-shaped strips around the completed `size`x`size` square, so
        // growth is only needed at the start of a strip (position == 0).
        if self.position == 0 && (uw * size >= w || uh * size >= h) {
            // Double both dimensions and copy the old contents over.
            let old = mem::replace(&mut self.image, ImageBuffer::new(w * 2, h * 2));
            for ix in 0 .. w {
                for iy in 0 .. h {
                    *self.image.get_pixel_mut(ix, iy) = old[(ix, iy)];
                }
            }
            /*
            let mut dest = SubImage::new(&mut self.image, 0, 0, w, h);
            for ((_, _, a), b) in dest.pixels_mut().zip(old.pixels()) {
                *a = *b;
            }
            */
        }
        // Walk the strip: first along the bottom edge of the completed
        // square (y == size), then down its right edge (x == size),
        // ending on the corner tile.
        let (x, y) = if self.position < size {
            (self.position, size)
        } else {
            (size, self.position - size)
        };
        self.position += 1;
        // A strip around a `size` square holds 2 * size + 1 tiles; once
        // full, the completed square grows by one tile per side.
        if self.position >= size * 2 + 1 {
            self.position = 0;
            self.completed_tiles_size += 1;
        }
        {
            // Blit the first frame of the tile into its slot
            // (coordinates now in pixels, not tiles).
            let (x, y, w, h) = (x * uw, y * uh, uw, uh);
            for ix in 0 .. w {
                for iy in 0 .. h {
                    *self.image.get_pixel_mut(ix + x, iy + y) = img[(ix, iy)];
                }
            }
        }
        /*
        let mut dest = SubImage::new(&mut self.image, x * uw, y * uh, uw, uh);
        for ((_, _, a), b) in dest.pixels_mut().zip(img.pixels()) {
            *a = *b;
        }
        */
        // Record and return the pixel position of the new tile.
        *match self.tile_positions.entry(name.to_string()) {
            Occupied(entry) => entry.into_mut(),
            Vacant(entry) => entry.insert((x * uw, y * uh))
        }
    }

    /// Finds the minimum alpha value in a given sub texture of the image.
    ///
    /// Results are memoized per rectangle in `min_alpha_cache`.
    /// Returns 0 for an empty rectangle.
    pub fn min_alpha(&mut self, rect: [u32; 4]) -> u8 {
        let x = rect[0];
        let y = rect[1];
        let w = rect[2];
        let h = rect[3];
        if let Some(&alpha) = self.min_alpha_cache.get(&(x, y, w, h)) {
            return alpha
        }
        let tile = SubImage::new(&mut self.image, x, y, w, h);
        // Channel index 3 is alpha in RGBA.
        let min_alpha = tile.pixels().map(|(_, _, p)| p[3]).min().unwrap_or(0);
        self.min_alpha_cache.insert((x, y, w, h), min_alpha);
        min_alpha
    }

    /// Returns the complete texture atlas as a texture.
    pub fn complete<R, F>(self, factory: &mut F) -> Texture<R>
        where R: gfx::Resources, F: gfx::Factory<R>
    {
        Texture::from_image(factory, &self.image, false, false, true)
    }
}
|
// TODO: Make TextureBuilder completely ignorant of the Texture struct,
// load Texture2d's instead.
use glium::backend::Facade;
use glium::texture::{Texture2d, RawImage2d};
use std::sync::{Arc, Weak};
use std::collections::HashMap;
use std::path::PathBuf;
use texture_flags::*;
use std::fs::PathExt;
use image;
#[derive(Debug, Clone)]
pub struct Texture {
texture: Arc<Texture2d>,
surface_flags: SurfaceFlags,
}
pub trait CreateTexture {
fn create_texture(self, flags: SurfaceFlags) -> Texture;
}
impl CreateTexture for Arc<Texture2d> {
fn create_texture(self, flags: SurfaceFlags) -> Texture {
Texture {
texture: self,
surface_flags: flags,
}
}
}
impl CreateTexture for Texture2d {
fn create_texture(self, flags: SurfaceFlags) -> Texture {
Arc::new(self).create_texture(flags)
}
}
// TODO: make this take a list of root directories
// -- also, allow sub-builders so that builders
// for different maps can share cache information
// -- use enum with root (Vec<pathbuf>, facade, cache) or
// inherit(texturebuilder, pathbuf)
pub struct TextureBuilder<'a, T: Facade + 'a> {
roots: Arc<Vec<PathBuf>>,
missing: Option<String>,
facade: &'a T,
cache: HashMap<String, Weak<Texture2d>>,
}
impl<'a, T: Facade + 'a> TextureBuilder<'a, T> {
    /// Creates a new `TextureBuilder`.
    ///
    /// `a` is the list of root directories searched by `load`; `facade`
    /// is the glium context textures are uploaded to; `ms` optionally
    /// names a fallback ("missing texture") image that `load_async`
    /// falls back to when a path cannot be resolved.
    pub fn new<A: Into<PathBuf>, I: IntoIterator<Item=A>>(
        a: I, facade: &'a T, ms: Option<String>
    ) -> TextureBuilder<'a, T> {
        TextureBuilder {
            roots: Arc::new(
                a.into_iter().map(|e| e.into()).collect::<Vec<_>>()
            ),
            // `ms` is owned and not used again: move it, don't clone it.
            missing: ms,
            facade: facade,
            cache: HashMap::new()
        }
    }
    /// Resolves `path` against each root in turn, trying every known
    /// image extension, and returns the format plus the real on-disk
    /// path of the first existing file found.
    fn get_real_path_and_ext(
        roots: &[PathBuf],
        path: &str
    ) -> Option<(image::ImageFormat, PathBuf)> {
        use image::ImageFormat;
        use image::ImageFormat::*;
        // Maps a format to the file extensions it is probed with.
        fn get_extensions(i: &ImageFormat) -> &'static [&'static str] {
            static PNG_EXT: [&'static str; 1] = ["png"];
            static JPEG_EXT: [&'static str; 2] = ["jpeg", "jpg"];
            static GIF_EXT: [&'static str; 1] = ["gif"];
            static WEBP_EXT: [&'static str; 1] = ["webp"];
            static PPM_EXT: [&'static str; 1] = ["ppm"];
            static TIFF_EXT: [&'static str; 1] = ["tiff"];
            static TGA_EXT: [&'static str; 1] = ["tga"];
            static BMP_EXT: [&'static str; 1] = ["bmp"];
            static ICO_EXT: [&'static str; 1] = ["ico"];
            match *i {
                PNG => &PNG_EXT,
                JPEG => &JPEG_EXT,
                GIF => &GIF_EXT,
                WEBP => &WEBP_EXT,
                PPM => &PPM_EXT,
                TIFF => &TIFF_EXT,
                TGA => &TGA_EXT,
                BMP => &BMP_EXT,
                ICO => &ICO_EXT,
            }
        }
        for root in roots {
            let root: PathBuf = root.join(path);
            // The final path component is reused as the stem for each
            // candidate extension; bail out if it is not valid UTF-8.
            let file_name: String =
                if let Some(Some(f)) = root.file_name().map(|o| o.to_str()) {
                    f.into()
                } else {
                    return None
                };
            // `.iter()` makes the by-reference iteration explicit (the
            // old `into_iter()` on an array yielded references anyway).
            for ex in [PNG, JPEG, GIF, WEBP, PPM, TIFF, TGA, BMP, ICO].iter() {
                let extensions = get_extensions(ex);
                for str_ex in extensions {
                    let out = root.with_file_name(format!("{}.{}", file_name, str_ex));
                    // `out` is already a `PathBuf`; returning it directly
                    // avoids the redundant `to_path_buf()` clone.
                    if out.is_file() { return Some((*ex, out)); }
                }
            }
        }
        None
    }
    // Looks `key` up in the cache, upgrading the weak reference if the
    // texture is still alive. Takes `&str` so callers need not allocate.
    fn find_in_cache(&self, key: &str) -> Option<Arc<Texture2d>> {
        self.cache.get(key).and_then(|weak| weak.upgrade())
    }
    /// Loads the texture at `path`, returning a cached copy when one is
    /// still alive. Returns `None` if the file cannot be resolved,
    /// decoded, or uploaded.
    pub fn load(
        &mut self, path: &str
    ) -> Option<Arc<Texture2d>> {
        self.find_in_cache(path).or_else(||
            Self::load_inner(self.roots.clone(), path).and_then(|image|
                Texture2d::new(self.facade, image).ok()
                    .map(|t| {
                        let out = Arc::new(t);
                        // Cache a weak handle so the texture can still be
                        // freed once all strong references are dropped.
                        self.cache.insert(path.into(), Arc::downgrade(&out));
                        out
                    })
            )
        )
    }
    /// Loads many textures: cache hits are resolved synchronously, misses
    /// are decoded on worker threads and then uploaded on this thread.
    /// The result vector is parallel to `many`: entry `i` corresponds to
    /// `many[i]`, `None` on failure.
    pub fn load_async(
        &mut self, many: Vec<String>
    ) -> Vec<Option<Arc<Texture2d>>> {
        use eventual::*;
        use itertools::*;
        // Pair every request with its index and its cache lookup result.
        let cached = many.iter()
            .enumerate()
            .map(|(i, path)|
                (i, path.clone(), self.find_in_cache(path))
            )
            .collect::<Vec<_>>();
        // Spawn one future per cache miss; each decodes the image (or
        // the configured "missing" fallback) off-thread.
        let promises =
            cached.iter()
                .cloned()
                .filter_map(|(n, path, opt)|
                    if opt.is_none() {
                        let rclone = self.roots.clone();
                        let msclone = self.missing.clone();
                        Some(Future::spawn(move || {
                            let load = Self::load_inner(rclone.clone(), &path)
                                .or_else(||
                                    msclone.and_then(|inner|
                                        Self::load_inner(rclone, &inner)
                                    )
                                );
                            (
                                n,
                                path,
                                load,
                            )
                        }))
                    } else {
                        None
                    }
                )
                .collect::<Vec<_>>();
        // Wait for every decode, then upload each raw image on this
        // thread (GL uploads must happen where the facade lives) and
        // record it in the cache.
        let textures = join(
            promises
        ).await()
            .unwrap()
            .into_iter()
            .map(|(n, path, raw)|
                (
                    n,
                    raw.and_then(|image|
                        Texture2d::new(self.facade, image).ok()
                            .map(|t| {
                                let out = Arc::new(t);
                                self.cache.insert(path.into(), Arc::downgrade(&out));
                                out
                            })
                    ),
                )
            )
            .collect::<Vec<_>>();
        // Merge cache hits and fresh loads back into request order.
        cached.into_iter()
            .filter_map(|(n, _, maybe_tex)|
                maybe_tex.map(|t| (n, Some(t)))
            )
            .chain(textures.into_iter())
            .sorted_by(|a, b| a.0.cmp(&b.0))
            .into_iter()
            .zip(0..)
            .map(|((n, t), i)| {
                debug_assert!(n==i);
                t
            })
            .collect::<Vec<_>>()
    }
    /// Resolves and decodes `path` into raw RGBA pixel data (rows
    /// reversed for OpenGL's bottom-up convention). Pure CPU work, safe
    /// to run on a worker thread. Returns `None` on any resolution, I/O
    /// or decode failure.
    fn load_inner<'b: 'a>(
        roots: Arc<Vec<PathBuf>>, path: &str
    ) -> Option<RawImage2d<'b, u8>> {
        use std::io::BufReader;
        use std::fs::File;
        let (ext, real_path) =
            if let Some(tup) = Self::get_real_path_and_ext(&*roots, path) {
                tup
            } else {
                return None
            };
        let f = if let Ok(a) = File::open(&real_path) {
            a
        } else {
            return None
        };
        let reader = BufReader::new(f);
        let raw = if let Ok(a) = image::load(
            reader,
            ext
        ) {
            a.to_rgba()
        } else {
            return None
        };
        let image_dimensions = raw.dimensions();
        Some(
            RawImage2d::from_raw_rgba_reversed(
                raw.into_raw(), image_dimensions
            )
        )
    }
}
Add texturebuilder inheritance
// TODO: Make TextureBuilder completely ignorant of the Texture struct,
// load Texture2d's instead.
use glium::backend::Facade;
use glium::texture::{Texture2d, RawImage2d};
use std::sync::{Arc, Weak, RwLock};
use std::collections::HashMap;
use std::path::PathBuf;
use texture_flags::*;
use std::fs::PathExt;
use image;
#[derive(Debug, Clone)]
pub struct Texture {
texture: Arc<Texture2d>,
surface_flags: SurfaceFlags,
}
pub trait CreateTexture {
fn create_texture(self, flags: SurfaceFlags) -> Texture;
}
impl CreateTexture for Arc<Texture2d> {
fn create_texture(self, flags: SurfaceFlags) -> Texture {
Texture {
texture: self,
surface_flags: flags,
}
}
}
impl CreateTexture for Texture2d {
fn create_texture(self, flags: SurfaceFlags) -> Texture {
Arc::new(self).create_texture(flags)
}
}
// TODO: make this take a list of root directories
// -- also, allow sub-builders so that builders
// for different maps can share cache information
// -- use enum with root (Vec<pathbuf>, facade, cache) or
// inherit(texturebuilder, pathbuf)
pub struct TextureBuilder<'a, T: Facade + 'a> {
roots: Arc<Vec<PathBuf>>,
missing: Option<String>,
facade: &'a T,
cache: Arc<RwLock<HashMap<String, Weak<Texture2d>>>>,
}
impl<'a, T: Facade + 'a> TextureBuilder<'a, T> {
    /// Creates a new root `TextureBuilder` over the given search roots.
    ///
    /// `ms` optionally names a fallback ("missing texture") image that
    /// `load_async` falls back to when a path cannot be resolved.
    pub fn new<A: Into<PathBuf>, I: IntoIterator<Item=A>>(
        a: I, facade: &'a T, ms: Option<String>
    ) -> TextureBuilder<'a, T> {
        TextureBuilder {
            roots: Arc::new(
                a.into_iter().map(|e| e.into()).collect::<Vec<_>>()
            ),
            missing: ms,
            facade: facade,
            cache: Arc::new(RwLock::new(HashMap::new())),
        }
    }
    /// Creates a child builder that shares `parent`'s cache and facade
    /// but appends the extra search roots `a` after the parent's roots.
    pub fn inherit<A: Into<PathBuf>, I: IntoIterator<Item=A>>(
        parent: &TextureBuilder<'a, T>, a: I
    ) -> TextureBuilder<'a, T> {
        TextureBuilder {
            roots:
                Arc::new(
                    parent.roots.iter()
                        .cloned()
                        .chain(a.into_iter().map(|e| e.into()))
                        .collect()
                ),
            missing: parent.missing.clone(),
            facade: parent.facade,
            // Shared handle: textures loaded through either builder are
            // visible to both.
            cache: parent.cache.clone(),
        }
    }
    /// Resolves `path` against each root in turn, trying every known
    /// image extension, and returns the format plus the real on-disk
    /// path of the first existing file found.
    fn get_real_path_and_ext(
        roots: &[PathBuf],
        path: &str
    ) -> Option<(image::ImageFormat, PathBuf)> {
        use image::ImageFormat;
        use image::ImageFormat::*;
        // Maps a format to the file extensions it is probed with.
        fn get_extensions(i: &ImageFormat) -> &'static [&'static str] {
            static PNG_EXT: [&'static str; 1] = ["png"];
            static JPEG_EXT: [&'static str; 2] = ["jpeg", "jpg"];
            static GIF_EXT: [&'static str; 1] = ["gif"];
            static WEBP_EXT: [&'static str; 1] = ["webp"];
            static PPM_EXT: [&'static str; 1] = ["ppm"];
            static TIFF_EXT: [&'static str; 1] = ["tiff"];
            static TGA_EXT: [&'static str; 1] = ["tga"];
            static BMP_EXT: [&'static str; 1] = ["bmp"];
            static ICO_EXT: [&'static str; 1] = ["ico"];
            match *i {
                PNG => &PNG_EXT,
                JPEG => &JPEG_EXT,
                GIF => &GIF_EXT,
                WEBP => &WEBP_EXT,
                PPM => &PPM_EXT,
                TIFF => &TIFF_EXT,
                TGA => &TGA_EXT,
                BMP => &BMP_EXT,
                ICO => &ICO_EXT,
            }
        }
        for root in roots {
            let root: PathBuf = root.join(path);
            // The final path component is reused as the stem for each
            // candidate extension; bail out if it is not valid UTF-8.
            let file_name: String =
                if let Some(Some(f)) = root.file_name().map(|o| o.to_str()) {
                    f.into()
                } else {
                    return None
                };
            for ex in [PNG, JPEG, GIF, WEBP, PPM, TIFF, TGA, BMP, ICO].into_iter() {
                let extensions = get_extensions(&ex);
                for str_ex in extensions {
                    let out = root.with_file_name(format!("{}.{}", file_name, str_ex));
                    if out.is_file() { return Some((*ex, out.to_path_buf())); }
                }
            }
        }
        None
    }
    /// Loads the texture at `path`, returning a cached copy when one is
    /// still alive. Returns `None` if the file cannot be resolved,
    /// decoded, or uploaded.
    pub fn load(
        &mut self, path: &str
    ) -> Option<Arc<Texture2d>> {
        // Read lock only for the lookup; the write lock is taken just
        // long enough to insert on a miss.
        self.cache.read().unwrap()
            .get(path)
            .and_then(|weak| weak.upgrade())
            .or_else(||
                Self::load_inner(self.roots.clone(), path).and_then(|image|
                    Texture2d::new(self.facade, image).ok()
                        .map(|t| {
                            let out = Arc::new(t);
                            // Cache a weak handle so the texture can be
                            // freed once all strong references drop.
                            self.cache.write().unwrap()
                                .insert(path.into(), Arc::downgrade(&out));
                            out
                        })
                )
            )
    }
    /// Loads many textures: cache hits are resolved synchronously, misses
    /// are decoded on worker threads and then uploaded on this thread.
    /// The result vector is parallel to `many`: entry `i` corresponds to
    /// `many[i]`, `None` on failure.
    pub fn load_async(
        &mut self, many: Vec<String>
    ) -> Vec<Option<Arc<Texture2d>>> {
        use eventual::*;
        use itertools::*;
        // Pair every request with its index and its cache lookup result,
        // holding the read lock only for this pass.
        let cached;
        {
            let cache = self.cache.read().unwrap();
            cached = many.iter()
                .enumerate()
                .map(|(i, path)|
                    (
                        i,
                        path.clone(),
                        cache.get(path).and_then(|weak| weak.upgrade())
                    )
                )
                .collect::<Vec<_>>();
        }
        // NOTE(review): this write lock is held across the blocking
        // `join(...).await()` below, so concurrent `load` calls on other
        // threads (via the shared cache) stall until every decode
        // finishes — confirm this is intended.
        let mut cache = self.cache.write().unwrap();
        // Spawn one future per cache miss; each decodes the image (or
        // the configured "missing" fallback) off-thread.
        let promises =
            cached.iter()
                .cloned()
                .filter_map(|(n, path, opt)|
                    if opt.is_none() {
                        let rclone = self.roots.clone();
                        let msclone = self.missing.clone();
                        Some(Future::spawn(move || {
                            let load = Self::load_inner(rclone.clone(), &path)
                                .or_else(||
                                    msclone.and_then(|inner|
                                        Self::load_inner(rclone, &inner)
                                    )
                                );
                            (
                                n,
                                path,
                                load,
                            )
                        }))
                    } else {
                        None
                    }
                )
                .collect::<Vec<_>>();
        // Wait for every decode, then upload each raw image on this
        // thread (GL uploads must happen where the facade lives) and
        // record it in the cache.
        let textures = join(
            promises
        ).await()
            .unwrap()
            .into_iter()
            .map(|(n, path, raw)|
                (
                    n,
                    raw.and_then(|image|
                        Texture2d::new(self.facade, image).ok()
                            .map(|t| {
                                let out = Arc::new(t);
                                cache.insert(path.into(), Arc::downgrade(&out));
                                out
                            })
                    ),
                )
            )
            .collect::<Vec<_>>();
        // Merge cache hits and fresh loads back into request order.
        cached.into_iter()
            .filter_map(|(n, _, maybe_tex)|
                maybe_tex.map(|t| (n, Some(t)))
            )
            .chain(textures.into_iter())
            .sorted_by(|a, b| a.0.cmp(&b.0))
            .into_iter()
            .zip(0..)
            .map(|((n, t), i)| {
                debug_assert!(n==i);
                t
            })
            .collect::<Vec<_>>()
    }
    /// Resolves and decodes `path` into raw RGBA pixel data (rows
    /// reversed for OpenGL's bottom-up convention). Pure CPU work, safe
    /// to run on a worker thread. Returns `None` on any resolution, I/O
    /// or decode failure.
    fn load_inner<'b: 'a>(
        roots: Arc<Vec<PathBuf>>, path: &str
    ) -> Option<RawImage2d<'b, u8>> {
        use std::io::BufReader;
        use std::fs::File;
        let (ext, real_path) =
            if let Some(tup) = Self::get_real_path_and_ext(&*roots, path) {
                tup
            } else {
                return None
            };
        let f = if let Ok(a) = File::open(&real_path) {
            a
        } else {
            return None
        };
        let reader = BufReader::new(f);
        let raw = if let Ok(a) = image::load(
            reader,
            ext
        ) {
            a.to_rgba()
        } else {
            return None
        };
        let image_dimensions = raw.dimensions();
        Some(
            RawImage2d::from_raw_rgba_reversed(
                raw.into_raw(), image_dimensions
            )
        )
    }
}
|
use gfx;
use image;
use image::{
DynamicImage,
GenericImage,
RgbaImage,
};
use texture_lib::ImageSize;
/// Represents a texture.
pub struct Texture<R: gfx::Resources> {
/// A handle to the Gfx texture.
pub handle: gfx::TextureHandle<R>,
}
impl<R: gfx::Resources> Texture<R> {
    /// Creates a texture from path.
    ///
    /// Opens the image, normalizes it to RGBA8, and uploads it as a
    /// single-level 2D texture.
    ///
    /// # Errors
    ///
    /// Returns a message string if the image cannot be opened/decoded.
    // NOTE(review): `Path` / `filename_str` belong to the old_path API,
    // which is not imported in this revision of the file — confirm the
    // file-level import (added in a later revision).
    pub fn from_path<D: gfx::Factory<R>>(
        device: &mut D,
        path: &Path
    ) -> Result<Self, String> {
        let img = match image::open(path) {
            Ok(img) => img,
            Err(e) => return Err(format!("Could not load '{:?}': {:?}",
                path.filename_str().unwrap(), e)),
        };
        // Normalize to RGBA8, reusing the buffer when already RGBA8.
        let img = match img {
            DynamicImage::ImageRgba8(img) => img,
            x => x.to_rgba()
        };
        let (width, height) = img.dimensions();
        let texture_info = gfx::tex::TextureInfo {
            width: width as u16,
            height: height as u16,
            depth: 1,
            levels: 1,
            kind: gfx::tex::TextureKind::Texture2D,
            format: gfx::tex::RGBA8,
        };
        let image_info = texture_info.to_image_info();
        let texture = device.create_texture(texture_info).ok().unwrap();
        device.update_texture(&texture, &image_info,
            img.as_slice())
            .ok().unwrap();
        Ok(Texture {
            handle: texture
        })
    }
    /// Creates a texture from image.
    ///
    /// # Panics
    ///
    /// Panics if texture creation or the pixel upload fails.
    pub fn from_image<D: gfx::Factory<R>>(
        device: &mut D,
        image: &RgbaImage
    ) -> Self {
        let (width, height) = image.dimensions();
        let texture_info = gfx::tex::TextureInfo {
            width: width as u16,
            height: height as u16,
            depth: 1,
            levels: 1,
            kind: gfx::tex::TextureKind::Texture2D,
            format: gfx::tex::RGBA8,
        };
        let image_info = texture_info.to_image_info();
        let texture = device.create_texture(texture_info).ok().unwrap();
        device.update_texture(&texture, &image_info,
            image.as_slice())
            .ok().unwrap();
        Texture {
            handle: texture
        }
    }
    /// Creates a texture from image and generates mipmap.
    pub fn from_image_with_mipmap<D: gfx::Factory<R>>(
        device: &mut D,
        image: &RgbaImage
    ) -> Self {
        let texture = Texture::from_image(device, image);
        device.generate_mipmap(&texture.handle);
        texture
    }
    /// Creates texture from memory alpha.
    ///
    /// Expands a single-channel alpha buffer into white RGBA pixels
    /// (255, 255, 255, alpha). Zero dimensions are clamped up to 1.
    pub fn from_memory_alpha<D: gfx::Factory<R>>(
        device: &mut D,
        buffer: &[u8],
        width: u32,
        height: u32,
    ) -> Self {
        use std::cmp::max;
        let width = max(width, 1);
        let height = max(height, 1);
        let mut pixels = Vec::new();
        for alpha in buffer.iter() {
            pixels.push(255);
            pixels.push(255);
            pixels.push(255);
            pixels.push(*alpha);
        }
        let texture_info = gfx::tex::TextureInfo {
            width: width as u16,
            height: height as u16,
            depth: 1,
            levels: 1,
            kind: gfx::tex::TextureKind::Texture2D,
            format: gfx::tex::RGBA8,
        };
        let image_info = texture_info.to_image_info();
        let texture = device.create_texture(texture_info).ok().unwrap();
        device.update_texture(&texture, &image_info,
            &pixels)
            .ok().unwrap();
        Texture {
            handle: texture
        }
    }
    /// Updates the texture with an image.
    ///
    /// The image must match the texture's stored dimensions.
    pub fn update<D: gfx::Factory<R>>(&mut self, device: &mut D, image: &RgbaImage) {
        device.update_texture(&self.handle,
            &self.handle.get_info().to_image_info(),
            image.as_slice()
        ).ok().unwrap();
    }
}
impl<R: gfx::Resources> ImageSize for Texture<R> {
    /// Returns the texture's (width, height) in pixels, read from the
    /// Gfx texture info (stored as u16, widened to u32).
    #[inline(always)]
    fn get_size(&self) -> (u32, u32) {
        let info = self.handle.get_info();
        (info.width as u32, info.height as u32)
    }
}
Fix old_path import
use std::old_path::*;
use gfx;
use image;
use image::{
DynamicImage,
GenericImage,
RgbaImage,
};
use texture_lib::ImageSize;
/// Represents a texture.
pub struct Texture<R: gfx::Resources> {
/// A handle to the Gfx texture.
pub handle: gfx::TextureHandle<R>,
}
impl<R: gfx::Resources> Texture<R> {
/// Creates a texture from path.
pub fn from_path<D: gfx::Factory<R>>(
device: &mut D,
path: &Path
) -> Result<Self, String> {
let img = match image::open(path) {
Ok(img) => img,
Err(e) => return Err(format!("Could not load '{:?}': {:?}",
path.filename_str().unwrap(), e)),
};
let img = match img {
DynamicImage::ImageRgba8(img) => img,
x => x.to_rgba()
};
let (width, height) = img.dimensions();
let texture_info = gfx::tex::TextureInfo {
width: width as u16,
height: height as u16,
depth: 1,
levels: 1,
kind: gfx::tex::TextureKind::Texture2D,
format: gfx::tex::RGBA8,
};
let image_info = texture_info.to_image_info();
let texture = device.create_texture(texture_info).ok().unwrap();
device.update_texture(&texture, &image_info,
img.as_slice())
.ok().unwrap();
Ok(Texture {
handle: texture
})
}
/// Creates a texture from image.
pub fn from_image<D: gfx::Factory<R>>(
device: &mut D,
image: &RgbaImage
) -> Self {
let (width, height) = image.dimensions();
let texture_info = gfx::tex::TextureInfo {
width: width as u16,
height: height as u16,
depth: 1,
levels: 1,
kind: gfx::tex::TextureKind::Texture2D,
format: gfx::tex::RGBA8,
};
let image_info = texture_info.to_image_info();
let texture = device.create_texture(texture_info).ok().unwrap();
device.update_texture(&texture, &image_info,
image.as_slice())
.ok().unwrap();
Texture {
handle: texture
}
}
/// Creates a texture from image and generates mipmap.
pub fn from_image_with_mipmap<D: gfx::Factory<R>>(
device: &mut D,
image: &RgbaImage
) -> Self {
let texture = Texture::from_image(device, image);
device.generate_mipmap(&texture.handle);
texture
}
/// Creates texture from memory alpha.
pub fn from_memory_alpha<D: gfx::Factory<R>>(
device: &mut D,
buffer: &[u8],
width: u32,
height: u32,
) -> Self {
use std::cmp::max;
let width = max(width, 1);
let height = max(height, 1);
let mut pixels = Vec::new();
for alpha in buffer.iter() {
pixels.push(255);
pixels.push(255);
pixels.push(255);
pixels.push(*alpha);
}
let texture_info = gfx::tex::TextureInfo {
width: width as u16,
height: height as u16,
depth: 1,
levels: 1,
kind: gfx::tex::TextureKind::Texture2D,
format: gfx::tex::RGBA8,
};
let image_info = texture_info.to_image_info();
let texture = device.create_texture(texture_info).ok().unwrap();
device.update_texture(&texture, &image_info,
&pixels)
.ok().unwrap();
Texture {
handle: texture
}
}
/// Updates the texture with an image.
pub fn update<D: gfx::Factory<R>>(&mut self, device: &mut D, image: &RgbaImage) {
device.update_texture(&self.handle,
&self.handle.get_info().to_image_info(),
image.as_slice()
).ok().unwrap();
}
}
impl<R: gfx::Resources> ImageSize for Texture<R> {
#[inline(always)]
fn get_size(&self) -> (u32, u32) {
let info = self.handle.get_info();
(info.width as u32, info.height as u32)
}
}
|
use crate::arena::*;
use crate::atom_table::*;
use crate::parser::ast::*;
use crate::parser::char_reader::*;
use crate::read::*;
use crate::machine::heap::*;
use crate::machine::machine_errors::*;
use crate::machine::machine_indices::*;
use crate::machine::machine_state::*;
use crate::types::*;
pub use modular_bitfield::prelude::*;
use std::cmp::Ordering;
use std::error::Error;
use std::fmt;
use std::fs::{File, OpenOptions};
use std::hash::{Hash};
use std::io;
use std::io::{Cursor, ErrorKind, Read, Seek, SeekFrom, Write};
use std::mem;
use std::net::{TcpStream, Shutdown};
use std::ops::{Deref, DerefMut};
use std::ptr;
use native_tls::TlsStream;
#[derive(Debug, BitfieldSpecifier, Clone, Copy, PartialEq, Eq, Hash)]
#[bits = 1]
pub enum StreamType {
Binary,
Text,
}
impl StreamType {
    /// Atom naming the stream's type, e.g. `binary_stream`/`text_stream`.
    #[inline]
    pub(crate) fn as_atom(&self) -> Atom {
        match self {
            StreamType::Binary => atom!("binary_stream"),
            StreamType::Text => atom!("text_stream"),
        }
    }
    /// Atom used as the stream-property value, e.g. `binary`/`text`.
    #[inline]
    pub(crate) fn as_property_atom(&self) -> Atom {
        match self {
            StreamType::Binary => atom!("binary"),
            StreamType::Text => atom!("text"),
        }
    }
    /// Returns the opposite stream type (Binary <-> Text).
    #[inline]
    pub(crate) fn other(self) -> StreamType {
        match self {
            StreamType::Binary => StreamType::Text,
            StreamType::Text => StreamType::Binary,
        }
    }
}
#[derive(Debug, BitfieldSpecifier, Clone, Copy, PartialEq, Eq, Hash)]
#[bits = 2]
pub enum EOFAction {
EOFCode,
Error,
Reset,
}
#[derive(Debug, BitfieldSpecifier, Copy, Clone, PartialEq)]
#[bits = 2]
pub(crate) enum AtEndOfStream {
Not,
At,
Past,
}
impl AtEndOfStream {
#[inline]
pub(crate) fn as_atom(&self) -> Atom {
match self {
AtEndOfStream::Not => atom!("not"),
AtEndOfStream::Past => atom!("past"),
AtEndOfStream::At => atom!("at"),
}
}
}
impl EOFAction {
#[inline]
pub(crate) fn as_atom(&self) -> Atom {
match self {
EOFAction::EOFCode => atom!("eof_code"),
EOFAction::Error => atom!("error"),
EOFAction::Reset => atom!("reset"),
}
}
}
#[derive(Debug)]
pub struct ByteStream(Cursor<Vec<u8>>);
impl Read for ByteStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.0.read(buf)
}
}
impl Write for ByteStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.0.write(buf)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
self.0.flush()
}
}
#[derive(Debug)]
pub struct InputFileStream {
file_name: Atom,
file: File,
}
impl Read for InputFileStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.file.read(buf)
}
}
#[derive(Debug)]
pub struct OutputFileStream {
file_name: Atom,
file: File,
is_append: bool,
}
impl Write for OutputFileStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.file.write(buf)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
self.file.flush()
}
}
#[derive(Debug)]
pub struct StaticStringStream {
stream: Cursor<&'static str>,
}
impl Read for StaticStringStream {
#[inline(always)]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.stream.read(buf)
}
}
impl CharRead for StaticStringStream {
    /// Peeks at the next char without consuming it; `None` at end of
    /// input. The cursor position stays on a char boundary because
    /// `consume`/`put_back_char` move by whole-char byte widths.
    #[inline(always)]
    fn peek_char(&mut self) -> Option<std::io::Result<char>> {
        let pos = self.stream.position() as usize;
        self.stream.get_ref()[pos ..].chars().next().map(Ok)
    }
    /// Advances the cursor by `nread` bytes (not chars).
    #[inline(always)]
    fn consume(&mut self, nread: usize) {
        self.stream.seek(SeekFrom::Current(nread as i64)).unwrap();
    }
    /// Rewinds the cursor by the UTF-8 width of `c`.
    // assumes `c` is the char that was just consumed — TODO confirm
    #[inline(always)]
    fn put_back_char(&mut self, c: char) {
        self.stream.seek(SeekFrom::Current(- (c.len_utf8() as i64))).unwrap();
    }
}
#[derive(Debug)]
pub struct NamedTcpStream {
address: Atom,
tcp_stream: TcpStream,
}
impl Read for NamedTcpStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.tcp_stream.read(buf)
}
}
impl Write for NamedTcpStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.tcp_stream.write(buf)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
self.tcp_stream.flush()
}
}
#[derive(Debug)]
pub struct NamedTlsStream {
address: Atom,
tls_stream: TlsStream<Stream>,
}
impl Read for NamedTlsStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.tls_stream.read(buf)
}
}
impl Write for NamedTlsStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.tls_stream.write(buf)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
self.tls_stream.flush()
}
}
/*
#[derive(Debug)]
pub struct NullStream {}
*/
#[derive(Debug)]
pub struct StandardOutputStream {}
impl Write for StandardOutputStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
io::stdout().write(buf)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
io::stdout().flush()
}
}
#[derive(Debug)]
pub struct StandardErrorStream {}
impl Write for StandardErrorStream {
#[inline]
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
io::stderr().write(buf)
}
#[inline]
fn flush(&mut self) -> std::io::Result<()> {
io::stderr().flush()
}
}
#[bitfield]
#[repr(u64)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct StreamOptions {
pub stream_type: StreamType,
pub reposition: bool,
pub eof_action: EOFAction,
pub has_alias: bool,
pub alias: B59,
}
impl StreamOptions {
    /// Returns the stream's alias as an atom, if one is set.
    #[inline]
    pub fn get_alias(self) -> Option<Atom> {
        if self.has_alias() {
            // The 59-bit `alias` field stores `Atom::flat_index()`;
            // shifting left by 3 presumably restores the low bits the
            // flat index dropped — TODO confirm against `Atom::from`.
            Some(Atom::from((self.alias() << 3) as usize))
        } else {
            None
        }
    }
    /// Sets or clears the alias from an optional atom, keeping the
    /// `has_alias` flag in sync with the stored value.
    #[inline]
    pub fn set_alias_to_atom_opt(&mut self, alias: Option<Atom>) {
        self.set_has_alias(alias.is_some());
        if let Some(alias) = alias {
            self.set_alias(alias.flat_index());
        }
    }
}
impl Default for StreamOptions {
    /// Text stream, no repositioning, `eof_code` EOF action, no alias.
    #[inline]
    fn default() -> Self {
        StreamOptions::new()
            .with_stream_type(StreamType::Text)
            .with_reposition(false)
            .with_eof_action(EOFAction::EOFCode)
            .with_has_alias(false)
            .with_alias(0)
    }
}
#[derive(Debug, Copy, Clone)]
pub struct StreamLayout<T> {
pub options: StreamOptions,
pub lines_read: usize,
past_end_of_stream: bool,
stream: T,
}
impl<T> StreamLayout<T> {
    /// Wraps `stream` with default options and zeroed bookkeeping
    /// (no lines read, not past end of stream).
    #[inline]
    pub fn new(stream: T) -> Self {
        Self {
            options: StreamOptions::default(),
            lines_read: 0,
            past_end_of_stream: false,
            stream,
        }
    }
}
impl<T> Deref for StreamLayout<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.stream
}
}
impl<T> DerefMut for StreamLayout<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.stream
}
}
// Implements `ArenaAllocated` for `StreamLayout<$stream_type>`, tagging
// arena headers with `ArenaHeaderTag::$stream_tag` so the stream kind can
// be recovered from a raw arena pointer.
macro_rules! arena_allocated_impl_for_stream {
    ($stream_type:ty, $stream_tag:ident) => {
        impl ArenaAllocated for StreamLayout<$stream_type> {
            type PtrToAllocated = TypedArenaPtr<StreamLayout<$stream_type>>;
            #[inline]
            fn tag() -> ArenaHeaderTag {
                ArenaHeaderTag::$stream_tag
            }
            #[inline]
            fn size(&self) -> usize {
                mem::size_of::<StreamLayout<$stream_type>>()
            }
            #[inline]
            fn copy_to_arena(self, dst: *mut Self) -> Self::PtrToAllocated {
                // Caller must pass a `dst` valid for a write of `Self`;
                // `ptr::write` does not drop any previous value at `dst`.
                unsafe {
                    ptr::write(dst, self);
                    TypedArenaPtr::new(dst as *mut Self)
                }
            }
        }
    };
}
/*
pub mod testing {
use super::PausedPrologStream;
impl PausedPrologStream {
#[allow(dead_code)]
pub fn write_test_input(&mut self, string: &str) {
self.bytes.extend(string.as_bytes().iter().rev());
}
}
}
*/
/*
impl ArenaAllocated for PausedPrologStream {
type PtrToAllocated = TypedArenaPtr<PausedPrologStream>;
#[inline]
fn tag() -> ArenaHeaderTag {
ArenaHeaderTag::PausedPrologStream
}
#[inline]
fn size(&self) -> usize {
mem::size_of::<PausedPrologStream>()
}
#[inline]
fn copy_to_arena(self, dst: *mut Self) -> Self::PtrToAllocated {
unsafe {
ptr::write(dst, self);
TypedArenaPtr::new(dst as *mut Self)
}
}
}
#[derive(Debug)]
pub struct PausedPrologStream {
bytes: Vec<u8>,
paused_stream: Stream,
}
impl PausedPrologStream {
#[inline]
pub fn new() -> Self {
PausedPrologStream {
bytes: vec![],
paused_stream: Stream::Null(StreamOptions::default()),
}
}
}
impl Read for PausedPrologStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.paused_stream.read(buf)
}
}
*/
// Generate `ArenaAllocated` impls for every stream layout, pairing each
// transport type with its arena header tag.
arena_allocated_impl_for_stream!(CharReader<ByteStream>, ByteStream);
arena_allocated_impl_for_stream!(CharReader<InputFileStream>, InputFileStream);
arena_allocated_impl_for_stream!(OutputFileStream, OutputFileStream);
arena_allocated_impl_for_stream!(CharReader<NamedTcpStream>, NamedTcpStream);
arena_allocated_impl_for_stream!(CharReader<NamedTlsStream>, NamedTlsStream);
arena_allocated_impl_for_stream!(ReadlineStream, ReadlineStream);
arena_allocated_impl_for_stream!(StaticStringStream, StaticStringStream);
arena_allocated_impl_for_stream!(StandardOutputStream, StandardOutputStream);
arena_allocated_impl_for_stream!(StandardErrorStream, StandardErrorStream);
/// A Prolog stream: a tagged, copyable handle to an arena-allocated stream
/// layout (or inline options for the null stream). `Copy` is cheap because
/// every non-null variant is just a typed arena pointer.
#[derive(Debug, Copy, Clone)]
pub enum Stream {
    // in-memory byte buffer (read/write).
    Byte(TypedArenaPtr<StreamLayout<CharReader<ByteStream>>>),
    InputFile(TypedArenaPtr<StreamLayout<CharReader<InputFileStream>>>),
    OutputFile(TypedArenaPtr<StreamLayout<OutputFileStream>>),
    // read-only view over a `&'static str`.
    StaticString(TypedArenaPtr<StreamLayout<StaticStringStream>>),
    NamedTcp(TypedArenaPtr<StreamLayout<CharReader<NamedTcpStream>>>),
    NamedTls(TypedArenaPtr<StreamLayout<CharReader<NamedTlsStream>>>),
    // stream that discards writes and yields nothing; options stored inline.
    Null(StreamOptions),
    // interactive (readline-backed) input, i.e. stdin.
    Readline(TypedArenaPtr<StreamLayout<ReadlineStream>>),
    StandardOutput(TypedArenaPtr<StreamLayout<StandardOutputStream>>),
    StandardError(TypedArenaPtr<StreamLayout<StandardErrorStream>>),
}
impl From<TypedArenaPtr<StreamLayout<ReadlineStream>>> for Stream {
    /// Wraps an arena pointer to a readline stream layout in
    /// `Stream::Readline`.
    #[inline]
    fn from(ptr: TypedArenaPtr<StreamLayout<ReadlineStream>>) -> Stream {
        Stream::Readline(ptr)
    }
}
impl Stream {
    /// Allocates `stream` in `arena` and wraps it as a readline stream.
    #[inline]
    pub fn from_readline_stream(stream: ReadlineStream, arena: &mut Arena) -> Stream {
        Stream::Readline(arena_alloc!(StreamLayout::new(stream), arena))
    }

    /// Allocates an in-memory byte stream over the bytes of `string`.
    #[inline]
    pub fn from_owned_string(string: String, arena: &mut Arena) -> Stream {
        Stream::Byte(arena_alloc!(
            StreamLayout::new(CharReader::new(ByteStream(Cursor::new(string.into_bytes())))),
            arena
        ))
    }

    /// Allocates a read-only stream over a static string slice.
    #[inline]
    pub fn from_static_string(src: &'static str, arena: &mut Arena) -> Stream {
        Stream::StaticString(arena_alloc!(
            StreamLayout::new(StaticStringStream {
                stream: Cursor::new(src)
            }),
            arena
        ))
    }

    /// Allocates the standard input stream (readline-backed, empty prompt).
    #[inline]
    pub fn stdin(arena: &mut Arena) -> Stream {
        Stream::Readline(arena_alloc!(
            StreamLayout::new(ReadlineStream::new("")),
            arena
        ))
    }

    /// Reconstructs a `Stream` from an arena header tag and a raw pointer to
    /// its layout. Panics (`unreachable!`) on a non-stream tag.
    pub fn from_tag(tag: ArenaHeaderTag, ptr: *const u8) -> Self {
        match tag {
            ArenaHeaderTag::ByteStream => Stream::Byte(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::InputFileStream => Stream::InputFile(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::OutputFileStream => {
                Stream::OutputFile(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::NamedTcpStream => Stream::NamedTcp(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::NamedTlsStream => Stream::NamedTls(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::ReadlineStream => Stream::Readline(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::StaticStringStream => {
                Stream::StaticString(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::StandardOutputStream => {
                Stream::StandardOutput(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::StandardErrorStream => {
                Stream::StandardError(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::NullStream => Stream::Null(StreamOptions::default()),
            _ => unreachable!(),
        }
    }

    /// True iff this is the standard error stream.
    #[inline]
    pub fn is_stderr(&self) -> bool {
        matches!(self, Stream::StandardError(_))
    }

    /// True iff this is the standard output stream.
    #[inline]
    pub fn is_stdout(&self) -> bool {
        matches!(self, Stream::StandardOutput(_))
    }

    /// True iff this is a readline stream, which serves as stdin.
    #[inline]
    pub fn is_stdin(&self) -> bool {
        matches!(self, Stream::Readline(_))
    }

    /// Raw pointer to the stream's arena header; null for `Stream::Null`.
    /// Used for identity comparison and hashing of streams.
    pub fn as_ptr(&self) -> *const ArenaHeader {
        match self {
            Stream::Byte(ptr) => ptr.header_ptr(),
            Stream::InputFile(ptr) => ptr.header_ptr(),
            Stream::OutputFile(ptr) => ptr.header_ptr(),
            Stream::StaticString(ptr) => ptr.header_ptr(),
            Stream::NamedTcp(ptr) => ptr.header_ptr(),
            Stream::NamedTls(ptr) => ptr.header_ptr(),
            Stream::Null(_) => ptr::null(),
            Stream::Readline(ptr) => ptr.header_ptr(),
            Stream::StandardOutput(ptr) => ptr.header_ptr(),
            Stream::StandardError(ptr) => ptr.header_ptr(),
        }
    }

    /// Shared access to the stream's option flags.
    pub fn options(&self) -> &StreamOptions {
        match self {
            Stream::Byte(ref ptr) => &ptr.options,
            Stream::InputFile(ref ptr) => &ptr.options,
            Stream::OutputFile(ref ptr) => &ptr.options,
            Stream::StaticString(ref ptr) => &ptr.options,
            Stream::NamedTcp(ref ptr) => &ptr.options,
            Stream::NamedTls(ref ptr) => &ptr.options,
            Stream::Null(ref options) => options,
            Stream::Readline(ref ptr) => &ptr.options,
            Stream::StandardOutput(ref ptr) => &ptr.options,
            Stream::StandardError(ref ptr) => &ptr.options,
        }
    }

    /// Mutable access to the stream's option flags.
    pub fn options_mut(&mut self) -> &mut StreamOptions {
        match self {
            Stream::Byte(ref mut ptr) => &mut ptr.options,
            Stream::InputFile(ref mut ptr) => &mut ptr.options,
            Stream::OutputFile(ref mut ptr) => &mut ptr.options,
            Stream::StaticString(ref mut ptr) => &mut ptr.options,
            Stream::NamedTcp(ref mut ptr) => &mut ptr.options,
            Stream::NamedTls(ref mut ptr) => &mut ptr.options,
            Stream::Null(ref mut options) => options,
            Stream::Readline(ref mut ptr) => &mut ptr.options,
            Stream::StandardOutput(ref mut ptr) => &mut ptr.options,
            Stream::StandardError(ref mut ptr) => &mut ptr.options,
        }
    }

    /// Adds `incr_num_lines_read` to the stream's line counter; no-op for
    /// null streams.
    #[inline]
    pub(crate) fn add_lines_read(&mut self, incr_num_lines_read: usize) {
        match self {
            Stream::Byte(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::InputFile(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::OutputFile(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::StaticString(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::NamedTcp(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::NamedTls(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::Null(_) => {}
            Stream::Readline(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::StandardOutput(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::StandardError(ptr) => ptr.lines_read += incr_num_lines_read,
        }
    }

    /// Overwrites the stream's line counter; no-op for null streams.
    #[inline]
    pub(crate) fn set_lines_read(&mut self, value: usize) {
        match self {
            Stream::Byte(ptr) => ptr.lines_read = value,
            Stream::InputFile(ptr) => ptr.lines_read = value,
            Stream::OutputFile(ptr) => ptr.lines_read = value,
            Stream::StaticString(ptr) => ptr.lines_read = value,
            Stream::NamedTcp(ptr) => ptr.lines_read = value,
            Stream::NamedTls(ptr) => ptr.lines_read = value,
            Stream::Null(_) => {}
            Stream::Readline(ptr) => ptr.lines_read = value,
            Stream::StandardOutput(ptr) => ptr.lines_read = value,
            Stream::StandardError(ptr) => ptr.lines_read = value,
        }
    }

    /// Number of lines read so far; 0 for null streams.
    #[inline]
    pub(crate) fn lines_read(&self) -> usize {
        match self {
            Stream::Byte(ptr) => ptr.lines_read,
            Stream::InputFile(ptr) => ptr.lines_read,
            Stream::OutputFile(ptr) => ptr.lines_read,
            Stream::StaticString(ptr) => ptr.lines_read,
            Stream::NamedTcp(ptr) => ptr.lines_read,
            Stream::NamedTls(ptr) => ptr.lines_read,
            Stream::Null(_) => 0,
            Stream::Readline(ptr) => ptr.lines_read,
            Stream::StandardOutput(ptr) => ptr.lines_read,
            Stream::StandardError(ptr) => ptr.lines_read,
        }
    }
}
// Character-level reading, dispatched to the input-capable variants.
// Output-only and null streams yield a permission error (or are a no-op for
// the infallible methods).
impl CharRead for Stream {
    /// Peeks the next char without consuming it; `None` at end of stream.
    fn peek_char(&mut self) -> Option<std::io::Result<char>> {
        match self {
            Stream::InputFile(file) => (*file).peek_char(),
            Stream::NamedTcp(tcp_stream) => (*tcp_stream).peek_char(),
            Stream::NamedTls(tls_stream) => (*tls_stream).peek_char(),
            Stream::Readline(rl_stream) => (*rl_stream).peek_char(),
            Stream::StaticString(src) => (*src).peek_char(),
            Stream::Byte(cursor) => (*cursor).peek_char(),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => Some(Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::ReadFromOutputStream,
            ))),
        }
    }

    /// Reads and consumes the next char; `None` at end of stream.
    fn read_char(&mut self) -> Option<std::io::Result<char>> {
        match self {
            Stream::InputFile(file) => (*file).read_char(),
            Stream::NamedTcp(tcp_stream) => (*tcp_stream).read_char(),
            Stream::NamedTls(tls_stream) => (*tls_stream).read_char(),
            Stream::Readline(rl_stream) => (*rl_stream).read_char(),
            Stream::StaticString(src) => (*src).read_char(),
            Stream::Byte(cursor) => (*cursor).read_char(),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => Some(Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::ReadFromOutputStream,
            ))),
        }
    }

    /// Pushes `c` back so the next read returns it again. Silently ignored
    /// on non-input streams.
    fn put_back_char(&mut self, c: char) {
        match self {
            Stream::InputFile(file) => file.put_back_char(c),
            Stream::NamedTcp(tcp_stream) => tcp_stream.put_back_char(c),
            Stream::NamedTls(tls_stream) => tls_stream.put_back_char(c),
            Stream::Readline(rl_stream) => rl_stream.put_back_char(c),
            Stream::StaticString(src) => src.put_back_char(c),
            Stream::Byte(cursor) => cursor.put_back_char(c),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => {}
        }
    }

    /// Consumes `nread` bytes of previously peeked input. Silently ignored
    /// on non-input streams.
    fn consume(&mut self, nread: usize) {
        match self {
            Stream::InputFile(ref mut file) => file.consume(nread),
            Stream::NamedTcp(ref mut tcp_stream) => tcp_stream.consume(nread),
            Stream::NamedTls(ref mut tls_stream) => tls_stream.consume(nread),
            Stream::Readline(ref mut rl_stream) => rl_stream.consume(nread),
            Stream::StaticString(ref mut src) => src.consume(nread),
            Stream::Byte(ref mut cursor) => cursor.consume(nread),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => {}
        }
    }
}
// Byte-level reading, dispatched to the input-capable variants.
impl Read for Stream {
    /// Reads raw bytes from an input stream; output-only and null streams
    /// return a `PermissionDenied` error.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        // Return the match directly; the original bound it to a temporary
        // (`let bytes_read = …; bytes_read`) for no effect.
        match self {
            Stream::InputFile(file) => (*file).read(buf),
            Stream::NamedTcp(tcp_stream) => (*tcp_stream).read(buf),
            Stream::NamedTls(tls_stream) => (*tls_stream).read(buf),
            Stream::Readline(rl_stream) => (*rl_stream).read(buf),
            Stream::StaticString(src) => (*src).read(buf),
            Stream::Byte(cursor) => (*cursor).read(buf),
            Stream::OutputFile(_)
            | Stream::StandardError(_)
            | Stream::StandardOutput(_)
            | Stream::Null(_) => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::ReadFromOutputStream,
            )),
        }
    }
}
// Byte-level writing, dispatched to the output-capable variants. Read-only
// and null streams return a permission error.
impl Write for Stream {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        match self {
            Stream::OutputFile(ref mut file) => file.write(buf),
            // NOTE(review): `get_mut()` writes to the transport underneath
            // the `CharReader` — presumably bypassing its read buffer is
            // safe for write-side traffic; confirm against CharReader.
            Stream::NamedTcp(ref mut tcp_stream) => tcp_stream.get_mut().write(buf),
            Stream::NamedTls(ref mut tls_stream) => tls_stream.get_mut().write(buf),
            Stream::Byte(ref mut cursor) => cursor.get_mut().write(buf),
            Stream::StandardOutput(stream) => stream.write(buf),
            Stream::StandardError(stream) => stream.write(buf),
            Stream::StaticString(_) |
            Stream::Readline(_) |
            Stream::InputFile(..) |
            Stream::Null(_) => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::WriteToInputStream,
            )),
        }
    }

    fn flush(&mut self) -> std::io::Result<()> {
        match self {
            Stream::OutputFile(ref mut file) => file.stream.flush(),
            Stream::NamedTcp(ref mut tcp_stream) => tcp_stream.stream.get_mut().flush(),
            Stream::NamedTls(ref mut tls_stream) => tls_stream.stream.get_mut().flush(),
            Stream::Byte(ref mut cursor) => cursor.stream.get_mut().flush(),
            Stream::StandardError(stream) => stream.stream.flush(),
            Stream::StandardOutput(stream) => stream.stream.flush(),
            Stream::StaticString(_) |
            Stream::Readline(_) |
            Stream::InputFile(_) |
            Stream::Null(_) => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::FlushToInputStream,
            )),
        }
    }
}
/// Stream-level I/O failures, used as the payload of `std::io::Error`
/// values produced by the `Stream` trait impls above.
#[derive(Debug)]
enum StreamError {
    PeekByteFailed,
    PeekByteFromNonPeekableStream,
    #[allow(unused)] PeekCharFailed,
    #[allow(unused)] PeekCharFromNonPeekableStream,
    ReadFromOutputStream,
    WriteToInputStream,
    FlushToInputStream,
}
// Human-readable messages for each stream error; these surface through
// `std::io::Error`'s `Display`.
impl fmt::Display for StreamError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            StreamError::PeekByteFailed => {
                write!(f, "peek byte failed!")
            }
            StreamError::PeekByteFromNonPeekableStream => {
                write!(f, "attempted to peek byte from a non-peekable input stream")
            }
            StreamError::PeekCharFailed => {
                write!(f, "peek char failed!")
            }
            StreamError::PeekCharFromNonPeekableStream => {
                write!(f, "attempted to peek char from a non-peekable input stream")
            }
            StreamError::ReadFromOutputStream => {
                write!(f, "attempted to read from a write-only stream")
            }
            StreamError::WriteToInputStream => {
                write!(f, "attempted to write to a read-only stream")
            }
            StreamError::FlushToInputStream => {
                write!(f, "attempted to flush a read-only stream")
            }
        }
    }
}

impl Error for StreamError {}
// Streams compare by identity: the address of their arena header. Note that
// every `Stream::Null` has a null `as_ptr()`, so all null streams compare
// equal to each other.
impl PartialOrd for Stream {
    #[inline]
    fn partial_cmp(&self, other: &Stream) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Stream {
    #[inline]
    fn cmp(&self, other: &Stream) -> Ordering {
        self.as_ptr().cmp(&other.as_ptr())
    }
}

impl PartialEq for Stream {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.as_ptr() == other.as_ptr()
    }
}

impl Eq for Stream {}
impl Stream {
    /// Returns `(byte position, lines read)`, or `None` for stream kinds
    /// that do not support position queries (output and null streams).
    #[inline]
    pub(crate) fn position(&mut self) -> Option<(u64, usize)> {
        // returns (position, lines_read) — note the order matches the tuple
        // below, not the original comment's "lines_read, position".
        let result = match self {
            Stream::InputFile(ref mut file_stream) => {
                file_stream.get_mut().file.seek(SeekFrom::Current(0)).ok()
            }
            // byte positions are not tracked for these kinds; report 0.
            Stream::NamedTcp(..)
            | Stream::NamedTls(..)
            | Stream::Readline(..)
            | Stream::StaticString(..)
            | Stream::Byte(..) => Some(0),
            _ => None,
        };

        result.map(|position| (position, self.lines_read()))
    }

    /// Seeks an input file stream to `position` and refreshes the
    /// past-end-of-stream flag against the file's length. No-op for every
    /// other stream kind.
    #[inline]
    pub(crate) fn set_position(&mut self, position: u64) {
        match self {
            Stream::InputFile(stream_layout) => {
                let StreamLayout {
                    past_end_of_stream,
                    stream,
                    ..
                } = &mut **stream_layout;

                stream.get_mut().file.seek(SeekFrom::Start(position)).unwrap();

                if let Ok(metadata) = stream.get_ref().file.metadata() {
                    *past_end_of_stream = position > metadata.len();
                }
            }
            _ => {}
        }
    }

    /// True when the stream has been read or positioned past its end.
    #[inline]
    pub(crate) fn past_end_of_stream(&self) -> bool {
        match self {
            Stream::Byte(stream) => stream.past_end_of_stream,
            Stream::InputFile(stream) => stream.past_end_of_stream,
            Stream::OutputFile(stream) => stream.past_end_of_stream,
            // Stream::PausedProlog(stream) => stream.paused_stream.past_end_of_stream(),
            Stream::StaticString(stream) => stream.past_end_of_stream,
            Stream::NamedTcp(stream) => stream.past_end_of_stream,
            Stream::NamedTls(stream) => stream.past_end_of_stream,
            Stream::Null(_) => false,
            Stream::Readline(stream) => stream.past_end_of_stream,
            Stream::StandardOutput(stream) => stream.past_end_of_stream,
            Stream::StandardError(stream) => stream.past_end_of_stream,
        }
    }

    /// True when the stream is exactly at end-of-stream.
    #[inline]
    pub(crate) fn at_end_of_stream(&mut self) -> bool {
        self.position_relative_to_end() == AtEndOfStream::At
    }

    /// Sets the past-end-of-stream flag; no-op for null streams.
    #[inline]
    pub(crate) fn set_past_end_of_stream(&mut self, value: bool) {
        match self {
            Stream::Byte(stream) => stream.past_end_of_stream = value,
            Stream::InputFile(stream) => stream.past_end_of_stream = value,
            Stream::OutputFile(stream) => stream.past_end_of_stream = value,
            Stream::StaticString(stream) => stream.past_end_of_stream = value,
            Stream::NamedTcp(stream) => stream.past_end_of_stream = value,
            Stream::NamedTls(stream) => stream.past_end_of_stream = value,
            Stream::Null(_) => {}
            Stream::Readline(stream) => stream.past_end_of_stream = value,
            Stream::StandardOutput(stream) => stream.past_end_of_stream = value,
            Stream::StandardError(stream) => stream.past_end_of_stream = value,
        }
    }

    /// Classifies the stream position as before / at / past end-of-stream.
    /// Only input file streams consult the underlying file; any failed seek
    /// or metadata query marks the stream as past its end. All other kinds
    /// report `Not` (unless the past-end flag is already set).
    #[inline]
    pub(crate) fn position_relative_to_end(&mut self) -> AtEndOfStream {
        if self.past_end_of_stream() {
            return AtEndOfStream::Past;
        }

        if let Stream::InputFile(stream_layout) = self {
            let StreamLayout {
                past_end_of_stream,
                stream,
                ..
            } = &mut **stream_layout;

            match stream.get_ref().file.metadata() {
                Ok(metadata) => {
                    if let Ok(position) = stream.get_mut().file.seek(SeekFrom::Current(0)) {
                        return match position.cmp(&metadata.len()) {
                            Ordering::Equal => AtEndOfStream::At,
                            Ordering::Less => AtEndOfStream::Not,
                            Ordering::Greater => {
                                *past_end_of_stream = true;
                                AtEndOfStream::Past
                            }
                        };
                    } else {
                        *past_end_of_stream = true;
                        AtEndOfStream::Past
                    }
                }
                _ => {
                    *past_end_of_stream = true;
                    AtEndOfStream::Past
                }
            }
        } else {
            AtEndOfStream::Not
        }
    }

    /// File name for file streams, peer address for socket streams,
    /// `None` otherwise.
    #[inline]
    pub(crate) fn file_name(&self) -> Option<Atom> {
        match self {
            Stream::InputFile(file) => Some(file.stream.get_ref().file_name),
            Stream::OutputFile(file) => Some(file.stream.file_name),
            Stream::NamedTcp(tcp) => Some(tcp.stream.get_ref().address),
            Stream::NamedTls(tls) => Some(tls.stream.get_ref().address),
            _ => None,
        }
    }

    /// The stream's I/O mode atom, mirroring the modes of `open/4`.
    #[inline]
    pub(crate) fn mode(&self) -> Atom {
        match self {
            Stream::Byte(_)
            | Stream::Readline(_)
            | Stream::StaticString(_)
            | Stream::InputFile(..) => atom!("read"),
            Stream::NamedTcp(..) | Stream::NamedTls(..) => atom!("read_append"),
            Stream::OutputFile(file) if file.is_append => atom!("append"),
            Stream::OutputFile(_) | Stream::StandardError(_) | Stream::StandardOutput(_) => atom!("write"),
            Stream::Null(_) => atom!(""),
        }
    }

    /// Allocates a new standard output stream in `arena`.
    #[inline]
    pub fn stdout(arena: &mut Arena) -> Self {
        Stream::StandardOutput(arena_alloc!(
            StreamLayout::new(StandardOutputStream {}),
            arena
        ))
    }

    /// Allocates a new standard error stream in `arena`.
    #[inline]
    pub fn stderr(arena: &mut Arena) -> Self {
        Stream::StandardError(arena_alloc!(
            StreamLayout::new(StandardErrorStream {}),
            arena
        ))
    }

    /// Wraps a connected TCP socket as a stream; read/write timeouts are
    /// cleared so I/O blocks indefinitely.
    #[inline]
    pub(crate) fn from_tcp_stream(
        address: Atom,
        tcp_stream: TcpStream,
        arena: &mut Arena,
    ) -> Self {
        tcp_stream.set_read_timeout(None).unwrap();
        tcp_stream.set_write_timeout(None).unwrap();

        Stream::NamedTcp(arena_alloc!(
            StreamLayout::new(CharReader::new(NamedTcpStream {
                address,
                tcp_stream
            })),
            arena
        ))
    }

    /// Wraps an established TLS session as a stream.
    #[inline]
    pub(crate) fn from_tls_stream(
        address: Atom,
        tls_stream: TlsStream<Stream>,
        arena: &mut Arena,
    ) -> Self {
        Stream::NamedTls(arena_alloc!(
            StreamLayout::new(CharReader::new(NamedTlsStream {
                address,
                tls_stream
            })),
            arena
        ))
    }

    /// Wraps an already-opened file as an output stream.
    #[inline]
    pub(crate) fn from_file_as_output(
        file_name: Atom,
        file: File,
        is_append: bool,
        arena: &mut Arena,
    ) -> Self {
        Stream::OutputFile(arena_alloc!(
            StreamLayout::new(OutputFileStream {
                file_name,
                file,
                is_append
            }),
            arena
        ))
    }

    /// Wraps an already-opened file as an input stream.
    #[inline]
    pub(crate) fn from_file_as_input(file_name: Atom, file: File, arena: &mut Arena) -> Self {
        Stream::InputFile(arena_alloc!(
            StreamLayout::new(CharReader::new(InputFileStream { file_name, file })),
            arena
        ))
    }

    /// Shuts down socket streams and replaces `self` with a null stream.
    /// Non-socket streams are simply nulled; their arena storage is not
    /// reclaimed here.
    #[inline]
    pub(crate) fn close(&mut self) -> Result<(), std::io::Error> {
        let result = match self {
            Stream::NamedTcp(ref mut tcp_stream) => {
                tcp_stream.inner_mut().tcp_stream.shutdown(Shutdown::Both)
            },
            Stream::NamedTls(ref mut tls_stream) => {
                tls_stream.inner_mut().tls_stream.shutdown()
            }
            _ => Ok(())
        };

        *self = Stream::Null(StreamOptions::default());
        result
    }

    /// True iff this is the null stream.
    #[inline]
    pub(crate) fn is_null_stream(&self) -> bool {
        if let Stream::Null(_) = self {
            true
        } else {
            false
        }
    }

    /// True for stream kinds that can be read from.
    #[inline]
    pub(crate) fn is_input_stream(&self) -> bool {
        match self {
            Stream::NamedTcp(..)
            | Stream::NamedTls(..)
            | Stream::Byte(_)
            | Stream::Readline(_)
            | Stream::StaticString(_)
            | Stream::InputFile(..) => true,
            _ => false,
        }
    }

    /// True for stream kinds that can be written to.
    #[inline]
    pub(crate) fn is_output_stream(&self) -> bool {
        match self {
            Stream::StandardError(_)
            | Stream::StandardOutput(_)
            | Stream::NamedTcp(..)
            | Stream::NamedTls(..)
            | Stream::Byte(_)
            | Stream::OutputFile(..) => true,
            _ => false,
        }
    }

    // returns true on success.
    /// Rewinds the stream to its beginning, clearing the line counter and
    /// the past-end flag. Only byte, input-file, and readline streams are
    /// resettable; every other kind returns `false`.
    #[inline]
    pub(super) fn reset(&mut self) -> bool {
        self.set_lines_read(0);
        self.set_past_end_of_stream(false);

        // every arm returns, so this loop runs at most once.
        loop {
            match self {
                Stream::Byte(ref mut cursor) => {
                    cursor.stream.get_mut().0.set_position(0);
                    return true;
                }
                Stream::InputFile(ref mut file_stream) => {
                    file_stream.stream.get_mut().file.seek(SeekFrom::Start(0)).unwrap();
                    return true;
                }
                Stream::Readline(_) => {
                    return true;
                }
                _ => {
                    return false;
                }
            }
        }
    }

    /// Reads the next byte without consuming it. Supported for byte,
    /// input-file, readline, and TCP streams; other kinds error with
    /// `PermissionDenied`.
    #[inline]
    pub(crate) fn peek_byte(&mut self) -> std::io::Result<u8> {
        match self {
            Stream::Byte(ref mut cursor) => {
                let mut b = [0u8; 1];
                // remember the cursor position and restore it after the read.
                let pos = cursor.stream.get_mut().0.position();

                match cursor.read(&mut b)? {
                    1 => {
                        cursor.stream.get_mut().0.set_position(pos);
                        Ok(b[0])
                    }
                    _ => Err(std::io::Error::new(ErrorKind::UnexpectedEof, "end of file")),
                }
            }
            Stream::InputFile(ref mut file) => {
                let mut b = [0u8; 1];

                match file.read(&mut b)? {
                    1 => {
                        // undo the read by seeking back one byte.
                        file.stream.get_mut().file.seek(SeekFrom::Current(-1))?;
                        Ok(b[0])
                    }
                    _ => Err(std::io::Error::new(
                        ErrorKind::UnexpectedEof,
                        StreamError::PeekByteFailed,
                    )),
                }
            }
            Stream::Readline(ref mut stream) => stream.stream.peek_byte(),
            Stream::NamedTcp(ref mut stream) => {
                let mut b = [0u8; 1];
                // TcpStream::peek reads without removing data from the queue.
                stream.stream.get_mut().tcp_stream.peek(&mut b)?;
                Ok(b[0])
            }
            _ => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::PeekByteFromNonPeekableStream,
            )),
        }
    }
}
impl MachineState {
    /// Applies the stream's configured EOF action after a read hit end of
    /// stream: raise a permission error, unify `result` with the
    /// end-of-file term (`-1` for binary streams, `end_of_file` for text),
    /// or reset the stream to its beginning.
    #[inline]
    pub(crate) fn eof_action(
        &mut self,
        result: HeapCellValue,
        mut stream: Stream,
        caller: Atom,
        arity: usize,
    ) -> CallResult {
        let eof_action = stream.options().eof_action();

        match eof_action {
            EOFAction::Error => {
                stream.set_past_end_of_stream(true);
                return Err(self.open_past_eos_error(stream, caller, arity));
            }
            EOFAction::EOFCode => {
                let end_of_stream = if stream.options().stream_type() == StreamType::Binary {
                    fixnum_as_cell!(Fixnum::build_with(-1))
                } else {
                    atom_as_cell!(atom!("end_of_file"))
                };

                stream.set_past_end_of_stream(true);
                Ok(unify!(self, result, end_of_stream))
            }
            EOFAction::Reset => {
                // non-resettable streams are marked past-end instead, which
                // fails the call below.
                if !stream.reset() {
                    stream.set_past_end_of_stream(true);
                }

                Ok(self.fail = stream.past_end_of_stream())
            }
        }
    }

    /// Builds a `StreamOptions` from the dereferenced heap cells of an
    /// `open/4` options list. NOTE(review): the `unreachable!()` arms assume
    /// the caller has already type-checked the option values — confirm at
    /// call sites.
    pub(crate) fn to_stream_options(
        &mut self,
        alias: HeapCellValue,
        eof_action: HeapCellValue,
        reposition: HeapCellValue,
        stream_type: HeapCellValue,
    ) -> StreamOptions {
        let alias = read_heap_cell!(self.store(MachineState::deref(self, alias)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                Some(name)
            }
            _ => {
                None
            }
        );

        let eof_action = read_heap_cell!(self.store(MachineState::deref(self, eof_action)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);

                match name {
                    atom!("eof_code") => EOFAction::EOFCode,
                    atom!("error") => EOFAction::Error,
                    atom!("reset") => EOFAction::Reset,
                    _ => unreachable!(),
                }
            }
            _ => {
                unreachable!()
            }
        );

        let reposition = read_heap_cell!(self.store(MachineState::deref(self, reposition)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                name == atom!("true")
            }
            _ => {
                unreachable!()
            }
        );

        let stream_type = read_heap_cell!(self.store(MachineState::deref(self, stream_type)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);

                match name {
                    atom!("text") => StreamType::Text,
                    atom!("binary") => StreamType::Binary,
                    _ => unreachable!(),
                }
            }
            _ => {
                unreachable!()
            }
        );

        let mut options = StreamOptions::default();

        options.set_stream_type(stream_type);
        options.set_reposition(reposition);
        options.set_alias_to_atom_opt(alias);
        options.set_eof_action(eof_action);

        options
    }

    /// Resolves a heap value to a stream: either an alias atom looked up in
    /// `stream_aliases`, or a direct stream pointer. Returns an existence,
    /// permission, instantiation, or domain error term on failure.
    pub(crate) fn get_stream_or_alias(
        &mut self,
        addr: HeapCellValue,
        stream_aliases: &StreamAliasDir,
        caller: Atom,
        arity: usize,
    ) -> Result<Stream, MachineStub> {
        let addr = self.store(MachineState::deref(self, addr));

        read_heap_cell!(addr,
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);

                return match stream_aliases.get(&name) {
                    Some(stream) if !stream.is_null_stream() => Ok(*stream),
                    _ => {
                        let stub = functor_stub(caller, arity);
                        let addr = atom_as_cell!(name);
                        let existence_error = self.existence_error(ExistenceError::Stream(addr));

                        Err(self.error_form(existence_error, stub))
                    }
                };
            }
            (HeapCellValueTag::Cons, ptr) => {
                match_untyped_arena_ptr!(ptr,
                    (ArenaHeaderTag::Stream, stream) => {
                        return if stream.is_null_stream() {
                            Err(self.open_permission_error(stream_as_cell!(stream), caller, arity))
                        } else {
                            Ok(stream)
                        };
                    }
                    _ => {
                    }
                );
            }
            _ => {
            }
        );

        // neither an alias atom nor a stream pointer: instantiation error
        // for unbound variables, domain error otherwise.
        let stub = functor_stub(caller, arity);

        if addr.is_var() {
            let instantiation_error = self.instantiation_error();
            Err(self.error_form(instantiation_error, stub))
        } else {
            let domain_error = self.domain_error(DomainErrorType::StreamOrAlias, addr);
            Err(self.error_form(domain_error, stub))
        }
    }

    /// Prepares a stream for term parsing, skipping a leading UTF-8 BOM if
    /// present. Peek errors are converted into a session error term.
    pub(crate) fn open_parsing_stream(
        &mut self,
        mut stream: Stream,
        stub_name: Atom,
        stub_arity: usize,
    ) -> Result<Stream, MachineStub> {
        match stream.peek_char() {
            None => Ok(stream), // empty stream is handled gracefully by Lexer::eof
            Some(Err(e)) => {
                let err = self.session_error(SessionError::from(e));
                let stub = functor_stub(stub_name, stub_arity);

                Err(self.error_form(err, stub))
            }
            Some(Ok(c)) => {
                if c == '\u{feff}' {
                    // skip UTF-8 BOM
                    stream.consume(c.len_utf8());
                }

                Ok(stream)
            }
        }
    }

    /// Builds a `permission_error(perm, err_atom, Stream)` machine stub.
    pub(crate) fn stream_permission_error(
        &mut self,
        perm: Permission,
        err_atom: Atom,
        stream: Stream,
        caller: Atom,
        arity: usize,
    ) -> MachineStub {
        let stub = functor_stub(caller, arity);
        let err = self.permission_error(perm, err_atom, stream_as_cell!(stream));

        return self.error_form(err, stub);
    }

    /// Permission error for reading a stream already past end-of-stream.
    #[inline]
    pub(crate) fn open_past_eos_error(
        &mut self,
        stream: Stream,
        caller: Atom,
        arity: usize,
    ) -> MachineStub {
        self.stream_permission_error(
            Permission::InputStream,
            atom!("past_end_of_stream"),
            stream,
            caller,
            arity,
        )
    }

    /// Permission error for opening a source/sink `culprit`.
    pub(crate) fn open_permission_error<T: PermissionError>(
        &mut self,
        culprit: T,
        stub_name: Atom,
        stub_arity: usize,
    ) -> MachineStub {
        let stub = functor_stub(stub_name, stub_arity);
        let err = self.permission_error(Permission::Open, atom!("source_sink"), culprit);

        return self.error_form(err, stub);
    }

    /// Permission error for opening a stream under an alias that is
    /// already taken.
    pub(crate) fn occupied_alias_permission_error(
        &mut self,
        alias: Atom,
        stub_name: Atom,
        stub_arity: usize,
    ) -> MachineStub {
        let stub = functor_stub(stub_name, stub_arity);
        let alias_name = atom!("alias");

        let err = self.permission_error(
            Permission::Open,
            atom!("source_sink"),
            functor!(alias_name, [atom(alias)]),
        );

        return self.error_form(err, stub);
    }

    /// Permission error for requesting `reposition(true)` on a stream that
    /// cannot reposition.
    pub(crate) fn reposition_error(&mut self, stub_name: Atom, stub_arity: usize) -> MachineStub {
        let stub = functor_stub(stub_name, stub_arity);
        let rep_stub = functor!(atom!("reposition"), [atom(atom!("true"))]);
        let err = self.permission_error(Permission::Open, atom!("source_sink"), rep_stub);

        return self.error_form(err, stub);
    }

    /// Validates direction and text/binary type of `stream` per ISO
    /// 8.14.2.3, then applies the EOF action when an input stream is
    /// already past end-of-stream. `input` is `Some(result_cell)` for
    /// input operations, `None` for output operations.
    pub(crate) fn check_stream_properties(
        &mut self,
        stream: Stream,
        expected_type: StreamType,
        input: Option<HeapCellValue>,
        caller: Atom,
        arity: usize,
    ) -> CallResult {
        let opt_err = if input.is_some() && !stream.is_input_stream() {
            Some(atom!("stream")) // 8.14.2.3 g)
        } else if input.is_none() && !stream.is_output_stream() {
            Some(atom!("stream")) // 8.14.2.3 g)
        } else if stream.options().stream_type() != expected_type {
            Some(expected_type.other().as_atom()) // 8.14.2.3 h)
        } else {
            None
        };

        let permission = if input.is_some() {
            Permission::InputStream
        } else {
            Permission::OutputStream
        };

        if let Some(err_atom) = opt_err {
            return Err(self.stream_permission_error(permission, err_atom, stream, caller, arity));
        }

        if let Some(input) = input {
            if stream.past_end_of_stream() {
                self.eof_action(input, stream, caller, arity)?;
            }
        }

        Ok(())
    }

    /// Opens `file_spec` for `open/4`: validates the spec and alias, maps
    /// the mode atom to `OpenOptions`, and converts OS failures into the
    /// ISO-mandated error terms (8.11.5.3).
    pub(crate) fn stream_from_file_spec(
        &mut self,
        file_spec: Atom,
        indices: &mut IndexStore,
        options: &StreamOptions,
    ) -> Result<Stream, MachineStub> {
        if file_spec == atom!("") {
            let stub = functor_stub(atom!("open"), 4);
            let err = self.domain_error(DomainErrorType::SourceSink, self[temp_v!(1)]);

            return Err(self.error_form(err, stub));
        }

        // 8.11.5.3l)
        if let Some(alias) = options.get_alias() {
            if indices.stream_aliases.contains_key(&alias) {
                return Err(self.occupied_alias_permission_error(alias, atom!("open"), 4));
            }
        }

        let mode = MachineState::deref(self, self[temp_v!(2)]);
        let mode = cell_as_atom!(self.store(mode));

        let mut open_options = OpenOptions::new();

        let (is_input_file, in_append_mode) = match mode {
            atom!("read") => {
                open_options.read(true).write(false).create(false);
                (true, false)
            }
            atom!("write") => {
                open_options
                    .read(false)
                    .write(true)
                    .truncate(true)
                    .create(true);

                (false, false)
            }
            atom!("append") => {
                open_options
                    .read(false)
                    .write(true)
                    .create(true)
                    .append(true);

                (false, true)
            }
            _ => {
                let stub = functor_stub(atom!("open"), 4);
                let err = self.domain_error(DomainErrorType::IOMode, self[temp_v!(2)]);

                // 8.11.5.3h)
                return Err(self.error_form(err, stub));
            }
        };

        let file = match open_options.open(file_spec.as_str()) {
            Ok(file) => file,
            Err(err) => {
                match err.kind() {
                    ErrorKind::NotFound => {
                        // 8.11.5.3j)
                        let stub = functor_stub(atom!("open"), 4);
                        let err = self.existence_error(
                            ExistenceError::SourceSink(self[temp_v!(1)]),
                        );

                        return Err(self.error_form(err, stub));
                    }
                    ErrorKind::PermissionDenied => {
                        // 8.11.5.3k)
                        return Err(self.open_permission_error(self[temp_v!(1)], atom!("open"), 4));
                    }
                    _ => {
                        let stub = functor_stub(atom!("open"), 4);
                        let err = self.syntax_error(ParserError::IO(err));

                        return Err(self.error_form(err, stub));
                    }
                }
            }
        };

        Ok(if is_input_file {
            Stream::from_file_as_input(file_spec, file, &mut self.arena)
        } else {
            Stream::from_file_as_output(file_spec, file, in_append_mode, &mut self.arena)
        })
    }
}
// keep cursor position after writing to byte stream
use crate::arena::*;
use crate::atom_table::*;
use crate::parser::ast::*;
use crate::parser::char_reader::*;
use crate::read::*;
use crate::machine::heap::*;
use crate::machine::machine_errors::*;
use crate::machine::machine_indices::*;
use crate::machine::machine_state::*;
use crate::types::*;
pub use modular_bitfield::prelude::*;
use std::cmp::Ordering;
use std::error::Error;
use std::fmt;
use std::fs::{File, OpenOptions};
use std::hash::{Hash};
use std::io;
use std::io::{Cursor, ErrorKind, Read, Seek, SeekFrom, Write};
use std::mem;
use std::net::{TcpStream, Shutdown};
use std::ops::{Deref, DerefMut};
use std::ptr;
use native_tls::TlsStream;
/// Whether a stream carries binary octets or text characters; stored as a
/// single bit inside `StreamOptions`.
#[derive(Debug, BitfieldSpecifier, Clone, Copy, PartialEq, Eq, Hash)]
#[bits = 1]
pub enum StreamType {
    Binary,
    Text,
}
impl StreamType {
    /// Atom naming the stream type in error terms.
    #[inline]
    pub(crate) fn as_atom(&self) -> Atom {
        match self {
            StreamType::Binary => atom!("binary_stream"),
            StreamType::Text => atom!("text_stream"),
        }
    }

    /// Atom naming the stream type as a stream property / option value.
    #[inline]
    pub(crate) fn as_property_atom(&self) -> Atom {
        match self {
            StreamType::Binary => atom!("binary"),
            StreamType::Text => atom!("text"),
        }
    }

    /// The opposite stream type.
    #[inline]
    pub(crate) fn other(self) -> StreamType {
        match self {
            StreamType::Binary => StreamType::Text,
            StreamType::Text => StreamType::Binary,
        }
    }
}
/// What to do when a read occurs past end-of-stream; two bits inside
/// `StreamOptions`.
#[derive(Debug, BitfieldSpecifier, Clone, Copy, PartialEq, Eq, Hash)]
#[bits = 2]
pub enum EOFAction {
    // unify with the end-of-file term (`-1` / `end_of_file`).
    EOFCode,
    // raise a permission error.
    Error,
    // rewind the stream to its beginning.
    Reset,
}
/// Position of a stream relative to its end, as reported by
/// `position_relative_to_end`.
#[derive(Debug, BitfieldSpecifier, Copy, Clone, PartialEq)]
#[bits = 2]
pub(crate) enum AtEndOfStream {
    Not,
    At,
    Past,
}
impl AtEndOfStream {
    /// Atom reporting the end-of-stream position as a stream property.
    #[inline]
    pub(crate) fn as_atom(&self) -> Atom {
        match *self {
            AtEndOfStream::Not => atom!("not"),
            AtEndOfStream::At => atom!("at"),
            AtEndOfStream::Past => atom!("past"),
        }
    }
}
impl EOFAction {
    /// Atom naming the EOF action as a stream property / option value.
    #[inline]
    pub(crate) fn as_atom(&self) -> Atom {
        match *self {
            EOFAction::EOFCode => atom!("eof_code"),
            EOFAction::Error => atom!("error"),
            EOFAction::Reset => atom!("reset"),
        }
    }
}
/// In-memory binary stream backed by a cursor over a byte vector. Reads
/// advance the cursor; writes always append at the end without disturbing
/// the read position.
#[derive(Debug)]
pub struct ByteStream(Cursor<Vec<u8>>);

impl Read for ByteStream {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let ByteStream(cursor) = self;
        cursor.read(buf)
    }
}

impl Write for ByteStream {
    /// Appends `buf` at the end of the buffer, then restores the cursor so
    /// subsequent reads continue from where they left off.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let read_pos = self.0.position();

        self.0.seek(SeekFrom::End(0))?;
        let written = self.0.write(buf);
        self.0.seek(SeekFrom::Start(read_pos))?;

        written
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        self.0.flush()
    }
}
/// Read side of a file opened via `open/4`, tagged with its file name atom.
#[derive(Debug)]
pub struct InputFileStream {
    file_name: Atom,
    file: File,
}

impl Read for InputFileStream {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.file.read(buf)
    }
}
/// Write side of a file opened via `open/4`; `is_append` records whether it
/// was opened in append mode (reported by `Stream::mode`).
#[derive(Debug)]
pub struct OutputFileStream {
    file_name: Atom,
    file: File,
    is_append: bool,
}

impl Write for OutputFileStream {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.file.write(buf)
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        self.file.flush()
    }
}
/// A read-only stream over a string with `'static` lifetime, e.g. source
/// text compiled into the binary.
#[derive(Debug)]
pub struct StaticStringStream {
    stream: Cursor<&'static str>,
}
impl Read for StaticStringStream {
    // Byte-level reads off the string cursor.
    #[inline(always)]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        Read::read(&mut self.stream, buf)
    }
}
impl CharRead for StaticStringStream {
    /// Returns the next character without advancing the cursor, or `None`
    /// at end of input.
    #[inline(always)]
    fn peek_char(&mut self) -> Option<std::io::Result<char>> {
        // NOTE(review): the slice assumes the cursor always rests on a UTF-8
        // character boundary — an invariant upheld by consume/put_back_char,
        // which move by whole-character byte widths.
        let pos = self.stream.position() as usize;
        self.stream.get_ref()[pos ..].chars().next().map(Ok)
    }
    /// Advances the cursor by `nread` bytes (not characters).
    #[inline(always)]
    fn consume(&mut self, nread: usize) {
        self.stream.seek(SeekFrom::Current(nread as i64)).unwrap();
    }
    /// Rewinds the cursor by the UTF-8 byte width of `c`, undoing a read.
    #[inline(always)]
    fn put_back_char(&mut self, c: char) {
        self.stream.seek(SeekFrom::Current(- (c.len_utf8() as i64))).unwrap();
    }
}
/// A TCP socket stream paired with the address atom it was opened with
/// (reported as the stream's file name).
#[derive(Debug)]
pub struct NamedTcpStream {
    address: Atom,
    tcp_stream: TcpStream,
}
impl Read for NamedTcpStream {
    // Delegates directly to the socket.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        Read::read(&mut self.tcp_stream, buf)
    }
}
impl Write for NamedTcpStream {
    // Both methods delegate directly to the socket.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        Write::write(&mut self.tcp_stream, buf)
    }
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Write::flush(&mut self.tcp_stream)
    }
}
/// A TLS-wrapped stream paired with the address atom it was opened with
/// (reported as the stream's file name).
#[derive(Debug)]
pub struct NamedTlsStream {
    address: Atom,
    tls_stream: TlsStream<Stream>,
}
impl Read for NamedTlsStream {
    // Delegates directly to the TLS session.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        Read::read(&mut self.tls_stream, buf)
    }
}
impl Write for NamedTlsStream {
    // Both methods delegate directly to the TLS session.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        Write::write(&mut self.tls_stream, buf)
    }
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Write::flush(&mut self.tls_stream)
    }
}
/*
#[derive(Debug)]
pub struct NullStream {}
*/
/// Zero-sized marker type whose `Write` impl targets the process's stdout.
#[derive(Debug)]
pub struct StandardOutputStream {}
impl Write for StandardOutputStream {
    // Each call acquires a fresh stdout handle; `Stdout` locks internally
    // per operation.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let mut stdout = io::stdout();
        stdout.write(buf)
    }
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        let mut stdout = io::stdout();
        stdout.flush()
    }
}
/// Zero-sized marker type whose `Write` impl targets the process's stderr.
#[derive(Debug)]
pub struct StandardErrorStream {}
impl Write for StandardErrorStream {
    // Each call acquires a fresh stderr handle; `Stderr` locks internally
    // per operation.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let mut stderr = io::stderr();
        stderr.write(buf)
    }
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        let mut stderr = io::stderr();
        stderr.flush()
    }
}
/// Per-stream options, packed into a single `u64` bitfield.
#[bitfield]
#[repr(u64)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct StreamOptions {
    // text or binary.
    pub stream_type: StreamType,
    // whether the stream may be repositioned.
    pub reposition: bool,
    // what to do when a read runs past end-of-stream.
    pub eof_action: EOFAction,
    // true iff `alias` holds a meaningful value.
    pub has_alias: bool,
    // packed alias atom index; only meaningful when `has_alias` is set.
    pub alias: B59,
}
impl StreamOptions {
    /// Returns the stream's alias atom, if one is set.
    #[inline]
    pub fn get_alias(self) -> Option<Atom> {
        if self.has_alias() {
            // NOTE(review): `<< 3` appears to invert the packing performed by
            // `Atom::flat_index` in `set_alias_to_atom_opt` — confirm against
            // the Atom representation.
            Some(Atom::from((self.alias() << 3) as usize))
        } else {
            None
        }
    }
    /// Sets or clears the alias; `None` clears the `has_alias` flag (the
    /// stale `alias` bits are then ignored by `get_alias`).
    #[inline]
    pub fn set_alias_to_atom_opt(&mut self, alias: Option<Atom>) {
        self.set_has_alias(alias.is_some());
        if let Some(alias) = alias {
            self.set_alias(alias.flat_index());
        }
    }
}
impl Default for StreamOptions {
    /// Default options: a text stream that cannot be repositioned, reports
    /// an EOF code at end of stream, and carries no alias.
    #[inline]
    fn default() -> Self {
        let mut options = StreamOptions::new();
        options.set_stream_type(StreamType::Text);
        options.set_reposition(false);
        options.set_eof_action(EOFAction::EOFCode);
        options.set_has_alias(false);
        options.set_alias(0);
        options
    }
}
/// Common bookkeeping wrapped around every concrete stream type: the
/// stream's options, a line counter, and an end-of-stream flag.
#[derive(Debug, Copy, Clone)]
pub struct StreamLayout<T> {
    pub options: StreamOptions,
    pub lines_read: usize,
    past_end_of_stream: bool,
    stream: T,
}
impl<T> StreamLayout<T> {
#[inline]
pub fn new(stream: T) -> Self {
Self {
options: StreamOptions::default(),
lines_read: 0,
past_end_of_stream: false,
stream,
}
}
}
// Layouts dereference to the wrapped stream, so stream methods can be
// called on a `StreamLayout<T>` directly.
impl<T> Deref for StreamLayout<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.stream
    }
}
// Mutable counterpart of the `Deref` impl above.
impl<T> DerefMut for StreamLayout<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.stream
    }
}
// Generates an `ArenaAllocated` impl for `StreamLayout<$stream_type>`,
// tagging its arena allocations with `ArenaHeaderTag::$stream_tag`.
macro_rules! arena_allocated_impl_for_stream {
    ($stream_type:ty, $stream_tag:ident) => {
        impl ArenaAllocated for StreamLayout<$stream_type> {
            type PtrToAllocated = TypedArenaPtr<StreamLayout<$stream_type>>;
            #[inline]
            fn tag() -> ArenaHeaderTag {
                ArenaHeaderTag::$stream_tag
            }
            #[inline]
            fn size(&self) -> usize {
                mem::size_of::<StreamLayout<$stream_type>>()
            }
            #[inline]
            fn copy_to_arena(self, dst: *mut Self) -> Self::PtrToAllocated {
                // SAFETY: assumed — `dst` must point to uninitialized,
                // suitably-sized arena space (the allocator's contract);
                // `ptr::write` moves `self` there without dropping old data.
                unsafe {
                    ptr::write(dst, self);
                    TypedArenaPtr::new(dst as *mut Self)
                }
            }
        }
    };
}
/*
pub mod testing {
use super::PausedPrologStream;
impl PausedPrologStream {
#[allow(dead_code)]
pub fn write_test_input(&mut self, string: &str) {
self.bytes.extend(string.as_bytes().iter().rev());
}
}
}
*/
/*
impl ArenaAllocated for PausedPrologStream {
type PtrToAllocated = TypedArenaPtr<PausedPrologStream>;
#[inline]
fn tag() -> ArenaHeaderTag {
ArenaHeaderTag::PausedPrologStream
}
#[inline]
fn size(&self) -> usize {
mem::size_of::<PausedPrologStream>()
}
#[inline]
fn copy_to_arena(self, dst: *mut Self) -> Self::PtrToAllocated {
unsafe {
ptr::write(dst, self);
TypedArenaPtr::new(dst as *mut Self)
}
}
}
#[derive(Debug)]
pub struct PausedPrologStream {
bytes: Vec<u8>,
paused_stream: Stream,
}
impl PausedPrologStream {
#[inline]
pub fn new() -> Self {
PausedPrologStream {
bytes: vec![],
paused_stream: Stream::Null(StreamOptions::default()),
}
}
}
impl Read for PausedPrologStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
self.paused_stream.read(buf)
}
}
*/
// One `ArenaAllocated` impl per concrete stream payload; input-capable
// streams are wrapped in `CharReader` for character-level buffering.
arena_allocated_impl_for_stream!(CharReader<ByteStream>, ByteStream);
arena_allocated_impl_for_stream!(CharReader<InputFileStream>, InputFileStream);
arena_allocated_impl_for_stream!(OutputFileStream, OutputFileStream);
arena_allocated_impl_for_stream!(CharReader<NamedTcpStream>, NamedTcpStream);
arena_allocated_impl_for_stream!(CharReader<NamedTlsStream>, NamedTlsStream);
arena_allocated_impl_for_stream!(ReadlineStream, ReadlineStream);
arena_allocated_impl_for_stream!(StaticStringStream, StaticStringStream);
arena_allocated_impl_for_stream!(StandardOutputStream, StandardOutputStream);
arena_allocated_impl_for_stream!(StandardErrorStream, StandardErrorStream);
/// A cheap, copyable handle to an arena-allocated stream. Every variant but
/// `Null` holds a typed pointer into the arena; `Null` carries its options
/// inline since it has no allocation.
#[derive(Debug, Copy, Clone)]
pub enum Stream {
    Byte(TypedArenaPtr<StreamLayout<CharReader<ByteStream>>>),
    InputFile(TypedArenaPtr<StreamLayout<CharReader<InputFileStream>>>),
    OutputFile(TypedArenaPtr<StreamLayout<OutputFileStream>>),
    StaticString(TypedArenaPtr<StreamLayout<StaticStringStream>>),
    NamedTcp(TypedArenaPtr<StreamLayout<CharReader<NamedTcpStream>>>),
    NamedTls(TypedArenaPtr<StreamLayout<CharReader<NamedTlsStream>>>),
    Null(StreamOptions),
    Readline(TypedArenaPtr<StreamLayout<ReadlineStream>>),
    StandardOutput(TypedArenaPtr<StreamLayout<StandardOutputStream>>),
    StandardError(TypedArenaPtr<StreamLayout<StandardErrorStream>>),
}
// Allows already-allocated readline layouts to be converted into handles.
impl From<TypedArenaPtr<StreamLayout<ReadlineStream>>> for Stream {
    #[inline]
    fn from(stream: TypedArenaPtr<StreamLayout<ReadlineStream>>) -> Stream {
        Stream::Readline(stream)
    }
}
impl Stream {
    /// Wraps an existing `ReadlineStream` in an arena-allocated layout.
    #[inline]
    pub fn from_readline_stream(stream: ReadlineStream, arena: &mut Arena) -> Stream {
        Stream::Readline(arena_alloc!(StreamLayout::new(stream), arena))
    }
    /// Creates an in-memory input stream over the bytes of `string`.
    #[inline]
    pub fn from_owned_string(string: String, arena: &mut Arena) -> Stream {
        Stream::Byte(arena_alloc!(
            StreamLayout::new(CharReader::new(ByteStream(Cursor::new(string.into_bytes())))),
            arena
        ))
    }
    /// Creates a read-only stream over a `'static` string slice.
    #[inline]
    pub fn from_static_string(src: &'static str, arena: &mut Arena) -> Stream {
        Stream::StaticString(arena_alloc!(
            StreamLayout::new(StaticStringStream {
                stream: Cursor::new(src)
            }),
            arena
        ))
    }
    /// Creates the standard input stream, backed by an empty readline buffer.
    #[inline]
    pub fn stdin(arena: &mut Arena) -> Stream {
        Stream::Readline(arena_alloc!(
            StreamLayout::new(ReadlineStream::new("")),
            arena
        ))
    }
    /// Reconstitutes a `Stream` handle from an arena header tag and the raw
    /// pointer to its allocation. Unreachable for non-stream tags.
    pub fn from_tag(tag: ArenaHeaderTag, ptr: *const u8) -> Self {
        match tag {
            ArenaHeaderTag::ByteStream => Stream::Byte(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::InputFileStream => Stream::InputFile(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::OutputFileStream => {
                Stream::OutputFile(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::NamedTcpStream => Stream::NamedTcp(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::NamedTlsStream => Stream::NamedTls(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::ReadlineStream => Stream::Readline(TypedArenaPtr::new(ptr as *mut _)),
            ArenaHeaderTag::StaticStringStream => {
                Stream::StaticString(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::StandardOutputStream => {
                Stream::StandardOutput(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::StandardErrorStream => {
                Stream::StandardError(TypedArenaPtr::new(ptr as *mut _))
            }
            ArenaHeaderTag::NullStream => Stream::Null(StreamOptions::default()),
            _ => unreachable!(),
        }
    }
    /// Returns true iff this is the standard error stream.
    #[inline]
    pub fn is_stderr(&self) -> bool {
        matches!(self, Stream::StandardError(_))
    }
    /// Returns true iff this is the standard output stream.
    #[inline]
    pub fn is_stdout(&self) -> bool {
        matches!(self, Stream::StandardOutput(_))
    }
    /// Returns true iff this is the readline-backed standard input stream.
    #[inline]
    pub fn is_stdin(&self) -> bool {
        matches!(self, Stream::Readline(_))
    }
    /// Returns the stream's arena header pointer (null for `Null` streams);
    /// used as the stream's identity by the Eq/Ord impls below.
    pub fn as_ptr(&self) -> *const ArenaHeader {
        match self {
            Stream::Byte(ptr) => ptr.header_ptr(),
            Stream::InputFile(ptr) => ptr.header_ptr(),
            Stream::OutputFile(ptr) => ptr.header_ptr(),
            Stream::StaticString(ptr) => ptr.header_ptr(),
            Stream::NamedTcp(ptr) => ptr.header_ptr(),
            Stream::NamedTls(ptr) => ptr.header_ptr(),
            Stream::Null(_) => ptr::null(),
            Stream::Readline(ptr) => ptr.header_ptr(),
            Stream::StandardOutput(ptr) => ptr.header_ptr(),
            Stream::StandardError(ptr) => ptr.header_ptr(),
        }
    }
    /// Shared access to the stream's options.
    pub fn options(&self) -> &StreamOptions {
        match self {
            Stream::Byte(ref ptr) => &ptr.options,
            Stream::InputFile(ref ptr) => &ptr.options,
            Stream::OutputFile(ref ptr) => &ptr.options,
            Stream::StaticString(ref ptr) => &ptr.options,
            Stream::NamedTcp(ref ptr) => &ptr.options,
            Stream::NamedTls(ref ptr) => &ptr.options,
            Stream::Null(ref options) => options,
            Stream::Readline(ref ptr) => &ptr.options,
            Stream::StandardOutput(ref ptr) => &ptr.options,
            Stream::StandardError(ref ptr) => &ptr.options,
        }
    }
    /// Mutable access to the stream's options.
    pub fn options_mut(&mut self) -> &mut StreamOptions {
        match self {
            Stream::Byte(ref mut ptr) => &mut ptr.options,
            Stream::InputFile(ref mut ptr) => &mut ptr.options,
            Stream::OutputFile(ref mut ptr) => &mut ptr.options,
            Stream::StaticString(ref mut ptr) => &mut ptr.options,
            Stream::NamedTcp(ref mut ptr) => &mut ptr.options,
            Stream::NamedTls(ref mut ptr) => &mut ptr.options,
            Stream::Null(ref mut options) => options,
            Stream::Readline(ref mut ptr) => &mut ptr.options,
            Stream::StandardOutput(ref mut ptr) => &mut ptr.options,
            Stream::StandardError(ref mut ptr) => &mut ptr.options,
        }
    }
    /// Adds `incr_num_lines_read` to the line counter (no-op for `Null`).
    #[inline]
    pub(crate) fn add_lines_read(&mut self, incr_num_lines_read: usize) {
        match self {
            Stream::Byte(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::InputFile(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::OutputFile(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::StaticString(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::NamedTcp(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::NamedTls(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::Null(_) => {}
            Stream::Readline(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::StandardOutput(ptr) => ptr.lines_read += incr_num_lines_read,
            Stream::StandardError(ptr) => ptr.lines_read += incr_num_lines_read,
        }
    }
    /// Overwrites the line counter (no-op for `Null`).
    #[inline]
    pub(crate) fn set_lines_read(&mut self, value: usize) {
        match self {
            Stream::Byte(ptr) => ptr.lines_read = value,
            Stream::InputFile(ptr) => ptr.lines_read = value,
            Stream::OutputFile(ptr) => ptr.lines_read = value,
            Stream::StaticString(ptr) => ptr.lines_read = value,
            Stream::NamedTcp(ptr) => ptr.lines_read = value,
            Stream::NamedTls(ptr) => ptr.lines_read = value,
            Stream::Null(_) => {}
            Stream::Readline(ptr) => ptr.lines_read = value,
            Stream::StandardOutput(ptr) => ptr.lines_read = value,
            Stream::StandardError(ptr) => ptr.lines_read = value,
        }
    }
    /// Returns the number of lines read so far (0 for `Null`).
    #[inline]
    pub(crate) fn lines_read(&self) -> usize {
        match self {
            Stream::Byte(ptr) => ptr.lines_read,
            Stream::InputFile(ptr) => ptr.lines_read,
            Stream::OutputFile(ptr) => ptr.lines_read,
            Stream::StaticString(ptr) => ptr.lines_read,
            Stream::NamedTcp(ptr) => ptr.lines_read,
            Stream::NamedTls(ptr) => ptr.lines_read,
            Stream::Null(_) => 0,
            Stream::Readline(ptr) => ptr.lines_read,
            Stream::StandardOutput(ptr) => ptr.lines_read,
            Stream::StandardError(ptr) => ptr.lines_read,
        }
    }
}
impl CharRead for Stream {
    /// Peeks the next character from input-capable variants; output-only
    /// and null streams yield a `PermissionDenied` error.
    fn peek_char(&mut self) -> Option<std::io::Result<char>> {
        match self {
            Stream::InputFile(file) => (*file).peek_char(),
            Stream::NamedTcp(tcp_stream) => (*tcp_stream).peek_char(),
            Stream::NamedTls(tls_stream) => (*tls_stream).peek_char(),
            Stream::Readline(rl_stream) => (*rl_stream).peek_char(),
            Stream::StaticString(src) => (*src).peek_char(),
            Stream::Byte(cursor) => (*cursor).peek_char(),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => Some(Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::ReadFromOutputStream,
            ))),
        }
    }
    /// Reads the next character from input-capable variants; output-only
    /// and null streams yield a `PermissionDenied` error.
    fn read_char(&mut self) -> Option<std::io::Result<char>> {
        match self {
            Stream::InputFile(file) => (*file).read_char(),
            Stream::NamedTcp(tcp_stream) => (*tcp_stream).read_char(),
            Stream::NamedTls(tls_stream) => (*tls_stream).read_char(),
            Stream::Readline(rl_stream) => (*rl_stream).read_char(),
            Stream::StaticString(src) => (*src).read_char(),
            Stream::Byte(cursor) => (*cursor).read_char(),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => Some(Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::ReadFromOutputStream,
            ))),
        }
    }
    /// Pushes `c` back onto input-capable variants; silently ignored on
    /// output-only and null streams.
    fn put_back_char(&mut self, c: char) {
        match self {
            Stream::InputFile(file) => file.put_back_char(c),
            Stream::NamedTcp(tcp_stream) => tcp_stream.put_back_char(c),
            Stream::NamedTls(tls_stream) => tls_stream.put_back_char(c),
            Stream::Readline(rl_stream) => rl_stream.put_back_char(c),
            Stream::StaticString(src) => src.put_back_char(c),
            Stream::Byte(cursor) => cursor.put_back_char(c),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => {}
        }
    }
    /// Marks `nread` bytes as consumed on input-capable variants; silently
    /// ignored on output-only and null streams.
    fn consume(&mut self, nread: usize) {
        match self {
            Stream::InputFile(ref mut file) => file.consume(nread),
            Stream::NamedTcp(ref mut tcp_stream) => tcp_stream.consume(nread),
            Stream::NamedTls(ref mut tls_stream) => tls_stream.consume(nread),
            Stream::Readline(ref mut rl_stream) => rl_stream.consume(nread),
            Stream::StaticString(ref mut src) => src.consume(nread),
            Stream::Byte(ref mut cursor) => cursor.consume(nread),
            Stream::OutputFile(_) |
            Stream::StandardError(_) |
            Stream::StandardOutput(_) |
            Stream::Null(_) => {}
        }
    }
}
impl Read for Stream {
    /// Reads raw bytes from input-capable variants; output-only and null
    /// streams yield a `PermissionDenied` error.
    // The original bound the match to a `bytes_read` local only to return it
    // immediately (clippy::let_and_return); the match is returned directly.
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        match self {
            Stream::InputFile(file) => (*file).read(buf),
            Stream::NamedTcp(tcp_stream) => (*tcp_stream).read(buf),
            Stream::NamedTls(tls_stream) => (*tls_stream).read(buf),
            Stream::Readline(rl_stream) => (*rl_stream).read(buf),
            Stream::StaticString(src) => (*src).read(buf),
            Stream::Byte(cursor) => (*cursor).read(buf),
            Stream::OutputFile(_)
            | Stream::StandardError(_)
            | Stream::StandardOutput(_)
            | Stream::Null(_) => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::ReadFromOutputStream,
            )),
        }
    }
}
impl Write for Stream {
    /// Writes to output-capable variants; read-only and null streams yield
    /// a `PermissionDenied` error. TCP/TLS/byte streams are wrapped in a
    /// `CharReader`, so writes reach the inner stream through `get_mut()`.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        match self {
            Stream::OutputFile(ref mut file) => file.write(buf),
            Stream::NamedTcp(ref mut tcp_stream) => tcp_stream.get_mut().write(buf),
            Stream::NamedTls(ref mut tls_stream) => tls_stream.get_mut().write(buf),
            Stream::Byte(ref mut cursor) => cursor.get_mut().write(buf),
            Stream::StandardOutput(stream) => stream.write(buf),
            Stream::StandardError(stream) => stream.write(buf),
            Stream::StaticString(_) |
            Stream::Readline(_) |
            Stream::InputFile(..) |
            Stream::Null(_) => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::WriteToInputStream,
            )),
        }
    }
    /// Flushes output-capable variants; read-only and null streams yield a
    /// `PermissionDenied` error.
    fn flush(&mut self) -> std::io::Result<()> {
        match self {
            Stream::OutputFile(ref mut file) => file.stream.flush(),
            Stream::NamedTcp(ref mut tcp_stream) => tcp_stream.stream.get_mut().flush(),
            Stream::NamedTls(ref mut tls_stream) => tls_stream.stream.get_mut().flush(),
            Stream::Byte(ref mut cursor) => cursor.stream.get_mut().flush(),
            Stream::StandardError(stream) => stream.stream.flush(),
            Stream::StandardOutput(stream) => stream.stream.flush(),
            Stream::StaticString(_) |
            Stream::Readline(_) |
            Stream::InputFile(_) |
            Stream::Null(_) => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::FlushToInputStream,
            )),
        }
    }
}
/// Error payloads boxed into `std::io::Error` by the stream operations in
/// this module.
#[derive(Debug)]
enum StreamError {
    PeekByteFailed,
    PeekByteFromNonPeekableStream,
    #[allow(unused)] PeekCharFailed,
    #[allow(unused)] PeekCharFromNonPeekableStream,
    ReadFromOutputStream,
    WriteToInputStream,
    FlushToInputStream,
}
impl fmt::Display for StreamError {
    /// Human-readable message for each stream error variant.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Select the message, then emit it with a single write.
        let msg = match self {
            StreamError::PeekByteFailed => "peek byte failed!",
            StreamError::PeekByteFromNonPeekableStream => {
                "attempted to peek byte from a non-peekable input stream"
            }
            StreamError::PeekCharFailed => "peek char failed!",
            StreamError::PeekCharFromNonPeekableStream => {
                "attempted to peek char from a non-peekable input stream"
            }
            StreamError::ReadFromOutputStream => "attempted to read from a write-only stream",
            StreamError::WriteToInputStream => "attempted to write to a read-only stream",
            StreamError::FlushToInputStream => "attempted to flush a read-only stream",
        };
        write!(f, "{}", msg)
    }
}
// A full `std::error::Error` impl lets StreamError be boxed into
// `std::io::Error::new` calls throughout this module.
impl Error for StreamError {}
impl PartialOrd for Stream {
    // Delegates to the total order defined by `Ord` below.
    #[inline]
    fn partial_cmp(&self, other: &Stream) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl Ord for Stream {
    // Streams are totally ordered by the address of their arena allocation.
    #[inline]
    fn cmp(&self, other: &Stream) -> Ordering {
        let (lhs, rhs) = (self.as_ptr(), other.as_ptr());
        lhs.cmp(&rhs)
    }
}
impl PartialEq for Stream {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
}
// Pointer equality is reflexive, so the `Eq` marker holds.
impl Eq for Stream {}
impl Stream {
    /// Returns `(byte position, lines read)` where a position is meaningful:
    /// the actual file offset for input files, 0 for in-memory/socket
    /// streams, `None` otherwise.
    #[inline]
    pub(crate) fn position(&mut self) -> Option<(u64, usize)> {
        // Returns (position, lines_read). (The original comment had the
        // tuple order reversed.)
        let result = match self {
            Stream::InputFile(ref mut file_stream) => {
                // Seek by 0 from the current offset to query the position.
                file_stream.get_mut().file.seek(SeekFrom::Current(0)).ok()
            }
            Stream::NamedTcp(..)
            | Stream::NamedTls(..)
            | Stream::Readline(..)
            | Stream::StaticString(..)
            | Stream::Byte(..) => Some(0),
            _ => None,
        };
        result.map(|position| (position, self.lines_read()))
    }
    /// Repositions an input file stream to an absolute byte offset and
    /// refreshes its past-end-of-stream flag; no-op for all other variants.
    #[inline]
    pub(crate) fn set_position(&mut self, position: u64) {
        if let Stream::InputFile(stream_layout) = self {
            let StreamLayout {
                past_end_of_stream,
                stream,
                ..
            } = &mut **stream_layout;
            stream.get_mut().file.seek(SeekFrom::Start(position)).unwrap();
            if let Ok(metadata) = stream.get_ref().file.metadata() {
                *past_end_of_stream = position > metadata.len();
            }
        }
    }
    /// Returns the cached past-end-of-stream flag (`false` for `Null`).
    #[inline]
    pub(crate) fn past_end_of_stream(&self) -> bool {
        match self {
            Stream::Byte(stream) => stream.past_end_of_stream,
            Stream::InputFile(stream) => stream.past_end_of_stream,
            Stream::OutputFile(stream) => stream.past_end_of_stream,
            Stream::StaticString(stream) => stream.past_end_of_stream,
            Stream::NamedTcp(stream) => stream.past_end_of_stream,
            Stream::NamedTls(stream) => stream.past_end_of_stream,
            Stream::Null(_) => false,
            Stream::Readline(stream) => stream.past_end_of_stream,
            Stream::StandardOutput(stream) => stream.past_end_of_stream,
            Stream::StandardError(stream) => stream.past_end_of_stream,
        }
    }
    /// True iff the cursor sits exactly at the end of the stream.
    #[inline]
    pub(crate) fn at_end_of_stream(&mut self) -> bool {
        self.position_relative_to_end() == AtEndOfStream::At
    }
    /// Overwrites the past-end-of-stream flag (no-op for `Null`).
    #[inline]
    pub(crate) fn set_past_end_of_stream(&mut self, value: bool) {
        match self {
            Stream::Byte(stream) => stream.past_end_of_stream = value,
            Stream::InputFile(stream) => stream.past_end_of_stream = value,
            Stream::OutputFile(stream) => stream.past_end_of_stream = value,
            Stream::StaticString(stream) => stream.past_end_of_stream = value,
            Stream::NamedTcp(stream) => stream.past_end_of_stream = value,
            Stream::NamedTls(stream) => stream.past_end_of_stream = value,
            Stream::Null(_) => {}
            Stream::Readline(stream) => stream.past_end_of_stream = value,
            Stream::StandardOutput(stream) => stream.past_end_of_stream = value,
            Stream::StandardError(stream) => stream.past_end_of_stream = value,
        }
    }
    /// Classifies the cursor position as not-at/at/past end of stream.
    /// Only input files are actively probed (via file metadata); any probe
    /// failure is conservatively treated as past end of stream.
    #[inline]
    pub(crate) fn position_relative_to_end(&mut self) -> AtEndOfStream {
        if self.past_end_of_stream() {
            return AtEndOfStream::Past;
        }
        if let Stream::InputFile(stream_layout) = self {
            let StreamLayout {
                past_end_of_stream,
                stream,
                ..
            } = &mut **stream_layout;
            match stream.get_ref().file.metadata() {
                Ok(metadata) => {
                    if let Ok(position) = stream.get_mut().file.seek(SeekFrom::Current(0)) {
                        return match position.cmp(&metadata.len()) {
                            Ordering::Equal => AtEndOfStream::At,
                            Ordering::Less => AtEndOfStream::Not,
                            Ordering::Greater => {
                                *past_end_of_stream = true;
                                AtEndOfStream::Past
                            }
                        };
                    } else {
                        *past_end_of_stream = true;
                        AtEndOfStream::Past
                    }
                }
                _ => {
                    *past_end_of_stream = true;
                    AtEndOfStream::Past
                }
            }
        } else {
            AtEndOfStream::Not
        }
    }
    /// Returns the file name (or socket address) associated with the
    /// stream, if any.
    #[inline]
    pub(crate) fn file_name(&self) -> Option<Atom> {
        match self {
            Stream::InputFile(file) => Some(file.stream.get_ref().file_name),
            Stream::OutputFile(file) => Some(file.stream.file_name),
            Stream::NamedTcp(tcp) => Some(tcp.stream.get_ref().address),
            Stream::NamedTls(tls) => Some(tls.stream.get_ref().address),
            _ => None,
        }
    }
    /// Returns the stream's mode atom as reported by stream properties.
    #[inline]
    pub(crate) fn mode(&self) -> Atom {
        match self {
            Stream::Byte(_)
            | Stream::Readline(_)
            | Stream::StaticString(_)
            | Stream::InputFile(..) => atom!("read"),
            Stream::NamedTcp(..) | Stream::NamedTls(..) => atom!("read_append"),
            Stream::OutputFile(file) if file.is_append => atom!("append"),
            Stream::OutputFile(_) | Stream::StandardError(_) | Stream::StandardOutput(_) => atom!("write"),
            Stream::Null(_) => atom!(""),
        }
    }
    /// Creates the standard output stream.
    #[inline]
    pub fn stdout(arena: &mut Arena) -> Self {
        Stream::StandardOutput(arena_alloc!(
            StreamLayout::new(StandardOutputStream {}),
            arena
        ))
    }
    /// Creates the standard error stream.
    #[inline]
    pub fn stderr(arena: &mut Arena) -> Self {
        Stream::StandardError(arena_alloc!(
            StreamLayout::new(StandardErrorStream {}),
            arena
        ))
    }
    /// Wraps a connected TCP socket as a stream, clearing any read/write
    /// timeouts so stream operations block indefinitely.
    #[inline]
    pub(crate) fn from_tcp_stream(
        address: Atom,
        tcp_stream: TcpStream,
        arena: &mut Arena,
    ) -> Self {
        tcp_stream.set_read_timeout(None).unwrap();
        tcp_stream.set_write_timeout(None).unwrap();
        Stream::NamedTcp(arena_alloc!(
            StreamLayout::new(CharReader::new(NamedTcpStream {
                address,
                tcp_stream
            })),
            arena
        ))
    }
    /// Wraps an established TLS session as a stream.
    #[inline]
    pub(crate) fn from_tls_stream(
        address: Atom,
        tls_stream: TlsStream<Stream>,
        arena: &mut Arena,
    ) -> Self {
        Stream::NamedTls(arena_alloc!(
            StreamLayout::new(CharReader::new(NamedTlsStream {
                address,
                tls_stream
            })),
            arena
        ))
    }
    /// Wraps an opened file as an output stream.
    #[inline]
    pub(crate) fn from_file_as_output(
        file_name: Atom,
        file: File,
        is_append: bool,
        arena: &mut Arena,
    ) -> Self {
        Stream::OutputFile(arena_alloc!(
            StreamLayout::new(OutputFileStream {
                file_name,
                file,
                is_append
            }),
            arena
        ))
    }
    /// Wraps an opened file as an input stream.
    #[inline]
    pub(crate) fn from_file_as_input(file_name: Atom, file: File, arena: &mut Arena) -> Self {
        Stream::InputFile(arena_alloc!(
            StreamLayout::new(CharReader::new(InputFileStream { file_name, file })),
            arena
        ))
    }
    /// Shuts down network streams, then replaces `self` with a null stream.
    /// The arena allocation itself is left to the arena to reclaim.
    #[inline]
    pub(crate) fn close(&mut self) -> Result<(), std::io::Error> {
        let result = match self {
            Stream::NamedTcp(ref mut tcp_stream) => {
                tcp_stream.inner_mut().tcp_stream.shutdown(Shutdown::Both)
            },
            Stream::NamedTls(ref mut tls_stream) => {
                tls_stream.inner_mut().tls_stream.shutdown()
            }
            _ => Ok(())
        };
        *self = Stream::Null(StreamOptions::default());
        result
    }
    /// True iff the handle is the null stream.
    #[inline]
    pub(crate) fn is_null_stream(&self) -> bool {
        matches!(self, Stream::Null(_))
    }
    /// True for variants that can be read from.
    #[inline]
    pub(crate) fn is_input_stream(&self) -> bool {
        matches!(
            self,
            Stream::NamedTcp(..)
                | Stream::NamedTls(..)
                | Stream::Byte(_)
                | Stream::Readline(_)
                | Stream::StaticString(_)
                | Stream::InputFile(..)
        )
    }
    /// True for variants that can be written to.
    #[inline]
    pub(crate) fn is_output_stream(&self) -> bool {
        matches!(
            self,
            Stream::StandardError(_)
                | Stream::StandardOutput(_)
                | Stream::NamedTcp(..)
                | Stream::NamedTls(..)
                | Stream::Byte(_)
                | Stream::OutputFile(..)
        )
    }
    // returns true on success.
    // (The original wrapped this match in a `loop` although every arm
    // returned on its first iteration; the dead loop is removed.)
    #[inline]
    pub(super) fn reset(&mut self) -> bool {
        self.set_lines_read(0);
        self.set_past_end_of_stream(false);
        match self {
            Stream::Byte(ref mut cursor) => {
                cursor.stream.get_mut().0.set_position(0);
                true
            }
            Stream::InputFile(ref mut file_stream) => {
                file_stream.stream.get_mut().file.seek(SeekFrom::Start(0)).unwrap();
                true
            }
            Stream::Readline(_) => true,
            _ => false,
        }
    }
    /// Returns the next byte without consuming it, for the stream kinds
    /// that support peeking; other kinds yield a `PermissionDenied` error.
    #[inline]
    pub(crate) fn peek_byte(&mut self) -> std::io::Result<u8> {
        match self {
            Stream::Byte(ref mut cursor) => {
                // Read one byte, then rewind the in-memory cursor.
                let mut b = [0u8; 1];
                let pos = cursor.stream.get_mut().0.position();
                match cursor.read(&mut b)? {
                    1 => {
                        cursor.stream.get_mut().0.set_position(pos);
                        Ok(b[0])
                    }
                    _ => Err(std::io::Error::new(ErrorKind::UnexpectedEof, "end of file")),
                }
            }
            Stream::InputFile(ref mut file) => {
                // Read one byte, then seek back over it.
                let mut b = [0u8; 1];
                match file.read(&mut b)? {
                    1 => {
                        file.stream.get_mut().file.seek(SeekFrom::Current(-1))?;
                        Ok(b[0])
                    }
                    _ => Err(std::io::Error::new(
                        ErrorKind::UnexpectedEof,
                        StreamError::PeekByteFailed,
                    )),
                }
            }
            Stream::Readline(ref mut stream) => stream.stream.peek_byte(),
            Stream::NamedTcp(ref mut stream) => {
                // TCP sockets support non-consuming reads natively.
                let mut b = [0u8; 1];
                stream.stream.get_mut().tcp_stream.peek(&mut b)?;
                Ok(b[0])
            }
            _ => Err(std::io::Error::new(
                ErrorKind::PermissionDenied,
                StreamError::PeekByteFromNonPeekableStream,
            )),
        }
    }
}
impl MachineState {
    /// Applies `stream`'s configured EOF action after a read reached end of
    /// stream: raise an error, unify `result` with an EOF marker, or reset.
    #[inline]
    pub(crate) fn eof_action(
        &mut self,
        result: HeapCellValue,
        mut stream: Stream,
        caller: Atom,
        arity: usize,
    ) -> CallResult {
        let eof_action = stream.options().eof_action();
        match eof_action {
            EOFAction::Error => {
                stream.set_past_end_of_stream(true);
                return Err(self.open_past_eos_error(stream, caller, arity));
            }
            EOFAction::EOFCode => {
                // Binary streams signal EOF with -1, text streams with the
                // `end_of_file` atom.
                let end_of_stream = if stream.options().stream_type() == StreamType::Binary {
                    fixnum_as_cell!(Fixnum::build_with(-1))
                } else {
                    atom_as_cell!(atom!("end_of_file"))
                };
                stream.set_past_end_of_stream(true);
                Ok(unify!(self, result, end_of_stream))
            }
            EOFAction::Reset => {
                // Rewind if possible; fail the goal when the reset failed.
                if !stream.reset() {
                    stream.set_past_end_of_stream(true);
                }
                Ok(self.fail = stream.past_end_of_stream())
            }
        }
    }
    /// Decodes four heap cells (alias, eof_action, reposition, stream_type)
    /// into a packed `StreamOptions`. Malformed option cells are considered
    /// unreachable here, i.e. assumed validated by the caller.
    pub(crate) fn to_stream_options(
        &mut self,
        alias: HeapCellValue,
        eof_action: HeapCellValue,
        reposition: HeapCellValue,
        stream_type: HeapCellValue,
    ) -> StreamOptions {
        // A non-atom alias cell means "no alias".
        let alias = read_heap_cell!(self.store(MachineState::deref(self, alias)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                Some(name)
            }
            _ => {
                None
            }
        );
        let eof_action = read_heap_cell!(self.store(MachineState::deref(self, eof_action)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                match name {
                    atom!("eof_code") => EOFAction::EOFCode,
                    atom!("error") => EOFAction::Error,
                    atom!("reset") => EOFAction::Reset,
                    _ => unreachable!(),
                }
            }
            _ => {
                unreachable!()
            }
        );
        let reposition = read_heap_cell!(self.store(MachineState::deref(self, reposition)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                name == atom!("true")
            }
            _ => {
                unreachable!()
            }
        );
        let stream_type = read_heap_cell!(self.store(MachineState::deref(self, stream_type)),
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                match name {
                    atom!("text") => StreamType::Text,
                    atom!("binary") => StreamType::Binary,
                    _ => unreachable!(),
                }
            }
            _ => {
                unreachable!()
            }
        );
        let mut options = StreamOptions::default();
        options.set_stream_type(stream_type);
        options.set_reposition(reposition);
        options.set_alias_to_atom_opt(alias);
        options.set_eof_action(eof_action);
        options
    }
    /// Resolves `addr` to a stream: an atom is looked up as an alias, a
    /// `Cons` cell is unwrapped as an arena stream pointer. Produces
    /// existence, permission, instantiation, or domain errors accordingly.
    pub(crate) fn get_stream_or_alias(
        &mut self,
        addr: HeapCellValue,
        stream_aliases: &StreamAliasDir,
        caller: Atom,
        arity: usize,
    ) -> Result<Stream, MachineStub> {
        let addr = self.store(MachineState::deref(self, addr));
        read_heap_cell!(addr,
            (HeapCellValueTag::Atom, (name, arity)) => {
                debug_assert_eq!(arity, 0);
                return match stream_aliases.get(&name) {
                    Some(stream) if !stream.is_null_stream() => Ok(*stream),
                    _ => {
                        // Unknown alias, or an alias whose stream was closed.
                        let stub = functor_stub(caller, arity);
                        let addr = atom_as_cell!(name);
                        let existence_error = self.existence_error(ExistenceError::Stream(addr));
                        Err(self.error_form(existence_error, stub))
                    }
                };
            }
            (HeapCellValueTag::Cons, ptr) => {
                match_untyped_arena_ptr!(ptr,
                    (ArenaHeaderTag::Stream, stream) => {
                        // A closed (null) stream may not be used again.
                        return if stream.is_null_stream() {
                            Err(self.open_permission_error(stream_as_cell!(stream), caller, arity))
                        } else {
                            Ok(stream)
                        };
                    }
                    _ => {
                    }
                );
            }
            _ => {
            }
        );
        // Neither an alias atom nor a stream pointer: instantiation error
        // for unbound variables, domain error otherwise.
        let stub = functor_stub(caller, arity);
        if addr.is_var() {
            let instantiation_error = self.instantiation_error();
            Err(self.error_form(instantiation_error, stub))
        } else {
            let domain_error = self.domain_error(DomainErrorType::StreamOrAlias, addr);
            Err(self.error_form(domain_error, stub))
        }
    }
    /// Prepares a stream for parsing: skips a leading UTF-8 BOM if present
    /// and converts peek failures into session errors.
    pub(crate) fn open_parsing_stream(
        &mut self,
        mut stream: Stream,
        stub_name: Atom,
        stub_arity: usize,
    ) -> Result<Stream, MachineStub> {
        match stream.peek_char() {
            None => Ok(stream), // empty stream is handled gracefully by Lexer::eof
            Some(Err(e)) => {
                let err = self.session_error(SessionError::from(e));
                let stub = functor_stub(stub_name, stub_arity);
                Err(self.error_form(err, stub))
            }
            Some(Ok(c)) => {
                if c == '\u{feff}' {
                    // skip UTF-8 BOM
                    stream.consume(c.len_utf8());
                }
                Ok(stream)
            }
        }
    }
    /// Builds a permission error term that names `stream` as the culprit.
    pub(crate) fn stream_permission_error(
        &mut self,
        perm: Permission,
        err_atom: Atom,
        stream: Stream,
        caller: Atom,
        arity: usize,
    ) -> MachineStub {
        let stub = functor_stub(caller, arity);
        let err = self.permission_error(perm, err_atom, stream_as_cell!(stream));
        return self.error_form(err, stub);
    }
    /// Permission error raised for reads attempted past end of stream.
    #[inline]
    pub(crate) fn open_past_eos_error(
        &mut self,
        stream: Stream,
        caller: Atom,
        arity: usize,
    ) -> MachineStub {
        self.stream_permission_error(
            Permission::InputStream,
            atom!("past_end_of_stream"),
            stream,
            caller,
            arity,
        )
    }
    /// Permission error for attempting to open `culprit` as a source/sink.
    pub(crate) fn open_permission_error<T: PermissionError>(
        &mut self,
        culprit: T,
        stub_name: Atom,
        stub_arity: usize,
    ) -> MachineStub {
        let stub = functor_stub(stub_name, stub_arity);
        let err = self.permission_error(Permission::Open, atom!("source_sink"), culprit);
        return self.error_form(err, stub);
    }
    /// Permission error for opening a stream under an alias already in use.
    pub(crate) fn occupied_alias_permission_error(
        &mut self,
        alias: Atom,
        stub_name: Atom,
        stub_arity: usize,
    ) -> MachineStub {
        let stub = functor_stub(stub_name, stub_arity);
        let alias_name = atom!("alias");
        let err = self.permission_error(
            Permission::Open,
            atom!("source_sink"),
            functor!(alias_name, [atom(alias)]),
        );
        return self.error_form(err, stub);
    }
    /// Permission error for requesting repositioning on a stream that does
    /// not support it.
    pub(crate) fn reposition_error(&mut self, stub_name: Atom, stub_arity: usize) -> MachineStub {
        let stub = functor_stub(stub_name, stub_arity);
        let rep_stub = functor!(atom!("reposition"), [atom(atom!("true"))]);
        let err = self.permission_error(Permission::Open, atom!("source_sink"), rep_stub);
        return self.error_form(err, stub);
    }
    /// Validates `stream` against the caller's expectations: direction
    /// (input iff `input` is `Some`), stream type, and end-of-stream state.
    /// Section references are to the ISO Prolog standard.
    pub(crate) fn check_stream_properties(
        &mut self,
        stream: Stream,
        expected_type: StreamType,
        input: Option<HeapCellValue>,
        caller: Atom,
        arity: usize,
    ) -> CallResult {
        let opt_err = if input.is_some() && !stream.is_input_stream() {
            Some(atom!("stream")) // 8.14.2.3 g)
        } else if input.is_none() && !stream.is_output_stream() {
            Some(atom!("stream")) // 8.14.2.3 g)
        } else if stream.options().stream_type() != expected_type {
            Some(expected_type.other().as_atom()) // 8.14.2.3 h)
        } else {
            None
        };
        let permission = if input.is_some() {
            Permission::InputStream
        } else {
            Permission::OutputStream
        };
        if let Some(err_atom) = opt_err {
            return Err(self.stream_permission_error(permission, err_atom, stream, caller, arity));
        }
        if let Some(input) = input {
            if stream.past_end_of_stream() {
                self.eof_action(input, stream, caller, arity)?;
            }
        }
        Ok(())
    }
    /// Opens `file_spec` according to the mode in register 2 (`read`,
    /// `write`, or `append`) and wraps it as a stream, translating OS errors
    /// into the errors mandated by the ISO standard (section references
    /// inline).
    pub(crate) fn stream_from_file_spec(
        &mut self,
        file_spec: Atom,
        indices: &mut IndexStore,
        options: &StreamOptions,
    ) -> Result<Stream, MachineStub> {
        if file_spec == atom!("") {
            let stub = functor_stub(atom!("open"), 4);
            let err = self.domain_error(DomainErrorType::SourceSink, self[temp_v!(1)]);
            return Err(self.error_form(err, stub));
        }
        // 8.11.5.3l)
        if let Some(alias) = options.get_alias() {
            if indices.stream_aliases.contains_key(&alias) {
                return Err(self.occupied_alias_permission_error(alias, atom!("open"), 4));
            }
        }
        let mode = MachineState::deref(self, self[temp_v!(2)]);
        let mode = cell_as_atom!(self.store(mode));
        let mut open_options = OpenOptions::new();
        let (is_input_file, in_append_mode) = match mode {
            atom!("read") => {
                open_options.read(true).write(false).create(false);
                (true, false)
            }
            atom!("write") => {
                open_options
                    .read(false)
                    .write(true)
                    .truncate(true)
                    .create(true);
                (false, false)
            }
            atom!("append") => {
                open_options
                    .read(false)
                    .write(true)
                    .create(true)
                    .append(true);
                (false, true)
            }
            _ => {
                let stub = functor_stub(atom!("open"), 4);
                let err = self.domain_error(DomainErrorType::IOMode, self[temp_v!(2)]);
                // 8.11.5.3h)
                return Err(self.error_form(err, stub));
            }
        };
        let file = match open_options.open(file_spec.as_str()) {
            Ok(file) => file,
            Err(err) => {
                match err.kind() {
                    ErrorKind::NotFound => {
                        // 8.11.5.3j)
                        let stub = functor_stub(atom!("open"), 4);
                        let err = self.existence_error(
                            ExistenceError::SourceSink(self[temp_v!(1)]),
                        );
                        return Err(self.error_form(err, stub));
                    }
                    ErrorKind::PermissionDenied => {
                        // 8.11.5.3k)
                        return Err(self.open_permission_error(self[temp_v!(1)], atom!("open"), 4));
                    }
                    _ => {
                        let stub = functor_stub(atom!("open"), 4);
                        let err = self.syntax_error(ParserError::IO(err));
                        return Err(self.error_form(err, stub));
                    }
                }
            }
        };
        Ok(if is_input_file {
            Stream::from_file_as_input(file_spec, file, &mut self.arena)
        } else {
            Stream::from_file_as_output(file_spec, file, in_append_mode, &mut self.arena)
        })
    }
}
|
// Copyright 2016 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A syntax highlighting plugin based on syntect.
extern crate serde_json;
extern crate syntect;
extern crate xi_core_lib as xi_core;
extern crate xi_plugin_lib;
extern crate xi_rope;
extern crate xi_trace;
mod stackmap;
use std::collections::HashMap;
use std::ops::Range;
use std::path::Path;
use std::str::FromStr;
use std::sync::MutexGuard;
use crate::xi_core::plugin_rpc::ScopeSpan;
use crate::xi_core::{ConfigTable, LanguageId, ViewId};
use xi_plugin_lib::{mainloop, Cache, Error, Plugin, StateCache, View};
use xi_rope::{DeltaBuilder, Interval, Rope, RopeDelta, RopeInfo};
use xi_trace::{trace, trace_block};
use syntect::parsing::{
ParseState, ScopeRepository, ScopeStack, ScopedMetadata, SyntaxSet, SCOPE_REPO,
};
use crate::stackmap::{LookupResult, StackMap};
// Number of lines highlighted per idle pass before spans are flushed
// and control is yielded back to the main loop.
const LINES_PER_RPC: usize = 10;
// Priority assigned to every edit this plugin submits (indentation,
// comment toggling).
const INDENTATION_PRIORITY: u64 = 100;
// Shorthand for the delta builder used to batch edits into one delta.
type EditBuilder = DeltaBuilder<RopeInfo>;
/// Edit types that will get processed.
#[derive(PartialEq, Clone, Copy)]
pub enum EditType {
    Insert,
    Newline,
    Other,
}
impl FromStr for EditType {
    type Err = ();

    /// Parses the edit-type tag attached to update notifications;
    /// any unrecognized tag is rejected with `Err(())`.
    fn from_str(s: &str) -> Result<EditType, ()> {
        let parsed = match s {
            "insert" => EditType::Insert,
            "newline" => EditType::Newline,
            "other" => EditType::Other,
            _ => return Err(()),
        };
        Ok(parsed)
    }
}
// A queued auto-indent job, recorded on edit and processed during idle.
#[derive(PartialEq, Clone)]
enum IndentationTask {
    // Re-indent the line created below a newline.
    Newline(usize),
    // Re-check the line being actively edited.
    Edit(usize),
    // Re-indent a whole line range (e.g. after a paste).
    Batch(Range<usize>),
}
/// The state for syntax highlighting of one file.
struct PluginState {
    stack_idents: StackMap,
    // Absolute buffer offset of the next line to highlight.
    offset: usize,
    // Parse state used for line 0; installed by `do_highlighting`.
    initial_state: LineState,
    // Buffer offset where the unflushed spans below begin.
    spans_start: usize,
    // unflushed spans
    spans: Vec<ScopeSpan>,
    // Scope-name stacks not yet registered with the peer.
    new_scopes: Vec<Vec<String>>,
    // keeps track of the lines (start, end) that might need indentation after edit
    indentation_state: Vec<IndentationTask>,
}
// Guard over the global scope repository shared with syntect.
type LockedRepo = MutexGuard<'static, ScopeRepository>;
/// The syntax highlighting state corresponding to the beginning of a line
/// (as stored in the state cache).
// Note: this needs to be option because the caching layer relies on Default.
// We can't implement that because the actual initial state depends on the
// syntax. There are other ways to handle this, but this will do for now.
type LineState = Option<(ParseState, ScopeStack)>;
/// The state of syntax highlighting for a collection of buffers.
struct Syntect<'a> {
    view_state: HashMap<ViewId, PluginState>,
    syntax_set: &'a SyntaxSet,
}
impl<'a> PluginState {
fn new() -> Self {
PluginState {
stack_idents: StackMap::default(),
offset: 0,
initial_state: None,
spans_start: 0,
spans: Vec::new(),
new_scopes: Vec::new(),
indentation_state: Vec::new(),
}
}
/// Compute syntax for one line, optionally also accumulating the style spans.
///
/// NOTE: `accumulate_spans` should be true if we're doing syntax highlighting,
/// and want to update the client. It should be `false` if we need syntax
/// information for another purpose, such as auto-indent.
fn compute_syntax(
    &mut self,
    line: &str,
    state: LineState,
    syntax_set: &SyntaxSet,
    accumulate_spans: bool,
) -> LineState {
    // Start from the supplied state, falling back to the per-view initial
    // state. NOTE(review): the `unwrap()` assumes one of the two is always
    // `Some`, i.e. `do_highlighting` ran before any highlighting request.
    let (mut parse_state, mut scope_state) =
        state.or_else(|| self.initial_state.clone()).unwrap();
    let ops = parse_state.parse_line(&line, syntax_set);
    let mut prev_cursor = 0;
    let repo = SCOPE_REPO.lock().unwrap();
    for (cursor, batch) in ops {
        if !scope_state.is_empty() {
            let scope_id = self.identifier_for_stack(&scope_state, &repo);
            // Span positions are relative to `spans_start` (the start of
            // the unflushed region), not absolute buffer offsets.
            let start = self.offset - self.spans_start + prev_cursor;
            let end = start + (cursor - prev_cursor);
            if accumulate_spans && start != end {
                let span = ScopeSpan { start, end, scope_id };
                self.spans.push(span);
            }
        }
        prev_cursor = cursor;
        scope_state.apply(&batch);
    }
    if accumulate_spans {
        // add span for final state
        let start = self.offset - self.spans_start + prev_cursor;
        let end = start + (line.len() - prev_cursor);
        let scope_id = self.identifier_for_stack(&scope_state, &repo);
        let span = ScopeSpan { start, end, scope_id };
        self.spans.push(span);
    }
    Some((parse_state, scope_state))
}
/// Returns the unique identifier for this `ScopeStack`. We use identifiers
/// so we aren't constantly sending long stack names to the peer.
fn identifier_for_stack(&mut self, stack: &ScopeStack, repo: &LockedRepo) -> u32 {
    match self.stack_idents.get_value(stack.as_slice()) {
        LookupResult::Existing(id) => id,
        LookupResult::New(id) => {
            // First sighting of this stack: queue its human-readable scope
            // names so the next flush can register them with the peer.
            let names: Vec<_> = stack
                .as_slice()
                .iter()
                .map(|scope| repo.to_string(*scope))
                .collect();
            self.new_scopes.push(names);
            id
        }
    }
}
// Return true if there's any more work to be done.
fn highlight_one_line(&mut self, ctx: &mut MyView, syntax_set: &SyntaxSet) -> bool {
    if let Some(line_num) = ctx.get_frontier() {
        // Resume from the nearest cached state at or before the frontier.
        let (line_num, offset, state) = ctx.get_prev(line_num);
        if offset != self.offset {
            // We jumped to a different region of the buffer: flush the
            // pending spans and restart accumulation at the new offset.
            self.flush_spans(ctx);
            self.offset = offset;
            self.spans_start = offset;
        }
        let new_frontier = match ctx.get_line(line_num) {
            Ok("") => None,
            Ok(s) => {
                let new_state = self.compute_syntax(s, state, syntax_set, true);
                self.offset += s.len();
                // Only a '\n'-terminated line has a successor to advance to.
                if s.as_bytes().last() == Some(&b'\n') {
                    Some((new_state, line_num + 1))
                } else {
                    None
                }
            }
            Err(_) => None,
        };
        // If the recomputed state equals the cached state for the next
        // line, downstream highlighting is unchanged and we can stop.
        let mut converged = false;
        if let Some((ref new_state, new_line_num)) = new_frontier {
            if let Some(old_state) = ctx.get(new_line_num) {
                converged = old_state.as_ref().unwrap().0 == new_state.as_ref().unwrap().0;
            }
        }
        if !converged {
            if let Some((new_state, new_line_num)) = new_frontier {
                ctx.set(new_line_num, new_state);
                ctx.update_frontier(new_line_num);
                return true;
            }
        }
        ctx.close_frontier();
    }
    false
}
/// Sends any newly-seen scope names and the accumulated style spans for
/// the region `[spans_start, offset)` to the peer, then resets the region.
fn flush_spans(&mut self, ctx: &mut MyView) {
    let _t = trace_block("PluginState::flush_spans", &["syntect"]);
    if !self.new_scopes.is_empty() {
        ctx.add_scopes(&self.new_scopes);
        self.new_scopes.clear();
    }
    let region_len = self.offset - self.spans_start;
    if region_len != 0 {
        ctx.update_spans(self.spans_start, region_len, &self.spans);
        self.spans.clear();
    }
    self.spans_start = self.offset;
}
/// Runs every queued indentation task, then leaves the queue empty.
pub fn indent_lines(&mut self, view: &mut MyView, syntax_set: &SyntaxSet) {
    // `mem::take` moves the queue out (leaving it empty) instead of the
    // previous `to_vec()` clone followed by an explicit `clear()`. The
    // task handlers below never enqueue new tasks, so draining the queue
    // up front is behaviorally equivalent and avoids the clone.
    for indentation_task in std::mem::take(&mut self.indentation_state) {
        match indentation_task {
            IndentationTask::Newline(line) => self
                .autoindent_line(view, syntax_set, line)
                .expect("auto-indent error on newline"),
            IndentationTask::Edit(line) => self
                .check_indent_active_edit(view, syntax_set, line)
                .expect("auto-indent error on insert"),
            IndentationTask::Batch(range) => self
                .bulk_autoindent(view, syntax_set, range)
                .expect("auto-indent error on other"),
        };
    }
}
/// Returns the metadata relevant to the given line. Computes the syntax
/// for this line (during normal editing this is only likely for line 0) if
/// necessary; in general reuses the syntax state calculated for highlighting.
fn get_metadata(
    &mut self,
    view: &mut MyView,
    syntax_set: &'a SyntaxSet,
    line: usize,
) -> Option<ScopedMetadata<'a>> {
    let text = view.get_line(line).unwrap_or("");
    // `accumulate_spans == false`: only the resulting scope stack matters.
    match self.compute_syntax(text, None, syntax_set, false) {
        Some((_, scope)) => Some(syntax_set.metadata().metadata_for_scope(scope.as_slice())),
        None => None,
    }
}
/// Checks for possible auto-indent changes after an appropriate edit.
fn consider_indentation(&mut self, view: &mut MyView, delta: &RopeDelta, edit_type: EditType) {
    for region in delta.iter_inserts() {
        let line_of_edit = view.line_of_offset(region.new_offset).unwrap();
        let last_line_of_edit = view.line_of_offset(region.new_offset + region.len).unwrap();
        match edit_type {
            EditType::Newline => {
                // The line created below the newline is the one to indent.
                self.indentation_state.push(IndentationTask::Newline(line_of_edit + 1))
            }
            EditType::Insert => {
                let range = region.new_offset..region.new_offset + region.len;
                // Whitespace-only insertions (the user indenting by hand)
                // are deliberately left alone.
                let is_whitespace = {
                    let insert_region =
                        view.get_region(range).expect("view must return region");
                    insert_region.as_bytes().iter().all(u8::is_ascii_whitespace)
                };
                if !is_whitespace {
                    self.indentation_state.push(IndentationTask::Edit(line_of_edit));
                }
            }
            EditType::Other => {
                // we are mainly interested in auto-indenting after paste
                let range = Range { start: line_of_edit, end: last_line_of_edit };
                self.indentation_state.push(IndentationTask::Batch(range));
            }
        };
    }
}
/// Re-indents every line in `range`, carrying an indent level forward from
/// the nearest non-blank line above the range, and submits all changes as
/// one delta.
fn bulk_autoindent(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    range: Range<usize>,
) -> Result<(), Error> {
    let _t = trace_block("Syntect::bulk_autoindent", &["syntect"]);
    let tab_size = view.get_config().tab_size;
    let use_spaces = view.get_config().translate_tabs_to_spaces;
    let mut builder = DeltaBuilder::new(view.get_buf_size());
    // Seed the level from the nearest non-blank line above the range.
    let mut base_indent = if range.start > 0 {
        self.previous_nonblank_line(view, range.start)?
            .map(|l| self.indent_level_of_line(view, l))
            .unwrap_or(0)
    } else {
        0
    };
    for line in range.start..=range.end {
        let current_line_indent = self.indent_level_of_line(view, line);
        if line > 0 {
            let increase_level = self.test_increase(view, syntax_set, line)?;
            let decrease_level = self.test_decrease(view, syntax_set, line)?;
            let increase = if increase_level { tab_size } else { 0 };
            let decrease = if decrease_level { tab_size } else { 0 };
            // saturating_sub: `base_indent + increase` can be smaller than
            // `decrease` (a dedent token at indent level 0); the previous
            // plain `-` underflowed `usize` and panicked. This matches the
            // saturating arithmetic already used in `check_indent_active_edit`.
            base_indent = (base_indent + increase).saturating_sub(decrease);
        }
        if base_indent != current_line_indent {
            let edit_start = view.offset_of_line(line)?;
            // Length of the existing leading whitespace to replace.
            let edit_len = {
                let line = view.get_line(line)?;
                line.as_bytes().iter().take_while(|b| **b == b' ' || **b == b'\t').count()
            };
            let indent_text =
                if use_spaces { n_spaces(base_indent) } else { n_tabs(base_indent / tab_size) };
            let iv = Interval::new(edit_start, edit_start + edit_len);
            builder.replace(iv, indent_text.into());
        }
    }
    view.edit(builder.build(), INDENTATION_PRIORITY, false, false, String::from("syntect"));
    Ok(())
}
/// Called when freshly computing a line's indent level, such as after
/// a newline, or when re-indenting a block.
fn autoindent_line(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    line: usize,
) -> Result<(), Error> {
    let _t = trace_block("Syntect::autoindent", &["syntect"]);
    debug_assert!(line > 0);
    let tab_size = view.get_config().tab_size;
    let current_indent = self.indent_level_of_line(view, line);
    // The base level comes from the closest non-blank line above.
    let base_indent = self
        .previous_nonblank_line(view, line)?
        .map(|l| self.indent_level_of_line(view, l))
        .unwrap_or(0);
    let increase_level = self.test_increase(view, syntax_set, line)?;
    let decrease_level = self.test_decrease(view, syntax_set, line)?;
    let increase = if increase_level { tab_size } else { 0 };
    let decrease = if decrease_level { tab_size } else { 0 };
    // saturating_sub: a dedent at indent level 0 made the previous plain
    // `-` underflow `usize` and panic; clamp at 0 instead, matching the
    // saturating handling in `check_indent_active_edit`.
    let final_level = (base_indent + increase).saturating_sub(decrease);
    if final_level != current_indent {
        self.set_indent(view, line, final_level)
    } else {
        Ok(())
    }
}
/// Called when actively editing a line; chiefly checks for whether or not
/// the current line should be de-indented, such as after a closing '}'.
fn check_indent_active_edit(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    line: usize,
) -> Result<(), Error> {
    let _t = trace_block("Syntect::check_indent_active_line", &["syntect"]);
    // Line 0 has no previous line to base a de-indent on.
    if line == 0 {
        return Ok(());
    }
    let tab_size = view.get_config().tab_size;
    let current_indent = self.indent_level_of_line(view, line);
    // A line already at column 0 cannot be de-indented further. (The
    // previous `line == 0 ||` half of this check was dead code — line 0
    // already returned above.)
    if current_indent == 0 {
        return Ok(());
    }
    let just_increased = self.test_increase(view, syntax_set, line)?;
    let decrease = self.test_decrease(view, syntax_set, line)?;
    let prev_line = self.previous_nonblank_line(view, line)?;
    let mut indent_level = prev_line.map(|l| self.indent_level_of_line(view, l)).unwrap_or(0);
    if decrease {
        // the first line after an increase should just match the previous line
        if !just_increased {
            indent_level = indent_level.saturating_sub(tab_size);
        }
        // we don't want to change indent level if this line doesn't
        // match `test_decrease`, because the user could have changed
        // it manually, and we respect that.
        if indent_level != current_indent {
            return self.set_indent(view, line, indent_level);
        }
    }
    Ok(())
}
/// Replaces the line's current leading whitespace with `level` worth of
/// spaces (or tabs), submitted immediately as one simple edit.
fn set_indent(&self, view: &mut MyView, line: usize, level: usize) -> Result<(), Error> {
    let edit_start = view.offset_of_line(line)?;
    // How many bytes of existing leading whitespace get replaced.
    let edit_len = view
        .get_line(line)?
        .as_bytes()
        .iter()
        .take_while(|b| **b == b' ' || **b == b'\t')
        .count();
    let use_spaces = view.get_config().translate_tabs_to_spaces;
    let tab_size = view.get_config().tab_size;
    let indent_text = if use_spaces {
        n_spaces(level)
    } else {
        n_tabs(level / tab_size)
    };
    let iv = Interval::new(edit_start, edit_start + edit_len);
    let delta = RopeDelta::simple_edit(iv, indent_text.into(), view.get_buf_size());
    view.edit(delta, INDENTATION_PRIORITY, false, false, String::from("syntect"));
    Ok(())
}
/// Test whether the indent level should be increased for this line,
/// by testing the _previous_ line against a regex.
fn test_increase(
&mut self,
view: &mut MyView,
syntax_set: &SyntaxSet,
line: usize,
) -> Result<bool, Error> {
debug_assert!(line > 0, "increasing indent requires a previous line");
let prev_line = match self.previous_nonblank_line(view, line) {
Ok(Some(l)) => l,
Ok(None) => return Ok(false),
Err(e) => return Err(e),
};
let metadata =
self.get_metadata(view, syntax_set, prev_line).ok_or_else(|| Error::PeerDisconnect)?;
let line = view.get_line(prev_line)?;
Ok(metadata.increase_indent(line))
}
/// Test whether the indent level for this line should be decreased, by
/// checking this line against a regex.
fn test_decrease(
&mut self,
view: &mut MyView,
syntax_set: &SyntaxSet,
line: usize,
) -> Result<bool, Error> {
if line == 0 {
return Ok(false);
}
let metadata =
self.get_metadata(view, syntax_set, line).ok_or_else(|| Error::PeerDisconnect)?;
let line = view.get_line(line)?;
Ok(metadata.decrease_indent(line))
}
/// Scans upward from just above `line` and returns the index of the first
/// line containing a non-whitespace byte, or `None` if all are blank.
fn previous_nonblank_line(
    &self,
    view: &mut MyView,
    line: usize,
) -> Result<Option<usize>, Error> {
    debug_assert!(line > 0);
    for candidate in (0..line).rev() {
        let text = view.get_line(candidate)?;
        if !text.bytes().all(|b| b.is_ascii_whitespace()) {
            return Ok(Some(candidate));
        }
    }
    Ok(None)
}
/// Measures a line's indent level: each space counts 1, each tab counts
/// `tab_size`; measurement stops at the first non-indent byte.
fn indent_level_of_line(&self, view: &mut MyView, line: usize) -> usize {
    let tab_size = view.get_config().tab_size;
    let text = view.get_line(line).unwrap_or("");
    let mut level = 0;
    for byte in text.as_bytes() {
        match byte {
            b' ' => level += 1,
            b'\t' => level += tab_size,
            _ => break,
        }
    }
    level
}
// Re-runs bulk auto-indent over each given (start, end) line range.
fn reindent(&mut self, view: &mut MyView, syntax_set: &SyntaxSet, lines: &[(usize, usize)]) {
    for (start, end) in lines {
        // NOTE(review): assumes `end > 0` (an exclusive end bound);
        // `*end - 1` would underflow on a zero end — confirm with callers.
        let range = Range { start: *start, end: *end - 1 };
        self.bulk_autoindent(view, syntax_set, range).expect("error on reindent");
    }
}
// Toggles line comments over each given line range, batching all changes
// into a single delta.
fn toggle_comment(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    lines: &[(usize, usize)],
) {
    let _t = trace_block("Syntect::toggle_comment", &["syntect"]);
    if lines.is_empty() {
        return;
    }
    let mut builder = DeltaBuilder::new(view.get_buf_size());
    for (start, end) in lines {
        let range = Range { start: *start, end: *end };
        self.toggle_comment_line_range(view, syntax_set, &mut builder, range);
    }
    // Only submit an edit if some range actually produced a change.
    if builder.is_empty() {
        eprintln!("no delta for lines {:?}", &lines);
    } else {
        view.edit(builder.build(), INDENTATION_PRIORITY, false, true, String::from("syntect"));
    }
}
// Decides from the range's first line whether to comment or uncomment,
// then applies that single operation to the whole range.
fn toggle_comment_line_range(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    builder: &mut EditBuilder,
    line_range: Range<usize>,
) {
    // The comment token comes from the syntax metadata of the first line;
    // if the syntax defines no line comment, do nothing.
    let comment_str = match self
        .get_metadata(view, syntax_set, line_range.start)
        .and_then(|s| s.line_comment().map(|s| s.to_owned()))
    {
        Some(s) => s,
        None => return,
    };
    // First line already commented => remove markers; otherwise insert.
    match view
        .get_line(line_range.start)
        .map(|l| comment_str.trim() == l.trim() || l.trim().starts_with(&comment_str))
    {
        Ok(true) => self.remove_comment_marker(view, builder, line_range, &comment_str),
        Ok(false) => self.insert_comment_marker(view, builder, line_range, &comment_str),
        Err(e) => eprintln!("toggle comment error: {:?}", e),
    }
}
fn insert_comment_marker(
    &self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    line_range: Range<usize>,
    comment_str: &str,
) {
    // when commenting out multiple lines, we insert all comment markers at
    // the same indent level: that of the least indented line.
    let line_offset = line_range
        .clone()
        .map(|num| {
            view.get_line(num)
                .ok()
                .and_then(|line| line.as_bytes().iter().position(|b| *b != b' ' && *b != b'\t'))
                .unwrap_or(0)
        })
        .min()
        .unwrap_or(0);
    let comment_txt = Rope::from(&comment_str);
    for num in line_range {
        let offset = view.offset_of_line(num).unwrap();
        let line = view.get_line(num).unwrap();
        // Skip lines that already carry the comment marker.
        if line.trim().starts_with(&comment_str) {
            continue;
        }
        // Zero-length interval == pure insertion at that position.
        let iv = Interval::new(offset + line_offset, offset + line_offset);
        builder.replace(iv, comment_txt.clone());
    }
}
fn remove_comment_marker(
    &self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    lines: Range<usize>,
    comment_str: &str,
) {
    for num in lines {
        let offset = view.offset_of_line(num).unwrap();
        let line = view.get_line(num).unwrap();
        // Delete the first occurrence of the marker; on a line that is
        // nothing but the (trimmed) marker, delete the trimmed token.
        let (comment_start, len) = match line.find(&comment_str) {
            Some(off) => (offset + off, comment_str.len()),
            None if line.trim() == comment_str.trim() => (offset, comment_str.trim().len()),
            None => continue,
        };
        let iv = Interval::new(comment_start, comment_start + len);
        builder.delete(iv);
    }
}
}
// View handle parameterized over this plugin's per-line parse-state cache.
type MyView = View<StateCache<LineState>>;
impl<'a> Syntect<'a> {
fn new(syntax_set: &'a SyntaxSet) -> Self {
Syntect { view_state: HashMap::new(), syntax_set }
}
/// Wipes any existing state and starts highlighting with `syntax`.
fn do_highlighting(&mut self, view: &mut MyView) {
    let initial_state = {
        let language_id = view.get_language_id();
        // Fall back to plain text if no syntax matches the language id.
        let syntax = self
            .syntax_set
            .find_syntax_by_name(language_id.as_ref())
            .unwrap_or_else(|| self.syntax_set.find_syntax_plain_text());
        Some((ParseState::new(syntax), ScopeStack::new()))
    };
    // NOTE(review): `unwrap()` assumes `new_view` registered this view.
    let state = self.view_state.get_mut(&view.get_id()).unwrap();
    state.initial_state = initial_state;
    state.spans = Vec::new();
    state.new_scopes = Vec::new();
    state.offset = 0;
    state.spans_start = 0;
    // Drop all cached line states and re-highlight from scratch on idle.
    view.get_cache().clear();
    view.schedule_idle();
}
}
impl<'a> Plugin for Syntect<'a> {
type Cache = StateCache<LineState>;
/// Registers fresh per-view state and kicks off a full highlight pass.
fn new_view(&mut self, view: &mut View<Self::Cache>) {
    let _t = trace_block("Syntect::new_view", &["syntect"]);
    self.view_state.insert(view.get_id(), PluginState::new());
    self.do_highlighting(view);
}
// Drop the per-view state when a view closes.
fn did_close(&mut self, view: &View<Self::Cache>) {
    self.view_state.remove(&view.get_id());
}
// Restart highlighting from scratch after a save.
fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) {
    let _t = trace_block("Syntect::did_save", &["syntect"]);
    self.do_highlighting(view);
}
fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {}
// A new language means a new syntax definition: restart highlighting.
fn language_changed(&mut self, view: &mut View<Self::Cache>, _old_lang: LanguageId) {
    self.do_highlighting(view);
}
/// Handles a buffer update: reschedules highlighting and, when auto-indent
/// is enabled and the edit came from core, queues indentation work.
fn update(
    &mut self,
    view: &mut View<Self::Cache>,
    delta: Option<&RopeDelta>,
    edit_type: String,
    author: String,
) {
    let _t = trace_block("Syntect::update", &["syntect"]);
    view.schedule_idle();
    let should_auto_indent = view.get_config().auto_indent;
    // `EditType::from_str` only ever yields Insert/Newline/Other, so the
    // old three-way `== Some(..)` chain was equivalent to "parsed at all".
    // Binding with `if let` also removes the later `unwrap()`.
    if should_auto_indent && author == "core" {
        if let (Ok(edit_type), Some(delta)) = (edit_type.parse::<EditType>(), delta) {
            let state = self.view_state.get_mut(&view.get_id()).unwrap();
            state.consider_indentation(view, delta, edit_type);
        }
    }
}
// Dispatches plugin-specific RPCs sent by the frontend.
fn custom_command(
    &mut self,
    view: &mut View<Self::Cache>,
    method: &str,
    params: serde_json::Value,
) {
    match method {
        "toggle_comment" => {
            // params: a JSON list of (start, end) line ranges.
            let lines: Vec<(usize, usize)> = serde_json::from_value(params).unwrap();
            let state = self.view_state.get_mut(&view.get_id()).unwrap();
            state.toggle_comment(view, self.syntax_set, &lines);
        }
        "reindent" => {
            let lines: Vec<(usize, usize)> = serde_json::from_value(params).unwrap();
            let state = self.view_state.get_mut(&view.get_id()).unwrap();
            state.reindent(view, self.syntax_set, &lines);
        }
        other => eprintln!("syntect received unexpected command {}", other),
    }
}
// Idle work: run queued indentation, then highlight up to LINES_PER_RPC
// lines, yielding early if the frontend has a request pending.
fn idle(&mut self, view: &mut View<Self::Cache>) {
    let state = self.view_state.get_mut(&view.get_id()).unwrap();
    state.indent_lines(view, self.syntax_set);
    for _ in 0..LINES_PER_RPC {
        if !state.highlight_one_line(view, self.syntax_set) {
            // Nothing left to highlight: flush and stop rescheduling idle.
            state.flush_spans(view);
            return;
        }
        if view.request_is_pending() {
            trace("yielding for request", &["syntect"]);
            break;
        }
    }
    // More lines remain: flush what we have and come back on next idle.
    state.flush_spans(view);
    view.schedule_idle();
}
}
fn main() {
    // Load the syntax definitions bundled with syntect (newline variants).
    let syntax_set = SyntaxSet::load_defaults_newlines();
    let mut state = Syntect::new(&syntax_set);
    mainloop(&mut state).unwrap();
}
// Returns up to `n` spaces (capped at MAX_SPACES) as a static string slice.
fn n_spaces(n: usize) -> &'static str {
    // when someone opens an issue complaining about this we know we've made it
    const MAX_SPACES: usize = 160;
    static MANY_SPACES: [u8; MAX_SPACES] = [b' '; MAX_SPACES];
    // SAFETY: the buffer contains only ASCII spaces, so every prefix is
    // valid UTF-8, and the index is clamped to MAX_SPACES so the slice is
    // in bounds. Note the clamp silently caps very deep indents.
    unsafe { ::std::str::from_utf8_unchecked(&MANY_SPACES[..n.min(MAX_SPACES)]) }
}
// Returns up to `n` tabs (capped at MAX_TABS) as a static string slice.
fn n_tabs(n: usize) -> &'static str {
    const MAX_TABS: usize = 40;
    static MANY_TABS: [u8; MAX_TABS] = [b'\t'; MAX_TABS];
    // SAFETY: all-ASCII buffer, index clamped to MAX_TABS (see n_spaces).
    unsafe { ::std::str::from_utf8_unchecked(&MANY_TABS[..n.min(MAX_TABS)]) }
}
Build a single delta for all queued indentation edits
// Copyright 2016 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A syntax highlighting plugin based on syntect.
extern crate serde_json;
extern crate syntect;
extern crate xi_core_lib as xi_core;
extern crate xi_plugin_lib;
extern crate xi_rope;
extern crate xi_trace;
mod stackmap;
use std::collections::HashMap;
use std::ops::Range;
use std::path::Path;
use std::str::FromStr;
use std::sync::MutexGuard;
use crate::xi_core::plugin_rpc::ScopeSpan;
use crate::xi_core::{ConfigTable, LanguageId, ViewId};
use xi_plugin_lib::{mainloop, Cache, Error, Plugin, StateCache, View};
use xi_rope::{DeltaBuilder, Interval, Rope, RopeDelta, RopeInfo};
use xi_trace::{trace, trace_block};
use syntect::parsing::{
ParseState, ScopeRepository, ScopeStack, ScopedMetadata, SyntaxSet, SCOPE_REPO,
};
use crate::stackmap::{LookupResult, StackMap};
// Number of lines highlighted per idle pass before spans are flushed.
const LINES_PER_RPC: usize = 10;
// Priority assigned to every edit this plugin submits.
const INDENTATION_PRIORITY: u64 = 100;
// Shorthand for the delta builder used to batch edits into one delta.
type EditBuilder = DeltaBuilder<RopeInfo>;
/// Edit types that will get processed.
#[derive(PartialEq, Clone, Copy)]
pub enum EditType {
    Insert,
    Newline,
    Other,
}
impl FromStr for EditType {
    type Err = ();
    // Parses the edit-type tag attached to update notifications; any
    // unrecognized tag is rejected with `Err(())`.
    fn from_str(s: &str) -> Result<EditType, ()> {
        match s {
            "insert" => Ok(EditType::Insert),
            "newline" => Ok(EditType::Newline),
            "other" => Ok(EditType::Other),
            _ => Err(()),
        }
    }
}
// A queued auto-indent job, recorded on edit and processed during idle.
#[derive(PartialEq, Clone)]
enum IndentationTask {
    // Re-indent the line created below a newline.
    Newline(usize),
    // Re-check the line being actively edited.
    Edit(usize),
    // Re-indent a whole line range (e.g. after a paste).
    Batch(Range<usize>),
}
/// The state for syntax highlighting of one file.
struct PluginState {
    stack_idents: StackMap,
    // Absolute buffer offset of the next line to highlight.
    offset: usize,
    // Parse state used for line 0; installed by `do_highlighting`.
    initial_state: LineState,
    // Buffer offset where the unflushed spans below begin.
    spans_start: usize,
    // unflushed spans
    spans: Vec<ScopeSpan>,
    // Scope-name stacks not yet registered with the peer.
    new_scopes: Vec<Vec<String>>,
    // keeps track of the lines (start, end) that might need indentation after edit
    indentation_state: Vec<IndentationTask>,
}
// Guard over the global scope repository shared with syntect.
type LockedRepo = MutexGuard<'static, ScopeRepository>;
/// The syntax highlighting state corresponding to the beginning of a line
/// (as stored in the state cache).
// Note: this needs to be option because the caching layer relies on Default.
// We can't implement that because the actual initial state depends on the
// syntax. There are other ways to handle this, but this will do for now.
type LineState = Option<(ParseState, ScopeStack)>;
/// The state of syntax highlighting for a collection of buffers.
struct Syntect<'a> {
    view_state: HashMap<ViewId, PluginState>,
    syntax_set: &'a SyntaxSet,
}
impl<'a> PluginState {
// Fresh per-view state; `initial_state` is filled in by `do_highlighting`.
fn new() -> Self {
    PluginState {
        stack_idents: StackMap::default(),
        offset: 0,
        initial_state: None,
        spans_start: 0,
        spans: Vec::new(),
        new_scopes: Vec::new(),
        indentation_state: Vec::new(),
    }
}
/// Compute syntax for one line, optionally also accumulating the style spans.
///
/// NOTE: `accumulate_spans` should be true if we're doing syntax highlighting,
/// and want to update the client. It should be `false` if we need syntax
/// information for another purpose, such as auto-indent.
fn compute_syntax(
    &mut self,
    line: &str,
    state: LineState,
    syntax_set: &SyntaxSet,
    accumulate_spans: bool,
) -> LineState {
    // Fall back to the per-view initial state when no cached state given.
    // NOTE(review): the `unwrap()` assumes `do_highlighting` ran first.
    let (mut parse_state, mut scope_state) =
        state.or_else(|| self.initial_state.clone()).unwrap();
    let ops = parse_state.parse_line(&line, syntax_set);
    let mut prev_cursor = 0;
    let repo = SCOPE_REPO.lock().unwrap();
    for (cursor, batch) in ops {
        if !scope_state.is_empty() {
            let scope_id = self.identifier_for_stack(&scope_state, &repo);
            // Span positions are relative to `spans_start`, not absolute.
            let start = self.offset - self.spans_start + prev_cursor;
            let end = start + (cursor - prev_cursor);
            if accumulate_spans && start != end {
                let span = ScopeSpan { start, end, scope_id };
                self.spans.push(span);
            }
        }
        prev_cursor = cursor;
        scope_state.apply(&batch);
    }
    if accumulate_spans {
        // add span for final state
        let start = self.offset - self.spans_start + prev_cursor;
        let end = start + (line.len() - prev_cursor);
        let scope_id = self.identifier_for_stack(&scope_state, &repo);
        let span = ScopeSpan { start, end, scope_id };
        self.spans.push(span);
    }
    Some((parse_state, scope_state))
}
/// Returns the unique identifier for this `ScopeStack`. We use identifiers
/// so we aren't constantly sending long stack names to the peer.
fn identifier_for_stack(&mut self, stack: &ScopeStack, repo: &LockedRepo) -> u32 {
    let identifier = self.stack_idents.get_value(stack.as_slice());
    match identifier {
        LookupResult::Existing(id) => id,
        LookupResult::New(id) => {
            // New stack: queue its readable names for the next flush.
            let stack_strings =
                stack.as_slice().iter().map(|slice| repo.to_string(*slice)).collect::<Vec<_>>();
            self.new_scopes.push(stack_strings);
            id
        }
    }
}
// Return true if there's any more work to be done.
fn highlight_one_line(&mut self, ctx: &mut MyView, syntax_set: &SyntaxSet) -> bool {
    if let Some(line_num) = ctx.get_frontier() {
        // Resume from the nearest cached state at or before the frontier.
        let (line_num, offset, state) = ctx.get_prev(line_num);
        if offset != self.offset {
            // Jumped to a different region: flush pending spans and
            // restart accumulation at the new offset.
            self.flush_spans(ctx);
            self.offset = offset;
            self.spans_start = offset;
        }
        let new_frontier = match ctx.get_line(line_num) {
            Ok("") => None,
            Ok(s) => {
                let new_state = self.compute_syntax(s, state, syntax_set, true);
                self.offset += s.len();
                // Only a '\n'-terminated line has a successor to advance to.
                if s.as_bytes().last() == Some(&b'\n') {
                    Some((new_state, line_num + 1))
                } else {
                    None
                }
            }
            Err(_) => None,
        };
        // If the recomputed state matches the cached state of the next
        // line, downstream highlighting is unchanged — stop early.
        let mut converged = false;
        if let Some((ref new_state, new_line_num)) = new_frontier {
            if let Some(old_state) = ctx.get(new_line_num) {
                converged = old_state.as_ref().unwrap().0 == new_state.as_ref().unwrap().0;
            }
        }
        if !converged {
            if let Some((new_state, new_line_num)) = new_frontier {
                ctx.set(new_line_num, new_state);
                ctx.update_frontier(new_line_num);
                return true;
            }
        }
        ctx.close_frontier();
    }
    false
}
// Sends newly-seen scope names and the accumulated spans for the region
// [spans_start, offset) to the peer, then resets the region.
fn flush_spans(&mut self, ctx: &mut MyView) {
    let _t = trace_block("PluginState::flush_spans", &["syntect"]);
    if !self.new_scopes.is_empty() {
        ctx.add_scopes(&self.new_scopes);
        self.new_scopes.clear();
    }
    if self.spans_start != self.offset {
        ctx.update_spans(self.spans_start, self.offset - self.spans_start, &self.spans);
        self.spans.clear();
    }
    self.spans_start = self.offset;
}
/// Runs every queued indentation task, batching all resulting edits into a
/// single delta that is submitted once at the end.
pub fn indent_lines(&mut self, view: &mut MyView, syntax_set: &SyntaxSet) {
    let mut builder = DeltaBuilder::new(view.get_buf_size());
    // `mem::take` drains the queue by moving it out instead of the previous
    // `to_vec()` clone; the task handlers never enqueue new tasks, so this
    // is equivalent and also makes the trailing `clear()` unnecessary.
    for indentation_task in std::mem::take(&mut self.indentation_state) {
        match indentation_task {
            IndentationTask::Newline(line) => self
                .autoindent_line(view, &mut builder, syntax_set, line)
                .expect("auto-indent error on newline"),
            IndentationTask::Edit(line) => self
                .check_indent_active_edit(view, &mut builder, syntax_set, line)
                .expect("auto-indent error on insert"),
            IndentationTask::Batch(range) => self
                .bulk_autoindent(view, &mut builder, syntax_set, range)
                .expect("auto-indent error on other"),
        };
    }
    // Only fire the edit RPC if some task actually produced a change.
    if !builder.is_empty() {
        view.edit(builder.build(), INDENTATION_PRIORITY, false, false, String::from("syntect"));
    }
}
/// Returns the metadata relevant to the given line. Computes the syntax
/// for this line (during normal editing this is only likely for line 0) if
/// necessary; in general reuses the syntax state calculated for highlighting.
fn get_metadata(
    &mut self,
    view: &mut MyView,
    syntax_set: &'a SyntaxSet,
    line: usize,
) -> Option<ScopedMetadata<'a>> {
    let text = view.get_line(line).unwrap_or("");
    // `accumulate_spans == false`: only the resulting scope stack matters.
    let scope = self.compute_syntax(&text, None, syntax_set, false).map(|(_, scope)| scope)?;
    Some(syntax_set.metadata().metadata_for_scope(scope.as_slice()))
}
/// Checks for possible auto-indent changes after an appropriate edit.
fn consider_indentation(&mut self, view: &mut MyView, delta: &RopeDelta, edit_type: EditType) {
    for region in delta.iter_inserts() {
        let line_of_edit = view.line_of_offset(region.new_offset).unwrap();
        let last_line_of_edit = view.line_of_offset(region.new_offset + region.len).unwrap();
        match edit_type {
            EditType::Newline => {
                // The line created below the newline is the one to indent.
                self.indentation_state.push(IndentationTask::Newline(line_of_edit + 1))
            }
            EditType::Insert => {
                let range = region.new_offset..region.new_offset + region.len;
                // Whitespace-only insertions (manual indenting) are ignored.
                let is_whitespace = {
                    let insert_region =
                        view.get_region(range).expect("view must return region");
                    insert_region.as_bytes().iter().all(u8::is_ascii_whitespace)
                };
                if !is_whitespace {
                    self.indentation_state.push(IndentationTask::Edit(line_of_edit));
                }
            }
            EditType::Other => {
                // we are mainly interested in auto-indenting after paste
                let range = Range { start: line_of_edit, end: last_line_of_edit };
                self.indentation_state.push(IndentationTask::Batch(range));
            }
        };
    }
}
/// Re-indents every line in `range`, recording all replacements into the
/// caller-supplied `builder` (the edit itself is submitted by `indent_lines`).
fn bulk_autoindent(
    &mut self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    syntax_set: &SyntaxSet,
    range: Range<usize>,
) -> Result<(), Error> {
    let _t = trace_block("Syntect::bulk_autoindent", &["syntect"]);
    let tab_size = view.get_config().tab_size;
    let use_spaces = view.get_config().translate_tabs_to_spaces;
    // Seed the level from the nearest non-blank line above the range.
    let mut base_indent = if range.start > 0 {
        self.previous_nonblank_line(view, range.start)?
            .map(|l| self.indent_level_of_line(view, l))
            .unwrap_or(0)
    } else {
        0
    };
    for line in range.start..=range.end {
        let current_line_indent = self.indent_level_of_line(view, line);
        if line > 0 {
            let increase_level = self.test_increase(view, syntax_set, line)?;
            let decrease_level = self.test_decrease(view, syntax_set, line)?;
            let increase = if increase_level { tab_size } else { 0 };
            let decrease = if decrease_level { tab_size } else { 0 };
            // saturating_sub: a dedent at indent level 0 made the previous
            // plain `-` underflow `usize` and panic; clamp at 0 instead,
            // matching the saturating handling in `check_indent_active_edit`.
            base_indent = (base_indent + increase).saturating_sub(decrease);
        }
        if base_indent != current_line_indent {
            let edit_start = view.offset_of_line(line)?;
            // Length of the existing leading whitespace to replace.
            let edit_len = {
                let line = view.get_line(line)?;
                line.as_bytes().iter().take_while(|b| **b == b' ' || **b == b'\t').count()
            };
            let indent_text =
                if use_spaces { n_spaces(base_indent) } else { n_tabs(base_indent / tab_size) };
            let iv = Interval::new(edit_start, edit_start + edit_len);
            builder.replace(iv, indent_text.into());
        }
    }
    Ok(())
}
/// Called when freshly computing a line's indent level, such as after
/// a newline, or when re-indenting a block.
///
/// The level is the previous nonblank line's level, plus/minus one tab
/// stop according to the syntax metadata's increase/decrease rules.
fn autoindent_line(
    &mut self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    syntax_set: &SyntaxSet,
    line: usize,
) -> Result<(), Error> {
    let _t = trace_block("Syntect::autoindent", &["syntect"]);
    debug_assert!(line > 0);
    let tab_size = view.get_config().tab_size;
    let current_indent = self.indent_level_of_line(view, line);
    let base_indent = self
        .previous_nonblank_line(view, line)?
        .map(|l| self.indent_level_of_line(view, l))
        .unwrap_or(0);
    let increase_level = self.test_increase(view, syntax_set, line)?;
    let decrease_level = self.test_decrease(view, syntax_set, line)?;
    let increase = if increase_level { tab_size } else { 0 };
    let decrease = if decrease_level { tab_size } else { 0 };
    // Saturating: the plain `base_indent + increase - decrease` underflows
    // `usize` when a decrease applies at indent level 0.
    let final_level = (base_indent + increase).saturating_sub(decrease);
    if final_level != current_indent {
        self.set_indent(view, builder, line, final_level)
    } else {
        Ok(())
    }
}
/// Called when actively editing a line; chiefly checks for whether or not
/// the current line should be de-indented, such as after a closing '}'.
fn check_indent_active_edit(
    &mut self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    syntax_set: &SyntaxSet,
    line: usize,
) -> Result<(), Error> {
    let _t = trace_block("Syntect::check_indent_active_line", &["syntect"]);
    if line == 0 {
        return Ok(());
    }
    let tab_size = view.get_config().tab_size;
    let current_indent = self.indent_level_of_line(view, line);
    // `line == 0` was already handled by the early return above, so only the
    // already-flush case needs checking here (the old `line == 0 ||` was dead).
    if current_indent == 0 {
        return Ok(());
    }
    let just_increased = self.test_increase(view, syntax_set, line)?;
    let decrease = self.test_decrease(view, syntax_set, line)?;
    let prev_line = self.previous_nonblank_line(view, line)?;
    let mut indent_level = prev_line.map(|l| self.indent_level_of_line(view, l)).unwrap_or(0);
    if decrease {
        // the first line after an increase should just match the previous line
        if !just_increased {
            indent_level = indent_level.saturating_sub(tab_size);
        }
        // we don't want to change indent level if this line doesn't
        // match `test_decrease`, because the user could have changed
        // it manually, and we respect that.
        if indent_level != current_indent {
            return self.set_indent(view, builder, line, indent_level);
        }
    }
    Ok(())
}
/// Replaces the leading whitespace of `line` with indentation at `level`,
/// rendered as spaces or tabs per the view's configuration.
fn set_indent(
    &self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    line: usize,
    level: usize,
) -> Result<(), Error> {
    let line_start = view.offset_of_line(line)?;
    // Count the run of existing leading spaces/tabs; that span gets replaced.
    let existing_ws =
        view.get_line(line)?.bytes().take_while(|&b| b == b' ' || b == b'\t').count();
    let use_spaces = view.get_config().translate_tabs_to_spaces;
    let tab_size = view.get_config().tab_size;
    let new_indent = if use_spaces { n_spaces(level) } else { n_tabs(level / tab_size) };
    builder.replace(Interval::new(line_start, line_start + existing_ws), new_indent.into());
    Ok(())
}
/// Test whether the indent level should be increased for this line,
/// by testing the _previous_ line against a regex.
fn test_increase(
&mut self,
view: &mut MyView,
syntax_set: &SyntaxSet,
line: usize,
) -> Result<bool, Error> {
debug_assert!(line > 0, "increasing indent requires a previous line");
let prev_line = match self.previous_nonblank_line(view, line) {
Ok(Some(l)) => l,
Ok(None) => return Ok(false),
Err(e) => return Err(e),
};
let metadata =
self.get_metadata(view, syntax_set, prev_line).ok_or_else(|| Error::PeerDisconnect)?;
let line = view.get_line(prev_line)?;
Ok(metadata.increase_indent(line))
}
/// Test whether the indent level for this line should be decreased, by
/// checking this line against a regex.
fn test_decrease(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    line: usize,
) -> Result<bool, Error> {
    // The first line can never be de-indented relative to anything.
    if line == 0 {
        return Ok(false);
    }
    let metadata =
        self.get_metadata(view, syntax_set, line).ok_or_else(|| Error::PeerDisconnect)?;
    let text = view.get_line(line)?;
    Ok(metadata.decrease_indent(text))
}
/// Returns the nearest line strictly above `line` containing any
/// non-whitespace byte, or `None` when every preceding line is blank.
fn previous_nonblank_line(
    &self,
    view: &mut MyView,
    line: usize,
) -> Result<Option<usize>, Error> {
    debug_assert!(line > 0);
    // Walk upward from the line just above `line` to the top of the buffer.
    for candidate in (0..line).rev() {
        let text = view.get_line(candidate)?;
        if text.bytes().any(|b| !b.is_ascii_whitespace()) {
            return Ok(Some(candidate));
        }
    }
    Ok(None)
}
/// Measures the visual indent of `line`: each leading space counts 1 and
/// each leading tab counts `tab_size`; counting stops at the first other
/// byte. A line that cannot be fetched measures as 0.
fn indent_level_of_line(&self, view: &mut MyView, line: usize) -> usize {
    let tab_size = view.get_config().tab_size;
    let text = view.get_line(line).unwrap_or("");
    let mut level = 0;
    for byte in text.bytes() {
        match byte {
            b' ' => level += 1,
            b'\t' => level += tab_size,
            _ => break,
        }
    }
    level
}
/// Re-indents each `(start, end)` line span; `end` is exclusive, so the
/// range handed to `bulk_autoindent` ends at `end - 1` inclusive. All
/// resulting edits are applied as one delta.
fn reindent(&mut self, view: &mut MyView, syntax_set: &SyntaxSet, lines: &[(usize, usize)]) {
    let mut builder = DeltaBuilder::new(view.get_buf_size());
    for &(start, end) in lines {
        let inclusive = Range { start, end: end - 1 };
        self.bulk_autoindent(view, &mut builder, syntax_set, inclusive)
            .expect("error on reindent");
    }
    view.edit(builder.build(), INDENTATION_PRIORITY, false, false, String::from("syntect"));
}
/// Toggles line comments on each `(start, end)` line span, applying all
/// resulting edits as a single delta. Empty input is a no-op.
fn toggle_comment(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    lines: &[(usize, usize)],
) {
    let _t = trace_block("Syntect::toggle_comment", &["syntect"]);
    if lines.is_empty() {
        return;
    }
    let mut builder = DeltaBuilder::new(view.get_buf_size());
    for &(start, end) in lines {
        self.toggle_comment_line_range(view, syntax_set, &mut builder, Range { start, end });
    }
    if builder.is_empty() {
        eprintln!("no delta for lines {:?}", &lines);
    } else {
        view.edit(builder.build(), INDENTATION_PRIORITY, false, true, String::from("syntect"));
    }
}
/// Decides, based on the first line of `line_range`, whether to insert or
/// remove comment markers for the whole range. Does nothing when the scope
/// has no line-comment string.
fn toggle_comment_line_range(
    &mut self,
    view: &mut MyView,
    syntax_set: &SyntaxSet,
    builder: &mut EditBuilder,
    line_range: Range<usize>,
) {
    let maybe_comment = self
        .get_metadata(view, syntax_set, line_range.start)
        .and_then(|s| s.line_comment().map(|s| s.to_owned()));
    let comment_str = if let Some(s) = maybe_comment { s } else { return };
    // The first line decides the direction of the toggle: commented lines
    // get uncommented, and vice versa.
    let first_is_commented = view
        .get_line(line_range.start)
        .map(|l| comment_str.trim() == l.trim() || l.trim().starts_with(&comment_str));
    match first_is_commented {
        Ok(true) => self.remove_comment_marker(view, builder, line_range, &comment_str),
        Ok(false) => self.insert_comment_marker(view, builder, line_range, &comment_str),
        Err(e) => eprintln!("toggle comment error: {:?}", e),
    }
}
/// Inserts `comment_str` at the start of each line in `line_range`,
/// skipping lines that already begin with the marker.
fn insert_comment_marker(
    &self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    line_range: Range<usize>,
    comment_str: &str,
) {
    // when commenting out multiple lines, we insert all comment markers at
    // the same indent level: that of the least indented line.
    let line_offset = line_range
        .clone()
        .map(|num| {
            view.get_line(num)
                .ok()
                .and_then(|line| line.as_bytes().iter().position(|b| *b != b' ' && *b != b'\t'))
                .unwrap_or(0)
        })
        .min()
        .unwrap_or(0);
    let comment_txt = Rope::from(&comment_str);
    for num in line_range {
        let offset = view.offset_of_line(num).unwrap();
        let line = view.get_line(num).unwrap();
        // Already-commented lines are left untouched so toggling a mixed
        // block doesn't double-comment them.
        if line.trim().starts_with(&comment_str) {
            continue;
        }
        // Zero-width interval: a pure insertion at the shared indent column.
        let iv = Interval::new(offset + line_offset, offset + line_offset);
        builder.replace(iv, comment_txt.clone());
    }
}
/// Deletes the first occurrence of `comment_str` from each line in `lines`;
/// lines without the marker are skipped.
fn remove_comment_marker(
    &self,
    view: &mut MyView,
    builder: &mut EditBuilder,
    lines: Range<usize>,
    comment_str: &str,
) {
    for num in lines {
        let offset = view.offset_of_line(num).unwrap();
        let line = view.get_line(num).unwrap();
        let (comment_start, len) = match line.find(&comment_str) {
            Some(off) => (offset + off, comment_str.len()),
            // `comment_str` usually carries a trailing space; a line holding
            // only the bare (trimmed) marker is still treated as commented.
            None if line.trim() == comment_str.trim() => (offset, comment_str.trim().len()),
            None => continue,
        };
        let iv = Interval::new(comment_start, comment_start + len);
        builder.delete(iv);
    }
}
}
type MyView = View<StateCache<LineState>>;
impl<'a> Syntect<'a> {
    /// Creates a plugin instance with no per-view state yet.
    fn new(syntax_set: &'a SyntaxSet) -> Self {
        Syntect { view_state: HashMap::new(), syntax_set }
    }
    /// Wipes any existing state and starts highlighting with `syntax`.
    fn do_highlighting(&mut self, view: &mut MyView) {
        let initial_state = {
            let language_id = view.get_language_id();
            // Fall back to the plain-text syntax when the language is unknown.
            let syntax = self
                .syntax_set
                .find_syntax_by_name(language_id.as_ref())
                .unwrap_or_else(|| self.syntax_set.find_syntax_plain_text());
            Some((ParseState::new(syntax), ScopeStack::new()))
        };
        // Reset all cached highlight data; highlighting is redone
        // incrementally from offset 0 during subsequent idle callbacks.
        let state = self.view_state.get_mut(&view.get_id()).unwrap();
        state.initial_state = initial_state;
        state.spans = Vec::new();
        state.new_scopes = Vec::new();
        state.offset = 0;
        state.spans_start = 0;
        view.get_cache().clear();
        view.schedule_idle();
    }
}
impl<'a> Plugin for Syntect<'a> {
    type Cache = StateCache<LineState>;
    /// Registers fresh per-view state and kicks off highlighting.
    fn new_view(&mut self, view: &mut View<Self::Cache>) {
        let _t = trace_block("Syntect::new_view", &["syntect"]);
        let view_id = view.get_id();
        let state = PluginState::new();
        self.view_state.insert(view_id, state);
        self.do_highlighting(view);
    }
    fn did_close(&mut self, view: &View<Self::Cache>) {
        self.view_state.remove(&view.get_id());
    }
    /// Re-highlights from scratch after a save.
    fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) {
        let _t = trace_block("Syntect::did_save", &["syntect"]);
        self.do_highlighting(view);
    }
    fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {}
    fn language_changed(&mut self, view: &mut View<Self::Cache>, _old_lang: LanguageId) {
        self.do_highlighting(view);
    }
    /// Reacts to a buffer edit: reschedules highlighting and, for qualifying
    /// edits authored by core with auto-indent enabled, queues indent work.
    fn update(
        &mut self,
        view: &mut View<Self::Cache>,
        delta: Option<&RopeDelta>,
        edit_type: String,
        author: String,
    ) {
        let _t = trace_block("Syntect::update", &["syntect"]);
        view.schedule_idle();
        let should_auto_indent = view.get_config().auto_indent;
        let edit_type = edit_type.parse::<EditType>().ok();
        if should_auto_indent
            && author == "core"
            && (edit_type == Some(EditType::Newline)
                || edit_type == Some(EditType::Insert)
                || edit_type == Some(EditType::Other))
        {
            if let Some(delta) = delta {
                let state = self.view_state.get_mut(&view.get_id()).unwrap();
                // `unwrap` is safe: the condition above proved `Some(_)`.
                state.consider_indentation(view, delta, edit_type.unwrap());
            }
        }
    }
    /// Handles plugin-specific RPCs ("toggle_comment", "reindent"), both
    /// carrying a JSON list of (start, end) line ranges.
    fn custom_command(
        &mut self,
        view: &mut View<Self::Cache>,
        method: &str,
        params: serde_json::Value,
    ) {
        match method {
            "toggle_comment" => {
                let lines: Vec<(usize, usize)> = serde_json::from_value(params).unwrap();
                let state = self.view_state.get_mut(&view.get_id()).unwrap();
                state.toggle_comment(view, self.syntax_set, &lines);
            }
            "reindent" => {
                let lines: Vec<(usize, usize)> = serde_json::from_value(params).unwrap();
                let state = self.view_state.get_mut(&view.get_id()).unwrap();
                state.reindent(view, self.syntax_set, &lines);
            }
            other => eprintln!("syntect received unexpected command {}", other),
        }
    }
    /// Performs incremental work: pending indentation first, then up to
    /// `LINES_PER_RPC` lines of highlighting, yielding early when the host
    /// has a request waiting.
    fn idle(&mut self, view: &mut View<Self::Cache>) {
        let state = self.view_state.get_mut(&view.get_id()).unwrap();
        state.indent_lines(view, self.syntax_set);
        for _ in 0..LINES_PER_RPC {
            // `highlight_one_line` returning false means the buffer is done.
            if !state.highlight_one_line(view, self.syntax_set) {
                state.flush_spans(view);
                return;
            }
            if view.request_is_pending() {
                trace("yielding for request", &["syntect"]);
                break;
            }
        }
        state.flush_spans(view);
        view.schedule_idle();
    }
}
fn main() {
    // Load the bundled syntax definitions (newline-terminated variants),
    // then run the plugin event loop until the host disconnects.
    let syntax_set = SyntaxSet::load_defaults_newlines();
    let mut state = Syntect::new(&syntax_set);
    mainloop(&mut state).unwrap();
}
/// Returns a `&'static str` of `n` spaces, silently capped at 160.
fn n_spaces(n: usize) -> &'static str {
    // when someone opens an issue complaining about this we know we've made it
    const MAX_SPACES: usize = 160;
    static MANY_SPACES: [u8; MAX_SPACES] = [b' '; MAX_SPACES];
    // SAFETY: MANY_SPACES holds only ASCII space bytes, so every prefix of
    // it is valid UTF-8.
    unsafe { ::std::str::from_utf8_unchecked(&MANY_SPACES[..n.min(MAX_SPACES)]) }
}
/// Returns a `&'static str` of `n` tabs, silently capped at 40.
fn n_tabs(n: usize) -> &'static str {
    const MAX_TABS: usize = 40;
    static MANY_TABS: [u8; MAX_TABS] = [b'\t'; MAX_TABS];
    // SAFETY: MANY_TABS holds only ASCII tab bytes, so every prefix of it
    // is valid UTF-8.
    unsafe { ::std::str::from_utf8_unchecked(&MANY_TABS[..n.min(MAX_TABS)]) }
}
|
use std::result::*;
use std::rc::*;
use rustc_serialize::*;
use super::treenode::*;
use super::basictree::*;
use super::values::*;
///
/// Encoder that will write to the specified tree node
///
struct TreeNodeEncoder<'a> {
    // Mutable target: every emit_* call stores its value into this node.
    root: &'a mut MutableTreeNode
}
impl<'a> TreeNodeEncoder<'a> {
    /// Creates an encoder that writes values into `root`.
    fn new(root: &'a mut MutableTreeNode) -> TreeNodeEncoder<'a> {
        // Field-init shorthand replaces the redundant `root: root`.
        TreeNodeEncoder { root }
    }
}
/// Errors produced while encoding a value into a tree node.
///
/// `Debug` is derived so the error works with `unwrap`/`expect` and `{:?}`
/// logging, which a public error type should support.
#[derive(Debug)]
pub enum TreeNodeCodingError {
    /// The encoder was asked to emit a type the tree cannot represent.
    UnsupportedType
}
// rustc_serialize `Encoder` backed by a mutable tree node. Scalar types are
// stored directly on `self.root`; everything else returns `UnsupportedType`.
impl<'a> Encoder for TreeNodeEncoder<'a> {
    type Error = TreeNodeCodingError;
    fn emit_nil(&mut self) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Nothing);
        Ok(())
    }
    fn emit_i32(&mut self, v: i32) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Int(v));
        Ok(())
    }
    // Narrower integers are widened into the single `Int` (i32) variant.
    fn emit_i16(&mut self, v: i16) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Int(v as i32));
        Ok(())
    }
    fn emit_i8(&mut self, v: i8) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Int(v as i32));
        Ok(())
    }
    fn emit_bool(&mut self, v: bool) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Bool(v));
        Ok(())
    }
    fn emit_f64(&mut self, v: f64) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Real(v));
        Ok(())
    }
    // f32 is widened into the single `Real` (f64) variant.
    fn emit_f32(&mut self, v: f32) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::Real(v as f64));
        Ok(())
    }
    fn emit_str(&mut self, v: &str) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::String(v.to_string()));
        Ok(())
    }
    // NOTE(review): stores the struct *name* as the node's value before
    // encoding the fields — confirm this is the intended representation.
    fn emit_struct<F>(&mut self, name: &str, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        self.root.set_tree_value(TreeValue::String(name.to_string()));
        f(self)
    }
    fn emit_struct_field<F>(&mut self, f_name: &str, f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        // Hrm, so what I want to do is create a new encoder with a new node and call f on it.
        // But rust has other ideas; it doesn't know that f(X) doesn't reference X after it returns, so it moans
        // Other ideas
        // * create a whole new encoder (can't do it, we don't have any access to the struct)
        // * swap the reference to the node (can't do it, the new node has the same lifetime problems)
        // * use a CloneCell (can't do it, set_tree_value and set_tag aren't supported)
        // Insert a new node into the tree
        let new_node = BasicTree::new(f_name, ());
        self.root.get_child_ref().and_then(|sibling| {
            new_node.set_sibling_ref(sibling);
            Some(())
        });
        // Encode the field into the new node (would be super-elegant if this works but Rust is all 'nooope, you need to write a
        // billion more lines of code'). I know *why* but this is stupid, if the encoder function had a better lifetime specifier
        // it'd be unnecessary.
        // Rust thinks that f() needs stuff with a lifetime the same as this struct rather than stuff with a lifetime as long
        // as the function call (it's not obvious from a casual reading of the definition, rust likes to be inscrutable).
        // This means we need to do dumb stuff to make it work.
        /*
        let node_encoder = TreeNodeEncoder::new(&mut new_node);
        f(&mut node_encoder);
        */
        // Save the node we just created and update the tree
        self.root.set_child_ref(Rc::new(new_node));
        // NOTE(review): `f` is never invoked (see the block above), so the
        // field's value is lost and struct encoding always fails here.
        Err(TreeNodeCodingError::UnsupportedType)
    }
    // Everything below is not representable in the tree model yet.
    fn emit_usize(&mut self, v: usize) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u64(&mut self, v: u64) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u32(&mut self, v: u32) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u16(&mut self, v: u16) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u8(&mut self, v: u8) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_isize(&mut self, v: isize) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_i64(&mut self, v: i64) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_char(&mut self, v: char) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum<F>(&mut self, name: &str, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_variant<F>(&mut self, v_name: &str, v_id: usize, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_variant_arg<F>(&mut self, a_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_struct_variant<F>(&mut self, v_name: &str, v_id: usize, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_struct_variant_field<F>(&mut self, f_name: &str, f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple_arg<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple_struct<F>(&mut self, name: &str, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple_struct_arg<F>(&mut self, f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_option<F>(&mut self, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_option_none(&mut self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_option_some<F>(&mut self, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_seq<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_seq_elt<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_map<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_map_elt_key<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_map_elt_val<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
}
We can fix the lifetime problem by not making a lifetime part of the encoder's type. This means we can't encode directly into a TreeNode; instead we encode into an intermediate structure that can be turned into a TreeNode later on.
use std::result::*;
use std::rc::*;
use rustc_serialize::*;
use super::treenode::*;
use super::basictree::*;
use super::values::*;
///
/// Encoder that will write to the specified tree node
///
struct TreeNodeEncoder {
    // Node tag (name) accumulated during encoding.
    tag: String,
    // Scalar value for the node; `Nothing` until something is emitted.
    value: TreeValue,
    // First child node, if any was produced while encoding fields.
    child: Option<Rc<TreeNode>>
}
impl TreeNodeEncoder {
    /// Creates an empty encoder: no tag, a `Nothing` value, and no child.
    fn new() -> TreeNodeEncoder {
        TreeNodeEncoder {
            // `String::new()` allocates nothing, unlike `"".to_string()`.
            tag: String::new(),
            value: TreeValue::Nothing,
            child: None,
        }
    }
}
/// Errors produced while encoding a value into a tree node.
///
/// `Debug` is derived so the error works with `unwrap`/`expect` and `{:?}`
/// logging, which a public error type should support.
#[derive(Debug)]
pub enum TreeNodeCodingError {
    /// The encoder was asked to emit a type the tree cannot represent.
    UnsupportedType
}
// rustc_serialize `Encoder` accumulating into owned fields (`tag`, `value`,
// `child`) instead of a borrowed node, sidestepping the lifetime problem of
// the earlier version. Unsupported types return `UnsupportedType`.
impl Encoder for TreeNodeEncoder {
    type Error = TreeNodeCodingError;
    fn emit_nil(&mut self) -> Result<(), Self::Error> {
        self.value = TreeValue::Nothing;
        Ok(())
    }
    fn emit_i32(&mut self, v: i32) -> Result<(), Self::Error> {
        self.value = TreeValue::Int(v);
        Ok(())
    }
    // Narrower integers are widened into the single `Int` (i32) variant.
    fn emit_i16(&mut self, v: i16) -> Result<(), Self::Error> {
        self.value = TreeValue::Int(v as i32);
        Ok(())
    }
    fn emit_i8(&mut self, v: i8) -> Result<(), Self::Error> {
        self.value = TreeValue::Int(v as i32);
        Ok(())
    }
    fn emit_bool(&mut self, v: bool) -> Result<(), Self::Error> {
        self.value = TreeValue::Bool(v);
        Ok(())
    }
    fn emit_f64(&mut self, v: f64) -> Result<(), Self::Error> {
        self.value = TreeValue::Real(v);
        Ok(())
    }
    // f32 is widened into the single `Real` (f64) variant.
    fn emit_f32(&mut self, v: f32) -> Result<(), Self::Error> {
        self.value = TreeValue::Real(v as f64);
        Ok(())
    }
    fn emit_str(&mut self, v: &str) -> Result<(), Self::Error> {
        self.value = TreeValue::String(v.to_string());
        Ok(())
    }
    // NOTE(review): stores the struct *name* as the value before encoding
    // fields — confirm this is the intended representation.
    fn emit_struct<F>(&mut self, name: &str, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        self.value = TreeValue::String(name.to_string());
        f(self)
    }
    fn emit_struct_field<F>(&mut self, f_name: &str, f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        // Hrm, so what I want to do is create a new encoder with a new node and call f on it.
        // But rust has other ideas; it doesn't know that f(X) doesn't reference X after it returns, so it moans
        // Other ideas
        // * create a whole new encoder (can't do it, we don't have any access to the struct)
        // * swap the reference to the node (can't do it, the new node has the same lifetime problems)
        // * use a CloneCell (can't do it, set_tree_value and set_tag aren't supported)
        // Insert a new node into the tree
        let new_node = BasicTree::new(f_name, ());
        self.child.to_owned().and_then(|sibling| {
            new_node.set_sibling_ref(sibling);
            Some(())
        });
        // Encode the field into the new node (would be super-elegant if this works but Rust is all 'nooope, you need to write a
        // billion more lines of code'). I know *why* but this is stupid, if the encoder function had a better lifetime specifier
        // it'd be unnecessary.
        // Rust thinks that f() needs stuff with a lifetime the same as this struct rather than stuff with a lifetime as long
        // as the function call (it's not obvious from a casual reading of the definition, rust likes to be inscrutable).
        // This means we need to do dumb stuff to make it work.
        let mut node_encoder = TreeNodeEncoder::new();
        f(&mut node_encoder);
        // Save the node we just created and update the tree
        // NOTE(review): `node_encoder`'s result and `f`'s return value are
        // both discarded, and an Err is returned unconditionally — the
        // field's encoded value never reaches `new_node`. Work in progress.
        self.child = Some(Rc::new(new_node));
        Err(TreeNodeCodingError::UnsupportedType)
    }
    // Everything below is not representable in the tree model yet.
    fn emit_usize(&mut self, v: usize) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u64(&mut self, v: u64) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u32(&mut self, v: u32) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u16(&mut self, v: u16) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_u8(&mut self, v: u8) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_isize(&mut self, v: isize) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_i64(&mut self, v: i64) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_char(&mut self, v: char) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum<F>(&mut self, name: &str, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_variant<F>(&mut self, v_name: &str, v_id: usize, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_variant_arg<F>(&mut self, a_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_struct_variant<F>(&mut self, v_name: &str, v_id: usize, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_enum_struct_variant_field<F>(&mut self, f_name: &str, f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple_arg<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple_struct<F>(&mut self, name: &str, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_tuple_struct_arg<F>(&mut self, f_idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_option<F>(&mut self, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_option_none(&mut self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_option_some<F>(&mut self, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_seq<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_seq_elt<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_map<F>(&mut self, len: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_map_elt_key<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
    fn emit_map_elt_val<F>(&mut self, idx: usize, f: F) -> Result<(), Self::Error> where F: FnOnce(&mut Self) -> Result<(), Self::Error> {
        Err(TreeNodeCodingError::UnsupportedType)
    }
}
|
use std::cmp;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::iter;
use std::error::Error;
use std::ascii::AsciiExt;
use itertools::Itertools;
use std::borrow::Cow;
use std::rc::Rc;
use rustbox::{RustBox};
use rustbox::keyboard::Key;
use rex_utils;
use rex_utils::split_vec::SplitVec;
use rex_utils::rect::Rect;
use super::super::config::Config;
use super::RustBoxEx::{RustBoxEx, Style};
use super::input::Input;
use super::widget::Widget;
use super::inputline::{GotoInputLine, FindInputLine, PathInputLine};
use super::overlay::OverlayText;
use super::config::ConfigScreen;
use super::menu::{OverlayMenu, MenuState, MenuEntry};
/// A single buffer edit recorded on the undo stack (see `undo_stack`).
#[derive(Debug)]
enum EditOp {
    // Fields presumably (offset, length) / (offset, bytes) — the apply/undo
    // code is outside this view; TODO confirm.
    Delete(isize, isize),
    Insert(isize, Vec<u8>),
    Write(isize, Vec<u8>),
}
/// How the line-number gutter is rendered.
#[derive(Debug)]
enum LineNumberMode {
    /// Gutter hidden (`show_linenum` disabled).
    None,
    /// 4 hex digits (XXXX); used while the buffer fits in 0xFFFF bytes.
    Short,
    /// XXXX:XXXX form for larger buffers.
    Long
}
/// Every input action the hex editor can dispatch; produced by the
/// key-binding layer and consumed by the editor's input handling.
#[derive(Copy,Clone,Debug)]
pub enum HexEditActions {
    /// Type `char` into the active pane (hex nibble or ascii).
    Edit(char),
    /// Switch focus between the hex and ascii panes.
    SwitchView,
    MoveLeft,
    MoveRight,
    MoveUp,
    MoveDown,
    MovePageUp,
    MovePageDown,
    MoveToFirstColumn,
    MoveToLastColumn,
    Delete,
    DeleteWithMove,
    CopySelection,
    CutSelection,
    PasteSelection,
    Undo,
    ToggleInsert,
    // NOTE(review): misspelling of "ToggleSelection"; kept as-is because
    // renaming would break external references to this public variant.
    ToggleSelecion,
    HelpView,
    LogView,
    // The Ask* variants open a modal input line for their parameter.
    AskGoto,
    AskFind,
    AskOpen,
    AskSave,
    AskConfig,
    AskFill,
    AskMarkAdd,
    AskMarkGoto,
    CheckMagic,
    StartMenu,
}
/// Top-level overlay menu: Config, plus a Mark submenu with Add/Goto.
static ROOT_ENTRIES: MenuState<HexEditActions> = &[
    MenuEntry::CommandEntry('c', "Config", HexEditActions::AskConfig),
    MenuEntry::SubEntries('m', "Mark", &[
        MenuEntry::CommandEntry('a', "Add", HexEditActions::AskMarkAdd),
        MenuEntry::CommandEntry('g', "Goto", HexEditActions::AskMarkGoto),
    ]),
];
signalreceiver_decl!{HexEditSignalReceiver(HexEdit)}
/// The hex-editor widget: buffer contents plus all view and edit state.
pub struct HexEdit {
    // The bytes being edited.
    buffer: SplitVec,
    // User configuration (show_ascii, show_linenum, line_width, ...).
    config: Rc<Config>,
    // Screen area assigned to this widget.
    rect: Rect<isize>,
    // Cursor position in nibble units: two nibbles per byte.
    cursor_nibble_pos: isize,
    // History of status messages; the latest may be shown in the status bar.
    status_log: Vec<String>,
    show_last_status: bool,
    // Index of the first byte drawn on screen (vertical scroll).
    data_offset: isize,
    // Bytes skipped at the start of each row (horizontal scroll).
    row_offset: isize,
    // True when the hex (nibble) pane has the cursor, false for ascii.
    nibble_active: bool,
    // Selection anchor byte; None when nothing is selected.
    selection_start: Option<isize>,
    insert_mode: bool,
    // Key-input state machine.
    input: Input,
    // Recorded operations for undo.
    undo_stack: Vec<EditOp>,
    // Active modal input line (goto/find/path), if any.
    input_entry: Option<Box<Widget>>,
    // Active overlay (help, log, config, menu), if any.
    overlay: Option<Box<Widget>>,
    // Path of the file backing the buffer, once opened/saved.
    cur_path: Option<PathBuf>,
    // Bytes captured by copy/cut, awaiting paste.
    clipboard: Option<Vec<u8>>,
    signal_receiver: Rc<HexEditSignalReceiver>,
}
impl HexEdit {
/// Builds a fresh editor with an empty buffer and default UI state; the
/// status log starts with the help hint.
pub fn new(config: Config) -> HexEdit {
    // Initializers listed in struct-declaration order for readability.
    HexEdit {
        buffer: SplitVec::new(),
        config: Rc::new(config),
        rect: Default::default(),
        cursor_nibble_pos: 0,
        status_log: vec!["Press C-/ for help".to_string()],
        show_last_status: true,
        data_offset: 0,
        row_offset: 0,
        nibble_active: true,
        selection_start: None,
        insert_mode: false,
        input: Input::new(),
        undo_stack: Vec::new(),
        input_entry: None,
        overlay: None,
        cur_path: None,
        clipboard: None,
        signal_receiver: Rc::new(HexEditSignalReceiver::new()),
    }
}
/// Restores cursor, scroll, selection, mode, modal-input, and undo state to
/// their defaults. The buffer contents are left untouched.
fn reset(&mut self) {
    self.cursor_nibble_pos = 0;
    self.data_offset = 0;
    self.nibble_active = true;
    self.selection_start = None;
    self.insert_mode = false;
    self.input_entry = None;
    self.undo_stack = Vec::new();
}
/// Picks the gutter format: hidden when line numbers are disabled, short
/// (4 hex digits) while the buffer fits in 0xFFFF bytes, long otherwise.
fn get_linenumber_mode(&self) -> LineNumberMode {
    if !self.config.show_linenum {
        return LineNumberMode::None;
    }
    if self.buffer.len() <= 0xFFFF {
        LineNumberMode::Short
    } else {
        LineNumberMode::Long
    }
}
/// Width in cells of the gutter, including its trailing spacer column.
fn get_linenumber_width(&self) -> isize {
    match self.get_linenumber_mode() {
        LineNumberMode::None => 1,
        LineNumberMode::Short => 4 + 1, // 4 for the XXXX + 1 for whitespace
        LineNumberMode::Long => 9 + 1, // 9 for XXXX:XXXX + 1 for whitespace
    }
}
/// Width of one display row in bytes: the configured `line_width` when
/// set, otherwise however many bytes fit on the screen.
fn get_line_width(&self) -> isize {
    // `unwrap_or_else` defers computing the screen-derived fallback; the
    // eager `unwrap_or` evaluated it even when a width was configured.
    self.config.line_width.unwrap_or_else(|| self.get_bytes_per_row() as u32) as isize
}
/// Number of bytes that fit on one screen row, given which panes are shown.
fn get_bytes_per_row(&self) -> isize {
    // Each byte costs two hex nibble cells plus a spacing cell; the
    // optional ascii pane adds one more cell per byte.
    let cells_per_byte = if self.config.show_ascii { 4 } else { 3 };
    let usable_width = self.rect.width - self.get_linenumber_width();
    usable_width / cells_per_byte
}
/// Total bytes visible at once: one row's width for each visible row.
fn get_bytes_per_screen(&self) -> isize {
    let rows = self.rect.height;
    rows * self.get_line_width()
}
/// Draws the gutter text for `row` according to the current numbering mode;
/// draws nothing when the gutter is hidden.
fn draw_line_number(&self, rb: &RustBox, row: usize, line_number: usize) {
    let text = match self.get_linenumber_mode() {
        LineNumberMode::None => return,
        LineNumberMode::Short => format!("{:04X}", line_number),
        LineNumberMode::Long => {
            // High and low 16 bits rendered as XXXX:XXXX.
            format!("{:04X}:{:04X}", line_number >> 16, line_number & 0xFFFF)
        }
    };
    rb.print_style(0, row, Style::Default, &text);
}
/// Renders one editor row: the hex nibble pane, the optional ascii pane,
/// selection highlighting, the terminal cursor, and '<'/'>' overflow
/// markers when the cursor's row is horizontally scrolled/truncated.
fn draw_line(&self, rb: &RustBox, iter: &mut Iterator<Item=(usize, Option<&u8>)>, row: usize) {
    let nibble_view_start = self.get_linenumber_width() as usize;
    // The value of this is wrong if we are not showing the ascii view
    let byte_view_start = nibble_view_start + self.get_bytes_per_row() as usize * 3;
    // We want the selection draw to not go out of the editor view
    let mut prev_in_selection = false;
    let mut at_current_row = false;
    // Horizontal scroll (`row_offset`) skips leading bytes; `take` clips the
    // row to what fits on screen.
    for (row_offset, (byte_pos, maybe_byte)) in iter.skip(self.row_offset as usize).enumerate().take(self.get_bytes_per_row() as usize) {
        // Cursor position is tracked in nibbles: two nibbles per byte.
        let at_current_byte = byte_pos as isize == (self.cursor_nibble_pos / 2);
        at_current_row = at_current_row || at_current_byte;
        let in_selection = if let Some(selection_pos) = self.selection_start {
            rex_utils::is_between(byte_pos as isize, selection_pos, self.cursor_nibble_pos / 2)
        } else {
            false
        };
        // Now we draw the nibble view
        // `None` is the "fake" one-past-the-end slot: drawn as blanks so the
        // cursor can sit past the last byte.
        let hex_chars = if let Some(&byte) = maybe_byte {
            rex_utils::u8_to_hex(byte)
        } else {
            (' ', ' ')
        };
        let nibble_view_column = nibble_view_start + (row_offset * 3);
        let nibble_style = if (!self.nibble_active && at_current_byte) || in_selection {
            Style::Selection
        } else {
            Style::Default
        };
        rb.print_char_style(nibble_view_column, row, nibble_style,
                            hex_chars.0);
        rb.print_char_style(nibble_view_column + 1, row, nibble_style,
                            hex_chars.1);
        // Paint the spacing cell between two selected bytes so the selection
        // reads as one contiguous band.
        if prev_in_selection && in_selection {
            rb.print_char_style(nibble_view_column - 1, row, nibble_style,
                                ' ');
        }
        if self.nibble_active && self.input_entry.is_none() && at_current_byte {
            rb.set_cursor(nibble_view_column as isize + (self.cursor_nibble_pos & 1),
                          row as isize);
        };
        if self.config.show_ascii {
            // Now let's draw the byte window
            let byte_char = if let Some(&byte) = maybe_byte {
                let bc = byte as char;
                if bc.is_ascii() && bc.is_alphanumeric() {
                    bc
                } else {
                    '.'
                }
            } else {
                ' '
            };
            // If we are at the current byte but the nibble view is active, we want to draw a
            // "fake" cursor by dawing a selection square
            let byte_style = if (self.nibble_active && at_current_byte) || in_selection {
                Style::Selection
            } else {
                Style::Default
            };
            rb.print_char_style(byte_view_start + row_offset, row, byte_style,
                                byte_char);
            if !self.nibble_active && self.input_entry.is_none() && at_current_byte {
                rb.set_cursor((byte_view_start + row_offset) as isize, row as isize);
            }
            // Remember if we had a selection, so that we know for next char to "fill in" with
            // selection in the nibble view
            // NOTE(review): this update sits inside `if show_ascii`, so with
            // the ascii pane hidden `prev_in_selection` never changes —
            // confirm whether the gap-fill above is intended in that mode.
            prev_in_selection = in_selection;
        }
    }
    // We just need to consume the iterator and see if there were any remaining bytes
    let bytes_remaining = iter.count();
    if at_current_row && self.row_offset != 0 {
        rb.print_char_style(nibble_view_start - 1, row, Style::Default, '<');
    }
    if at_current_row && bytes_remaining != 0 {
        rb.print_char_style(byte_view_start - 1, row, Style::Default, '>');
    }
}
/// Draws the visible portion of the buffer, one row at a time, delegating
/// to `draw_line_number` and `draw_line` for each row.
pub fn draw_view(&self, rb: &RustBox) {
    let start_iter = self.data_offset as usize;
    let stop_iter = cmp::min(start_iter + self.get_bytes_per_screen() as usize, self.buffer.len());
    let itit = (start_iter..).zip( // We are zipping the byte position
        self.buffer.iter_range(start_iter..stop_iter) // With the data at those bytes
        .map(|x| Some(x)) // And wrapping it in an option
        .chain(iter::once(None))) // So we can have a "fake" last item that will be None
        .chunks_lazy(self.get_line_width() as usize); //And split it into nice row-sized chunks
    for (row, row_iter_) in itit.into_iter().take(self.rect.height as usize).enumerate() {
        // We need to be able to peek in the iterable so we can get the current position
        let mut row_iter = row_iter_.peekable();
        // Safe to unwrap: every chunk produced by chunks_lazy is non-empty.
        let byte_pos = row_iter.peek().unwrap().0;
        self.draw_line_number(rb, row, byte_pos);
        self.draw_line(rb, &mut row_iter, row);
    }
}
/// Renders the bottom status bar: the last status message on the left and a
/// right-aligned summary (selection info, or cursor position / undo depth,
/// plus the SEL/INS/OVR mode indicator).
fn draw_statusbar(&self, rb: &RustBox) {
    // Clear the whole line with the status-bar style first.
    rb.print_style(0, rb.height() - 1, Style::StatusBar, &rex_utils::string_with_repeat(' ', rb.width()));
    if self.show_last_status {
        if let Some(ref status_line) = self.status_log.last() {
            rb.print_style(0, rb.height() - 1, Style::StatusBar, &status_line);
        }
    }
    // Mode indicator: an active selection wins over insert/overwrite.
    let mode = if let Some(_) = self.selection_start {
        "SEL"
    } else if self.insert_mode {
        "INS"
    } else {
        "OVR"
    };
    let right_status;
    if let Some(selection_start) = self.selection_start {
        let size = (self.cursor_nibble_pos/2 - selection_start).abs();
        right_status = format!(
            " Start: {} Size: {} Pos: {} {}",
            selection_start, size, self.cursor_nibble_pos/2, mode);
    } else {
        // BUG FIX: the arguments were swapped -- "Pos:" showed the undo-stack
        // depth and "Undo:" showed the cursor position.
        right_status = format!(
            " Pos: {} Undo: {} {}",
            self.cursor_nibble_pos/2, self.undo_stack.len(), mode);
    };
    // Right-align the summary; if the bar is too narrow, clip its left side.
    let (x_pos, start_index) = if rb.width() >= right_status.len() {
        (rb.width() - right_status.len(), 0)
    } else {
        (0, right_status.len() - rb.width())
    };
    rb.print_style(x_pos, rb.height() - 1, Style::StatusBar, &right_status[start_index..]);
}
/// Top-level draw: the hex view, then any active input line (one row above
/// the status bar) or overlay (covering the whole view), then the status bar.
pub fn draw(&mut self, rb: &RustBox) {
    self.draw_view(rb);
    if let Some(entry) = self.input_entry.as_mut() {
        entry.draw(rb, Rect {
            top: (rb.height() - 2) as isize,
            left: 0,
            height: 1,
            width: rb.width() as isize
        }, true);
    }
    if let Some(overlay) = self.overlay.as_mut() {
        overlay.draw(rb, Rect {
            top: 0,
            left: 0,
            height: self.rect.height,
            width: self.rect.width,
        }, true);
    }
    self.draw_statusbar(rb);
}
/// Appends a message to the status log and makes it visible in the bar.
/// Accepts both `&'static str` and `String` via `Into<Cow>`.
fn status<S: Into<Cow<'static, str>> + ?Sized>(&mut self, st: S) {
    self.show_last_status = true;
    let cow: Cow<'static, str> = st.into();
    // `into_owned` reuses an owned String and only allocates for the
    // borrowed case, instead of re-formatting unconditionally.
    self.status_log.push(cow.into_owned());
}
/// Hides the last status message (the log itself is kept for the log view).
fn clear_status(&mut self) {
    self.show_last_status = false;
}
/// Loads a file into the editor, replacing the current buffer and resetting
/// all editing state. On I/O failure the current buffer is left untouched
/// and the error is reported via the status bar.
pub fn open(&mut self, path: &Path) {
    let mut v = vec![];
    if let Err(e) = File::open(path).and_then(|mut f| f.read_to_end(&mut v)) {
        self.status(format!("ERROR: {}", e.description()));
        return;
    }
    self.buffer = SplitVec::from_vec(v);
    self.cur_path = Some(PathBuf::from(path));
    self.reset();
}
/// Writes the buffer to `path`, slice by slice, stopping at the first I/O
/// error. On success the path is remembered as the current file.
pub fn save(&mut self, path: &Path) {
    let result = File::create(path)
        // Fold short-circuits the writes on the first Err.
        .and_then(|mut f| self.buffer.iter_slices()
            .fold(Ok(()), |res, val| res
                .and_then(|_| f.write_all(val))));
    match result {
        Ok(_) => {
            self.cur_path = Some(PathBuf::from(path));
        }
        Err(e) => {
            self.status(format!("ERROR: {}", e.description()));
        }
    }
}
/// Applies an edit operation to the buffer and, when requested, pushes the
/// inverse operation onto the undo stack. Returns the affected byte range
/// as `(begin, end)` offsets.
///
/// Fixes: removed the leftover debug status line (`"doing = {:?}"`) that
/// spammed the user-visible status bar on every edit, and the needless
/// `mut` on locals that are assigned exactly once.
fn edit_buffer(&mut self, act: EditOp, add_to_undo: bool) -> (isize, isize) {
    let begin_region: isize;
    let end_region: isize;
    match act {
        EditOp::Insert(offset, buf) => {
            begin_region = offset;
            end_region = offset + buf.len() as isize;
            self.buffer.insert(offset as usize, &buf);
            if add_to_undo {
                // The inverse of an insert is deleting the inserted range.
                self.push_undo(EditOp::Delete(offset, offset + buf.len() as isize))
            }
        }
        EditOp::Delete(offset, end) => {
            begin_region = offset;
            end_region = end;
            let res = self.buffer.move_out(offset as usize..end as usize);
            // The inverse of a delete is re-inserting the removed bytes.
            if add_to_undo { self.push_undo(EditOp::Insert(offset, res)) }
        }
        EditOp::Write(offset, buf) => {
            begin_region = offset;
            end_region = offset + buf.len() as isize;
            // Capture the overwritten bytes so the write can be undone.
            let orig_data = self.buffer.copy_out(offset as usize..(offset as usize + buf.len()));
            self.buffer.copy_in(offset as usize, &buf);
            if add_to_undo { self.push_undo(EditOp::Write(offset, orig_data)) }
        }
    }
    (begin_region, end_region)
}
/// Records the inverse of an edit so `undo` can replay it later.
fn push_undo(&mut self, act: EditOp) {
    self.undo_stack.push(act);
}
/// Pops and replays the most recent inverse operation, then moves the
/// cursor to the start of the affected region. No-op when the stack is empty.
fn undo(&mut self) {
    if let Some(act) = self.undo_stack.pop() {
        // add_to_undo is false: undoing must not itself be undoable (no redo).
        let (begin, _) = self.edit_buffer(act, false);
        self.set_cursor(begin * 2);
    }
}
/// True when the cursor sits one past the last byte (the append position).
fn cursor_at_end(&self) -> bool {
    let end_nibble_pos = (self.buffer.len() * 2) as isize;
    end_nibble_pos == self.cursor_nibble_pos
}
/// Deletes the active selection, or the byte at the cursor. With
/// `with_bksp` the byte *before* the cursor is removed instead
/// (backspace semantics).
fn delete_at_cursor(&mut self, with_bksp: bool) {
    let mut cursor_nibble_pos = self.cursor_nibble_pos;
    let selection_pos = match self.selection_start {
        Some(selection_pos_tag) => selection_pos_tag,
        None => {
            if with_bksp {
                // Backspace at the very start of the buffer does nothing.
                if cursor_nibble_pos < 2 {
                    return;
                }
                cursor_nibble_pos -= 2;
            }
            cursor_nibble_pos / 2
        }
    };
    // Normalize: selection may extend in either direction from its anchor.
    let del_start = cmp::min(selection_pos, cursor_nibble_pos / 2);
    let mut del_stop = cmp::max(selection_pos, cursor_nibble_pos / 2) + 1;
    // Clamp the (inclusive-end) range to the buffer length; an empty
    // clamped range means there is nothing to delete.
    if del_stop > self.buffer.len() as isize {
        del_stop -= 1;
        if del_stop == del_start {
            return;
        }
    }
    if self.buffer.len() == 0 {
        self.status("Nothing to delete");
        return;
    }
    self.selection_start = None;
    self.edit_buffer(EditOp::Delete(del_start, del_stop), true);
    self.set_cursor(del_start * 2);
}
/// Writes one nibble (0-15) at the cursor, either inserting or overwriting
/// depending on mode; a pending selection is deleted first.
fn write_nibble_at_cursor(&mut self, c: u8) {
    // Replace the text at the selection before writing the data
    if self.selection_start.is_some() {
        self.delete_at_cursor(false);
    }
    // At the end of the buffer there is no byte to overwrite, so insert.
    if self.insert_mode || self.cursor_at_end() {
        self.insert_nibble_at_cursor(c);
    } else {
        self.set_nibble_at_cursor(c);
    }
}
/// Overwrites one nibble of the byte under the cursor: the high nibble at
/// an even nibble position, the low nibble at an odd one. Goes through
/// `edit_buffer` so the change is undoable.
fn set_nibble_at_cursor(&mut self, c: u8) {
    let byte_offset = self.cursor_nibble_pos / 2;
    let old_byte = self.buffer[byte_offset as usize];
    // `& 1` is 0 or 1, so an if/else covers every case.
    let new_byte = if self.cursor_nibble_pos & 1 == 0 {
        (old_byte & 0x0f) + c * 16
    } else {
        (old_byte & 0xf0) + c
    };
    self.edit_buffer(EditOp::Write(byte_offset, vec![new_byte]), true);
}
/// Inserts a new byte whose high nibble is `c` when the cursor is on a byte
/// boundary; mid-byte it overwrites the low nibble of the byte just inserted.
fn insert_nibble_at_cursor(&mut self, c: u8) {
    // If we are at half byte, we still overwrite
    if self.cursor_nibble_pos & 1 == 1 {
        self.set_nibble_at_cursor(c);
        return
    }
    let pos_div2 = self.cursor_nibble_pos / 2;
    // `c * 16` places the typed digit in the high nibble of the new byte.
    self.edit_buffer(EditOp::Insert(pos_div2, vec![c * 16]), true);
}
/// Flips between insert and overwrite mode; the zero-length cursor move
/// re-runs clamping/scrolling for the new mode.
fn toggle_insert_mode(&mut self) {
    self.insert_mode = !self.insert_mode;
    self.move_cursor(0);
}
/// Writes a whole byte at the cursor (ascii-view editing), inserting or
/// overwriting depending on mode; a pending selection is deleted first.
fn write_byte_at_cursor(&mut self, c: u8) {
    // Replace the text at the selection before writing the data
    if self.selection_start.is_some() {
        self.delete_at_cursor(false);
    }
    let byte_offset = self.cursor_nibble_pos / 2;
    // At the end of the buffer there is no byte to overwrite, so insert.
    if self.insert_mode || self.cursor_at_end() {
        self.edit_buffer(EditOp::Insert(byte_offset, vec![c]), true);
    } else {
        self.edit_buffer(EditOp::Write(byte_offset, vec![c]), true);
    }
}
/// Moves the cursor by a relative number of nibbles, then clamps and scrolls.
fn move_cursor(&mut self, pos: isize) {
    self.cursor_nibble_pos += pos;
    self.update_cursor()
}
/// Moves the cursor to an absolute nibble position, then clamps and scrolls.
fn set_cursor(&mut self, pos: isize) {
    self.cursor_nibble_pos = pos;
    self.update_cursor()
}
/// Clamps the cursor into the buffer (0 ..= len*2, so one past the end is a
/// valid append position) and scrolls the view vertically and horizontally
/// so the cursor stays visible.
fn update_cursor(&mut self) {
    self.cursor_nibble_pos = cmp::max(self.cursor_nibble_pos, 0);
    self.cursor_nibble_pos = cmp::min(self.cursor_nibble_pos, (self.buffer.len()*2) as isize);
    let cursor_byte_pos = self.cursor_nibble_pos / 2;
    let cursor_row_offset = cursor_byte_pos % self.get_line_width();
    // If the cursor moves above or below the view, scroll it
    if cursor_byte_pos < self.data_offset {
        // Snap the view to the start of the cursor's line.
        self.data_offset = (cursor_byte_pos) - cursor_row_offset;
    }
    if cursor_byte_pos > (self.data_offset + self.get_bytes_per_screen() - 1) {
        // Put the cursor's line on the last visible row.
        self.data_offset = cursor_byte_pos - cursor_row_offset -
            self.get_bytes_per_screen() + self.get_line_width();
    }
    // If the cursor moves to the right or left of the view, scroll it
    if cursor_row_offset < self.row_offset {
        self.row_offset = cursor_row_offset;
    }
    if cursor_row_offset >= self.row_offset + self.get_bytes_per_row() {
        self.row_offset = cursor_row_offset - self.get_bytes_per_row() + 1;
    }
}
/// Starts a selection anchored at the current byte, or clears an existing
/// one. Removed the leftover debug status line (`"selection = {:?}"`) that
/// overwrote the status bar on every toggle.
fn toggle_selection(&mut self) {
    match self.selection_start {
        Some(_) => self.selection_start = None,
        None => self.selection_start = Some(self.cursor_nibble_pos / 2)
    }
}
/// Jumps the cursor to an absolute byte offset and reports it.
fn goto(&mut self, pos: isize) {
    self.status(format!("Going to {:?}", pos));
    // pos is a byte offset; the cursor works in nibbles.
    self.set_cursor(pos * 2);
}
/// Searches for `needle` starting at the cursor, wrapping around to the
/// start of the buffer if nothing is found after the cursor. Moves the
/// cursor to the match, or reports failure in the status bar.
fn find_buf(&mut self, needle: &[u8]) {
    // `or_else` replaces the match-with-identity-arm for the wrap-around.
    let found_pos = self.buffer
        .find_slice_from((self.cursor_nibble_pos / 2) as usize, needle)
        .or_else(|| self.buffer.find_slice_from(0, needle));
    if let Some(pos) = found_pos {
        self.status(format!("Found at {:?}", pos));
        self.set_cursor((pos * 2) as isize);
    } else {
        self.status("Nothing found!");
    }
}
/// Copies the selected range into the internal clipboard, returning the
/// number of bytes copied, or None when there is no selection.
fn read_cursor_to_clipboard(&mut self) -> Option<usize> {
    let (start, stop) = match self.selection_start {
        None => { return None; },
        Some(selection_pos) => {
            (cmp::min(selection_pos, self.cursor_nibble_pos / 2),
             cmp::max(selection_pos, self.cursor_nibble_pos / 2))
        }
    };
    // NOTE(review): `stop` is exclusive here, while delete_at_cursor treats
    // the selection end as inclusive (max + 1) -- confirm the byte under the
    // far end of the selection is meant to be excluded from the copy.
    let data = self.buffer.copy_out(start as usize..stop as usize);
    let data_len = data.len();
    self.clipboard = Some(data);
    Some(data_len)
}
/// Copy: snapshot the selection into the clipboard and clear the selection.
fn edit_copy(&mut self) {
    if let Some(data_len) = self.read_cursor_to_clipboard() {
        self.status(format!("Copied {}", data_len));
        self.selection_start = None;
    }
}
/// Cut: copy the selection to the clipboard, then delete it from the buffer.
fn edit_cut(&mut self) {
    if let Some(data_len) = self.read_cursor_to_clipboard() {
        self.delete_at_cursor(false);
        self.status(format!("Cut {}", data_len));
    }
}
/// Paste: insert the clipboard contents at the cursor position.
/// No-op when the clipboard is empty.
fn edit_paste(&mut self) {
    let data = if let Some(ref d) = self.clipboard {
        d.clone()
    } else {
        return;
    };
    let data_len = data.len() as isize;
    // This is needed to satisfy the borrow checker
    let cur_pos_in_bytes = self.cursor_nibble_pos / 2;
    self.edit_buffer(EditOp::Insert(cur_pos_in_bytes, data), true);
    // NOTE(review): the cursor works in nibbles, so advancing past data_len
    // inserted bytes would be `data_len * 2`; `data_len + 1` looks off -- confirm.
    self.move_cursor(data_len + 1);
}
/// Translates a raw key into an editor action (if bound) and executes it.
fn view_input(&mut self, key: Key) {
    if let Some(action) = self.input.editor_input(key) {
        self.do_action(action)
    }
}
fn do_action(&mut self, action: HexEditActions) {
self.clear_status();
match action {
// Movement
HexEditActions::MoveLeft if self.nibble_active => self.move_cursor(-1),
HexEditActions::MoveRight if self.nibble_active => self.move_cursor(1),
HexEditActions::MoveLeft if !self.nibble_active => self.move_cursor(-2),
HexEditActions::MoveRight if !self.nibble_active => self.move_cursor(2),
HexEditActions::MoveLeft => panic!("Make the case handler happy!"),
HexEditActions::MoveRight => panic!("Make the case handler happy!"),
HexEditActions::MoveUp => {
let t = -self.get_line_width() * 2;
self.move_cursor(t)
}
HexEditActions::MoveDown => {
let t = self.get_line_width() * 2;
self.move_cursor(t)
}
HexEditActions::MovePageUp => {
let t = -(self.get_bytes_per_screen() * 2);
self.move_cursor(t)
}
HexEditActions::MovePageDown => {
let t = self.get_bytes_per_screen() * 2;
self.move_cursor(t)
}
HexEditActions::MoveToFirstColumn => {
let pos_in_line = self.cursor_nibble_pos % (self.get_line_width()*2);
self.move_cursor(-pos_in_line)
}
HexEditActions::MoveToLastColumn => {
let pos_in_line = self.cursor_nibble_pos % (self.get_line_width()*2);
let i = self.get_line_width()*2 - 2 - pos_in_line;
self.move_cursor(i);
}
HexEditActions::Delete => self.delete_at_cursor(false),
HexEditActions::DeleteWithMove => self.delete_at_cursor(true),
// Ctrl X, C V
HexEditActions::CutSelection => self.edit_cut(),
HexEditActions::CopySelection => self.edit_copy(),
HexEditActions::PasteSelection => self.edit_paste(),
// Hex input for nibble view
HexEditActions::Edit(ch) if self.nibble_active => {
if let Some(val) = ch.to_digit(16) {
self.write_nibble_at_cursor(val as u8);
self.move_cursor(1);
} else {
// TODO: Show error?
}
},
// Ascii edit for byte view
HexEditActions::Edit(ch) if !self.nibble_active => {
if ch.len_utf8() == 1 && ch.is_alphanumeric() {
// TODO: Make it printable rather than alphanumeric
self.write_byte_at_cursor(ch as u8);
self.move_cursor(2);
} else {
// TODO: Show error?
}
}
HexEditActions::Edit(ch) => panic!("Make the case handler happy!"),
HexEditActions::SwitchView => {
self.nibble_active = !self.nibble_active;
let t = self.nibble_active;
self.status(format!("nibble_active = {:?}", t));
},
HexEditActions::HelpView => self.start_help(),
HexEditActions::LogView => self.start_logview(),
HexEditActions::ToggleInsert => self.toggle_insert_mode(),
HexEditActions::ToggleSelecion => self.toggle_selection(),
HexEditActions::Undo => self.undo(),
HexEditActions::AskGoto => self.start_goto(),
HexEditActions::AskFind => self.start_find(),
HexEditActions::AskOpen => self.start_open(),
HexEditActions::AskSave => self.start_save(),
HexEditActions::AskConfig => self.start_config(),
HexEditActions::StartMenu => self.start_menu(),
_ => self.status(format!("Operation not implemented yet: {:?}", action))
}
}
/// Opens the root menu overlay; a selected entry is dispatched through
/// `do_action`, cancel restores (or clears) the status line.
fn start_menu(&mut self) {
    let sr = &self.signal_receiver;
    let mut menu = OverlayMenu::with_menu(ROOT_ENTRIES);
    menu.on_selected.connect(signal!(sr with |obj, action| {
        obj.overlay = None;
        obj.do_action(action);
    }));
    menu.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.overlay = None;
    }));
    self.overlay = Some(Box::new(menu));
}
/// Opens the configuration screen overlay with a copy of the current config.
fn start_config(&mut self) {
    let sr = &self.signal_receiver;
    let mut config_screen = ConfigScreen::with_config(self.config.clone());
    config_screen.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.overlay = None;
    }));
    self.overlay = Some(Box::new(config_screen));
}
/// Opens the help text (bundled at compile time) as a read-only overlay.
fn start_help(&mut self) {
    let help_text = include_str!("Help.txt");
    // YAY Lifetimes! (This will hopfully be fixed once rust gains MIR/HIR)
    // The extra scope ends the `sr` borrow before self.status() below
    // needs &mut self.
    {
        let sr = &self.signal_receiver;
        let mut ot = OverlayText::with_text(help_text.to_string(), false);
        ot.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.overlay = None;
        }));
        self.overlay = Some(Box::new(ot));
    }
    {
        self.status("Press Esc to return");
    }
}
/// Opens an overlay showing a snapshot of the status-message log.
fn start_logview(&mut self) {
    let logs = self.status_log.clone();
    let sr = &self.signal_receiver;
    let mut ot = OverlayText::with_logs(logs, true);
    ot.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.overlay = None;
    }));
    self.overlay = Some(Box::new(ot));
}
/// Opens the "goto offset" input line; a confirmed value jumps the cursor.
/// (Removed a stale commented-out line referencing a `sender` field that no
/// longer exists.)
fn start_goto(&mut self) {
    let mut gt = GotoInputLine::new();
    let sr = &self.signal_receiver;
    gt.on_done.connect(signal!(sr with |obj, pos| {
        obj.goto(pos*2);
        obj.input_entry = None;
    }));
    gt.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.input_entry = None;
    }));
    self.input_entry = Some(Box::new(gt) as Box<Widget>)
}
/// Opens the "find bytes" input line; a confirmed needle runs `find_buf`.
fn start_find(&mut self) {
    let mut find_line = FindInputLine::new();
    let sr = &self.signal_receiver;
    find_line.on_find.connect(signal!(sr with |obj, needle| {
        obj.find_buf(&needle);
        obj.input_entry = None;
    }));
    find_line.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.input_entry = None;
    }));
    self.input_entry = Some(Box::new(find_line) as Box<Widget>)
}
/// Opens the "save as" path input line; a confirmed path runs `save`.
fn start_save(&mut self) {
    let mut path_line = PathInputLine::new("Save: ".into());
    let sr = &self.signal_receiver;
    path_line.on_done.connect(signal!(sr with |obj, path| {
        obj.save(&path);
        obj.input_entry = None;
    }));
    path_line.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.input_entry = None;
    }));
    self.input_entry = Some(Box::new(path_line) as Box<Widget>)
}
/// Opens the "open file" path input line; a confirmed path runs `open`.
fn start_open(&mut self) {
    let mut path_line = PathInputLine::new("Open: ".into());
    let sr = &self.signal_receiver;
    path_line.on_done.connect(signal!(sr with |obj, path| {
        obj.open(&path);
        obj.input_entry = None;
    }));
    path_line.on_cancel.connect(signal!(sr with |obj, opt_msg| {
        if let Some(ref msg) = opt_msg {
            obj.status(msg.clone());
        } else {
            obj.clear_status();
        }
        obj.input_entry = None;
    }));
    self.input_entry = Some(Box::new(path_line) as Box<Widget>)
}
/// Drains queued signal callbacks, letting them mutate `self`.
/// The Rc clone is needed so the receiver is not borrowed while it runs.
fn process_msgs(&mut self) {
    let mut sr = self.signal_receiver.clone();
    sr.run(self);
}
/// Routes a key press to the active overlay, else the active input line,
/// else the editor view; signals are processed before and after so any
/// queued callbacks take effect immediately.
pub fn input(&mut self, key: Key) {
    self.process_msgs();
    if let Some(ref mut overlay) = self.overlay {
        overlay.input(&self.input, key);
    } else if let Some(ref mut input_entry) = self.input_entry {
        input_entry.input(&self.input, key);
    } else {
        self.view_input(key);
    }
    self.process_msgs();
}
/// Adjusts the editor rect to a new terminal size and re-clamps the cursor.
pub fn resize(&mut self, width: i32, height: i32) {
    self.rect.height = height as isize - 1; // Subtract 1 for the status line on the bottom
    self.rect.width = width as isize;
    self.update_cursor();
}
}
Remove debug status lines
use std::cmp;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::iter;
use std::error::Error;
use std::ascii::AsciiExt;
use itertools::Itertools;
use std::borrow::Cow;
use std::rc::Rc;
use rustbox::{RustBox};
use rustbox::keyboard::Key;
use rex_utils;
use rex_utils::split_vec::SplitVec;
use rex_utils::rect::Rect;
use super::super::config::Config;
use super::RustBoxEx::{RustBoxEx, Style};
use super::input::Input;
use super::widget::Widget;
use super::inputline::{GotoInputLine, FindInputLine, PathInputLine};
use super::overlay::OverlayText;
use super::config::ConfigScreen;
use super::menu::{OverlayMenu, MenuState, MenuEntry};
/// A reversible buffer edit; each variant's inverse is another EditOp,
/// which is what the undo stack stores. Offsets are in bytes.
#[derive(Debug)]
enum EditOp {
    Delete(isize, isize),   // (start, end) range to remove
    Insert(isize, Vec<u8>), // insert bytes at offset
    Write(isize, Vec<u8>),  // overwrite bytes at offset
}
/// How the line-number gutter is rendered: hidden, 16-bit hex, or
/// 32-bit hex split as XXXX:XXXX.
#[derive(Debug)]
enum LineNumberMode {
    None,
    Short,
    Long
}
/// All actions the hex editor can perform, produced by the key-binding
/// layer and dispatched by `do_action`.
#[derive(Copy,Clone,Debug)]
pub enum HexEditActions {
    Edit(char),
    SwitchView,
    MoveLeft,
    MoveRight,
    MoveUp,
    MoveDown,
    MovePageUp,
    MovePageDown,
    MoveToFirstColumn,
    MoveToLastColumn,
    Delete,
    DeleteWithMove,
    CopySelection,
    CutSelection,
    PasteSelection,
    Undo,
    ToggleInsert,
    // NOTE(review): "Selecion" is a typo, but the variant name is public
    // API -- renaming would break external users of this enum.
    ToggleSelecion,
    HelpView,
    LogView,
    AskGoto,
    AskFind,
    AskOpen,
    AskSave,
    AskConfig,
    AskFill,
    AskMarkAdd,
    AskMarkGoto,
    CheckMagic,
    StartMenu,
}
// Top level of the editor menu (opened by StartMenu): Config plus a
// Mark submenu; each entry maps a mnemonic key to an action.
static ROOT_ENTRIES: MenuState<HexEditActions> = &[
    MenuEntry::CommandEntry('c', "Config", HexEditActions::AskConfig),
    MenuEntry::SubEntries('m', "Mark", &[
        MenuEntry::CommandEntry('a', "Add", HexEditActions::AskMarkAdd),
        MenuEntry::CommandEntry('g', "Goto", HexEditActions::AskMarkGoto),
    ]),
];
// Declares HexEditSignalReceiver, the queue through which widget callbacks
// (closures taking &mut HexEdit) are delivered back to the editor.
signalreceiver_decl!{HexEditSignalReceiver(HexEdit)}
/// The hex-editor widget: buffer, view state, edit state, and the child
/// widgets (input lines and overlays) layered on top of it.
pub struct HexEdit {
    buffer: SplitVec,                   // the bytes being edited
    config: Rc<Config>,                 // shared editor configuration
    rect: Rect<isize>,                  // on-screen area of the view
    cursor_nibble_pos: isize,           // cursor position in nibbles (byte*2 [+1])
    status_log: Vec<String>,            // all status messages ever shown
    show_last_status: bool,             // whether the bar shows the last message
    data_offset: isize,                 // first visible byte (vertical scroll)
    row_offset: isize,                  // first visible column (horizontal scroll)
    nibble_active: bool,                // true: hex pane focused, false: ascii pane
    selection_start: Option<isize>,     // selection anchor in bytes, if any
    insert_mode: bool,                  // insert vs overwrite
    input: Input,                       // key-binding state
    undo_stack: Vec<EditOp>,            // inverse operations, newest last
    input_entry: Option<Box<Widget>>,   // active one-line prompt, if any
    overlay: Option<Box<Widget>>,       // active full-view overlay, if any
    cur_path: Option<PathBuf>,          // file backing the buffer, if any
    clipboard: Option<Vec<u8>>,         // internal (not system) clipboard
    signal_receiver: Rc<HexEditSignalReceiver>, // callback queue from child widgets
}
impl HexEdit {
/// Creates an empty editor with the given configuration and default state
/// (cursor at origin, overwrite mode, hex pane active, no file).
pub fn new(config: Config) -> HexEdit {
    HexEdit {
        buffer: SplitVec::new(),
        config: Rc::new(config),
        rect: Default::default(),
        cursor_nibble_pos: 0,
        data_offset: 0,
        row_offset: 0,
        status_log: vec!["Press C-/ for help".to_string()],
        show_last_status: true,
        nibble_active: true,
        selection_start: None,
        insert_mode: false,
        input_entry: None,
        undo_stack: Vec::new(),
        overlay: None,
        cur_path: None,
        clipboard: None,
        input: Input::new(),
        signal_receiver: Rc::new(HexEditSignalReceiver::new()),
    }
}
/// Resets editing state after loading a new buffer (cursor, scroll,
/// selection, mode, prompt, and undo history).
fn reset(&mut self) {
    self.cursor_nibble_pos = 0;
    self.data_offset = 0;
    self.nibble_active = true;
    self.selection_start = None;
    self.insert_mode = false;
    self.input_entry = None;
    self.undo_stack = Vec::new();
}
/// Picks the gutter format: hidden when disabled, 16-bit hex while every
/// offset fits in 0xFFFF, otherwise the long 32-bit form.
fn get_linenumber_mode(&self) -> LineNumberMode {
    if !self.config.show_linenum {
        LineNumberMode::None
    } else if self.buffer.len() <= 0xFFFF {
        LineNumberMode::Short
    } else {
        LineNumberMode::Long
    }
}
/// Width in cells reserved for the line-number gutter (including one
/// trailing space of padding).
fn get_linenumber_width(&self) -> isize {
    match self.get_linenumber_mode() {
        LineNumberMode::None => 1,
        LineNumberMode::Short => 4 + 1, // 4 for the XXXX + 1 for whitespace
        LineNumberMode::Long => 9 + 1, // 9 for XXXX:XXXX + 1 for whitespace
    }
}
/// Bytes represented per logical line: the configured width if set,
/// otherwise as many as fit on one screen row.
fn get_line_width(&self) -> isize {
    self.config.line_width.unwrap_or(self.get_bytes_per_row() as u32) as isize
}
/// How many bytes fit on one screen row at the current width.
fn get_bytes_per_row(&self) -> isize {
    // This is the number of cells on the screen that are used for each byte.
    // For the nibble view, we need 3 (1 for each nibble and 1 for the spacing). For
    // the ascii view, if it is shown, we need another one.
    let cells_per_byte = if self.config.show_ascii { 4 } else { 3 };
    (self.rect.width - self.get_linenumber_width()) / cells_per_byte
}
/// Bytes covered by one full screen (logical line width x visible rows).
fn get_bytes_per_screen(&self) -> isize {
    self.get_line_width() * self.rect.height
}
/// Draws the gutter for one row: nothing, a 4-digit hex offset, or the
/// long XXXX:XXXX form (high and low 16 bits of the offset).
fn draw_line_number(&self, rb: &RustBox, row: usize, line_number: usize) {
    match self.get_linenumber_mode() {
        LineNumberMode::None => (),
        LineNumberMode::Short => {
            rb.print_style(0, row, Style::Default, &format!("{:04X}", line_number));
        }
        LineNumberMode::Long => {
            rb.print_style(0, row, Style::Default, &format!("{:04X}:{:04X}", line_number >> 16, line_number & 0xFFFF));
        }
    };
}
/// Draws one editor row: the hex (nibble) columns plus, when enabled, the
/// ASCII column, applying cursor and selection highlighting.
fn draw_line(&self, rb: &RustBox, iter: &mut Iterator<Item=(usize, Option<&u8>)>, row: usize) {
    let nibble_view_start = self.get_linenumber_width() as usize;
    // The value of this is wrong if we are not showing the ascii view
    let byte_view_start = nibble_view_start + self.get_bytes_per_row() as usize * 3;
    // We want the selection draw to not go out of the editor view
    let mut prev_in_selection = false;
    let mut at_current_row = false;
    // Skip the horizontally scrolled-off bytes, then render at most one row's worth.
    for (row_offset, (byte_pos, maybe_byte)) in iter.skip(self.row_offset as usize).enumerate().take(self.get_bytes_per_row() as usize) {
        let at_current_byte = byte_pos as isize == (self.cursor_nibble_pos / 2);
        at_current_row = at_current_row || at_current_byte;
        let in_selection = if let Some(selection_pos) = self.selection_start {
            rex_utils::is_between(byte_pos as isize, selection_pos, self.cursor_nibble_pos / 2)
        } else {
            false
        };
        // Now we draw the nibble view; a `None` byte is the "fake" one-past-the-end
        // item and renders as blanks so the cursor can sit there.
        let hex_chars = if let Some(&byte) = maybe_byte {
            rex_utils::u8_to_hex(byte)
        } else {
            (' ', ' ')
        };
        let nibble_view_column = nibble_view_start + (row_offset * 3);
        let nibble_style = if (!self.nibble_active && at_current_byte) || in_selection {
            Style::Selection
        } else {
            Style::Default
        };
        rb.print_char_style(nibble_view_column, row, nibble_style,
                            hex_chars.0);
        rb.print_char_style(nibble_view_column + 1, row, nibble_style,
                            hex_chars.1);
        // Paint the gap between two adjacent selected bytes so the selection
        // looks contiguous in the nibble view.
        if prev_in_selection && in_selection {
            rb.print_char_style(nibble_view_column - 1, row, nibble_style,
                                ' ');
        }
        if self.nibble_active && self.input_entry.is_none() && at_current_byte {
            rb.set_cursor(nibble_view_column as isize + (self.cursor_nibble_pos & 1),
                          row as isize);
        };
        if self.config.show_ascii {
            // Now let's draw the byte window
            let byte_char = if let Some(&byte) = maybe_byte {
                let bc = byte as char;
                if bc.is_ascii() && bc.is_alphanumeric() {
                    bc
                } else {
                    '.'
                }
            } else {
                ' '
            };
            // If we are at the current byte but the nibble view is active, we want to draw a
            // "fake" cursor by drawing a selection square
            let byte_style = if (self.nibble_active && at_current_byte) || in_selection {
                Style::Selection
            } else {
                Style::Default
            };
            rb.print_char_style(byte_view_start + row_offset, row, byte_style,
                                byte_char);
            if !self.nibble_active && self.input_entry.is_none() && at_current_byte {
                rb.set_cursor((byte_view_start + row_offset) as isize, row as isize);
            }
            // Remember if we had a selection, so that we know for next char to "fill in" with
            // selection in the nibble view
            // NOTE(review): this update only happens when show_ascii is on, so the
            // gap-fill above never triggers with the ascii view hidden -- confirm intended.
            prev_in_selection = in_selection;
        }
    }
    // We just need to consume the iterator and see if there were any remaining bytes
    let bytes_remaining = iter.count();
    // '<' / '>' markers indicate the current row continues off-screen.
    if at_current_row && self.row_offset != 0 {
        rb.print_char_style(nibble_view_start - 1, row, Style::Default, '<');
    }
    if at_current_row && bytes_remaining != 0 {
        rb.print_char_style(byte_view_start - 1, row, Style::Default, '>');
    }
}
/// Draws the visible portion of the buffer, one row at a time, delegating
/// to `draw_line_number` and `draw_line` for each row.
pub fn draw_view(&self, rb: &RustBox) {
    let start_iter = self.data_offset as usize;
    let stop_iter = cmp::min(start_iter + self.get_bytes_per_screen() as usize, self.buffer.len());
    let itit = (start_iter..).zip( // We are zipping the byte position
        self.buffer.iter_range(start_iter..stop_iter) // With the data at those bytes
        .map(|x| Some(x)) // And wrapping it in an option
        .chain(iter::once(None))) // So we can have a "fake" last item that will be None
        .chunks_lazy(self.get_line_width() as usize); //And split it into nice row-sized chunks
    for (row, row_iter_) in itit.into_iter().take(self.rect.height as usize).enumerate() {
        // We need to be able to peek in the iterable so we can get the current position
        let mut row_iter = row_iter_.peekable();
        // Safe to unwrap: every chunk produced by chunks_lazy is non-empty.
        let byte_pos = row_iter.peek().unwrap().0;
        self.draw_line_number(rb, row, byte_pos);
        self.draw_line(rb, &mut row_iter, row);
    }
}
/// Renders the bottom status bar: the last status message on the left and a
/// right-aligned summary (selection info, or cursor position / undo depth,
/// plus the SEL/INS/OVR mode indicator).
fn draw_statusbar(&self, rb: &RustBox) {
    // Clear the whole line with the status-bar style first.
    rb.print_style(0, rb.height() - 1, Style::StatusBar, &rex_utils::string_with_repeat(' ', rb.width()));
    if self.show_last_status {
        if let Some(ref status_line) = self.status_log.last() {
            rb.print_style(0, rb.height() - 1, Style::StatusBar, &status_line);
        }
    }
    // Mode indicator: an active selection wins over insert/overwrite.
    let mode = if let Some(_) = self.selection_start {
        "SEL"
    } else if self.insert_mode {
        "INS"
    } else {
        "OVR"
    };
    let right_status;
    if let Some(selection_start) = self.selection_start {
        let size = (self.cursor_nibble_pos/2 - selection_start).abs();
        right_status = format!(
            " Start: {} Size: {} Pos: {} {}",
            selection_start, size, self.cursor_nibble_pos/2, mode);
    } else {
        // BUG FIX: the arguments were swapped -- "Pos:" showed the undo-stack
        // depth and "Undo:" showed the cursor position.
        right_status = format!(
            " Pos: {} Undo: {} {}",
            self.cursor_nibble_pos/2, self.undo_stack.len(), mode);
    };
    // Right-align the summary; if the bar is too narrow, clip its left side.
    let (x_pos, start_index) = if rb.width() >= right_status.len() {
        (rb.width() - right_status.len(), 0)
    } else {
        (0, right_status.len() - rb.width())
    };
    rb.print_style(x_pos, rb.height() - 1, Style::StatusBar, &right_status[start_index..]);
}
/// Top-level draw: the hex view, then any active input line (one row above
/// the status bar) or overlay (covering the whole view), then the status bar.
pub fn draw(&mut self, rb: &RustBox) {
    self.draw_view(rb);
    if let Some(entry) = self.input_entry.as_mut() {
        entry.draw(rb, Rect {
            top: (rb.height() - 2) as isize,
            left: 0,
            height: 1,
            width: rb.width() as isize
        }, true);
    }
    if let Some(overlay) = self.overlay.as_mut() {
        overlay.draw(rb, Rect {
            top: 0,
            left: 0,
            height: self.rect.height,
            width: self.rect.width,
        }, true);
    }
    self.draw_statusbar(rb);
}
/// Appends a message to the status log and makes it visible in the bar.
/// Accepts both `&'static str` and `String` via `Into<Cow>`.
fn status<S: Into<Cow<'static, str>> + ?Sized>(&mut self, st: S) {
    self.show_last_status = true;
    let cow: Cow<'static, str> = st.into();
    // `into_owned` reuses an owned String and only allocates for the
    // borrowed case, instead of re-formatting unconditionally.
    self.status_log.push(cow.into_owned());
}
/// Hides the last status message (the log itself is kept for the log view).
fn clear_status(&mut self) {
    self.show_last_status = false;
}
/// Loads a file into the editor, replacing the current buffer and resetting
/// all editing state. On I/O failure the current buffer is left untouched
/// and the error is reported via the status bar.
pub fn open(&mut self, path: &Path) {
    let mut v = vec![];
    if let Err(e) = File::open(path).and_then(|mut f| f.read_to_end(&mut v)) {
        self.status(format!("ERROR: {}", e.description()));
        return;
    }
    self.buffer = SplitVec::from_vec(v);
    self.cur_path = Some(PathBuf::from(path));
    self.reset();
}
/// Writes the buffer to `path`, slice by slice, stopping at the first I/O
/// error. On success the path is remembered as the current file.
pub fn save(&mut self, path: &Path) {
    let result = File::create(path)
        // Fold short-circuits the writes on the first Err.
        .and_then(|mut f| self.buffer.iter_slices()
            .fold(Ok(()), |res, val| res
                .and_then(|_| f.write_all(val))));
    match result {
        Ok(_) => {
            self.cur_path = Some(PathBuf::from(path));
        }
        Err(e) => {
            self.status(format!("ERROR: {}", e.description()));
        }
    }
}
/// Applies an edit operation to the buffer and, when requested, pushes the
/// inverse operation onto the undo stack. Returns the affected byte range
/// as `(begin, end)` offsets.
///
/// Fix: removed the needless `mut` on locals that are assigned exactly once
/// (unused-mut compiler warning).
fn edit_buffer(&mut self, act: EditOp, add_to_undo: bool) -> (isize, isize) {
    let begin_region: isize;
    let end_region: isize;
    match act {
        EditOp::Insert(offset, buf) => {
            begin_region = offset;
            end_region = offset + buf.len() as isize;
            self.buffer.insert(offset as usize, &buf);
            if add_to_undo {
                // The inverse of an insert is deleting the inserted range.
                self.push_undo(EditOp::Delete(offset, offset + buf.len() as isize))
            }
        }
        EditOp::Delete(offset, end) => {
            begin_region = offset;
            end_region = end;
            let res = self.buffer.move_out(offset as usize..end as usize);
            // The inverse of a delete is re-inserting the removed bytes.
            if add_to_undo { self.push_undo(EditOp::Insert(offset, res)) }
        }
        EditOp::Write(offset, buf) => {
            begin_region = offset;
            end_region = offset + buf.len() as isize;
            // Capture the overwritten bytes so the write can be undone.
            let orig_data = self.buffer.copy_out(offset as usize..(offset as usize + buf.len()));
            self.buffer.copy_in(offset as usize, &buf);
            if add_to_undo { self.push_undo(EditOp::Write(offset, orig_data)) }
        }
    }
    (begin_region, end_region)
}
/// Records the inverse of an edit so `undo` can replay it later.
fn push_undo(&mut self, act: EditOp) {
    self.undo_stack.push(act);
}
/// Pops and replays the most recent inverse operation, then moves the
/// cursor to the start of the affected region. No-op when the stack is empty.
fn undo(&mut self) {
    if let Some(act) = self.undo_stack.pop() {
        // add_to_undo is false: undoing must not itself be undoable (no redo).
        let (begin, _) = self.edit_buffer(act, false);
        self.set_cursor(begin * 2);
    }
}
/// True when the cursor sits one past the last byte (the append position).
fn cursor_at_end(&self) -> bool {
    let end_nibble_pos = (self.buffer.len() * 2) as isize;
    end_nibble_pos == self.cursor_nibble_pos
}
/// Deletes the active selection, or the byte at the cursor. With
/// `with_bksp` the byte *before* the cursor is removed instead
/// (backspace semantics).
fn delete_at_cursor(&mut self, with_bksp: bool) {
    let mut cursor_nibble_pos = self.cursor_nibble_pos;
    let selection_pos = match self.selection_start {
        Some(selection_pos_tag) => selection_pos_tag,
        None => {
            if with_bksp {
                // Backspace at the very start of the buffer does nothing.
                if cursor_nibble_pos < 2 {
                    return;
                }
                cursor_nibble_pos -= 2;
            }
            cursor_nibble_pos / 2
        }
    };
    // Normalize: selection may extend in either direction from its anchor.
    let del_start = cmp::min(selection_pos, cursor_nibble_pos / 2);
    let mut del_stop = cmp::max(selection_pos, cursor_nibble_pos / 2) + 1;
    // Clamp the (inclusive-end) range to the buffer length; an empty
    // clamped range means there is nothing to delete.
    if del_stop > self.buffer.len() as isize {
        del_stop -= 1;
        if del_stop == del_start {
            return;
        }
    }
    if self.buffer.len() == 0 {
        self.status("Nothing to delete");
        return;
    }
    self.selection_start = None;
    self.edit_buffer(EditOp::Delete(del_start, del_stop), true);
    self.set_cursor(del_start * 2);
}
/// Writes one nibble (0-15) at the cursor, either inserting or overwriting
/// depending on mode; a pending selection is deleted first.
fn write_nibble_at_cursor(&mut self, c: u8) {
    // Replace the text at the selection before writing the data
    if self.selection_start.is_some() {
        self.delete_at_cursor(false);
    }
    // At the end of the buffer there is no byte to overwrite, so insert.
    if self.insert_mode || self.cursor_at_end() {
        self.insert_nibble_at_cursor(c);
    } else {
        self.set_nibble_at_cursor(c);
    }
}
/// Overwrites one nibble of the byte under the cursor: high nibble at an
/// even nibble position, low nibble at an odd one. Goes through
/// `edit_buffer` so the change is undoable.
fn set_nibble_at_cursor(&mut self, c: u8) {
    let mut byte = self.buffer[(self.cursor_nibble_pos / 2) as usize];
    byte = match self.cursor_nibble_pos & 1 {
        0 => (byte & 0x0f) + c * 16,
        1 => (byte & 0xf0) + c,
        // Unreachable: `& 1` can only yield 0 or 1.
        _ => 0xff,
    };
    let byte_offset = self.cursor_nibble_pos / 2;
    self.edit_buffer(EditOp::Write(byte_offset, vec![byte]), true);
}
/// Inserts a new byte whose high nibble is `c` when the cursor is on a byte
/// boundary; mid-byte it overwrites the low nibble of the byte just inserted.
fn insert_nibble_at_cursor(&mut self, c: u8) {
    // If we are at half byte, we still overwrite
    if self.cursor_nibble_pos & 1 == 1 {
        self.set_nibble_at_cursor(c);
        return
    }
    let pos_div2 = self.cursor_nibble_pos / 2;
    // `c * 16` places the typed digit in the high nibble of the new byte.
    self.edit_buffer(EditOp::Insert(pos_div2, vec![c * 16]), true);
}
/// Flips between insert and overwrite mode; the zero-length cursor move
/// re-runs clamping/scrolling for the new mode.
fn toggle_insert_mode(&mut self) {
    self.insert_mode = !self.insert_mode;
    self.move_cursor(0);
}
/// Writes a whole byte at the cursor (ascii-view editing), inserting or
/// overwriting depending on mode; a pending selection is deleted first.
fn write_byte_at_cursor(&mut self, c: u8) {
    // Replace the text at the selection before writing the data
    if self.selection_start.is_some() {
        self.delete_at_cursor(false);
    }
    let byte_offset = self.cursor_nibble_pos / 2;
    // At the end of the buffer there is no byte to overwrite, so insert.
    if self.insert_mode || self.cursor_at_end() {
        self.edit_buffer(EditOp::Insert(byte_offset, vec![c]), true);
    } else {
        self.edit_buffer(EditOp::Write(byte_offset, vec![c]), true);
    }
}
/// Moves the cursor by a relative number of nibbles, then clamps and scrolls.
fn move_cursor(&mut self, pos: isize) {
    self.cursor_nibble_pos += pos;
    self.update_cursor()
}
/// Moves the cursor to an absolute nibble position, then clamps and scrolls.
fn set_cursor(&mut self, pos: isize) {
    self.cursor_nibble_pos = pos;
    self.update_cursor()
}
/// Clamps the cursor into the buffer (0 ..= len*2, so one past the end is a
/// valid append position) and scrolls the view vertically and horizontally
/// so the cursor stays visible.
fn update_cursor(&mut self) {
    self.cursor_nibble_pos = cmp::max(self.cursor_nibble_pos, 0);
    self.cursor_nibble_pos = cmp::min(self.cursor_nibble_pos, (self.buffer.len()*2) as isize);
    let cursor_byte_pos = self.cursor_nibble_pos / 2;
    let cursor_row_offset = cursor_byte_pos % self.get_line_width();
    // If the cursor moves above or below the view, scroll it
    if cursor_byte_pos < self.data_offset {
        // Snap the view to the start of the cursor's line.
        self.data_offset = (cursor_byte_pos) - cursor_row_offset;
    }
    if cursor_byte_pos > (self.data_offset + self.get_bytes_per_screen() - 1) {
        // Put the cursor's line on the last visible row.
        self.data_offset = cursor_byte_pos - cursor_row_offset -
            self.get_bytes_per_screen() + self.get_line_width();
    }
    // If the cursor moves to the right or left of the view, scroll it
    if cursor_row_offset < self.row_offset {
        self.row_offset = cursor_row_offset;
    }
    if cursor_row_offset >= self.row_offset + self.get_bytes_per_row() {
        self.row_offset = cursor_row_offset - self.get_bytes_per_row() + 1;
    }
}
fn toggle_selection(&mut self) {
match self.selection_start {
Some(_) => self.selection_start = None,
None => self.selection_start = Some(self.cursor_nibble_pos / 2)
}
let selection_start = self.selection_start; // Yay! Lifetimes!
}
fn goto(&mut self, pos: isize) {
self.status(format!("Going to {:?}", pos));
self.set_cursor(pos * 2);
}
fn find_buf(&mut self, needle: &[u8]) {
let found_pos = match self.buffer.find_slice_from((self.cursor_nibble_pos / 2) as usize, needle) {
None => {
self.buffer.find_slice_from(0, needle)
}
a => a
};
if let Some(pos) = found_pos {
self.status(format!("Found at {:?}", pos));
self.set_cursor((pos * 2) as isize);
} else {
self.status("Nothing found!");
}
}
fn read_cursor_to_clipboard(&mut self) -> Option<usize> {
let (start, stop) = match self.selection_start {
None => { return None; },
Some(selection_pos) => {
(cmp::min(selection_pos, self.cursor_nibble_pos / 2),
cmp::max(selection_pos, self.cursor_nibble_pos / 2))
}
};
let data = self.buffer.copy_out(start as usize..stop as usize);
let data_len = data.len();
self.clipboard = Some(data);
Some(data_len)
}
fn edit_copy(&mut self) {
if let Some(data_len) = self.read_cursor_to_clipboard() {
self.status(format!("Copied {}", data_len));
self.selection_start = None;
}
}
fn edit_cut(&mut self) {
if let Some(data_len) = self.read_cursor_to_clipboard() {
self.delete_at_cursor(false);
self.status(format!("Cut {}", data_len));
}
}
    /// "Paste": insert the clipboard contents at the cursor.
    fn edit_paste(&mut self) {
        // Clone the clipboard out so `self` is free to be borrowed mutably
        // by `edit_buffer` below.
        let data = if let Some(ref d) = self.clipboard {
            d.clone()
        } else {
            return;
        };
        let data_len = data.len() as isize;
        // This is needed to satisfy the borrow checker
        let cur_pos_in_bytes = self.cursor_nibble_pos / 2;
        self.edit_buffer(EditOp::Insert(cur_pos_in_bytes, data), true);
        // NOTE(review): the cursor is in nibbles, so skipping past
        // `data_len` inserted bytes would be `data_len * 2`; `data_len + 1`
        // looks off by roughly a factor of two -- confirm intended behavior.
        self.move_cursor(data_len + 1);
    }
fn view_input(&mut self, key: Key) {
if let Some(action) = self.input.editor_input(key) {
self.do_action(action)
}
}
fn do_action(&mut self, action: HexEditActions) {
self.clear_status();
match action {
// Movement
HexEditActions::MoveLeft if self.nibble_active => self.move_cursor(-1),
HexEditActions::MoveRight if self.nibble_active => self.move_cursor(1),
HexEditActions::MoveLeft if !self.nibble_active => self.move_cursor(-2),
HexEditActions::MoveRight if !self.nibble_active => self.move_cursor(2),
HexEditActions::MoveLeft => panic!("Make the case handler happy!"),
HexEditActions::MoveRight => panic!("Make the case handler happy!"),
HexEditActions::MoveUp => {
let t = -self.get_line_width() * 2;
self.move_cursor(t)
}
HexEditActions::MoveDown => {
let t = self.get_line_width() * 2;
self.move_cursor(t)
}
HexEditActions::MovePageUp => {
let t = -(self.get_bytes_per_screen() * 2);
self.move_cursor(t)
}
HexEditActions::MovePageDown => {
let t = self.get_bytes_per_screen() * 2;
self.move_cursor(t)
}
HexEditActions::MoveToFirstColumn => {
let pos_in_line = self.cursor_nibble_pos % (self.get_line_width()*2);
self.move_cursor(-pos_in_line)
}
HexEditActions::MoveToLastColumn => {
let pos_in_line = self.cursor_nibble_pos % (self.get_line_width()*2);
let i = self.get_line_width()*2 - 2 - pos_in_line;
self.move_cursor(i);
}
HexEditActions::Delete => self.delete_at_cursor(false),
HexEditActions::DeleteWithMove => self.delete_at_cursor(true),
// Ctrl X, C V
HexEditActions::CutSelection => self.edit_cut(),
HexEditActions::CopySelection => self.edit_copy(),
HexEditActions::PasteSelection => self.edit_paste(),
// Hex input for nibble view
HexEditActions::Edit(ch) if self.nibble_active => {
if let Some(val) = ch.to_digit(16) {
self.write_nibble_at_cursor(val as u8);
self.move_cursor(1);
} else {
// TODO: Show error?
}
},
// Ascii edit for byte view
HexEditActions::Edit(ch) if !self.nibble_active => {
if ch.len_utf8() == 1 && ch.is_alphanumeric() {
// TODO: Make it printable rather than alphanumeric
self.write_byte_at_cursor(ch as u8);
self.move_cursor(2);
} else {
// TODO: Show error?
}
}
HexEditActions::Edit(ch) => panic!("Make the case handler happy!"),
HexEditActions::SwitchView => {
self.nibble_active = !self.nibble_active;
let t = self.nibble_active;
},
HexEditActions::HelpView => self.start_help(),
HexEditActions::LogView => self.start_logview(),
HexEditActions::ToggleInsert => self.toggle_insert_mode(),
HexEditActions::ToggleSelecion => self.toggle_selection(),
HexEditActions::Undo => self.undo(),
HexEditActions::AskGoto => self.start_goto(),
HexEditActions::AskFind => self.start_find(),
HexEditActions::AskOpen => self.start_open(),
HexEditActions::AskSave => self.start_save(),
HexEditActions::AskConfig => self.start_config(),
HexEditActions::StartMenu => self.start_menu(),
_ => self.status(format!("Operation not implemented yet: {:?}", action))
}
}
    /// Open the root overlay menu; a selected entry closes the overlay and
    /// is dispatched back through `do_action`.
    fn start_menu(&mut self) {
        let sr = &self.signal_receiver;
        let mut menu = OverlayMenu::with_menu(ROOT_ENTRIES);
        menu.on_selected.connect(signal!(sr with |obj, action| {
            obj.overlay = None;
            obj.do_action(action);
        }));
        // On cancel: show the overlay's parting message (if any), then close.
        menu.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.overlay = None;
        }));
        self.overlay = Some(Box::new(menu));
    }
    /// Open the configuration overlay, pre-loaded with the current config.
    fn start_config(&mut self) {
        let sr = &self.signal_receiver;
        let mut config_screen = ConfigScreen::with_config(self.config.clone());
        // On cancel: show the overlay's parting message (if any), then close.
        config_screen.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.overlay = None;
        }));
        self.overlay = Some(Box::new(config_screen));
    }
    /// Show the bundled help text (compiled in from Help.txt) in a
    /// read-only overlay.
    fn start_help(&mut self) {
        let help_text = include_str!("Help.txt");
        // YAY Lifetimes! (This will hopefully be fixed once rust gains MIR/HIR)
        {
            let sr = &self.signal_receiver;
            let mut ot = OverlayText::with_text(help_text.to_string(), false);
            // On cancel: show the overlay's parting message (if any), then close.
            ot.on_cancel.connect(signal!(sr with |obj, opt_msg| {
                if let Some(ref msg) = opt_msg {
                    obj.status(msg.clone());
                } else {
                    obj.clear_status();
                }
                obj.overlay = None;
            }));
            self.overlay = Some(Box::new(ot));
        }
        {
            self.status("Press Esc to return");
        }
    }
    /// Show the accumulated status log in a scrollable overlay.
    fn start_logview(&mut self) {
        let logs = self.status_log.clone();
        let sr = &self.signal_receiver;
        let mut ot = OverlayText::with_logs(logs, true);
        // On cancel: show the overlay's parting message (if any), then close.
        ot.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.overlay = None;
        }));
        self.overlay = Some(Box::new(ot));
    }
    /// Open the "go to position" input line.
    fn start_goto(&mut self) {
        let mut gt = GotoInputLine::new();
        // let mut sender_clone0 = self.sender.clone();
        let sr = &self.signal_receiver;
        // NOTE(review): `goto` already multiplies by 2 (bytes -> nibbles),
        // so passing `pos*2` here scales twice. Confirm which unit `pos`
        // arrives in from GotoInputLine.
        gt.on_done.connect(signal!(sr with |obj, pos| {
            obj.goto(pos*2);
            obj.input_entry = None;
        }));
        // On cancel: show the input line's parting message (if any), then close.
        gt.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.input_entry = None;
        }));
        self.input_entry = Some(Box::new(gt) as Box<Widget>)
    }
    /// Open the "find bytes" input line; the search starts at the cursor.
    fn start_find(&mut self) {
        let mut find_line = FindInputLine::new();
        let sr = &self.signal_receiver;
        find_line.on_find.connect(signal!(sr with |obj, needle| {
            obj.find_buf(&needle);
            obj.input_entry = None;
        }));
        // On cancel: show the input line's parting message (if any), then close.
        find_line.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.input_entry = None;
        }));
        self.input_entry = Some(Box::new(find_line) as Box<Widget>)
    }
    /// Open the "save to path" input line.
    fn start_save(&mut self) {
        let mut path_line = PathInputLine::new("Save: ".into());
        let sr = &self.signal_receiver;
        path_line.on_done.connect(signal!(sr with |obj, path| {
            obj.save(&path);
            obj.input_entry = None;
        }));
        // On cancel: show the input line's parting message (if any), then close.
        path_line.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.input_entry = None;
        }));
        self.input_entry = Some(Box::new(path_line) as Box<Widget>)
    }
    /// Open the "open file" input line.
    fn start_open(&mut self) {
        let mut path_line = PathInputLine::new("Open: ".into());
        let sr = &self.signal_receiver;
        path_line.on_done.connect(signal!(sr with |obj, path| {
            obj.open(&path);
            obj.input_entry = None;
        }));
        // On cancel: show the input line's parting message (if any), then close.
        path_line.on_cancel.connect(signal!(sr with |obj, opt_msg| {
            if let Some(ref msg) = opt_msg {
                obj.status(msg.clone());
            } else {
                obj.clear_status();
            }
            obj.input_entry = None;
        }));
        self.input_entry = Some(Box::new(path_line) as Box<Widget>)
    }
    /// Drain queued signals, applying their queued effects to `self`.
    fn process_msgs(&mut self) {
        // Clone the receiver handle so `self` can be handed to it mutably.
        let mut sr = self.signal_receiver.clone();
        sr.run(self);
    }
    /// Route a key press to the active overlay, else the active input
    /// line, else the main editor view. Signals are pumped before and
    /// after so widget callbacks take effect immediately.
    pub fn input(&mut self, key: Key) {
        self.process_msgs();
        if let Some(ref mut overlay) = self.overlay {
            overlay.input(&self.input, key);
        } else if let Some(ref mut input_entry) = self.input_entry {
            input_entry.input(&self.input, key);
        } else {
            self.view_input(key);
        }
        self.process_msgs();
    }
    /// Record the new terminal geometry and re-clamp the cursor/view.
    pub fn resize(&mut self, width: i32, height: i32) {
        self.rect.height = height as isize - 1; // Subtract 1 for the status line on the bottom
        self.rect.width = width as isize;
        self.update_cursor();
    }
}
|
use std::{env, thread, time};
use std::sync::mpsc;
use reqwest;
use crossbeam;
use bot;
use command_handler::CommandHandler;
use update::Update;
const BASE_URL: &'static str = "https://api.telegram.org/bot";
pub struct Updater {
    /// Telegram bot API token, appended to `BASE_URL` to form the API URL.
    token: String,
    /// Highest update id seen so far; polling asks for ids after this one.
    last_update_id: i32,
    /// The polling loop keeps running while this is true.
    pub running: bool,
    // NOTE(review): not read anywhere in this file -- presumably part of
    // an idle/shutdown protocol; confirm before relying on it.
    pub is_idle: bool,
}
impl Updater {
    /// Creates a new `Updater` and immediately begins polling.
    ///
    /// The bot token comes from `token` or, as a fallback, the
    /// `TELEGRAM_BOT_TOKEN` environment variable; panics if neither is set.
    pub fn start(token: Option<String>,
                 poll_interval: Option<u64>,
                 timeout: Option<i32>,
                 network_delay: Option<i32>,
                 command_handler: CommandHandler) {
        let token = token.or_else(|| env::var("TELEGRAM_BOT_TOKEN").ok())
            .expect("You should pass in a token to new or set TELEGRAM_BOT_TOKEN");
        let updater = Updater {
            token,
            last_update_id: 0,
            running: false,
            is_idle: false,
        };
        updater.start_polling(poll_interval, timeout, network_delay, command_handler);
    }
    /// Runs one polling thread and one command-handling thread, connected
    /// by a channel. Scoped threads let both borrow the same `Bot`.
    pub fn start_polling(mut self,
                         poll_interval: Option<u64>,
                         timeout: Option<i32>,
                         network_delay: Option<i32>,
                         mut command_handler: CommandHandler) {
        if !self.running {
            self.running = true;
            let (tx, rx) = mpsc::channel();
            let bot = bot::Bot::new([BASE_URL, &self.token].concat()).unwrap();
            // Spawn scoped threads
            crossbeam::scope(|scope| {
                scope.spawn(|| {
                    self.start_polling_thread(poll_interval, timeout, network_delay, &bot, tx)
                });
                scope.spawn(|| {
                    command_handler.start_command_handling(rx, &bot);
                });
            });
        }
    }
    /// Long-polls `getUpdates`, forwarding every received update over `tx`
    /// and advancing `last_update_id` past the newest one.
    fn start_polling_thread(&mut self,
                            poll_interval: Option<u64>,
                            timeout: Option<i32>,
                            network_delay: Option<i32>,
                            bot: &bot::Bot,
                            tx: mpsc::Sender<Update>) {
        let poll_interval = time::Duration::from_secs(poll_interval.unwrap_or(0));
        while self.running {
            let updates = bot.get_updates(self.last_update_id, None, timeout, network_delay);
            match updates {
                Ok(Some(ref v)) => {
                    if let Some(u) = v.last() {
                        for update in v {
                            tx.send(update.clone()).unwrap();
                        }
                        // Next poll asks for ids after the newest update.
                        self.last_update_id = (u.update_id + 1) as i32;
                    } else {
                        // Empty batch: poll again immediately.
                        continue;
                    }
                }
                Ok(None) => {
                    // No updates: poll again immediately.
                    continue;
                }
                Err(_) => {
                    // TODO: surface the error instead of silently retrying.
                    continue;
                }
            };
            thread::sleep(poll_interval);
        }
    }
}
Revision note: the following copy of this module removes the unused `reqwest` import.
use std::{env, thread, time};
use std::sync::mpsc;
use crossbeam;
use bot;
use command_handler::CommandHandler;
use update::Update;
const BASE_URL: &'static str = "https://api.telegram.org/bot";
pub struct Updater {
    /// Telegram bot API token, appended to `BASE_URL` to form the API URL.
    token: String,
    /// Highest update id seen so far; polling asks for ids after this one.
    last_update_id: i32,
    /// The polling loop keeps running while this is true.
    pub running: bool,
    // NOTE(review): not read anywhere in this file -- presumably part of
    // an idle/shutdown protocol; confirm before relying on it.
    pub is_idle: bool,
}
impl Updater {
    /// Creates a new `Updater` and immediately begins polling.
    ///
    /// The bot token comes from `token` or, as a fallback, the
    /// `TELEGRAM_BOT_TOKEN` environment variable; panics if neither is set.
    pub fn start(token: Option<String>,
                 poll_interval: Option<u64>,
                 timeout: Option<i32>,
                 network_delay: Option<i32>,
                 command_handler: CommandHandler) {
        let token = token.or_else(|| env::var("TELEGRAM_BOT_TOKEN").ok())
            .expect("You should pass in a token to new or set TELEGRAM_BOT_TOKEN");
        let updater = Updater {
            token,
            last_update_id: 0,
            running: false,
            is_idle: false,
        };
        updater.start_polling(poll_interval, timeout, network_delay, command_handler);
    }
    /// Runs one polling thread and one command-handling thread, connected
    /// by a channel. Scoped threads let both borrow the same `Bot`.
    pub fn start_polling(mut self,
                         poll_interval: Option<u64>,
                         timeout: Option<i32>,
                         network_delay: Option<i32>,
                         mut command_handler: CommandHandler) {
        if !self.running {
            self.running = true;
            let (tx, rx) = mpsc::channel();
            let bot = bot::Bot::new([BASE_URL, &self.token].concat()).unwrap();
            // Spawn scoped threads
            crossbeam::scope(|scope| {
                scope.spawn(|| {
                    self.start_polling_thread(poll_interval, timeout, network_delay, &bot, tx)
                });
                scope.spawn(|| {
                    command_handler.start_command_handling(rx, &bot);
                });
            });
        }
    }
    /// Long-polls `getUpdates`, forwarding every received update over `tx`
    /// and advancing `last_update_id` past the newest one.
    fn start_polling_thread(&mut self,
                            poll_interval: Option<u64>,
                            timeout: Option<i32>,
                            network_delay: Option<i32>,
                            bot: &bot::Bot,
                            tx: mpsc::Sender<Update>) {
        let poll_interval = time::Duration::from_secs(poll_interval.unwrap_or(0));
        while self.running {
            let updates = bot.get_updates(self.last_update_id, None, timeout, network_delay);
            match updates {
                Ok(Some(ref v)) => {
                    if let Some(u) = v.last() {
                        for update in v {
                            tx.send(update.clone()).unwrap();
                        }
                        // Next poll asks for ids after the newest update.
                        self.last_update_id = (u.update_id + 1) as i32;
                    } else {
                        // Empty batch: poll again immediately.
                        continue;
                    }
                }
                Ok(None) => {
                    // No updates: poll again immediately.
                    continue;
                }
                Err(_) => {
                    // TODO: surface the error instead of silently retrying.
                    continue;
                }
            };
            thread::sleep(poll_interval);
        }
    }
}
|
use update_client::*;
use errors::*;
use database::*;
// use diesel::prelude::*;
// use diesel::pg::PgConnection;
// use dotenv::dotenv;
//
// use rocksdb::DB;
use std::collections::HashMap;
use std::env;
use std::str;
use std::thread;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
// Updater lifecycle states; appears unused in this file so far.
enum CurrentState {
    Running,
    Stopped,
}
// Using the client, fetches updates periodically, storing the results in a database
pub struct GSBUpdater<'a, T>
    where T: 'a + Database
{
    // Shared with the polling thread, which locks it to fetch.
    update_client: Arc<Mutex<UpdateClient<'a>>>,
    db: &'a mut T,
    period: usize, // 30 seconds - will be 30 minutes later...
    // Handle of the background polling thread, once started.
    thread: Option<thread::JoinHandle<Result<()>>>,
    // Flag intended to let the polling loop be stopped.
    should_execute: AtomicBool,
}
impl<'a, T> GSBUpdater<'a, T>
    where T: Database
{
    /// Builds an updater over the given API key and database handle.
    pub fn new(api_key: &'a str, db: &'a mut T) -> Result<GSBUpdater<'a, T>> {
        Ok(GSBUpdater {
            update_client: Arc::new(Mutex::new(UpdateClient::new(api_key))),
            db: db,
            period: 30,
            thread: None,
            should_execute: AtomicBool::new(false),
        })
    }
    /// Spawns the fetch-and-store loop on a background thread.
    // NOTE(review): `thread::spawn` requires a `'static` closure, but this
    // one borrows `self` (including `self.db: &'a mut T`) -- as written
    // this very likely fails to compile; confirm whether scoped threads
    // were intended here.
    pub fn begin_update(&mut self) -> Result<()> {
        self.thread = Some(thread::spawn(|| {
            loop {
                let update_client = self.update_client.lock().unwrap();
                let fetch_response = try!(update_client.fetch().send());
                try!(self.db.update(&fetch_response));
            }
        }));
        Ok(())
    }
    /// Changes the polling period (not yet implemented).
    pub fn set_period() {
        unimplemented!();
    }
    /// Stops the background polling thread (not yet implemented).
    pub fn stop_updates() {
        unimplemented!();
    }
}
Revision note: the following copy of this module changes `GSBUpdater::period` from `usize` to `AtomicUsize`.
use update_client::*;
use errors::*;
use database::*;
// use diesel::prelude::*;
// use diesel::pg::PgConnection;
// use dotenv::dotenv;
//
// use rocksdb::DB;
use std::collections::HashMap;
use std::env;
use std::str;
use std::thread;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
// Updater lifecycle states; appears unused in this file so far.
enum CurrentState {
    Running,
    Stopped,
}
// Using the client, fetches updates periodically, storing the results in a database
pub struct GSBUpdater<'a, T>
    where T: 'a + Database
{
    // Shared with the polling thread, which locks it to fetch.
    update_client: Arc<Mutex<UpdateClient<'a>>>,
    db: &'a mut T,
    // Polling period; seconds per the default set in `new`.
    period: AtomicUsize,
    // Handle of the background polling thread, once started.
    thread: Option<thread::JoinHandle<Result<()>>>,
    // Flag intended to let the polling loop be stopped.
    should_execute: AtomicBool,
}
impl<'a, T> GSBUpdater<'a, T>
    where T: Database
{
    /// Builds an updater over the given API key and database handle.
    pub fn new(api_key: &'a str, db: &'a mut T) -> Result<GSBUpdater<'a, T>> {
        Ok(GSBUpdater {
            update_client: Arc::new(Mutex::new(UpdateClient::new(api_key))),
            db: db,
            period: AtomicUsize::new(30), // 30 seconds - will be 30 minutes later...
            thread: None,
            should_execute: AtomicBool::new(false),
        })
    }
    /// Spawns the fetch-and-store loop on a background thread.
    // NOTE(review): `thread::spawn` requires a `'static` closure, but this
    // one borrows `self` (including `self.db: &'a mut T`) -- as written
    // this very likely fails to compile; confirm whether scoped threads
    // were intended here.
    pub fn begin_update(&mut self) -> Result<()> {
        self.thread = Some(thread::spawn(|| {
            loop {
                let update_client = self.update_client.lock().unwrap();
                let fetch_response = try!(update_client.fetch().send());
                try!(self.db.update(&fetch_response));
            }
        }));
        Ok(())
    }
    /// Changes the polling period (not yet implemented).
    pub fn set_period() {
        unimplemented!();
    }
    /// Stops the background polling thread (not yet implemented).
    pub fn stop_updates() {
        unimplemented!();
    }
}
|
// C primitive aliases for this target.
pub type c_char = i8;
pub type c_long = i64;
pub type c_ulong = u64;
pub type wchar_t = i32;
// POSIX typedefs.
pub type blkcnt_t = ::c_ulong;
pub type blksize_t = ::c_long;
pub type clock_t = ::c_long;
pub type clockid_t = ::c_int;
pub type dev_t = ::c_long;
pub type fsblkcnt_t = ::c_ulong;
pub type fsfilcnt_t = ::c_ulong;
pub type ino_t = ::c_ulong;
pub type mode_t = ::c_int;
pub type nfds_t = ::c_ulong;
pub type nlink_t = ::c_ulong;
pub type off_t = ::c_long;
// pthread handles are opaque pointers on this platform.
pub type pthread_t = *mut ::c_void;
pub type pthread_attr_t = *mut ::c_void;
pub type pthread_cond_t = *mut ::c_void;
pub type pthread_condattr_t = *mut ::c_void;
// Must be usize due to libstd/sys_common/thread_local.rs,
// should technically be *mut ::c_void
pub type pthread_key_t = usize;
pub type pthread_mutex_t = *mut ::c_void;
pub type pthread_mutexattr_t = *mut ::c_void;
pub type pthread_rwlock_t = *mut ::c_void;
pub type pthread_rwlockattr_t = *mut ::c_void;
pub type rlim_t = ::c_ulonglong;
pub type sa_family_t = u16;
pub type sem_t = *mut ::c_void;
pub type sigset_t = ::c_ulong;
pub type socklen_t = u32;
pub type speed_t = u32;
pub type suseconds_t = ::c_int;
pub type tcflag_t = u32;
pub type time_t = ::c_long;
// Uninhabited (zero-variant) enum: values cannot be constructed, so
// `timezone` is only ever used behind pointers.
#[cfg_attr(feature = "extra_traits", derive(Debug))]
pub enum timezone {}
impl ::Copy for timezone {}
impl ::Clone for timezone {
    fn clone(&self) -> timezone {
        *self
    }
}
// Structs declared through `s_no_extra_traits!` (macro defined elsewhere
// in the crate) skip the usual extra derives -- presumably because of the
// large fixed-size arrays they contain; confirm against the macro's docs.
s_no_extra_traits! {
    #[repr(C)]
    pub struct utsname {
        pub sysname: [::c_char; UTSLENGTH],
        pub nodename: [::c_char; UTSLENGTH],
        pub release: [::c_char; UTSLENGTH],
        pub version: [::c_char; UTSLENGTH],
        pub machine: [::c_char; UTSLENGTH],
        pub domainname: [::c_char; UTSLENGTH],
    }
    pub struct dirent {
        pub d_ino: ::ino_t,
        pub d_off: ::off_t,
        pub d_reclen: ::c_ushort,
        pub d_type: ::c_uchar,
        pub d_name: [::c_char; 256],
    }
    pub struct sockaddr_un {
        pub sun_family: ::sa_family_t,
        pub sun_path: [::c_char; 108]
    }
    pub struct sockaddr_storage {
        pub ss_family: ::sa_family_t,
        // Padding sized so the whole struct is 128 bytes.
        __ss_padding: [
            u8;
            128 -
            ::core::mem::size_of::<sa_family_t>() -
            ::core::mem::size_of::<c_ulong>()
        ],
        __ss_align: ::c_ulong,
    }
}
// C-layout structs declared through the crate's `s!` macro (defined
// elsewhere; it wraps each struct in the crate's standard boilerplate).
s! {
    pub struct addrinfo {
        pub ai_flags: ::c_int,
        pub ai_family: ::c_int,
        pub ai_socktype: ::c_int,
        pub ai_protocol: ::c_int,
        pub ai_addrlen: ::size_t,
        pub ai_canonname: *mut ::c_char,
        pub ai_addr: *mut ::sockaddr,
        pub ai_next: *mut ::addrinfo,
    }
    pub struct Dl_info {
        pub dli_fname: *const ::c_char,
        pub dli_fbase: *mut ::c_void,
        pub dli_sname: *const ::c_char,
        pub dli_saddr: *mut ::c_void,
    }
    pub struct epoll_event {
        pub events: u32,
        pub u64: u64,
        pub _pad: u64,
    }
    pub struct fd_set {
        fds_bits: [::c_ulong; ::FD_SETSIZE / ULONG_SIZE],
    }
    pub struct in_addr {
        pub s_addr: ::in_addr_t,
    }
    pub struct ip_mreq {
        pub imr_multiaddr: ::in_addr,
        pub imr_interface: ::in_addr,
    }
    pub struct lconv {
        pub currency_symbol: *const ::c_char,
        pub decimal_point: *const ::c_char,
        pub frac_digits: ::c_char,
        pub grouping: *const ::c_char,
        pub int_curr_symbol: *const ::c_char,
        pub int_frac_digits: ::c_char,
        pub mon_decimal_point: *const ::c_char,
        pub mon_grouping: *const ::c_char,
        pub mon_thousands_sep: *const ::c_char,
        pub negative_sign: *const ::c_char,
        pub n_cs_precedes: ::c_char,
        pub n_sep_by_space: ::c_char,
        pub n_sign_posn: ::c_char,
        pub positive_sign: *const ::c_char,
        pub p_cs_precedes: ::c_char,
        pub p_sep_by_space: ::c_char,
        pub p_sign_posn: ::c_char,
        pub thousands_sep: *const ::c_char,
    }
    pub struct passwd {
        pub pw_name: *mut ::c_char,
        pub pw_passwd: *mut ::c_char,
        pub pw_uid: ::uid_t,
        pub pw_gid: ::gid_t,
        pub pw_gecos: *mut ::c_char,
        pub pw_dir: *mut ::c_char,
        pub pw_shell: *mut ::c_char,
    }
    pub struct sigaction {
        pub sa_handler: ::sighandler_t,
        pub sa_flags: ::c_ulong,
        pub sa_restorer: ::Option<extern fn()>,
        pub sa_mask: ::sigset_t,
    }
    pub struct sockaddr {
        pub sa_family: ::sa_family_t,
        pub sa_data: [::c_char; 14],
    }
    pub struct sockaddr_in {
        pub sin_family: ::sa_family_t,
        pub sin_port: ::in_port_t,
        pub sin_addr: ::in_addr,
        pub sin_zero: [::c_char; 8],
    }
    pub struct sockaddr_in6 {
        pub sin6_family: ::sa_family_t,
        pub sin6_port: ::in_port_t,
        pub sin6_flowinfo: u32,
        pub sin6_addr: ::in6_addr,
        pub sin6_scope_id: u32,
    }
    pub struct stat {
        pub st_dev: ::dev_t,
        pub st_ino: ::ino_t,
        pub st_nlink: ::nlink_t,
        pub st_mode: ::mode_t,
        pub st_uid: ::uid_t,
        pub st_gid: ::gid_t,
        pub st_rdev: ::dev_t,
        pub st_size: ::off_t,
        pub st_blksize: ::blksize_t,
        pub st_blocks: ::blkcnt_t,
        pub st_atime: ::time_t,
        pub st_atime_nsec: ::c_long,
        pub st_mtime: ::time_t,
        pub st_mtime_nsec: ::c_long,
        pub st_ctime: ::time_t,
        pub st_ctime_nsec: ::c_long,
        _pad: [::c_char; 24],
    }
    pub struct statvfs {
        pub f_bsize: ::c_ulong,
        pub f_frsize: ::c_ulong,
        pub f_blocks: ::fsblkcnt_t,
        pub f_bfree: ::fsblkcnt_t,
        pub f_bavail: ::fsblkcnt_t,
        pub f_files: ::fsfilcnt_t,
        pub f_ffree: ::fsfilcnt_t,
        pub f_favail: ::fsfilcnt_t,
        pub f_fsid: ::c_ulong,
        pub f_flag: ::c_ulong,
        pub f_namemax: ::c_ulong,
    }
    pub struct termios {
        pub c_iflag: ::tcflag_t,
        pub c_oflag: ::tcflag_t,
        pub c_cflag: ::tcflag_t,
        pub c_lflag: ::tcflag_t,
        pub c_line: ::cc_t,
        pub c_cc: [::cc_t; ::NCCS],
        pub c_ispeed: ::speed_t,
        pub c_ospeed: ::speed_t,
    }
    pub struct tm {
        pub tm_sec: ::c_int,
        pub tm_min: ::c_int,
        pub tm_hour: ::c_int,
        pub tm_mday: ::c_int,
        pub tm_mon: ::c_int,
        pub tm_year: ::c_int,
        pub tm_wday: ::c_int,
        pub tm_yday: ::c_int,
        pub tm_isdst: ::c_int,
        pub tm_gmtoff: ::c_long,
        pub tm_zone: *const ::c_char,
    }
}
// Length of each string field in `utsname`.
pub const UTSLENGTH: usize = 65;
// intentionally not public, only used for fd_set
// Bits in a C `unsigned long` on this target, selected by pointer width.
cfg_if! {
    if #[cfg(target_pointer_width = "32")] {
        const ULONG_SIZE: usize = 32;
    } else if #[cfg(target_pointer_width = "64")] {
        const ULONG_SIZE: usize = 64;
    } else {
        // Unknown target_pointer_width
    }
}
// limits.h
pub const PATH_MAX: ::c_int = 4096;
// fcntl.h
pub const F_GETLK: ::c_int = 5;
pub const F_SETLK: ::c_int = 6;
pub const F_SETLKW: ::c_int = 7;
// FIXME: relibc {
pub const RTLD_DEFAULT: *mut ::c_void = 0i64 as *mut ::c_void;
// }
// dlfcn.h
pub const RTLD_LAZY: ::c_int = 0x0001;
pub const RTLD_NOW: ::c_int = 0x0002;
pub const RTLD_GLOBAL: ::c_int = 0x0100;
pub const RTLD_LOCAL: ::c_int = 0x0000;
// errno.h
pub const EPERM: ::c_int = 1; /* Operation not permitted */
pub const ENOENT: ::c_int = 2; /* No such file or directory */
pub const ESRCH: ::c_int = 3; /* No such process */
pub const EINTR: ::c_int = 4; /* Interrupted system call */
pub const EIO: ::c_int = 5; /* I/O error */
pub const ENXIO: ::c_int = 6; /* No such device or address */
pub const E2BIG: ::c_int = 7; /* Argument list too long */
pub const ENOEXEC: ::c_int = 8; /* Exec format error */
pub const EBADF: ::c_int = 9; /* Bad file number */
pub const ECHILD: ::c_int = 10; /* No child processes */
pub const EAGAIN: ::c_int = 11; /* Try again */
pub const ENOMEM: ::c_int = 12; /* Out of memory */
pub const EACCES: ::c_int = 13; /* Permission denied */
pub const EFAULT: ::c_int = 14; /* Bad address */
pub const ENOTBLK: ::c_int = 15; /* Block device required */
pub const EBUSY: ::c_int = 16; /* Device or resource busy */
pub const EEXIST: ::c_int = 17; /* File exists */
pub const EXDEV: ::c_int = 18; /* Cross-device link */
pub const ENODEV: ::c_int = 19; /* No such device */
pub const ENOTDIR: ::c_int = 20; /* Not a directory */
pub const EISDIR: ::c_int = 21; /* Is a directory */
pub const EINVAL: ::c_int = 22; /* Invalid argument */
pub const ENFILE: ::c_int = 23; /* File table overflow */
pub const EMFILE: ::c_int = 24; /* Too many open files */
pub const ENOTTY: ::c_int = 25; /* Not a typewriter */
pub const ETXTBSY: ::c_int = 26; /* Text file busy */
pub const EFBIG: ::c_int = 27; /* File too large */
pub const ENOSPC: ::c_int = 28; /* No space left on device */
pub const ESPIPE: ::c_int = 29; /* Illegal seek */
pub const EROFS: ::c_int = 30; /* Read-only file system */
pub const EMLINK: ::c_int = 31; /* Too many links */
pub const EPIPE: ::c_int = 32; /* Broken pipe */
pub const EDOM: ::c_int = 33; /* Math argument out of domain of func */
pub const ERANGE: ::c_int = 34; /* Math result not representable */
pub const EDEADLK: ::c_int = 35; /* Resource deadlock would occur */
pub const ENAMETOOLONG: ::c_int = 36; /* File name too long */
pub const ENOLCK: ::c_int = 37; /* No record locks available */
pub const ENOSYS: ::c_int = 38; /* Function not implemented */
pub const ENOTEMPTY: ::c_int = 39; /* Directory not empty */
pub const ELOOP: ::c_int = 40; /* Too many symbolic links encountered */
pub const EWOULDBLOCK: ::c_int = 41; /* Operation would block */
pub const ENOMSG: ::c_int = 42; /* No message of desired type */
pub const EIDRM: ::c_int = 43; /* Identifier removed */
pub const ECHRNG: ::c_int = 44; /* Channel number out of range */
pub const EL2NSYNC: ::c_int = 45; /* Level 2 not synchronized */
pub const EL3HLT: ::c_int = 46; /* Level 3 halted */
pub const EL3RST: ::c_int = 47; /* Level 3 reset */
pub const ELNRNG: ::c_int = 48; /* Link number out of range */
pub const EUNATCH: ::c_int = 49; /* Protocol driver not attached */
pub const ENOCSI: ::c_int = 50; /* No CSI structure available */
pub const EL2HLT: ::c_int = 51; /* Level 2 halted */
pub const EBADE: ::c_int = 52; /* Invalid exchange */
pub const EBADR: ::c_int = 53; /* Invalid request descriptor */
pub const EXFULL: ::c_int = 54; /* Exchange full */
pub const ENOANO: ::c_int = 55; /* No anode */
pub const EBADRQC: ::c_int = 56; /* Invalid request code */
pub const EBADSLT: ::c_int = 57; /* Invalid slot */
pub const EDEADLOCK: ::c_int = 58; /* Resource deadlock would occur */
pub const EBFONT: ::c_int = 59; /* Bad font file format */
pub const ENOSTR: ::c_int = 60; /* Device not a stream */
pub const ENODATA: ::c_int = 61; /* No data available */
pub const ETIME: ::c_int = 62; /* Timer expired */
pub const ENOSR: ::c_int = 63; /* Out of streams resources */
pub const ENONET: ::c_int = 64; /* Machine is not on the network */
pub const ENOPKG: ::c_int = 65; /* Package not installed */
pub const EREMOTE: ::c_int = 66; /* Object is remote */
pub const ENOLINK: ::c_int = 67; /* Link has been severed */
pub const EADV: ::c_int = 68; /* Advertise error */
pub const ESRMNT: ::c_int = 69; /* Srmount error */
pub const ECOMM: ::c_int = 70; /* Communication error on send */
pub const EPROTO: ::c_int = 71; /* Protocol error */
pub const EMULTIHOP: ::c_int = 72; /* Multihop attempted */
pub const EDOTDOT: ::c_int = 73; /* RFS specific error */
pub const EBADMSG: ::c_int = 74; /* Not a data message */
pub const EOVERFLOW: ::c_int = 75; /* Value too large for defined data type */
pub const ENOTUNIQ: ::c_int = 76; /* Name not unique on network */
pub const EBADFD: ::c_int = 77; /* File descriptor in bad state */
pub const EREMCHG: ::c_int = 78; /* Remote address changed */
pub const ELIBACC: ::c_int = 79; /* Can not access a needed shared library */
pub const ELIBBAD: ::c_int = 80; /* Accessing a corrupted shared library */
pub const ELIBSCN: ::c_int = 81; /* .lib section in a.out corrupted */
/* Attempting to link in too many shared libraries */
pub const ELIBMAX: ::c_int = 82;
pub const ELIBEXEC: ::c_int = 83; /* Cannot exec a shared library directly */
pub const EILSEQ: ::c_int = 84; /* Illegal byte sequence */
/* Interrupted system call should be restarted */
pub const ERESTART: ::c_int = 85;
pub const ESTRPIPE: ::c_int = 86; /* Streams pipe error */
pub const EUSERS: ::c_int = 87; /* Too many users */
pub const ENOTSOCK: ::c_int = 88; /* Socket operation on non-socket */
pub const EDESTADDRREQ: ::c_int = 89; /* Destination address required */
pub const EMSGSIZE: ::c_int = 90; /* Message too long */
pub const EPROTOTYPE: ::c_int = 91; /* Protocol wrong type for socket */
pub const ENOPROTOOPT: ::c_int = 92; /* Protocol not available */
pub const EPROTONOSUPPORT: ::c_int = 93; /* Protocol not supported */
pub const ESOCKTNOSUPPORT: ::c_int = 94; /* Socket type not supported */
/* Operation not supported on transport endpoint */
pub const EOPNOTSUPP: ::c_int = 95;
pub const EPFNOSUPPORT: ::c_int = 96; /* Protocol family not supported */
/* Address family not supported by protocol */
pub const EAFNOSUPPORT: ::c_int = 97;
pub const EADDRINUSE: ::c_int = 98; /* Address already in use */
pub const EADDRNOTAVAIL: ::c_int = 99; /* Cannot assign requested address */
pub const ENETDOWN: ::c_int = 100; /* Network is down */
pub const ENETUNREACH: ::c_int = 101; /* Network is unreachable */
/* Network dropped connection because of reset */
pub const ENETRESET: ::c_int = 102;
pub const ECONNABORTED: ::c_int = 103; /* Software caused connection abort */
pub const ECONNRESET: ::c_int = 104; /* Connection reset by peer */
pub const ENOBUFS: ::c_int = 105; /* No buffer space available */
pub const EISCONN: ::c_int = 106; /* Transport endpoint is already connected */
pub const ENOTCONN: ::c_int = 107; /* Transport endpoint is not connected */
/* Cannot send after transport endpoint shutdown */
pub const ESHUTDOWN: ::c_int = 108;
pub const ETOOMANYREFS: ::c_int = 109; /* Too many references: cannot splice */
pub const ETIMEDOUT: ::c_int = 110; /* Connection timed out */
pub const ECONNREFUSED: ::c_int = 111; /* Connection refused */
pub const EHOSTDOWN: ::c_int = 112; /* Host is down */
pub const EHOSTUNREACH: ::c_int = 113; /* No route to host */
pub const EALREADY: ::c_int = 114; /* Operation already in progress */
pub const EINPROGRESS: ::c_int = 115; /* Operation now in progress */
pub const ESTALE: ::c_int = 116; /* Stale NFS file handle */
pub const EUCLEAN: ::c_int = 117; /* Structure needs cleaning */
pub const ENOTNAM: ::c_int = 118; /* Not a XENIX named type file */
pub const ENAVAIL: ::c_int = 119; /* No XENIX semaphores available */
pub const EISNAM: ::c_int = 120; /* Is a named type file */
pub const EREMOTEIO: ::c_int = 121; /* Remote I/O error */
pub const EDQUOT: ::c_int = 122; /* Quota exceeded */
pub const ENOMEDIUM: ::c_int = 123; /* No medium found */
pub const EMEDIUMTYPE: ::c_int = 124; /* Wrong medium type */
pub const ECANCELED: ::c_int = 125; /* Operation Canceled */
pub const ENOKEY: ::c_int = 126; /* Required key not available */
pub const EKEYEXPIRED: ::c_int = 127; /* Key has expired */
pub const EKEYREVOKED: ::c_int = 128; /* Key has been revoked */
pub const EKEYREJECTED: ::c_int = 129; /* Key was rejected by service */
pub const EOWNERDEAD: ::c_int = 130; /* Owner died */
pub const ENOTRECOVERABLE: ::c_int = 131; /* State not recoverable */
// fcntl.h
pub const F_DUPFD: ::c_int = 0;
pub const F_GETFD: ::c_int = 1;
pub const F_SETFD: ::c_int = 2;
pub const F_GETFL: ::c_int = 3;
pub const F_SETFL: ::c_int = 4;
// FIXME: relibc {
pub const F_DUPFD_CLOEXEC: ::c_int = ::F_DUPFD;
// }
pub const FD_CLOEXEC: ::c_int = 0x0100_0000;
pub const O_RDONLY: ::c_int = 0x0001_0000;
pub const O_WRONLY: ::c_int = 0x0002_0000;
pub const O_RDWR: ::c_int = 0x0003_0000;
pub const O_ACCMODE: ::c_int = 0x0003_0000;
pub const O_NONBLOCK: ::c_int = 0x0004_0000;
pub const O_APPEND: ::c_int = 0x0008_0000;
pub const O_SHLOCK: ::c_int = 0x0010_0000;
pub const O_EXLOCK: ::c_int = 0x0020_0000;
pub const O_ASYNC: ::c_int = 0x0040_0000;
pub const O_FSYNC: ::c_int = 0x0080_0000;
pub const O_CLOEXEC: ::c_int = 0x0100_0000;
pub const O_CREAT: ::c_int = 0x0200_0000;
pub const O_TRUNC: ::c_int = 0x0400_0000;
pub const O_EXCL: ::c_int = 0x0800_0000;
pub const O_DIRECTORY: ::c_int = 0x1000_0000;
pub const O_PATH: ::c_int = 0x2000_0000;
pub const O_SYMLINK: ::c_int = 0x4000_0000;
// Negative to allow it to be used as int
// FIXME: Fix negative values missing from includes
pub const O_NOFOLLOW: ::c_int = -0x8000_0000;
// netdb.h
pub const EAI_SYSTEM: ::c_int = -11;
// netinet/in.h
// FIXME: relibc {
pub const IP_TTL: ::c_int = 2;
pub const IPV6_UNICAST_HOPS: ::c_int = 16;
pub const IPV6_MULTICAST_IF: ::c_int = 17;
pub const IPV6_MULTICAST_HOPS: ::c_int = 18;
pub const IPV6_MULTICAST_LOOP: ::c_int = 19;
pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20;
pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21;
pub const IPV6_V6ONLY: ::c_int = 26;
pub const IP_MULTICAST_IF: ::c_int = 32;
pub const IP_MULTICAST_TTL: ::c_int = 33;
pub const IP_MULTICAST_LOOP: ::c_int = 34;
pub const IP_ADD_MEMBERSHIP: ::c_int = 35;
pub const IP_DROP_MEMBERSHIP: ::c_int = 36;
// }
// netinet/tcp.h
pub const TCP_NODELAY: ::c_int = 1;
// FIXME: relibc {
pub const TCP_KEEPIDLE: ::c_int = 1;
// }
// poll.h
pub const POLLIN: ::c_short = 0x001;
pub const POLLPRI: ::c_short = 0x002;
pub const POLLOUT: ::c_short = 0x004;
pub const POLLERR: ::c_short = 0x008;
pub const POLLHUP: ::c_short = 0x010;
pub const POLLNVAL: ::c_short = 0x020;
// pthread.h
pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0;
pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1;
pub const PTHREAD_MUTEX_INITIALIZER: ::pthread_mutex_t = -1isize as *mut _;
pub const PTHREAD_COND_INITIALIZER: ::pthread_cond_t = -1isize as *mut _;
pub const PTHREAD_RWLOCK_INITIALIZER: ::pthread_rwlock_t = -1isize as *mut _;
pub const PTHREAD_STACK_MIN: ::size_t = 4096;
// signal.h
pub const SIG_BLOCK: ::c_int = 0;
pub const SIG_UNBLOCK: ::c_int = 1;
pub const SIG_SETMASK: ::c_int = 2;
pub const SIGHUP: ::c_int = 1;
pub const SIGINT: ::c_int = 2;
pub const SIGQUIT: ::c_int = 3;
pub const SIGILL: ::c_int = 4;
pub const SIGTRAP: ::c_int = 5;
pub const SIGABRT: ::c_int = 6;
pub const SIGBUS: ::c_int = 7;
pub const SIGFPE: ::c_int = 8;
pub const SIGKILL: ::c_int = 9;
pub const SIGUSR1: ::c_int = 10;
pub const SIGSEGV: ::c_int = 11;
pub const SIGUSR2: ::c_int = 12;
pub const SIGPIPE: ::c_int = 13;
pub const SIGALRM: ::c_int = 14;
pub const SIGTERM: ::c_int = 15;
pub const SIGSTKFLT: ::c_int = 16;
pub const SIGCHLD: ::c_int = 17;
pub const SIGCONT: ::c_int = 18;
pub const SIGSTOP: ::c_int = 19;
pub const SIGTSTP: ::c_int = 20;
pub const SIGTTIN: ::c_int = 21;
pub const SIGTTOU: ::c_int = 22;
pub const SIGURG: ::c_int = 23;
pub const SIGXCPU: ::c_int = 24;
pub const SIGXFSZ: ::c_int = 25;
pub const SIGVTALRM: ::c_int = 26;
pub const SIGPROF: ::c_int = 27;
pub const SIGWINCH: ::c_int = 28;
pub const SIGIO: ::c_int = 29;
pub const SIGPWR: ::c_int = 30;
pub const SIGSYS: ::c_int = 31;
pub const NSIG: ::c_int = 32;
pub const SA_NOCLDSTOP: ::c_ulong = 0x00000001;
pub const SA_NOCLDWAIT: ::c_ulong = 0x00000002;
pub const SA_SIGINFO: ::c_ulong = 0x00000004;
pub const SA_RESTORER: ::c_ulong = 0x04000000;
pub const SA_ONSTACK: ::c_ulong = 0x08000000;
pub const SA_RESTART: ::c_ulong = 0x10000000;
pub const SA_NODEFER: ::c_ulong = 0x40000000;
pub const SA_RESETHAND: ::c_ulong = 0x80000000;
// sys/epoll.h
pub const EPOLL_CLOEXEC: ::c_int = 0x0100_0000;
pub const EPOLL_CTL_ADD: ::c_int = 1;
pub const EPOLL_CTL_DEL: ::c_int = 2;
pub const EPOLL_CTL_MOD: ::c_int = 3;
pub const EPOLLIN: ::c_int = 1;
pub const EPOLLPRI: ::c_int = 0;
pub const EPOLLOUT: ::c_int = 2;
pub const EPOLLRDNORM: ::c_int = 0;
pub const EPOLLNVAL: ::c_int = 0;
pub const EPOLLRDBAND: ::c_int = 0;
pub const EPOLLWRNORM: ::c_int = 0;
pub const EPOLLWRBAND: ::c_int = 0;
pub const EPOLLMSG: ::c_int = 0;
pub const EPOLLERR: ::c_int = 0;
pub const EPOLLHUP: ::c_int = 0;
pub const EPOLLRDHUP: ::c_int = 0;
pub const EPOLLEXCLUSIVE: ::c_int = 0;
pub const EPOLLWAKEUP: ::c_int = 0;
pub const EPOLLONESHOT: ::c_int = 0;
pub const EPOLLET: ::c_int = 0;
// sys/stat.h
pub const S_IFMT: ::c_int = 0o0_170_000;
pub const S_IFDIR: ::c_int = 0o040_000;
pub const S_IFCHR: ::c_int = 0o020_000;
pub const S_IFBLK: ::c_int = 0o060_000;
pub const S_IFREG: ::c_int = 0o100_000;
pub const S_IFIFO: ::c_int = 0o010_000;
pub const S_IFLNK: ::c_int = 0o120_000;
pub const S_IFSOCK: ::c_int = 0o140_000;
pub const S_IRWXU: ::c_int = 0o0_700;
pub const S_IRUSR: ::c_int = 0o0_400;
pub const S_IWUSR: ::c_int = 0o0_200;
pub const S_IXUSR: ::c_int = 0o0_100;
pub const S_IRWXG: ::c_int = 0o0_070;
pub const S_IRGRP: ::c_int = 0o0_040;
pub const S_IWGRP: ::c_int = 0o0_020;
pub const S_IXGRP: ::c_int = 0o0_010;
pub const S_IRWXO: ::c_int = 0o0_007;
pub const S_IROTH: ::c_int = 0o0_004;
pub const S_IWOTH: ::c_int = 0o0_002;
pub const S_IXOTH: ::c_int = 0o0_001;
// stdlib.h
pub const EXIT_SUCCESS: ::c_int = 0;
pub const EXIT_FAILURE: ::c_int = 1;
// sys/ioctl.h
// FIXME: relibc {
pub const FIONBIO: ::c_ulong = 0x5421;
pub const FIOCLEX: ::c_ulong = 0x5451;
// }
pub const TCGETS: ::c_ulong = 0x5401;
pub const TCSETS: ::c_ulong = 0x5402;
pub const TCFLSH: ::c_ulong = 0x540B;
pub const TIOCGPGRP: ::c_ulong = 0x540F;
pub const TIOCSPGRP: ::c_ulong = 0x5410;
pub const TIOCGWINSZ: ::c_ulong = 0x5413;
pub const TIOCSWINSZ: ::c_ulong = 0x5414;
// sys/mman.h
pub const PROT_NONE: ::c_int = 0x0000;
pub const PROT_READ: ::c_int = 0x0004;
pub const PROT_WRITE: ::c_int = 0x0002;
pub const PROT_EXEC: ::c_int = 0x0001;
pub const MAP_SHARED: ::c_int = 0x0001;
pub const MAP_PRIVATE: ::c_int = 0x0002;
pub const MAP_ANON: ::c_int = 0x0020;
pub const MAP_ANONYMOUS: ::c_int = MAP_ANON;
pub const MAP_FIXED: ::c_int = 0x0010;
pub const MAP_FAILED: *mut ::c_void = !0 as _;
pub const MS_ASYNC: ::c_int = 0x0001;
pub const MS_INVALIDATE: ::c_int = 0x0002;
pub const MS_SYNC: ::c_int = 0x0004;
// sys/select.h
pub const FD_SETSIZE: usize = 1024;
// sys/socket.h
pub const AF_UNIX: ::c_int = 1;
pub const AF_INET: ::c_int = 2;
pub const AF_INET6: ::c_int = 10;
pub const MSG_PEEK: ::c_int = 2;
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
pub const SO_REUSEADDR: ::c_int = 2;
pub const SO_ERROR: ::c_int = 4;
pub const SO_BROADCAST: ::c_int = 6;
pub const SO_SNDBUF: ::c_int = 7;
pub const SO_RCVBUF: ::c_int = 8;
pub const SO_KEEPALIVE: ::c_int = 9;
pub const SO_LINGER: ::c_int = 13;
pub const SO_REUSEPORT: ::c_int = 15;
pub const SO_RCVTIMEO: ::c_int = 20;
pub const SO_SNDTIMEO: ::c_int = 21;
pub const SOCK_STREAM: ::c_int = 1;
pub const SOCK_DGRAM: ::c_int = 2;
pub const SOL_SOCKET: ::c_int = 1;
// sys/termios.h
pub const NCCS: usize = 32;
pub const VINTR: usize = 0;
pub const VQUIT: usize = 1;
pub const VERASE: usize = 2;
pub const VKILL: usize = 3;
pub const VEOF: usize = 4;
pub const VTIME: usize = 5;
pub const VMIN: usize = 6;
pub const VSWTC: usize = 7;
pub const VSTART: usize = 8;
pub const VSTOP: usize = 9;
pub const VSUSP: usize = 10;
pub const VEOL: usize = 11;
pub const VREPRINT: usize = 12;
pub const VDISCARD: usize = 13;
pub const VWERASE: usize = 14;
pub const VLNEXT: usize = 15;
pub const VEOL2: usize = 16;
pub const IGNBRK: ::tcflag_t = 0o000_001;
pub const BRKINT: ::tcflag_t = 0o000_002;
pub const IGNPAR: ::tcflag_t = 0o000_004;
pub const PARMRK: ::tcflag_t = 0o000_010;
pub const INPCK: ::tcflag_t = 0o000_020;
pub const ISTRIP: ::tcflag_t = 0o000_040;
pub const INLCR: ::tcflag_t = 0o000_100;
pub const IGNCR: ::tcflag_t = 0o000_200;
pub const ICRNL: ::tcflag_t = 0o000_400;
pub const IUCLC: ::tcflag_t = 0o001_000;
pub const IXON: ::tcflag_t = 0o002_000;
pub const IXANY: ::tcflag_t = 0o004_000;
pub const IXOFF: ::tcflag_t = 0o010_000;
pub const IMAXBEL: ::tcflag_t = 0o020_000;
pub const IUTF8: ::tcflag_t = 0o040_000;
pub const OPOST: ::tcflag_t = 0o000_001;
pub const OLCUC: ::tcflag_t = 0o000_002;
pub const ONLCR: ::tcflag_t = 0o000_004;
pub const OCRNL: ::tcflag_t = 0o000_010;
pub const ONOCR: ::tcflag_t = 0o000_020;
pub const ONLRET: ::tcflag_t = 0o00_0040;
pub const OFILL: ::tcflag_t = 0o000_100;
pub const OFDEL: ::tcflag_t = 0o000_200;
pub const VTDLY: usize = 0o040_000;
pub const VT0: usize = 0o000_000;
pub const VT1: usize = 0o040_000;
pub const B0: speed_t = 0o000_000;
pub const B50: speed_t = 0o000_001;
pub const B75: speed_t = 0o000_002;
pub const B110: speed_t = 0o000_003;
pub const B134: speed_t = 0o000_004;
pub const B150: speed_t = 0o000_005;
pub const B200: speed_t = 0o000_006;
pub const B300: speed_t = 0o000_007;
pub const B600: speed_t = 0o000_010;
pub const B1200: speed_t = 0o000_011;
pub const B1800: speed_t = 0o000_012;
pub const B2400: speed_t = 0o000_013;
pub const B4800: speed_t = 0o000_014;
pub const B9600: speed_t = 0o000_015;
pub const B19200: speed_t = 0o000_016;
pub const B38400: speed_t = 0o000_017;
pub const B57600: speed_t = 0o010_001;
pub const B115200: speed_t = 0o010_002;
pub const B230400: speed_t = 0o010_003;
pub const B460800: speed_t = 0o010_004;
pub const B500000: speed_t = 0o010_005;
pub const B576000: speed_t = 0o010_006;
pub const B921600: speed_t = 0o010_007;
pub const B1000000: speed_t = 0o010_010;
pub const B1152000: speed_t = 0o010_011;
pub const B1500000: speed_t = 0o010_012;
pub const B2000000: speed_t = 0o010_013;
pub const B2500000: speed_t = 0o010_014;
pub const B3000000: speed_t = 0o010_015;
pub const B3500000: speed_t = 0o010_016;
pub const B4000000: speed_t = 0o010_017;
pub const CSIZE: ::tcflag_t = 0o000_060;
pub const CS5: ::tcflag_t = 0o000_000;
pub const CS6: ::tcflag_t = 0o000_020;
pub const CS7: ::tcflag_t = 0o000_040;
pub const CS8: ::tcflag_t = 0o000_060;
pub const CSTOPB: ::tcflag_t = 0o000_100;
pub const CREAD: ::tcflag_t = 0o000_200;
pub const PARENB: ::tcflag_t = 0o000_400;
pub const PARODD: ::tcflag_t = 0o001_000;
pub const HUPCL: ::tcflag_t = 0o002_000;
pub const CLOCAL: ::tcflag_t = 0o004_000;
pub const ISIG: ::tcflag_t = 0o000_001;
pub const ICANON: ::tcflag_t = 0o000_002;
pub const ECHO: ::tcflag_t = 0o000_010;
pub const ECHOE: ::tcflag_t = 0o000_020;
pub const ECHOK: ::tcflag_t = 0o000_040;
pub const ECHONL: ::tcflag_t = 0o000_100;
pub const NOFLSH: ::tcflag_t = 0o000_200;
pub const TOSTOP: ::tcflag_t = 0o000_400;
pub const IEXTEN: ::tcflag_t = 0o100_000;
pub const TCOOFF: ::c_int = 0;
pub const TCOON: ::c_int = 1;
pub const TCIOFF: ::c_int = 2;
pub const TCION: ::c_int = 3;
pub const TCIFLUSH: ::c_int = 0;
pub const TCOFLUSH: ::c_int = 1;
pub const TCIOFLUSH: ::c_int = 2;
pub const TCSANOW: ::c_int = 0;
pub const TCSADRAIN: ::c_int = 1;
pub const TCSAFLUSH: ::c_int = 2;
// sys/wait.h
pub const WNOHANG: ::c_int = 1;
pub const WUNTRACED: ::c_int = 2;
pub const WSTOPPED: ::c_int = 2;
pub const WEXITED: ::c_int = 4;
pub const WCONTINUED: ::c_int = 8;
pub const WNOWAIT: ::c_int = 0x0100_0000;
pub const __WNOTHREAD: ::c_int = 0x2000_0000;
pub const __WALL: ::c_int = 0x4000_0000;
#[allow(overflowing_literals)]
pub const __WCLONE: ::c_int = 0x8000_0000;
// time.h
pub const CLOCK_REALTIME: ::c_int = 1;
pub const CLOCK_MONOTONIC: ::c_int = 4;
// unistd.h
// POSIX.1 {
pub const _SC_ARG_MAX: ::c_int = 0;
pub const _SC_CHILD_MAX: ::c_int = 1;
pub const _SC_CLK_TCK: ::c_int = 2;
pub const _SC_NGROUPS_MAX: ::c_int = 3;
pub const _SC_OPEN_MAX: ::c_int = 4;
pub const _SC_STREAM_MAX: ::c_int = 5;
pub const _SC_TZNAME_MAX: ::c_int = 6;
// ...
pub const _SC_VERSION: ::c_int = 29;
pub const _SC_PAGESIZE: ::c_int = 30;
pub const _SC_PAGE_SIZE: ::c_int = 30;
// ...
pub const _SC_RE_DUP_MAX: ::c_int = 44;
// ...
pub const _SC_LOGIN_NAME_MAX: ::c_int = 71;
pub const _SC_TTY_NAME_MAX: ::c_int = 72;
// ...
pub const _SC_SYMLOOP_MAX: ::c_int = 173;
// ...
pub const _SC_HOST_NAME_MAX: ::c_int = 180;
// } POSIX.1
pub const F_OK: ::c_int = 0;
pub const R_OK: ::c_int = 4;
pub const W_OK: ::c_int = 2;
pub const X_OK: ::c_int = 1;
pub const SEEK_SET: ::c_int = 0;
pub const SEEK_CUR: ::c_int = 1;
pub const SEEK_END: ::c_int = 2;
pub const STDIN_FILENO: ::c_int = 0;
pub const STDOUT_FILENO: ::c_int = 1;
pub const STDERR_FILENO: ::c_int = 2;
pub const _PC_LINK_MAX: ::c_int = 0;
pub const _PC_MAX_CANON: ::c_int = 1;
pub const _PC_MAX_INPUT: ::c_int = 2;
pub const _PC_NAME_MAX: ::c_int = 3;
pub const _PC_PATH_MAX: ::c_int = 4;
pub const _PC_PIPE_BUF: ::c_int = 5;
pub const _PC_CHOWN_RESTRICTED: ::c_int = 6;
pub const _PC_NO_TRUNC: ::c_int = 7;
pub const _PC_VDISABLE: ::c_int = 8;
pub const _PC_SYNC_IO: ::c_int = 9;
pub const _PC_ASYNC_IO: ::c_int = 10;
pub const _PC_PRIO_IO: ::c_int = 11;
pub const _PC_SOCK_MAXBUF: ::c_int = 12;
pub const _PC_FILESIZEBITS: ::c_int = 13;
pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 14;
pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 15;
pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 16;
pub const _PC_REC_XFER_ALIGN: ::c_int = 17;
pub const _PC_ALLOC_SIZE_MIN: ::c_int = 18;
pub const _PC_SYMLINK_MAX: ::c_int = 19;
pub const _PC_2_SYMLINKS: ::c_int = 20;
pub const PRIO_PROCESS: ::c_int = 0;
pub const PRIO_PGRP: ::c_int = 1;
pub const PRIO_USER: ::c_int = 2;
// wait.h
f! {
    /// Remove file descriptor `fd` from `set` (POSIX `FD_CLR`).
    pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
        let fd = fd as usize;
        // Bits per `fds_bits` element (width of `c_ulong` in bits).
        let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
        // Clear the bit for `fd` in the word that holds it.
        (*set).fds_bits[fd / size] &= !(1 << (fd % size));
        return
    }
    /// Test whether file descriptor `fd` is a member of `set`
    /// (POSIX `FD_ISSET`).
    pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
        let fd = fd as usize;
        // Bits per `fds_bits` element (width of `c_ulong` in bits).
        let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
        return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0
    }
    /// Add file descriptor `fd` to `set` (POSIX `FD_SET`).
    pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
        let fd = fd as usize;
        // Bits per `fds_bits` element (width of `c_ulong` in bits).
        let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
        (*set).fds_bits[fd / size] |= 1 << (fd % size);
        return
    }
    /// Clear every file descriptor in `set` (POSIX `FD_ZERO`).
    pub fn FD_ZERO(set: *mut fd_set) -> () {
        for slot in (*set).fds_bits.iter_mut() {
            *slot = 0;
        }
    }
}
safe_f! {
    // Wait-status decoding macros (POSIX <sys/wait.h>). The status word
    // layout mirrors the conventional Linux/glibc encoding: low 7 bits hold
    // the terminating signal (0 = normal exit, 0x7f = stopped), bit 0x80 is
    // the core-dump flag, and bits 8-15 hold the exit code or stop signal.

    /// True if the child is currently stopped (low byte == 0x7f).
    pub {const} fn WIFSTOPPED(status: ::c_int) -> bool {
        (status & 0xff) == 0x7f
    }
    /// Signal that stopped the child; valid only if `WIFSTOPPED`.
    pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int {
        (status >> 8) & 0xff
    }
    /// True if the child was resumed by SIGCONT (sentinel value 0xffff).
    pub {const} fn WIFCONTINUED(status: ::c_int) -> bool {
        status == 0xffff
    }
    /// True if the child was terminated by a signal. The `+ 1` / `as i8`
    /// trick maps 0 (exited) to 1 and 0x7f (stopped) to a negative i8,
    /// so only genuine signal numbers (1..=0x7e) satisfy `>= 2`.
    pub {const} fn WIFSIGNALED(status: ::c_int) -> bool {
        ((status & 0x7f) + 1) as i8 >= 2
    }
    /// Terminating signal number; valid only if `WIFSIGNALED`.
    pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int {
        status & 0x7f
    }
    /// True if the child exited normally (no signal bits set).
    pub {const} fn WIFEXITED(status: ::c_int) -> bool {
        (status & 0x7f) == 0
    }
    /// Exit code passed to `exit()`; valid only if `WIFEXITED`.
    pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int {
        (status >> 8) & 0xff
    }
    /// True if the terminated child produced a core dump (bit 0x80).
    pub {const} fn WCOREDUMP(status: ::c_int) -> bool {
        (status & 0x80) != 0
    }
}
extern "C" {
// errno.h
pub fn __errno_location() -> *mut ::c_int;
pub fn strerror_r(
errnum: ::c_int,
buf: *mut c_char,
buflen: ::size_t,
) -> ::c_int;
// unistd.h
pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int;
// malloc.h
pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
// pthread.h
pub fn pthread_atfork(
prepare: ::Option<unsafe extern "C" fn()>,
parent: ::Option<unsafe extern "C" fn()>,
child: ::Option<unsafe extern "C" fn()>,
) -> ::c_int;
pub fn pthread_create(
tid: *mut ::pthread_t,
attr: *const ::pthread_attr_t,
start: extern "C" fn(*mut ::c_void) -> *mut ::c_void,
arg: *mut ::c_void,
) -> ::c_int;
pub fn pthread_condattr_setclock(
attr: *mut pthread_condattr_t,
clock_id: ::clockid_t,
) -> ::c_int;
// pwd.h
pub fn getpwuid_r(
uid: ::uid_t,
pwd: *mut passwd,
buf: *mut ::c_char,
buflen: ::size_t,
result: *mut *mut passwd,
) -> ::c_int;
// signal.h
pub fn pthread_sigmask(
how: ::c_int,
set: *const ::sigset_t,
oldset: *mut ::sigset_t,
) -> ::c_int;
// sys/epoll.h
pub fn epoll_create(size: ::c_int) -> ::c_int;
pub fn epoll_create1(flags: ::c_int) -> ::c_int;
pub fn epoll_wait(
epfd: ::c_int,
events: *mut ::epoll_event,
maxevents: ::c_int,
timeout: ::c_int,
) -> ::c_int;
pub fn epoll_ctl(
epfd: ::c_int,
op: ::c_int,
fd: ::c_int,
event: *mut ::epoll_event,
) -> ::c_int;
// sys/ioctl.h
pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int;
// sys/mman.h
pub fn msync(
addr: *mut ::c_void,
len: ::size_t,
flags: ::c_int,
) -> ::c_int;
pub fn mprotect(
addr: *mut ::c_void,
len: ::size_t,
prot: ::c_int,
) -> ::c_int;
pub fn shm_open(
name: *const c_char,
oflag: ::c_int,
mode: mode_t,
) -> ::c_int;
pub fn shm_unlink(name: *const ::c_char) -> ::c_int;
// sys/resource.h
pub fn getrlimit(resource: ::c_int, rlim: *mut ::rlimit) -> ::c_int;
pub fn setrlimit(resource: ::c_int, rlim: *const ::rlimit) -> ::c_int;
// sys/socket.h
pub fn bind(
socket: ::c_int,
address: *const ::sockaddr,
address_len: ::socklen_t,
) -> ::c_int;
pub fn recvfrom(
socket: ::c_int,
buf: *mut ::c_void,
len: ::size_t,
flags: ::c_int,
addr: *mut ::sockaddr,
addrlen: *mut ::socklen_t,
) -> ::ssize_t;
// sys/stat.h
pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
// sys/uio.h
pub fn readv(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
) -> ::ssize_t;
pub fn writev(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
) -> ::ssize_t;
// sys/utsname.h
pub fn uname(utsname: *mut utsname) -> ::c_int;
// time.h
pub fn gettimeofday(tp: *mut ::timeval, tz: *mut ::timezone) -> ::c_int;
pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int;
}
cfg_if! {
if #[cfg(feature = "extra_traits")] {
impl PartialEq for dirent {
fn eq(&self, other: &dirent) -> bool {
self.d_ino == other.d_ino
&& self.d_off == other.d_off
&& self.d_reclen == other.d_reclen
&& self.d_type == other.d_type
&& self
.d_name
.iter()
.zip(other.d_name.iter())
.all(|(a,b)| a == b)
}
}
impl Eq for dirent {}
impl ::fmt::Debug for dirent {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("dirent")
.field("d_ino", &self.d_ino)
.field("d_off", &self.d_off)
.field("d_reclen", &self.d_reclen)
.field("d_type", &self.d_type)
// FIXME: .field("d_name", &self.d_name)
.finish()
}
}
impl ::hash::Hash for dirent {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.d_ino.hash(state);
self.d_off.hash(state);
self.d_reclen.hash(state);
self.d_type.hash(state);
self.d_name.hash(state);
}
}
impl PartialEq for sockaddr_un {
fn eq(&self, other: &sockaddr_un) -> bool {
self.sun_family == other.sun_family
&& self
.sun_path
.iter()
.zip(other.sun_path.iter())
.all(|(a,b)| a == b)
}
}
impl Eq for sockaddr_un {}
impl ::fmt::Debug for sockaddr_un {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sockaddr_un")
.field("sun_family", &self.sun_family)
// FIXME: .field("sun_path", &self.sun_path)
.finish()
}
}
impl ::hash::Hash for sockaddr_un {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sun_family.hash(state);
self.sun_path.hash(state);
}
}
impl PartialEq for sockaddr_storage {
    /// Field-wise equality over family, alignment word, and padding bytes.
    ///
    /// Bug fix: the alignment check previously compared `self.__ss_align`
    /// against itself (`self.__ss_align == self.__ss_align`), which is
    /// always true — it now compares against `other` like every other field.
    fn eq(&self, other: &sockaddr_storage) -> bool {
        self.ss_family == other.ss_family
            && self.__ss_align == other.__ss_align
            && self
                .__ss_padding
                .iter()
                .zip(other.__ss_padding.iter())
                .all(|(a, b)| a == b)
    }
}
impl Eq for sockaddr_storage {}
impl ::fmt::Debug for sockaddr_storage {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sockaddr_storage")
.field("ss_family", &self.ss_family)
.field("__ss_align", &self.__ss_align)
// FIXME: .field("__ss_padding", &self.__ss_padding)
.finish()
}
}
impl ::hash::Hash for sockaddr_storage {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.ss_family.hash(state);
self.__ss_padding.hash(state);
self.__ss_align.hash(state);
}
}
impl PartialEq for utsname {
fn eq(&self, other: &utsname) -> bool {
self.sysname
.iter()
.zip(other.sysname.iter())
.all(|(a, b)| a == b)
&& self
.nodename
.iter()
.zip(other.nodename.iter())
.all(|(a, b)| a == b)
&& self
.release
.iter()
.zip(other.release.iter())
.all(|(a, b)| a == b)
&& self
.version
.iter()
.zip(other.version.iter())
.all(|(a, b)| a == b)
&& self
.machine
.iter()
.zip(other.machine.iter())
.all(|(a, b)| a == b)
&& self
.domainname
.iter()
.zip(other.domainname.iter())
.all(|(a, b)| a == b)
}
}
impl Eq for utsname {}
impl ::fmt::Debug for utsname {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("utsname")
// FIXME: .field("sysname", &self.sysname)
// FIXME: .field("nodename", &self.nodename)
// FIXME: .field("release", &self.release)
// FIXME: .field("version", &self.version)
// FIXME: .field("machine", &self.machine)
// FIXME: .field("domainname", &self.domainname)
.finish()
}
}
impl ::hash::Hash for utsname {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sysname.hash(state);
self.nodename.hash(state);
self.release.hash(state);
self.version.hash(state);
self.machine.hash(state);
self.domainname.hash(state);
}
}
}
}
// Auto merge of #1962 - coolreader18:redox-sock, r=JohnTitor
// Update redox socket constants
pub type c_char = i8;
pub type c_long = i64;
pub type c_ulong = u64;
pub type wchar_t = i32;
pub type blkcnt_t = ::c_ulong;
pub type blksize_t = ::c_long;
pub type clock_t = ::c_long;
pub type clockid_t = ::c_int;
pub type dev_t = ::c_long;
pub type fsblkcnt_t = ::c_ulong;
pub type fsfilcnt_t = ::c_ulong;
pub type ino_t = ::c_ulong;
pub type mode_t = ::c_int;
pub type nfds_t = ::c_ulong;
pub type nlink_t = ::c_ulong;
pub type off_t = ::c_long;
pub type pthread_t = *mut ::c_void;
pub type pthread_attr_t = *mut ::c_void;
pub type pthread_cond_t = *mut ::c_void;
pub type pthread_condattr_t = *mut ::c_void;
// Must be usize due to libstd/sys_common/thread_local.rs,
// should technically be *mut ::c_void
pub type pthread_key_t = usize;
pub type pthread_mutex_t = *mut ::c_void;
pub type pthread_mutexattr_t = *mut ::c_void;
pub type pthread_rwlock_t = *mut ::c_void;
pub type pthread_rwlockattr_t = *mut ::c_void;
pub type rlim_t = ::c_ulonglong;
pub type sa_family_t = u16;
pub type sem_t = *mut ::c_void;
pub type sigset_t = ::c_ulong;
pub type socklen_t = u32;
pub type speed_t = u32;
pub type suseconds_t = ::c_int;
pub type tcflag_t = u32;
pub type time_t = ::c_long;
#[cfg_attr(feature = "extra_traits", derive(Debug))]
// Zero-variant enum: a `timezone` value can never be constructed, so the
// type is only ever used behind raw pointers (e.g. the `tz` argument of
// `gettimeofday`).
pub enum timezone {}
impl ::Copy for timezone {}
impl ::Clone for timezone {
    fn clone(&self) -> timezone {
        // Statically unreachable: there are no values of this type to clone.
        *self
    }
}
s_no_extra_traits! {
#[repr(C)]
pub struct utsname {
pub sysname: [::c_char; UTSLENGTH],
pub nodename: [::c_char; UTSLENGTH],
pub release: [::c_char; UTSLENGTH],
pub version: [::c_char; UTSLENGTH],
pub machine: [::c_char; UTSLENGTH],
pub domainname: [::c_char; UTSLENGTH],
}
pub struct dirent {
pub d_ino: ::ino_t,
pub d_off: ::off_t,
pub d_reclen: ::c_ushort,
pub d_type: ::c_uchar,
pub d_name: [::c_char; 256],
}
pub struct sockaddr_un {
pub sun_family: ::sa_family_t,
pub sun_path: [::c_char; 108]
}
pub struct sockaddr_storage {
pub ss_family: ::sa_family_t,
__ss_padding: [
u8;
128 -
::core::mem::size_of::<sa_family_t>() -
::core::mem::size_of::<c_ulong>()
],
__ss_align: ::c_ulong,
}
}
s! {
pub struct addrinfo {
pub ai_flags: ::c_int,
pub ai_family: ::c_int,
pub ai_socktype: ::c_int,
pub ai_protocol: ::c_int,
pub ai_addrlen: ::size_t,
pub ai_canonname: *mut ::c_char,
pub ai_addr: *mut ::sockaddr,
pub ai_next: *mut ::addrinfo,
}
pub struct Dl_info {
pub dli_fname: *const ::c_char,
pub dli_fbase: *mut ::c_void,
pub dli_sname: *const ::c_char,
pub dli_saddr: *mut ::c_void,
}
pub struct epoll_event {
pub events: u32,
pub u64: u64,
pub _pad: u64,
}
pub struct fd_set {
fds_bits: [::c_ulong; ::FD_SETSIZE / ULONG_SIZE],
}
pub struct in_addr {
pub s_addr: ::in_addr_t,
}
pub struct ip_mreq {
pub imr_multiaddr: ::in_addr,
pub imr_interface: ::in_addr,
}
pub struct lconv {
pub currency_symbol: *const ::c_char,
pub decimal_point: *const ::c_char,
pub frac_digits: ::c_char,
pub grouping: *const ::c_char,
pub int_curr_symbol: *const ::c_char,
pub int_frac_digits: ::c_char,
pub mon_decimal_point: *const ::c_char,
pub mon_grouping: *const ::c_char,
pub mon_thousands_sep: *const ::c_char,
pub negative_sign: *const ::c_char,
pub n_cs_precedes: ::c_char,
pub n_sep_by_space: ::c_char,
pub n_sign_posn: ::c_char,
pub positive_sign: *const ::c_char,
pub p_cs_precedes: ::c_char,
pub p_sep_by_space: ::c_char,
pub p_sign_posn: ::c_char,
pub thousands_sep: *const ::c_char,
}
pub struct passwd {
pub pw_name: *mut ::c_char,
pub pw_passwd: *mut ::c_char,
pub pw_uid: ::uid_t,
pub pw_gid: ::gid_t,
pub pw_gecos: *mut ::c_char,
pub pw_dir: *mut ::c_char,
pub pw_shell: *mut ::c_char,
}
pub struct sigaction {
pub sa_handler: ::sighandler_t,
pub sa_flags: ::c_ulong,
pub sa_restorer: ::Option<extern fn()>,
pub sa_mask: ::sigset_t,
}
pub struct sockaddr {
pub sa_family: ::sa_family_t,
pub sa_data: [::c_char; 14],
}
pub struct sockaddr_in {
pub sin_family: ::sa_family_t,
pub sin_port: ::in_port_t,
pub sin_addr: ::in_addr,
pub sin_zero: [::c_char; 8],
}
pub struct sockaddr_in6 {
pub sin6_family: ::sa_family_t,
pub sin6_port: ::in_port_t,
pub sin6_flowinfo: u32,
pub sin6_addr: ::in6_addr,
pub sin6_scope_id: u32,
}
pub struct stat {
pub st_dev: ::dev_t,
pub st_ino: ::ino_t,
pub st_nlink: ::nlink_t,
pub st_mode: ::mode_t,
pub st_uid: ::uid_t,
pub st_gid: ::gid_t,
pub st_rdev: ::dev_t,
pub st_size: ::off_t,
pub st_blksize: ::blksize_t,
pub st_blocks: ::blkcnt_t,
pub st_atime: ::time_t,
pub st_atime_nsec: ::c_long,
pub st_mtime: ::time_t,
pub st_mtime_nsec: ::c_long,
pub st_ctime: ::time_t,
pub st_ctime_nsec: ::c_long,
_pad: [::c_char; 24],
}
pub struct statvfs {
pub f_bsize: ::c_ulong,
pub f_frsize: ::c_ulong,
pub f_blocks: ::fsblkcnt_t,
pub f_bfree: ::fsblkcnt_t,
pub f_bavail: ::fsblkcnt_t,
pub f_files: ::fsfilcnt_t,
pub f_ffree: ::fsfilcnt_t,
pub f_favail: ::fsfilcnt_t,
pub f_fsid: ::c_ulong,
pub f_flag: ::c_ulong,
pub f_namemax: ::c_ulong,
}
pub struct termios {
pub c_iflag: ::tcflag_t,
pub c_oflag: ::tcflag_t,
pub c_cflag: ::tcflag_t,
pub c_lflag: ::tcflag_t,
pub c_line: ::cc_t,
pub c_cc: [::cc_t; ::NCCS],
pub c_ispeed: ::speed_t,
pub c_ospeed: ::speed_t,
}
pub struct tm {
pub tm_sec: ::c_int,
pub tm_min: ::c_int,
pub tm_hour: ::c_int,
pub tm_mday: ::c_int,
pub tm_mon: ::c_int,
pub tm_year: ::c_int,
pub tm_wday: ::c_int,
pub tm_yday: ::c_int,
pub tm_isdst: ::c_int,
pub tm_gmtoff: ::c_long,
pub tm_zone: *const ::c_char,
}
}
pub const UTSLENGTH: usize = 65;
// intentionally not public, only used for fd_set
cfg_if! {
    if #[cfg(target_pointer_width = "32")] {
        // Bit width of `c_ulong`; used above to size `fd_set::fds_bits`
        // as `FD_SETSIZE / ULONG_SIZE` elements.
        const ULONG_SIZE: usize = 32;
    } else if #[cfg(target_pointer_width = "64")] {
        const ULONG_SIZE: usize = 64;
    } else {
        // Unknown target_pointer_width
    }
}
// limits.h
pub const PATH_MAX: ::c_int = 4096;
// fcntl.h
pub const F_GETLK: ::c_int = 5;
pub const F_SETLK: ::c_int = 6;
pub const F_SETLKW: ::c_int = 7;
// FIXME: relibc {
pub const RTLD_DEFAULT: *mut ::c_void = 0i64 as *mut ::c_void;
// }
// dlfcn.h
pub const RTLD_LAZY: ::c_int = 0x0001;
pub const RTLD_NOW: ::c_int = 0x0002;
pub const RTLD_GLOBAL: ::c_int = 0x0100;
pub const RTLD_LOCAL: ::c_int = 0x0000;
// errno.h
pub const EPERM: ::c_int = 1; /* Operation not permitted */
pub const ENOENT: ::c_int = 2; /* No such file or directory */
pub const ESRCH: ::c_int = 3; /* No such process */
pub const EINTR: ::c_int = 4; /* Interrupted system call */
pub const EIO: ::c_int = 5; /* I/O error */
pub const ENXIO: ::c_int = 6; /* No such device or address */
pub const E2BIG: ::c_int = 7; /* Argument list too long */
pub const ENOEXEC: ::c_int = 8; /* Exec format error */
pub const EBADF: ::c_int = 9; /* Bad file number */
pub const ECHILD: ::c_int = 10; /* No child processes */
pub const EAGAIN: ::c_int = 11; /* Try again */
pub const ENOMEM: ::c_int = 12; /* Out of memory */
pub const EACCES: ::c_int = 13; /* Permission denied */
pub const EFAULT: ::c_int = 14; /* Bad address */
pub const ENOTBLK: ::c_int = 15; /* Block device required */
pub const EBUSY: ::c_int = 16; /* Device or resource busy */
pub const EEXIST: ::c_int = 17; /* File exists */
pub const EXDEV: ::c_int = 18; /* Cross-device link */
pub const ENODEV: ::c_int = 19; /* No such device */
pub const ENOTDIR: ::c_int = 20; /* Not a directory */
pub const EISDIR: ::c_int = 21; /* Is a directory */
pub const EINVAL: ::c_int = 22; /* Invalid argument */
pub const ENFILE: ::c_int = 23; /* File table overflow */
pub const EMFILE: ::c_int = 24; /* Too many open files */
pub const ENOTTY: ::c_int = 25; /* Not a typewriter */
pub const ETXTBSY: ::c_int = 26; /* Text file busy */
pub const EFBIG: ::c_int = 27; /* File too large */
pub const ENOSPC: ::c_int = 28; /* No space left on device */
pub const ESPIPE: ::c_int = 29; /* Illegal seek */
pub const EROFS: ::c_int = 30; /* Read-only file system */
pub const EMLINK: ::c_int = 31; /* Too many links */
pub const EPIPE: ::c_int = 32; /* Broken pipe */
pub const EDOM: ::c_int = 33; /* Math argument out of domain of func */
pub const ERANGE: ::c_int = 34; /* Math result not representable */
pub const EDEADLK: ::c_int = 35; /* Resource deadlock would occur */
pub const ENAMETOOLONG: ::c_int = 36; /* File name too long */
pub const ENOLCK: ::c_int = 37; /* No record locks available */
pub const ENOSYS: ::c_int = 38; /* Function not implemented */
pub const ENOTEMPTY: ::c_int = 39; /* Directory not empty */
pub const ELOOP: ::c_int = 40; /* Too many symbolic links encountered */
pub const EWOULDBLOCK: ::c_int = 41; /* Operation would block */
pub const ENOMSG: ::c_int = 42; /* No message of desired type */
pub const EIDRM: ::c_int = 43; /* Identifier removed */
pub const ECHRNG: ::c_int = 44; /* Channel number out of range */
pub const EL2NSYNC: ::c_int = 45; /* Level 2 not synchronized */
pub const EL3HLT: ::c_int = 46; /* Level 3 halted */
pub const EL3RST: ::c_int = 47; /* Level 3 reset */
pub const ELNRNG: ::c_int = 48; /* Link number out of range */
pub const EUNATCH: ::c_int = 49; /* Protocol driver not attached */
pub const ENOCSI: ::c_int = 50; /* No CSI structure available */
pub const EL2HLT: ::c_int = 51; /* Level 2 halted */
pub const EBADE: ::c_int = 52; /* Invalid exchange */
pub const EBADR: ::c_int = 53; /* Invalid request descriptor */
pub const EXFULL: ::c_int = 54; /* Exchange full */
pub const ENOANO: ::c_int = 55; /* No anode */
pub const EBADRQC: ::c_int = 56; /* Invalid request code */
pub const EBADSLT: ::c_int = 57; /* Invalid slot */
pub const EDEADLOCK: ::c_int = 58; /* Resource deadlock would occur */
pub const EBFONT: ::c_int = 59; /* Bad font file format */
pub const ENOSTR: ::c_int = 60; /* Device not a stream */
pub const ENODATA: ::c_int = 61; /* No data available */
pub const ETIME: ::c_int = 62; /* Timer expired */
pub const ENOSR: ::c_int = 63; /* Out of streams resources */
pub const ENONET: ::c_int = 64; /* Machine is not on the network */
pub const ENOPKG: ::c_int = 65; /* Package not installed */
pub const EREMOTE: ::c_int = 66; /* Object is remote */
pub const ENOLINK: ::c_int = 67; /* Link has been severed */
pub const EADV: ::c_int = 68; /* Advertise error */
pub const ESRMNT: ::c_int = 69; /* Srmount error */
pub const ECOMM: ::c_int = 70; /* Communication error on send */
pub const EPROTO: ::c_int = 71; /* Protocol error */
pub const EMULTIHOP: ::c_int = 72; /* Multihop attempted */
pub const EDOTDOT: ::c_int = 73; /* RFS specific error */
pub const EBADMSG: ::c_int = 74; /* Not a data message */
pub const EOVERFLOW: ::c_int = 75; /* Value too large for defined data type */
pub const ENOTUNIQ: ::c_int = 76; /* Name not unique on network */
pub const EBADFD: ::c_int = 77; /* File descriptor in bad state */
pub const EREMCHG: ::c_int = 78; /* Remote address changed */
pub const ELIBACC: ::c_int = 79; /* Can not access a needed shared library */
pub const ELIBBAD: ::c_int = 80; /* Accessing a corrupted shared library */
pub const ELIBSCN: ::c_int = 81; /* .lib section in a.out corrupted */
/* Attempting to link in too many shared libraries */
pub const ELIBMAX: ::c_int = 82;
pub const ELIBEXEC: ::c_int = 83; /* Cannot exec a shared library directly */
pub const EILSEQ: ::c_int = 84; /* Illegal byte sequence */
/* Interrupted system call should be restarted */
pub const ERESTART: ::c_int = 85;
pub const ESTRPIPE: ::c_int = 86; /* Streams pipe error */
pub const EUSERS: ::c_int = 87; /* Too many users */
pub const ENOTSOCK: ::c_int = 88; /* Socket operation on non-socket */
pub const EDESTADDRREQ: ::c_int = 89; /* Destination address required */
pub const EMSGSIZE: ::c_int = 90; /* Message too long */
pub const EPROTOTYPE: ::c_int = 91; /* Protocol wrong type for socket */
pub const ENOPROTOOPT: ::c_int = 92; /* Protocol not available */
pub const EPROTONOSUPPORT: ::c_int = 93; /* Protocol not supported */
pub const ESOCKTNOSUPPORT: ::c_int = 94; /* Socket type not supported */
/* Operation not supported on transport endpoint */
pub const EOPNOTSUPP: ::c_int = 95;
pub const EPFNOSUPPORT: ::c_int = 96; /* Protocol family not supported */
/* Address family not supported by protocol */
pub const EAFNOSUPPORT: ::c_int = 97;
pub const EADDRINUSE: ::c_int = 98; /* Address already in use */
pub const EADDRNOTAVAIL: ::c_int = 99; /* Cannot assign requested address */
pub const ENETDOWN: ::c_int = 100; /* Network is down */
pub const ENETUNREACH: ::c_int = 101; /* Network is unreachable */
/* Network dropped connection because of reset */
pub const ENETRESET: ::c_int = 102;
pub const ECONNABORTED: ::c_int = 103; /* Software caused connection abort */
pub const ECONNRESET: ::c_int = 104; /* Connection reset by peer */
pub const ENOBUFS: ::c_int = 105; /* No buffer space available */
pub const EISCONN: ::c_int = 106; /* Transport endpoint is already connected */
pub const ENOTCONN: ::c_int = 107; /* Transport endpoint is not connected */
/* Cannot send after transport endpoint shutdown */
pub const ESHUTDOWN: ::c_int = 108;
pub const ETOOMANYREFS: ::c_int = 109; /* Too many references: cannot splice */
pub const ETIMEDOUT: ::c_int = 110; /* Connection timed out */
pub const ECONNREFUSED: ::c_int = 111; /* Connection refused */
pub const EHOSTDOWN: ::c_int = 112; /* Host is down */
pub const EHOSTUNREACH: ::c_int = 113; /* No route to host */
pub const EALREADY: ::c_int = 114; /* Operation already in progress */
pub const EINPROGRESS: ::c_int = 115; /* Operation now in progress */
pub const ESTALE: ::c_int = 116; /* Stale NFS file handle */
pub const EUCLEAN: ::c_int = 117; /* Structure needs cleaning */
pub const ENOTNAM: ::c_int = 118; /* Not a XENIX named type file */
pub const ENAVAIL: ::c_int = 119; /* No XENIX semaphores available */
pub const EISNAM: ::c_int = 120; /* Is a named type file */
pub const EREMOTEIO: ::c_int = 121; /* Remote I/O error */
pub const EDQUOT: ::c_int = 122; /* Quota exceeded */
pub const ENOMEDIUM: ::c_int = 123; /* No medium found */
pub const EMEDIUMTYPE: ::c_int = 124; /* Wrong medium type */
pub const ECANCELED: ::c_int = 125; /* Operation Canceled */
pub const ENOKEY: ::c_int = 126; /* Required key not available */
pub const EKEYEXPIRED: ::c_int = 127; /* Key has expired */
pub const EKEYREVOKED: ::c_int = 128; /* Key has been revoked */
pub const EKEYREJECTED: ::c_int = 129; /* Key was rejected by service */
pub const EOWNERDEAD: ::c_int = 130; /* Owner died */
pub const ENOTRECOVERABLE: ::c_int = 131; /* State not recoverable */
// fcntl.h
pub const F_DUPFD: ::c_int = 0;
pub const F_GETFD: ::c_int = 1;
pub const F_SETFD: ::c_int = 2;
pub const F_GETFL: ::c_int = 3;
pub const F_SETFL: ::c_int = 4;
// FIXME: relibc {
pub const F_DUPFD_CLOEXEC: ::c_int = ::F_DUPFD;
// }
pub const FD_CLOEXEC: ::c_int = 0x0100_0000;
pub const O_RDONLY: ::c_int = 0x0001_0000;
pub const O_WRONLY: ::c_int = 0x0002_0000;
pub const O_RDWR: ::c_int = 0x0003_0000;
pub const O_ACCMODE: ::c_int = 0x0003_0000;
pub const O_NONBLOCK: ::c_int = 0x0004_0000;
pub const O_APPEND: ::c_int = 0x0008_0000;
pub const O_SHLOCK: ::c_int = 0x0010_0000;
pub const O_EXLOCK: ::c_int = 0x0020_0000;
pub const O_ASYNC: ::c_int = 0x0040_0000;
pub const O_FSYNC: ::c_int = 0x0080_0000;
pub const O_CLOEXEC: ::c_int = 0x0100_0000;
pub const O_CREAT: ::c_int = 0x0200_0000;
pub const O_TRUNC: ::c_int = 0x0400_0000;
pub const O_EXCL: ::c_int = 0x0800_0000;
pub const O_DIRECTORY: ::c_int = 0x1000_0000;
pub const O_PATH: ::c_int = 0x2000_0000;
pub const O_SYMLINK: ::c_int = 0x4000_0000;
// Negative to allow it to be used as int
// FIXME: Fix negative values missing from includes
pub const O_NOFOLLOW: ::c_int = -0x8000_0000;
// netdb.h
pub const EAI_SYSTEM: ::c_int = -11;
pub const NI_MAXHOST: ::c_int = 1025;
pub const NI_MAXSERV: ::c_int = 32;
pub const NI_NUMERICHOST: ::c_int = 0x0001;
pub const NI_NUMERICSERV: ::c_int = 0x0002;
pub const NI_NOFQDN: ::c_int = 0x0004;
pub const NI_NAMEREQD: ::c_int = 0x0008;
pub const NI_DGRAM: ::c_int = 0x0010;
// netinet/in.h
// FIXME: relibc {
pub const IP_TTL: ::c_int = 2;
pub const IPV6_UNICAST_HOPS: ::c_int = 16;
pub const IPV6_MULTICAST_IF: ::c_int = 17;
pub const IPV6_MULTICAST_HOPS: ::c_int = 18;
pub const IPV6_MULTICAST_LOOP: ::c_int = 19;
pub const IPV6_ADD_MEMBERSHIP: ::c_int = 20;
pub const IPV6_DROP_MEMBERSHIP: ::c_int = 21;
pub const IPV6_V6ONLY: ::c_int = 26;
pub const IP_MULTICAST_IF: ::c_int = 32;
pub const IP_MULTICAST_TTL: ::c_int = 33;
pub const IP_MULTICAST_LOOP: ::c_int = 34;
pub const IP_ADD_MEMBERSHIP: ::c_int = 35;
pub const IP_DROP_MEMBERSHIP: ::c_int = 36;
// }
// netinet/tcp.h
pub const TCP_NODELAY: ::c_int = 1;
// FIXME: relibc {
pub const TCP_KEEPIDLE: ::c_int = 1;
// }
// poll.h
pub const POLLIN: ::c_short = 0x001;
pub const POLLPRI: ::c_short = 0x002;
pub const POLLOUT: ::c_short = 0x004;
pub const POLLERR: ::c_short = 0x008;
pub const POLLHUP: ::c_short = 0x010;
pub const POLLNVAL: ::c_short = 0x020;
// pthread.h
pub const PTHREAD_MUTEX_NORMAL: ::c_int = 0;
pub const PTHREAD_MUTEX_RECURSIVE: ::c_int = 1;
pub const PTHREAD_MUTEX_INITIALIZER: ::pthread_mutex_t = -1isize as *mut _;
pub const PTHREAD_COND_INITIALIZER: ::pthread_cond_t = -1isize as *mut _;
pub const PTHREAD_RWLOCK_INITIALIZER: ::pthread_rwlock_t = -1isize as *mut _;
pub const PTHREAD_STACK_MIN: ::size_t = 4096;
// signal.h
pub const SIG_BLOCK: ::c_int = 0;
pub const SIG_UNBLOCK: ::c_int = 1;
pub const SIG_SETMASK: ::c_int = 2;
pub const SIGHUP: ::c_int = 1;
pub const SIGINT: ::c_int = 2;
pub const SIGQUIT: ::c_int = 3;
pub const SIGILL: ::c_int = 4;
pub const SIGTRAP: ::c_int = 5;
pub const SIGABRT: ::c_int = 6;
pub const SIGBUS: ::c_int = 7;
pub const SIGFPE: ::c_int = 8;
pub const SIGKILL: ::c_int = 9;
pub const SIGUSR1: ::c_int = 10;
pub const SIGSEGV: ::c_int = 11;
pub const SIGUSR2: ::c_int = 12;
pub const SIGPIPE: ::c_int = 13;
pub const SIGALRM: ::c_int = 14;
pub const SIGTERM: ::c_int = 15;
pub const SIGSTKFLT: ::c_int = 16;
pub const SIGCHLD: ::c_int = 17;
pub const SIGCONT: ::c_int = 18;
pub const SIGSTOP: ::c_int = 19;
pub const SIGTSTP: ::c_int = 20;
pub const SIGTTIN: ::c_int = 21;
pub const SIGTTOU: ::c_int = 22;
pub const SIGURG: ::c_int = 23;
pub const SIGXCPU: ::c_int = 24;
pub const SIGXFSZ: ::c_int = 25;
pub const SIGVTALRM: ::c_int = 26;
pub const SIGPROF: ::c_int = 27;
pub const SIGWINCH: ::c_int = 28;
pub const SIGIO: ::c_int = 29;
pub const SIGPWR: ::c_int = 30;
pub const SIGSYS: ::c_int = 31;
pub const NSIG: ::c_int = 32;
pub const SA_NOCLDSTOP: ::c_ulong = 0x00000001;
pub const SA_NOCLDWAIT: ::c_ulong = 0x00000002;
pub const SA_SIGINFO: ::c_ulong = 0x00000004;
pub const SA_RESTORER: ::c_ulong = 0x04000000;
pub const SA_ONSTACK: ::c_ulong = 0x08000000;
pub const SA_RESTART: ::c_ulong = 0x10000000;
pub const SA_NODEFER: ::c_ulong = 0x40000000;
pub const SA_RESETHAND: ::c_ulong = 0x80000000;
// sys/file.h
pub const LOCK_SH: ::c_int = 1;
pub const LOCK_EX: ::c_int = 2;
pub const LOCK_NB: ::c_int = 4;
pub const LOCK_UN: ::c_int = 8;
// sys/epoll.h
pub const EPOLL_CLOEXEC: ::c_int = 0x0100_0000;
pub const EPOLL_CTL_ADD: ::c_int = 1;
pub const EPOLL_CTL_DEL: ::c_int = 2;
pub const EPOLL_CTL_MOD: ::c_int = 3;
pub const EPOLLIN: ::c_int = 1;
pub const EPOLLPRI: ::c_int = 0;
pub const EPOLLOUT: ::c_int = 2;
pub const EPOLLRDNORM: ::c_int = 0;
pub const EPOLLNVAL: ::c_int = 0;
pub const EPOLLRDBAND: ::c_int = 0;
pub const EPOLLWRNORM: ::c_int = 0;
pub const EPOLLWRBAND: ::c_int = 0;
pub const EPOLLMSG: ::c_int = 0;
pub const EPOLLERR: ::c_int = 0;
pub const EPOLLHUP: ::c_int = 0;
pub const EPOLLRDHUP: ::c_int = 0;
pub const EPOLLEXCLUSIVE: ::c_int = 0;
pub const EPOLLWAKEUP: ::c_int = 0;
pub const EPOLLONESHOT: ::c_int = 0;
pub const EPOLLET: ::c_int = 0;
// sys/stat.h
pub const S_IFMT: ::c_int = 0o0_170_000;
pub const S_IFDIR: ::c_int = 0o040_000;
pub const S_IFCHR: ::c_int = 0o020_000;
pub const S_IFBLK: ::c_int = 0o060_000;
pub const S_IFREG: ::c_int = 0o100_000;
pub const S_IFIFO: ::c_int = 0o010_000;
pub const S_IFLNK: ::c_int = 0o120_000;
pub const S_IFSOCK: ::c_int = 0o140_000;
pub const S_IRWXU: ::c_int = 0o0_700;
pub const S_IRUSR: ::c_int = 0o0_400;
pub const S_IWUSR: ::c_int = 0o0_200;
pub const S_IXUSR: ::c_int = 0o0_100;
pub const S_IRWXG: ::c_int = 0o0_070;
pub const S_IRGRP: ::c_int = 0o0_040;
pub const S_IWGRP: ::c_int = 0o0_020;
pub const S_IXGRP: ::c_int = 0o0_010;
pub const S_IRWXO: ::c_int = 0o0_007;
pub const S_IROTH: ::c_int = 0o0_004;
pub const S_IWOTH: ::c_int = 0o0_002;
pub const S_IXOTH: ::c_int = 0o0_001;
// stdlib.h
pub const EXIT_SUCCESS: ::c_int = 0;
pub const EXIT_FAILURE: ::c_int = 1;
// sys/ioctl.h
// FIXME: relibc {
pub const FIONBIO: ::c_ulong = 0x5421;
pub const FIOCLEX: ::c_ulong = 0x5451;
// }
pub const TCGETS: ::c_ulong = 0x5401;
pub const TCSETS: ::c_ulong = 0x5402;
pub const TCFLSH: ::c_ulong = 0x540B;
pub const TIOCGPGRP: ::c_ulong = 0x540F;
pub const TIOCSPGRP: ::c_ulong = 0x5410;
pub const TIOCGWINSZ: ::c_ulong = 0x5413;
pub const TIOCSWINSZ: ::c_ulong = 0x5414;
// sys/mman.h
pub const PROT_NONE: ::c_int = 0x0000;
pub const PROT_READ: ::c_int = 0x0004;
pub const PROT_WRITE: ::c_int = 0x0002;
pub const PROT_EXEC: ::c_int = 0x0001;
pub const MAP_SHARED: ::c_int = 0x0001;
pub const MAP_PRIVATE: ::c_int = 0x0002;
pub const MAP_ANON: ::c_int = 0x0020;
pub const MAP_ANONYMOUS: ::c_int = MAP_ANON;
pub const MAP_FIXED: ::c_int = 0x0010;
pub const MAP_FAILED: *mut ::c_void = !0 as _;
pub const MS_ASYNC: ::c_int = 0x0001;
pub const MS_INVALIDATE: ::c_int = 0x0002;
pub const MS_SYNC: ::c_int = 0x0004;
// sys/select.h
pub const FD_SETSIZE: usize = 1024;
// sys/socket.h
pub const AF_INET: ::c_int = 2;
pub const AF_INET6: ::c_int = 10;
pub const AF_UNIX: ::c_int = 1;
pub const AF_UNSPEC: ::c_int = 0;
pub const PF_INET: ::c_int = 2;
pub const PF_INET6: ::c_int = 10;
pub const PF_UNIX: ::c_int = 1;
pub const PF_UNSPEC: ::c_int = 0;
pub const MSG_CTRUNC: ::c_int = 8;
pub const MSG_DONTROUTE: ::c_int = 4;
pub const MSG_EOR: ::c_int = 128;
pub const MSG_OOB: ::c_int = 1;
pub const MSG_PEEK: ::c_int = 2;
pub const MSG_TRUNC: ::c_int = 32;
pub const MSG_WAITALL: ::c_int = 256;
pub const SHUT_RD: ::c_int = 0;
pub const SHUT_WR: ::c_int = 1;
pub const SHUT_RDWR: ::c_int = 2;
pub const SO_DEBUG: ::c_int = 1;
pub const SO_REUSEADDR: ::c_int = 2;
pub const SO_TYPE: ::c_int = 3;
pub const SO_ERROR: ::c_int = 4;
pub const SO_DONTROUTE: ::c_int = 5;
pub const SO_BROADCAST: ::c_int = 6;
pub const SO_SNDBUF: ::c_int = 7;
pub const SO_RCVBUF: ::c_int = 8;
pub const SO_KEEPALIVE: ::c_int = 9;
pub const SO_OOBINLINE: ::c_int = 10;
pub const SO_NO_CHECK: ::c_int = 11;
pub const SO_PRIORITY: ::c_int = 12;
pub const SO_LINGER: ::c_int = 13;
pub const SO_BSDCOMPAT: ::c_int = 14;
pub const SO_REUSEPORT: ::c_int = 15;
pub const SO_PASSCRED: ::c_int = 16;
pub const SO_PEERCRED: ::c_int = 17;
pub const SO_RCVLOWAT: ::c_int = 18;
pub const SO_SNDLOWAT: ::c_int = 19;
pub const SO_RCVTIMEO: ::c_int = 20;
pub const SO_SNDTIMEO: ::c_int = 21;
pub const SO_ACCEPTCONN: ::c_int = 30;
pub const SO_PEERSEC: ::c_int = 31;
pub const SO_SNDBUFFORCE: ::c_int = 32;
pub const SO_RCVBUFFORCE: ::c_int = 33;
pub const SO_PROTOCOL: ::c_int = 38;
pub const SO_DOMAIN: ::c_int = 39;
pub const SOCK_STREAM: ::c_int = 1;
pub const SOCK_DGRAM: ::c_int = 2;
pub const SOCK_NONBLOCK: ::c_int = 0o4_000;
pub const SOCK_CLOEXEC: ::c_int = 0o2_000_000;
pub const SOCK_SEQPACKET: ::c_int = 5;
pub const SOL_SOCKET: ::c_int = 1;
// sys/termios.h
pub const NCCS: usize = 32;
pub const VINTR: usize = 0;
pub const VQUIT: usize = 1;
pub const VERASE: usize = 2;
pub const VKILL: usize = 3;
pub const VEOF: usize = 4;
pub const VTIME: usize = 5;
pub const VMIN: usize = 6;
pub const VSWTC: usize = 7;
pub const VSTART: usize = 8;
pub const VSTOP: usize = 9;
pub const VSUSP: usize = 10;
pub const VEOL: usize = 11;
pub const VREPRINT: usize = 12;
pub const VDISCARD: usize = 13;
pub const VWERASE: usize = 14;
pub const VLNEXT: usize = 15;
pub const VEOL2: usize = 16;
pub const IGNBRK: ::tcflag_t = 0o000_001;
pub const BRKINT: ::tcflag_t = 0o000_002;
pub const IGNPAR: ::tcflag_t = 0o000_004;
pub const PARMRK: ::tcflag_t = 0o000_010;
pub const INPCK: ::tcflag_t = 0o000_020;
pub const ISTRIP: ::tcflag_t = 0o000_040;
pub const INLCR: ::tcflag_t = 0o000_100;
pub const IGNCR: ::tcflag_t = 0o000_200;
pub const ICRNL: ::tcflag_t = 0o000_400;
pub const IUCLC: ::tcflag_t = 0o001_000;
pub const IXON: ::tcflag_t = 0o002_000;
pub const IXANY: ::tcflag_t = 0o004_000;
pub const IXOFF: ::tcflag_t = 0o010_000;
pub const IMAXBEL: ::tcflag_t = 0o020_000;
pub const IUTF8: ::tcflag_t = 0o040_000;
pub const OPOST: ::tcflag_t = 0o000_001;
pub const OLCUC: ::tcflag_t = 0o000_002;
pub const ONLCR: ::tcflag_t = 0o000_004;
pub const OCRNL: ::tcflag_t = 0o000_010;
pub const ONOCR: ::tcflag_t = 0o000_020;
pub const ONLRET: ::tcflag_t = 0o00_0040;
pub const OFILL: ::tcflag_t = 0o000_100;
pub const OFDEL: ::tcflag_t = 0o000_200;
pub const VTDLY: usize = 0o040_000;
pub const VT0: usize = 0o000_000;
pub const VT1: usize = 0o040_000;
pub const B0: speed_t = 0o000_000;
pub const B50: speed_t = 0o000_001;
pub const B75: speed_t = 0o000_002;
pub const B110: speed_t = 0o000_003;
pub const B134: speed_t = 0o000_004;
pub const B150: speed_t = 0o000_005;
pub const B200: speed_t = 0o000_006;
pub const B300: speed_t = 0o000_007;
pub const B600: speed_t = 0o000_010;
pub const B1200: speed_t = 0o000_011;
pub const B1800: speed_t = 0o000_012;
pub const B2400: speed_t = 0o000_013;
pub const B4800: speed_t = 0o000_014;
pub const B9600: speed_t = 0o000_015;
pub const B19200: speed_t = 0o000_016;
pub const B38400: speed_t = 0o000_017;
pub const B57600: speed_t = 0o010_001;
pub const B115200: speed_t = 0o010_002;
pub const B230400: speed_t = 0o010_003;
pub const B460800: speed_t = 0o010_004;
pub const B500000: speed_t = 0o010_005;
pub const B576000: speed_t = 0o010_006;
pub const B921600: speed_t = 0o010_007;
pub const B1000000: speed_t = 0o010_010;
pub const B1152000: speed_t = 0o010_011;
pub const B1500000: speed_t = 0o010_012;
pub const B2000000: speed_t = 0o010_013;
pub const B2500000: speed_t = 0o010_014;
pub const B3000000: speed_t = 0o010_015;
pub const B3500000: speed_t = 0o010_016;
pub const B4000000: speed_t = 0o010_017;
pub const CSIZE: ::tcflag_t = 0o000_060;
pub const CS5: ::tcflag_t = 0o000_000;
pub const CS6: ::tcflag_t = 0o000_020;
pub const CS7: ::tcflag_t = 0o000_040;
pub const CS8: ::tcflag_t = 0o000_060;
pub const CSTOPB: ::tcflag_t = 0o000_100;
pub const CREAD: ::tcflag_t = 0o000_200;
pub const PARENB: ::tcflag_t = 0o000_400;
pub const PARODD: ::tcflag_t = 0o001_000;
pub const HUPCL: ::tcflag_t = 0o002_000;
pub const CLOCAL: ::tcflag_t = 0o004_000;
pub const ISIG: ::tcflag_t = 0o000_001;
pub const ICANON: ::tcflag_t = 0o000_002;
pub const ECHO: ::tcflag_t = 0o000_010;
pub const ECHOE: ::tcflag_t = 0o000_020;
pub const ECHOK: ::tcflag_t = 0o000_040;
pub const ECHONL: ::tcflag_t = 0o000_100;
pub const NOFLSH: ::tcflag_t = 0o000_200;
pub const TOSTOP: ::tcflag_t = 0o000_400;
pub const IEXTEN: ::tcflag_t = 0o100_000;
pub const TCOOFF: ::c_int = 0;
pub const TCOON: ::c_int = 1;
pub const TCIOFF: ::c_int = 2;
pub const TCION: ::c_int = 3;
pub const TCIFLUSH: ::c_int = 0;
pub const TCOFLUSH: ::c_int = 1;
pub const TCIOFLUSH: ::c_int = 2;
pub const TCSANOW: ::c_int = 0;
pub const TCSADRAIN: ::c_int = 1;
pub const TCSAFLUSH: ::c_int = 2;
// sys/wait.h
pub const WNOHANG: ::c_int = 1;
pub const WUNTRACED: ::c_int = 2;
pub const WSTOPPED: ::c_int = 2;
pub const WEXITED: ::c_int = 4;
pub const WCONTINUED: ::c_int = 8;
pub const WNOWAIT: ::c_int = 0x0100_0000;
pub const __WNOTHREAD: ::c_int = 0x2000_0000;
pub const __WALL: ::c_int = 0x4000_0000;
#[allow(overflowing_literals)]
pub const __WCLONE: ::c_int = 0x8000_0000;
// time.h
pub const CLOCK_REALTIME: ::c_int = 1;
pub const CLOCK_MONOTONIC: ::c_int = 4;
// unistd.h
// POSIX.1 {
pub const _SC_ARG_MAX: ::c_int = 0;
pub const _SC_CHILD_MAX: ::c_int = 1;
pub const _SC_CLK_TCK: ::c_int = 2;
pub const _SC_NGROUPS_MAX: ::c_int = 3;
pub const _SC_OPEN_MAX: ::c_int = 4;
pub const _SC_STREAM_MAX: ::c_int = 5;
pub const _SC_TZNAME_MAX: ::c_int = 6;
// ...
pub const _SC_VERSION: ::c_int = 29;
pub const _SC_PAGESIZE: ::c_int = 30;
pub const _SC_PAGE_SIZE: ::c_int = 30;
// ...
pub const _SC_RE_DUP_MAX: ::c_int = 44;
// ...
pub const _SC_LOGIN_NAME_MAX: ::c_int = 71;
pub const _SC_TTY_NAME_MAX: ::c_int = 72;
// ...
pub const _SC_SYMLOOP_MAX: ::c_int = 173;
// ...
pub const _SC_HOST_NAME_MAX: ::c_int = 180;
// } POSIX.1
pub const F_OK: ::c_int = 0;
pub const R_OK: ::c_int = 4;
pub const W_OK: ::c_int = 2;
pub const X_OK: ::c_int = 1;
pub const SEEK_SET: ::c_int = 0;
pub const SEEK_CUR: ::c_int = 1;
pub const SEEK_END: ::c_int = 2;
pub const STDIN_FILENO: ::c_int = 0;
pub const STDOUT_FILENO: ::c_int = 1;
pub const STDERR_FILENO: ::c_int = 2;
pub const _PC_LINK_MAX: ::c_int = 0;
pub const _PC_MAX_CANON: ::c_int = 1;
pub const _PC_MAX_INPUT: ::c_int = 2;
pub const _PC_NAME_MAX: ::c_int = 3;
pub const _PC_PATH_MAX: ::c_int = 4;
pub const _PC_PIPE_BUF: ::c_int = 5;
pub const _PC_CHOWN_RESTRICTED: ::c_int = 6;
pub const _PC_NO_TRUNC: ::c_int = 7;
pub const _PC_VDISABLE: ::c_int = 8;
pub const _PC_SYNC_IO: ::c_int = 9;
pub const _PC_ASYNC_IO: ::c_int = 10;
pub const _PC_PRIO_IO: ::c_int = 11;
pub const _PC_SOCK_MAXBUF: ::c_int = 12;
pub const _PC_FILESIZEBITS: ::c_int = 13;
pub const _PC_REC_INCR_XFER_SIZE: ::c_int = 14;
pub const _PC_REC_MAX_XFER_SIZE: ::c_int = 15;
pub const _PC_REC_MIN_XFER_SIZE: ::c_int = 16;
pub const _PC_REC_XFER_ALIGN: ::c_int = 17;
pub const _PC_ALLOC_SIZE_MIN: ::c_int = 18;
pub const _PC_SYMLINK_MAX: ::c_int = 19;
pub const _PC_2_SYMLINKS: ::c_int = 20;
pub const PRIO_PROCESS: ::c_int = 0;
pub const PRIO_PGRP: ::c_int = 1;
pub const PRIO_USER: ::c_int = 2;
// sys/select.h: fd_set bit-manipulation helpers, emitted via the `f!` macro.
// (The previous "// wait.h" header here belonged to the wait-status block below.)
f! {
// Clear the bit for `fd` in `set`.
pub fn FD_CLR(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
// Number of bits per fds_bits element (element size in bytes * 8).
let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
(*set).fds_bits[fd / size] &= !(1 << (fd % size));
return
}
// Test whether the bit for `fd` is set in `set`.
pub fn FD_ISSET(fd: ::c_int, set: *mut fd_set) -> bool {
let fd = fd as usize;
let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
return ((*set).fds_bits[fd / size] & (1 << (fd % size))) != 0
}
// Set the bit for `fd` in `set`.
pub fn FD_SET(fd: ::c_int, set: *mut fd_set) -> () {
let fd = fd as usize;
let size = ::mem::size_of_val(&(*set).fds_bits[0]) * 8;
(*set).fds_bits[fd / size] |= 1 << (fd % size);
return
}
// Clear every bit in `set`.
pub fn FD_ZERO(set: *mut fd_set) -> () {
for slot in (*set).fds_bits.iter_mut() {
*slot = 0;
}
}
}
// sys/wait.h: wait()/waitpid() status-word decoding (glibc-compatible
// layout: exit code / stop signal in bits 8..16, low byte encodes how
// the child changed state).
safe_f! {
// Child is currently stopped: low byte is 0x7f.
pub {const} fn WIFSTOPPED(status: ::c_int) -> bool {
(status & 0xff) == 0x7f
}
// Signal number that stopped the child (valid when WIFSTOPPED).
pub {const} fn WSTOPSIG(status: ::c_int) -> ::c_int {
(status >> 8) & 0xff
}
// Child was resumed by SIGCONT (whole word is the sentinel 0xffff).
pub {const} fn WIFCONTINUED(status: ::c_int) -> bool {
status == 0xffff
}
// Child was terminated by a signal: low 7 bits in 1..=0x7e
// (0 means normal exit, 0x7f means stopped; the i8 cast rejects it).
pub {const} fn WIFSIGNALED(status: ::c_int) -> bool {
((status & 0x7f) + 1) as i8 >= 2
}
// Signal number that terminated the child (valid when WIFSIGNALED).
pub {const} fn WTERMSIG(status: ::c_int) -> ::c_int {
status & 0x7f
}
// Child exited normally (low 7 bits zero).
pub {const} fn WIFEXITED(status: ::c_int) -> bool {
(status & 0x7f) == 0
}
// Exit code passed to exit()/_exit() (valid when WIFEXITED).
pub {const} fn WEXITSTATUS(status: ::c_int) -> ::c_int {
(status >> 8) & 0xff
}
// A core dump was produced (core flag bit).
pub {const} fn WCOREDUMP(status: ::c_int) -> bool {
(status & 0x80) != 0
}
}
extern "C" {
// errno.h
pub fn __errno_location() -> *mut ::c_int;
pub fn strerror_r(
errnum: ::c_int,
buf: *mut c_char,
buflen: ::size_t,
) -> ::c_int;
// unistd.h
pub fn pipe2(fds: *mut ::c_int, flags: ::c_int) -> ::c_int;
// malloc.h
pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void;
// pthread.h
pub fn pthread_atfork(
prepare: ::Option<unsafe extern "C" fn()>,
parent: ::Option<unsafe extern "C" fn()>,
child: ::Option<unsafe extern "C" fn()>,
) -> ::c_int;
pub fn pthread_create(
tid: *mut ::pthread_t,
attr: *const ::pthread_attr_t,
start: extern "C" fn(*mut ::c_void) -> *mut ::c_void,
arg: *mut ::c_void,
) -> ::c_int;
pub fn pthread_condattr_setclock(
attr: *mut pthread_condattr_t,
clock_id: ::clockid_t,
) -> ::c_int;
// pwd.h
pub fn getpwuid_r(
uid: ::uid_t,
pwd: *mut passwd,
buf: *mut ::c_char,
buflen: ::size_t,
result: *mut *mut passwd,
) -> ::c_int;
// signal.h
pub fn pthread_sigmask(
how: ::c_int,
set: *const ::sigset_t,
oldset: *mut ::sigset_t,
) -> ::c_int;
// sys/epoll.h
pub fn epoll_create(size: ::c_int) -> ::c_int;
pub fn epoll_create1(flags: ::c_int) -> ::c_int;
pub fn epoll_wait(
epfd: ::c_int,
events: *mut ::epoll_event,
maxevents: ::c_int,
timeout: ::c_int,
) -> ::c_int;
pub fn epoll_ctl(
epfd: ::c_int,
op: ::c_int,
fd: ::c_int,
event: *mut ::epoll_event,
) -> ::c_int;
// sys/ioctl.h
pub fn ioctl(fd: ::c_int, request: ::c_ulong, ...) -> ::c_int;
// sys/mman.h
pub fn msync(
addr: *mut ::c_void,
len: ::size_t,
flags: ::c_int,
) -> ::c_int;
pub fn mprotect(
addr: *mut ::c_void,
len: ::size_t,
prot: ::c_int,
) -> ::c_int;
pub fn shm_open(
name: *const c_char,
oflag: ::c_int,
mode: mode_t,
) -> ::c_int;
pub fn shm_unlink(name: *const ::c_char) -> ::c_int;
// sys/resource.h
pub fn getrlimit(resource: ::c_int, rlim: *mut ::rlimit) -> ::c_int;
pub fn setrlimit(resource: ::c_int, rlim: *const ::rlimit) -> ::c_int;
// sys/socket.h
pub fn bind(
socket: ::c_int,
address: *const ::sockaddr,
address_len: ::socklen_t,
) -> ::c_int;
pub fn recvfrom(
socket: ::c_int,
buf: *mut ::c_void,
len: ::size_t,
flags: ::c_int,
addr: *mut ::sockaddr,
addrlen: *mut ::socklen_t,
) -> ::ssize_t;
// sys/stat.h
pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int;
// sys/uio.h
pub fn readv(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
) -> ::ssize_t;
pub fn writev(
fd: ::c_int,
iov: *const ::iovec,
iovcnt: ::c_int,
) -> ::ssize_t;
// sys/utsname.h
pub fn uname(utsname: *mut utsname) -> ::c_int;
// time.h
pub fn gettimeofday(tp: *mut ::timeval, tz: *mut ::timezone) -> ::c_int;
pub fn clock_gettime(clk_id: ::clockid_t, tp: *mut ::timespec) -> ::c_int;
}
cfg_if! {
if #[cfg(feature = "extra_traits")] {
impl PartialEq for dirent {
fn eq(&self, other: &dirent) -> bool {
self.d_ino == other.d_ino
&& self.d_off == other.d_off
&& self.d_reclen == other.d_reclen
&& self.d_type == other.d_type
&& self
.d_name
.iter()
.zip(other.d_name.iter())
.all(|(a,b)| a == b)
}
}
impl Eq for dirent {}
impl ::fmt::Debug for dirent {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("dirent")
.field("d_ino", &self.d_ino)
.field("d_off", &self.d_off)
.field("d_reclen", &self.d_reclen)
.field("d_type", &self.d_type)
// FIXME: .field("d_name", &self.d_name)
.finish()
}
}
impl ::hash::Hash for dirent {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.d_ino.hash(state);
self.d_off.hash(state);
self.d_reclen.hash(state);
self.d_type.hash(state);
self.d_name.hash(state);
}
}
impl PartialEq for sockaddr_un {
fn eq(&self, other: &sockaddr_un) -> bool {
self.sun_family == other.sun_family
&& self
.sun_path
.iter()
.zip(other.sun_path.iter())
.all(|(a,b)| a == b)
}
}
impl Eq for sockaddr_un {}
impl ::fmt::Debug for sockaddr_un {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sockaddr_un")
.field("sun_family", &self.sun_family)
// FIXME: .field("sun_path", &self.sun_path)
.finish()
}
}
impl ::hash::Hash for sockaddr_un {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sun_family.hash(state);
self.sun_path.hash(state);
}
}
// Structural equality over family, alignment field and padding bytes.
impl PartialEq for sockaddr_storage {
fn eq(&self, other: &sockaddr_storage) -> bool {
self.ss_family == other.ss_family
// Fixed: previously compared self.__ss_align with itself, so a
// difference in __ss_align could never make two values unequal.
&& self.__ss_align == other.__ss_align
&& self
.__ss_padding
.iter()
.zip(other.__ss_padding.iter())
.all(|(a,b)| a == b)
}
}
impl Eq for sockaddr_storage {}
impl ::fmt::Debug for sockaddr_storage {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("sockaddr_storage")
.field("ss_family", &self.ss_family)
.field("__ss_align", &self.__ss_align)
// FIXME: .field("__ss_padding", &self.__ss_padding)
.finish()
}
}
impl ::hash::Hash for sockaddr_storage {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.ss_family.hash(state);
self.__ss_padding.hash(state);
self.__ss_align.hash(state);
}
}
impl PartialEq for utsname {
fn eq(&self, other: &utsname) -> bool {
self.sysname
.iter()
.zip(other.sysname.iter())
.all(|(a, b)| a == b)
&& self
.nodename
.iter()
.zip(other.nodename.iter())
.all(|(a, b)| a == b)
&& self
.release
.iter()
.zip(other.release.iter())
.all(|(a, b)| a == b)
&& self
.version
.iter()
.zip(other.version.iter())
.all(|(a, b)| a == b)
&& self
.machine
.iter()
.zip(other.machine.iter())
.all(|(a, b)| a == b)
&& self
.domainname
.iter()
.zip(other.domainname.iter())
.all(|(a, b)| a == b)
}
}
impl Eq for utsname {}
impl ::fmt::Debug for utsname {
fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
f.debug_struct("utsname")
// FIXME: .field("sysname", &self.sysname)
// FIXME: .field("nodename", &self.nodename)
// FIXME: .field("release", &self.release)
// FIXME: .field("version", &self.version)
// FIXME: .field("machine", &self.machine)
// FIXME: .field("domainname", &self.domainname)
.finish()
}
}
impl ::hash::Hash for utsname {
fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
self.sysname.hash(state);
self.nodename.hash(state);
self.release.hash(state);
self.version.hash(state);
self.machine.hash(state);
self.domainname.hash(state);
}
}
}
}
|
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(unused_variables)]
#![allow(unused_must_use)]
include!("bindings.rs");
use std::os::raw::c_char;
use std::io::Write;
// Printer context for print_ptr_rec:
//   OUT     - top level: emit surrounding delimiters ("(", "\"", ...)
//   IN      - inside an already-delimited structure: omit them
//   DISPLAY - `display` style: chars/strings printed unescaped
#[derive(PartialEq)]
enum PrintState {
OUT,
IN,
DISPLAY
}
// Recursively print the Scheme value `p` to `port`.
//
// Values are tagged 32-bit words: fixnums, immediate constants
// (#t/#f/()/eof), chars, and heap objects (pair/vector/string/symbol/
// closure) whose tag is stripped to recover the raw address. The tag
// and shift constants come from bindings.rs. `state` controls
// delimiters and escaping (see PrintState).
fn print_ptr_rec<W: Write>(port: &mut W, p: ptr, state: PrintState) {
let x = p as u32;
if (x & fx_mask) == fx_tag {
// Fixnum: signed payload in the bits above the tag.
write!(port, "{}", (x as i32) >> fx_shift);
} else if x == bool_f {
write!(port, "#f");
} else if x == bool_t {
write!(port, "#t");
} else if x == list_nil {
write!(port, "()");
} else if x == eof_obj {
write!(port, "#!eof");
} else if (x & char_mask) == char_tag {
// Character: code point stored above char_shift.
let c = std::char::from_u32(x >> char_shift).
expect("a char");
if state == PrintState::DISPLAY {
write!(port, "{}", c);
} else {
// `write` style: named escapes for whitespace characters.
if c == '\t' { write!(port, "#\\tab"); }
else if c == '\n' { write!(port, "#\\newline"); }
else if c == '\r' { write!(port, "#\\return"); }
else if c == ' ' { write!(port, "#\\space"); }
else { write!(port, "#\\{}", c); }
}
} else if (x & obj_mask) == pair_tag {
// Pair: parenthesize only at top level so proper lists print flat.
if state == PrintState::OUT { write!(port, "("); }
let cell = unsafe { *((x-pair_tag) as *const cell) };
let car = cell.car;
print_ptr_rec(port, car, PrintState::OUT);
let cdr = cell.cdr;
if cdr != list_nil {
if (cdr & obj_mask) != pair_tag {
// Improper list: dotted-pair notation.
write!(port, " . ");
print_ptr_rec(port, cdr, PrintState::OUT);
} else {
// Continue the list without reopening a paren.
write!(port, " ");
print_ptr_rec(port, cdr, PrintState::IN);
}
}
if state == PrintState::OUT { write!(port, ")"); }
} else if (x & obj_mask) == vector_tag {
write!(port, "#(");
unsafe {
let p = &*((x-vector_tag) as *const vector);
// length is itself a fixnum; unshift to get the element count.
let n = (p.length as i32) >> fx_shift;
for i in 0..n {
if i > 0 { write!(port, " "); }
print_ptr_rec(port,
*((p.buf.as_ptr()).offset(i as isize)),
PrintState::OUT)
}}
write!(port, ")");
} else if (x & obj_mask) == string_tag {
// Quote only at top level; escape `"` and `\` inside.
if state == PrintState::OUT { write!(port, "\""); }
unsafe {
let p = &*((x-string_tag) as *const string);
let n = (p.length as i32) >> fx_shift;
for i in 0..n {
let c = std::char::from_u32(*((p.buf.as_ptr()).offset(i as isize)) as u32)
.expect("char");
if c == '"' { write!(port, "\\\""); }
else if c == '\\' { write!(port, "\\\\"); }
else { write!(port, "{}", c); }
}}
if state == PrintState::OUT { write!(port, "\""); }
} else if (x & obj_mask) == symbol_tag {
// Symbol: retag its payload as a string and print it unquoted.
print_ptr_rec(port, (x - symbol_tag) | string_tag, PrintState::IN);
} else if (x & obj_mask) == closure_tag {
write!(port, "#<procedure>");
} else {
// Unknown tag: dump the raw word for debugging.
write!(port, "#<unknown 0x{:x}>", x);
}
}
// Print a Scheme value to stdout, followed by a newline.
fn print_ptr(x: ptr) {
    let mut out = std::io::stdout();
    print_ptr_rec(&mut out, x, PrintState::OUT);
    println!();
}
#[no_mangle]
pub extern "C" fn ik_log(msg: ptr) {
}
// FFI hook for fatal Scheme errors. Currently ignores `x` and exits
// immediately. NOTE(review): it exits with status 0 even though this
// is an error path — confirm whether callers rely on that before
// changing it to a nonzero status.
#[no_mangle]
pub extern "C" fn ik_error(x: ptr) {
std::process::exit(0);
}
// The primitives below are unimplemented stubs: each ignores its
// arguments and returns 0. Names suggest file I/O primitives for the
// Scheme runtime (write/open/flush/read/close) — confirm intended
// semantics before implementing.
#[no_mangle]
pub extern "C" fn s_write(fd: ptr, str: ptr, len: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_open_write(fname: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_fflush(fd: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn scheme_write(fd: ptr, x: ptr, opt: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_open_read(fname: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_read_char(fd: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_close(fd: ptr) -> ptr {
0
}
// Bump-allocate `size` bytes from the heap described by `mem`.
//
// Returns the old heap_next pointer and advances heap_next by `size`.
// If the allocation would reach heap_top, prints "Exception: overflow"
// and exits. `stack` is currently unused — presumably reserved for a
// future GC that scans the stack; confirm before removing.
#[no_mangle]
pub extern "C" fn heap_alloc(mem: *mut memory, stack: *mut c_char, size: usize) -> *mut c_char {
let heap_next = unsafe { (*mem).heap_next };
let heap_new = unsafe { heap_next.offset(size as isize) };
if heap_new >= unsafe { (*mem).heap_top } {
eprintln!("Exception: overflow");
std::process::exit(0);
}
unsafe {(*mem).heap_next = heap_new};
return heap_next;
}
// Reserve `size` bytes on the heap and leak them, returning the raw
// base pointer. The region is later reclaimed by
// deallocate_protected_space, which rebuilds the Vec from this pointer.
fn allocate_protected_space(size: usize) -> *mut c_char {
    let mut backing: Vec<c_char> = Vec::with_capacity(size);
    let base = backing.as_mut_ptr();
    // Leak the Vec so the allocation outlives this frame.
    std::mem::forget(backing);
    base
}
// Release a region produced by allocate_protected_space.
// Contract: `p` must be the base of a leaked Vec<c_char> whose
// capacity was exactly `size`.
fn deallocate_protected_space(p: *mut c_char, size: usize) {
    // SAFETY: reconstitute the leaked Vec (length 0, capacity `size`)
    // and let its Drop free the allocation.
    let reclaimed = unsafe { Vec::from_raw_parts(p, 0, size) };
    drop(reclaimed);
}
// Set up the runtime memory regions, run the compiled Scheme program
// via scheme_entry, print its result, and free the regions.
fn main() {
// Region sizes in bytes.
let stack_size = 16 * 4096;
let heap_size = 4 * 16 * 4096;
let global_size = 16 * 4096;
let scratch_size = 16 * 4096;
// The stack grows downward: its base is the highest address.
let stack_top = allocate_protected_space(stack_size);
let stack_base = unsafe { stack_top.offset(stack_size as isize) };
let heap = allocate_protected_space(heap_size);
let global = allocate_protected_space(global_size);
let scratch = allocate_protected_space(scratch_size);
let uninit = 0 as (*mut std::os::raw::c_void);
// Register context handed to the generated code; all registers zeroed.
let mut ctxt = context {
eax : uninit,
ebx : uninit,
ecx : uninit,
edx : uninit,
esi : uninit,
edi : uninit,
ebp : uninit,
esp : uninit,
};
// heap_top splits the heap in half; the *_alt fields mirror the second
// half (presumably a two-space collector layout — confirm).
let heap_top = unsafe { heap.offset((heap_size as isize)/2) };
let mut mem = memory {
heap_next : heap,
global_next : global,
heap_base : heap,
heap_top : heap_top,
heap_base_alt : heap_top,
heap_top_alt : unsafe { heap.offset(heap_size as isize) },
global_base : global,
stack_base : stack_base,
scratch_base : scratch,
edi : 0
};
print_ptr(unsafe {
scheme_entry(&mut ctxt, stack_base, &mut mem)
});
deallocate_protected_space(stack_top, stack_size);
// Fixed: the heap was previously freed with `stack_size`; passing the
// wrong capacity to Vec::from_raw_parts (inside deallocate) is UB.
deallocate_protected_space(heap, heap_size);
// Also release the remaining regions instead of leaking them.
deallocate_protected_space(global, global_size);
deallocate_protected_space(scratch, scratch_size);
}
Task: implement `ik_log` — the revision that follows replaces the empty stub with an implementation that prints the logged value to stderr.
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(unused_variables)]
#![allow(unused_must_use)]
include!("bindings.rs");
use std::os::raw::c_char;
use std::io::Write;
// Printer context for print_ptr_rec:
//   OUT     - top level: emit surrounding delimiters ("(", "\"", ...)
//   IN      - inside an already-delimited structure: omit them
//   DISPLAY - `display` style: chars/strings printed unescaped
#[derive(PartialEq)]
enum PrintState {
OUT,
IN,
DISPLAY
}
// Recursively print the Scheme value `p` to `port`.
//
// Values are tagged 32-bit words: fixnums, immediate constants
// (#t/#f/()/eof), chars, and heap objects (pair/vector/string/symbol/
// closure) whose tag is stripped to recover the raw address. The tag
// and shift constants come from bindings.rs. `state` controls
// delimiters and escaping (see PrintState).
fn print_ptr_rec<W: Write>(port: &mut W, p: ptr, state: PrintState) {
let x = p as u32;
if (x & fx_mask) == fx_tag {
// Fixnum: signed payload in the bits above the tag.
write!(port, "{}", (x as i32) >> fx_shift);
} else if x == bool_f {
write!(port, "#f");
} else if x == bool_t {
write!(port, "#t");
} else if x == list_nil {
write!(port, "()");
} else if x == eof_obj {
write!(port, "#!eof");
} else if (x & char_mask) == char_tag {
// Character: code point stored above char_shift.
let c = std::char::from_u32(x >> char_shift).
expect("a char");
if state == PrintState::DISPLAY {
write!(port, "{}", c);
} else {
// `write` style: named escapes for whitespace characters.
if c == '\t' { write!(port, "#\\tab"); }
else if c == '\n' { write!(port, "#\\newline"); }
else if c == '\r' { write!(port, "#\\return"); }
else if c == ' ' { write!(port, "#\\space"); }
else { write!(port, "#\\{}", c); }
}
} else if (x & obj_mask) == pair_tag {
// Pair: parenthesize only at top level so proper lists print flat.
if state == PrintState::OUT { write!(port, "("); }
let cell = unsafe { *((x-pair_tag) as *const cell) };
let car = cell.car;
print_ptr_rec(port, car, PrintState::OUT);
let cdr = cell.cdr;
if cdr != list_nil {
if (cdr & obj_mask) != pair_tag {
// Improper list: dotted-pair notation.
write!(port, " . ");
print_ptr_rec(port, cdr, PrintState::OUT);
} else {
// Continue the list without reopening a paren.
write!(port, " ");
print_ptr_rec(port, cdr, PrintState::IN);
}
}
if state == PrintState::OUT { write!(port, ")"); }
} else if (x & obj_mask) == vector_tag {
write!(port, "#(");
unsafe {
let p = &*((x-vector_tag) as *const vector);
// length is itself a fixnum; unshift to get the element count.
let n = (p.length as i32) >> fx_shift;
for i in 0..n {
if i > 0 { write!(port, " "); }
print_ptr_rec(port,
*((p.buf.as_ptr()).offset(i as isize)),
PrintState::OUT)
}}
write!(port, ")");
} else if (x & obj_mask) == string_tag {
// Quote only at top level; escape `"` and `\` inside.
if state == PrintState::OUT { write!(port, "\""); }
unsafe {
let p = &*((x-string_tag) as *const string);
let n = (p.length as i32) >> fx_shift;
for i in 0..n {
let c = std::char::from_u32(*((p.buf.as_ptr()).offset(i as isize)) as u32)
.expect("char");
if c == '"' { write!(port, "\\\""); }
else if c == '\\' { write!(port, "\\\\"); }
else { write!(port, "{}", c); }
}}
if state == PrintState::OUT { write!(port, "\""); }
} else if (x & obj_mask) == symbol_tag {
// Symbol: retag its payload as a string and print it unquoted.
print_ptr_rec(port, (x - symbol_tag) | string_tag, PrintState::IN);
} else if (x & obj_mask) == closure_tag {
write!(port, "#<procedure>");
} else {
// Unknown tag: dump the raw word for debugging.
write!(port, "#<unknown 0x{:x}>", x);
}
}
// Print a Scheme value to stdout, followed by a newline.
fn print_ptr(x: ptr) {
    let mut out = std::io::stdout();
    print_ptr_rec(&mut out, x, PrintState::OUT);
    println!();
}
// FFI hook called from compiled Scheme code to log a value. Prints the
// value to stderr (keeping stdout clean for program output), prefixed
// with "log: ", and returns 0.
#[no_mangle]
pub extern "C" fn ik_log(msg: ptr) -> ptr {
eprint!("log: ");
print_ptr_rec(&mut std::io::stderr(), msg, PrintState::IN);
eprintln!("");
0
}
// FFI hook for fatal Scheme errors. Currently ignores `x` and exits
// immediately. NOTE(review): it exits with status 0 even though this
// is an error path — confirm whether callers rely on that before
// changing it to a nonzero status.
#[no_mangle]
pub extern "C" fn ik_error(x: ptr) {
std::process::exit(0);
}
// The primitives below are unimplemented stubs: each ignores its
// arguments and returns 0. Names suggest file I/O primitives for the
// Scheme runtime (write/open/flush/read/close) — confirm intended
// semantics before implementing.
#[no_mangle]
pub extern "C" fn s_write(fd: ptr, str: ptr, len: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_open_write(fname: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_fflush(fd: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn scheme_write(fd: ptr, x: ptr, opt: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_open_read(fname: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_read_char(fd: ptr) -> ptr {
0
}
#[no_mangle]
pub extern "C" fn s_close(fd: ptr) -> ptr {
0
}
// Bump-allocate `size` bytes from the heap described by `mem`.
//
// Returns the old heap_next pointer and advances heap_next by `size`.
// If the allocation would reach heap_top, prints "Exception: overflow"
// and exits. `stack` is currently unused — presumably reserved for a
// future GC that scans the stack; confirm before removing.
#[no_mangle]
pub extern "C" fn heap_alloc(mem: *mut memory, stack: *mut c_char, size: usize) -> *mut c_char {
let heap_next = unsafe { (*mem).heap_next };
let heap_new = unsafe { heap_next.offset(size as isize) };
if heap_new >= unsafe { (*mem).heap_top } {
eprintln!("Exception: overflow");
std::process::exit(0);
}
unsafe {(*mem).heap_next = heap_new};
return heap_next;
}
// Reserve `size` bytes on the heap and leak them, returning the raw
// base pointer. The region is later reclaimed by
// deallocate_protected_space, which rebuilds the Vec from this pointer.
fn allocate_protected_space(size: usize) -> *mut c_char {
    let mut backing: Vec<c_char> = Vec::with_capacity(size);
    let base = backing.as_mut_ptr();
    // Leak the Vec so the allocation outlives this frame.
    std::mem::forget(backing);
    base
}
// Release a region produced by allocate_protected_space.
// Contract: `p` must be the base of a leaked Vec<c_char> whose
// capacity was exactly `size`.
fn deallocate_protected_space(p: *mut c_char, size: usize) {
    // SAFETY: reconstitute the leaked Vec (length 0, capacity `size`)
    // and let its Drop free the allocation.
    let reclaimed = unsafe { Vec::from_raw_parts(p, 0, size) };
    drop(reclaimed);
}
// Set up the runtime memory regions, run the compiled Scheme program
// via scheme_entry, print its result, and free the regions.
fn main() {
// Region sizes in bytes.
let stack_size = 16 * 4096;
let heap_size = 4 * 16 * 4096;
let global_size = 16 * 4096;
let scratch_size = 16 * 4096;
// The stack grows downward: its base is the highest address.
let stack_top = allocate_protected_space(stack_size);
let stack_base = unsafe { stack_top.offset(stack_size as isize) };
let heap = allocate_protected_space(heap_size);
let global = allocate_protected_space(global_size);
let scratch = allocate_protected_space(scratch_size);
let uninit = 0 as (*mut std::os::raw::c_void);
// Register context handed to the generated code; all registers zeroed.
let mut ctxt = context {
eax : uninit,
ebx : uninit,
ecx : uninit,
edx : uninit,
esi : uninit,
edi : uninit,
ebp : uninit,
esp : uninit,
};
// heap_top splits the heap in half; the *_alt fields mirror the second
// half (presumably a two-space collector layout — confirm).
let heap_top = unsafe { heap.offset((heap_size as isize)/2) };
let mut mem = memory {
heap_next : heap,
global_next : global,
heap_base : heap,
heap_top : heap_top,
heap_base_alt : heap_top,
heap_top_alt : unsafe { heap.offset(heap_size as isize) },
global_base : global,
stack_base : stack_base,
scratch_base : scratch,
edi : 0
};
print_ptr(unsafe {
scheme_entry(&mut ctxt, stack_base, &mut mem)
});
deallocate_protected_space(stack_top, stack_size);
// Fixed: the heap was previously freed with `stack_size`; passing the
// wrong capacity to Vec::from_raw_parts (inside deallocate) is UB.
deallocate_protected_space(heap, heap_size);
// Also release the remaining regions instead of leaking them.
deallocate_protected_space(global, global_size);
deallocate_protected_space(scratch, scratch_size);
}
|
#![allow(non_snake_case)]
use codegen;
use codegen::BitSize;
use cpu::cpu::{
FLAGS_ALL, FLAGS_DEFAULT, FLAGS_MASK, FLAG_ADJUST, FLAG_CARRY, FLAG_DIRECTION, FLAG_INTERRUPT,
FLAG_OVERFLOW, FLAG_SUB, FLAG_ZERO, OPSIZE_8, OPSIZE_16, OPSIZE_32,
};
use cpu::global_pointers;
use jit::JitContext;
use modrm::{jit_add_seg_offset, ModrmByte};
use prefix::SEG_PREFIX_ZERO;
use prefix::{PREFIX_66, PREFIX_67, PREFIX_F2, PREFIX_F3};
use regs;
use regs::{AX, BP, BX, CX, DI, DX, SI, SP};
use regs::{CS, DS, ES, FS, GS, SS};
use regs::{EAX, EBP, EBX, ECX, EDI, EDX, ESI, ESP};
use wasmgen::wasm_builder::{WasmBuilder, WasmLocal};
/// Right-hand operand of a generated ALU operation: either a wasm local
/// holding a runtime value, or a 32-bit immediate baked into the code stream.
enum LocalOrImmediate<'a> {
    WasmLocal(&'a WasmLocal),
    Immediate(i32),
}
impl<'a> LocalOrImmediate<'a> {
    /// Emit wasm code that pushes this operand's value onto the wasm stack:
    /// a `local.get` for a local, an `i32.const` for an immediate.
    pub fn gen_get(&self, builder: &mut WasmBuilder) {
        match *self {
            LocalOrImmediate::WasmLocal(local) => builder.get_local(local),
            LocalOrImmediate::Immediate(value) => builder.const_i32(value),
        }
    }
}
// Jit-compile one instruction: clears any prefixes from the previous
// instruction, records where this instruction started (used elsewhere,
// presumably for fault/fallback reporting — confirm), then dispatches.
pub fn jit_instruction(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes = 0;
    ctx.start_of_current_instruction = ctx.cpu.eip;
    // Dispatch key = opcode byte | operand-size bit in bit 8, selecting the
    // 16-bit vs 32-bit handler in the generated table.
    ::gen::jit::jit(
        ctx.cpu.read_imm8() as u32 | (ctx.cpu.osize_32() as u32) << 8,
        ctx,
        instr_flags,
    );
}
// Continue decoding after a prefix byte: same dispatch as jit_instruction,
// but without resetting prefixes or the instruction start, so prefixes
// accumulate across consecutive prefix bytes.
pub fn jit_handle_prefix(ctx: &mut JitContext, instr_flags: &mut u32) {
    ::gen::jit::jit(
        ctx.cpu.read_imm8() as u32 | (ctx.cpu.osize_32() as u32) << 8,
        ctx,
        instr_flags,
    );
}
// Record a segment-override prefix and continue decoding. The stored value
// is `segment + 1` so that 0 can mean "no override" (cf. the imported
// SEG_PREFIX_ZERO — presumably; confirm against the prefix module).
pub fn jit_handle_segment_prefix(segment: u32, ctx: &mut JitContext, instr_flags: &mut u32) {
    dbg_assert!(segment <= 5);
    ctx.cpu.prefixes |= segment + 1;
    jit_handle_prefix(ctx, instr_flags)
}
// Two-byte (0F-prefixed) opcode dispatch, 16-bit operand size.
pub fn instr16_0F_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ::gen::jit0f::jit(ctx.cpu.read_imm8() as u32, ctx, instr_flags)
}
// Two-byte (0F-prefixed) opcode dispatch, 32-bit operand size (bit 8 set).
pub fn instr32_0F_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ::gen::jit0f::jit(ctx.cpu.read_imm8() as u32 | 0x100, ctx, instr_flags)
}
// --- Prefix-byte handlers. Each records its prefix and keeps decoding. ----

// 0x26: ES segment override.
pub fn instr_26_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(ES, ctx, instr_flags)
}
// 0x2E: CS segment override.
pub fn instr_2E_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(CS, ctx, instr_flags)
}
// 0x36: SS segment override.
pub fn instr_36_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(SS, ctx, instr_flags)
}
// 0x3E: DS segment override.
pub fn instr_3E_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(DS, ctx, instr_flags)
}
// 0x64: FS segment override.
pub fn instr_64_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(FS, ctx, instr_flags)
}
// 0x65: GS segment override.
pub fn instr_65_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(GS, ctx, instr_flags)
}
// 0x66: operand-size override.
pub fn instr_66_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_66;
    jit_handle_prefix(ctx, instr_flags)
}
// 0x67: address-size override.
pub fn instr_67_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_67;
    jit_handle_prefix(ctx, instr_flags)
}
// 0xF0: LOCK prefix — intentionally ignored by this emulator.
pub fn instr_F0_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    // lock: Ignore
    jit_handle_prefix(ctx, instr_flags)
}
// 0xF2: REPNE/REPNZ.
pub fn instr_F2_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_F2;
    jit_handle_prefix(ctx, instr_flags)
}
// 0xF3: REP/REPE/REPZ.
pub fn instr_F3_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_F3;
    jit_handle_prefix(ctx, instr_flags)
}
// Emit an SSE op of the form `name(mem128, xmm[r])`: safe-reads 128 bits
// from the modrm-addressed memory into the sse scratch register, then calls
// the imported helper with (&scratch, r).
fn sse_read128_xmm_mem(ctx: &mut JitContext, name: &str, modrm_byte: ModrmByte, r: u32) {
    let dest = global_pointers::sse_scratch_register as u32;
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2(name);
}
// Emit an SSE op of the form `name(xmm[r1], xmm[r2])`, going through the
// scratch register so the helper's reg128 argument never aliases the
// register file it may also write.
fn sse_read128_xmm_xmm(ctx: &mut JitContext, name: &str, r1: u32, r2: u32) {
    // Make a copy to avoid aliasing problems: Called function expects a reg128, which must not
    // alias with memory
    codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
    let dest = global_pointers::sse_scratch_register;
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2(name);
}
// Emit a 128-bit register-to-register move (xmm[r1] -> xmm[r2]) as two
// aligned 64-bit load/store pairs. For each half the destination address is
// pushed first, then the value loaded from the source offset.
fn sse_mov_xmm_xmm(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Low 64 bits.
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r2) as i32);
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.store_aligned_i64(0);
    // High 64 bits.
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r2) as i32 + 8);
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32 + 8);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.store_aligned_i64(0);
}
// --- MMX operand-loading helpers: push the source operand and the target
// --- mm register index, then call the named imported helper. --------------

// Source is a 32-bit memory operand.
fn mmx_read64_mm_mem32(ctx: &mut JitContext, name: &str, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2(name)
}
// Source is the low 32 bits of mm register r1.
fn mmx_read64_mm_mm32(ctx: &mut JitContext, name: &str, r1: u32, r2: u32) {
    ctx.builder
        .const_i32(global_pointers::get_reg_mmx_offset(r1) as i32);
    ctx.builder.load_aligned_i32(0);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2(name);
}
// Source is a full 64-bit memory operand (helper takes an i64 argument).
fn mmx_read64_mm_mem(ctx: &mut JitContext, name: &str, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2_i64_i32(name)
}
// Source is the full 64 bits of mm register r1.
fn mmx_read64_mm_mm(ctx: &mut JitContext, name: &str, r1: u32, r2: u32) {
    ctx.builder
        .const_i32(global_pointers::get_reg_mmx_offset(r1) as i32);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2_i64_i32(name);
}
// --- Stack push/pop codegen helpers. --------------------------------------

// Push a 16-bit register value.
fn push16_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Push a 32-bit register value. unsafe_clone presumably yields an aliased
// handle to the same wasm local so it can be passed while ctx is borrowed;
// gen_push32 only needs to read it — confirm.
fn push32_reg_jit(ctx: &mut JitContext, r: u32) {
    let reg = ctx.register_locals[r as usize].unsafe_clone();
    codegen::gen_push32(ctx, &reg);
}
// Push a 16-bit immediate.
fn push16_imm_jit(ctx: &mut JitContext, imm: u32) {
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Push a 32-bit immediate.
fn push32_imm_jit(ctx: &mut JitContext, imm: u32) {
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Push a 16-bit value read from a modrm-addressed memory operand.
fn push16_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Push a 32-bit value read from a modrm-addressed memory operand.
fn push32_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Pop 16 bits into a register.
fn pop16_reg_jit(ctx: &mut JitContext, reg: u32) {
    codegen::gen_pop16(ctx);
    codegen::gen_set_reg16(ctx, reg);
}
// Pop 32 bits into a register.
fn pop32_reg_jit(ctx: &mut JitContext, reg: u32) {
    codegen::gen_pop32s(ctx);
    codegen::gen_set_reg32(ctx, reg);
}
// --- Accumulator-with-immediate arithmetic forms (opcodes like 04, 05). ---

// AL <- op(AL, imm8), via an imported helper function named by `op`.
fn group_arith_al_imm8(ctx: &mut JitContext, op: &str, imm8: u32) {
    codegen::gen_get_reg8(ctx, regs::AL);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2_ret(op);
    codegen::gen_set_reg8(ctx, regs::AL);
}
// AX <- op(AX, imm16), via an imported helper function named by `op`.
fn group_arith_ax_imm16(ctx: &mut JitContext, op: &str, imm16: u32) {
    codegen::gen_get_reg16(ctx, regs::AX);
    ctx.builder.const_i32(imm16 as i32);
    ctx.builder.call_fn2_ret(op);
    codegen::gen_set_reg16(ctx, regs::AX);
}
// EAX <- op(EAX, imm32); the 32-bit form inlines codegen through a closure
// instead of calling an imported helper.
fn group_arith_eax_imm32(
    ctx: &mut JitContext,
    op: &dyn Fn(&mut WasmBuilder, &WasmLocal, &LocalOrImmediate),
    imm32: u32,
) {
    op(
        ctx.builder,
        &ctx.register_locals[regs::EAX as usize],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
// Defines handler pairs ($name_mem / $name_reg) for 8-bit instructions that
// only read their operands and apply $fn (inline flag codegen); the
// temporary destination local is freed without being written back.
macro_rules! define_instruction_read8(
    // Source operand is a reg8.
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let source_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::WasmLocal(&source_operand));
            ctx.builder.free_local(dest_operand);
            codegen::gen_free_reg8_or_alias(ctx, r, source_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r1);
            let source_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r2);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::WasmLocal(&source_operand));
            codegen::gen_free_reg8_or_alias(ctx, r1, dest_operand);
            codegen::gen_free_reg8_or_alias(ctx, r2, source_operand);
        }
    );
    // Source operand is an immediate, masked according to $imm.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::Immediate(imm as i32));
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r1);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::Immediate(imm as i32));
            codegen::gen_free_reg8_or_alias(ctx, r1, dest_operand);
        }
    );
);
// Defines handler pairs for 16-bit read-only instructions; $fn receives the
// destination as a wasm local and the source as a local or immediate.
macro_rules! define_instruction_read16(
    // Source operand is a reg16 (kept in register_locals).
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize])
            );
        }
    );
    // Source operand is an immediate, masked according to $imm.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::Immediate(imm as i32),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r: u32, imm: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r as usize],
                &LocalOrImmediate::Immediate(imm as i32),
            );
        }
    );
);
// Defines handler pairs for 32-bit read-only instructions; mirrors
// define_instruction_read16 with 32-bit safe reads.
macro_rules! define_instruction_read32(
    // Source operand is a reg32 (kept in register_locals).
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize])
            );
        }
    );
    // Source operand is an immediate, masked according to $imm.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::Immediate(imm as i32),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r: u32, imm: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r as usize],
                &LocalOrImmediate::Immediate(imm as i32),
            );
        }
    );
);
// Defines handler pairs for 8-bit ops whose result is written back to the
// reg8 operand (the register, not memory, is the destination); the op
// itself is an imported helper named by $fn.
macro_rules! define_instruction_write_reg8(
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_get_reg8(ctx, r);
            codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg8(ctx, r2);
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r2);
        }
    )
);
// 16-bit counterpart of define_instruction_write_reg8: result written back
// to the reg16 operand via an imported helper named by $fn.
macro_rules! define_instruction_write_reg16(
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_get_reg16(ctx, r);
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg16(ctx, r2);
            codegen::gen_get_reg16(ctx, r1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r2);
        }
    )
);
// 32-bit counterpart of the write_reg macros; unlike the 8/16-bit forms the
// op is inline codegen ($fn closure) operating on the register's wasm local.
macro_rules! define_instruction_write_reg32(
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let source_operand = ctx.builder.set_new_local();
            $fn(
                ctx.builder,
                &ctx.register_locals[r as usize],
                &LocalOrImmediate::WasmLocal(&source_operand),
            );
            ctx.builder.free_local(source_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            // Note the operand order: r2 is the destination here.
            $fn(
                ctx.builder,
                &ctx.register_locals[r2 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r1 as usize]),
            );
        }
    );
);
// Compile-time immediate masking: shift counts (imm8_5bits) are reduced to
// their low 5 bits; all other immediate kinds pass through unchanged.
macro_rules! mask_imm(
    ($imm:expr, imm8_5bits) => { $imm & 31 };
    ($imm:expr, imm8) => { $imm };
    ($imm:expr, imm8s) => { $imm };
    ($imm:expr, imm16) => { $imm };
    ($imm:expr, imm32) => { $imm };
);
// Defines handler pairs for 8-bit read-modify-write instructions. The arm
// selects the second operand: a reg8, the constant 1, CL masked to 0..=31,
// none (unary), or an immediate. Memory forms go through
// gen_safe_read_write, which supplies the loaded value to the closure and
// stores the closure's result back.
macro_rules! define_instruction_read_write_mem8(
    // Second operand: reg8.
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg8(ctx, r);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg8(ctx, r1);
            codegen::gen_get_reg8(ctx, r2);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // Second operand: constant 1 (shift-by-one encodings).
    ($fn:expr, $name_mem:ident, $name_reg:ident, constant_one) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(1);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.const_i32(1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // Second operand: CL, masked to 0..=31.
    ($fn:expr, $name_mem:ident, $name_reg:ident, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg8(ctx, r1);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // Unary op (e.g. NOT/NEG-style helpers): no second operand.
    ($fn:expr, $name_mem:ident, $name_reg:ident, none) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                ctx.builder.call_fn1_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.call_fn1_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // Second operand: immediate, masked according to $imm.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
);
// 16-bit read-modify-write handler definitions; same structure as the 8-bit
// macro plus three-operand arms ((reg, cl) and (reg, imm), used for ops
// like SHLD/SHRD-style encodings — presumably; confirm at the use sites).
macro_rules! define_instruction_read_write_mem16(
    // Second operand: reg16.
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg16(ctx, r);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg16(ctx, r2);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // Second operand: constant 1.
    ($fn:expr, $name_mem:ident, $name_reg:ident, constant_one) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(1);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg16(ctx, r1);
            ctx.builder.const_i32(1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // Second operand: CL, masked to 0..=31.
    ($fn:expr, $name_mem:ident, $name_reg:ident, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // Three operands: destination, reg16, and CL masked to 0..=31.
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg16(ctx, r);
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg16(ctx, r2);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // Three operands: destination, reg16, and an immediate.
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg16(ctx, r);
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg16(ctx, r2);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // Unary op with inline codegen ($fn closure) on a wasm local.
    ($fn:expr, $name_mem:ident, $name_reg:ident, none) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                let mut dest_operand = ctx.builder.set_new_local();
                $fn(ctx.builder, &mut dest_operand);
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(ctx.builder, &mut ctx.register_locals[r1 as usize]);
        }
    );
    // Second operand: immediate, masked according to $imm.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg16(ctx, r1);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
);
// 32-bit read-modify-write handler definitions. Unlike the 8/16-bit macros,
// several arms apply $fn as inline codegen on a wasm local instead of
// calling an imported helper. In the `cl` arm the full ECX local is passed
// unmasked — the inline $fn (e.g. the rotate/shift generators) is expected
// to mask the count itself; confirm at the use sites.
macro_rules! define_instruction_read_write_mem32(
    // Second operand: reg32 (inline codegen).
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(
                    ctx.builder,
                    &dest_operand,
                    &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
                );
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
            );
        }
    );
    // Second operand: constant 1 (inline codegen).
    ($fn:expr, $name_mem:ident, $name_reg:ident, constant_one) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(ctx.builder, &dest_operand, &LocalOrImmediate::Immediate(1));
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(ctx.builder, &ctx.register_locals[r1 as usize], &LocalOrImmediate::Immediate(1));
        }
    );
    // Second operand: ECX local (count masking left to $fn).
    ($fn:expr, $name_mem:ident, $name_reg:ident, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(
                    ctx.builder,
                    &dest_operand,
                    &LocalOrImmediate::WasmLocal(&ctx.register_locals[regs::ECX as usize]),
                );
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[regs::ECX as usize]),
            );
        }
    );
    // Three operands: destination, reg32, and CL masked to 0..=31
    // (imported helper via call_fn3_ret).
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg32(ctx, r);
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg32(ctx, r1);
            codegen::gen_get_reg32(ctx, r2);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg32(ctx, r1);
        }
    );
    // Three operands: destination, reg32, and an immediate.
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg32(ctx, r);
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg32(ctx, r1);
            codegen::gen_get_reg32(ctx, r2);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg32(ctx, r1);
        }
    );
    // Unary op with inline codegen on a wasm local.
    ($fn:expr, $name_mem:ident, $name_reg:ident, none) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let mut dest_operand = ctx.builder.set_new_local();
                $fn(ctx.builder, &mut dest_operand);
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(ctx.builder, &mut ctx.register_locals[r1 as usize]);
        }
    );
    // Second operand: immediate, masked according to $imm (inline codegen).
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(
                    ctx.builder,
                    &dest_operand,
                    &LocalOrImmediate::Immediate(imm),
                );
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::Immediate(imm as i32),
            );
        }
    );
);
// Emit a 32-bit ADD with lazy flags: the original destination is saved as
// last_op1 before being overwritten, the sum is stored both in the local
// and in last_result, and all arithmetic flags are marked for lazy
// recomputation.
fn gen_add32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    // Must run before the local is overwritten below.
    codegen::gen_set_last_op1(builder, &dest_operand);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.add_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL);
}
// Emit a 32-bit SUB with lazy flags; identical shape to gen_add32 but the
// changed-flags mask additionally records FLAG_SUB so the lazy flag
// evaluation knows the operation was a subtraction.
fn gen_sub32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    // Must run before the local is overwritten below.
    codegen::gen_set_last_op1(builder, &dest_operand);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.sub_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL | FLAG_SUB);
}
// Emit a CMP: computes dest - source purely for flags; no register is
// written. Stores last_result (the difference), last_op1 (the original
// dest), the operand size and the changed-flags mask so flags are derived
// lazily. For 8/16-bit sizes both stored values are truncated to the
// operand width.
fn gen_cmp(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
    size: i32,
) {
    // last_result = (dest - source) & width_mask
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.sub_i32();
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    // last_op1 = dest & width_mask
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL | FLAG_SUB);
}
// Size-specialized CMP wrappers.
fn gen_cmp8(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_cmp(builder, dest, source, OPSIZE_8)
}
fn gen_cmp16(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_cmp(builder, dest, source, OPSIZE_16)
}
fn gen_cmp32(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_cmp(builder, dest, source, OPSIZE_32)
}
// Emit a 32-bit ADC by calling the imported "adc32" helper — the operation
// depends on the current carry flag, so it is not inlined here.
fn gen_adc32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("adc32");
    builder.set_local(dest_operand);
}
// Emit a 32-bit SBB by calling the imported "sbb32" helper — like ADC it
// consumes the current carry flag, so it is not inlined.
fn gen_sbb32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("sbb32");
    builder.set_local(dest_operand);
}
// Emit a 32-bit AND. Logical ops define carry/overflow/adjust immediately
// (cleared here), while the remaining flags are left for lazy evaluation
// from last_result.
fn gen_and32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.and_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Emit a TEST: dest & source computed for flags only (no register written).
// The AND result goes to last_result; carry/overflow/adjust are cleared
// immediately, the rest evaluated lazily for the given operand size.
fn gen_test(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
    size: i32,
) {
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.and_i32();
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Size-specialized TEST wrappers.
fn gen_test8(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_test(builder, dest, source, OPSIZE_8)
}
fn gen_test16(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_test(builder, dest, source, OPSIZE_16)
}
fn gen_test32(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_test(builder, dest, source, OPSIZE_32)
}
// Emit a 32-bit OR; flag handling identical to gen_and32 (logical-op
// flags: carry/overflow/adjust cleared, the rest lazy).
fn gen_or32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.or_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Emit a 32-bit XOR; flag handling identical to gen_and32 (logical-op
// flags: carry/overflow/adjust cleared, the rest lazy).
fn gen_xor32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.xor_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
/// Emit code for a 32-bit ROL: dest = rol32(dest, count & 31). The
/// rotation and its flag updates are performed by the imported "rol32"
/// helper.
///
/// Fix: the original emitted the `& 31` mask twice (once per match arm
/// and once again after the match) — `(x & 31) & 31 == x & 31`, so the
/// second mask only bloated the generated wasm. Masking once also
/// matches gen_shl32/gen_shr32/gen_sar32.
fn gen_rol32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    // Mask the count to 0..31 (x86 semantics for 32-bit rotates)
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("rol32");
    builder.set_local(dest_operand);
}
/// Emit code for a 32-bit ROR: dest = ror32(dest, count & 31). The
/// rotation and its flag updates are performed by the imported "ror32"
/// helper.
///
/// Fix: removed the redundant second `& 31` mask the original emitted
/// after the match — the count is already masked in both arms, so the
/// extra instructions were dead weight in the generated wasm.
fn gen_ror32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    // Mask the count to 0..31 (x86 semantics for 32-bit rotates)
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("ror32");
    builder.set_local(dest_operand);
}
/// Emit code for a 32-bit RCL (rotate through carry):
/// dest = rcl32(dest, count & 31), delegated to the imported "rcl32"
/// helper, which handles the carry flag.
///
/// Fix: removed the redundant second `& 31` mask the original emitted
/// after the match — both arms already mask the count, so
/// `(x & 31) & 31` is a no-op that only enlarged the generated wasm.
fn gen_rcl32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    // Mask the count to 0..31 before calling the helper
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("rcl32");
    builder.set_local(dest_operand);
}
/// Emit code for a 32-bit RCR (rotate through carry):
/// dest = rcr32(dest, count & 31), delegated to the imported "rcr32"
/// helper, which handles the carry flag.
///
/// Fix: removed the redundant second `& 31` mask the original emitted
/// after the match — both arms already mask the count; the duplicate
/// masking produced needless wasm instructions without changing the
/// value.
fn gen_rcr32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    // Mask the count to 0..31 before calling the helper
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("rcr32");
    builder.set_local(dest_operand);
}
/// Shift count operand for the 32-bit shift emitters below: either a
/// runtime value held in a wasm local or a compile-time immediate.
/// Callers (gen_shl32/gen_shr32/gen_sar32) mask the count to 0..31
/// before constructing this.
enum ShiftCount {
    Local(WasmLocal),
    Immediate(i32),
}
impl ShiftCount {
    /// Push the count onto the wasm stack.
    pub fn gen_get(builder: &mut WasmBuilder, count: &ShiftCount) {
        match &count {
            ShiftCount::Local(l) => builder.get_local(l),
            ShiftCount::Immediate(i) => builder.const_i32(*i),
        }
    }
    /// Push `32 - count` onto the wasm stack (used by gen_shl32 to
    /// extract the last bit shifted out, which becomes CF).
    pub fn gen_get_thirtytwo_minus(builder: &mut WasmBuilder, count: &ShiftCount) {
        match &count {
            ShiftCount::Local(l) => {
                builder.const_i32(32);
                builder.get_local(l);
                builder.sub_i32();
            },
            ShiftCount::Immediate(i) => builder.const_i32(32 - *i),
        }
    }
    /// Push `count - 1` onto the wasm stack (used by gen_shr32/gen_sar32
    /// to extract the last bit shifted out, which becomes CF).
    pub fn gen_get_minus_one(builder: &mut WasmBuilder, count: &ShiftCount) {
        match &count {
            ShiftCount::Local(l) => {
                builder.get_local(l);
                builder.const_i32(1);
                builder.sub_i32()
            },
            ShiftCount::Immediate(i) => builder.const_i32(*i - 1),
        }
    }
}
/// Emit code for a 32-bit SHL. CF becomes the last bit shifted out and
/// OF becomes CF XOR the result's sign bit; the remaining flags are
/// evaluated lazily from last_result. A count of zero (after masking
/// with 31) leaves the destination and all flags unchanged — the
/// runtime-count case branches past everything, the immediate case
/// returns without emitting anything.
fn gen_shl32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    let count = match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            // Runtime count: mask to 0..31, keep it in a local, and skip
            // the whole operation if the masked count is zero
            let exit = builder.block_void();
            builder.get_local(l);
            builder.const_i32(31); // Note: mask can probably be avoided since wasm has the same semantics on shl_i32
            builder.and_i32();
            let count = builder.tee_new_local();
            builder.eqz_i32();
            builder.br_if(exit);
            ShiftCount::Local(count)
        },
        LocalOrImmediate::Immediate(i) => {
            if *i & 31 == 0 {
                return;
            }
            ShiftCount::Immediate(*i & 31)
        },
    };
    // b = last bit shifted out = bit (32 - count) of the original value
    builder.get_local(&dest_operand);
    ShiftCount::gen_get_thirtytwo_minus(builder, &count);
    builder.shr_u_i32();
    builder.const_i32(1);
    builder.and_i32();
    let b = builder.set_new_local();
    // dest <<= count
    builder.get_local(dest_operand);
    ShiftCount::gen_get(builder, &count);
    builder.shl_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW);
    // flags = (flags & ~(CF|OF)) | b | (((b ^ sign(result)) << 11) & OF)
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!(FLAG_CARRY | FLAG_OVERFLOW));
    builder.and_i32();
    builder.get_local(&b);
    builder.or_i32();
    {
        // OF: b XOR the new sign bit, shifted into the OF bit position
        builder.get_local(&b);
        builder.get_local(&dest_operand);
        builder.const_i32(31);
        builder.shr_u_i32();
        builder.xor_i32();
        builder.const_i32(11);
        builder.shl_i32();
        builder.const_i32(FLAG_OVERFLOW);
        builder.and_i32();
        builder.or_i32();
    }
    builder.store_aligned_i32(0);
    builder.free_local(b);
    if let ShiftCount::Local(l) = count {
        // Close the early-exit block opened for the runtime-count case
        builder.block_end();
        builder.free_local(l);
    }
}
/// Emit code for a 32-bit SHR (logical shift right). CF becomes the
/// last bit shifted out (bit count-1 of the original value) and OF
/// becomes bit 31 of the original value (hence the `>> 20` into the OF
/// bit position); the remaining flags are evaluated lazily. A masked
/// count of zero leaves destination and flags unchanged.
fn gen_shr32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    let count = match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            // Runtime count: mask to 0..31 and skip everything if zero
            let exit = builder.block_void();
            builder.get_local(l);
            builder.const_i32(31); // Note: mask can probably be avoided since wasm has the same semantics on shl_i32
            builder.and_i32();
            let count = builder.tee_new_local();
            builder.eqz_i32();
            builder.br_if(exit);
            ShiftCount::Local(count)
        },
        LocalOrImmediate::Immediate(i) => {
            if *i & 31 == 0 {
                return;
            }
            ShiftCount::Immediate(*i & 31)
        },
    };
    // Update CF and OF before the destination is overwritten
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!(FLAG_CARRY | FLAG_OVERFLOW));
    builder.and_i32();
    {
        // CF = bit (count - 1) of the original value
        builder.get_local(dest_operand);
        ShiftCount::gen_get_minus_one(builder, &count);
        builder.shr_u_i32();
        builder.const_i32(1);
        builder.and_i32();
        builder.or_i32()
    }
    {
        // OF = original sign bit (bit 31 -> bit 11 via >> 20)
        builder.get_local(dest_operand);
        builder.const_i32(20);
        builder.shr_u_i32();
        builder.const_i32(FLAG_OVERFLOW);
        builder.and_i32();
        builder.or_i32()
    }
    builder.store_aligned_i32(0);
    // dest >>= count (unsigned)
    builder.get_local(dest_operand);
    ShiftCount::gen_get(builder, &count);
    builder.shr_u_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW);
    if let ShiftCount::Local(l) = count {
        // Close the early-exit block opened for the runtime-count case
        builder.block_end();
        builder.free_local(l);
    }
}
/// Emit code for a 32-bit SAR (arithmetic shift right). CF becomes the
/// last bit shifted out; OF is left cleared (it is masked out of the
/// flags and never re-set); remaining flags are evaluated lazily. A
/// masked count of zero leaves destination and flags unchanged.
fn gen_sar32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    let count = match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            // Runtime count: mask to 0..31 and skip everything if zero
            let exit = builder.block_void();
            builder.get_local(l);
            builder.const_i32(31); // Note: mask can probably be avoided since wasm has the same semantics on shl_i32
            builder.and_i32();
            let count = builder.tee_new_local();
            builder.eqz_i32();
            builder.br_if(exit);
            ShiftCount::Local(count)
        },
        LocalOrImmediate::Immediate(i) => {
            if *i & 31 == 0 {
                return;
            }
            ShiftCount::Immediate(*i & 31)
        },
    };
    // flags = (flags & ~(CF|OF)) | CF, computed before dest is overwritten
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!(FLAG_CARRY | FLAG_OVERFLOW));
    builder.and_i32();
    {
        // CF = bit (count - 1) of the original value
        builder.get_local(dest_operand);
        ShiftCount::gen_get_minus_one(builder, &count);
        builder.shr_u_i32();
        builder.const_i32(1);
        builder.and_i32();
        builder.or_i32()
    }
    builder.store_aligned_i32(0);
    // dest >>= count (sign-extending)
    builder.get_local(dest_operand);
    ShiftCount::gen_get(builder, &count);
    builder.shr_s_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW);
    if let ShiftCount::Local(l) = count {
        // Close the early-exit block opened for the runtime-count case
        builder.block_end();
        builder.free_local(l);
    }
}
/// Emit code for a 32-bit XADD: tmp = reg[r]; reg[r] = dest;
/// dest += tmp. The addition is delegated to gen_add32, which also
/// performs the flag updates.
fn gen_xadd32(ctx: &mut JitContext, dest_operand: &WasmLocal, r: u32) {
    // Save the original value of reg[r]
    ctx.builder.get_local(&ctx.register_locals[r as usize]);
    let tmp = ctx.builder.set_new_local();
    // reg[r] = dest (the destination's pre-add value)
    ctx.builder.get_local(&dest_operand);
    codegen::gen_set_reg32(ctx, r);
    gen_add32(
        ctx.builder,
        &dest_operand,
        &LocalOrImmediate::WasmLocal(&tmp),
    );
    ctx.builder.free_local(tmp);
}
/// Emit code for a 32-bit CMPXCHG. Expects the destination's current
/// value on top of the wasm stack and leaves the value to write back on
/// the stack: reg[r] when EAX equals it, otherwise the old value (with
/// EAX updated to the old value). register_locals[0] is EAX. Flags are
/// set via gen_cmp32 as for CMP.
fn gen_cmpxchg32(ctx: &mut JitContext, r: u32) {
    // Pop the destination's current value into a local
    let source = ctx.builder.set_new_local();
    // CMP eax, value — sets the arithmetic flags
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::WasmLocal(&source),
    );
    ctx.builder.get_local(&ctx.register_locals[0]);
    ctx.builder.get_local(&source);
    ctx.builder.eq_i32();
    ctx.builder.if_i32();
    // Equal: the destination receives reg[r]
    codegen::gen_get_reg32(ctx, r);
    ctx.builder.else_();
    // Not equal: EAX receives the old value; destination is rewritten
    // with its own old value
    ctx.builder.get_local(&source);
    codegen::gen_set_reg32(ctx, regs::EAX);
    ctx.builder.get_local(&source);
    ctx.builder.block_end();
    ctx.builder.free_local(source);
}
/// Emit code for a 32-bit MUL: EDX:EAX = EAX * source. Expects the
/// source value on top of the wasm stack. CF and OF (the constant 1 is
/// bit 0, presumably CF — it is paired with FLAG_OVERFLOW here as in
/// the interpreter's MUL) are set iff the high half (EDX) is non-zero;
/// the remaining flags come lazily from last_result.
fn gen_mul32(ctx: &mut JitContext) {
    // (u64)source * (u64)eax
    ctx.builder.extend_unsigned_i32_to_i64();
    codegen::gen_get_reg32(ctx, regs::EAX);
    ctx.builder.extend_unsigned_i32_to_i64();
    ctx.builder.mul_i64();
    let result = ctx.builder.tee_new_local_i64();
    // EDX = high 32 bits
    ctx.builder.const_i64(32);
    ctx.builder.shr_u_i64();
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
    // EAX = low 32 bits
    ctx.builder.get_local_i64(&result);
    ctx.builder.free_local_i64(result);
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EAX);
    // CF|OF set iff the product overflowed 32 bits (EDX != 0)
    codegen::gen_get_reg32(ctx, regs::EDX);
    ctx.builder.if_void();
    codegen::gen_set_flags_bits(ctx.builder, 1 | FLAG_OVERFLOW);
    ctx.builder.else_();
    codegen::gen_clear_flags_bits(ctx.builder, 1 | FLAG_OVERFLOW);
    ctx.builder.block_end();
    codegen::gen_set_last_result(ctx.builder, &ctx.register_locals[regs::EAX as usize]);
    codegen::gen_set_last_op_size(ctx.builder, OPSIZE_32);
    codegen::gen_set_flags_changed(ctx.builder, FLAGS_ALL & !1 & !FLAG_OVERFLOW);
}
/// Two-operand IMUL (dest *= source), implemented via the three-operand
/// form with dest doubling as the first source.
fn gen_imul_reg32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    gen_imul3_reg32(builder, dest_operand, dest_operand, source_operand);
}
/// Three-operand IMUL: dest = low 32 bits of (i64)src1 * (i64)src2.
/// CF and OF (bit 0 | FLAG_OVERFLOW) are set iff the signed 64-bit
/// product does not fit in 32 bits; other flags are evaluated lazily
/// from last_result.
fn gen_imul3_reg32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand1: &WasmLocal,
    source_operand2: &LocalOrImmediate,
) {
    // Full signed 64-bit product
    builder.get_local(&source_operand1);
    builder.extend_signed_i32_to_i64();
    source_operand2.gen_get(builder);
    builder.extend_signed_i32_to_i64();
    builder.mul_i64();
    let result = builder.tee_new_local_i64();
    // dest = truncated product
    builder.wrap_i64_to_i32();
    builder.set_local(&dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !1 & !FLAG_OVERFLOW);
    // Overflow test: product != sign_extend(truncate(product));
    // the boolean (0/1) is multiplied by (CF|OF) to set both bits at once
    builder.const_i32(global_pointers::flags as i32);
    builder.get_local_i64(&result);
    builder.wrap_i64_to_i32();
    builder.extend_signed_i32_to_i64();
    builder.get_local_i64(&result);
    builder.ne_i64();
    builder.const_i32(1 | FLAG_OVERFLOW);
    builder.mul_i32();
    codegen::gen_get_flags(builder);
    builder.const_i32(!1 & !FLAG_OVERFLOW);
    builder.and_i32();
    builder.or_i32();
    builder.store_aligned_i32(0);
    builder.free_local_i64(result);
}
/// Emit code for a 32-bit DIV: unsigned divide of EDX:EAX by `source`;
/// quotient -> EAX, remainder -> EDX. Raises #DE (divide error) when
/// the divisor is zero or the quotient does not fit in 32 bits.
fn gen_div32(ctx: &mut JitContext, source: &WasmLocal) {
    let done = ctx.builder.block_void();
    {
        // Branching to `exception` falls through to gen_trigger_de below
        let exception = ctx.builder.block_void();
        {
            // Divisor == 0 -> #DE
            ctx.builder.get_local(source);
            ctx.builder.eqz_i32();
            ctx.builder.br_if(exception);
            // dividend = ((u64)EDX << 32) | EAX
            codegen::gen_get_reg32(ctx, regs::EDX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.const_i64(32);
            ctx.builder.shl_i64();
            codegen::gen_get_reg32(ctx, regs::EAX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.or_i64();
            let dest_operand = ctx.builder.tee_new_local_i64();
            ctx.builder.get_local(source);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.div_i64();
            let result = ctx.builder.tee_new_local_i64();
            // Quotient > 0xFFFF_FFFF -> #DE
            ctx.builder.const_i64(0xFFFF_FFFF);
            ctx.builder.gtu_i64();
            ctx.builder.br_if(exception);
            // EDX = dividend % divisor
            ctx.builder.get_local_i64(&dest_operand);
            ctx.builder.get_local(source);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.rem_i64();
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EDX);
            // EAX = quotient
            ctx.builder.get_local_i64(&result);
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EAX);
            ctx.builder.br(done);
            // NOTE(review): free_local after br — presumably builder-side
            // bookkeeping only (no wasm emitted); confirm against WasmBuilder
            ctx.builder.free_local_i64(dest_operand);
            ctx.builder.free_local_i64(result);
        }
        ctx.builder.block_end();
        codegen::gen_trigger_de(ctx);
    }
    ctx.builder.block_end();
}
/// Emit code for BT: CF (bit 0 of flags) = bit (offset & offset_mask)
/// of bit_base; no other flag bits are touched. gen_bit_rmw passes an
/// offset_mask of 7 for its per-byte memory accesses.
fn gen_bt(
    builder: &mut WasmBuilder,
    bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    // flags = (flags & ~CF) | ((bit_base >> (offset & mask)) & 1)
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!1);
    builder.and_i32();
    builder.get_local(bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(imm & offset_mask as i32),
    }
    builder.shr_u_i32();
    builder.const_i32(1);
    builder.and_i32();
    builder.or_i32();
    builder.store_aligned_i32(0);
    // CF is now concrete in flags, so exclude it from lazy evaluation
    codegen::gen_clear_flags_changed_bits(builder, 1);
}
/// Emit code for BTS: BT (CF = tested bit), then set that bit in the
/// destination.
fn gen_bts(
    builder: &mut WasmBuilder,
    dest_bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    gen_bt(builder, dest_bit_base, bit_offset, offset_mask);
    // dest |= 1 << (offset & mask)
    builder.get_local(dest_bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            builder.const_i32(1);
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
            builder.shl_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(1 << (imm & offset_mask as i32)),
    }
    builder.or_i32();
    builder.set_local(dest_bit_base);
}
/// Emit code for BTC: BT (CF = tested bit), then complement that bit in
/// the destination.
fn gen_btc(
    builder: &mut WasmBuilder,
    dest_bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    gen_bt(builder, dest_bit_base, bit_offset, offset_mask);
    // dest ^= 1 << (offset & mask)
    builder.get_local(dest_bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            builder.const_i32(1);
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
            builder.shl_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(1 << (imm & offset_mask as i32)),
    }
    builder.xor_i32();
    builder.set_local(dest_bit_base);
}
/// Emit code for BTR: BT (CF = tested bit), then clear that bit in the
/// destination.
fn gen_btr(
    builder: &mut WasmBuilder,
    dest_bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    gen_bt(builder, dest_bit_base, bit_offset, offset_mask);
    // dest &= ~(1 << (offset & mask)); the NOT is expressed as XOR -1
    builder.get_local(dest_bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            builder.const_i32(1);
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
            builder.shl_i32();
            builder.const_i32(-1);
            builder.xor_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(!(1 << (imm & offset_mask as i32))),
    }
    builder.and_i32();
    builder.set_local(dest_bit_base);
}
/// Emit a read-modify-write BT{S,C,R} on a memory operand. Resolves the
/// modrm address, adds the bit offset divided by 8 (signed for register
/// offsets, per x86 bit-string addressing) so that a single byte is
/// read and written, then applies `op` (gen_bts/btc/btr) with an
/// offset_mask of 7 to address the bit within that byte.
fn gen_bit_rmw(
    ctx: &mut JitContext,
    modrm_byte: ModrmByte,
    op: &dyn Fn(&mut WasmBuilder, &WasmLocal, &LocalOrImmediate, u32),
    source_operand: &LocalOrImmediate,
    opsize: i32,
) {
    dbg_assert!(opsize == 16 || opsize == 32);
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            // address += sign_extended(offset) >> 3 (arithmetic shift keeps
            // negative offsets negative)
            ctx.builder.get_local(l);
            if opsize == 16 {
                codegen::sign_extend_i16(ctx.builder);
            }
            ctx.builder.const_i32(3);
            ctx.builder.shr_s_i32();
            ctx.builder.add_i32();
        },
        LocalOrImmediate::Immediate(imm8) => {
            // Immediate offsets are first reduced modulo the operand size
            ctx.builder.const_i32((*imm8 as i32 & (opsize - 1)) >> 3);
            ctx.builder.add_i32();
        },
    }
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
        let value_local = ctx.builder.set_new_local();
        op(ctx.builder, &value_local, source_operand, 7);
        ctx.builder.get_local(&value_local);
        ctx.builder.free_local(value_local);
    });
    ctx.builder.free_local(address_local);
}
/// Emit code for a 32-bit BSF (bit scan forward), delegated to the
/// imported "bsf32" helper (which also handles the flag behavior).
fn gen_bsf32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("bsf32");
    builder.set_local(dest_operand);
}
/// Emit code for a 32-bit BSR (bit scan reverse), delegated to the
/// imported "bsr32" helper (which also handles the flag behavior).
fn gen_bsr32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("bsr32");
    builder.set_local(dest_operand);
}
/// Emit code for BSWAP: reverse the byte order of a 32-bit register
/// using the classic two-rotate trick:
/// (rotl(x, 8) & 0x00FF00FF) | (rotl(x, 24) & 0xFF00FF00).
fn gen_bswap(ctx: &mut JitContext, reg: i32) {
    let l = &ctx.register_locals[reg as usize];
    // rotl 8 puts bytes 1 and 3 of the result into place ...
    ctx.builder.get_local(l);
    ctx.builder.const_i32(8);
    ctx.builder.rotl_i32();
    ctx.builder.const_i32(0xFF00FF);
    ctx.builder.and_i32();
    // ... rotl 24 puts bytes 0 and 2 into place
    ctx.builder.get_local(l);
    ctx.builder.const_i32(24);
    ctx.builder.rotl_i32();
    ctx.builder.const_i32(0xFF00FF00u32 as i32);
    ctx.builder.and_i32();
    ctx.builder.or_i32();
    ctx.builder.set_local(l);
}
// 0x00..0x05: ADD
define_instruction_read_write_mem8!("add8", instr_00_mem_jit, instr_00_reg_jit, reg);
define_instruction_read_write_mem16!("add16", instr16_01_mem_jit, instr16_01_reg_jit, reg);
define_instruction_read_write_mem32!(gen_add32, instr32_01_mem_jit, instr32_01_reg_jit, reg);
define_instruction_write_reg8!("add8", instr_02_mem_jit, instr_02_reg_jit);
define_instruction_write_reg16!("add16", instr16_03_mem_jit, instr16_03_reg_jit);
define_instruction_write_reg32!(gen_add32, instr32_03_mem_jit, instr32_03_reg_jit);
pub fn instr_04_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "add8", imm8); }
pub fn instr16_05_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "add16", imm16);
}
pub fn instr32_05_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_add32, imm32);
}
// 0x08..0x0D: OR
define_instruction_read_write_mem8!("or8", instr_08_mem_jit, instr_08_reg_jit, reg);
define_instruction_read_write_mem16!("or16", instr16_09_mem_jit, instr16_09_reg_jit, reg);
define_instruction_read_write_mem32!(gen_or32, instr32_09_mem_jit, instr32_09_reg_jit, reg);
define_instruction_write_reg8!("or8", instr_0A_mem_jit, instr_0A_reg_jit);
define_instruction_write_reg16!("or16", instr16_0B_mem_jit, instr16_0B_reg_jit);
define_instruction_write_reg32!(gen_or32, instr32_0B_mem_jit, instr32_0B_reg_jit);
pub fn instr_0C_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "or8", imm8); }
pub fn instr16_0D_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "or16", imm16);
}
pub fn instr32_0D_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_or32, imm32);
}
// 0x10..0x15: ADC
define_instruction_read_write_mem8!("adc8", instr_10_mem_jit, instr_10_reg_jit, reg);
define_instruction_read_write_mem16!("adc16", instr16_11_mem_jit, instr16_11_reg_jit, reg);
define_instruction_read_write_mem32!(gen_adc32, instr32_11_mem_jit, instr32_11_reg_jit, reg);
define_instruction_write_reg8!("adc8", instr_12_mem_jit, instr_12_reg_jit);
define_instruction_write_reg16!("adc16", instr16_13_mem_jit, instr16_13_reg_jit);
define_instruction_write_reg32!(gen_adc32, instr32_13_mem_jit, instr32_13_reg_jit);
pub fn instr_14_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "adc8", imm8); }
pub fn instr16_15_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "adc16", imm16);
}
pub fn instr32_15_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_adc32, imm32);
}
// 0x18..0x1D: SBB
define_instruction_read_write_mem8!("sbb8", instr_18_mem_jit, instr_18_reg_jit, reg);
define_instruction_read_write_mem16!("sbb16", instr16_19_mem_jit, instr16_19_reg_jit, reg);
define_instruction_read_write_mem32!(gen_sbb32, instr32_19_mem_jit, instr32_19_reg_jit, reg);
define_instruction_write_reg8!("sbb8", instr_1A_mem_jit, instr_1A_reg_jit);
define_instruction_write_reg16!("sbb16", instr16_1B_mem_jit, instr16_1B_reg_jit);
define_instruction_write_reg32!(gen_sbb32, instr32_1B_mem_jit, instr32_1B_reg_jit);
pub fn instr_1C_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "sbb8", imm8); }
pub fn instr16_1D_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "sbb16", imm16);
}
pub fn instr32_1D_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_sbb32, imm32);
}
// 0x20..0x25: AND
define_instruction_read_write_mem8!("and8", instr_20_mem_jit, instr_20_reg_jit, reg);
define_instruction_read_write_mem16!("and16", instr16_21_mem_jit, instr16_21_reg_jit, reg);
define_instruction_read_write_mem32!(gen_and32, instr32_21_mem_jit, instr32_21_reg_jit, reg);
define_instruction_write_reg8!("and8", instr_22_mem_jit, instr_22_reg_jit);
define_instruction_write_reg16!("and16", instr16_23_mem_jit, instr16_23_reg_jit);
define_instruction_write_reg32!(gen_and32, instr32_23_mem_jit, instr32_23_reg_jit);
pub fn instr_24_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "and8", imm8); }
pub fn instr16_25_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "and16", imm16);
}
pub fn instr32_25_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_and32, imm32);
}
// 0x28..0x2D: SUB
define_instruction_read_write_mem8!("sub8", instr_28_mem_jit, instr_28_reg_jit, reg);
define_instruction_read_write_mem16!("sub16", instr16_29_mem_jit, instr16_29_reg_jit, reg);
define_instruction_read_write_mem32!(gen_sub32, instr32_29_mem_jit, instr32_29_reg_jit, reg);
define_instruction_write_reg8!("sub8", instr_2A_mem_jit, instr_2A_reg_jit);
define_instruction_write_reg16!("sub16", instr16_2B_mem_jit, instr16_2B_reg_jit);
define_instruction_write_reg32!(gen_sub32, instr32_2B_mem_jit, instr32_2B_reg_jit);
pub fn instr_2C_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "sub8", imm8); }
pub fn instr16_2D_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "sub16", imm16);
}
pub fn instr32_2D_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_sub32, imm32);
}
// 0x30..0x35: XOR
define_instruction_read_write_mem8!("xor8", instr_30_mem_jit, instr_30_reg_jit, reg);
define_instruction_read_write_mem16!("xor16", instr16_31_mem_jit, instr16_31_reg_jit, reg);
define_instruction_read_write_mem32!(gen_xor32, instr32_31_mem_jit, instr32_31_reg_jit, reg);
define_instruction_write_reg8!("xor8", instr_32_mem_jit, instr_32_reg_jit);
define_instruction_write_reg16!("xor16", instr16_33_mem_jit, instr16_33_reg_jit);
define_instruction_write_reg32!(gen_xor32, instr32_33_mem_jit, instr32_33_reg_jit);
pub fn instr_34_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "xor8", imm8); }
pub fn instr16_35_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "xor16", imm16);
}
pub fn instr32_35_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_xor32, imm32);
}
// 0x38/0x39: CMP r/m, r (read-only: CMP never writes its destination)
define_instruction_read8!(gen_cmp8, instr_38_mem_jit, instr_38_reg_jit);
define_instruction_read16!(gen_cmp16, instr16_39_mem_jit, instr16_39_reg_jit);
define_instruction_read32!(gen_cmp32, instr32_39_mem_jit, instr32_39_reg_jit);
/// 0x3A: cmp r8, r/m8 (memory operand)
pub fn instr_3A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r);
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_cmp8(
        ctx.builder,
        &dest_operand,
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    codegen::gen_free_reg8_or_alias(ctx, r, dest_operand);
    ctx.builder.free_local(source_operand);
}
/// 0x3A: cmp r8, r8 (register operand)
pub fn instr_3A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r2);
    let source_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r1);
    gen_cmp8(
        ctx.builder,
        &dest_operand,
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    codegen::gen_free_reg8_or_alias(ctx, r2, dest_operand);
    codegen::gen_free_reg8_or_alias(ctx, r1, source_operand);
}
/// 0x3B: cmp r16, r/m16 (memory operand)
pub fn instr16_3B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_cmp16(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    ctx.builder.free_local(source_operand);
}
/// 0x3B: cmp r16, r16 (register operand)
pub fn instr16_3B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_cmp16(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r1 as usize]),
    );
}
/// 0x3B: cmp r32, r/m32 (memory operand)
pub fn instr32_3B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    ctx.builder.free_local(source_operand);
}
/// 0x3B: cmp r32, r32 (register operand)
pub fn instr32_3B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r1 as usize]),
    );
}
/// 0x3C: cmp al, imm8 (register_locals[0] is EAX/AX/AL)
pub fn instr_3C_jit(ctx: &mut JitContext, imm8: u32) {
    gen_cmp8(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm8 as i32),
    );
}
/// 0x3D: cmp ax, imm16
pub fn instr16_3D_jit(ctx: &mut JitContext, imm16: u32) {
    gen_cmp16(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm16 as i32),
    );
}
/// 0x3D: cmp eax, imm32
pub fn instr32_3D_jit(ctx: &mut JitContext, imm32: u32) {
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
/// Emit code for INC: dest += 1. x86 INC preserves CF, so the current
/// (possibly lazily-computed) CF is first materialized into the flags
/// word before last_result/last_op1 are overwritten. 16-bit results are
/// written back with gen_set_reg16_local to preserve the register's
/// upper bits.
fn gen_inc(builder: &mut WasmBuilder, dest_operand: &WasmLocal, size: i32) {
    // flags = (flags & ~CF) | current CF — CF must survive the INC
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!1);
    builder.and_i32();
    codegen::gen_getcf(builder);
    builder.or_i32();
    builder.store_aligned_i32(0);
    // last_op1 = original value, truncated to the operand size
    // (presumably used for lazy AF/OF computation — see the flag helpers)
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    builder.get_local(dest_operand);
    builder.const_i32(1);
    builder.add_i32();
    if size == OPSIZE_16 {
        codegen::gen_set_reg16_local(builder, dest_operand);
    }
    else {
        builder.set_local(dest_operand);
    }
    // last_result = new value, truncated for 16-bit
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_16 {
        builder.const_i32(0xFFFF);
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    // All flags except CF are lazily recomputed
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !1);
}
/// INC with a 16-bit operand.
fn gen_inc16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_inc(builder, dest_operand, OPSIZE_16);
}
/// INC with a 32-bit operand.
fn gen_inc32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_inc(builder, dest_operand, OPSIZE_32);
}
/// Emit code for DEC: dest -= 1. Mirrors gen_inc: CF is preserved
/// (materialized into the flags word first), last_op1/last_result feed
/// lazy flag evaluation, and FLAG_SUB marks the operation as a
/// subtraction for the lazy flag helpers.
fn gen_dec(builder: &mut WasmBuilder, dest_operand: &WasmLocal, size: i32) {
    // flags = (flags & ~CF) | current CF — CF must survive the DEC
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!1);
    builder.and_i32();
    codegen::gen_getcf(builder);
    builder.or_i32();
    builder.store_aligned_i32(0);
    // last_op1 = original value, truncated to the operand size
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    builder.get_local(dest_operand);
    builder.const_i32(1);
    builder.sub_i32();
    if size == OPSIZE_16 {
        // Preserve the upper 16 bits of the register
        codegen::gen_set_reg16_local(builder, dest_operand);
    }
    else {
        builder.set_local(dest_operand);
    }
    // last_result = new value, truncated for 16-bit
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_16 {
        builder.const_i32(0xFFFF);
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    // All flags except CF recomputed lazily; FLAG_SUB marks subtraction
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !1 | FLAG_SUB);
}
/// DEC with a 16-bit operand.
fn gen_dec16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_dec(builder, dest_operand, OPSIZE_16)
}
/// DEC with a 32-bit operand.
fn gen_dec32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_dec(builder, dest_operand, OPSIZE_32)
}
/// INC of a 16-bit register by index.
fn gen_inc16_r(ctx: &mut JitContext, r: u32) {
    gen_inc16(ctx.builder, &mut ctx.register_locals[r as usize])
}
/// INC of a 32-bit register by index.
fn gen_inc32_r(ctx: &mut JitContext, r: u32) {
    gen_inc32(ctx.builder, &mut ctx.register_locals[r as usize])
}
/// DEC of a 16-bit register by index.
fn gen_dec16_r(ctx: &mut JitContext, r: u32) {
    gen_dec16(ctx.builder, &mut ctx.register_locals[r as usize])
}
/// DEC of a 32-bit register by index.
fn gen_dec32_r(ctx: &mut JitContext, r: u32) {
    gen_dec32(ctx.builder, &mut ctx.register_locals[r as usize])
}
/// Emit code for a 16-bit NOT (dest ^= -1, upper register bits
/// preserved by gen_set_reg16_local). No flag code is emitted — x86 NOT
/// does not affect flags.
fn gen_not16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.get_local(dest_operand);
    builder.const_i32(-1);
    builder.xor_i32();
    codegen::gen_set_reg16_local(builder, dest_operand);
}
/// Emit code for a 32-bit NOT (dest ^= -1). No flag code is emitted.
fn gen_not32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.get_local(dest_operand);
    builder.const_i32(-1);
    builder.xor_i32();
    builder.set_local(dest_operand);
}
/// Emit code for a 16-bit NEG, delegated to the imported "neg16" helper
/// (which handles the flags); the result is written back preserving the
/// register's upper bits.
fn gen_neg16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.get_local(dest_operand);
    builder.call_fn1_ret("neg16");
    codegen::gen_set_reg16_local(builder, dest_operand);
}
/// Emit code for a 32-bit NEG: dest = 0 - dest. last_op1 is set to 0
/// (the implicit minuend) and FLAG_SUB marks a subtraction so all flags
/// are evaluated lazily as for SUB 0, dest.
fn gen_neg32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.const_i32(0);
    builder.store_aligned_i32(0);
    builder.const_i32(0);
    builder.get_local(&dest_operand);
    builder.sub_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL | FLAG_SUB);
}
/// 0x06: push es (16-bit operand size)
pub fn instr16_06_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::ES);
    let value = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x06: push es (32-bit operand size)
pub fn instr32_06_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::ES);
    let value = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x0E: push cs (16-bit operand size)
pub fn instr16_0E_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::CS);
    let value = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x0E: push cs (32-bit operand size)
pub fn instr32_0E_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::CS);
    let value = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x16: push ss (16-bit operand size)
pub fn instr16_16_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::SS);
    let value = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x16: push ss (32-bit operand size)
pub fn instr32_16_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::SS);
    let value = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x1E: push ds (16-bit operand size)
pub fn instr16_1E_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::DS);
    let value = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value);
    ctx.builder.free_local(value);
}
/// 0x1E: push ds (32-bit operand size)
pub fn instr32_1E_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::DS);
    let value = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value);
    ctx.builder.free_local(value);
}
// 0x40..0x47: inc r16/r32
pub fn instr16_40_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, AX); }
pub fn instr32_40_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EAX); }
pub fn instr16_41_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, CX); }
pub fn instr32_41_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, ECX); }
pub fn instr16_42_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, DX); }
pub fn instr32_42_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EDX); }
pub fn instr16_43_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, BX); }
pub fn instr32_43_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EBX); }
pub fn instr16_44_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, SP); }
pub fn instr32_44_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, ESP); }
pub fn instr16_45_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, BP); }
pub fn instr32_45_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EBP); }
pub fn instr16_46_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, SI); }
pub fn instr32_46_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, ESI); }
pub fn instr16_47_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, DI); }
pub fn instr32_47_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EDI); }
// 0x48..0x4F: dec r16/r32
pub fn instr16_48_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, AX); }
pub fn instr32_48_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EAX); }
pub fn instr16_49_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, CX); }
pub fn instr32_49_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, ECX); }
pub fn instr16_4A_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, DX); }
pub fn instr32_4A_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EDX); }
pub fn instr16_4B_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, BX); }
pub fn instr32_4B_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EBX); }
pub fn instr16_4C_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, SP); }
pub fn instr32_4C_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, ESP); }
pub fn instr16_4D_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, BP); }
pub fn instr32_4D_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EBP); }
pub fn instr16_4E_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, SI); }
pub fn instr32_4E_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, ESI); }
pub fn instr16_4F_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, DI); }
pub fn instr32_4F_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EDI); }
// 0x50..0x57: push r16/r32
pub fn instr16_50_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, AX); }
pub fn instr32_50_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EAX); }
pub fn instr16_51_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, CX); }
pub fn instr32_51_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, ECX); }
pub fn instr16_52_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, DX); }
pub fn instr32_52_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EDX); }
pub fn instr16_53_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, BX); }
pub fn instr32_53_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EBX); }
pub fn instr16_54_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, SP); }
pub fn instr32_54_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, ESP); }
pub fn instr16_55_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, BP); }
pub fn instr32_55_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EBP); }
pub fn instr16_56_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, SI); }
pub fn instr32_56_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, ESI); }
pub fn instr16_57_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, DI); }
pub fn instr32_57_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EDI); }
// 0x58..0x5F: pop r16/r32
pub fn instr16_58_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, AX); }
pub fn instr32_58_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EAX); }
pub fn instr16_59_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, CX); }
pub fn instr32_59_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, ECX); }
pub fn instr16_5A_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, DX); }
pub fn instr32_5A_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EDX); }
pub fn instr16_5B_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, BX); }
pub fn instr32_5B_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EBX); }
pub fn instr16_5C_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, SP); }
pub fn instr32_5C_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, ESP); }
pub fn instr16_5D_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, BP); }
pub fn instr32_5D_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EBP); }
pub fn instr16_5E_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, SI); }
pub fn instr32_5E_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, ESI); }
pub fn instr16_5F_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, DI); }
pub fn instr32_5F_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EDI); }
// 0x68/0x6A: push imm (0x6A's imm8 is sign-extended by the caller,
// hence the shared helper — TODO confirm against the decoder)
pub fn instr16_68_jit(ctx: &mut JitContext, imm16: u32) { push16_imm_jit(ctx, imm16) }
pub fn instr32_68_jit(ctx: &mut JitContext, imm32: u32) { push32_imm_jit(ctx, imm32) }
pub fn instr16_6A_jit(ctx: &mut JitContext, imm16: u32) { push16_imm_jit(ctx, imm16) }
pub fn instr32_6A_jit(ctx: &mut JitContext, imm32: u32) { push32_imm_jit(ctx, imm32) }
/// 0x69: imul r16, r/m16, imm16 (memory operand) — delegated to the
/// imported "imul_reg16" helper
pub fn instr16_69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm16: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.const_i32(imm16 as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r);
}
/// 0x69: imul r16, r16, imm16 (register operand)
pub fn instr16_69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm16: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(imm16 as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r2);
}
/// 0x69: imul r32, r/m32, imm32 (memory operand)
pub fn instr32_69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm32: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &value_local,
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
    ctx.builder.free_local(value_local);
}
/// 0x69: imul r32, r32, imm32 (register operand)
pub fn instr32_69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm32: u32) {
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
/// 0x6B: imul r16, r/m16, imm8s (memory operand; imm already
/// sign-extended by the decoder — TODO confirm)
pub fn instr16_6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8s: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.const_i32(imm8s as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r);
}
/// 0x6B: imul r16, r16, imm8s (register operand)
pub fn instr16_6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8s: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(imm8s as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r2);
}
/// 0x6B: imul r32, r/m32, imm8s (memory operand)
pub fn instr32_6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8s: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &value_local,
        &LocalOrImmediate::Immediate(imm8s as i32),
    );
    ctx.builder.free_local(value_local);
}
/// 0x6B: imul r32, r32, imm8s (register operand)
pub fn instr32_6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8s: u32) {
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::Immediate(imm8s as i32),
    );
}
// 0x70..0x7F (jcc rel8): code for conditional jumps is generated
// automatically by the basic-block codegen, so these handlers are
// intentionally empty.
pub fn instr16_70_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_70_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_71_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_71_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_72_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_72_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_73_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_73_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_74_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_74_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_75_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_75_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_76_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_76_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_77_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_77_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_78_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_78_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_79_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_79_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7F_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7F_jit(_ctx: &mut JitContext, _imm: u32) {}
// 0xE0..0xE3 (loopnz/loopz/loop/jcxz): the conditional jump itself is
// generated in the main loop. The loop variants only emit the counter
// decrement here (decr_exc_asize — presumably decrements CX or ECX depending
// on the address size; confirm in codegen); jcxz has no side effect of its
// own, so its handlers are empty.
pub fn instr16_E0_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr32_E0_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr16_E1_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr32_E1_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr16_E2_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr32_E2_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr16_E3_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_E3_jit(_ctx: &mut JitContext, _imm: u32) {}
// 0x80 group: ALU r/m8, imm8. /0../6 are read-modify-write; /7 (cmp) only
// reads and is defined further below with the read-only macro.
define_instruction_read_write_mem8!("add8", instr_80_0_mem_jit, instr_80_0_reg_jit, imm8);
define_instruction_read_write_mem8!("or8", instr_80_1_mem_jit, instr_80_1_reg_jit, imm8);
define_instruction_read_write_mem8!("adc8", instr_80_2_mem_jit, instr_80_2_reg_jit, imm8);
define_instruction_read_write_mem8!("sbb8", instr_80_3_mem_jit, instr_80_3_reg_jit, imm8);
define_instruction_read_write_mem8!("and8", instr_80_4_mem_jit, instr_80_4_reg_jit, imm8);
define_instruction_read_write_mem8!("sub8", instr_80_5_mem_jit, instr_80_5_reg_jit, imm8);
define_instruction_read_write_mem8!("xor8", instr_80_6_mem_jit, instr_80_6_reg_jit, imm8);
// 0x82 is an undocumented alias of 0x80 — same operations, same operands.
define_instruction_read_write_mem8!("add8", instr_82_0_mem_jit, instr_82_0_reg_jit, imm8);
define_instruction_read_write_mem8!("or8", instr_82_1_mem_jit, instr_82_1_reg_jit, imm8);
define_instruction_read_write_mem8!("adc8", instr_82_2_mem_jit, instr_82_2_reg_jit, imm8);
define_instruction_read_write_mem8!("sbb8", instr_82_3_mem_jit, instr_82_3_reg_jit, imm8);
define_instruction_read_write_mem8!("and8", instr_82_4_mem_jit, instr_82_4_reg_jit, imm8);
define_instruction_read_write_mem8!("sub8", instr_82_5_mem_jit, instr_82_5_reg_jit, imm8);
define_instruction_read_write_mem8!("xor8", instr_82_6_mem_jit, instr_82_6_reg_jit, imm8);
// 0x81 group: ALU r/m16/32, imm16/32 (16-bit ops dispatch through a named
// helper, 32-bit ops through a gen_* codegen function).
define_instruction_read_write_mem16!("add16", instr16_81_0_mem_jit, instr16_81_0_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_add32, instr32_81_0_mem_jit, instr32_81_0_reg_jit, imm32);
define_instruction_read_write_mem16!("or16", instr16_81_1_mem_jit, instr16_81_1_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_or32, instr32_81_1_mem_jit, instr32_81_1_reg_jit, imm32);
define_instruction_read_write_mem16!("adc16", instr16_81_2_mem_jit, instr16_81_2_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_adc32, instr32_81_2_mem_jit, instr32_81_2_reg_jit, imm32);
define_instruction_read_write_mem16!("sbb16", instr16_81_3_mem_jit, instr16_81_3_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_sbb32, instr32_81_3_mem_jit, instr32_81_3_reg_jit, imm32);
define_instruction_read_write_mem16!("and16", instr16_81_4_mem_jit, instr16_81_4_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_and32, instr32_81_4_mem_jit, instr32_81_4_reg_jit, imm32);
define_instruction_read_write_mem16!("sub16", instr16_81_5_mem_jit, instr16_81_5_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_sub32, instr32_81_5_mem_jit, instr32_81_5_reg_jit, imm32);
define_instruction_read_write_mem16!("xor16", instr16_81_6_mem_jit, instr16_81_6_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_xor32, instr32_81_6_mem_jit, instr32_81_6_reg_jit, imm32);
// 0x83 group: ALU r/m16/32, sign-extended imm8 (imm8s).
define_instruction_read_write_mem16!("add16", instr16_83_0_mem_jit, instr16_83_0_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_add32, instr32_83_0_mem_jit, instr32_83_0_reg_jit, imm8s);
define_instruction_read_write_mem16!("or16", instr16_83_1_mem_jit, instr16_83_1_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_or32, instr32_83_1_mem_jit, instr32_83_1_reg_jit, imm8s);
define_instruction_read_write_mem16!("adc16", instr16_83_2_mem_jit, instr16_83_2_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_adc32, instr32_83_2_mem_jit, instr32_83_2_reg_jit, imm8s);
define_instruction_read_write_mem16!("sbb16", instr16_83_3_mem_jit, instr16_83_3_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_sbb32, instr32_83_3_mem_jit, instr32_83_3_reg_jit, imm8s);
define_instruction_read_write_mem16!("and16", instr16_83_4_mem_jit, instr16_83_4_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_and32, instr32_83_4_mem_jit, instr32_83_4_reg_jit, imm8s);
define_instruction_read_write_mem16!("sub16", instr16_83_5_mem_jit, instr16_83_5_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_sub32, instr32_83_5_mem_jit, instr32_83_5_reg_jit, imm8s);
define_instruction_read_write_mem16!("xor16", instr16_83_6_mem_jit, instr16_83_6_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_xor32, instr32_83_6_mem_jit, instr32_83_6_reg_jit, imm8s);
// /7 (cmp) and 0x84/0x85 (test) only read their operands, hence the
// read-only macro variants with no write-back.
define_instruction_read8!(gen_cmp8, instr_80_7_mem_jit, instr_80_7_reg_jit, imm8);
define_instruction_read16!(gen_cmp16, instr16_81_7_mem_jit, instr16_81_7_reg_jit, imm16);
define_instruction_read32!(gen_cmp32, instr32_81_7_mem_jit, instr32_81_7_reg_jit, imm32);
define_instruction_read8!(gen_cmp8, instr_82_7_mem_jit, instr_82_7_reg_jit, imm8);
define_instruction_read16!(gen_cmp16, instr16_83_7_mem_jit, instr16_83_7_reg_jit, imm8s);
define_instruction_read32!(gen_cmp32, instr32_83_7_mem_jit, instr32_83_7_reg_jit, imm8s);
define_instruction_read8!(gen_test8, instr_84_mem_jit, instr_84_reg_jit);
define_instruction_read16!(gen_test16, instr16_85_mem_jit, instr16_85_reg_jit);
define_instruction_read32!(gen_test32, instr32_85_mem_jit, instr32_85_reg_jit);
pub fn instr_86_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // xchg r/m8, r8 (memory form).
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // gen_safe_read_write appears to invoke the callback with the loaded
    // value on the wasm value stack and to store whatever the callback
    // leaves there — inferred from this usage; confirm against its definition.
    codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
        // Stash the register value, ...
        codegen::gen_get_reg8(ctx, r);
        let tmp = ctx.builder.set_new_local();
        // ... pop the loaded memory value into the register, ...
        codegen::gen_set_reg8(ctx, r);
        // ... and leave the old register value as the value to write back.
        ctx.builder.get_local(&tmp);
        ctx.builder.free_local(tmp);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr_86_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // xchg r8, r8 (register form): swap reg8[r1] and reg8[r2].
    // Save r2's old value in a scratch local, move r1 into r2, then move the
    // saved value into r1.
    codegen::gen_get_reg8(ctx, r2);
    let saved_r2 = ctx.builder.set_new_local();
    codegen::gen_get_reg8(ctx, r1);
    codegen::gen_set_reg8(ctx, r2);
    ctx.builder.get_local(&saved_r2);
    codegen::gen_set_reg8(ctx, r1);
    ctx.builder.free_local(saved_r2);
}
pub fn instr16_87_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // xchg r/m16, r16 (memory form) — same shape as instr_86_mem_jit but for
    // 16-bit operands.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
        // Stash the register, load memory value into it, leave the old
        // register value on the stack to be written back.
        codegen::gen_get_reg16(ctx, r);
        let tmp = ctx.builder.set_new_local();
        codegen::gen_set_reg16(ctx, r);
        ctx.builder.get_local(&tmp);
        ctx.builder.free_local(tmp);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr32_87_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // xchg r/m32, r32 (memory form).
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
        codegen::gen_get_reg32(ctx, r);
        let tmp = ctx.builder.set_new_local();
        codegen::gen_set_reg32(ctx, r);
        ctx.builder.get_local(&tmp);
        ctx.builder.free_local(tmp);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr16_87_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // xchg r16, r16 (register form): swap reg16[r1] and reg16[r2] via a
    // scratch local holding r2's old value.
    codegen::gen_get_reg16(ctx, r2);
    let saved_r2 = ctx.builder.set_new_local();
    codegen::gen_get_reg16(ctx, r1);
    codegen::gen_set_reg16(ctx, r2);
    ctx.builder.get_local(&saved_r2);
    codegen::gen_set_reg16(ctx, r1);
    ctx.builder.free_local(saved_r2);
}
pub fn instr32_87_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // xchg r32, r32 (register form): swap reg32[r1] and reg32[r2] via a
    // scratch local holding r2's old value.
    codegen::gen_get_reg32(ctx, r2);
    let saved_r2 = ctx.builder.set_new_local();
    codegen::gen_get_reg32(ctx, r1);
    codegen::gen_set_reg32(ctx, r2);
    ctx.builder.get_local(&saved_r2);
    codegen::gen_set_reg32(ctx, r1);
    ctx.builder.free_local(saved_r2);
}
pub fn instr_88_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // mov r/m8, r8 (memory form).
    // Pseudo: safe_write8(modrm_resolve(modrm_byte), reg8[r]);
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_get_reg8(ctx, r);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write8(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_88_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // mov r8, r8 (register form): reg8[r1] = reg8[r2].
    codegen::gen_set_reg8_r(ctx, r1, r2);
}
pub fn instr16_89_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // mov r/m16, r16. The register's wasm local is passed directly as the
    // value (unsafe_clone avoids copying through a scratch local; the clone
    // is intentionally not freed here — presumably the register-local table
    // owns it; confirm).
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(
        ctx,
        &address_local,
        &ctx.register_locals[r as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
pub fn instr16_89_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // mov r16, r16: reg16[r1] = reg16[r2].
    codegen::gen_set_reg16_r(ctx, r1, r2);
}
pub fn instr32_89_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // Pseudo: safe_write32(modrm_resolve(modrm_byte), reg32[r]);
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(
        ctx,
        &address_local,
        &ctx.register_locals[r as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
pub fn instr32_89_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // mov r32, r32: reg32[r1] = reg32[r2].
    codegen::gen_set_reg32_r(ctx, r1, r2);
}
pub fn instr_8A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // Pseudo: reg8[r] = safe_read8(modrm_resolve(modrm_byte));
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::gen_set_reg8(ctx, r);
}
pub fn instr_8A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // mov r8, r8 — note the reversed operand direction vs. 0x88.
    codegen::gen_set_reg8_r(ctx, r2, r1);
}
pub fn instr16_8B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // Pseudo: reg16[r] = safe_read16(modrm_resolve(modrm_byte));
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_8B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_set_reg16_r(ctx, r2, r1);
}
pub fn instr32_8B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // Pseudo: reg32[r] = safe_read32s(modrm_resolve(modrm_byte));
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_8B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_set_reg32_r(ctx, r2, r1);
}
pub fn instr16_8C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // mov r/m16, sreg. r selects the segment register; values >= 6 do not
    // name a segment register and raise #UD. The address is resolved before
    // the #UD check (same order as the 32-bit handler).
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    if r >= 6 {
        codegen::gen_trigger_ud(ctx);
    }
    else {
        codegen::gen_get_sreg(ctx, r);
        let value_local = ctx.builder.set_new_local();
        codegen::gen_safe_write16(ctx, &address_local, &value_local);
        ctx.builder.free_local(value_local);
    }
    ctx.builder.free_local(address_local);
}
pub fn instr32_8C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // mov r/m32, sreg with a memory destination: per the Intel SDM, only 16
    // bits are stored regardless of operand size, so the write below is
    // deliberately gen_safe_write16 — this is not a copy-paste mistake.
    // NOTE(review): body is identical to instr16_8C_mem_jit; could delegate.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    if r >= 6 {
        // r >= 6 does not name a segment register -> #UD.
        codegen::gen_trigger_ud(ctx);
    }
    else {
        codegen::gen_get_sreg(ctx, r);
        let value_local = ctx.builder.set_new_local();
        codegen::gen_safe_write16(ctx, &address_local, &value_local);
        ctx.builder.free_local(value_local);
    }
    ctx.builder.free_local(address_local);
}
pub fn instr16_8C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // mov r16, sreg (register form). Only sreg indices 0..5 are valid;
    // anything else raises #UD.
    if r2 < 6 {
        codegen::gen_get_sreg(ctx, r2);
        codegen::gen_set_reg16(ctx, r1);
    }
    else {
        codegen::gen_trigger_ud(ctx);
    }
}
pub fn instr32_8C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // mov r32, sreg (register form); the selector value is written into the
    // full 32-bit register. Invalid sreg index -> #UD.
    if r2 < 6 {
        codegen::gen_get_sreg(ctx, r2);
        codegen::gen_set_reg32(ctx, r1);
    }
    else {
        codegen::gen_trigger_ud(ctx);
    }
}
// 0x8D lea: store the effective address itself, not the memory contents.
pub fn instr16_8D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, reg: u32) {
    // SEG_PREFIX_ZERO presumably makes gen_modrm_resolve skip the segment
    // base (lea ignores segmentation) — confirm in gen_modrm_resolve.
    ctx.cpu.prefixes |= SEG_PREFIX_ZERO;
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_set_reg16(ctx, reg);
}
pub fn instr32_8D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, reg: u32) {
    ctx.cpu.prefixes |= SEG_PREFIX_ZERO;
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_set_reg32(ctx, reg);
}
// lea with a register source operand is invalid -> #UD.
pub fn instr16_8D_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr32_8D_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr16_8F_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // pop r/m16 (memory form). The destination address may itself involve
    // (e)sp, which on hardware is sampled *after* the pop adjusts it — hence
    // the adjust/undo dance around the fallible operations below.
    // before gen_modrm_resolve, update esp to the new value
    codegen::gen_adjust_stack_reg(ctx, 2);
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // pop takes care of updating esp, so undo the previous change
    codegen::gen_adjust_stack_reg(ctx, (-2i32) as u32);
    codegen::gen_pop16(ctx);
    let value_local = ctx.builder.set_new_local();
    // undo the esp change of pop, as safe_write16 can fail
    codegen::gen_adjust_stack_reg(ctx, (-2i32) as u32);
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    // finally, actually update esp
    codegen::gen_adjust_stack_reg(ctx, 2);
}
pub fn instr16_8F_0_reg_jit(ctx: &mut JitContext, r: u32) { pop16_reg_jit(ctx, r); }
pub fn instr32_8F_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // pop r/m32 — same adjust/undo scheme as the 16-bit handler above, with
    // a 4-byte stack slot.
    codegen::gen_adjust_stack_reg(ctx, 4);
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // pop updates esp itself; undo the pre-adjustment.
    codegen::gen_adjust_stack_reg(ctx, (-4i32) as u32);
    codegen::gen_pop32s(ctx);
    let value_local = ctx.builder.set_new_local();
    // undo the esp change of pop, as safe_write32 can fail.
    codegen::gen_adjust_stack_reg(ctx, (-4i32) as u32);
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    // finally, commit the esp update.
    codegen::gen_adjust_stack_reg(ctx, 4);
}
pub fn instr32_8F_0_reg_jit(ctx: &mut JitContext, r: u32) { pop32_reg_jit(ctx, r); }
// 0xC1 group: shifts/rotates of r/m16/32 by an immediate count. imm8_5bits:
// the count is masked to 5 bits. /6 is an undocumented alias of /4 (shl).
define_instruction_read_write_mem16!(
    "rol16",
    instr16_C1_0_mem_jit,
    instr16_C1_0_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_rol32,
    instr32_C1_0_mem_jit,
    instr32_C1_0_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "ror16",
    instr16_C1_1_mem_jit,
    instr16_C1_1_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_ror32,
    instr32_C1_1_mem_jit,
    instr32_C1_1_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "rcl16",
    instr16_C1_2_mem_jit,
    instr16_C1_2_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_rcl32,
    instr32_C1_2_mem_jit,
    instr32_C1_2_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "rcr16",
    instr16_C1_3_mem_jit,
    instr16_C1_3_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_rcr32,
    instr32_C1_3_mem_jit,
    instr32_C1_3_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "shl16",
    instr16_C1_4_mem_jit,
    instr16_C1_4_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_shl32,
    instr32_C1_4_mem_jit,
    instr32_C1_4_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "shr16",
    instr16_C1_5_mem_jit,
    instr16_C1_5_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_shr32,
    instr32_C1_5_mem_jit,
    instr32_C1_5_reg_jit,
    imm8_5bits
);
// /6: undocumented shl alias.
define_instruction_read_write_mem16!(
    "shl16",
    instr16_C1_6_mem_jit,
    instr16_C1_6_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_shl32,
    instr32_C1_6_mem_jit,
    instr32_C1_6_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "sar16",
    instr16_C1_7_mem_jit,
    instr16_C1_7_reg_jit,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    gen_sar32,
    instr32_C1_7_mem_jit,
    instr32_C1_7_reg_jit,
    imm8_5bits
);
pub fn instr16_E8_jit(ctx: &mut JitContext, imm: u32) {
    // 0xE8 call rel16: push the return address, then take the relative jump.
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_jmp_rel16(ctx.builder, imm as u16);
}
pub fn instr32_E8_jit(ctx: &mut JitContext, imm: u32) {
    // 0xE8 call rel32.
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_relative_jump(ctx.builder, imm as i32);
}
pub fn instr16_E9_jit(ctx: &mut JitContext, imm: u32) {
    // 0xE9 jmp rel16.
    codegen::gen_jmp_rel16(ctx.builder, imm as u16);
}
pub fn instr32_E9_jit(ctx: &mut JitContext, imm: u32) {
    // 0xE9 jmp rel32.
    codegen::gen_relative_jump(ctx.builder, imm as i32);
}
pub fn instr16_C2_jit(ctx: &mut JitContext, imm16: u32) {
    // 0xC2 ret imm16 (16-bit operand size): pop the return offset, add the
    // code-segment base, release imm16 extra bytes of stack, jump indirectly.
    // The immediate is always 16 bits, even for the 32-bit form below.
    codegen::gen_pop16(ctx);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_adjust_stack_reg(ctx, imm16);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_C2_jit(ctx: &mut JitContext, imm16: u32) {
    // 0xC2 ret imm16 (32-bit operand size): 32-bit return offset.
    codegen::gen_pop32s(ctx);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_adjust_stack_reg(ctx, imm16);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr16_C3_jit(ctx: &mut JitContext) {
    // 0xC3 ret (16-bit operand size).
    codegen::gen_pop16(ctx);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_C3_jit(ctx: &mut JitContext) {
    // 0xC3 ret (32-bit operand size).
    codegen::gen_pop32s(ctx);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
// 0xC9 leave — the bool presumably selects 16- vs 32-bit operand size
// (false = 16, true = 32); confirm against gen_leave.
pub fn instr16_C9_jit(ctx: &mut JitContext) { codegen::gen_leave(ctx, false); }
pub fn instr32_C9_jit(ctx: &mut JitContext) { codegen::gen_leave(ctx, true); }
// 0xB0..0xB7: mov r8, imm8, one handler per register.
pub fn gen_mov_reg8_imm(ctx: &mut JitContext, r: u32, imm: u32) {
    // Shared helper: reg8[r] = imm.
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg8(ctx, r);
}
pub fn instr_B0_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 0, imm) }
pub fn instr_B1_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 1, imm) }
pub fn instr_B2_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 2, imm) }
pub fn instr_B3_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 3, imm) }
pub fn instr_B4_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 4, imm) }
pub fn instr_B5_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 5, imm) }
pub fn instr_B6_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 6, imm) }
pub fn instr_B7_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 7, imm) }
// 0xB8..0xBF with 16-bit operand size: mov r16, imm16.
pub fn gen_mov_reg16_imm(ctx: &mut JitContext, r: u32, imm: u32) {
    // Shared helper: reg16[r] = imm.
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_B8_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 0, imm) }
pub fn instr16_B9_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 1, imm) }
pub fn instr16_BA_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 2, imm) }
pub fn instr16_BB_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 3, imm) }
pub fn instr16_BC_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 4, imm) }
pub fn instr16_BD_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 5, imm) }
pub fn instr16_BE_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 6, imm) }
pub fn instr16_BF_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 7, imm) }
// 0xB8..0xBF with 32-bit operand size: mov r32, imm32.
pub fn gen_mov_reg32_imm(ctx: &mut JitContext, r: u32, imm: u32) {
    // Shared helper: reg32[r] = imm.
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_B8_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 0, imm) }
pub fn instr32_B9_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 1, imm) }
pub fn instr32_BA_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 2, imm) }
pub fn instr32_BB_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 3, imm) }
pub fn instr32_BC_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 4, imm) }
pub fn instr32_BD_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 5, imm) }
pub fn instr32_BE_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 6, imm) }
pub fn instr32_BF_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 7, imm) }
// 0xC0 group: shifts/rotates of r/m8 by an immediate count (masked to 5
// bits). In every shift group, /6 is an undocumented alias of /4 (shl).
define_instruction_read_write_mem8!("rol8", instr_C0_0_mem_jit, instr_C0_0_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("ror8", instr_C0_1_mem_jit, instr_C0_1_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("rcl8", instr_C0_2_mem_jit, instr_C0_2_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("rcr8", instr_C0_3_mem_jit, instr_C0_3_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("shl8", instr_C0_4_mem_jit, instr_C0_4_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("shr8", instr_C0_5_mem_jit, instr_C0_5_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("shl8", instr_C0_6_mem_jit, instr_C0_6_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("sar8", instr_C0_7_mem_jit, instr_C0_7_reg_jit, imm8_5bits);
// 0xD0 group: shifts/rotates of r/m8 by a constant count of 1.
define_instruction_read_write_mem8!("rol8", instr_D0_0_mem_jit, instr_D0_0_reg_jit, constant_one);
define_instruction_read_write_mem8!("ror8", instr_D0_1_mem_jit, instr_D0_1_reg_jit, constant_one);
define_instruction_read_write_mem8!("rcl8", instr_D0_2_mem_jit, instr_D0_2_reg_jit, constant_one);
define_instruction_read_write_mem8!("rcr8", instr_D0_3_mem_jit, instr_D0_3_reg_jit, constant_one);
define_instruction_read_write_mem8!("shl8", instr_D0_4_mem_jit, instr_D0_4_reg_jit, constant_one);
define_instruction_read_write_mem8!("shr8", instr_D0_5_mem_jit, instr_D0_5_reg_jit, constant_one);
define_instruction_read_write_mem8!("shl8", instr_D0_6_mem_jit, instr_D0_6_reg_jit, constant_one);
define_instruction_read_write_mem8!("sar8", instr_D0_7_mem_jit, instr_D0_7_reg_jit, constant_one);
// 0xD1 group: shifts/rotates of r/m16/32 by a constant count of 1.
define_instruction_read_write_mem16!(
    "rol16",
    instr16_D1_0_mem_jit,
    instr16_D1_0_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_rol32,
    instr32_D1_0_mem_jit,
    instr32_D1_0_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "ror16",
    instr16_D1_1_mem_jit,
    instr16_D1_1_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_ror32,
    instr32_D1_1_mem_jit,
    instr32_D1_1_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "rcl16",
    instr16_D1_2_mem_jit,
    instr16_D1_2_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_rcl32,
    instr32_D1_2_mem_jit,
    instr32_D1_2_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "rcr16",
    instr16_D1_3_mem_jit,
    instr16_D1_3_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_rcr32,
    instr32_D1_3_mem_jit,
    instr32_D1_3_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "shl16",
    instr16_D1_4_mem_jit,
    instr16_D1_4_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_shl32,
    instr32_D1_4_mem_jit,
    instr32_D1_4_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "shr16",
    instr16_D1_5_mem_jit,
    instr16_D1_5_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_shr32,
    instr32_D1_5_mem_jit,
    instr32_D1_5_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "shl16",
    instr16_D1_6_mem_jit,
    instr16_D1_6_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_shl32,
    instr32_D1_6_mem_jit,
    instr32_D1_6_reg_jit,
    constant_one
);
define_instruction_read_write_mem16!(
    "sar16",
    instr16_D1_7_mem_jit,
    instr16_D1_7_reg_jit,
    constant_one
);
define_instruction_read_write_mem32!(
    gen_sar32,
    instr32_D1_7_mem_jit,
    instr32_D1_7_reg_jit,
    constant_one
);
// 0xD2 group: shifts/rotates of r/m8 by cl.
define_instruction_read_write_mem8!("rol8", instr_D2_0_mem_jit, instr_D2_0_reg_jit, cl);
define_instruction_read_write_mem8!("ror8", instr_D2_1_mem_jit, instr_D2_1_reg_jit, cl);
define_instruction_read_write_mem8!("rcl8", instr_D2_2_mem_jit, instr_D2_2_reg_jit, cl);
define_instruction_read_write_mem8!("rcr8", instr_D2_3_mem_jit, instr_D2_3_reg_jit, cl);
define_instruction_read_write_mem8!("shl8", instr_D2_4_mem_jit, instr_D2_4_reg_jit, cl);
define_instruction_read_write_mem8!("shr8", instr_D2_5_mem_jit, instr_D2_5_reg_jit, cl);
define_instruction_read_write_mem8!("shl8", instr_D2_6_mem_jit, instr_D2_6_reg_jit, cl);
define_instruction_read_write_mem8!("sar8", instr_D2_7_mem_jit, instr_D2_7_reg_jit, cl);
// 0xD3 group: shifts/rotates of r/m16/32 by cl.
define_instruction_read_write_mem16!("rol16", instr16_D3_0_mem_jit, instr16_D3_0_reg_jit, cl);
define_instruction_read_write_mem32!(gen_rol32, instr32_D3_0_mem_jit, instr32_D3_0_reg_jit, cl);
define_instruction_read_write_mem16!("ror16", instr16_D3_1_mem_jit, instr16_D3_1_reg_jit, cl);
define_instruction_read_write_mem32!(gen_ror32, instr32_D3_1_mem_jit, instr32_D3_1_reg_jit, cl);
define_instruction_read_write_mem16!("rcl16", instr16_D3_2_mem_jit, instr16_D3_2_reg_jit, cl);
define_instruction_read_write_mem32!(gen_rcl32, instr32_D3_2_mem_jit, instr32_D3_2_reg_jit, cl);
define_instruction_read_write_mem16!("rcr16", instr16_D3_3_mem_jit, instr16_D3_3_reg_jit, cl);
define_instruction_read_write_mem32!(gen_rcr32, instr32_D3_3_mem_jit, instr32_D3_3_reg_jit, cl);
define_instruction_read_write_mem16!("shl16", instr16_D3_4_mem_jit, instr16_D3_4_reg_jit, cl);
define_instruction_read_write_mem32!(gen_shl32, instr32_D3_4_mem_jit, instr32_D3_4_reg_jit, cl);
define_instruction_read_write_mem16!("shr16", instr16_D3_5_mem_jit, instr16_D3_5_reg_jit, cl);
define_instruction_read_write_mem32!(gen_shr32, instr32_D3_5_mem_jit, instr32_D3_5_reg_jit, cl);
define_instruction_read_write_mem16!("shl16", instr16_D3_6_mem_jit, instr16_D3_6_reg_jit, cl);
define_instruction_read_write_mem32!(gen_shl32, instr32_D3_6_mem_jit, instr32_D3_6_reg_jit, cl);
define_instruction_read_write_mem16!("sar16", instr16_D3_7_mem_jit, instr16_D3_7_reg_jit, cl);
define_instruction_read_write_mem32!(gen_sar32, instr32_D3_7_mem_jit, instr32_D3_7_reg_jit, cl);
pub fn instr_D7_jit(ctx: &mut JitContext) {
    // 0xD7 xlat: AL = [segment:(E)BX + AL].
    if ctx.cpu.asize_32() {
        codegen::gen_get_reg32(ctx, regs::EBX);
    }
    else {
        codegen::gen_get_reg16(ctx, regs::BX);
    }
    codegen::gen_get_reg8(ctx, regs::AL);
    ctx.builder.add_i32();
    // With a 16-bit address size, the effective address wraps at 64k.
    if !ctx.cpu.asize_32() {
        ctx.builder.const_i32(0xFFFF);
        ctx.builder.and_i32();
    }
    // DS is the default segment; jit_add_seg_offset presumably honours
    // segment-override prefixes — confirm against its definition.
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg8(ctx, regs::AL);
}
// 0xD8: x87 arithmetic with a single-precision memory operand (mem forms)
// or st(i) (reg forms). All variants except /2 and /3 (fcom/fcomp) share the
// two helpers below.
fn instr_group_D8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, op: &str) {
    // Call op(0, operand): the leading const is the st(i) destination index
    // (always st0 for D8). The operand is passed as an i64/i32 pair (see
    // call_fn3_i32_i64_i32) — presumably the extended-precision
    // representation produced by gen_fpu_load_m32; confirm.
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_m32(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
fn instr_group_D8_reg_jit(ctx: &mut JitContext, r: u32, op: &str) {
    // Same as the mem helper, but the operand is st(r).
    ctx.builder.const_i32(0);
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
pub fn instr_D8_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fadd")
}
pub fn instr_D8_0_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_D8_reg_jit(ctx, r, "fpu_fadd")
}
pub fn instr_D8_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fmul")
}
pub fn instr_D8_1_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_D8_reg_jit(ctx, r, "fpu_fmul")
}
// /2 and /3 (fcom/fcomp) compare only — no destination index argument.
pub fn instr_D8_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m32(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_D8_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_D8_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m32(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_D8_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_D8_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fsub")
}
pub fn instr_D8_4_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_D8_reg_jit(ctx, r, "fpu_fsub")
}
pub fn instr_D8_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fsubr")
}
pub fn instr_D8_5_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_D8_reg_jit(ctx, r, "fpu_fsubr")
}
pub fn instr_D8_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fdiv")
}
pub fn instr_D8_6_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_D8_reg_jit(ctx, r, "fpu_fdiv")
}
pub fn instr_D8_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fdivr")
}
pub fn instr_D8_7_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_D8_reg_jit(ctx, r, "fpu_fdivr")
}
// 0xD9 /0: fld m32 / fld st(i). Operand size is irrelevant for x87, so the
// 32-bit handlers simply delegate to the 16-bit ones.
pub fn instr16_D9_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m32(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
pub fn instr16_D9_0_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
pub fn instr32_D9_0_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_0_reg_jit(ctx, r) }
pub fn instr32_D9_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_0_mem_jit(ctx, modrm_byte)
}
// 0xD9 /1: fxch st(i) (reg form); the memory form is not a valid encoding.
pub fn instr16_D9_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // The modrm byte is still resolved before raising #UD — presumably to
    // keep decoder side effects consistent with the interpreter; confirm.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
pub fn instr16_D9_1_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fxch");
}
pub fn instr32_D9_1_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_1_reg_jit(ctx, r) }
pub fn instr32_D9_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_1_mem_jit(ctx, modrm_byte)
}
// 0xD9 /2: fst m32.
pub fn instr16_D9_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // Convert st0 to single precision and store it.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("f80_to_f32");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr16_D9_2_reg_jit(ctx: &mut JitContext, r: u32) {
    // Register form: r == 0 emits nothing (a no-op encoding — presumably
    // fnop); any other r is invalid -> #UD.
    if r != 0 {
        codegen::gen_trigger_ud(ctx);
    }
}
pub fn instr32_D9_2_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_2_reg_jit(ctx, r) }
pub fn instr32_D9_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_2_mem_jit(ctx, modrm_byte)
}
// 0xD9 /3: fstp m32 / fstp st(i) — same as /2 plus a stack pop.
pub fn instr16_D9_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("f80_to_f32");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    // Pop only after the write has been emitted (the write can fault).
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
pub fn instr16_D9_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
pub fn instr32_D9_3_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_3_reg_jit(ctx, r) }
pub fn instr32_D9_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_3_mem_jit(ctx, modrm_byte)
}
pub fn instr16_D9_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
ctx.builder,
ctx.start_of_current_instruction as i32 & 0xFFF,
);
codegen::gen_move_registers_from_locals_to_memory(ctx);
ctx.builder.call_fn1("fpu_fldenv32");
codegen::gen_move_registers_from_memory_to_locals(ctx);
codegen::gen_get_page_fault(ctx.builder);
ctx.builder.if_void();
codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
codegen::gen_move_registers_from_locals_to_memory(ctx);
ctx.builder.return_();
ctx.builder.block_end();
}
// D9 /4 register forms: only r values 0, 1, 4 and 5 are valid
// encodings, dispatched through the out-of-line "instr16_D9_4_reg"
// runtime helper; every other value raises #UD.
pub fn instr16_D9_4_reg_jit(ctx: &mut JitContext, r: u32) {
    if matches!(r, 0 | 1 | 4 | 5) {
        ctx.builder.const_i32(r as i32);
        ctx.builder.call_fn1("instr16_D9_4_reg");
    }
    else {
        codegen::gen_trigger_ud(ctx);
    }
}
// 32-bit operand-size variants forward to the 16-bit D9 /4 handlers.
pub fn instr32_D9_4_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_4_reg_jit(ctx, r) }
pub fn instr32_D9_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_4_mem_jit(ctx, modrm_byte)
}
// D9 /5 (mem): fldcw — read a 16-bit value from memory and install it
// as the FPU control word via the set_control_word runtime helper.
pub fn instr16_D9_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.call_fn1("set_control_word");
}
// D9 /5 register forms: r == 7 is an invalid encoding (#UD); all other
// values are handled by the out-of-line "instr16_D9_5_reg" helper.
pub fn instr16_D9_5_reg_jit(ctx: &mut JitContext, r: u32) {
    match r {
        7 => codegen::gen_trigger_ud(ctx),
        _ => codegen::gen_fn1_const(ctx.builder, "instr16_D9_5_reg", r),
    }
}
pub fn instr32_D9_5_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_5_reg_jit(ctx, r) }
pub fn instr32_D9_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_5_mem_jit(ctx, modrm_byte)
}
// D9 /6 (mem): fstenv (32-bit layout). Same page-fault-aware calling
// pattern as fldenv above: record EIP, spill locals, call the helper,
// reload, then exit the generated code early on a page fault.
pub fn instr16_D9_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("fpu_fstenv32");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
// D9 /6 (reg): dispatched entirely to the out-of-line helper.
pub fn instr16_D9_6_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr16_D9_6_reg", r);
}
pub fn instr32_D9_6_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_6_reg_jit(ctx, r) }
pub fn instr32_D9_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_6_mem_jit(ctx, modrm_byte)
}
// D9 /7 (mem): fnstcw — load the FPU control word directly from its
// global and store it to the destination as a 16-bit value.
pub fn instr16_D9_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder
        .const_i32(global_pointers::fpu_control_word as i32);
    ctx.builder.load_aligned_u16(0);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
// D9 /7 (reg): dispatched entirely to the out-of-line helper.
pub fn instr16_D9_7_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr16_D9_7_reg", r);
}
pub fn instr32_D9_7_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_7_reg_jit(ctx, r) }
pub fn instr32_D9_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_7_mem_jit(ctx, modrm_byte)
}
// DA /5 (mem): fsubr with a 32-bit integer memory operand; the leading
// const 0 selects ST0 as the destination slot.
pub fn instr_DA_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_i32(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32("fpu_fsubr")
}
// DA /5 register forms: only r == 1 (fucompp) is a valid encoding;
// every other value raises #UD.
pub fn instr_DA_5_reg_jit(ctx: &mut JitContext, r: u32) {
    match r {
        1 => codegen::gen_fn0_const(ctx.builder, "fpu_fucompp"),
        _ => codegen::gen_trigger_ud(ctx),
    }
}
// DB /0 (mem): load a 32-bit integer and push it onto the FPU stack.
pub fn instr_DB_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i32(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
pub fn instr_DB_0_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr_DB_0_reg", r);
}
// DB /2 (mem): convert ST0 to a 32-bit integer (fpu_convert_to_i32)
// and store it, without popping.
pub fn instr_DB_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i32");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_DB_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr_DB_2_reg", r);
}
// DB /3 (mem): like /2, but pops the FPU stack after the store.
pub fn instr_DB_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i32");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
pub fn instr_DB_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr_DB_3_reg", r);
}
// DB /5 (mem): load an 80-bit float (fpu_fldm80). The helper may
// page-fault, so it uses the same spill/call/reload/early-exit pattern
// as fldenv/fstenv above.
pub fn instr_DB_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("fpu_fldm80");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
// DB /5 (reg): fucomi ST(r).
pub fn instr_DB_5_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fucomi");
}
// DB /6 (mem): invalid encoding, raises #UD.
pub fn instr_DB_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte) {
    codegen::gen_trigger_ud(ctx);
}
// DB /6 (reg): fcomi ST(r).
pub fn instr_DB_6_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fcomi");
}
// Shared emitters for the DC arithmetic group. The first i32 argument
// selects the destination stack slot: 0 (ST0) for memory operands, r
// (ST(r)) for register operands.
fn instr_group_DC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, op: &str) {
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
fn instr_group_DC_reg_jit(ctx: &mut JitContext, r: u32, op: &str) {
    ctx.builder.const_i32(r as i32);
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
// DC /0-/7: same arithmetic set as D8 but with a 64-bit float memory
// operand, or with ST(r) as the destination for register forms.
pub fn instr_DC_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fadd")
}
pub fn instr_DC_0_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fadd")
}
pub fn instr_DC_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fmul")
}
pub fn instr_DC_1_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fmul")
}
// DC /2 and /3: compare (and pop, for /3) — no destination slot
// argument, so these bypass the group helpers.
pub fn instr_DC_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_DC_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_DC_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_DC_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_DC_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fsub")
}
pub fn instr_DC_4_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fsub")
}
pub fn instr_DC_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fsubr")
}
pub fn instr_DC_5_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fsubr")
}
pub fn instr_DC_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fdiv")
}
pub fn instr_DC_6_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fdiv")
}
pub fn instr_DC_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fdivr")
}
pub fn instr_DC_7_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fdivr")
}
// DD /0 (mem): load a 64-bit float and push it onto the FPU stack.
pub fn instr16_DD_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
// DD /0 (reg): ffree ST(r).
pub fn instr16_DD_0_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_ffree", r);
}
pub fn instr32_DD_0_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_0_reg_jit(ctx, r) }
pub fn instr32_DD_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_0_mem_jit(ctx, modrm_byte)
}
// DD /2 (mem): store ST0 as a 64-bit float (f80_to_f64) without
// popping. The value travels through an i64 wasm local.
pub fn instr16_DD_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret_i64("f80_to_f64");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
}
pub fn instr16_DD_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fst", r);
}
pub fn instr32_DD_2_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_2_reg_jit(ctx, r) }
pub fn instr32_DD_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_2_mem_jit(ctx, modrm_byte)
}
// DD /3 (mem): like /2, but pops the FPU stack after the store.
pub fn instr16_DD_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret_i64("f80_to_f64");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
}
pub fn instr16_DD_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
pub fn instr32_DD_3_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_3_reg_jit(ctx, r) }
pub fn instr32_DD_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_3_mem_jit(ctx, modrm_byte)
}
// DD /5 (mem): invalid encoding — resolve the modrm byte, then #UD.
pub fn instr16_DD_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
// DD /5 (reg): fucomp ST(r).
pub fn instr16_DD_5_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fucomp");
}
pub fn instr32_DD_5_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_5_reg_jit(ctx, r) }
pub fn instr32_DD_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_5_mem_jit(ctx, modrm_byte)
}
// Shared emitters for the DE arithmetic group: memory forms take a
// 16-bit integer operand targeting ST0; register forms target ST(r)
// and pop the FPU stack after the operation.
fn instr_group_DE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, op: &str) {
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_i16(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
fn instr_group_DE_reg_jit(ctx: &mut JitContext, r: u32, op: &str) {
    ctx.builder.const_i32(r as i32);
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn3_i32_i64_i32(op);
    codegen::gen_fn0_const(ctx.builder, "fpu_pop")
}
pub fn instr_DE_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fadd")
}
pub fn instr_DE_0_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fadd")
}
pub fn instr_DE_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fmul")
}
pub fn instr_DE_1_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fmul")
}
// DE /2: compare with a 16-bit integer (mem) or ST(r) (reg, pops).
pub fn instr_DE_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i16(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_DE_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcom");
    codegen::gen_fn0_const(ctx.builder, "fpu_pop")
}
// DE /3 (mem): compare-and-pop with a 16-bit integer operand.
pub fn instr_DE_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i16(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
// DE /3 register forms: only r == 1 is a valid encoding — compare ST0
// against ST(1) and pop; every other value raises #UD.
pub fn instr_DE_3_reg_jit(ctx: &mut JitContext, r: u32) {
    match r {
        1 => {
            codegen::gen_fpu_get_sti(ctx, r);
            ctx.builder.call_fn2_i64_i32("fpu_fcomp");
            codegen::gen_fn0_const(ctx.builder, "fpu_pop")
        },
        _ => codegen::gen_trigger_ud(ctx),
    }
}
pub fn instr_DE_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fsub")
}
pub fn instr_DE_4_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fsub")
}
pub fn instr_DE_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fsubr")
}
pub fn instr_DE_5_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fsubr")
}
pub fn instr_DE_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fdiv")
}
pub fn instr_DE_6_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fdiv")
}
pub fn instr_DE_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fdivr")
}
pub fn instr_DE_7_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fdivr")
}
// DF /2 (mem): convert ST0 to a 16-bit integer and store it, without
// popping.
pub fn instr_DF_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i16");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_DF_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
// DF /3 (mem): like /2, but pops the FPU stack after the store.
pub fn instr_DF_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i16");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
pub fn instr_DF_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
// DF /4 (mem): fbld is not implemented — log it and raise #UD.
pub fn instr_DF_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    dbg_log!("fbld");
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
// DF /4 register forms: only r == 0 is valid — it loads the FPU status
// word into AX; every other value raises #UD.
pub fn instr_DF_4_reg_jit(ctx: &mut JitContext, r: u32) {
    match r {
        0 => {
            ctx.builder.call_fn0_ret("fpu_load_status_word");
            codegen::gen_set_reg16(ctx, regs::AX);
        },
        _ => codegen::gen_trigger_ud(ctx),
    }
}
// DF /5 (mem): load a 64-bit integer and push it onto the FPU stack.
pub fn instr_DF_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
// DF /5 (reg): fucomip ST(r).
pub fn instr_DF_5_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fucomip", r);
}
// DF /6 (mem): fbstp is not implemented — log it and raise #UD.
pub fn instr_DF_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    dbg_log!("fbstp");
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
// DF /6 (reg): fcomip ST(r).
pub fn instr_DF_6_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fcomip");
}
// DF /7: register form is invalid (#UD); the memory form converts ST0
// to a 64-bit integer, stores it, and pops.
pub fn instr_DF_7_reg_jit(ctx: &mut JitContext, _r: u32) { codegen::gen_trigger_ud(ctx); }
pub fn instr_DF_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret_i64("fpu_convert_to_i64");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
// EB: jmp short (rel8). The 16-bit variant truncates the target to 16
// bits via gen_jmp_rel16; the 32-bit variant emits a plain relative
// jump.
pub fn instr16_EB_jit(ctx: &mut JitContext, imm8: u32) {
    codegen::gen_jmp_rel16(ctx.builder, imm8 as u16);
    // dbg_assert(is_asize_32() || get_real_eip() < 0x10000);
}
pub fn instr32_EB_jit(ctx: &mut JitContext, imm8: u32) {
    // jmp near
    codegen::gen_relative_jump(ctx.builder, imm8 as i32);
    // dbg_assert(is_asize_32() || get_real_eip() < 0x10000);
}
// F6 /0 and F7 /0: test r/m, imm — handlers generated by the shared
// read-instruction macros around gen_test8/16/32.
define_instruction_read8!(gen_test8, instr_F6_0_mem_jit, instr_F6_0_reg_jit, imm8);
define_instruction_read16!(
    gen_test16,
    instr16_F7_0_mem_jit,
    instr16_F7_0_reg_jit,
    imm16
);
define_instruction_read32!(
    gen_test32,
    instr32_F7_0_mem_jit,
    instr32_F7_0_reg_jit,
    imm32
);
// F6 /1 and F7 /1 forward to the /0 (test) handlers — x86's
// undocumented alias encoding of test with an immediate.
pub fn instr_F6_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    instr_F6_0_mem_jit(ctx, modrm_byte, imm)
}
pub fn instr_F6_1_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    instr_F6_0_reg_jit(ctx, r, imm)
}
pub fn instr16_F7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    instr16_F7_0_mem_jit(ctx, modrm_byte, imm)
}
pub fn instr16_F7_1_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    instr16_F7_0_reg_jit(ctx, r, imm)
}
pub fn instr32_F7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    instr32_F7_0_mem_jit(ctx, modrm_byte, imm)
}
pub fn instr32_F7_1_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    instr32_F7_0_reg_jit(ctx, r, imm)
}
// F7 /2 (not) and /3 (neg): read-modify-write handlers generated by the
// shared macros.
define_instruction_read_write_mem16!(gen_not16, instr16_F7_2_mem_jit, instr16_F7_2_reg_jit, none);
define_instruction_read_write_mem32!(gen_not32, instr32_F7_2_mem_jit, instr32_F7_2_reg_jit, none);
define_instruction_read_write_mem16!(gen_neg16, instr16_F7_3_mem_jit, instr16_F7_3_reg_jit, none);
define_instruction_read_write_mem32!(gen_neg32, instr32_F7_3_mem_jit, instr32_F7_3_reg_jit, none);
// F7 /4, 16-bit: mul r/m16. Registers are spilled to memory around the
// mul16 call and reloaded after, because the helper reads/writes
// register state in memory rather than in the wasm locals.
pub fn instr16_F7_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("mul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
pub fn instr16_F7_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("mul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
// F7 /4, 32-bit: mul r/m32, inlined via gen_mul32 (no spill needed).
pub fn instr32_F7_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    gen_mul32(ctx);
}
pub fn instr32_F7_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    gen_mul32(ctx);
}
// F7 /5, 16-bit: imul r/m16 — the operand is sign-extended before the
// out-of-line call; same spill/reload pattern as mul16.
pub fn instr16_F7_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("imul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
pub fn instr16_F7_5_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("imul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
// F7 /5, 32-bit: imul r/m32 via the out-of-line imul32 helper.
pub fn instr32_F7_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("imul32");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
pub fn instr32_F7_5_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("imul32");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
// F7 /6, 16-bit: div r/m16. div16_without_fault returns 0 when the
// division would fault, in which case #DE is raised (eqz inverts the
// return value for the if).
pub fn instr16_F7_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("div16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr16_F7_6_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("div16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
// F7 /6, 32-bit, memory operand: div r/m32. Reads the 32-bit divisor
// into a wasm local and emits the inlined division via gen_div32 (which
// handles the divide-error case itself).
//
// Cleanup: removed a dead `if false { ... }` branch that called the
// out-of-line "div32_without_fault" helper — the inlined gen_div32 path
// was the only one ever taken.
pub fn instr32_F7_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_div32(ctx, &source_operand);
    ctx.builder.free_local(source_operand);
}
// F7 /6, 32-bit, register operand: div r/m32. Feeds the register's
// wasm local straight into the inlined gen_div32 sequence.
//
// Cleanup: removed a dead `if false { ... }` branch that called the
// out-of-line "div32_without_fault" helper — the inlined gen_div32 path
// was the only one ever taken.
pub fn instr32_F7_6_reg_jit(ctx: &mut JitContext, r: u32) {
    gen_div32(ctx, &ctx.register_locals[r as usize].unsafe_clone());
}
// F7 /7, 16-bit: idiv r/m16 — operand sign-extended first; the helper
// returns 0 when the division would fault, in which case #DE is raised.
pub fn instr16_F7_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr16_F7_7_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
// F7 /7, 32-bit: idiv r/m32 — same pattern, no sign-extension step.
pub fn instr32_F7_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv32_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr32_F7_7_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv32_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
// F8 (clc) / F9 (stc): mark flag bit 1 (the carry flag) as no longer
// lazily computed, then clear or set it directly.
pub fn instr_F8_jit(ctx: &mut JitContext) {
    codegen::gen_clear_flags_changed_bits(ctx.builder, 1);
    codegen::gen_clear_flags_bits(ctx.builder, 1);
}
pub fn instr_F9_jit(ctx: &mut JitContext) {
    codegen::gen_clear_flags_changed_bits(ctx.builder, 1);
    codegen::gen_set_flags_bits(ctx.builder, 1);
}
// FA (cli) / FB (sti): delegate to helpers that return 0 when the
// operation is not permitted, in which case #GP(0) is raised.
pub fn instr_FA_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_FA_without_fault");
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.block_end();
}
pub fn instr_FB_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_FB_without_fault");
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.block_end();
    // handle_irqs is specially handled in jit to be called one instruction after this one
}
// FC (cld) / FD (std): clear or set FLAG_DIRECTION in the flags global
// with an inlined read-modify-write.
pub fn instr_FC_jit(ctx: &mut JitContext) {
    ctx.builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(!FLAG_DIRECTION);
    ctx.builder.and_i32();
    ctx.builder.store_aligned_i32(0);
}
pub fn instr_FD_jit(ctx: &mut JitContext) {
    ctx.builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(FLAG_DIRECTION);
    ctx.builder.or_i32();
    ctx.builder.store_aligned_i32(0);
}
// FE /0-/1 and FF /0-/1: inc/dec handlers generated by the shared
// read-modify-write macros.
define_instruction_read_write_mem8!("inc8", instr_FE_0_mem_jit, instr_FE_0_reg_jit, none);
define_instruction_read_write_mem8!("dec8", instr_FE_1_mem_jit, instr_FE_1_reg_jit, none);
define_instruction_read_write_mem16!(gen_inc16, instr16_FF_0_mem_jit, instr16_FF_0_reg_jit, none);
define_instruction_read_write_mem32!(gen_inc32, instr32_FF_0_mem_jit, instr32_FF_0_reg_jit, none);
define_instruction_read_write_mem16!(gen_dec16, instr16_FF_1_mem_jit, instr16_FF_1_reg_jit, none);
define_instruction_read_write_mem32!(gen_dec32, instr32_FF_1_mem_jit, instr32_FF_1_reg_jit, none);
// FF /2: call near indirect. Compute the target (operand + CS base),
// push the return address (current real EIP), then perform an absolute
// indirect jump.
pub fn instr16_FF_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr16_FF_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
// FF /4: jmp near indirect — same as /2 without pushing a return
// address.
pub fn instr16_FF_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr16_FF_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
// FF /6: push r/m — delegated to the shared push helpers.
pub fn instr16_FF_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    push16_mem_jit(ctx, modrm_byte)
}
pub fn instr16_FF_6_reg_jit(ctx: &mut JitContext, r: u32) { push16_reg_jit(ctx, r) }
pub fn instr32_FF_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    push32_mem_jit(ctx, modrm_byte)
}
pub fn instr32_FF_6_reg_jit(ctx: &mut JitContext, r: u32) { push32_reg_jit(ctx, r) }
// Code for conditional jumps is generated automatically by the basic block codegen
pub fn instr16_0F80_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F81_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F82_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F83_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F84_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F85_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F86_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F87_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F88_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F89_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8F_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F80_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F81_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F82_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F83_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F84_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F85_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F86_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F87_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F88_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F89_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8F_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr_90_jit(_ctx: &mut JitContext) {}
fn gen_xchg_reg16(ctx: &mut JitContext, r: u32) {
codegen::gen_get_reg16(ctx, r);
let tmp = ctx.builder.set_new_local();
codegen::gen_get_reg16(ctx, regs::AX);
codegen::gen_set_reg16(ctx, r);
ctx.builder.get_local(&tmp);
codegen::gen_set_reg16(ctx, regs::AX);
ctx.builder.free_local(tmp);
}
// Emit code for `xchg r32, eax`: swap the given 32-bit register with EAX.
fn gen_xchg_reg32(ctx: &mut JitContext, r: u32) {
    // Stash the current EAX, move reg[r] into EAX, then move the stashed
    // value into reg[r].
    codegen::gen_get_reg32(ctx, regs::EAX);
    let saved_eax = ctx.builder.set_new_local();
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_set_reg32(ctx, regs::EAX);
    ctx.builder.get_local(&saved_eax);
    codegen::gen_set_reg32(ctx, r);
    ctx.builder.free_local(saved_eax);
}
// 91..97: xchg r16/r32, ax/eax (90, xchg ax/eax with itself, is nop above)
pub fn instr16_91_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::CX); }
pub fn instr16_92_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::DX); }
pub fn instr16_93_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::BX); }
pub fn instr16_94_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::SP); }
pub fn instr16_95_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::BP); }
pub fn instr16_96_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::SI); }
pub fn instr16_97_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::DI); }
pub fn instr32_91_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::CX); }
pub fn instr32_92_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::DX); }
pub fn instr32_93_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::BX); }
pub fn instr32_94_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::SP); }
pub fn instr32_95_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::BP); }
pub fn instr32_96_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::SI); }
pub fn instr32_97_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::DI); }
// 98: cbw — sign-extend AL into AX
pub fn instr16_98_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg8(ctx, regs::AL);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg16(ctx, regs::AX);
}
// 98: cwde — sign-extend AX into EAX
pub fn instr32_98_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg16(ctx, regs::AX);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg32(ctx, regs::EAX);
}
// 99: cwd — DX := sign bit of AX replicated into all 16 bits
pub fn instr16_99_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg16(ctx, regs::AX);
    // move AX's sign bit (bit 15) up to bit 31, then arithmetic-shift it
    // back down so every bit holds the sign
    ctx.builder.const_i32(16);
    ctx.builder.shl_i32();
    ctx.builder.const_i32(31);
    ctx.builder.shr_s_i32();
    codegen::gen_set_reg16(ctx, regs::DX);
}
// 99: cdq — EDX := sign bit of EAX replicated into all 32 bits
pub fn instr32_99_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg32(ctx, regs::EAX);
    ctx.builder.const_i32(31);
    ctx.builder.shr_s_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
}
// 9C: pushf — push the 16-bit flags
pub fn instr16_9C_jit(ctx: &mut JitContext) {
    // instr_9C_check returns nonzero when pushing the flags is not
    // permitted (presumably vm86 with insufficient iopl — confirm against
    // its definition); in that case raise #GP(0)
    ctx.builder.call_fn0_ret("instr_9C_check");
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.else_();
    ctx.builder.call_fn0_ret("get_eflags");
    let value = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value);
    ctx.builder.block_end();
    ctx.builder.free_local(value);
}
// 9C: pushfd — push the 32-bit flags with bits 16 and 17 (RF and VM)
// masked off (0xFCFFFF keeps bits 0..15 and 18..23)
pub fn instr32_9C_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_9C_check");
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.else_();
    ctx.builder.call_fn0_ret("get_eflags");
    ctx.builder.const_i32(0xFCFFFF);
    ctx.builder.and_i32();
    let value = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value);
    ctx.builder.block_end();
    ctx.builder.free_local(value);
}
// 9D: popf/popfd — pop the flags from the stack and, if the pop enabled
// interrupts (IF transitioned 0 -> 1), exit the jitted code so pending
// irqs can be serviced.
fn gen_popf(ctx: &mut JitContext, is_32: bool) {
    // same privilege check as pushf; #GP(0) when not permitted
    ctx.builder.call_fn0_ret("instr_9C_check");
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.else_();
    codegen::gen_get_flags(ctx.builder);
    let old_eflags = ctx.builder.set_new_local();
    if is_32 {
        codegen::gen_pop32s(ctx);
    }
    else {
        // 16-bit popf replaces only the low 16 bits; keep the high half
        // of the current flags and or in the popped word
        ctx.builder.get_local(&old_eflags);
        ctx.builder.const_i32(!0xFFFF);
        ctx.builder.and_i32();
        codegen::gen_pop16(ctx);
        ctx.builder.or_i32();
    }
    ctx.builder.call_fn1("update_eflags");
    // Detect an IF transition from 0 to 1:
    //   (old_eflags & FLAG_INTERRUPT) == 0   (first eqz)
    // && (new flags & FLAG_INTERRUPT) != 0   (eqz twice = != 0)
    ctx.builder.get_local(&old_eflags);
    ctx.builder.free_local(old_eflags);
    ctx.builder.const_i32(FLAG_INTERRUPT);
    ctx.builder.and_i32();
    ctx.builder.eqz_i32();
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(FLAG_INTERRUPT);
    ctx.builder.and_i32();
    ctx.builder.eqz_i32();
    ctx.builder.eqz_i32();
    ctx.builder.and_i32();
    ctx.builder.if_void();
    {
        // interrupts were just enabled: commit eip and registers, then
        // return to the host so handle_irqs can run
        codegen::gen_set_eip_to_after_current_instruction(ctx);
        codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        codegen::gen_fn0_const(ctx.builder, "handle_irqs");
        ctx.builder.return_();
    }
    ctx.builder.block_end();
    ctx.builder.block_end();
}
pub fn instr16_9D_jit(ctx: &mut JitContext) { gen_popf(ctx, false) }
pub fn instr32_9D_jit(ctx: &mut JitContext) { gen_popf(ctx, true) }
// 9E: sahf — replace the low byte of the flags with AH
pub fn instr_9E_jit(ctx: &mut JitContext) {
    ctx.builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(!0xFF);
    ctx.builder.and_i32();
    codegen::gen_get_reg8(ctx, regs::AH);
    ctx.builder.or_i32();
    // keep only valid flag bits and force the always-set default bits
    ctx.builder.const_i32(FLAGS_MASK);
    ctx.builder.and_i32();
    ctx.builder.const_i32(FLAGS_DEFAULT);
    ctx.builder.or_i32();
    ctx.builder.store_aligned_i32(0);
    // the low flag byte is now stored directly, no longer lazily computed
    codegen::gen_clear_flags_changed_bits(ctx.builder, 0xFF);
}
// 9F: lahf — load AH from the low byte of the (materialized) flags
pub fn instr_9F_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("get_eflags");
    codegen::gen_set_reg8(ctx, regs::AH);
}
// A0: mov AL, moffs8 — load AL from a ds-relative absolute address
pub fn instr_A0_jit(ctx: &mut JitContext, immaddr: u32) {
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg8(ctx, regs::AL);
}
// A1: mov AX, moffs16
pub fn instr16_A1_jit(ctx: &mut JitContext, immaddr: u32) {
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read16(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg16(ctx, regs::AX);
}
// A1: mov EAX, moffs32
pub fn instr32_A1_jit(ctx: &mut JitContext, immaddr: u32) {
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read32(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg32(ctx, regs::EAX);
}
// A2: mov moffs8, AL — gen_safe_write8 stores the low byte of the EAX local
pub fn instr_A2_jit(ctx: &mut JitContext, immaddr: u32) {
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write8(
        ctx,
        &address_local,
        &ctx.register_locals[regs::EAX as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
// A3: mov moffs16, AX
pub fn instr16_A3_jit(ctx: &mut JitContext, immaddr: u32) {
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(
        ctx,
        &address_local,
        &ctx.register_locals[regs::EAX as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
// A3: mov moffs32, EAX
pub fn instr32_A3_jit(ctx: &mut JitContext, immaddr: u32) {
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(
        ctx,
        &address_local,
        &ctx.register_locals[regs::EAX as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
// A8: test AL, imm8 (register_locals[0] is the EAX local)
pub fn instr_A8_jit(ctx: &mut JitContext, imm8: u32) {
    gen_test8(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm8 as i32),
    );
}
// A9: test AX, imm16
pub fn instr16_A9_jit(ctx: &mut JitContext, imm16: u32) {
    gen_test16(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm16 as i32),
    );
}
// A9: test EAX, imm32
pub fn instr32_A9_jit(ctx: &mut JitContext, imm32: u32) {
    gen_test32(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
// Discriminates which x86 string instruction gen_string_ins should emit.
// NOTE(review): this local enum shadows the standard library's `String`
// within this module; a more distinctive name (e.g. `StringIns`) would
// avoid confusion, but renaming touches every use site below.
#[derive(PartialEq)]
enum String {
    INS,
    OUTS,
    MOVS,
    CMPS,
    STOS,
    LODS,
    SCAS,
}
// Emit a call to the interpreter helper implementing an x86 string
// instruction (ins/outs/movs/cmps/stos/lods/scas) of the given operand
// size, honouring an optional F2/F3 repeat prefix. The helper's name is
// assembled as `<op><width><rep-suffix>`, e.g. "movsb_rep".
fn gen_string_ins(ctx: &mut JitContext, ins: String, size: u8, prefix: u8) {
    dbg_assert!(prefix == 0 || prefix == 0xF2 || prefix == 0xF3);
    dbg_assert!(size == 8 || size == 16 || size == 32);

    // First helper argument: whether the address size is 32 bit
    ctx.builder.const_i32(ctx.cpu.asize_32() as i32);

    // Instructions that read through ds:[e]si pass the segment base as a
    // second argument
    let has_segment_arg =
        ins == String::OUTS || ins == String::CMPS || ins == String::LODS || ins == String::MOVS;
    if has_segment_arg {
        ctx.builder.const_i32(0);
        jit_add_seg_offset(ctx, regs::DS);
    }

    let op = match ins {
        String::INS => "ins",
        String::OUTS => "outs",
        String::MOVS => "movs",
        String::CMPS => "cmps",
        String::STOS => "stos",
        String::LODS => "lods",
        String::SCAS => "scas",
    };
    let width = match size {
        8 => "b",
        16 => "w",
        _ => "d",
    };
    let rep = if prefix == 0xF2 || prefix == 0xF3 {
        match ins {
            // only cmps/scas distinguish repnz from repz
            String::CMPS | String::SCAS => {
                if prefix == 0xF2 {
                    "_repnz"
                }
                else {
                    "_repz"
                }
            },
            _ => "_rep",
        }
    }
    else {
        "_no_rep"
    };
    let name = format!("{}{}{}", op, width, rep);

    // The helper operates on the in-memory register file
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    if has_segment_arg {
        ctx.builder.call_fn2(&name)
    }
    else {
        ctx.builder.call_fn1(&name)
    }
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
// String instruction entry points: plain, F2- (repnz) and F3- (repz/rep)
// prefixed variants of ins/outs/movs/cmps/stos/lods/scas for each operand
// size. All delegate to gen_string_ins.
pub fn instr_6C_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 8, 0) }
pub fn instr_F26C_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 8, 0xF2) }
pub fn instr_F36C_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 8, 0xF3) }
pub fn instr16_6D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 16, 0) }
pub fn instr16_F26D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 16, 0xF2) }
pub fn instr16_F36D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 16, 0xF3) }
pub fn instr32_6D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 32, 0) }
pub fn instr32_F26D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 32, 0xF2) }
pub fn instr32_F36D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 32, 0xF3) }
pub fn instr_6E_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 8, 0) }
pub fn instr_F26E_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 8, 0xF2) }
pub fn instr_F36E_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 8, 0xF3) }
pub fn instr16_6F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 16, 0) }
pub fn instr16_F26F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 16, 0xF2) }
pub fn instr16_F36F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 16, 0xF3) }
pub fn instr32_6F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 32, 0) }
pub fn instr32_F26F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 32, 0xF2) }
pub fn instr32_F36F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 32, 0xF3) }
pub fn instr_A4_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 8, 0) }
pub fn instr_F2A4_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 8, 0xF2) }
pub fn instr_F3A4_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 8, 0xF3) }
pub fn instr16_A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 16, 0) }
pub fn instr16_F2A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 16, 0xF2) }
pub fn instr16_F3A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 16, 0xF3) }
pub fn instr32_A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 32, 0) }
pub fn instr32_F2A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 32, 0xF2) }
pub fn instr32_F3A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 32, 0xF3) }
pub fn instr_A6_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 8, 0) }
pub fn instr_F2A6_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 8, 0xF2) }
pub fn instr_F3A6_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 8, 0xF3) }
pub fn instr16_A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 16, 0) }
pub fn instr16_F2A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 16, 0xF2) }
pub fn instr16_F3A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 16, 0xF3) }
pub fn instr32_A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 32, 0) }
pub fn instr32_F2A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 32, 0xF2) }
pub fn instr32_F3A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 32, 0xF3) }
pub fn instr_AA_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 8, 0) }
pub fn instr_F2AA_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 8, 0xF2) }
pub fn instr_F3AA_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 8, 0xF3) }
pub fn instr16_AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 16, 0) }
pub fn instr16_F2AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 16, 0xF2) }
pub fn instr16_F3AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 16, 0xF3) }
pub fn instr32_AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 32, 0) }
pub fn instr32_F2AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 32, 0xF2) }
pub fn instr32_F3AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 32, 0xF3) }
pub fn instr_AC_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 8, 0) }
pub fn instr_F2AC_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 8, 0xF2) }
pub fn instr_F3AC_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 8, 0xF3) }
pub fn instr16_AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 16, 0) }
pub fn instr16_F2AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 16, 0xF2) }
pub fn instr16_F3AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 16, 0xF3) }
pub fn instr32_AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 32, 0) }
pub fn instr32_F2AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 32, 0xF2) }
pub fn instr32_F3AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 32, 0xF3) }
pub fn instr_AE_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 8, 0) }
pub fn instr_F2AE_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 8, 0xF2) }
pub fn instr_F3AE_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 8, 0xF3) }
pub fn instr16_AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 16, 0) }
pub fn instr16_F2AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 16, 0xF2) }
pub fn instr16_F3AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 16, 0xF3) }
pub fn instr32_AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 32, 0) }
pub fn instr32_F2AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 32, 0xF2) }
pub fn instr32_F3AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 32, 0xF3) }
// 0F31: rdtsc — EDX:EAX := time stamp counter. Permitted when cpl == 0 or
// when CR4.TSD is clear; otherwise #GP(0).
pub fn instr_0F31_jit(ctx: &mut JitContext) {
    // cpl == 0 ?
    ctx.builder.load_fixed_u8(global_pointers::cpl as u32);
    ctx.builder.eqz_i32();
    // only the low byte of cr4 is loaded below, so TSD must fit in it
    dbg_assert!(regs::CR4_TSD < 0x100);
    // (cr4 & TSD) == 0 ?
    ctx.builder
        .load_fixed_u8(global_pointers::get_creg_offset(4));
    ctx.builder.const_i32(regs::CR4_TSD as i32);
    ctx.builder.and_i32();
    ctx.builder.eqz_i32();
    ctx.builder.or_i32();
    ctx.builder.if_void();
    ctx.builder.call_fn0_ret_i64("read_tsc");
    let tsc = ctx.builder.tee_new_local_i64();
    // low 32 bits into EAX
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EAX);
    // high 32 bits into EDX
    ctx.builder.get_local_i64(&tsc);
    ctx.builder.const_i64(32);
    ctx.builder.shr_u_i64();
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
    ctx.builder.free_local_i64(tsc);
    ctx.builder.else_();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.block_end();
}
// 0F18..0F1F: prefetch hints and multi-byte nops — no code generated
pub fn instr_0F18_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F18_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F19_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F19_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1C_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1C_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1D_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1D_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1E_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1E_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1F_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1F_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
// 0FA4/0FA5: shld (double-precision shift left) with imm8 shift count
// (masked to 5 bits) or with the count in CL
define_instruction_read_write_mem16!(
    "shld16",
    instr16_0FA4_mem_jit,
    instr16_0FA4_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    "shld32",
    instr32_0FA4_mem_jit,
    instr32_0FA4_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "shld16",
    instr16_0FA5_mem_jit,
    instr16_0FA5_reg_jit,
    reg,
    cl
);
define_instruction_read_write_mem32!(
    "shld32",
    instr32_0FA5_mem_jit,
    instr32_0FA5_reg_jit,
    reg,
    cl
);
// 0FAC/0FAD: shrd (double-precision shift right), same count variants
define_instruction_read_write_mem16!(
    "shrd16",
    instr16_0FAC_mem_jit,
    instr16_0FAC_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    "shrd32",
    instr32_0FAC_mem_jit,
    instr32_0FAC_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "shrd16",
    instr16_0FAD_mem_jit,
    instr16_0FAD_reg_jit,
    reg,
    cl
);
define_instruction_read_write_mem32!(
    "shrd32",
    instr32_0FAD_mem_jit,
    instr32_0FAD_reg_jit,
    reg,
    cl
);
// 0FB1: cmpxchg r/m16, r16 — register form delegates to the cmpxchg16
// helper, which runs on the in-memory register file
pub fn instr16_0FB1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(r2 as i32);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn2_ret("cmpxchg16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    codegen::gen_set_reg16(ctx, r1);
}
// 0FB1: cmpxchg r/m16, r16 — memory form, via a read-modify-write cycle
pub fn instr16_0FB1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
        ctx.builder.const_i32(r as i32);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        ctx.builder.call_fn2_ret("cmpxchg16");
        codegen::gen_move_registers_from_memory_to_locals(ctx);
    });
    ctx.builder.free_local(address_local);
}
// 0FB1: cmpxchg r/m32, r32 — 32-bit version uses inline codegen
pub fn instr32_0FB1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    gen_cmpxchg32(ctx, r2);
    codegen::gen_set_reg32(ctx, r1);
}
pub fn instr32_0FB1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
        gen_cmpxchg32(ctx, r);
    });
    ctx.builder.free_local(address_local);
}
// 0FB6: movzx r16/r32, r/m8 — zero-extend a byte into the destination
pub fn instr16_0FB6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr16_0FB6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr32_0FB6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr32_0FB6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::gen_set_reg32(ctx, r);
}
// 0FB7: movzx r16/r32, r/m16 — zero-extend a word into the destination
pub fn instr16_0FB7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_0FB7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr32_0FB7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_0FB7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::gen_set_reg32(ctx, r2);
}
// F3 0FB8: popcnt r16/r32, r/m16/32 — count set bits via the popcnt helper
pub fn instr16_F30FB8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_F30FB8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr32_F30FB8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_F30FB8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg32(ctx, r2);
}
// 0FBC/0FBD: bsf/bsr (bit scan forward/reverse)
define_instruction_write_reg16!("bsf16", instr16_0FBC_mem_jit, instr16_0FBC_reg_jit);
define_instruction_write_reg32!(gen_bsf32, instr32_0FBC_mem_jit, instr32_0FBC_reg_jit);
define_instruction_write_reg16!("bsr16", instr16_0FBD_mem_jit, instr16_0FBD_reg_jit);
define_instruction_write_reg32!(gen_bsr32, instr32_0FBD_mem_jit, instr32_0FBD_reg_jit);
// 0FBE: movsx r16/r32, r/m8 — sign-extend a byte into the destination
pub fn instr16_0FBE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr16_0FBE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr32_0FBE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr32_0FBE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg32(ctx, r);
}
// 0FBF: movsx r16/r32, r/m16 — sign-extend a word into the destination
pub fn instr16_0FBF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr16_0FBF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr32_0FBF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr32_0FBF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg32(ctx, r);
}
// 0FC1: xadd r/m16, r16 — exchange-and-add via the xadd16 helper, which
// runs on the in-memory register file
pub fn instr16_0FC1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
        ctx.builder.const_i32(r as i32);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        ctx.builder.call_fn2_ret("xadd16");
        codegen::gen_move_registers_from_memory_to_locals(ctx);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr16_0FC1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(r2 as i32);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn2_ret("xadd16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    codegen::gen_set_reg16(ctx, r1);
}
// 0FC1: xadd r/m32, r32 — 32-bit version uses inline codegen (gen_xadd32)
pub fn instr32_0FC1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
        let dest_operand = ctx.builder.set_new_local();
        gen_xadd32(ctx, &dest_operand, r);
        // leave the new value on the stack for the enclosing write-back
        ctx.builder.get_local(&dest_operand);
        ctx.builder.free_local(dest_operand);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr32_0FC1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    let dest_operand = ctx.builder.set_new_local();
    gen_xadd32(ctx, &dest_operand, r2);
    ctx.builder.get_local(&dest_operand);
    codegen::gen_set_reg32(ctx, r1);
    ctx.builder.free_local(dest_operand);
}
// 0FC3: movnti m32, r32 — non-temporal store; emitted as a plain 32-bit
// write here (the non-temporal hint has no effect in this emulator)
pub fn instr_0FC3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(
        ctx,
        &address_local,
        &ctx.register_locals[r as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
// the register encoding of 0FC3 is undefined -> #UD
pub fn instr_0FC3_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) { codegen::gen_trigger_ud(ctx) }
// 0FC7 /1: cmpxchg8b m64 — compare EDX:EAX with the 64-bit memory operand;
// if equal, set ZF and store ECX:EBX; otherwise clear ZF and load the
// memory value into EDX:EAX (storing the original value back unchanged).
pub fn instr16_0FC7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // cmpxchg8b
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::QWORD, &address_local, &|ref mut ctx| {
        // tee: keep the current memory value both in a local and on the stack
        let dest_operand = ctx.builder.tee_new_local_i64();
        // build EDX:EAX as an i64 and compare against the memory value
        codegen::gen_get_reg32(ctx, regs::EDX);
        ctx.builder.extend_unsigned_i32_to_i64();
        ctx.builder.const_i64(32);
        ctx.builder.shl_i64();
        codegen::gen_get_reg32(ctx, regs::EAX);
        ctx.builder.extend_unsigned_i32_to_i64();
        ctx.builder.or_i64();
        ctx.builder.eq_i64();
        // both arms leave the i64 value to be written back on the stack
        ctx.builder.if_i64();
        {
            // equal: ZF := 1, store ECX:EBX
            codegen::gen_set_flags_bits(ctx.builder, FLAG_ZERO);
            codegen::gen_get_reg32(ctx, regs::ECX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.const_i64(32);
            ctx.builder.shl_i64();
            codegen::gen_get_reg32(ctx, regs::EBX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.or_i64();
        }
        ctx.builder.else_();
        {
            // not equal: ZF := 0, EDX:EAX := memory value, write back the
            // original (unchanged) value
            codegen::gen_clear_flags_bits(ctx.builder, FLAG_ZERO);
            ctx.builder.get_local_i64(&dest_operand);
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EAX);
            ctx.builder.get_local_i64(&dest_operand);
            ctx.builder.const_i64(32);
            ctx.builder.shr_u_i64();
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EDX);
            ctx.builder.get_local_i64(&dest_operand);
        }
        ctx.builder.block_end();
        // ZF was set/cleared directly above and is no longer lazily computed
        codegen::gen_clear_flags_changed_bits(ctx.builder, FLAG_ZERO);
        ctx.builder.free_local_i64(dest_operand);
    });
    ctx.builder.free_local(address_local);
}
// register encoding of 0FC7 /1 is undefined -> #UD
pub fn instr16_0FC7_1_reg_jit(ctx: &mut JitContext, _r: u32) { codegen::gen_trigger_ud(ctx); }
// cmpxchg8b ignores the operand-size prefix
pub fn instr32_0FC7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_0FC7_1_mem_jit(ctx, modrm_byte);
}
pub fn instr32_0FC7_1_reg_jit(ctx: &mut JitContext, _r: u32) { codegen::gen_trigger_ud(ctx); }
// C6 /0: mov r/m8, imm8
pub fn instr_C6_0_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    // reg8[r] = imm;
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg8(ctx, r);
}
pub fn instr_C6_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write8(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
// C7 /0: mov r/m16, imm16
pub fn instr16_C7_0_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    // reg16[r] = imm;
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_C7_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
// C7 /0: mov r/m32, imm32
pub fn instr32_C7_0_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    // reg32[r] = imm;
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_C7_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
// 0FC8..0FCF: bswap r32 — one entry point per register index
pub fn instr_0FC8_jit(ctx: &mut JitContext) { gen_bswap(ctx, 0) }
pub fn instr_0FC9_jit(ctx: &mut JitContext) { gen_bswap(ctx, 1) }
pub fn instr_0FCA_jit(ctx: &mut JitContext) { gen_bswap(ctx, 2) }
pub fn instr_0FCB_jit(ctx: &mut JitContext) { gen_bswap(ctx, 3) }
pub fn instr_0FCC_jit(ctx: &mut JitContext) { gen_bswap(ctx, 4) }
pub fn instr_0FCD_jit(ctx: &mut JitContext) { gen_bswap(ctx, 5) }
pub fn instr_0FCE_jit(ctx: &mut JitContext) { gen_bswap(ctx, 6) }
pub fn instr_0FCF_jit(ctx: &mut JitContext) { gen_bswap(ctx, 7) }
// 0FAF: imul r16/r32, r/m16/32
define_instruction_write_reg16!("imul_reg16", instr16_0FAF_mem_jit, instr16_0FAF_reg_jit);
define_instruction_write_reg32!(gen_imul_reg32, instr32_0FAF_mem_jit, instr32_0FAF_reg_jit);
// Generates the mem/reg entry points for a 16-bit cmovcc instruction with
// condition code $cond.
macro_rules! define_cmovcc16(
    ($cond:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            // the memory operand is read (and may fault) unconditionally,
            // even when the condition is false
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            let value = ctx.builder.set_new_local();
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            ctx.builder.get_local(&value);
            codegen::gen_set_reg16(ctx, r);
            ctx.builder.block_end();
            ctx.builder.free_local(value);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_set_reg16(ctx, r2);
            ctx.builder.block_end();
        }
    );
);
// Generates the mem/reg entry points for a 32-bit cmovcc instruction with
// condition code $cond.
macro_rules! define_cmovcc32(
    ($cond:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            // unconditional read, conditional register write (see above)
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let value = ctx.builder.set_new_local();
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            ctx.builder.get_local(&value);
            codegen::gen_set_reg32(ctx, r);
            ctx.builder.block_end();
            ctx.builder.free_local(value);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            codegen::gen_get_reg32(ctx, r1);
            codegen::gen_set_reg32(ctx, r2);
            ctx.builder.block_end();
        }
    );
);
// 0F40..0F4F: cmovcc — condition code is the low nibble of the opcode
define_cmovcc16!(0x0, instr16_0F40_mem_jit, instr16_0F40_reg_jit);
define_cmovcc16!(0x1, instr16_0F41_mem_jit, instr16_0F41_reg_jit);
define_cmovcc16!(0x2, instr16_0F42_mem_jit, instr16_0F42_reg_jit);
define_cmovcc16!(0x3, instr16_0F43_mem_jit, instr16_0F43_reg_jit);
define_cmovcc16!(0x4, instr16_0F44_mem_jit, instr16_0F44_reg_jit);
define_cmovcc16!(0x5, instr16_0F45_mem_jit, instr16_0F45_reg_jit);
define_cmovcc16!(0x6, instr16_0F46_mem_jit, instr16_0F46_reg_jit);
define_cmovcc16!(0x7, instr16_0F47_mem_jit, instr16_0F47_reg_jit);
define_cmovcc16!(0x8, instr16_0F48_mem_jit, instr16_0F48_reg_jit);
define_cmovcc16!(0x9, instr16_0F49_mem_jit, instr16_0F49_reg_jit);
define_cmovcc16!(0xA, instr16_0F4A_mem_jit, instr16_0F4A_reg_jit);
define_cmovcc16!(0xB, instr16_0F4B_mem_jit, instr16_0F4B_reg_jit);
define_cmovcc16!(0xC, instr16_0F4C_mem_jit, instr16_0F4C_reg_jit);
define_cmovcc16!(0xD, instr16_0F4D_mem_jit, instr16_0F4D_reg_jit);
define_cmovcc16!(0xE, instr16_0F4E_mem_jit, instr16_0F4E_reg_jit);
define_cmovcc16!(0xF, instr16_0F4F_mem_jit, instr16_0F4F_reg_jit);
define_cmovcc32!(0x0, instr32_0F40_mem_jit, instr32_0F40_reg_jit);
define_cmovcc32!(0x1, instr32_0F41_mem_jit, instr32_0F41_reg_jit);
define_cmovcc32!(0x2, instr32_0F42_mem_jit, instr32_0F42_reg_jit);
define_cmovcc32!(0x3, instr32_0F43_mem_jit, instr32_0F43_reg_jit);
define_cmovcc32!(0x4, instr32_0F44_mem_jit, instr32_0F44_reg_jit);
define_cmovcc32!(0x5, instr32_0F45_mem_jit, instr32_0F45_reg_jit);
define_cmovcc32!(0x6, instr32_0F46_mem_jit, instr32_0F46_reg_jit);
define_cmovcc32!(0x7, instr32_0F47_mem_jit, instr32_0F47_reg_jit);
define_cmovcc32!(0x8, instr32_0F48_mem_jit, instr32_0F48_reg_jit);
define_cmovcc32!(0x9, instr32_0F49_mem_jit, instr32_0F49_reg_jit);
define_cmovcc32!(0xA, instr32_0F4A_mem_jit, instr32_0F4A_reg_jit);
define_cmovcc32!(0xB, instr32_0F4B_mem_jit, instr32_0F4B_reg_jit);
define_cmovcc32!(0xC, instr32_0F4C_mem_jit, instr32_0F4C_reg_jit);
define_cmovcc32!(0xD, instr32_0F4D_mem_jit, instr32_0F4D_reg_jit);
define_cmovcc32!(0xE, instr32_0F4E_mem_jit, instr32_0F4E_reg_jit);
define_cmovcc32!(0xF, instr32_0F4F_mem_jit, instr32_0F4F_reg_jit);
// Generates the mem/reg entry points for a setcc instruction with
// condition code $cond. The destination byte is written unconditionally:
// it receives the condition result normalized to 0 or 1 (via `!= 0`).
macro_rules! define_setcc(
    ($cond:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, _r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.const_i32(0);
            ctx.builder.ne_i32();
            let value_local = ctx.builder.set_new_local();
            codegen::gen_safe_write8(ctx, &address_local, &value_local);
            ctx.builder.free_local(address_local);
            ctx.builder.free_local(value_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, _r2: u32) {
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.const_i32(0);
            ctx.builder.ne_i32();
            codegen::gen_set_reg8(ctx, r1);
        }
    );
);
// SETcc handlers for condition codes 0x0..0xF (opcodes 0F90..0F9F).
define_setcc!(0x0, instr_0F90_mem_jit, instr_0F90_reg_jit);
define_setcc!(0x1, instr_0F91_mem_jit, instr_0F91_reg_jit);
define_setcc!(0x2, instr_0F92_mem_jit, instr_0F92_reg_jit);
define_setcc!(0x3, instr_0F93_mem_jit, instr_0F93_reg_jit);
define_setcc!(0x4, instr_0F94_mem_jit, instr_0F94_reg_jit);
define_setcc!(0x5, instr_0F95_mem_jit, instr_0F95_reg_jit);
define_setcc!(0x6, instr_0F96_mem_jit, instr_0F96_reg_jit);
define_setcc!(0x7, instr_0F97_mem_jit, instr_0F97_reg_jit);
define_setcc!(0x8, instr_0F98_mem_jit, instr_0F98_reg_jit);
define_setcc!(0x9, instr_0F99_mem_jit, instr_0F99_reg_jit);
define_setcc!(0xA, instr_0F9A_mem_jit, instr_0F9A_reg_jit);
define_setcc!(0xB, instr_0F9B_mem_jit, instr_0F9B_reg_jit);
define_setcc!(0xC, instr_0F9C_mem_jit, instr_0F9C_reg_jit);
define_setcc!(0xD, instr_0F9D_mem_jit, instr_0F9D_reg_jit);
define_setcc!(0xE, instr_0F9E_mem_jit, instr_0F9E_reg_jit);
define_setcc!(0xF, instr_0F9F_mem_jit, instr_0F9F_reg_jit);
// movups xmm, xmm/m128 (0F10): unaligned 128-bit load into xmm r.
pub fn instr_0F10_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
let dest = global_pointers::get_reg_xmm_offset(r);
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_0F10_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// movupd xmm, xmm/m128 (66 0F10): identical data movement to movups.
pub fn instr_660F10_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
let dest = global_pointers::get_reg_xmm_offset(r);
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_660F10_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// movsd xmm, m64 (F2 0F10): 64-bit load with upper half zeroed — same
// data movement as movq (F3 0F7E), so delegate to that handler.
pub fn instr_F20F10_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_F30F7E_mem_jit(ctx, modrm_byte, r)
}
// movsd xmm, xmm (reg form merges only the low half): handled by the
// interpreter helper.
pub fn instr_F20F10_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_F20F10_reg");
}
// movups xmm/m128, xmm (0F11): store form; memory case shares the 0F29
// store path, reg case copies in the reverse direction (r2 -> r1).
pub fn instr_0F11_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_0F11_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
// movupd store form (66 0F11), same delegation as above.
pub fn instr_660F11_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_660F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_660F11_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
// movsd m64, xmm (F2 0F11): 64-bit store, shares the movq store path (66 0FD6).
pub fn instr_F20F11_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_660FD6_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_F20F11_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_F20F11_reg");
}
// movaps xmm, xmm/m128 (0F28): 128-bit load into xmm r.
// NOTE(review): no alignment check is generated here (see the matching
// XXX on the 0F29 store below).
pub fn instr_0F28_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
let dest = global_pointers::get_reg_xmm_offset(r);
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_0F28_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// movapd xmm, xmm/m128 (66 0F28): identical data movement.
pub fn instr_660F28_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
let dest = global_pointers::get_reg_xmm_offset(r);
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_660F28_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// movaps m128, xmm (0F29): 128-bit store. Reads xmm r as two aligned i64
// halves (low at the register offset, high at offset + 8) and emits a
// safe 128-bit write. Several other 128-bit store encodings delegate here.
pub fn instr_0F29_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// XXX: Aligned write or #gp
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r) as i32);
ctx.builder.load_aligned_i64(0);
let value_local_low = ctx.builder.set_new_local_i64();
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r) as i32 + 8);
ctx.builder.load_aligned_i64(0);
let value_local_high = ctx.builder.set_new_local_i64();
codegen::gen_safe_write128(ctx, &address_local, &value_local_low, &value_local_high);
ctx.builder.free_local(address_local);
ctx.builder.free_local_i64(value_local_low);
ctx.builder.free_local_i64(value_local_high);
}
pub fn instr_0F29_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
// movapd store form (66 0F29): shares the 0F29 store path.
pub fn instr_660F29_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_660F29_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
// movntps m128, xmm (0F2B): non-temporal store, implemented as an ordinary
// 128-bit store. The register form of this encoding is invalid -> #UD.
pub fn instr_0F2B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_0F2B_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
codegen::gen_trigger_ud(ctx);
}
// movntpd m128, xmm (66 0F2B): same as above.
pub fn instr_660F2B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_660F2B_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
codegen::gen_trigger_ud(ctx);
}
// cvttsd2si r32, xmm/m64 (F2 0F2C): convert scalar double to i32 with
// truncation. Memory form reads 64 bits and reinterprets as f64.
pub fn instr_F20F2C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.reinterpret_i64_as_f64();
ctx.builder
.call_fn1_f64_ret("sse_convert_with_truncation_f64_to_i32");
codegen::gen_set_reg32(ctx, r);
}
// Register form: load the low f64 of xmm r1, convert, store into reg32 r2.
pub fn instr_F20F2C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
ctx.builder.load_aligned_f64(0);
ctx.builder
.call_fn1_f64_ret("sse_convert_with_truncation_f64_to_i32");
codegen::gen_set_reg32(ctx, r2);
}
// cvttss2si r32, xmm/m32 (F3 0F2C): scalar single to i32 with truncation.
pub fn instr_F30F2C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
ctx.builder.reinterpret_i32_as_f32();
ctx.builder
.call_fn1_f32_ret("sse_convert_with_truncation_f32_to_i32");
codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F30F2C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
ctx.builder.load_aligned_f32(0);
ctx.builder
.call_fn1_f32_ret("sse_convert_with_truncation_f32_to_i32");
codegen::gen_set_reg32(ctx, r2);
}
// cvtsd2si r32, xmm/m64 (F2 0F2D): same as 0F2C but rounding (no truncation).
pub fn instr_F20F2D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.reinterpret_i64_as_f64();
ctx.builder.call_fn1_f64_ret("sse_convert_f64_to_i32");
codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F20F2D_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
ctx.builder.load_aligned_f64(0);
ctx.builder.call_fn1_f64_ret("sse_convert_f64_to_i32");
codegen::gen_set_reg32(ctx, r2);
}
// cvtss2si r32, xmm/m32 (F3 0F2D): scalar single to i32, rounding.
pub fn instr_F30F2D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
ctx.builder.reinterpret_i32_as_f32();
ctx.builder.call_fn1_f32_ret("sse_convert_f32_to_i32");
codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F30F2D_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
ctx.builder.load_aligned_f32(0);
ctx.builder.call_fn1_f32_ret("sse_convert_f32_to_i32");
codegen::gen_set_reg32(ctx, r2);
}
// MMX ops 0F60..0F6B (punpck* / pack* / pcmpgt* family): each is dispatched
// to the interpreter helper of the same name. 0F60..0F62 use the
// *_mm32/*_mem32 read helpers (narrow source read variant — see those
// helpers' definitions); the remaining opcodes read a full 64-bit source.
pub fn instr_0F60_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem32(ctx, "instr_0F60", modrm_byte, r);
}
pub fn instr_0F60_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm32(ctx, "instr_0F60", r1, r2);
}
pub fn instr_0F61_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem32(ctx, "instr_0F61", modrm_byte, r);
}
pub fn instr_0F61_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm32(ctx, "instr_0F61", r1, r2);
}
pub fn instr_0F62_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem32(ctx, "instr_0F62", modrm_byte, r);
}
pub fn instr_0F62_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm32(ctx, "instr_0F62", r1, r2);
}
pub fn instr_0F63_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F63", modrm_byte, r);
}
pub fn instr_0F63_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F63", r1, r2);
}
pub fn instr_0F64_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F64", modrm_byte, r);
}
pub fn instr_0F64_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F64", r1, r2);
}
pub fn instr_0F65_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F65", modrm_byte, r);
}
pub fn instr_0F65_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F65", r1, r2);
}
pub fn instr_0F66_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F66", modrm_byte, r);
}
pub fn instr_0F66_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F66", r1, r2);
}
pub fn instr_0F67_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F67", modrm_byte, r);
}
pub fn instr_0F67_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F67", r1, r2);
}
pub fn instr_0F68_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F68", modrm_byte, r);
}
pub fn instr_0F68_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F68", r1, r2);
}
pub fn instr_0F69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F69", modrm_byte, r);
}
pub fn instr_0F69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F69", r1, r2);
}
pub fn instr_0F6A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F6A", modrm_byte, r);
}
pub fn instr_0F6A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F6A", r1, r2);
}
pub fn instr_0F6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0F6B", modrm_byte, r);
}
pub fn instr_0F6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0F6B", r1, r2);
}
// SSE integer ops 66 0F60..0F6D (punpck* / pack* / pcmpgt* xmm family):
// each dispatches to the interpreter helper of the same name after a full
// 128-bit source read.
pub fn instr_660F60_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// Note: Only requires 64-bit read, but is allowed to do 128-bit read
sse_read128_xmm_mem(ctx, "instr_660F60", modrm_byte, r);
}
pub fn instr_660F60_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F60", r1, r2);
}
pub fn instr_660F61_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// Note: Only requires 64-bit read, but is allowed to do 128-bit read
sse_read128_xmm_mem(ctx, "instr_660F61", modrm_byte, r);
}
pub fn instr_660F61_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F61", r1, r2);
}
pub fn instr_660F62_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F62", modrm_byte, r);
}
pub fn instr_660F62_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F62", r1, r2);
}
pub fn instr_660F63_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F63", modrm_byte, r);
}
pub fn instr_660F63_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F63", r1, r2);
}
pub fn instr_660F64_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F64", modrm_byte, r);
}
pub fn instr_660F64_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F64", r1, r2);
}
pub fn instr_660F65_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F65", modrm_byte, r);
}
pub fn instr_660F65_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F65", r1, r2);
}
pub fn instr_660F66_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F66", modrm_byte, r);
}
pub fn instr_660F66_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F66", r1, r2);
}
pub fn instr_660F67_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F67", modrm_byte, r);
}
pub fn instr_660F67_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F67", r1, r2);
}
pub fn instr_660F68_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F68", modrm_byte, r);
}
pub fn instr_660F68_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F68", r1, r2);
}
pub fn instr_660F69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F69", modrm_byte, r);
}
pub fn instr_660F69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F69", r1, r2);
}
pub fn instr_660F6A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F6A", modrm_byte, r);
}
pub fn instr_660F6A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F6A", r1, r2);
}
pub fn instr_660F6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F6B", modrm_byte, r);
}
pub fn instr_660F6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F6B", r1, r2);
}
pub fn instr_660F6C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F6C", modrm_byte, r);
}
pub fn instr_660F6C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F6C", r1, r2);
}
pub fn instr_660F6D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F6D", modrm_byte, r);
}
pub fn instr_660F6D_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F6D", r1, r2);
}
// movd mm, r/m32 (0F6E): 32-bit value passed to the interpreter helper.
pub fn instr_0F6E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn2("instr_0F6E")
}
pub fn instr_0F6E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_get_reg32(ctx, r1);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_0F6E")
}
// movd xmm, r/m32 (66 0F6E).
pub fn instr_660F6E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn2("instr_660F6E")
}
pub fn instr_660F6E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_get_reg32(ctx, r1);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_660F6E")
}
// movq mm, mm/m64 (0F6F): 64-bit read handed to the interpreter helper.
pub fn instr_0F6F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// XXX: Aligned read or #gp
codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn2_i64_i32("instr_0F6F")
}
pub fn instr_0F6F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_0F6F_reg")
}
// movdqa xmm, xmm/m128 (66 0F6F): direct 128-bit load into xmm r.
pub fn instr_660F6F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// XXX: Aligned read or #gp
let dest = global_pointers::get_reg_xmm_offset(r);
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_660F6F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// movdqu xmm, xmm/m128 (F3 0F6F): unaligned variant, no alignment concern.
pub fn instr_F30F6F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
let dest = global_pointers::get_reg_xmm_offset(r);
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_F30F6F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// pshufw mm, mm/m64, imm8 (0F70): 64-bit source plus shuffle immediate,
// passed to the interpreter helper.
pub fn instr_0F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn3_i64_i32_i32("instr_0F70");
}
// Register form: load the source mm register directly as an i64.
pub fn instr_0F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_mmx_offset(r1) as i32);
ctx.builder.load_aligned_i64(0);
ctx.builder.const_i32(r2 as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn3_i64_i32_i32("instr_0F70");
}
/// pshufd xmm, xmm/m128, imm8 (66 0F70): read the 128-bit source operand
/// into the SSE scratch area, then hand (scratch, dest reg, imm8) to the
/// interpreter helper which performs the shuffle.
pub fn instr_660F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
    let scratch = global_pointers::sse_scratch_register as u32;
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, scratch);
    ctx.builder.const_i32(scratch as i32);
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_660F70");
}
/// pshufd xmm, xmm, imm8 (register source): copy xmm r1 into the SSE
/// scratch area first so the register and memory forms can share one
/// interpreter helper taking a scratch pointer.
pub fn instr_660F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
    codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
    let scratch = global_pointers::sse_scratch_register;
    ctx.builder.const_i32(scratch as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_660F70");
}
// pshuflw xmm, xmm/m128, imm8 (F2 0F70): 128-bit source staged through the
// SSE scratch register, then dispatched to the interpreter helper.
pub fn instr_F20F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
let dest = global_pointers::sse_scratch_register as u32;
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
ctx.builder.const_i32(dest as i32);
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn3("instr_F20F70");
}
pub fn instr_F20F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
let dest = global_pointers::sse_scratch_register;
ctx.builder.const_i32(dest as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn3("instr_F20F70");
}
// pshufhw xmm, xmm/m128, imm8 (F3 0F70): same staging scheme as pshuflw.
pub fn instr_F30F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
let dest = global_pointers::sse_scratch_register as u32;
codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
ctx.builder.const_i32(dest as i32);
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn3("instr_F30F70");
}
pub fn instr_F30F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
let dest = global_pointers::sse_scratch_register;
ctx.builder.const_i32(dest as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn3("instr_F30F70");
}
// Shift-by-immediate groups:
//   0F71 /2 psrlw, /4 psraw, /6 psllw (mm)
//   0F72 /2 psrld, /4 psrad, /6 pslld (mm)
//   0F73 /2 psrlq, /6 psllq (mm)
// and their 66-prefixed xmm forms, including the xmm-only
//   66 0F73 /3 psrldq and /7 pslldq.
// The memory forms of these encodings are invalid (#UD); the register
// forms dispatch to interpreter helpers taking (reg, imm8).
pub fn instr_0F71_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F71_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F71_2_reg");
}
pub fn instr_0F71_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F71_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F71_4_reg");
}
pub fn instr_0F71_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F71_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F71_6_reg");
}
pub fn instr_0F72_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F72_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F72_2_reg");
}
pub fn instr_0F72_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F72_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F72_4_reg");
}
pub fn instr_0F72_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F72_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F72_6_reg");
}
pub fn instr_0F73_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F73_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F73_2_reg");
}
pub fn instr_0F73_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F73_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_0F73_6_reg");
}
pub fn instr_660F71_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F71_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F71_2_reg");
}
pub fn instr_660F71_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F71_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F71_4_reg");
}
pub fn instr_660F71_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F71_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F71_6_reg");
}
pub fn instr_660F72_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F72_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F72_2_reg");
}
pub fn instr_660F72_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F72_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F72_4_reg");
}
pub fn instr_660F72_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F72_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F72_6_reg");
}
pub fn instr_660F73_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F73_2_reg");
}
pub fn instr_660F73_3_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_3_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F73_3_reg");
}
pub fn instr_660F73_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F73_6_reg");
}
pub fn instr_660F73_7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_7_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.const_i32(imm8 as i32);
ctx.builder.call_fn2("instr_660F73_7_reg");
}
// pcmpeqb xmm, xmm/m128 (66 0F74): dispatched to the interpreter helper
// after a 128-bit source read.
pub fn instr_660F74_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660F74", modrm_byte, r);
}
pub fn instr_660F74_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660F74", r1, r2);
}
// movd r/m32, mm (0F7E): the interpreter helper returns the 32-bit value
// to store; memory form writes it through a safe write, reg form sets reg32.
pub fn instr_0F7E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn1_ret("instr_0F7E");
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write32(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
}
pub fn instr_0F7E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn1_ret("instr_0F7E");
codegen::gen_set_reg32(ctx, r1);
}
// movd r/m32, xmm (66 0F7E): reads the low 32 bits of xmm r directly.
pub fn instr_660F7E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
ctx.builder
.load_fixed_i32(global_pointers::get_reg_xmm_offset(r));
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write32(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
}
pub fn instr_660F7E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder
.load_fixed_i32(global_pointers::get_reg_xmm_offset(r2));
codegen::gen_set_reg32(ctx, r1);
}
// movq mm/m64, mm (0F7F): helper returns the 64-bit value; memory form
// stores it via a safe 64-bit write, reg form uses a dedicated helper.
pub fn instr_0F7F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn1_ret_i64("instr_0F7F");
let value_local = ctx.builder.set_new_local_i64();
codegen::gen_safe_write64(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local_i64(value_local);
}
pub fn instr_0F7F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_0F7F_reg")
}
// movq xmm, m64 (F3 0F7E): load 64 bits into the low half of xmm r and
// explicitly zero the upper 64 bits (store of const 0 at offset + 8).
pub fn instr_F30F7E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r) as i32);
codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.store_aligned_i64(0);
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r) as i32 + 8);
ctx.builder.const_i64(0);
ctx.builder.store_aligned_i64(0);
}
pub fn instr_F30F7E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_F30F7E_reg");
}
// movdqa m128, xmm (66 0F7F) and movdqu m128, xmm (F3 0F7F): both share
// the 0F29 128-bit store path; reg forms copy r2 -> r1.
pub fn instr_660F7F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_660F7F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
pub fn instr_F30F7F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_F30F7F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
/// push fs, 16-bit operand size (0FA0): read the FS selector into a wasm
/// local and push it onto the guest stack.
pub fn instr16_0FA0_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::FS);
    let selector = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &selector);
    ctx.builder.free_local(selector);
}
/// push fs, 32-bit operand size (0FA0): read the FS selector into a wasm
/// local and push it onto the guest stack.
pub fn instr32_0FA0_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::FS);
    let selector = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &selector);
    ctx.builder.free_local(selector);
}
/// push gs, 16-bit operand size (0FA8): read the GS selector into a wasm
/// local and push it onto the guest stack.
pub fn instr16_0FA8_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::GS);
    let selector = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &selector);
    ctx.builder.free_local(selector);
}
/// push gs, 32-bit operand size (0FA8): read the GS selector into a wasm
/// local and push it onto the guest stack.
pub fn instr32_0FA8_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::GS);
    let selector = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &selector);
    ctx.builder.free_local(selector);
}
// bt r/m16, r16 (0FA3): register form tests a bit of r1 selected by
// r2 masked to 15 bits.
pub fn instr16_0FA3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_bt(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
15,
)
}
// Memory form: the bit offset register may address bits outside the
// operand, so the effective address is adjusted by the sign-extended
// offset >> 3 (byte granularity), a single byte is read, and the bit
// within that byte (offset & 7, via gen_bt's mask of 7) is tested.
pub fn instr16_0FA3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
codegen::gen_get_reg16(ctx, r);
codegen::sign_extend_i16(ctx.builder);
ctx.builder.const_i32(3);
ctx.builder.shr_s_i32();
ctx.builder.add_i32();
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read8(ctx, &address_local);
ctx.builder.free_local(address_local);
let value = ctx.builder.set_new_local();
gen_bt(
&mut ctx.builder,
&value,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
7,
);
ctx.builder.free_local(value);
}
// bt r/m32, r32 (0FA3): 32-bit variants of the above (mask 31; no
// sign-extension needed since the offset register is already 32 bits).
pub fn instr32_0FA3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_bt(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
31,
)
}
pub fn instr32_0FA3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
codegen::gen_get_reg32(ctx, r);
ctx.builder.const_i32(3);
ctx.builder.shr_s_i32();
ctx.builder.add_i32();
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read8(ctx, &address_local);
ctx.builder.free_local(address_local);
let value = ctx.builder.set_new_local();
gen_bt(
&mut ctx.builder,
&value,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
7,
);
ctx.builder.free_local(value);
}
// bts/btr/btc with register bit offset (0FAB / 0FB3 / 0FBB):
// register forms mutate r1 in place via gen_bts/gen_btr/gen_btc with the
// offset masked to the operand width; memory forms go through gen_bit_rmw,
// which performs the read-modify-write at the bit-adjusted address.
pub fn instr16_0FAB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_bts(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
15,
)
}
pub fn instr16_0FAB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_bts,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
16,
);
}
pub fn instr32_0FAB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_bts(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
31,
)
}
pub fn instr32_0FAB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_bts,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
32,
);
}
// btr (0FB3): reset the selected bit.
pub fn instr16_0FB3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_btr(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
15,
)
}
pub fn instr16_0FB3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btr,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
16,
);
}
pub fn instr32_0FB3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_btr(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
31,
)
}
pub fn instr32_0FB3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btr,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
32,
);
}
// btc (0FBB): complement the selected bit.
pub fn instr16_0FBB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_btc(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
15,
)
}
pub fn instr16_0FBB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btc,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
16,
);
}
pub fn instr32_0FBB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
gen_btc(
&mut ctx.builder,
&ctx.register_locals[r1 as usize],
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
31,
)
}
pub fn instr32_0FBB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btc,
&LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
32,
);
}
// bt r/m16, imm8 (0FBA /4): immediate bit offset, masked to the operand
// width in the register form.
pub fn instr16_0FBA_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_bt(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
15,
)
}
// Memory form: the byte offset ((imm8 & 15) >> 3) is folded into the
// address at compile time; a single byte is read and bit (imm8 & 7,
// via gen_bt's mask of 7) tested.
pub fn instr16_0FBA_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
ctx.builder.const_i32((imm8 as i32 & 15) >> 3);
ctx.builder.add_i32();
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read8(ctx, &address_local);
ctx.builder.free_local(address_local);
let value = ctx.builder.set_new_local();
gen_bt(
&mut ctx.builder,
&value,
&LocalOrImmediate::Immediate(imm8 as i32),
7,
);
ctx.builder.free_local(value);
}
// bt r/m32, imm8 (0FBA /4): 32-bit variants (mask 31).
pub fn instr32_0FBA_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_bt(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
31,
)
}
pub fn instr32_0FBA_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
ctx.builder.const_i32((imm8 as i32 & 31) >> 3);
ctx.builder.add_i32();
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read8(ctx, &address_local);
ctx.builder.free_local(address_local);
let value = ctx.builder.set_new_local();
gen_bt(
&mut ctx.builder,
&value,
&LocalOrImmediate::Immediate(imm8 as i32),
7,
);
ctx.builder.free_local(value);
}
// 0F BA /5, /6, /7 groups: bit set / reset / complement (gen_bts, gen_btr,
// gen_btc) with an immediate bit index. Register forms mask the index to
// the operand width (15 / 31); memory forms are read-modify-write through
// gen_bit_rmw with the operand size (16 / 32).
pub fn instr16_0FBA_5_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_bts(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        15,
    )
}
pub fn instr16_0FBA_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_bts,
        &LocalOrImmediate::Immediate(imm8 as i32),
        16,
    );
}
pub fn instr32_0FBA_5_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_bts(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        31,
    )
}
pub fn instr32_0FBA_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_bts,
        &LocalOrImmediate::Immediate(imm8 as i32),
        32,
    );
}
// /6: bit reset.
pub fn instr16_0FBA_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_btr(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        15,
    )
}
pub fn instr16_0FBA_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btr,
        &LocalOrImmediate::Immediate(imm8 as i32),
        16,
    );
}
pub fn instr32_0FBA_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_btr(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        31,
    )
}
pub fn instr32_0FBA_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btr,
        &LocalOrImmediate::Immediate(imm8 as i32),
        32,
    );
}
// /7: bit complement.
pub fn instr16_0FBA_7_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_btc(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        15,
    )
}
pub fn instr16_0FBA_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btc,
        &LocalOrImmediate::Immediate(imm8 as i32),
        16,
    );
}
pub fn instr32_0FBA_7_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_btc(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        31,
    )
}
pub fn instr32_0FBA_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btc,
        &LocalOrImmediate::Immediate(imm8 as i32),
        32,
    );
}
// 0F AE /5: memory form is unimplemented and generates #UD; the register
// form is an intentional no-op (the r/m field is ignored per the comment).
pub fn instr_0FAE_5_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte) {
    dbg_log!("Generating #ud for unimplemented instruction: instr_0FAE_5_mem_jit");
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0FAE_5_reg_jit(_ctx: &mut JitContext, _r: u32) {
    // For this instruction, the processor ignores the r/m field of the ModR/M byte.
}
// Delegate wrappers for the 0F D1–0F DF MMX group and the 66 0F D1–D5 SSE
// group. Each handler loads the source operand (64-bit for MMX via
// mmx_read64_mm_*, 128-bit for SSE via sse_read128_xmm_*) and calls the
// same-named interpreter helper. 0F D7 is the exception: its register form
// calls instr_0FD7 and stores the i32 result into a general register, and
// its memory form generates #UD.
pub fn instr_0FD1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD1", modrm_byte, r);
}
pub fn instr_0FD1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD1", r1, r2);
}
pub fn instr_0FD2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD2", modrm_byte, r);
}
pub fn instr_0FD2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD2", r1, r2);
}
pub fn instr_0FD3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD3", modrm_byte, r);
}
pub fn instr_0FD3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD3", r1, r2);
}
pub fn instr_0FD4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD4", modrm_byte, r);
}
pub fn instr_0FD4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD4", r1, r2);
}
pub fn instr_0FD5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD5", modrm_byte, r);
}
pub fn instr_0FD5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD5", r1, r2);
}
// 0F D7: memory form is invalid (#UD); register form writes the i32 result
// of instr_0FD7(r1) into general register r2.
pub fn instr_0FD7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
    codegen::gen_trigger_ud(ctx)
}
pub fn instr_0FD7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.call_fn1_ret("instr_0FD7");
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr_0FD8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD8", modrm_byte, r);
}
pub fn instr_0FD8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD8", r1, r2);
}
pub fn instr_0FD9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FD9", modrm_byte, r);
}
pub fn instr_0FD9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FD9", r1, r2);
}
pub fn instr_0FDA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FDA", modrm_byte, r);
}
pub fn instr_0FDA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FDA", r1, r2);
}
pub fn instr_0FDB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FDB", modrm_byte, r);
}
pub fn instr_0FDB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FDB", r1, r2);
}
pub fn instr_0FDC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FDC", modrm_byte, r);
}
pub fn instr_0FDC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FDC", r1, r2);
}
pub fn instr_0FDD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FDD", modrm_byte, r);
}
pub fn instr_0FDD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FDD", r1, r2);
}
pub fn instr_0FDE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FDE", modrm_byte, r);
}
pub fn instr_0FDE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FDE", r1, r2);
}
pub fn instr_0FDF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FDF", modrm_byte, r);
}
pub fn instr_0FDF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FDF", r1, r2);
}
pub fn instr_660FD1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD1", modrm_byte, r);
}
pub fn instr_660FD1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD1", r1, r2);
}
pub fn instr_660FD2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD2", modrm_byte, r);
}
pub fn instr_660FD2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD2", r1, r2);
}
pub fn instr_660FD3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD3", modrm_byte, r);
}
pub fn instr_660FD3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD3", r1, r2);
}
pub fn instr_660FD4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD4", modrm_byte, r);
}
pub fn instr_660FD4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD4", r1, r2);
}
pub fn instr_660FD5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD5", modrm_byte, r);
}
pub fn instr_660FD5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD5", r1, r2);
}
// 66 0F D6 (memory form): store the low 64 bits of xmm register r to the
// resolved effective address. The xmm value is read as one aligned i64 from
// the register's offset in guest state, then written via the safe-write path.
pub fn instr_660FD6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r) as i32);
    ctx.builder.load_aligned_i64(0);
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
}
// 66 0F D6 (register form): forwarded to the interpreter helper with both
// register numbers as arguments.
pub fn instr_660FD6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_660FD6_reg");
}
// 66 0F D7: memory form is invalid (#UD); register form writes the i32
// result of instr_660FD7(r1) into general register r2.
pub fn instr_660FD7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
    codegen::gen_trigger_ud(ctx)
}
pub fn instr_660FD7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.call_fn1_ret("instr_660FD7");
    codegen::gen_set_reg32(ctx, r2);
}
// Delegate wrappers: 66 0F D8–DF and 66 0F E0–E6 (SSE, 128-bit source),
// 0F E0–E5 and 0F E8–EF (MMX, 64-bit source), plus F2 0F E6. Each loads
// its source operand and calls the same-named interpreter helper.
pub fn instr_660FD8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD8", modrm_byte, r);
}
pub fn instr_660FD8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD8", r1, r2);
}
pub fn instr_660FD9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FD9", modrm_byte, r);
}
pub fn instr_660FD9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FD9", r1, r2);
}
pub fn instr_660FDA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FDA", modrm_byte, r);
}
pub fn instr_660FDA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FDA", r1, r2);
}
pub fn instr_660FDB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FDB", modrm_byte, r);
}
pub fn instr_660FDB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FDB", r1, r2);
}
pub fn instr_660FDC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FDC", modrm_byte, r);
}
pub fn instr_660FDC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FDC", r1, r2);
}
pub fn instr_660FDD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FDD", modrm_byte, r);
}
pub fn instr_660FDD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FDD", r1, r2);
}
pub fn instr_660FDE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FDE", modrm_byte, r);
}
pub fn instr_660FDE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FDE", r1, r2);
}
pub fn instr_660FDF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FDF", modrm_byte, r);
}
pub fn instr_660FDF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FDF", r1, r2);
}
pub fn instr_0FE0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE0", modrm_byte, r);
}
pub fn instr_0FE0_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE0", r1, r2);
}
pub fn instr_0FE1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE1", modrm_byte, r);
}
pub fn instr_0FE1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE1", r1, r2);
}
pub fn instr_0FE2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE2", modrm_byte, r);
}
pub fn instr_0FE2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE2", r1, r2);
}
pub fn instr_0FE3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE3", modrm_byte, r);
}
pub fn instr_0FE3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE3", r1, r2);
}
pub fn instr_0FE4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE4", modrm_byte, r);
}
pub fn instr_0FE4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE4", r1, r2);
}
pub fn instr_0FE5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE5", modrm_byte, r);
}
pub fn instr_0FE5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE5", r1, r2);
}
pub fn instr_0FE8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE8", modrm_byte, r);
}
pub fn instr_0FE8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE8", r1, r2);
}
pub fn instr_0FE9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FE9", modrm_byte, r);
}
pub fn instr_0FE9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FE9", r1, r2);
}
pub fn instr_0FEA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FEA", modrm_byte, r);
}
pub fn instr_0FEA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FEA", r1, r2);
}
pub fn instr_0FEB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FEB", modrm_byte, r);
}
pub fn instr_0FEB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FEB", r1, r2);
}
pub fn instr_0FEC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FEC", modrm_byte, r);
}
pub fn instr_0FEC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FEC", r1, r2);
}
pub fn instr_0FED_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FED", modrm_byte, r);
}
pub fn instr_0FED_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FED", r1, r2);
}
pub fn instr_0FEE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FEE", modrm_byte, r);
}
pub fn instr_0FEE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FEE", r1, r2);
}
pub fn instr_0FEF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FEF", modrm_byte, r);
}
pub fn instr_0FEF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FEF", r1, r2);
}
pub fn instr_660FE0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE0", modrm_byte, r);
}
pub fn instr_660FE0_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE0", r1, r2);
}
pub fn instr_660FE1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE1", modrm_byte, r);
}
pub fn instr_660FE1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE1", r1, r2);
}
pub fn instr_660FE2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE2", modrm_byte, r);
}
pub fn instr_660FE2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE2", r1, r2);
}
pub fn instr_660FE3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE3", modrm_byte, r);
}
pub fn instr_660FE3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE3", r1, r2);
}
pub fn instr_660FE4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE4", modrm_byte, r);
}
pub fn instr_660FE4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE4", r1, r2);
}
pub fn instr_660FE5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE5", modrm_byte, r);
}
pub fn instr_660FE5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE5", r1, r2);
}
pub fn instr_660FE6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE6", modrm_byte, r);
}
pub fn instr_660FE6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE6", r1, r2);
}
pub fn instr_F20FE6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_F20FE6", modrm_byte, r);
}
pub fn instr_F20FE6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_F20FE6", r1, r2);
}
// F3 0F E6: both forms pass a 64-bit source value plus the destination
// register number to the interpreter helper. The memory form reads 64 bits
// through the safe-read path; the register form loads the low 64 bits
// directly from xmm register r1's slot in guest state.
pub fn instr_F30FE6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2_i64_i32("instr_F30FE6")
}
pub fn instr_F30FE6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2_i64_i32("instr_F30FE6")
}
// 66 0F E7: the memory form reuses the 0F 29 store handler (the
// non-temporal hint is not modelled); the register form is invalid (#UD).
pub fn instr_660FE7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_660FE7_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
    codegen::gen_trigger_ud(ctx);
}
// Delegate wrappers: 66 0F E8–EF (SSE, 128-bit source) and 0F F1–F6
// (MMX, 64-bit source). Each loads its source operand and calls the
// same-named interpreter helper.
pub fn instr_660FE8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE8", modrm_byte, r);
}
pub fn instr_660FE8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE8", r1, r2);
}
pub fn instr_660FE9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FE9", modrm_byte, r);
}
pub fn instr_660FE9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FE9", r1, r2);
}
pub fn instr_660FEA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FEA", modrm_byte, r);
}
pub fn instr_660FEA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FEA", r1, r2);
}
pub fn instr_660FEB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FEB", modrm_byte, r);
}
pub fn instr_660FEB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FEB", r1, r2);
}
pub fn instr_660FEC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FEC", modrm_byte, r);
}
pub fn instr_660FEC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FEC", r1, r2);
}
pub fn instr_660FED_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FED", modrm_byte, r);
}
pub fn instr_660FED_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FED", r1, r2);
}
pub fn instr_660FEE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FEE", modrm_byte, r);
}
pub fn instr_660FEE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FEE", r1, r2);
}
pub fn instr_660FEF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FEF", modrm_byte, r);
}
pub fn instr_660FEF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FEF", r1, r2);
}
pub fn instr_0FF1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF1", modrm_byte, r);
}
pub fn instr_0FF1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF1", r1, r2);
}
pub fn instr_0FF2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF2", modrm_byte, r);
}
pub fn instr_0FF2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF2", r1, r2);
}
pub fn instr_0FF3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF3", modrm_byte, r);
}
pub fn instr_0FF3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF3", r1, r2);
}
pub fn instr_0FF4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF4", modrm_byte, r);
}
pub fn instr_0FF4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF4", r1, r2);
}
pub fn instr_0FF5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF5", modrm_byte, r);
}
pub fn instr_0FF5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF5", r1, r2);
}
pub fn instr_0FF6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF6", modrm_byte, r);
}
pub fn instr_0FF6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF6", r1, r2);
}
// 0F F7: memory form is invalid (#UD). The register form calls the
// "maskmovq" fallback, which performs a byte-masked store to [DS:(E)DI]
// and may page-fault, so the statement order below is load-bearing.
pub fn instr_0FF7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
    codegen::gen_trigger_ud(ctx)
}
pub fn instr_0FF7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Record this instruction's eip first so a fault inside the fallback
    // is attributed to the right instruction.
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    // Spill register locals so the called function sees current values.
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    // Implicit destination: (E)DI depending on address size, plus the
    // DS segment base/offset.
    if ctx.cpu.asize_32() {
        codegen::gen_get_reg32(ctx, regs::EDI);
    }
    else {
        codegen::gen_get_reg16(ctx, regs::DI);
    }
    jit_add_seg_offset(ctx, regs::DS);
    ctx.builder.call_fn3("maskmovq");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // If the store page-faulted, leave the generated code cleanly:
    // spill locals again and return out of the wasm function.
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
// Delegate wrappers: 0F F8–FE (MMX, 64-bit source) and 66 0F F1–F6
// (SSE, 128-bit source). Each loads its source operand and calls the
// same-named interpreter helper.
pub fn instr_0FF8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF8", modrm_byte, r);
}
pub fn instr_0FF8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF8", r1, r2);
}
pub fn instr_0FF9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FF9", modrm_byte, r);
}
pub fn instr_0FF9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FF9", r1, r2);
}
pub fn instr_0FFA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FFA", modrm_byte, r);
}
pub fn instr_0FFA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FFA", r1, r2);
}
pub fn instr_0FFB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FFB", modrm_byte, r);
}
pub fn instr_0FFB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FFB", r1, r2);
}
pub fn instr_0FFC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FFC", modrm_byte, r);
}
pub fn instr_0FFC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FFC", r1, r2);
}
pub fn instr_0FFD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FFD", modrm_byte, r);
}
pub fn instr_0FFD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FFD", r1, r2);
}
pub fn instr_0FFE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0FFE", modrm_byte, r);
}
pub fn instr_0FFE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0FFE", r1, r2);
}
pub fn instr_660FF1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF1", modrm_byte, r);
}
pub fn instr_660FF1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF1", r1, r2);
}
pub fn instr_660FF2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF2", modrm_byte, r);
}
pub fn instr_660FF2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF2", r1, r2);
}
pub fn instr_660FF3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF3", modrm_byte, r);
}
pub fn instr_660FF3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF3", r1, r2);
}
pub fn instr_660FF4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF4", modrm_byte, r);
}
pub fn instr_660FF4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF4", r1, r2);
}
pub fn instr_660FF5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF5", modrm_byte, r);
}
pub fn instr_660FF5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF5", r1, r2);
}
pub fn instr_660FF6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF6", modrm_byte, r);
}
pub fn instr_660FF6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF6", r1, r2);
}
// 66 0F F7: memory form is invalid (#UD). The register form mirrors
// instr_0FF7_reg_jit but calls the "maskmovdqu" fallback; see that handler
// for why the save-eip / spill / fault-check sequence must stay in order.
pub fn instr_660FF7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
    codegen::gen_trigger_ud(ctx)
}
pub fn instr_660FF7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Record eip for correct fault attribution.
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    // Spill locals so the fallback sees current register values.
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    // Implicit destination address: (E)DI + DS segment offset.
    if ctx.cpu.asize_32() {
        codegen::gen_get_reg32(ctx, regs::EDI);
    }
    else {
        codegen::gen_get_reg16(ctx, regs::DI);
    }
    jit_add_seg_offset(ctx, regs::DS);
    ctx.builder.call_fn3("maskmovdqu");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // On page fault: track the exit, spill, and return out of the wasm fn.
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
// Delegate wrappers for 66 0F F8–FE (SSE, 128-bit source): load the source
// operand and call the same-named interpreter helper.
pub fn instr_660FF8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF8", modrm_byte, r);
}
pub fn instr_660FF8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF8", r1, r2);
}
pub fn instr_660FF9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FF9", modrm_byte, r);
}
pub fn instr_660FF9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FF9", r1, r2);
}
pub fn instr_660FFA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FFA", modrm_byte, r);
}
pub fn instr_660FFA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FFA", r1, r2);
}
pub fn instr_660FFB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FFB", modrm_byte, r);
}
pub fn instr_660FFB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FFB", r1, r2);
}
pub fn instr_660FFC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FFC", modrm_byte, r);
}
pub fn instr_660FFC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FFC", r1, r2);
}
pub fn instr_660FFD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FFD", modrm_byte, r);
}
pub fn instr_660FFD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FFD", r1, r2);
}
pub fn instr_660FFE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660FFE", modrm_byte, r);
}
pub fn instr_660FFE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660FFE", r1, r2);
}
// File-concatenation artifact (original marker: "jit imul32") — a new source file begins below.
#![allow(non_snake_case)]
use codegen;
use codegen::BitSize;
use cpu::cpu::{
FLAGS_ALL, FLAGS_DEFAULT, FLAGS_MASK, FLAG_ADJUST, FLAG_CARRY, FLAG_DIRECTION, FLAG_INTERRUPT,
FLAG_OVERFLOW, FLAG_SUB, FLAG_ZERO, OPSIZE_8, OPSIZE_16, OPSIZE_32,
};
use cpu::global_pointers;
use jit::JitContext;
use modrm::{jit_add_seg_offset, ModrmByte};
use prefix::SEG_PREFIX_ZERO;
use prefix::{PREFIX_66, PREFIX_67, PREFIX_F2, PREFIX_F3};
use regs;
use regs::{AX, BP, BX, CX, DI, DX, SI, SP};
use regs::{CS, DS, ES, FS, GS, SS};
use regs::{EAX, EBP, EBX, ECX, EDI, EDX, ESI, ESP};
use wasmgen::wasm_builder::{WasmBuilder, WasmLocal};
/// Source operand for the codegen helpers in this file: either a wasm local
/// that already holds the value, or a compile-time i32 immediate.
enum LocalOrImmediate<'a> {
    WasmLocal(&'a WasmLocal),
    Immediate(i32),
}
impl<'a> LocalOrImmediate<'a> {
    /// Emit wasm code that pushes this operand's value onto the stack:
    /// a `local.get` for a wasm local, a `i32.const` for an immediate.
    pub fn gen_get(&self, builder: &mut WasmBuilder) {
        match *self {
            LocalOrImmediate::WasmLocal(local) => builder.get_local(local),
            LocalOrImmediate::Immediate(value) => builder.const_i32(value),
        }
    }
}
/// Translate a single guest instruction. Clears any prefixes left by the
/// previous instruction and records the instruction's start eip (used for
/// fault attribution), then dispatches on the next opcode byte, with the
/// current operand-size flag encoded in bit 8 of the dispatch index.
pub fn jit_instruction(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes = 0;
    ctx.start_of_current_instruction = ctx.cpu.eip;
    ::gen::jit::jit(
        ctx.cpu.read_imm8() as u32 | (ctx.cpu.osize_32() as u32) << 8,
        ctx,
        instr_flags,
    );
}
/// Continue translating the current instruction after a prefix byte:
/// same dispatch as jit_instruction, but without resetting prefixes or
/// the start-of-instruction eip, so prefixes accumulate.
pub fn jit_handle_prefix(ctx: &mut JitContext, instr_flags: &mut u32) {
    ::gen::jit::jit(
        ctx.cpu.read_imm8() as u32 | (ctx.cpu.osize_32() as u32) << 8,
        ctx,
        instr_flags,
    );
}
/// Record a segment-override prefix and continue with the next byte.
/// The override is stored as `segment + 1` in the prefix bits (so 0 can
/// mean "no override"); `segment` is a register index in 0..=5.
pub fn jit_handle_segment_prefix(segment: u32, ctx: &mut JitContext, instr_flags: &mut u32) {
    dbg_assert!(segment <= 5);
    ctx.cpu.prefixes |= segment + 1;
    jit_handle_prefix(ctx, instr_flags)
}
// 0F escape byte: dispatch into the two-byte opcode table. The 32-bit
// operand-size variant sets bit 8 (0x100) of the dispatch index.
pub fn instr16_0F_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ::gen::jit0f::jit(ctx.cpu.read_imm8() as u32, ctx, instr_flags)
}
pub fn instr32_0F_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ::gen::jit0f::jit(ctx.cpu.read_imm8() as u32 | 0x100, ctx, instr_flags)
}
// Segment-override prefix bytes (26/2E/36/3E/64/65), each mapping to its
// segment register via jit_handle_segment_prefix.
pub fn instr_26_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(ES, ctx, instr_flags)
}
pub fn instr_2E_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(CS, ctx, instr_flags)
}
pub fn instr_36_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(SS, ctx, instr_flags)
}
pub fn instr_3E_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(DS, ctx, instr_flags)
}
pub fn instr_64_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(FS, ctx, instr_flags)
}
pub fn instr_65_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    jit_handle_segment_prefix(GS, ctx, instr_flags)
}
// Remaining prefix bytes: 66 (operand size), 67 (address size),
// F0 (lock — ignored here), F2/F3 (repne/rep). Each sets its flag in
// ctx.cpu.prefixes and continues with the next byte.
pub fn instr_66_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_66;
    jit_handle_prefix(ctx, instr_flags)
}
pub fn instr_67_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_67;
    jit_handle_prefix(ctx, instr_flags)
}
pub fn instr_F0_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    // lock: Ignore
    jit_handle_prefix(ctx, instr_flags)
}
pub fn instr_F2_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_F2;
    jit_handle_prefix(ctx, instr_flags)
}
pub fn instr_F3_jit(ctx: &mut JitContext, instr_flags: &mut u32) {
    ctx.cpu.prefixes |= PREFIX_F3;
    jit_handle_prefix(ctx, instr_flags)
}
// Load a 128-bit SSE source operand into the scratch register in guest
// state, then call the interpreter helper `name` with (scratch ptr, r).
fn sse_read128_xmm_mem(ctx: &mut JitContext, name: &str, modrm_byte: ModrmByte, r: u32) {
    let dest = global_pointers::sse_scratch_register as u32;
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2(name);
}
// Register-register variant of the above.
fn sse_read128_xmm_xmm(ctx: &mut JitContext, name: &str, r1: u32, r2: u32) {
    // Make a copy to avoid aliasing problems: Called function expects a reg128, which must not
    // alias with memory
    codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
    let dest = global_pointers::sse_scratch_register;
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2(name);
}
// Emit an inline 128-bit register-to-register move (xmm r1 -> xmm r2) as two
// aligned 64-bit load/store pairs, avoiding a runtime-helper call. For each
// half the stack holds [dest_address, loaded_value] before the store.
fn sse_mov_xmm_xmm(ctx: &mut JitContext, r1: u32, r2: u32) {
    // low 8 bytes: load from r1, store to r2
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r2) as i32);
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.store_aligned_i64(0);
    // high 8 bytes
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r2) as i32 + 8);
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32 + 8);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.store_aligned_i64(0);
}
// MMX operand helpers. Each pushes the source operand and the destination
// register index, then calls the named runtime helper.
// `op mm(r), m32`: 32-bit memory source.
fn mmx_read64_mm_mem32(ctx: &mut JitContext, name: &str, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2(name)
}
// `op mm(r2), mm(r1)` reading only the low 32 bits of r1.
fn mmx_read64_mm_mm32(ctx: &mut JitContext, name: &str, r1: u32, r2: u32) {
    ctx.builder
        .const_i32(global_pointers::get_reg_mmx_offset(r1) as i32);
    ctx.builder.load_aligned_i32(0);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2(name);
}
// `op mm(r), m64`: 64-bit memory source, passed as i64.
fn mmx_read64_mm_mem(ctx: &mut JitContext, name: &str, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2_i64_i32(name)
}
// `op mm(r2), mm(r1)`: full 64-bit register source, passed as i64.
fn mmx_read64_mm_mm(ctx: &mut JitContext, name: &str, r1: u32, r2: u32) {
    ctx.builder
        .const_i32(global_pointers::get_reg_mmx_offset(r1) as i32);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2_i64_i32(name);
}
// Emit `push r16`: read the 16-bit register into a fresh wasm local and push
// it onto the guest stack.
fn push16_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Emit `push r32`: the 32-bit register is already cached in a wasm local, so
// pass that local directly to gen_push32 without copying.
//
// Fix: the argument previously read `®` — a text-corruption artifact where
// `&reg` had been collapsed into the U+00AE character (an `&reg;` HTML
// entity). That is not valid Rust; restored to `&reg`.
fn push32_reg_jit(ctx: &mut JitContext, r: u32) {
    // unsafe_clone only duplicates the local's handle; ownership of the
    // underlying wasm local stays with ctx.register_locals.
    let reg = ctx.register_locals[r as usize].unsafe_clone();
    codegen::gen_push32(ctx, &reg);
}
// Emit `push imm16`: materialize the immediate in a wasm local and push it.
fn push16_imm_jit(ctx: &mut JitContext, imm: u32) {
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Emit `push imm32`.
fn push32_imm_jit(ctx: &mut JitContext, imm: u32) {
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Emit `push m16`: read the 16-bit memory operand, then push it.
fn push16_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Emit `push m32`.
fn push32_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
}
// Emit `pop r16`: pop a 16-bit value off the guest stack and store it into
// the register.
fn pop16_reg_jit(ctx: &mut JitContext, reg: u32) {
    codegen::gen_pop16(ctx);
    codegen::gen_set_reg16(ctx, reg);
}
// Emit `pop r32`.
fn pop32_reg_jit(ctx: &mut JitContext, reg: u32) {
    codegen::gen_pop32s(ctx);
    codegen::gen_set_reg32(ctx, reg);
}
// Accumulator-immediate arithmetic forms (e.g. `add al, imm8`).
// 8-bit: calls the runtime helper `op` with (AL, imm8) and writes the result
// back to AL.
fn group_arith_al_imm8(ctx: &mut JitContext, op: &str, imm8: u32) {
    codegen::gen_get_reg8(ctx, regs::AL);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2_ret(op);
    codegen::gen_set_reg8(ctx, regs::AL);
}
// 16-bit variant: helper call with (AX, imm16).
fn group_arith_ax_imm16(ctx: &mut JitContext, op: &str, imm16: u32) {
    codegen::gen_get_reg16(ctx, regs::AX);
    ctx.builder.const_i32(imm16 as i32);
    ctx.builder.call_fn2_ret(op);
    codegen::gen_set_reg16(ctx, regs::AX);
}
// 32-bit variant: unlike the 8/16-bit forms, `op` here is a Rust codegen
// function (e.g. gen_add32) that emits the operation inline on the cached
// EAX local instead of calling a runtime helper.
fn group_arith_eax_imm32(
    ctx: &mut JitContext,
    op: &dyn Fn(&mut WasmBuilder, &WasmLocal, &LocalOrImmediate),
    imm32: u32,
) {
    op(
        ctx.builder,
        &ctx.register_locals[regs::EAX as usize],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
// Defines handler pairs for 8-bit instructions that only *read* their
// operands and produce flags (cmp/test style): the destination local is
// freed without being written back. `$fn` is a Rust codegen function taking
// (builder, dest, source).
macro_rules! define_instruction_read8(
    // `op r/m8, r8` and `op r8, r8` forms.
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let source_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::WasmLocal(&source_operand));
            ctx.builder.free_local(dest_operand);
            codegen::gen_free_reg8_or_alias(ctx, r, source_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r1);
            let source_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r2);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::WasmLocal(&source_operand));
            codegen::gen_free_reg8_or_alias(ctx, r1, dest_operand);
            codegen::gen_free_reg8_or_alias(ctx, r2, source_operand);
        }
    );
    // `op r/m8, imm8` and `op r8, imm8` forms; `$imm` selects the mask_imm!
    // masking rule applied to the immediate.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::Immediate(imm as i32));
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r1);
            $fn(ctx.builder, &dest_operand, &LocalOrImmediate::Immediate(imm as i32));
            codegen::gen_free_reg8_or_alias(ctx, r1, dest_operand);
        }
    );
);
// 16-bit read-only instruction handlers (cmp/test style); the register
// operand is the cached 32-bit wasm local (the codegen `$fn` is expected to
// honor 16-bit operand size itself).
macro_rules! define_instruction_read16(
    // `op r/m16, r16` and `op r16, r16` forms.
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize])
            );
        }
    );
    // `op r/m16, imm` and `op r16, imm` forms.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::Immediate(imm as i32),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r: u32, imm: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r as usize],
                &LocalOrImmediate::Immediate(imm as i32),
            );
        }
    );
);
// 32-bit read-only instruction handlers (cmp/test style); structurally
// identical to define_instruction_read16 but with a 32-bit memory read.
macro_rules! define_instruction_read32(
    // `op r/m32, r32` and `op r32, r32` forms.
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize])
            );
        }
    );
    // `op r/m32, imm` and `op r32, imm` forms.
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let dest_operand = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            $fn(
                ctx.builder,
                &dest_operand,
                &LocalOrImmediate::Immediate(imm as i32),
            );
            ctx.builder.free_local(dest_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r: u32, imm: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r as usize],
                &LocalOrImmediate::Immediate(imm as i32),
            );
        }
    );
);
// 8-bit instructions of the form `op r8, r/m8`: the *register* operand is
// the destination. `$fn` is the name of a runtime helper called with
// (dest_value, source_value); its return value is stored into the register.
macro_rules! define_instruction_write_reg8(
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_get_reg8(ctx, r);
            codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            // r2 is the destination here (modrm reg field), r1 the source.
            codegen::gen_get_reg8(ctx, r2);
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r2);
        }
    )
);
// 16-bit counterpart of define_instruction_write_reg8: `op r16, r/m16` via a
// runtime helper whose result is written to the register operand.
macro_rules! define_instruction_write_reg16(
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_get_reg16(ctx, r);
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            // r2 is the destination (modrm reg field), r1 the source.
            codegen::gen_get_reg16(ctx, r2);
            codegen::gen_get_reg16(ctx, r1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r2);
        }
    )
);
// 32-bit counterpart: here `$fn` is a Rust codegen function (not a runtime
// helper name) operating directly on the cached register local, so the
// result lands in the register without an explicit write-back call.
macro_rules! define_instruction_write_reg32(
    ($fn:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let source_operand = ctx.builder.set_new_local();
            $fn(
                ctx.builder,
                &ctx.register_locals[r as usize],
                &LocalOrImmediate::WasmLocal(&source_operand),
            );
            ctx.builder.free_local(source_operand);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            // r2 (modrm reg field) is the destination, r1 the source.
            $fn(
                ctx.builder,
                &ctx.register_locals[r2 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r1 as usize]),
            );
        }
    );
);
// Compile-time immediate masking, selected by the `$imm` identifier used in
// the instruction-definition macros. Only shift counts (imm8_5bits) are
// masked, to 5 bits, matching x86's 32-bit shift-count masking; all other
// immediate kinds pass through unchanged.
macro_rules! mask_imm(
    ($imm:expr, imm8_5bits) => { $imm & 31 };
    ($imm:expr, imm8) => { $imm };
    ($imm:expr, imm8s) => { $imm };
    ($imm:expr, imm16) => { $imm };
    ($imm:expr, imm32) => { $imm };
);
// 8-bit read-modify-write instruction handlers (`op r/m8, ...`). For the
// memory form, gen_safe_read_write loads the byte, runs the supplied closure
// with the value on the wasm stack, and writes the closure's result back.
// The final identifier selects the second operand: a register, the constant
// 1, CL (masked to 5 bits), no second operand, or an immediate.
macro_rules! define_instruction_read_write_mem8(
    // second operand: register (modrm reg field)
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg8(ctx, r);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg8(ctx, r1);
            codegen::gen_get_reg8(ctx, r2);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // second operand: constant 1 (e.g. `shl r/m8, 1`)
    ($fn:expr, $name_mem:ident, $name_reg:ident, constant_one) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(1);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.const_i32(1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // second operand: CL, masked to 5 bits
    ($fn:expr, $name_mem:ident, $name_reg:ident, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg8(ctx, r1);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // unary form: no second operand (e.g. not/neg)
    ($fn:expr, $name_mem:ident, $name_reg:ident, none) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                ctx.builder.call_fn1_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.call_fn1_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
    // second operand: immediate, masked per mask_imm!
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg8(ctx, r1);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg8(ctx, r1);
        }
    );
);
// 16-bit read-modify-write instruction handlers. Same structure as the
// 8-bit macro, plus three-operand arms (reg+cl and reg+imm, e.g. shld/shrd)
// and a `none` arm that emits inline codegen rather than a helper call.
macro_rules! define_instruction_read_write_mem16(
    // second operand: register
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg16(ctx, r);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg16(ctx, r2);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // second operand: constant 1
    ($fn:expr, $name_mem:ident, $name_reg:ident, constant_one) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(1);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg16(ctx, r1);
            ctx.builder.const_i32(1);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // second operand: CL masked to 5 bits
    ($fn:expr, $name_mem:ident, $name_reg:ident, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // three-operand: register + CL (e.g. shld/shrd by cl)
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg16(ctx, r);
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg16(ctx, r2);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // three-operand: register + immediate
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm);
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg16(ctx, r);
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_get_reg16(ctx, r2);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
    // unary form: here `$fn` is a Rust codegen function mutating a local
    // in place; for the memory form the result is left on the stack for
    // gen_safe_read_write's write-back.
    ($fn:expr, $name_mem:ident, $name_reg:ident, none) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                let mut dest_operand = ctx.builder.set_new_local();
                $fn(ctx.builder, &mut dest_operand);
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(ctx.builder, &mut ctx.register_locals[r1 as usize]);
        }
    );
    // second operand: immediate
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn2_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg16(ctx, r1);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn2_ret($fn);
            codegen::gen_set_reg16(ctx, r1);
        }
    );
);
// 32-bit read-modify-write instruction handlers. Unlike the 8/16-bit
// versions, most arms take `$fn` as a Rust codegen function operating on the
// cached register locals (inline flag computation) instead of a runtime
// helper name; the reg+cl and reg+imm arms still call runtime helpers.
macro_rules! define_instruction_read_write_mem32(
    // second operand: register
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(
                    ctx.builder,
                    &dest_operand,
                    &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
                );
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
            );
        }
    );
    // second operand: constant 1
    ($fn:expr, $name_mem:ident, $name_reg:ident, constant_one) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(ctx.builder, &dest_operand, &LocalOrImmediate::Immediate(1));
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(ctx.builder, &ctx.register_locals[r1 as usize], &LocalOrImmediate::Immediate(1));
        }
    );
    // second operand: ECX local (the codegen `$fn` masks the count itself)
    ($fn:expr, $name_mem:ident, $name_reg:ident, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(
                    ctx.builder,
                    &dest_operand,
                    &LocalOrImmediate::WasmLocal(&ctx.register_locals[regs::ECX as usize]),
                );
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::WasmLocal(&ctx.register_locals[regs::ECX as usize]),
            );
        }
    );
    // three-operand: register + CL, via runtime helper (e.g. shld/shrd)
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, cl) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg32(ctx, r);
                codegen::gen_get_reg8(ctx, regs::CL);
                ctx.builder.const_i32(31);
                ctx.builder.and_i32();
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_get_reg32(ctx, r1);
            codegen::gen_get_reg32(ctx, r2);
            codegen::gen_get_reg8(ctx, regs::CL);
            ctx.builder.const_i32(31);
            ctx.builder.and_i32();
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg32(ctx, r1);
        }
    );
    // three-operand: register + immediate, via runtime helper
    ($fn:expr, $name_mem:ident, $name_reg:ident, reg, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                codegen::gen_get_reg32(ctx, r);
                ctx.builder.const_i32(imm as i32);
                ctx.builder.call_fn3_ret($fn);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            codegen::gen_get_reg32(ctx, r1);
            codegen::gen_get_reg32(ctx, r2);
            ctx.builder.const_i32(imm as i32);
            ctx.builder.call_fn3_ret($fn);
            codegen::gen_set_reg32(ctx, r1);
        }
    );
    // unary form: `$fn` mutates a local in place
    ($fn:expr, $name_mem:ident, $name_reg:ident, none) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let mut dest_operand = ctx.builder.set_new_local();
                $fn(ctx.builder, &mut dest_operand);
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32) {
            $fn(ctx.builder, &mut ctx.register_locals[r1 as usize]);
        }
    );
    // second operand: immediate
    ($fn:expr, $name_mem:ident, $name_reg:ident, $imm:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            let imm = mask_imm!(imm, $imm) as i32;
            codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
                let dest_operand = ctx.builder.set_new_local();
                $fn(
                    ctx.builder,
                    &dest_operand,
                    &LocalOrImmediate::Immediate(imm),
                );
                ctx.builder.get_local(&dest_operand);
                ctx.builder.free_local(dest_operand);
            });
            ctx.builder.free_local(address_local);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, imm: u32) {
            let imm = mask_imm!(imm, $imm);
            $fn(
                ctx.builder,
                &ctx.register_locals[r1 as usize],
                &LocalOrImmediate::Immediate(imm as i32),
            );
        }
    );
);
// Emit a 32-bit ADD on a cached register local, recording the lazy-flags
// state (last_op1, last_result, op size, flags-changed mask) used to compute
// eflags on demand.
fn gen_add32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    // last_op1 must be captured before dest is overwritten
    codegen::gen_set_last_op1(builder, &dest_operand);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.add_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL);
}
// Emit a 32-bit SUB; identical to gen_add32 except for the operation and the
// FLAG_SUB marker in the flags-changed mask.
fn gen_sub32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    codegen::gen_set_last_op1(builder, &dest_operand);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.sub_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL | FLAG_SUB);
}
// Emit CMP: computes dest - source into the last_result global and stores
// dest into last_op1 without modifying the destination operand, so flags can
// be derived lazily. For 8/16-bit sizes both stored values are truncated to
// the operand width.
fn gen_cmp(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
    size: i32,
) {
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.sub_i32();
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL | FLAG_SUB);
}
// Size-specialized wrappers used by the instruction-definition macros.
fn gen_cmp8(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_cmp(builder, dest, source, OPSIZE_8)
}
fn gen_cmp16(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_cmp(builder, dest, source, OPSIZE_16)
}
fn gen_cmp32(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_cmp(builder, dest, source, OPSIZE_32)
}
// Emit 32-bit ADC via the "adc32" runtime helper (carry-in makes inline flag
// tracking impractical); the helper's return value replaces dest.
fn gen_adc32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("adc32");
    builder.set_local(dest_operand);
}
// Emit 32-bit SBB via the "sbb32" runtime helper.
fn gen_sbb32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("sbb32");
    builder.set_local(dest_operand);
}
// Emit a 32-bit AND with lazy-flags bookkeeping. CF/OF/AF are defined (as
// cleared) by AND, so they are excluded from the lazily-computed set and
// cleared directly in the flags register.
fn gen_and32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.and_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Emit TEST: dest & source goes into last_result only (operands untouched),
// with CF/OF/AF cleared directly, matching the AND flag semantics.
fn gen_test(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
    size: i32,
) {
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.and_i32();
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Size-specialized wrappers used by the instruction-definition macros.
fn gen_test8(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_test(builder, dest, source, OPSIZE_8)
}
fn gen_test16(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_test(builder, dest, source, OPSIZE_16)
}
fn gen_test32(builder: &mut WasmBuilder, dest: &WasmLocal, source: &LocalOrImmediate) {
    gen_test(builder, dest, source, OPSIZE_32)
}
// Emit a 32-bit OR with lazy-flags bookkeeping; CF/OF/AF cleared directly,
// same flag handling as gen_and32.
fn gen_or32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.or_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Emit a 32-bit XOR; identical structure to gen_or32.
fn gen_xor32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(&dest_operand);
    source_operand.gen_get(builder);
    builder.xor_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(
        builder,
        FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW & !FLAG_ADJUST,
    );
    codegen::gen_clear_flags_bits(builder, FLAG_CARRY | FLAG_OVERFLOW | FLAG_ADJUST);
}
// Emit 32-bit ROL: pushes (dest, count & 31) and calls the "rol32" runtime
// helper, whose return value replaces dest. The count is masked to 5 bits,
// matching x86's 32-bit rotate-count masking.
//
// Fix: the count was previously masked with `& 31` twice — once inside the
// match (both branches) and again after it — emitting a redundant
// `i32.const 31; i32.and` pair into every generated block. (x & 31) & 31 ==
// x & 31, so dropping the second mask leaves semantics unchanged.
fn gen_rol32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            // mask at compile time for immediate counts
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("rol32");
    builder.set_local(dest_operand);
}
// Emit 32-bit ROR via the "ror32" runtime helper; count masked to 5 bits.
//
// Fix: removed the redundant second `i32.const 31; i32.and` that followed
// the match — both match arms already produce a masked count, so the extra
// mask only bloated the generated wasm.
fn gen_ror32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            // mask at compile time for immediate counts
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("ror32");
    builder.set_local(dest_operand);
}
// Emit 32-bit RCL (rotate through carry) via the "rcl32" runtime helper;
// count masked to 5 bits.
//
// Fix: removed the redundant second `i32.const 31; i32.and` after the
// match — both arms already push a masked count, so re-masking was dead
// work in every generated block.
fn gen_rcl32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            // mask at compile time for immediate counts
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("rcl32");
    builder.set_local(dest_operand);
}
// Emit 32-bit RCR (rotate through carry) via the "rcr32" runtime helper;
// count masked to 5 bits.
//
// Fix: removed the redundant second `i32.const 31; i32.and` after the
// match — both arms already push a masked count.
fn gen_rcr32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(31);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(i) => {
            // mask at compile time for immediate counts
            builder.const_i32(*i & 31);
        },
    }
    builder.call_fn2_ret("rcr32");
    builder.set_local(dest_operand);
}
// A shift count for the inline shift codegen below: either a wasm local
// holding the (already masked, known non-zero) runtime count, or a
// compile-time immediate.
enum ShiftCount {
    Local(WasmLocal),
    Immediate(i32),
}
impl ShiftCount {
    // Push the count onto the wasm stack.
    pub fn gen_get(builder: &mut WasmBuilder, count: &ShiftCount) {
        match &count {
            ShiftCount::Local(l) => builder.get_local(l),
            ShiftCount::Immediate(i) => builder.const_i32(*i),
        }
    }
    // Push (32 - count); used to extract the last bit shifted out on shl.
    pub fn gen_get_thirtytwo_minus(builder: &mut WasmBuilder, count: &ShiftCount) {
        match &count {
            ShiftCount::Local(l) => {
                builder.const_i32(32);
                builder.get_local(l);
                builder.sub_i32();
            },
            ShiftCount::Immediate(i) => builder.const_i32(32 - *i),
        }
    }
    // Push (count - 1); used to extract the last bit shifted out on shr/sar.
    pub fn gen_get_minus_one(builder: &mut WasmBuilder, count: &ShiftCount) {
        match &count {
            ShiftCount::Local(l) => {
                builder.get_local(l);
                builder.const_i32(1);
                builder.sub_i32()
            },
            ShiftCount::Immediate(i) => builder.const_i32(*i - 1),
        }
    }
}
// Emit 32-bit SHL inline. The masked count of zero leaves the destination
// and all flags unchanged, so that case is skipped: at compile time for
// immediates, via a wasm block + br_if for runtime counts. CF is taken from
// the last bit shifted out (dest >> (32 - count) & 1) and OF from
// CF xor the new sign bit; both are written directly into the flags
// register while the remaining flags stay lazily computed.
fn gen_shl32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    let count = match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            let exit = builder.block_void();
            builder.get_local(l);
            builder.const_i32(31); // Note: mask can probably be avoided since wasm has the same semantics on shl_i32
            builder.and_i32();
            let count = builder.tee_new_local();
            builder.eqz_i32();
            builder.br_if(exit);
            ShiftCount::Local(count)
        },
        LocalOrImmediate::Immediate(i) => {
            if *i & 31 == 0 {
                // no-op: count 0 changes neither the value nor any flag
                return;
            }
            ShiftCount::Immediate(*i & 31)
        },
    };
    // b := last bit shifted out (the new carry flag)
    builder.get_local(&dest_operand);
    ShiftCount::gen_get_thirtytwo_minus(builder, &count);
    builder.shr_u_i32();
    builder.const_i32(1);
    builder.and_i32();
    let b = builder.set_new_local();
    // dest <<= count
    builder.get_local(dest_operand);
    ShiftCount::gen_get(builder, &count);
    builder.shl_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW);
    // flags = (flags & !(CF|OF)) | b | (((b ^ sign(dest)) << 11) & OF)
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!(FLAG_CARRY | FLAG_OVERFLOW));
    builder.and_i32();
    builder.get_local(&b);
    builder.or_i32();
    {
        builder.get_local(&b);
        builder.get_local(&dest_operand);
        builder.const_i32(31);
        builder.shr_u_i32();
        builder.xor_i32();
        builder.const_i32(11);
        builder.shl_i32();
        builder.const_i32(FLAG_OVERFLOW);
        builder.and_i32();
        builder.or_i32();
    }
    builder.store_aligned_i32(0);
    builder.free_local(b);
    if let ShiftCount::Local(l) = count {
        // close the early-exit block opened for the runtime-count case
        builder.block_end();
        builder.free_local(l);
    }
}
// Emit 32-bit SHR inline; same zero-count skip structure as gen_shl32.
// CF is the last bit shifted out (dest >> (count - 1) & 1); OF is the
// original sign bit, obtained via (dest >> 20) & FLAG_OVERFLOW (bit 31 maps
// onto flag bit 11). Flags are updated *before* the shift because both
// depend on the pre-shift value.
fn gen_shr32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    let count = match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            let exit = builder.block_void();
            builder.get_local(l);
            builder.const_i32(31); // Note: mask can probably be avoided since wasm has the same semantics on shr_u_i32
            builder.and_i32();
            let count = builder.tee_new_local();
            builder.eqz_i32();
            builder.br_if(exit);
            ShiftCount::Local(count)
        },
        LocalOrImmediate::Immediate(i) => {
            if *i & 31 == 0 {
                // no-op: count 0 changes neither the value nor any flag
                return;
            }
            ShiftCount::Immediate(*i & 31)
        },
    };
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!(FLAG_CARRY | FLAG_OVERFLOW));
    builder.and_i32();
    {
        // carry: last bit shifted out
        builder.get_local(dest_operand);
        ShiftCount::gen_get_minus_one(builder, &count);
        builder.shr_u_i32();
        builder.const_i32(1);
        builder.and_i32();
        builder.or_i32()
    }
    {
        // overflow: original sign bit (bit 31 -> flag bit 11 via >> 20)
        builder.get_local(dest_operand);
        builder.const_i32(20);
        builder.shr_u_i32();
        builder.const_i32(FLAG_OVERFLOW);
        builder.and_i32();
        builder.or_i32()
    }
    builder.store_aligned_i32(0);
    // dest >>= count (logical)
    builder.get_local(dest_operand);
    ShiftCount::gen_get(builder, &count);
    builder.shr_u_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW);
    if let ShiftCount::Local(l) = count {
        // close the early-exit block opened for the runtime-count case
        builder.block_end();
        builder.free_local(l);
    }
}
// Emit code for a 32-bit SAR (arithmetic shift right) on `dest_operand`.
// CF = last bit shifted out (computed from the pre-shift value); OF is
// cleared (only CF is or-ed back after masking out CF|OF); other flags
// are left to lazy evaluation.
fn gen_sar32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    let count = match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            // Runtime count: mask to 0..31 and branch out early on zero,
            // leaving value and flags untouched.
            let exit = builder.block_void();
            builder.get_local(l);
            builder.const_i32(31); // Note: mask can probably be avoided since wasm has the same semantics on shl_i32
            builder.and_i32();
            let count = builder.tee_new_local();
            builder.eqz_i32();
            builder.br_if(exit);
            ShiftCount::Local(count)
        },
        LocalOrImmediate::Immediate(i) => {
            if *i & 31 == 0 {
                // Constant count of zero: emit nothing.
                return;
            }
            ShiftCount::Immediate(*i & 31)
        },
    };
    // flags = (flags & ~(CF|OF)) | ((dest >> (count - 1)) & 1)
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!(FLAG_CARRY | FLAG_OVERFLOW));
    builder.and_i32();
    {
        builder.get_local(dest_operand);
        ShiftCount::gen_get_minus_one(builder, &count);
        builder.shr_u_i32();
        builder.const_i32(1);
        builder.and_i32();
        builder.or_i32()
    }
    builder.store_aligned_i32(0);
    // dest >>= count (sign-preserving)
    builder.get_local(dest_operand);
    ShiftCount::gen_get(builder, &count);
    builder.shr_s_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !FLAG_CARRY & !FLAG_OVERFLOW);
    if let ShiftCount::Local(l) = count {
        // Close the early-exit block opened for the runtime-count case.
        builder.block_end();
        builder.free_local(l);
    }
}
// Emit XADD with 32-bit operands: exchange `dest_operand` with register
// `r`, then add — i.e. old_r goes through the ADD into dest while dest's
// old value lands in `r`. Flag behavior comes entirely from `gen_add32`.
fn gen_xadd32(ctx: &mut JitContext, dest_operand: &WasmLocal, r: u32) {
    // Save the original value of register r before it is overwritten.
    ctx.builder.get_local(&ctx.register_locals[r as usize]);
    let original_source = ctx.builder.set_new_local();
    // r = dest (the exchange half of XADD).
    ctx.builder.get_local(dest_operand);
    codegen::gen_set_reg32(ctx, r);
    // dest = dest + original r (the add half, with ADD flag semantics).
    gen_add32(
        ctx.builder,
        dest_operand,
        &LocalOrImmediate::WasmLocal(&original_source),
    );
    ctx.builder.free_local(original_source);
}
// Emit CMPXCHG with a 32-bit destination. The destination's current value
// is expected on the wasm value stack (captured into `source` here).
// Compares register_locals[0] (EAX) with it; if equal the new value
// (register r) is pushed, otherwise the destination value is loaded into
// EAX and pushed again. Either way one i32 is left on the stack —
// presumably the caller writes it back to the destination; confirm at
// the call sites.
fn gen_cmpxchg32(ctx: &mut JitContext, r: u32) {
    let source = ctx.builder.set_new_local();
    // CMPXCHG sets the arithmetic flags like CMP EAX, dest.
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::WasmLocal(&source),
    );
    ctx.builder.get_local(&ctx.register_locals[0]);
    ctx.builder.get_local(&source);
    ctx.builder.eq_i32();
    ctx.builder.if_i32();
    // Equal: result written back is the new value from register r.
    codegen::gen_get_reg32(ctx, r);
    ctx.builder.else_();
    // Not equal: EAX = dest; dest is (re)written with its own old value.
    ctx.builder.get_local(&source);
    codegen::gen_set_reg32(ctx, regs::EAX);
    ctx.builder.get_local(&source);
    ctx.builder.block_end();
    ctx.builder.free_local(source);
}
// Emit MUL with a 32-bit operand, which is expected on the wasm value
// stack. Computes the unsigned 64-bit product operand * EAX and stores it
// in EDX:EAX. CF and OF (mask 1 | FLAG_OVERFLOW) are set when the high
// half (EDX) is non-zero, cleared otherwise; the remaining flags go
// through lazy evaluation via last_result.
fn gen_mul32(ctx: &mut JitContext) {
    ctx.builder.extend_unsigned_i32_to_i64();
    codegen::gen_get_reg32(ctx, regs::EAX);
    ctx.builder.extend_unsigned_i32_to_i64();
    ctx.builder.mul_i64();
    let result = ctx.builder.tee_new_local_i64();
    // EDX = high 32 bits of the product.
    ctx.builder.const_i64(32);
    ctx.builder.shr_u_i64();
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
    // EAX = low 32 bits of the product.
    ctx.builder.get_local_i64(&result);
    ctx.builder.free_local_i64(result);
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EAX);
    // CF|OF <- (EDX != 0), i.e. the result does not fit in 32 bits.
    codegen::gen_get_reg32(ctx, regs::EDX);
    ctx.builder.if_void();
    codegen::gen_set_flags_bits(ctx.builder, 1 | FLAG_OVERFLOW);
    ctx.builder.else_();
    codegen::gen_clear_flags_bits(ctx.builder, 1 | FLAG_OVERFLOW);
    ctx.builder.block_end();
    codegen::gen_set_last_result(ctx.builder, &ctx.register_locals[regs::EAX as usize]);
    codegen::gen_set_last_op_size(ctx.builder, OPSIZE_32);
    codegen::gen_set_flags_changed(ctx.builder, FLAGS_ALL & !1 & !FLAG_OVERFLOW);
}
// Emit one-operand IMUL with a 32-bit operand, which is expected on the
// wasm value stack. Computes the signed 64-bit product operand * EAX and
// stores it in EDX:EAX. CF and OF are cleared when EDX equals the sign
// extension of EAX (product fits in 32 bits), set otherwise.
fn gen_imul32(ctx: &mut JitContext) {
    ctx.builder.extend_signed_i32_to_i64();
    codegen::gen_get_reg32(ctx, regs::EAX);
    ctx.builder.extend_signed_i32_to_i64();
    ctx.builder.mul_i64();
    let result = ctx.builder.tee_new_local_i64();
    // EDX = high 32 bits of the product.
    ctx.builder.const_i64(32);
    ctx.builder.shr_u_i64();
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
    // EAX = low 32 bits of the product.
    ctx.builder.get_local_i64(&result);
    ctx.builder.free_local_i64(result);
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EAX);
    // CF|OF <- (EDX != EAX >> 31), i.e. the signed result was truncated.
    codegen::gen_get_reg32(ctx, regs::EDX);
    codegen::gen_get_reg32(ctx, regs::EAX);
    ctx.builder.const_i32(31);
    ctx.builder.shr_s_i32();
    ctx.builder.eq_i32();
    ctx.builder.if_void();
    codegen::gen_clear_flags_bits(ctx.builder, 1 | FLAG_OVERFLOW);
    ctx.builder.else_();
    codegen::gen_set_flags_bits(ctx.builder, 1 | FLAG_OVERFLOW);
    ctx.builder.block_end();
    codegen::gen_set_last_result(ctx.builder, &ctx.register_locals[regs::EAX as usize]);
    codegen::gen_set_last_op_size(ctx.builder, OPSIZE_32);
    codegen::gen_set_flags_changed(ctx.builder, FLAGS_ALL & !1 & !FLAG_OVERFLOW);
}
// Emit two-operand IMUL (r32, r/m32): dest = dest * source.
// Implemented as the three-operand form with dest doubling as the first
// source operand.
fn gen_imul_reg32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    gen_imul3_reg32(builder, dest_operand, dest_operand, source_operand);
}
// Emit three-operand IMUL: dest = source1 * source2 (low 32 bits of the
// signed 64-bit product). CF and OF are set iff sign-extending the
// 32-bit result differs from the full 64-bit product (truncation lost
// information); other flags go through lazy evaluation.
fn gen_imul3_reg32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand1: &WasmLocal,
    source_operand2: &LocalOrImmediate,
) {
    // result = (i64)source1 * (i64)source2
    builder.get_local(&source_operand1);
    builder.extend_signed_i32_to_i64();
    source_operand2.gen_get(builder);
    builder.extend_signed_i32_to_i64();
    builder.mul_i64();
    let result = builder.tee_new_local_i64();
    builder.wrap_i64_to_i32();
    builder.set_local(&dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !1 & !FLAG_OVERFLOW);
    // flags = (flags & ~(CF|OF)) | overflow * (CF|OF)
    // where overflow = (sign_extend(low32(result)) != result); ne_i64
    // yields 0/1, so the multiply builds the flag bits without a branch.
    builder.const_i32(global_pointers::flags as i32);
    builder.get_local_i64(&result);
    builder.wrap_i64_to_i32();
    builder.extend_signed_i32_to_i64();
    builder.get_local_i64(&result);
    builder.ne_i64();
    builder.const_i32(1 | FLAG_OVERFLOW);
    builder.mul_i32();
    codegen::gen_get_flags(builder);
    builder.const_i32(!1 & !FLAG_OVERFLOW);
    builder.and_i32();
    builder.or_i32();
    builder.store_aligned_i32(0);
    builder.free_local_i64(result);
}
// Emit DIV with a 32-bit divisor in `source`: EDX:EAX / source.
// Branches to the divide-error path (#DE) when the divisor is zero or the
// quotient does not fit in 32 bits; otherwise EDX = remainder and
// EAX = quotient.
fn gen_div32(ctx: &mut JitContext, source: &WasmLocal) {
    let done = ctx.builder.block_void();
    {
        let exception = ctx.builder.block_void();
        {
            // Divisor of zero -> #DE.
            ctx.builder.get_local(source);
            ctx.builder.eqz_i32();
            ctx.builder.br_if(exception);
            // dest_operand = ((u64)EDX << 32) | EAX
            codegen::gen_get_reg32(ctx, regs::EDX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.const_i64(32);
            ctx.builder.shl_i64();
            codegen::gen_get_reg32(ctx, regs::EAX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.or_i64();
            let dest_operand = ctx.builder.tee_new_local_i64();
            // result = dest_operand / source (unsigned 64-bit division).
            ctx.builder.get_local(source);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.div_i64();
            let result = ctx.builder.tee_new_local_i64();
            // Quotient overflowing 32 bits -> #DE.
            ctx.builder.const_i64(0xFFFF_FFFF);
            ctx.builder.gtu_i64();
            ctx.builder.br_if(exception);
            // EDX = remainder, EAX = quotient.
            ctx.builder.get_local_i64(&dest_operand);
            ctx.builder.get_local(source);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.rem_i64();
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EDX);
            ctx.builder.get_local_i64(&result);
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EAX);
            ctx.builder.br(done);
            // Note: these run at JIT-compile time on the Rust side
            // (presumably just returning the locals to the allocator),
            // so placing them after the emitted br is intentional.
            ctx.builder.free_local_i64(dest_operand);
            ctx.builder.free_local_i64(result);
        }
        ctx.builder.block_end();
        // Exception path: raise the divide error.
        codegen::gen_trigger_de(ctx);
    }
    ctx.builder.block_end();
}
// Emit BT (bit test): copy the selected bit of `bit_base` into CF
// (flag mask 1). The bit index is `bit_offset` masked with `offset_mask`
// (15 or 31 for the register forms, 7 for the byte-wise memory form).
// Does not modify the operand; CF is removed from the lazily-computed
// set via gen_clear_flags_changed_bits.
fn gen_bt(
    builder: &mut WasmBuilder,
    bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    // flags = (flags & ~CF) | ((bit_base >> (offset & mask)) & 1)
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!1);
    builder.and_i32();
    builder.get_local(bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(imm & offset_mask as i32),
    }
    builder.shr_u_i32();
    builder.const_i32(1);
    builder.and_i32();
    builder.or_i32();
    builder.store_aligned_i32(0);
    codegen::gen_clear_flags_changed_bits(builder, 1);
}
// Emit BTS (bit test and set): record the tested bit in CF via gen_bt,
// then set that bit in the destination (dest |= 1 << (offset & mask)).
fn gen_bts(
    builder: &mut WasmBuilder,
    dest_bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    gen_bt(builder, dest_bit_base, bit_offset, offset_mask);
    builder.get_local(dest_bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            // Build the mask 1 << (offset & offset_mask) at runtime.
            builder.const_i32(1);
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
            builder.shl_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(1 << (imm & offset_mask as i32)),
    }
    builder.or_i32();
    builder.set_local(dest_bit_base);
}
// Emit BTC (bit test and complement): record the tested bit in CF via
// gen_bt, then flip that bit (dest ^= 1 << (offset & mask)).
fn gen_btc(
    builder: &mut WasmBuilder,
    dest_bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    gen_bt(builder, dest_bit_base, bit_offset, offset_mask);
    builder.get_local(dest_bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            // Build the mask 1 << (offset & offset_mask) at runtime.
            builder.const_i32(1);
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
            builder.shl_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(1 << (imm & offset_mask as i32)),
    }
    builder.xor_i32();
    builder.set_local(dest_bit_base);
}
// Emit BTR (bit test and reset): record the tested bit in CF via gen_bt,
// then clear that bit (dest &= ~(1 << (offset & mask))).
fn gen_btr(
    builder: &mut WasmBuilder,
    dest_bit_base: &WasmLocal,
    bit_offset: &LocalOrImmediate,
    offset_mask: u32,
) {
    gen_bt(builder, dest_bit_base, bit_offset, offset_mask);
    builder.get_local(dest_bit_base);
    match bit_offset {
        LocalOrImmediate::WasmLocal(l) => {
            // Build ~(1 << (offset & offset_mask)); xor with -1 is the
            // wasm idiom for bitwise NOT.
            builder.const_i32(1);
            builder.get_local(l);
            builder.const_i32(offset_mask as i32);
            builder.and_i32();
            builder.shl_i32();
            builder.const_i32(-1);
            builder.xor_i32();
        },
        LocalOrImmediate::Immediate(imm) => builder.const_i32(!(1 << (imm & offset_mask as i32))),
    }
    builder.and_i32();
    builder.set_local(dest_bit_base);
}
// Emit the memory read-modify-write form of a bit instruction (`op` is
// gen_bts/gen_btr/gen_btc). x86 bit strings address memory relative to
// the modrm operand, so the effective byte address is
//     modrm_address + (bit_offset >> 3)
// with the bit offset sign-extended for the register form, and `op` is
// then applied to that single byte with the bit index masked to 0..7.
fn gen_bit_rmw(
    ctx: &mut JitContext,
    modrm_byte: ModrmByte,
    op: &dyn Fn(&mut WasmBuilder, &WasmLocal, &LocalOrImmediate, u32),
    source_operand: &LocalOrImmediate,
    opsize: i32,
) {
    dbg_assert!(opsize == 16 || opsize == 32);
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    match source_operand {
        LocalOrImmediate::WasmLocal(l) => {
            // Register offset: sign-extend (16-bit only; a 32-bit local is
            // already a signed i32), then arithmetic shift by 3 to get the
            // signed byte displacement.
            ctx.builder.get_local(l);
            if opsize == 16 {
                codegen::sign_extend_i16(ctx.builder);
            }
            ctx.builder.const_i32(3);
            ctx.builder.shr_s_i32();
            ctx.builder.add_i32();
        },
        LocalOrImmediate::Immediate(imm8) => {
            // Immediate offset is masked to the operand size first.
            ctx.builder.const_i32((*imm8 as i32 & (opsize - 1)) >> 3);
            ctx.builder.add_i32();
        },
    }
    let address_local = ctx.builder.set_new_local();
    // Atomic-looking RMW on one byte: read, apply op with bit index
    // masked to 7, write back.
    codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
        let value_local = ctx.builder.set_new_local();
        op(ctx.builder, &value_local, source_operand, 7);
        ctx.builder.get_local(&value_local);
        ctx.builder.free_local(value_local);
    });
    ctx.builder.free_local(address_local);
}
// Emit BSF (bit scan forward, 32-bit) by delegating to the imported
// "bsf32" helper. The current destination value is passed along with the
// source — presumably so the helper can reproduce BSF's behavior for a
// zero source, which depends on the old destination; confirm in the
// helper's implementation.
fn gen_bsf32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("bsf32");
    builder.set_local(dest_operand);
}
// Emit BSR (bit scan reverse, 32-bit) by delegating to the imported
// "bsr32" helper. Mirrors gen_bsf32: the old destination value is passed
// as the first argument — presumably for the zero-source case; confirm
// in the helper's implementation.
fn gen_bsr32(
    builder: &mut WasmBuilder,
    dest_operand: &WasmLocal,
    source_operand: &LocalOrImmediate,
) {
    builder.get_local(dest_operand);
    source_operand.gen_get(builder);
    builder.call_fn2_ret("bsr32");
    builder.set_local(dest_operand);
}
// Emit BSWAP: reverse the byte order of register `reg` using the classic
// two-rotate trick: rotl(x, 8) & 0x00FF00FF yields two of the swapped
// bytes in place, rotl(x, 24) & 0xFF00FF00 the other two; or-ing them
// gives the byte-reversed value. Flags are unaffected.
fn gen_bswap(ctx: &mut JitContext, reg: i32) {
    let l = &ctx.register_locals[reg as usize];
    ctx.builder.get_local(l);
    ctx.builder.const_i32(8);
    ctx.builder.rotl_i32();
    ctx.builder.const_i32(0xFF00FF);
    ctx.builder.and_i32();
    ctx.builder.get_local(l);
    ctx.builder.const_i32(24);
    ctx.builder.rotl_i32();
    ctx.builder.const_i32(0xFF00FF00u32 as i32);
    ctx.builder.and_i32();
    ctx.builder.or_i32();
    ctx.builder.set_local(l);
}
// ALU opcodes 0x00..0x3D. For each operation, the 8- and 16-bit forms
// call an imported helper by name ("add8", "add16", ...) while the
// 32-bit forms use the native codegen (gen_add32, ...). The macros
// expand to the instrXX_{mem,reg}_jit entry points for the dispatch
// table; the imm-to-accumulator opcodes (0x04/0x05 etc.) are written out
// by hand below each group.

// add (0x00..0x05)
define_instruction_read_write_mem8!("add8", instr_00_mem_jit, instr_00_reg_jit, reg);
define_instruction_read_write_mem16!("add16", instr16_01_mem_jit, instr16_01_reg_jit, reg);
define_instruction_read_write_mem32!(gen_add32, instr32_01_mem_jit, instr32_01_reg_jit, reg);
define_instruction_write_reg8!("add8", instr_02_mem_jit, instr_02_reg_jit);
define_instruction_write_reg16!("add16", instr16_03_mem_jit, instr16_03_reg_jit);
define_instruction_write_reg32!(gen_add32, instr32_03_mem_jit, instr32_03_reg_jit);
pub fn instr_04_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "add8", imm8); }
pub fn instr16_05_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "add16", imm16);
}
pub fn instr32_05_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_add32, imm32);
}
// or (0x08..0x0D)
define_instruction_read_write_mem8!("or8", instr_08_mem_jit, instr_08_reg_jit, reg);
define_instruction_read_write_mem16!("or16", instr16_09_mem_jit, instr16_09_reg_jit, reg);
define_instruction_read_write_mem32!(gen_or32, instr32_09_mem_jit, instr32_09_reg_jit, reg);
define_instruction_write_reg8!("or8", instr_0A_mem_jit, instr_0A_reg_jit);
define_instruction_write_reg16!("or16", instr16_0B_mem_jit, instr16_0B_reg_jit);
define_instruction_write_reg32!(gen_or32, instr32_0B_mem_jit, instr32_0B_reg_jit);
pub fn instr_0C_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "or8", imm8); }
pub fn instr16_0D_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "or16", imm16);
}
pub fn instr32_0D_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_or32, imm32);
}
// adc (0x10..0x15)
define_instruction_read_write_mem8!("adc8", instr_10_mem_jit, instr_10_reg_jit, reg);
define_instruction_read_write_mem16!("adc16", instr16_11_mem_jit, instr16_11_reg_jit, reg);
define_instruction_read_write_mem32!(gen_adc32, instr32_11_mem_jit, instr32_11_reg_jit, reg);
define_instruction_write_reg8!("adc8", instr_12_mem_jit, instr_12_reg_jit);
define_instruction_write_reg16!("adc16", instr16_13_mem_jit, instr16_13_reg_jit);
define_instruction_write_reg32!(gen_adc32, instr32_13_mem_jit, instr32_13_reg_jit);
pub fn instr_14_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "adc8", imm8); }
pub fn instr16_15_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "adc16", imm16);
}
pub fn instr32_15_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_adc32, imm32);
}
// sbb (0x18..0x1D)
define_instruction_read_write_mem8!("sbb8", instr_18_mem_jit, instr_18_reg_jit, reg);
define_instruction_read_write_mem16!("sbb16", instr16_19_mem_jit, instr16_19_reg_jit, reg);
define_instruction_read_write_mem32!(gen_sbb32, instr32_19_mem_jit, instr32_19_reg_jit, reg);
define_instruction_write_reg8!("sbb8", instr_1A_mem_jit, instr_1A_reg_jit);
define_instruction_write_reg16!("sbb16", instr16_1B_mem_jit, instr16_1B_reg_jit);
define_instruction_write_reg32!(gen_sbb32, instr32_1B_mem_jit, instr32_1B_reg_jit);
pub fn instr_1C_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "sbb8", imm8); }
pub fn instr16_1D_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "sbb16", imm16);
}
pub fn instr32_1D_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_sbb32, imm32);
}
// and (0x20..0x25)
define_instruction_read_write_mem8!("and8", instr_20_mem_jit, instr_20_reg_jit, reg);
define_instruction_read_write_mem16!("and16", instr16_21_mem_jit, instr16_21_reg_jit, reg);
define_instruction_read_write_mem32!(gen_and32, instr32_21_mem_jit, instr32_21_reg_jit, reg);
define_instruction_write_reg8!("and8", instr_22_mem_jit, instr_22_reg_jit);
define_instruction_write_reg16!("and16", instr16_23_mem_jit, instr16_23_reg_jit);
define_instruction_write_reg32!(gen_and32, instr32_23_mem_jit, instr32_23_reg_jit);
pub fn instr_24_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "and8", imm8); }
pub fn instr16_25_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "and16", imm16);
}
pub fn instr32_25_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_and32, imm32);
}
// sub (0x28..0x2D)
define_instruction_read_write_mem8!("sub8", instr_28_mem_jit, instr_28_reg_jit, reg);
define_instruction_read_write_mem16!("sub16", instr16_29_mem_jit, instr16_29_reg_jit, reg);
define_instruction_read_write_mem32!(gen_sub32, instr32_29_mem_jit, instr32_29_reg_jit, reg);
define_instruction_write_reg8!("sub8", instr_2A_mem_jit, instr_2A_reg_jit);
define_instruction_write_reg16!("sub16", instr16_2B_mem_jit, instr16_2B_reg_jit);
define_instruction_write_reg32!(gen_sub32, instr32_2B_mem_jit, instr32_2B_reg_jit);
pub fn instr_2C_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "sub8", imm8); }
pub fn instr16_2D_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "sub16", imm16);
}
pub fn instr32_2D_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_sub32, imm32);
}
// xor (0x30..0x35)
define_instruction_read_write_mem8!("xor8", instr_30_mem_jit, instr_30_reg_jit, reg);
define_instruction_read_write_mem16!("xor16", instr16_31_mem_jit, instr16_31_reg_jit, reg);
define_instruction_read_write_mem32!(gen_xor32, instr32_31_mem_jit, instr32_31_reg_jit, reg);
define_instruction_write_reg8!("xor8", instr_32_mem_jit, instr_32_reg_jit);
define_instruction_write_reg16!("xor16", instr16_33_mem_jit, instr16_33_reg_jit);
define_instruction_write_reg32!(gen_xor32, instr32_33_mem_jit, instr32_33_reg_jit);
pub fn instr_34_jit(ctx: &mut JitContext, imm8: u32) { group_arith_al_imm8(ctx, "xor8", imm8); }
pub fn instr16_35_jit(ctx: &mut JitContext, imm16: u32) {
    group_arith_ax_imm16(ctx, "xor16", imm16);
}
pub fn instr32_35_jit(ctx: &mut JitContext, imm32: u32) {
    group_arith_eax_imm32(ctx, &gen_xor32, imm32);
}
// cmp (0x38/0x39): read-only, so the read macros suffice.
define_instruction_read8!(gen_cmp8, instr_38_mem_jit, instr_38_reg_jit);
define_instruction_read16!(gen_cmp16, instr16_39_mem_jit, instr16_39_reg_jit);
define_instruction_read32!(gen_cmp32, instr32_39_mem_jit, instr32_39_reg_jit);
// cmp r8, r/m8 (0x3A). 8-bit registers may alias the low/high byte of a
// 32-bit register local, hence the get/free alias helpers around the
// actual comparison.
pub fn instr_3A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r);
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_cmp8(
        ctx.builder,
        &dest_operand,
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    codegen::gen_free_reg8_or_alias(ctx, r, dest_operand);
    ctx.builder.free_local(source_operand);
}
// cmp r8, r8 (0x3A, register form). Note the modrm convention: r2 is the
// destination and r1 the source.
pub fn instr_3A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    let dest_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r2);
    let source_operand = codegen::gen_get_reg8_or_alias_to_reg32(ctx, r1);
    gen_cmp8(
        ctx.builder,
        &dest_operand,
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    codegen::gen_free_reg8_or_alias(ctx, r2, dest_operand);
    codegen::gen_free_reg8_or_alias(ctx, r1, source_operand);
}
// cmp r16, r/m16 (0x3B, operand size 16).
pub fn instr16_3B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_cmp16(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    ctx.builder.free_local(source_operand);
}
// cmp r16, r16: r2 is the destination, r1 the source.
pub fn instr16_3B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_cmp16(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r1 as usize]),
    );
}
// cmp r32, r/m32 (0x3B, operand size 32).
pub fn instr32_3B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::WasmLocal(&source_operand),
    );
    ctx.builder.free_local(source_operand);
}
// cmp r32, r32: r2 is the destination, r1 the source.
pub fn instr32_3B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r1 as usize]),
    );
}
// cmp accumulator, imm (0x3C/0x3D); register_locals[0] is the
// EAX/AX/AL local.
pub fn instr_3C_jit(ctx: &mut JitContext, imm8: u32) {
    gen_cmp8(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm8 as i32),
    );
}
pub fn instr16_3D_jit(ctx: &mut JitContext, imm16: u32) {
    gen_cmp16(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm16 as i32),
    );
}
pub fn instr32_3D_jit(ctx: &mut JitContext, imm32: u32) {
    gen_cmp32(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
// Emit INC: dest += 1 with x86 flag semantics. INC leaves CF untouched,
// so the current (possibly lazily-computed) CF is materialized into the
// flags word first and `flags_changed` excludes bit 1 (carry).
// last_op1/last_result/last_op_size feed the lazy flag evaluation; for
// 8/16-bit operands the recorded operand is masked to its size, and the
// 16-bit write-back preserves the upper half of the backing local.
fn gen_inc(builder: &mut WasmBuilder, dest_operand: &WasmLocal, size: i32) {
    // flags = (flags & ~1) | cf  — pin the pre-INC carry into the word.
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!1);
    builder.and_i32();
    codegen::gen_getcf(builder);
    builder.or_i32();
    builder.store_aligned_i32(0);
    // last_op1 = dest (masked to operand size for 8/16-bit).
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    // dest = dest + 1
    builder.get_local(dest_operand);
    builder.const_i32(1);
    builder.add_i32();
    if size == OPSIZE_16 {
        codegen::gen_set_reg16_local(builder, dest_operand);
    }
    else {
        builder.set_local(dest_operand);
    }
    // last_result = dest (masked for 16-bit).
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_16 {
        builder.const_i32(0xFFFF);
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    // All flags become lazily computable except carry (bit 1).
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !1);
}
// Emit INC on a 16-bit operand (wrapper around the generic gen_inc).
fn gen_inc16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_inc(builder, dest_operand, OPSIZE_16)
}
// Emit INC on a 32-bit operand (wrapper around the generic gen_inc).
fn gen_inc32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_inc(builder, dest_operand, OPSIZE_32)
}
// Emit DEC: dest -= 1 with x86 flag semantics. Like INC, DEC leaves CF
// untouched, so the current CF is materialized into the flags word and
// `flags_changed` excludes bit 1 (carry); FLAG_SUB additionally marks
// the lazy evaluation as a subtraction.
fn gen_dec(builder: &mut WasmBuilder, dest_operand: &WasmLocal, size: i32) {
    // flags = (flags & ~1) | cf  — pin the pre-DEC carry into the word.
    builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(builder);
    builder.const_i32(!1);
    builder.and_i32();
    codegen::gen_getcf(builder);
    builder.or_i32();
    builder.store_aligned_i32(0);
    // last_op1 = dest (masked to operand size for 8/16-bit).
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_8 || size == OPSIZE_16 {
        builder.const_i32(if size == OPSIZE_8 { 0xFF } else { 0xFFFF });
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    // dest = dest - 1
    builder.get_local(dest_operand);
    builder.const_i32(1);
    builder.sub_i32();
    if size == OPSIZE_16 {
        codegen::gen_set_reg16_local(builder, dest_operand);
    }
    else {
        builder.set_local(dest_operand);
    }
    // last_result = dest (masked for 16-bit).
    builder.const_i32(global_pointers::last_result as i32);
    builder.get_local(&dest_operand);
    if size == OPSIZE_16 {
        builder.const_i32(0xFFFF);
        builder.and_i32();
    }
    builder.store_aligned_i32(0);
    codegen::gen_set_last_op_size(builder, size);
    // Everything except carry is lazily computable; mark as subtraction.
    codegen::gen_set_flags_changed(builder, FLAGS_ALL & !1 | FLAG_SUB);
}
// Emit DEC on a 16-bit operand (wrapper around the generic gen_dec).
fn gen_dec16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_dec(builder, dest_operand, OPSIZE_16)
}
// Emit DEC on a 32-bit operand (wrapper around the generic gen_dec).
fn gen_dec32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    gen_dec(builder, dest_operand, OPSIZE_32)
}
// INC/DEC of a whole register, addressed by its index into
// `ctx.register_locals`. The underlying emitters take `&WasmLocal`, so a
// shared borrow is sufficient — the previous `&mut` borrows were
// needless (clippy: unnecessary_mut_passed).
fn gen_inc16_r(ctx: &mut JitContext, r: u32) {
    gen_inc16(ctx.builder, &ctx.register_locals[r as usize])
}
fn gen_inc32_r(ctx: &mut JitContext, r: u32) {
    gen_inc32(ctx.builder, &ctx.register_locals[r as usize])
}
fn gen_dec16_r(ctx: &mut JitContext, r: u32) {
    gen_dec16(ctx.builder, &ctx.register_locals[r as usize])
}
fn gen_dec32_r(ctx: &mut JitContext, r: u32) {
    gen_dec32(ctx.builder, &ctx.register_locals[r as usize])
}
// Emit NOT on a 16-bit operand: xor with -1 inverts all bits; the
// 16-bit store helper preserves the upper half of the backing local.
// NOT does not affect flags, and none are touched here.
fn gen_not16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.get_local(dest_operand);
    builder.const_i32(-1);
    builder.xor_i32();
    codegen::gen_set_reg16_local(builder, dest_operand);
}
// Emit NOT on a 32-bit operand (xor with -1; flags untouched).
fn gen_not32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.get_local(dest_operand);
    builder.const_i32(-1);
    builder.xor_i32();
    builder.set_local(dest_operand);
}
// Emit NEG on a 16-bit operand via the imported "neg16" helper
// (presumably the helper also updates the arithmetic flags, matching
// what the 32-bit variant below does inline — confirm in its source).
fn gen_neg16(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.get_local(dest_operand);
    builder.call_fn1_ret("neg16");
    codegen::gen_set_reg16_local(builder, dest_operand);
}
// Emit NEG on a 32-bit operand: dest = 0 - dest, recorded for lazy flag
// evaluation as the subtraction 0 - dest (last_op1 = 0, FLAG_SUB set).
fn gen_neg32(builder: &mut WasmBuilder, dest_operand: &WasmLocal) {
    builder.const_i32(global_pointers::last_op1 as i32);
    builder.const_i32(0);
    builder.store_aligned_i32(0);
    builder.const_i32(0);
    builder.get_local(&dest_operand);
    builder.sub_i32();
    builder.set_local(dest_operand);
    codegen::gen_set_last_result(builder, &dest_operand);
    codegen::gen_set_last_op_size(builder, OPSIZE_32);
    codegen::gen_set_flags_changed(builder, FLAGS_ALL | FLAG_SUB);
}
pub fn instr16_06_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::ES);
let sreg = ctx.builder.set_new_local();
codegen::gen_push16(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr32_06_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::ES);
let sreg = ctx.builder.set_new_local();
codegen::gen_push32(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr16_0E_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::CS);
let sreg = ctx.builder.set_new_local();
codegen::gen_push16(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr32_0E_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::CS);
let sreg = ctx.builder.set_new_local();
codegen::gen_push32(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr16_16_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::SS);
let sreg = ctx.builder.set_new_local();
codegen::gen_push16(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr32_16_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::SS);
let sreg = ctx.builder.set_new_local();
codegen::gen_push32(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr16_1E_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::DS);
let sreg = ctx.builder.set_new_local();
codegen::gen_push16(ctx, &sreg);
ctx.builder.free_local(sreg);
}
pub fn instr32_1E_jit(ctx: &mut JitContext) {
codegen::gen_get_sreg(ctx, regs::DS);
let sreg = ctx.builder.set_new_local();
codegen::gen_push32(ctx, &sreg);
ctx.builder.free_local(sreg);
}
// 0x40..0x47: inc r16/r32; 0x48..0x4F: dec r16/r32 — one handler per
// (opcode, operand size) pair, delegating to the register-index helpers.
pub fn instr16_40_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, AX); }
pub fn instr32_40_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EAX); }
pub fn instr16_41_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, CX); }
pub fn instr32_41_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, ECX); }
pub fn instr16_42_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, DX); }
pub fn instr32_42_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EDX); }
pub fn instr16_43_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, BX); }
pub fn instr32_43_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EBX); }
pub fn instr16_44_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, SP); }
pub fn instr32_44_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, ESP); }
pub fn instr16_45_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, BP); }
pub fn instr32_45_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EBP); }
pub fn instr16_46_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, SI); }
pub fn instr32_46_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, ESI); }
pub fn instr16_47_jit(ctx: &mut JitContext) { gen_inc16_r(ctx, DI); }
pub fn instr32_47_jit(ctx: &mut JitContext) { gen_inc32_r(ctx, EDI); }
pub fn instr16_48_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, AX); }
pub fn instr32_48_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EAX); }
pub fn instr16_49_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, CX); }
pub fn instr32_49_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, ECX); }
pub fn instr16_4A_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, DX); }
pub fn instr32_4A_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EDX); }
pub fn instr16_4B_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, BX); }
pub fn instr32_4B_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EBX); }
pub fn instr16_4C_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, SP); }
pub fn instr32_4C_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, ESP); }
pub fn instr16_4D_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, BP); }
pub fn instr32_4D_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EBP); }
pub fn instr16_4E_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, SI); }
pub fn instr32_4E_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, ESI); }
pub fn instr16_4F_jit(ctx: &mut JitContext) { gen_dec16_r(ctx, DI); }
pub fn instr32_4F_jit(ctx: &mut JitContext) { gen_dec32_r(ctx, EDI); }
// 0x50..0x57: push r16/r32; 0x58..0x5F: pop r16/r32.
pub fn instr16_50_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, AX); }
pub fn instr32_50_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EAX); }
pub fn instr16_51_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, CX); }
pub fn instr32_51_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, ECX); }
pub fn instr16_52_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, DX); }
pub fn instr32_52_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EDX); }
pub fn instr16_53_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, BX); }
pub fn instr32_53_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EBX); }
pub fn instr16_54_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, SP); }
pub fn instr32_54_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, ESP); }
pub fn instr16_55_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, BP); }
pub fn instr32_55_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EBP); }
pub fn instr16_56_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, SI); }
pub fn instr32_56_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, ESI); }
pub fn instr16_57_jit(ctx: &mut JitContext) { push16_reg_jit(ctx, DI); }
pub fn instr32_57_jit(ctx: &mut JitContext) { push32_reg_jit(ctx, EDI); }
pub fn instr16_58_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, AX); }
pub fn instr32_58_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EAX); }
pub fn instr16_59_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, CX); }
pub fn instr32_59_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, ECX); }
pub fn instr16_5A_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, DX); }
pub fn instr32_5A_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EDX); }
pub fn instr16_5B_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, BX); }
pub fn instr32_5B_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EBX); }
pub fn instr16_5C_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, SP); }
pub fn instr32_5C_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, ESP); }
pub fn instr16_5D_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, BP); }
pub fn instr32_5D_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EBP); }
pub fn instr16_5E_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, SI); }
pub fn instr32_5E_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, ESI); }
pub fn instr16_5F_jit(ctx: &mut JitContext) { pop16_reg_jit(ctx, DI); }
pub fn instr32_5F_jit(ctx: &mut JitContext) { pop32_reg_jit(ctx, EDI); }
// push imm (0x68: imm16/imm32; 0x6A: imm8). Both opcodes use the same
// codegen — the 0x6A immediate is presumably handed over already
// sign-extended by the decoder; confirm at the decode site.
pub fn instr16_68_jit(ctx: &mut JitContext, imm16: u32) { push16_imm_jit(ctx, imm16) }
pub fn instr32_68_jit(ctx: &mut JitContext, imm32: u32) { push32_imm_jit(ctx, imm32) }
pub fn instr16_6A_jit(ctx: &mut JitContext, imm16: u32) { push16_imm_jit(ctx, imm16) }
pub fn instr32_6A_jit(ctx: &mut JitContext, imm32: u32) { push32_imm_jit(ctx, imm32) }
// imul r16, r/m16, imm16 (0x69, operand size 16): the first operand is
// left on the wasm stack and multiplied via the imported "imul_reg16"
// helper; the result is written to register r (register form: r2).
pub fn instr16_69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm16: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.const_i32(imm16 as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm16: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(imm16 as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r2);
}
// imul r32, r/m32, imm32 (0x69, operand size 32): handled natively via
// gen_imul3_reg32 with the immediate as the second source operand.
pub fn instr32_69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm32: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &value_local,
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
    ctx.builder.free_local(value_local);
}
// Register form: destination r2, first source r1.
pub fn instr32_69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm32: u32) {
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
// imul r16, r/m16, imm8 (0x6B, operand size 16). The `imm8s` parameter
// name suggests the decoder passes the 8-bit immediate already
// sign-extended — confirm at the decode site. Delegates to the imported
// "imul_reg16" helper like the 0x69 forms.
pub fn instr16_6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8s: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.const_i32(imm8s as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8s: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(imm8s as i32);
    ctx.builder.call_fn2_ret("imul_reg16");
    codegen::gen_set_reg16(ctx, r2);
}
// imul r32, r/m32, imm8 (0x6B, operand size 32): handled natively via
// gen_imul3_reg32; `imm8s` is presumably pre-sign-extended (see the
// 16-bit variant above for the same convention).
pub fn instr32_6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8s: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let value_local = ctx.builder.set_new_local();
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r as usize],
        &value_local,
        &LocalOrImmediate::Immediate(imm8s as i32),
    );
    ctx.builder.free_local(value_local);
}
// Register form: destination r2, first source r1.
pub fn instr32_6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8s: u32) {
    gen_imul3_reg32(
        ctx.builder,
        &ctx.register_locals[r2 as usize],
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::Immediate(imm8s as i32),
    );
}
// Code for conditional jumps is generated automatically by the basic block codegen
// (0x70..0x7F = jo/jno/jb/jnb/jz/jnz/jbe/jnbe/js/jns/jp/jnp/jl/jnl/jle/jnle).
// These handlers therefore intentionally emit nothing.
pub fn instr16_70_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_70_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_71_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_71_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_72_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_72_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_73_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_73_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_74_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_74_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_75_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_75_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_76_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_76_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_77_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_77_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_78_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_78_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_79_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_79_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_7F_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_7F_jit(_ctx: &mut JitContext, _imm: u32) {}
// loop/loopz/loopnz/jcxz: Conditional jump is generated in main loop
// E0..E2 (loopnz/loopz/loop) only emit the counter decrement here;
// decr_exc_asize presumably decrements cx or ecx depending on address size
// without touching flags — confirm in codegen. E3 (jcxz) emits nothing.
pub fn instr16_E0_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr32_E0_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr16_E1_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr32_E1_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr16_E2_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr32_E2_jit(ctx: &mut JitContext, _imm: u32) { codegen::decr_exc_asize(ctx) }
pub fn instr16_E3_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_E3_jit(_ctx: &mut JitContext, _imm: u32) {}
// Group 0x80: 8-bit ALU ops with imm8. /0../6 are read-modify-write;
// /7 (cmp, read-only) is defined further below.
define_instruction_read_write_mem8!("add8", instr_80_0_mem_jit, instr_80_0_reg_jit, imm8);
define_instruction_read_write_mem8!("or8", instr_80_1_mem_jit, instr_80_1_reg_jit, imm8);
define_instruction_read_write_mem8!("adc8", instr_80_2_mem_jit, instr_80_2_reg_jit, imm8);
define_instruction_read_write_mem8!("sbb8", instr_80_3_mem_jit, instr_80_3_reg_jit, imm8);
define_instruction_read_write_mem8!("and8", instr_80_4_mem_jit, instr_80_4_reg_jit, imm8);
define_instruction_read_write_mem8!("sub8", instr_80_5_mem_jit, instr_80_5_reg_jit, imm8);
define_instruction_read_write_mem8!("xor8", instr_80_6_mem_jit, instr_80_6_reg_jit, imm8);
// Group 0x82: alias of 0x80 (same operations, same immediate).
define_instruction_read_write_mem8!("add8", instr_82_0_mem_jit, instr_82_0_reg_jit, imm8);
define_instruction_read_write_mem8!("or8", instr_82_1_mem_jit, instr_82_1_reg_jit, imm8);
define_instruction_read_write_mem8!("adc8", instr_82_2_mem_jit, instr_82_2_reg_jit, imm8);
define_instruction_read_write_mem8!("sbb8", instr_82_3_mem_jit, instr_82_3_reg_jit, imm8);
define_instruction_read_write_mem8!("and8", instr_82_4_mem_jit, instr_82_4_reg_jit, imm8);
define_instruction_read_write_mem8!("sub8", instr_82_5_mem_jit, instr_82_5_reg_jit, imm8);
define_instruction_read_write_mem8!("xor8", instr_82_6_mem_jit, instr_82_6_reg_jit, imm8);
// Group 0x81: 16/32-bit ALU ops with a full-size immediate. 16-bit ops go
// through a named helper function; 32-bit ops are emitted inline (gen_*32).
define_instruction_read_write_mem16!("add16", instr16_81_0_mem_jit, instr16_81_0_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_add32, instr32_81_0_mem_jit, instr32_81_0_reg_jit, imm32);
define_instruction_read_write_mem16!("or16", instr16_81_1_mem_jit, instr16_81_1_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_or32, instr32_81_1_mem_jit, instr32_81_1_reg_jit, imm32);
define_instruction_read_write_mem16!("adc16", instr16_81_2_mem_jit, instr16_81_2_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_adc32, instr32_81_2_mem_jit, instr32_81_2_reg_jit, imm32);
define_instruction_read_write_mem16!("sbb16", instr16_81_3_mem_jit, instr16_81_3_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_sbb32, instr32_81_3_mem_jit, instr32_81_3_reg_jit, imm32);
define_instruction_read_write_mem16!("and16", instr16_81_4_mem_jit, instr16_81_4_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_and32, instr32_81_4_mem_jit, instr32_81_4_reg_jit, imm32);
define_instruction_read_write_mem16!("sub16", instr16_81_5_mem_jit, instr16_81_5_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_sub32, instr32_81_5_mem_jit, instr32_81_5_reg_jit, imm32);
define_instruction_read_write_mem16!("xor16", instr16_81_6_mem_jit, instr16_81_6_reg_jit, imm16);
define_instruction_read_write_mem32!(gen_xor32, instr32_81_6_mem_jit, instr32_81_6_reg_jit, imm32);
// Group 0x83: same operations with a sign-extended imm8 (imm8s).
define_instruction_read_write_mem16!("add16", instr16_83_0_mem_jit, instr16_83_0_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_add32, instr32_83_0_mem_jit, instr32_83_0_reg_jit, imm8s);
define_instruction_read_write_mem16!("or16", instr16_83_1_mem_jit, instr16_83_1_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_or32, instr32_83_1_mem_jit, instr32_83_1_reg_jit, imm8s);
define_instruction_read_write_mem16!("adc16", instr16_83_2_mem_jit, instr16_83_2_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_adc32, instr32_83_2_mem_jit, instr32_83_2_reg_jit, imm8s);
define_instruction_read_write_mem16!("sbb16", instr16_83_3_mem_jit, instr16_83_3_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_sbb32, instr32_83_3_mem_jit, instr32_83_3_reg_jit, imm8s);
define_instruction_read_write_mem16!("and16", instr16_83_4_mem_jit, instr16_83_4_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_and32, instr32_83_4_mem_jit, instr32_83_4_reg_jit, imm8s);
define_instruction_read_write_mem16!("sub16", instr16_83_5_mem_jit, instr16_83_5_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_sub32, instr32_83_5_mem_jit, instr32_83_5_reg_jit, imm8s);
define_instruction_read_write_mem16!("xor16", instr16_83_6_mem_jit, instr16_83_6_reg_jit, imm8s);
define_instruction_read_write_mem32!(gen_xor32, instr32_83_6_mem_jit, instr32_83_6_reg_jit, imm8s);
// /7 of groups 0x80-0x83 is cmp: read-only, flags only, so the read-only
// define macro is used instead of the read-write one.
define_instruction_read8!(gen_cmp8, instr_80_7_mem_jit, instr_80_7_reg_jit, imm8);
define_instruction_read16!(gen_cmp16, instr16_81_7_mem_jit, instr16_81_7_reg_jit, imm16);
define_instruction_read32!(gen_cmp32, instr32_81_7_mem_jit, instr32_81_7_reg_jit, imm32);
define_instruction_read8!(gen_cmp8, instr_82_7_mem_jit, instr_82_7_reg_jit, imm8);
define_instruction_read16!(gen_cmp16, instr16_83_7_mem_jit, instr16_83_7_reg_jit, imm8s);
define_instruction_read32!(gen_cmp32, instr32_83_7_mem_jit, instr32_83_7_reg_jit, imm8s);
// test r/m, r (0x84/0x85): also read-only, flags only.
define_instruction_read8!(gen_test8, instr_84_mem_jit, instr_84_reg_jit);
define_instruction_read16!(gen_test16, instr16_85_mem_jit, instr16_85_reg_jit);
define_instruction_read32!(gen_test32, instr32_85_mem_jit, instr32_85_reg_jit);
// xchg r/m8, r8 (0x86, memory form): a checked read-modify-write on the
// memory operand. Inside the callback the old memory value is on the wasm
// stack; it is saved, the register value replaces it (via the tmp shuffle),
// and the callback leaves the register's old value to be written back.
pub fn instr_86_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read_write(ctx, BitSize::BYTE, &address_local, &|ref mut ctx| {
codegen::gen_get_reg8(ctx, r);
let tmp = ctx.builder.set_new_local();
codegen::gen_set_reg8(ctx, r);
ctx.builder.get_local(&tmp);
ctx.builder.free_local(tmp);
});
ctx.builder.free_local(address_local);
}
// xchg r8, r8 (0x86, register form): swap the byte registers r1 and r2.
pub fn instr_86_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Stash r2's old value, overwrite r2 with r1, then move the stashed
    // value into r1.
    codegen::gen_get_reg8(ctx, r2);
    let saved = ctx.builder.set_new_local();
    codegen::gen_get_reg8(ctx, r1);
    codegen::gen_set_reg8(ctx, r2);
    ctx.builder.get_local(&saved);
    codegen::gen_set_reg8(ctx, r1);
    ctx.builder.free_local(saved);
}
// xchg r/m16, r16 (0x87, memory form): same read-modify-write shuffle as
// the 8-bit variant, at WORD size.
pub fn instr16_87_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
codegen::gen_get_reg16(ctx, r);
let tmp = ctx.builder.set_new_local();
codegen::gen_set_reg16(ctx, r);
ctx.builder.get_local(&tmp);
ctx.builder.free_local(tmp);
});
ctx.builder.free_local(address_local);
}
// xchg r/m32, r32 (0x87, memory form, 32-bit operand size): DWORD variant.
pub fn instr32_87_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
codegen::gen_get_reg32(ctx, r);
let tmp = ctx.builder.set_new_local();
codegen::gen_set_reg32(ctx, r);
ctx.builder.get_local(&tmp);
ctx.builder.free_local(tmp);
});
ctx.builder.free_local(address_local);
}
// xchg r16, r16 (0x87, register form): swap the word registers r1 and r2.
pub fn instr16_87_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Stash r2's old value, overwrite r2 with r1, then move the stashed
    // value into r1.
    codegen::gen_get_reg16(ctx, r2);
    let saved = ctx.builder.set_new_local();
    codegen::gen_get_reg16(ctx, r1);
    codegen::gen_set_reg16(ctx, r2);
    ctx.builder.get_local(&saved);
    codegen::gen_set_reg16(ctx, r1);
    ctx.builder.free_local(saved);
}
// xchg r32, r32 (0x87, register form): swap the dword registers r1 and r2.
pub fn instr32_87_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Stash r2's old value, overwrite r2 with r1, then move the stashed
    // value into r1.
    codegen::gen_get_reg32(ctx, r2);
    let saved = ctx.builder.set_new_local();
    codegen::gen_get_reg32(ctx, r1);
    codegen::gen_set_reg32(ctx, r2);
    ctx.builder.get_local(&saved);
    codegen::gen_set_reg32(ctx, r1);
    ctx.builder.free_local(saved);
}
// mov r/m8, r8 (0x88, memory form): resolve the address, fetch the register
// byte, then perform the checked write (which may fault).
pub fn instr_88_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_get_reg8(ctx, r);
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write8(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
}
// Register form: plain byte-register move r1 <- r2.
pub fn instr_88_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_set_reg8_r(ctx, r1, r2);
}
// mov r/m16, r16 (0x89, memory form). The register's wasm local is passed
// directly as the value (unsafe_clone avoids an intermediate temporary; the
// local is only read here).
pub fn instr16_89_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_write16(
ctx,
&address_local,
&ctx.register_locals[r as usize].unsafe_clone(),
);
ctx.builder.free_local(address_local);
}
// Register form: word-register move r1 <- r2.
pub fn instr16_89_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_set_reg16_r(ctx, r1, r2);
}
pub fn instr32_89_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// Pseudo: safe_write32(modrm_resolve(modrm_byte), reg32[r]);
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_write32(
ctx,
&address_local,
&ctx.register_locals[r as usize].unsafe_clone(),
);
ctx.builder.free_local(address_local);
}
// Register form: dword-register move r1 <- r2.
pub fn instr32_89_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_set_reg32_r(ctx, r1, r2);
}
// mov r, r/m (0x8A/0x8B): load direction — read from memory (or the source
// register) into register r. Note the reg-form operand order is swapped
// relative to 0x88/0x89 (r2 is the source here).
pub fn instr_8A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// Pseudo: reg8[r] = safe_read8(modrm_resolve(modrm_byte));
codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
codegen::gen_set_reg8(ctx, r);
}
pub fn instr_8A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_set_reg8_r(ctx, r2, r1);
}
pub fn instr16_8B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// Pseudo: reg16[r] = safe_read16(modrm_resolve(modrm_byte));
codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_8B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_set_reg16_r(ctx, r2, r1);
}
pub fn instr32_8B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
// Pseudo: reg32[r] = safe_read32s(modrm_resolve(modrm_byte));
codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_8B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
codegen::gen_set_reg32_r(ctx, r2, r1);
}
// mov r/m, Sreg (0x8C, memory forms). Only segment registers 0..=5 exist;
// r >= 6 raises #UD (after address resolution). Both operand sizes store
// 16 bits — matching x86, where mov m, Sreg always writes a word.
pub fn instr16_8C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
if r >= 6 {
codegen::gen_trigger_ud(ctx);
}
else {
codegen::gen_get_sreg(ctx, r);
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write16(ctx, &address_local, &value_local);
ctx.builder.free_local(value_local);
}
ctx.builder.free_local(address_local);
}
// 32-bit operand-size form: still a 16-bit store (see note above).
pub fn instr32_8C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
if r >= 6 {
codegen::gen_trigger_ud(ctx);
}
else {
codegen::gen_get_sreg(ctx, r);
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write16(ctx, &address_local, &value_local);
ctx.builder.free_local(value_local);
}
ctx.builder.free_local(address_local);
}
// mov r16, Sreg (0x8C, register form). Only segment registers 0..=5 exist;
// any other encoding raises #UD.
pub fn instr16_8C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    match r2 {
        0..=5 => {
            codegen::gen_get_sreg(ctx, r2);
            codegen::gen_set_reg16(ctx, r1);
        },
        _ => codegen::gen_trigger_ud(ctx),
    }
}
// mov r32, Sreg (0x8C, register form, 32-bit operand size). The segment
// value is zero-extended into the full destination register; r2 >= 6
// raises #UD.
pub fn instr32_8C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    match r2 {
        0..=5 => {
            codegen::gen_get_sreg(ctx, r2);
            codegen::gen_set_reg32(ctx, r1);
        },
        _ => codegen::gen_trigger_ud(ctx),
    }
}
// lea r, m (0x8D): store the effective address itself, not a loaded value.
// SEG_PREFIX_ZERO is set before resolving so that no segment base is added
// — presumably the flag makes gen_modrm_resolve use a zero segment; confirm
// in the modrm resolver.
pub fn instr16_8D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, reg: u32) {
ctx.cpu.prefixes |= SEG_PREFIX_ZERO;
codegen::gen_modrm_resolve(ctx, modrm_byte);
codegen::gen_set_reg16(ctx, reg);
}
pub fn instr32_8D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, reg: u32) {
ctx.cpu.prefixes |= SEG_PREFIX_ZERO;
codegen::gen_modrm_resolve(ctx, modrm_byte);
codegen::gen_set_reg32(ctx, reg);
}
// lea with a register source operand is invalid: #UD.
pub fn instr16_8D_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
codegen::gen_trigger_ud(ctx);
}
pub fn instr32_8D_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
codegen::gen_trigger_ud(ctx);
}
// pop r/m16 (0x8F /0, memory form). The effective address must be computed
// with the post-pop esp (x86 semantics when the addressing mode uses esp),
// and esp must only be committed once the write is known not to fault —
// hence the adjust/undo dance below.
pub fn instr16_8F_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
// before gen_modrm_resolve, update esp to the new value
codegen::gen_adjust_stack_reg(ctx, 2);
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
// pop takes care of updating esp, so undo the previous change
codegen::gen_adjust_stack_reg(ctx, (-2i32) as u32);
codegen::gen_pop16(ctx);
let value_local = ctx.builder.set_new_local();
// undo the esp change of pop, as safe_write16 can fail
codegen::gen_adjust_stack_reg(ctx, (-2i32) as u32);
codegen::gen_safe_write16(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
// finally, actually update esp
codegen::gen_adjust_stack_reg(ctx, 2);
}
// Register form: a plain pop into the register.
pub fn instr16_8F_0_reg_jit(ctx: &mut JitContext, r: u32) { pop16_reg_jit(ctx, r); }
// pop r/m32 (0x8F /0, memory form, 32-bit operand size). Mirrors the 16-bit
// handler with a 4-byte stack slot: the address is resolved against the
// post-pop esp, the esp change is rolled back around the fallible write,
// and esp is only committed at the end.
pub fn instr32_8F_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
// resolve the target address with esp already incremented
codegen::gen_adjust_stack_reg(ctx, 4);
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
// pop adjusts esp itself, so undo the pre-adjustment first
codegen::gen_adjust_stack_reg(ctx, (-4i32) as u32);
codegen::gen_pop32s(ctx);
let value_local = ctx.builder.set_new_local();
// undo pop's esp change: safe_write32 may fault
codegen::gen_adjust_stack_reg(ctx, (-4i32) as u32);
codegen::gen_safe_write32(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
// the write succeeded — commit the esp update
codegen::gen_adjust_stack_reg(ctx, 4);
}
// Register form: a plain pop into the register.
pub fn instr32_8F_0_reg_jit(ctx: &mut JitContext, r: u32) { pop32_reg_jit(ctx, r); }
// Group 0xC1: rotate/shift r/m16 and r/m32 by an immediate count
// (imm8_5bits: the count is masked to its low 5 bits, as on x86).
// /6 is defined with the same operation as /4 (shl), matching the
// undocumented x86 alias encoding.
define_instruction_read_write_mem16!(
"rol16",
instr16_C1_0_mem_jit,
instr16_C1_0_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_rol32,
instr32_C1_0_mem_jit,
instr32_C1_0_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"ror16",
instr16_C1_1_mem_jit,
instr16_C1_1_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_ror32,
instr32_C1_1_mem_jit,
instr32_C1_1_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"rcl16",
instr16_C1_2_mem_jit,
instr16_C1_2_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_rcl32,
instr32_C1_2_mem_jit,
instr32_C1_2_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"rcr16",
instr16_C1_3_mem_jit,
instr16_C1_3_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_rcr32,
instr32_C1_3_mem_jit,
instr32_C1_3_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"shl16",
instr16_C1_4_mem_jit,
instr16_C1_4_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_shl32,
instr32_C1_4_mem_jit,
instr32_C1_4_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"shr16",
instr16_C1_5_mem_jit,
instr16_C1_5_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_shr32,
instr32_C1_5_mem_jit,
instr32_C1_5_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"shl16",
instr16_C1_6_mem_jit,
instr16_C1_6_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_shl32,
instr32_C1_6_mem_jit,
instr32_C1_6_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem16!(
"sar16",
instr16_C1_7_mem_jit,
instr16_C1_7_reg_jit,
imm8_5bits
);
define_instruction_read_write_mem32!(
gen_sar32,
instr32_C1_7_mem_jit,
instr32_C1_7_reg_jit,
imm8_5bits
);
// call rel16/rel32 (0xE8): push the return address, then jump relatively.
// gen_get_real_eip presumably yields the address of the instruction after
// the call — confirm in codegen. The 16-bit variant wraps ip via
// gen_jmp_rel16.
pub fn instr16_E8_jit(ctx: &mut JitContext, imm: u32) {
codegen::gen_get_real_eip(ctx);
let value_local = ctx.builder.set_new_local();
codegen::gen_push16(ctx, &value_local);
ctx.builder.free_local(value_local);
codegen::gen_jmp_rel16(ctx.builder, imm as u16);
}
pub fn instr32_E8_jit(ctx: &mut JitContext, imm: u32) {
codegen::gen_get_real_eip(ctx);
let value_local = ctx.builder.set_new_local();
codegen::gen_push32(ctx, &value_local);
ctx.builder.free_local(value_local);
codegen::gen_relative_jump(ctx.builder, imm as i32);
}
// jmp rel16/rel32 (0xE9): relative jump without pushing a return address.
pub fn instr16_E9_jit(ctx: &mut JitContext, imm: u32) {
codegen::gen_jmp_rel16(ctx.builder, imm as u16);
}
pub fn instr32_E9_jit(ctx: &mut JitContext, imm: u32) {
codegen::gen_relative_jump(ctx.builder, imm as i32);
}
// ret imm16 (0xC2): pop the return address, add the code-segment base to
// get an absolute target, release imm16 extra bytes of stack, then jump.
// Note the immediate is always 16-bit, even with a 32-bit operand size.
pub fn instr16_C2_jit(ctx: &mut JitContext, imm16: u32) {
codegen::gen_pop16(ctx);
codegen::gen_add_cs_offset(ctx);
let new_eip = ctx.builder.set_new_local();
codegen::gen_adjust_stack_reg(ctx, imm16);
codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_C2_jit(ctx: &mut JitContext, imm16: u32) {
codegen::gen_pop32s(ctx);
codegen::gen_add_cs_offset(ctx);
let new_eip = ctx.builder.set_new_local();
codegen::gen_adjust_stack_reg(ctx, imm16);
codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
// ret (0xC3): same as above without the stack adjustment.
pub fn instr16_C3_jit(ctx: &mut JitContext) {
codegen::gen_pop16(ctx);
codegen::gen_add_cs_offset(ctx);
let new_eip = ctx.builder.set_new_local();
codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_C3_jit(ctx: &mut JitContext) {
codegen::gen_pop32s(ctx);
codegen::gen_add_cs_offset(ctx);
let new_eip = ctx.builder.set_new_local();
codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
// leave (0xC9): the bool selects operand size (false = 16-bit, true = 32-bit).
pub fn instr16_C9_jit(ctx: &mut JitContext) { codegen::gen_leave(ctx, false); }
pub fn instr32_C9_jit(ctx: &mut JitContext) { codegen::gen_leave(ctx, true); }
// mov reg, imm family (0xB0..0xBF): load a constant into a fixed register.
// Shared emitter for the 8-bit forms.
pub fn gen_mov_reg8_imm(ctx: &mut JitContext, r: u32, imm: u32) {
ctx.builder.const_i32(imm as i32);
codegen::gen_set_reg8(ctx, r);
}
// 0xB0..0xB7: mov al/cl/dl/bl/ah/ch/dh/bh, imm8 (register index 0..7).
pub fn instr_B0_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 0, imm) }
pub fn instr_B1_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 1, imm) }
pub fn instr_B2_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 2, imm) }
pub fn instr_B3_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 3, imm) }
pub fn instr_B4_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 4, imm) }
pub fn instr_B5_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 5, imm) }
pub fn instr_B6_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 6, imm) }
pub fn instr_B7_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg8_imm(ctx, 7, imm) }
// Shared emitter for the 16-bit forms.
pub fn gen_mov_reg16_imm(ctx: &mut JitContext, r: u32, imm: u32) {
ctx.builder.const_i32(imm as i32);
codegen::gen_set_reg16(ctx, r);
}
// 0xB8..0xBF with 16-bit operand size: mov r16, imm16.
pub fn instr16_B8_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 0, imm) }
pub fn instr16_B9_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 1, imm) }
pub fn instr16_BA_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 2, imm) }
pub fn instr16_BB_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 3, imm) }
pub fn instr16_BC_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 4, imm) }
pub fn instr16_BD_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 5, imm) }
pub fn instr16_BE_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 6, imm) }
pub fn instr16_BF_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg16_imm(ctx, 7, imm) }
// Shared emitter for the 32-bit forms.
pub fn gen_mov_reg32_imm(ctx: &mut JitContext, r: u32, imm: u32) {
ctx.builder.const_i32(imm as i32);
codegen::gen_set_reg32(ctx, r);
}
// 0xB8..0xBF with 32-bit operand size: mov r32, imm32.
pub fn instr32_B8_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 0, imm) }
pub fn instr32_B9_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 1, imm) }
pub fn instr32_BA_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 2, imm) }
pub fn instr32_BB_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 3, imm) }
pub fn instr32_BC_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 4, imm) }
pub fn instr32_BD_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 5, imm) }
pub fn instr32_BE_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 6, imm) }
pub fn instr32_BF_jit(ctx: &mut JitContext, imm: u32) { gen_mov_reg32_imm(ctx, 7, imm) }
// Group 0xC0: 8-bit rotates/shifts by an immediate count (masked to 5 bits).
// /6 is defined like /4 (shl), matching the undocumented alias.
define_instruction_read_write_mem8!("rol8", instr_C0_0_mem_jit, instr_C0_0_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("ror8", instr_C0_1_mem_jit, instr_C0_1_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("rcl8", instr_C0_2_mem_jit, instr_C0_2_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("rcr8", instr_C0_3_mem_jit, instr_C0_3_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("shl8", instr_C0_4_mem_jit, instr_C0_4_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("shr8", instr_C0_5_mem_jit, instr_C0_5_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("shl8", instr_C0_6_mem_jit, instr_C0_6_reg_jit, imm8_5bits);
define_instruction_read_write_mem8!("sar8", instr_C0_7_mem_jit, instr_C0_7_reg_jit, imm8_5bits);
// Group 0xD0: 8-bit rotates/shifts by constant 1.
define_instruction_read_write_mem8!("rol8", instr_D0_0_mem_jit, instr_D0_0_reg_jit, constant_one);
define_instruction_read_write_mem8!("ror8", instr_D0_1_mem_jit, instr_D0_1_reg_jit, constant_one);
define_instruction_read_write_mem8!("rcl8", instr_D0_2_mem_jit, instr_D0_2_reg_jit, constant_one);
define_instruction_read_write_mem8!("rcr8", instr_D0_3_mem_jit, instr_D0_3_reg_jit, constant_one);
define_instruction_read_write_mem8!("shl8", instr_D0_4_mem_jit, instr_D0_4_reg_jit, constant_one);
define_instruction_read_write_mem8!("shr8", instr_D0_5_mem_jit, instr_D0_5_reg_jit, constant_one);
define_instruction_read_write_mem8!("shl8", instr_D0_6_mem_jit, instr_D0_6_reg_jit, constant_one);
define_instruction_read_write_mem8!("sar8", instr_D0_7_mem_jit, instr_D0_7_reg_jit, constant_one);
// Group 0xD1: 16/32-bit rotates/shifts by constant 1.
// /6 is defined like /4 (shl), matching the undocumented alias.
define_instruction_read_write_mem16!(
"rol16",
instr16_D1_0_mem_jit,
instr16_D1_0_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_rol32,
instr32_D1_0_mem_jit,
instr32_D1_0_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"ror16",
instr16_D1_1_mem_jit,
instr16_D1_1_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_ror32,
instr32_D1_1_mem_jit,
instr32_D1_1_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"rcl16",
instr16_D1_2_mem_jit,
instr16_D1_2_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_rcl32,
instr32_D1_2_mem_jit,
instr32_D1_2_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"rcr16",
instr16_D1_3_mem_jit,
instr16_D1_3_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_rcr32,
instr32_D1_3_mem_jit,
instr32_D1_3_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"shl16",
instr16_D1_4_mem_jit,
instr16_D1_4_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_shl32,
instr32_D1_4_mem_jit,
instr32_D1_4_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"shr16",
instr16_D1_5_mem_jit,
instr16_D1_5_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_shr32,
instr32_D1_5_mem_jit,
instr32_D1_5_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"shl16",
instr16_D1_6_mem_jit,
instr16_D1_6_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_shl32,
instr32_D1_6_mem_jit,
instr32_D1_6_reg_jit,
constant_one
);
define_instruction_read_write_mem16!(
"sar16",
instr16_D1_7_mem_jit,
instr16_D1_7_reg_jit,
constant_one
);
define_instruction_read_write_mem32!(
gen_sar32,
instr32_D1_7_mem_jit,
instr32_D1_7_reg_jit,
constant_one
);
// Groups 0xD2 (8-bit) and 0xD3 (16/32-bit): rotates/shifts by CL.
// /6 is defined like /4 (shl), matching the undocumented alias.
define_instruction_read_write_mem8!("rol8", instr_D2_0_mem_jit, instr_D2_0_reg_jit, cl);
define_instruction_read_write_mem8!("ror8", instr_D2_1_mem_jit, instr_D2_1_reg_jit, cl);
define_instruction_read_write_mem8!("rcl8", instr_D2_2_mem_jit, instr_D2_2_reg_jit, cl);
define_instruction_read_write_mem8!("rcr8", instr_D2_3_mem_jit, instr_D2_3_reg_jit, cl);
define_instruction_read_write_mem8!("shl8", instr_D2_4_mem_jit, instr_D2_4_reg_jit, cl);
define_instruction_read_write_mem8!("shr8", instr_D2_5_mem_jit, instr_D2_5_reg_jit, cl);
define_instruction_read_write_mem8!("shl8", instr_D2_6_mem_jit, instr_D2_6_reg_jit, cl);
define_instruction_read_write_mem8!("sar8", instr_D2_7_mem_jit, instr_D2_7_reg_jit, cl);
define_instruction_read_write_mem16!("rol16", instr16_D3_0_mem_jit, instr16_D3_0_reg_jit, cl);
define_instruction_read_write_mem32!(gen_rol32, instr32_D3_0_mem_jit, instr32_D3_0_reg_jit, cl);
define_instruction_read_write_mem16!("ror16", instr16_D3_1_mem_jit, instr16_D3_1_reg_jit, cl);
define_instruction_read_write_mem32!(gen_ror32, instr32_D3_1_mem_jit, instr32_D3_1_reg_jit, cl);
define_instruction_read_write_mem16!("rcl16", instr16_D3_2_mem_jit, instr16_D3_2_reg_jit, cl);
define_instruction_read_write_mem32!(gen_rcl32, instr32_D3_2_mem_jit, instr32_D3_2_reg_jit, cl);
define_instruction_read_write_mem16!("rcr16", instr16_D3_3_mem_jit, instr16_D3_3_reg_jit, cl);
define_instruction_read_write_mem32!(gen_rcr32, instr32_D3_3_mem_jit, instr32_D3_3_reg_jit, cl);
define_instruction_read_write_mem16!("shl16", instr16_D3_4_mem_jit, instr16_D3_4_reg_jit, cl);
define_instruction_read_write_mem32!(gen_shl32, instr32_D3_4_mem_jit, instr32_D3_4_reg_jit, cl);
define_instruction_read_write_mem16!("shr16", instr16_D3_5_mem_jit, instr16_D3_5_reg_jit, cl);
define_instruction_read_write_mem32!(gen_shr32, instr32_D3_5_mem_jit, instr32_D3_5_reg_jit, cl);
define_instruction_read_write_mem16!("shl16", instr16_D3_6_mem_jit, instr16_D3_6_reg_jit, cl);
define_instruction_read_write_mem32!(gen_shl32, instr32_D3_6_mem_jit, instr32_D3_6_reg_jit, cl);
define_instruction_read_write_mem16!("sar16", instr16_D3_7_mem_jit, instr16_D3_7_reg_jit, cl);
define_instruction_read_write_mem32!(gen_sar32, instr32_D3_7_mem_jit, instr32_D3_7_reg_jit, cl);
// xlat (0xD7): al = [seg:(e)bx + al]. The address is (e)bx + al, masked to
// 16 bits under a 16-bit address size. DS is passed as the default segment;
// jit_add_seg_offset presumably honours segment-override prefixes — confirm.
pub fn instr_D7_jit(ctx: &mut JitContext) {
if ctx.cpu.asize_32() {
codegen::gen_get_reg32(ctx, regs::EBX);
}
else {
codegen::gen_get_reg16(ctx, regs::BX);
}
codegen::gen_get_reg8(ctx, regs::AL);
ctx.builder.add_i32();
if !ctx.cpu.asize_32() {
ctx.builder.const_i32(0xFFFF);
ctx.builder.and_i32();
}
jit_add_seg_offset(ctx, regs::DS);
let address_local = ctx.builder.set_new_local();
codegen::gen_safe_read8(ctx, &address_local);
ctx.builder.free_local(address_local);
codegen::gen_set_reg8(ctx, regs::AL);
}
// Shared emitters for the 0xD8 FPU arithmetic group. The leading const 0 is
// the first i32 argument of the (i32, i64, i32) helper call — presumably the
// destination stack index st(0); the loaded operand occupies the i64+i32
// pair (an f80 split across two wasm values) — confirm against the fpu_*
// helper signatures.
fn instr_group_D8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, op: &str) {
ctx.builder.const_i32(0);
codegen::gen_fpu_load_m32(ctx, modrm_byte);
ctx.builder.call_fn3_i32_i64_i32(op)
}
fn instr_group_D8_reg_jit(ctx: &mut JitContext, r: u32, op: &str) {
ctx.builder.const_i32(0);
codegen::gen_fpu_get_sti(ctx, r);
ctx.builder.call_fn3_i32_i64_i32(op)
}
// 0xD8 handlers: fadd (/0), fmul (/1), fcom (/2), fcomp (/3), fsub (/4),
// fsubr (/5), fdiv (/6), fdivr (/7) with an m32 or st(i) operand.
// fcom/fcomp skip the group helper: they take no destination index, only
// the operand to compare against.
pub fn instr_D8_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fadd")
}
pub fn instr_D8_0_reg_jit(ctx: &mut JitContext, r: u32) {
instr_group_D8_reg_jit(ctx, r, "fpu_fadd")
}
pub fn instr_D8_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fmul")
}
pub fn instr_D8_1_reg_jit(ctx: &mut JitContext, r: u32) {
instr_group_D8_reg_jit(ctx, r, "fpu_fmul")
}
pub fn instr_D8_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_fpu_load_m32(ctx, modrm_byte);
ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_D8_2_reg_jit(ctx: &mut JitContext, r: u32) {
codegen::gen_fpu_get_sti(ctx, r);
ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_D8_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_fpu_load_m32(ctx, modrm_byte);
ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_D8_3_reg_jit(ctx: &mut JitContext, r: u32) {
codegen::gen_fpu_get_sti(ctx, r);
ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_D8_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fsub")
}
pub fn instr_D8_4_reg_jit(ctx: &mut JitContext, r: u32) {
instr_group_D8_reg_jit(ctx, r, "fpu_fsub")
}
pub fn instr_D8_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fsubr")
}
pub fn instr_D8_5_reg_jit(ctx: &mut JitContext, r: u32) {
instr_group_D8_reg_jit(ctx, r, "fpu_fsubr")
}
pub fn instr_D8_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fdiv")
}
pub fn instr_D8_6_reg_jit(ctx: &mut JitContext, r: u32) {
instr_group_D8_reg_jit(ctx, r, "fpu_fdiv")
}
pub fn instr_D8_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr_group_D8_mem_jit(ctx, modrm_byte, "fpu_fdivr")
}
pub fn instr_D8_7_reg_jit(ctx: &mut JitContext, r: u32) {
instr_group_D8_reg_jit(ctx, r, "fpu_fdivr")
}
// fld m32 / fld st(i) (0xD9 /0): load the operand and push it onto the FPU
// stack. Operand size does not matter for FPU encodings, so the 32-bit
// handlers simply forward to the 16-bit ones.
pub fn instr16_D9_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_fpu_load_m32(ctx, modrm_byte);
ctx.builder.call_fn2_i64_i32("fpu_push");
}
pub fn instr16_D9_0_reg_jit(ctx: &mut JitContext, r: u32) {
codegen::gen_fpu_get_sti(ctx, r);
ctx.builder.call_fn2_i64_i32("fpu_push");
}
pub fn instr32_D9_0_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_0_reg_jit(ctx, r) }
pub fn instr32_D9_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr16_D9_0_mem_jit(ctx, modrm_byte)
}
// 0xD9 /1: the memory form is an invalid encoding — the address is still
// resolved before raising #UD (presumably so address-related faults keep
// their priority; confirm against the interpreter path). The register form
// is fxch st(i).
pub fn instr16_D9_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
codegen::gen_trigger_ud(ctx);
}
pub fn instr16_D9_1_reg_jit(ctx: &mut JitContext, r: u32) {
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn1("fpu_fxch");
}
pub fn instr32_D9_1_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_1_reg_jit(ctx, r) }
pub fn instr32_D9_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr16_D9_1_mem_jit(ctx, modrm_byte)
}
// fst m32 (0xD9 /2): convert st(0) from f80 to f32 and store it, without
// popping. The register form with r == 0 emits nothing (presumably the
// D9 D0 fnop encoding — confirm); any other r raises #UD.
pub fn instr16_D9_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_fpu_get_sti(ctx, 0);
ctx.builder.call_fn2_i64_i32_ret("f80_to_f32");
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write32(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
}
pub fn instr16_D9_2_reg_jit(ctx: &mut JitContext, r: u32) {
if r != 0 {
codegen::gen_trigger_ud(ctx);
}
}
pub fn instr32_D9_2_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_2_reg_jit(ctx, r) }
pub fn instr32_D9_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr16_D9_2_mem_jit(ctx, modrm_byte)
}
// fstp m32 (0xD9 /3): like fst m32, but pops the FPU stack afterwards.
// The pop is only emitted after the checked write, so a faulting store
// leaves the stack untouched. The register form is fstp st(i).
pub fn instr16_D9_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
codegen::gen_fpu_get_sti(ctx, 0);
ctx.builder.call_fn2_i64_i32_ret("f80_to_f32");
let value_local = ctx.builder.set_new_local();
codegen::gen_safe_write32(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local(value_local);
codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
pub fn instr16_D9_3_reg_jit(ctx: &mut JitContext, r: u32) {
codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
pub fn instr32_D9_3_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_3_reg_jit(ctx, r) }
pub fn instr32_D9_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
instr16_D9_3_mem_jit(ctx, modrm_byte)
}
// D9 /4 memory form: FLDENV — load the FPU environment from memory.
// Delegates to an out-of-line helper that may page-fault, so registers
// must be synced to memory around the call and a fault exits the jit block.
pub fn instr16_D9_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    // Record the current instruction pointer so a fault reports the right EIP.
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("fpu_fldenv32");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // If the helper flagged a page fault, sync registers and bail out.
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
pub fn instr16_D9_4_reg_jit(ctx: &mut JitContext, r: u32) {
    match r {
        // Only these register encodings are valid; the helper dispatches on r.
        0 | 1 | 4 | 5 => {
            ctx.builder.const_i32(r as i32);
            ctx.builder.call_fn1("instr16_D9_4_reg");
        },
        _ => codegen::gen_trigger_ud(ctx),
    }
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_D9_4_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_4_reg_jit(ctx, r) }
pub fn instr32_D9_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_4_mem_jit(ctx, modrm_byte)
}
// D9 /5 memory form: FLDCW m16 — load the FPU control word.
pub fn instr16_D9_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.call_fn1("set_control_word");
}
pub fn instr16_D9_5_reg_jit(ctx: &mut JitContext, r: u32) {
    // r == 7 is an invalid encoding; the rest go through the interpreter helper.
    if r == 7 {
        codegen::gen_trigger_ud(ctx);
    }
    else {
        codegen::gen_fn1_const(ctx.builder, "instr16_D9_5_reg", r);
    }
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_D9_5_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_5_reg_jit(ctx, r) }
pub fn instr32_D9_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_5_mem_jit(ctx, modrm_byte)
}
// D9 /6 memory form: FSTENV — store the FPU environment to memory.
// Same sync-call-check pattern as FLDENV: the helper may page-fault.
pub fn instr16_D9_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    // Record the current instruction pointer so a fault reports the right EIP.
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("fpu_fstenv32");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // If the helper flagged a page fault, sync registers and bail out.
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
pub fn instr16_D9_6_reg_jit(ctx: &mut JitContext, r: u32) {
    // Register forms are handled by the interpreter helper, dispatched on r.
    codegen::gen_fn1_const(ctx.builder, "instr16_D9_6_reg", r);
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_D9_6_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_6_reg_jit(ctx, r) }
pub fn instr32_D9_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_6_mem_jit(ctx, modrm_byte)
}
// D9 /7 memory form: FNSTCW m16 — store the FPU control word to memory.
pub fn instr16_D9_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // Read the control word straight from its global location.
    ctx.builder
        .const_i32(global_pointers::fpu_control_word as i32);
    ctx.builder.load_aligned_u16(0);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr16_D9_7_reg_jit(ctx: &mut JitContext, r: u32) {
    // Register forms are handled by the interpreter helper, dispatched on r.
    codegen::gen_fn1_const(ctx.builder, "instr16_D9_7_reg", r);
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_D9_7_reg_jit(ctx: &mut JitContext, r: u32) { instr16_D9_7_reg_jit(ctx, r) }
pub fn instr32_D9_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_D9_7_mem_jit(ctx, modrm_byte)
}
// DA /5 memory form: FISUBR m32int — reverse-subtract an i32 memory operand.
pub fn instr_DA_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // Target index 0 (ST(0)), then the integer operand, then the op helper.
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_i32(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32("fpu_fsubr")
}
pub fn instr_DA_5_reg_jit(ctx: &mut JitContext, r: u32) {
    // Only r == 1 (FUCOMPP) is a valid register encoding; the rest are #UD.
    if r == 1 {
        codegen::gen_fn0_const(ctx.builder, "fpu_fucompp");
    }
    else {
        codegen::gen_trigger_ud(ctx);
    };
}
// DB /0 memory form: FILD m32int — push a signed 32-bit integer.
pub fn instr_DB_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i32(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
pub fn instr_DB_0_reg_jit(ctx: &mut JitContext, r: u32) {
    // Register form handled by the interpreter helper, dispatched on r.
    codegen::gen_fn1_const(ctx.builder, "instr_DB_0_reg", r);
}
// DB /2 memory form: FIST m32int — store ST(0) converted to i32.
pub fn instr_DB_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i32");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_DB_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr_DB_2_reg", r);
}
// DB /3 memory form: FISTP m32int — same as FIST, then pop the stack.
pub fn instr_DB_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i32");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    // Pop only after the write: a faulting write must leave the stack intact.
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
pub fn instr_DB_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "instr_DB_3_reg", r);
}
// DB /5 memory form: FLD m80 — load an 80-bit extended real via an
// out-of-line helper that may page-fault (sync-call-check pattern).
pub fn instr_DB_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    // Record the current instruction pointer so a fault reports the right EIP.
    codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
        ctx.builder,
        ctx.start_of_current_instruction as i32 & 0xFFF,
    );
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("fpu_fldm80");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // If the helper flagged a page fault, sync registers and bail out.
    codegen::gen_get_page_fault(ctx.builder);
    ctx.builder.if_void();
    codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.return_();
    ctx.builder.block_end();
}
// DB /5 register form: FUCOMI ST, ST(r).
pub fn instr_DB_5_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fucomi");
}
// DB /6 memory form has no valid encoding.
pub fn instr_DB_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte) {
    codegen::gen_trigger_ud(ctx);
}
// DB /6 register form: FCOMI ST, ST(r).
pub fn instr_DB_6_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fcomi");
}
// Shared codegen for the DC-group arithmetic ops (fadd/fmul/fsub/fdiv/…).
// The helpers receive (target index, operand value) via call_fn3_i32_i64_i32.
fn instr_group_DC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, op: &str) {
    // Memory forms always target index 0 and read an f64 memory operand.
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
fn instr_group_DC_reg_jit(ctx: &mut JitContext, r: u32, op: &str) {
    // Register forms pass index r together with the value of ST(r).
    ctx.builder.const_i32(r as i32);
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
/// DC /0 memory form: FADD with an f64 memory source operand.
pub fn instr_DC_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fadd");
}

/// DC /0 register form: FADD targeting ST(r).
pub fn instr_DC_0_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fadd");
}

/// DC /1 memory form: FMUL with an f64 memory source operand.
pub fn instr_DC_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fmul");
}

/// DC /1 register form: FMUL targeting ST(r).
pub fn instr_DC_1_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fmul");
}
// DC /2: FCOM — compare ST(0) with an f64 memory operand or ST(r).
pub fn instr_DC_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_DC_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
// DC /3: FCOMP — as FCOM, but the helper also pops the register stack.
pub fn instr_DC_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_DC_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
/// DC /4 memory form: FSUB with an f64 memory source operand.
pub fn instr_DC_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fsub");
}

/// DC /4 register form: FSUB targeting ST(r).
pub fn instr_DC_4_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fsub");
}

/// DC /5 memory form: FSUBR with an f64 memory source operand.
pub fn instr_DC_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fsubr");
}

/// DC /5 register form: FSUBR targeting ST(r).
pub fn instr_DC_5_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fsubr");
}

/// DC /6 memory form: FDIV with an f64 memory source operand.
pub fn instr_DC_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fdiv");
}

/// DC /6 register form: FDIV targeting ST(r).
pub fn instr_DC_6_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fdiv");
}

/// DC /7 memory form: FDIVR with an f64 memory source operand.
pub fn instr_DC_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DC_mem_jit(ctx, modrm_byte, "fpu_fdivr");
}

/// DC /7 register form: FDIVR targeting ST(r).
pub fn instr_DC_7_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DC_reg_jit(ctx, r, "fpu_fdivr");
}
// DD /0 memory form: FLD m64 — push a double-precision value.
pub fn instr16_DD_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_m64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
// DD /0 register form: FFREE ST(r).
pub fn instr16_DD_0_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_ffree", r);
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_DD_0_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_0_reg_jit(ctx, r) }
pub fn instr32_DD_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_0_mem_jit(ctx, modrm_byte)
}
// DD /2 memory form: FST m64 — store ST(0) as a double-precision float.
pub fn instr16_DD_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // Convert ST(0) from the internal f80 format down to f64 before writing.
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret_i64("f80_to_f64");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
}
pub fn instr16_DD_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fst", r);
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_DD_2_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_2_reg_jit(ctx, r) }
pub fn instr32_DD_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_2_mem_jit(ctx, modrm_byte)
}
// DD /3 memory form: FSTP m64 — as FST m64, then pop the register stack.
pub fn instr16_DD_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret_i64("f80_to_f64");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    // Pop only after the write: a faulting write must leave the stack intact.
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
}
pub fn instr16_DD_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_DD_3_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_3_reg_jit(ctx, r) }
pub fn instr32_DD_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_3_mem_jit(ctx, modrm_byte)
}
// DD /5: no valid memory form; the register form is FUCOMP ST(r).
pub fn instr16_DD_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // Resolve the modrm operand first, then raise #UD for the invalid encoding.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
pub fn instr16_DD_5_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fucomp");
}
// Operand size has no effect on x87 operations; reuse the 16-bit handlers.
pub fn instr32_DD_5_reg_jit(ctx: &mut JitContext, r: u32) { instr16_DD_5_reg_jit(ctx, r) }
pub fn instr32_DD_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_DD_5_mem_jit(ctx, modrm_byte)
}
// Shared codegen for the DE-group arithmetic ops.
// Memory forms take an i16 integer operand (FIADD etc., targeting index 0).
fn instr_group_DE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, op: &str) {
    ctx.builder.const_i32(0);
    codegen::gen_fpu_load_i16(ctx, modrm_byte);
    ctx.builder.call_fn3_i32_i64_i32(op)
}
// Register forms pop the stack after the operation (the “…P” variants,
// e.g. FADDP): pass index r and ST(r), run the op, then fpu_pop.
fn instr_group_DE_reg_jit(ctx: &mut JitContext, r: u32, op: &str) {
    ctx.builder.const_i32(r as i32);
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn3_i32_i64_i32(op);
    codegen::gen_fn0_const(ctx.builder, "fpu_pop")
}
// DE /0: FIADD m16int / FADDP ST(r), ST.
pub fn instr_DE_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fadd")
}
pub fn instr_DE_0_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fadd")
}
// DE /1: FIMUL m16int / FMULP ST(r), ST.
pub fn instr_DE_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fmul")
}
pub fn instr_DE_1_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fmul")
}
// DE /2: FICOM m16int; the register form also pops (compare-and-pop).
pub fn instr_DE_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i16(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcom")
}
pub fn instr_DE_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fpu_get_sti(ctx, r);
    ctx.builder.call_fn2_i64_i32("fpu_fcom");
    codegen::gen_fn0_const(ctx.builder, "fpu_pop")
}
// DE /3: FICOMP m16int; register form is valid only for r == 1
// (compare-and-double-pop, presumably FCOMPP), otherwise #UD.
pub fn instr_DE_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i16(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_fcomp")
}
pub fn instr_DE_3_reg_jit(ctx: &mut JitContext, r: u32) {
    if r == 1 {
        codegen::gen_fpu_get_sti(ctx, r);
        ctx.builder.call_fn2_i64_i32("fpu_fcomp");
        codegen::gen_fn0_const(ctx.builder, "fpu_pop")
    }
    else {
        codegen::gen_trigger_ud(ctx);
    }
}
// DE /4: FISUB m16int / FSUBP ST(r), ST.
pub fn instr_DE_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fsub")
}
pub fn instr_DE_4_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fsub")
}
// DE /5: FISUBR m16int / FSUBRP ST(r), ST.
pub fn instr_DE_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fsubr")
}
pub fn instr_DE_5_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fsubr")
}
// DE /6: FIDIV m16int / FDIVP ST(r), ST.
pub fn instr_DE_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fdiv")
}
pub fn instr_DE_6_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fdiv")
}
// DE /7: FIDIVR m16int / FDIVRP ST(r), ST.
pub fn instr_DE_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr_group_DE_mem_jit(ctx, modrm_byte, "fpu_fdivr")
}
pub fn instr_DE_7_reg_jit(ctx: &mut JitContext, r: u32) {
    instr_group_DE_reg_jit(ctx, r, "fpu_fdivr")
}
// DF /2 memory form: FIST m16int — store ST(0) converted to i16.
pub fn instr_DF_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i16");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_DF_2_reg_jit(ctx: &mut JitContext, r: u32) {
    // Register form behaves as a store-and-pop of ST(r).
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
// DF /3 memory form: FISTP m16int — as FIST m16int, then pop the stack.
pub fn instr_DF_3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret("fpu_convert_to_i16");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
    // Pop only after the write: a faulting write must leave the stack intact.
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
pub fn instr_DF_3_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fstp", r);
}
// DF /4 memory form: FBLD (packed BCD load) — not implemented; logged,
// then raised as #UD.
pub fn instr_DF_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    dbg_log!("fbld");
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_DF_4_reg_jit(ctx: &mut JitContext, r: u32) {
    // r == 0 is FNSTSW AX: copy the FPU status word into AX.
    if r == 0 {
        ctx.builder.call_fn0_ret("fpu_load_status_word");
        codegen::gen_set_reg16(ctx, regs::AX);
    }
    else {
        codegen::gen_trigger_ud(ctx);
    };
}
// DF /5 memory form: FILD m64int — push a signed 64-bit integer.
pub fn instr_DF_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_fpu_load_i64(ctx, modrm_byte);
    ctx.builder.call_fn2_i64_i32("fpu_push");
}
// DF /5 register form: FUCOMIP ST, ST(r).
pub fn instr_DF_5_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_fn1_const(ctx.builder, "fpu_fucomip", r);
}
// DF /6 memory form: FBSTP (packed BCD store) — not implemented; logged,
// then raised as #UD.
pub fn instr_DF_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    dbg_log!("fbstp");
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_trigger_ud(ctx);
}
// DF /6 register form: FCOMIP ST, ST(r).
pub fn instr_DF_6_reg_jit(ctx: &mut JitContext, r: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1("fpu_fcomip");
}
// DF /7: FISTP m64int. The register form has no valid encoding (#UD).
pub fn instr_DF_7_reg_jit(ctx: &mut JitContext, _r: u32) { codegen::gen_trigger_ud(ctx); }
pub fn instr_DF_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // Convert ST(0) to a signed 64-bit integer and write it out.
    codegen::gen_fpu_get_sti(ctx, 0);
    ctx.builder.call_fn2_i64_i32_ret_i64("fpu_convert_to_i64");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
    // Pop only after the write: a faulting write must leave the stack intact.
    codegen::gen_fn0_const(ctx.builder, "fpu_pop");
}
// EB: JMP rel8 (short jump). The 16-bit variant truncates the target to
// 16 bits via the rel16 helper; the 32-bit variant uses a plain relative jump.
pub fn instr16_EB_jit(ctx: &mut JitContext, imm8: u32) {
    codegen::gen_jmp_rel16(ctx.builder, imm8 as u16);
    // dbg_assert(is_asize_32() || get_real_eip() < 0x10000);
}
pub fn instr32_EB_jit(ctx: &mut JitContext, imm8: u32) {
    // jmp near
    codegen::gen_relative_jump(ctx.builder, imm8 as i32);
    // dbg_assert(is_asize_32() || get_real_eip() < 0x10000);
}
// F6 /0, F7 /0: TEST r/m, imm — generated from the read-only instruction
// macros (read operand, apply gen_test*, with an immediate of the given size).
define_instruction_read8!(gen_test8, instr_F6_0_mem_jit, instr_F6_0_reg_jit, imm8);
define_instruction_read16!(
    gen_test16,
    instr16_F7_0_mem_jit,
    instr16_F7_0_reg_jit,
    imm16
);
define_instruction_read32!(
    gen_test32,
    instr32_F7_0_mem_jit,
    instr32_F7_0_reg_jit,
    imm32
);
/// F6 /1: undocumented alias of F6 /0 (TEST r/m8, imm8).
pub fn instr_F6_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    instr_F6_0_mem_jit(ctx, modrm_byte, imm);
}

/// F6 /1 register form: alias of F6 /0.
pub fn instr_F6_1_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    instr_F6_0_reg_jit(ctx, r, imm);
}

/// F7 /1 (16-bit): undocumented alias of F7 /0 (TEST r/m16, imm16).
pub fn instr16_F7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    instr16_F7_0_mem_jit(ctx, modrm_byte, imm);
}

/// F7 /1 (16-bit) register form: alias of F7 /0.
pub fn instr16_F7_1_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    instr16_F7_0_reg_jit(ctx, r, imm);
}

/// F7 /1 (32-bit): undocumented alias of F7 /0 (TEST r/m32, imm32).
pub fn instr32_F7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    instr32_F7_0_mem_jit(ctx, modrm_byte, imm);
}

/// F7 /1 (32-bit) register form: alias of F7 /0.
pub fn instr32_F7_1_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    instr32_F7_0_reg_jit(ctx, r, imm);
}
// F7 /2 (NOT) and F7 /3 (NEG): generated from the read-modify-write macros,
// with no immediate operand.
define_instruction_read_write_mem16!(gen_not16, instr16_F7_2_mem_jit, instr16_F7_2_reg_jit, none);
define_instruction_read_write_mem32!(gen_not32, instr32_F7_2_mem_jit, instr32_F7_2_reg_jit, none);
define_instruction_read_write_mem16!(gen_neg16, instr16_F7_3_mem_jit, instr16_F7_3_reg_jit, none);
define_instruction_read_write_mem32!(gen_neg32, instr32_F7_3_mem_jit, instr32_F7_3_reg_jit, none);
// F7 /4: MUL — unsigned multiply by the r/m operand.
// The 16-bit variants call the interpreter helper, which reads and writes
// guest registers in memory, so locals are synced around the call.
pub fn instr16_F7_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("mul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
pub fn instr16_F7_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("mul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
// The 32-bit variants have inline codegen (gen_mul32).
pub fn instr32_F7_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    gen_mul32(ctx);
}
pub fn instr32_F7_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    gen_mul32(ctx);
}
// F7 /5: IMUL — signed multiply; the 16-bit operand is sign-extended first.
pub fn instr16_F7_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("imul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
pub fn instr16_F7_5_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1("imul16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
pub fn instr32_F7_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    gen_imul32(ctx);
}
pub fn instr32_F7_5_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    gen_imul32(ctx);
}
// F7 /6 (16-bit): DIV r/m16 — unsigned divide via the interpreter helper.
// The helper returns 0 on divide error, in which case #DE is raised.
pub fn instr16_F7_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("div16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // Helper result == 0 means the division faulted: raise #DE.
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr16_F7_6_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("div16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
// F7 /6 (32-bit): DIV r/m32 — unsigned divide via inline codegen.
// gen_div32 generates the division including its own divide-error handling.
// (A dead `if false { … }` fallback path calling div32_without_fault was
// removed; only the previously-live branch remains.)
pub fn instr32_F7_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // Read the divisor from memory into a wasm local, then divide.
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    let source_operand = ctx.builder.set_new_local();
    gen_div32(ctx, &source_operand);
    ctx.builder.free_local(source_operand);
}
pub fn instr32_F7_6_reg_jit(ctx: &mut JitContext, r: u32) {
    // The register operand already lives in a wasm local; borrow it directly.
    gen_div32(ctx, &ctx.register_locals[r as usize].unsafe_clone());
}
// F7 /7: IDIV — signed divide via the interpreter helpers.
// The helpers return 0 on divide error, in which case #DE is raised.
pub fn instr16_F7_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    // Sign-extend the 16-bit divisor before passing it to the helper.
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    // Helper result == 0 means the division faulted: raise #DE.
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr16_F7_7_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv16_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr32_F7_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv32_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
pub fn instr32_F7_7_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn1_ret("idiv32_without_fault");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_de(ctx);
    ctx.builder.block_end();
}
// F8: CLC — clear the carry flag (mask 1 = CF), and mark CF as no longer
// lazily computed by clearing its flags_changed bit.
pub fn instr_F8_jit(ctx: &mut JitContext) {
    codegen::gen_clear_flags_changed_bits(ctx.builder, 1);
    codegen::gen_clear_flags_bits(ctx.builder, 1);
}
// F9: STC — set the carry flag.
pub fn instr_F9_jit(ctx: &mut JitContext) {
    codegen::gen_clear_flags_changed_bits(ctx.builder, 1);
    codegen::gen_set_flags_bits(ctx.builder, 1);
}
// FA: CLI — delegated to a helper; raises #GP(0) if it reports failure
// (insufficient privilege).
pub fn instr_FA_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_FA_without_fault");
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.block_end();
}
// FB: STI — same pattern as CLI.
pub fn instr_FB_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_FB_without_fault");
    ctx.builder.eqz_i32();
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.block_end();
}
// handle_irqs is specially handled in jit to be called one instruction after this one
// FC: CLD — clear the direction flag: flags = flags & !FLAG_DIRECTION.
pub fn instr_FC_jit(ctx: &mut JitContext) {
    // Stack: [&flags, flags & !DF] -> store back into the flags global.
    ctx.builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(!FLAG_DIRECTION);
    ctx.builder.and_i32();
    ctx.builder.store_aligned_i32(0);
}
// FD: STD — set the direction flag: flags = flags | FLAG_DIRECTION.
pub fn instr_FD_jit(ctx: &mut JitContext) {
    ctx.builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(FLAG_DIRECTION);
    ctx.builder.or_i32();
    ctx.builder.store_aligned_i32(0);
}
// FE /0,/1 and FF /0,/1: INC/DEC — generated from the read-modify-write
// macros (8-bit variants dispatch to named helpers, 16/32-bit to gen_*).
define_instruction_read_write_mem8!("inc8", instr_FE_0_mem_jit, instr_FE_0_reg_jit, none);
define_instruction_read_write_mem8!("dec8", instr_FE_1_mem_jit, instr_FE_1_reg_jit, none);
define_instruction_read_write_mem16!(gen_inc16, instr16_FF_0_mem_jit, instr16_FF_0_reg_jit, none);
define_instruction_read_write_mem32!(gen_inc32, instr32_FF_0_mem_jit, instr32_FF_0_reg_jit, none);
define_instruction_read_write_mem16!(gen_dec16, instr16_FF_1_mem_jit, instr16_FF_1_reg_jit, none);
define_instruction_read_write_mem32!(gen_dec32, instr32_FF_1_mem_jit, instr32_FF_1_reg_jit, none);
// FF /2: CALL near indirect — compute the target, push the return address,
// then perform an absolute indirect jump.
pub fn instr16_FF_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    // Target is relative to the code segment base.
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    // Push the address of the next instruction as the return address.
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr16_FF_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_2_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_get_real_eip(ctx);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value_local);
    ctx.builder.free_local(value_local);
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
// FF /4: JMP near indirect — like CALL indirect but without pushing a
// return address.
pub fn instr16_FF_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    // Target is relative to the code segment base.
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr16_FF_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg16(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
pub fn instr32_FF_4_reg_jit(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    codegen::gen_add_cs_offset(ctx);
    let new_eip = ctx.builder.set_new_local();
    codegen::gen_absolute_indirect_jump(ctx, new_eip);
}
/// FF /6 memory form: PUSH r/m16.
pub fn instr16_FF_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    push16_mem_jit(ctx, modrm_byte);
}

/// FF /6 register form: PUSH r16.
pub fn instr16_FF_6_reg_jit(ctx: &mut JitContext, r: u32) {
    push16_reg_jit(ctx, r);
}

/// FF /6 memory form: PUSH r/m32.
pub fn instr32_FF_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    push32_mem_jit(ctx, modrm_byte);
}

/// FF /6 register form: PUSH r32.
pub fn instr32_FF_6_reg_jit(ctx: &mut JitContext, r: u32) {
    push32_reg_jit(ctx, r);
}
// 0F 80–8F: Jcc rel16/32 (conditional jumps). These handlers are
// intentionally empty — the code for conditional jumps is generated
// automatically by the basic block codegen, so nothing is emitted here.
pub fn instr16_0F80_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F81_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F82_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F83_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F84_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F85_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F86_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F87_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F88_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F89_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr16_0F8F_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F80_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F81_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F82_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F83_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F84_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F85_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F86_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F87_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F88_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F89_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8A_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8B_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8C_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8D_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8E_jit(_ctx: &mut JitContext, _imm: u32) {}
pub fn instr32_0F8F_jit(_ctx: &mut JitContext, _imm: u32) {}
// 90: NOP — no code generated.
pub fn instr_90_jit(_ctx: &mut JitContext) {}
// XCHG AX, r16: swap via a temporary wasm local.
fn gen_xchg_reg16(ctx: &mut JitContext, r: u32) {
    // tmp = r; r = AX; AX = tmp
    codegen::gen_get_reg16(ctx, r);
    let tmp = ctx.builder.set_new_local();
    codegen::gen_get_reg16(ctx, regs::AX);
    codegen::gen_set_reg16(ctx, r);
    ctx.builder.get_local(&tmp);
    codegen::gen_set_reg16(ctx, regs::AX);
    ctx.builder.free_local(tmp);
}
// XCHG EAX, r32: same as above with 32-bit accessors.
fn gen_xchg_reg32(ctx: &mut JitContext, r: u32) {
    codegen::gen_get_reg32(ctx, r);
    let tmp = ctx.builder.set_new_local();
    codegen::gen_get_reg32(ctx, regs::EAX);
    codegen::gen_set_reg32(ctx, r);
    ctx.builder.get_local(&tmp);
    codegen::gen_set_reg32(ctx, regs::EAX);
    ctx.builder.free_local(tmp);
}
// xchg [e]ax, reg for each general-purpose register (0x91..0x97, both operand
// sizes). 0x90 (xchg [e]ax, [e]ax) is a nop and handled separately above.
pub fn instr16_91_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::CX); }
pub fn instr16_92_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::DX); }
pub fn instr16_93_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::BX); }
pub fn instr16_94_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::SP); }
pub fn instr16_95_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::BP); }
pub fn instr16_96_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::SI); }
pub fn instr16_97_jit(ctx: &mut JitContext) { gen_xchg_reg16(ctx, regs::DI); }
pub fn instr32_91_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::CX); }
pub fn instr32_92_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::DX); }
pub fn instr32_93_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::BX); }
pub fn instr32_94_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::SP); }
pub fn instr32_95_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::BP); }
pub fn instr32_96_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::SI); }
pub fn instr32_97_jit(ctx: &mut JitContext) { gen_xchg_reg32(ctx, regs::DI); }
// 0x98 cbw: AX = sign-extended AL.
pub fn instr16_98_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg8(ctx, regs::AL);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg16(ctx, regs::AX);
}
// 0x98 cwde: EAX = sign-extended AX.
pub fn instr32_98_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg16(ctx, regs::AX);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg32(ctx, regs::EAX);
}
// 0x99 cwd: DX = 0xFFFF if AX is negative, else 0. AX's sign bit is moved to
// bit 31 (shl 16) and then broadcast to all bits by an arithmetic shift right
// of 31; the 16-bit register store keeps only the low word.
pub fn instr16_99_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg16(ctx, regs::AX);
    ctx.builder.const_i32(16);
    ctx.builder.shl_i32();
    ctx.builder.const_i32(31);
    ctx.builder.shr_s_i32();
    codegen::gen_set_reg16(ctx, regs::DX);
}
// 0x99 cdq: EDX = EAX >> 31 (arithmetic), i.e. EAX's sign bit in every bit.
pub fn instr32_99_jit(ctx: &mut JitContext) {
    codegen::gen_get_reg32(ctx, regs::EAX);
    ctx.builder.const_i32(31);
    ctx.builder.shr_s_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
}
// 0x9C pushf (16-bit operand size): pushes the low word of eflags.
// instr_9C_check (defined elsewhere) returns nonzero when pushf must fault —
// presumably vm86 mode with insufficient IOPL, TODO confirm — in which case
// #GP(0) is raised instead of pushing.
pub fn instr16_9C_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_9C_check");
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.else_();
    ctx.builder.call_fn0_ret("get_eflags");
    let value = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &value);
    ctx.builder.block_end();
    ctx.builder.free_local(value);
}
// 0x9C pushfd (32-bit operand size): pushes eflags with RF (bit 16) and
// VM (bit 17) masked off — 0xFCFFFF keeps bits 0..15 and 18..23.
pub fn instr32_9C_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("instr_9C_check");
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.else_();
    ctx.builder.call_fn0_ret("get_eflags");
    ctx.builder.const_i32(0xFCFFFF);
    ctx.builder.and_i32();
    let value = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &value);
    ctx.builder.block_end();
    ctx.builder.free_local(value);
}
// Shared implementation of popf/popfd (0x9D). On the fault path
// (instr_9C_check nonzero) raises #GP(0). Otherwise pops the new flag value —
// the 16-bit form merges the popped word into the upper bits of the current
// eflags — and applies it through update_eflags. If the pop transitions IF
// from clear to set, pending interrupts may now be deliverable, so the
// generated code exits the jitted block (writing back eip and register
// locals) and calls handle_irqs.
fn gen_popf(ctx: &mut JitContext, is_32: bool) {
    ctx.builder.call_fn0_ret("instr_9C_check");
    ctx.builder.if_void();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.else_();
    codegen::gen_get_flags(ctx.builder);
    let old_eflags = ctx.builder.set_new_local();
    if is_32 {
        codegen::gen_pop32s(ctx);
    }
    else {
        // 16-bit: new value = (old_eflags & !0xFFFF) | pop16()
        ctx.builder.get_local(&old_eflags);
        ctx.builder.const_i32(!0xFFFF);
        ctx.builder.and_i32();
        codegen::gen_pop16(ctx);
        ctx.builder.or_i32();
    }
    ctx.builder.call_fn1("update_eflags");
    // Compute (old IF was clear) && (new IF is set)
    ctx.builder.get_local(&old_eflags);
    ctx.builder.free_local(old_eflags);
    ctx.builder.const_i32(FLAG_INTERRUPT);
    ctx.builder.and_i32();
    ctx.builder.eqz_i32();
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(FLAG_INTERRUPT);
    ctx.builder.and_i32();
    ctx.builder.eqz_i32();
    ctx.builder.eqz_i32();
    ctx.builder.and_i32();
    ctx.builder.if_void();
    {
        // Interrupts were just enabled: leave jitted code so pending
        // interrupts can be serviced.
        codegen::gen_set_eip_to_after_current_instruction(ctx);
        codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        codegen::gen_fn0_const(ctx.builder, "handle_irqs");
        ctx.builder.return_();
    }
    ctx.builder.block_end();
    ctx.builder.block_end();
}
// 0x9D popf / popfd.
pub fn instr16_9D_jit(ctx: &mut JitContext) { gen_popf(ctx, false) }
pub fn instr32_9D_jit(ctx: &mut JitContext) { gen_popf(ctx, true) }
// 0x9E sahf: flags = ((flags & !0xFF) | AH), masked to the writable bits
// (FLAGS_MASK) and or'd with the always-set bits (FLAGS_DEFAULT), stored
// directly into the flags global. The low flag byte is then marked as no
// longer lazily computed.
pub fn instr_9E_jit(ctx: &mut JitContext) {
    ctx.builder.const_i32(global_pointers::flags as i32);
    codegen::gen_get_flags(ctx.builder);
    ctx.builder.const_i32(!0xFF);
    ctx.builder.and_i32();
    codegen::gen_get_reg8(ctx, regs::AH);
    ctx.builder.or_i32();
    ctx.builder.const_i32(FLAGS_MASK);
    ctx.builder.and_i32();
    ctx.builder.const_i32(FLAGS_DEFAULT);
    ctx.builder.or_i32();
    ctx.builder.store_aligned_i32(0);
    codegen::gen_clear_flags_changed_bits(ctx.builder, 0xFF);
}
// 0x9F lahf: AH = low byte of the (lazily materialized) eflags; the 8-bit
// register store keeps only the low byte.
pub fn instr_9F_jit(ctx: &mut JitContext) {
    ctx.builder.call_fn0_ret("get_eflags");
    codegen::gen_set_reg8(ctx, regs::AH);
}
// 0xA0..0xA3: mov between al/ax/eax and a direct (moffs) memory operand.
// The immediate address is adjusted by the DS base — or the active segment
// override — via jit_add_seg_offset.
pub fn instr_A0_jit(ctx: &mut JitContext, immaddr: u32) {
    // AL = mem8[seg:immaddr]
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg8(ctx, regs::AL);
}
pub fn instr16_A1_jit(ctx: &mut JitContext, immaddr: u32) {
    // AX = mem16[seg:immaddr]
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read16(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg16(ctx, regs::AX);
}
pub fn instr32_A1_jit(ctx: &mut JitContext, immaddr: u32) {
    // EAX = mem32[seg:immaddr]
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read32(ctx, &address_local);
    ctx.builder.free_local(address_local);
    codegen::gen_set_reg32(ctx, regs::EAX);
}
pub fn instr_A2_jit(ctx: &mut JitContext, immaddr: u32) {
    // mem8[seg:immaddr] = AL — the 8-bit write takes the low byte of the EAX
    // local; unsafe_clone presumably aliases the wasm local handle without
    // affecting its lifetime (see the builder's local management).
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write8(
        ctx,
        &address_local,
        &ctx.register_locals[regs::EAX as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
pub fn instr16_A3_jit(ctx: &mut JitContext, immaddr: u32) {
    // mem16[seg:immaddr] = AX
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(
        ctx,
        &address_local,
        &ctx.register_locals[regs::EAX as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
pub fn instr32_A3_jit(ctx: &mut JitContext, immaddr: u32) {
    // mem32[seg:immaddr] = EAX
    ctx.builder.const_i32(immaddr as i32);
    jit_add_seg_offset(ctx, regs::DS);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(
        ctx,
        &address_local,
        &ctx.register_locals[regs::EAX as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
// 0xA8 / 0xA9: test al/ax/eax, imm. register_locals[0] is the wasm local
// holding EAX; gen_test* only reads it, no register is written.
pub fn instr_A8_jit(ctx: &mut JitContext, imm8: u32) {
    gen_test8(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm8 as i32),
    );
}
pub fn instr16_A9_jit(ctx: &mut JitContext, imm16: u32) {
    gen_test16(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm16 as i32),
    );
}
pub fn instr32_A9_jit(ctx: &mut JitContext, imm32: u32) {
    gen_test32(
        ctx.builder,
        &ctx.register_locals[0],
        &LocalOrImmediate::Immediate(imm32 as i32),
    );
}
// Discriminates the x86 string-instruction families handled by
// gen_string_ins. NOTE: this shadows std's `String` within this module; the
// `format!` call in gen_string_ins still works because it never names the
// std type explicitly.
#[derive(PartialEq)]
enum String {
    INS,
    OUTS,
    MOVS,
    CMPS,
    STOS,
    LODS,
    SCAS,
}
// Emits a call to the interpreted string-instruction helper (e.g.
// "movsb_rep", "scasw_repz"): pushes the address-size flag — and, for
// instructions that read through DS:[e]SI, the segment-adjusted source
// offset — spills the register locals to memory, calls the helper by its
// composed name, then reloads the locals.
fn gen_string_ins(ctx: &mut JitContext, ins: String, size: u8, prefix: u8) {
    dbg_assert!(prefix == 0 || prefix == 0xF2 || prefix == 0xF3);
    dbg_assert!(size == 8 || size == 16 || size == 32);
    // First helper argument: whether the address size is 32-bit.
    ctx.builder.const_i32(ctx.cpu.asize_32() as i32);
    // Instructions with a memory source take a second, DS-relative argument.
    let has_source_arg =
        matches!(ins, String::OUTS | String::CMPS | String::LODS | String::MOVS);
    if has_source_arg {
        ctx.builder.const_i32(0);
        jit_add_seg_offset(ctx, regs::DS);
    }
    let mnemonic = match ins {
        String::INS => "ins",
        String::OUTS => "outs",
        String::MOVS => "movs",
        String::CMPS => "cmps",
        String::STOS => "stos",
        String::LODS => "lods",
        String::SCAS => "scas",
    };
    let width = match size {
        8 => "b",
        16 => "w",
        _ => "d",
    };
    let rep_suffix = if prefix == 0xF2 || prefix == 0xF3 {
        // Only cmps/scas distinguish repnz from repz; the others treat both
        // prefixes as plain rep.
        match ins {
            String::CMPS | String::SCAS => {
                if prefix == 0xF2 {
                    "_repnz"
                }
                else {
                    "_repz"
                }
            },
            _ => "_rep",
        }
    }
    else {
        "_no_rep"
    };
    let name = format!("{}{}{}", mnemonic, width, rep_suffix);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    if has_source_arg {
        ctx.builder.call_fn2(&name)
    }
    else {
        ctx.builder.call_fn1(&name)
    }
    codegen::gen_move_registers_from_memory_to_locals(ctx);
}
// One wrapper per opcode / rep-prefix / operand-size combination, all
// delegating to gen_string_ins (prefix 0 = no rep, 0xF2 = repnz, 0xF3 = repz).
pub fn instr_6C_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 8, 0) }
pub fn instr_F26C_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 8, 0xF2) }
pub fn instr_F36C_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 8, 0xF3) }
pub fn instr16_6D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 16, 0) }
pub fn instr16_F26D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 16, 0xF2) }
pub fn instr16_F36D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 16, 0xF3) }
pub fn instr32_6D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 32, 0) }
pub fn instr32_F26D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 32, 0xF2) }
pub fn instr32_F36D_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::INS, 32, 0xF3) }
pub fn instr_6E_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 8, 0) }
pub fn instr_F26E_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 8, 0xF2) }
pub fn instr_F36E_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 8, 0xF3) }
pub fn instr16_6F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 16, 0) }
pub fn instr16_F26F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 16, 0xF2) }
pub fn instr16_F36F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 16, 0xF3) }
pub fn instr32_6F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 32, 0) }
pub fn instr32_F26F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 32, 0xF2) }
pub fn instr32_F36F_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::OUTS, 32, 0xF3) }
pub fn instr_A4_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 8, 0) }
pub fn instr_F2A4_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 8, 0xF2) }
pub fn instr_F3A4_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 8, 0xF3) }
pub fn instr16_A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 16, 0) }
pub fn instr16_F2A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 16, 0xF2) }
pub fn instr16_F3A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 16, 0xF3) }
pub fn instr32_A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 32, 0) }
pub fn instr32_F2A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 32, 0xF2) }
pub fn instr32_F3A5_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::MOVS, 32, 0xF3) }
pub fn instr_A6_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 8, 0) }
pub fn instr_F2A6_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 8, 0xF2) }
pub fn instr_F3A6_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 8, 0xF3) }
pub fn instr16_A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 16, 0) }
pub fn instr16_F2A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 16, 0xF2) }
pub fn instr16_F3A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 16, 0xF3) }
pub fn instr32_A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 32, 0) }
pub fn instr32_F2A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 32, 0xF2) }
pub fn instr32_F3A7_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::CMPS, 32, 0xF3) }
pub fn instr_AA_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 8, 0) }
pub fn instr_F2AA_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 8, 0xF2) }
pub fn instr_F3AA_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 8, 0xF3) }
pub fn instr16_AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 16, 0) }
pub fn instr16_F2AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 16, 0xF2) }
pub fn instr16_F3AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 16, 0xF3) }
pub fn instr32_AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 32, 0) }
pub fn instr32_F2AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 32, 0xF2) }
pub fn instr32_F3AB_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::STOS, 32, 0xF3) }
pub fn instr_AC_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 8, 0) }
pub fn instr_F2AC_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 8, 0xF2) }
pub fn instr_F3AC_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 8, 0xF3) }
pub fn instr16_AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 16, 0) }
pub fn instr16_F2AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 16, 0xF2) }
pub fn instr16_F3AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 16, 0xF3) }
pub fn instr32_AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 32, 0) }
pub fn instr32_F2AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 32, 0xF2) }
pub fn instr32_F3AD_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::LODS, 32, 0xF3) }
pub fn instr_AE_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 8, 0) }
pub fn instr_F2AE_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 8, 0xF2) }
pub fn instr_F3AE_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 8, 0xF3) }
pub fn instr16_AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 16, 0) }
pub fn instr16_F2AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 16, 0xF2) }
pub fn instr16_F3AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 16, 0xF3) }
pub fn instr32_AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 32, 0) }
pub fn instr32_F2AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 32, 0xF2) }
pub fn instr32_F3AF_jit(ctx: &mut JitContext) { gen_string_ins(ctx, String::SCAS, 32, 0xF3) }
// 0F 31 rdtsc: permitted when cpl == 0 or CR4.TSD is clear; otherwise #GP(0).
// read_tsc returns an i64 whose low half goes to EAX and high half to EDX.
pub fn instr_0F31_jit(ctx: &mut JitContext) {
    ctx.builder.load_fixed_u8(global_pointers::cpl as u32);
    ctx.builder.eqz_i32();
    // The TSD bit must be visible in the single byte loaded below
    dbg_assert!(regs::CR4_TSD < 0x100);
    ctx.builder
        .load_fixed_u8(global_pointers::get_creg_offset(4));
    ctx.builder.const_i32(regs::CR4_TSD as i32);
    ctx.builder.and_i32();
    ctx.builder.eqz_i32();
    ctx.builder.or_i32();
    ctx.builder.if_void();
    ctx.builder.call_fn0_ret_i64("read_tsc");
    let tsc = ctx.builder.tee_new_local_i64();
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EAX);
    ctx.builder.get_local_i64(&tsc);
    ctx.builder.const_i64(32);
    ctx.builder.shr_u_i64();
    ctx.builder.wrap_i64_to_i32();
    codegen::gen_set_reg32(ctx, regs::EDX);
    ctx.builder.free_local_i64(tsc);
    ctx.builder.else_();
    codegen::gen_trigger_gp(ctx, 0);
    ctx.builder.block_end();
}
// 0F 18..0F 1F: prefetch hints and reserved/multi-byte nops — no code is
// generated for them.
pub fn instr_0F18_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F18_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F19_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F19_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1C_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1C_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1D_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1D_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1E_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1E_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
pub fn instr_0F1F_mem_jit(_ctx: &mut JitContext, _modrm_byte: ModrmByte, _reg: u32) {}
pub fn instr_0F1F_reg_jit(_ctx: &mut JitContext, _r1: u32, _r2: u32) {}
// 0F A4/A5 shld and 0F AC/AD shrd, generated by macros: double-precision
// shifts of r/m by an immediate count (masked to 5 bits) or by cl, with the
// extra bits supplied by the reg operand.
define_instruction_read_write_mem16!(
    "shld16",
    instr16_0FA4_mem_jit,
    instr16_0FA4_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    "shld32",
    instr32_0FA4_mem_jit,
    instr32_0FA4_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "shld16",
    instr16_0FA5_mem_jit,
    instr16_0FA5_reg_jit,
    reg,
    cl
);
define_instruction_read_write_mem32!(
    "shld32",
    instr32_0FA5_mem_jit,
    instr32_0FA5_reg_jit,
    reg,
    cl
);
define_instruction_read_write_mem16!(
    "shrd16",
    instr16_0FAC_mem_jit,
    instr16_0FAC_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem32!(
    "shrd32",
    instr32_0FAC_mem_jit,
    instr32_0FAC_reg_jit,
    reg,
    imm8_5bits
);
define_instruction_read_write_mem16!(
    "shrd16",
    instr16_0FAD_mem_jit,
    instr16_0FAD_reg_jit,
    reg,
    cl
);
define_instruction_read_write_mem32!(
    "shrd32",
    instr32_0FAD_mem_jit,
    instr32_0FAD_reg_jit,
    reg,
    cl
);
// 0F B1 cmpxchg r/m, r. The 16-bit forms call the interpreted "cmpxchg16"
// helper — register locals are spilled before and reloaded after the call,
// since the helper reads/writes registers in memory — while the 32-bit forms
// are generated inline via gen_cmpxchg32.
pub fn instr16_0FB1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(r2 as i32);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn2_ret("cmpxchg16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    codegen::gen_set_reg16(ctx, r1);
}
pub fn instr16_0FB1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
        ctx.builder.const_i32(r as i32);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        ctx.builder.call_fn2_ret("cmpxchg16");
        codegen::gen_move_registers_from_memory_to_locals(ctx);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr32_0FB1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    gen_cmpxchg32(ctx, r2);
    codegen::gen_set_reg32(ctx, r1);
}
pub fn instr32_0FB1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
        gen_cmpxchg32(ctx, r);
    });
    ctx.builder.free_local(address_local);
}
// 0F B6/B7 movzx: the 8- or 16-bit source value is loaded (presumably already
// zero-extended on the wasm stack) and stored into the wider destination
// register, which yields the zero-extension.
pub fn instr16_0FB6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr16_0FB6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr32_0FB6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr32_0FB6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr16_0FB7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_set_reg16(ctx, r);
}
// 16-bit movzx from a 16-bit source degenerates to a plain move.
pub fn instr16_0FB7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr32_0FB7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_0FB7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::gen_set_reg32(ctx, r2);
}
// F3 0F B8 popcnt: counts set bits via the "popcnt" helper (defined
// elsewhere; presumably it also updates the flags as the instruction
// requires — confirm against its definition).
pub fn instr16_F30FB8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_F30FB8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr32_F30FB8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_F30FB8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    ctx.builder.call_fn1_ret("popcnt");
    codegen::gen_set_reg32(ctx, r2);
}
// 0F BC/BD bsf/bsr (bit scan forward/reverse): the 16-bit forms call helpers
// by name, the 32-bit forms are generated inline.
define_instruction_write_reg16!("bsf16", instr16_0FBC_mem_jit, instr16_0FBC_reg_jit);
define_instruction_write_reg32!(gen_bsf32, instr32_0FBC_mem_jit, instr32_0FBC_reg_jit);
define_instruction_write_reg16!("bsr16", instr16_0FBD_mem_jit, instr16_0FBD_reg_jit);
define_instruction_write_reg32!(gen_bsr32, instr32_0FBD_mem_jit, instr32_0FBD_reg_jit);
// 0F BE/BF movsx: load the 8- or 16-bit source, sign-extend it on the wasm
// stack, and store into the 16- or 32-bit destination register.
pub fn instr16_0FBE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr16_0FBE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr32_0FBE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg8(ctx, r1);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr32_0FBE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read8(ctx, modrm_byte);
    codegen::sign_extend_i8(ctx.builder);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr16_0FBF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg16(ctx, r2);
}
pub fn instr16_0FBF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr32_0FBF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr32_0FBF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
    codegen::sign_extend_i16(ctx.builder);
    codegen::gen_set_reg32(ctx, r);
}
// 0F C1 xadd r/m, r: exchange the operands and store their sum in the
// destination. The 16-bit forms call the interpreted "xadd16" helper
// (register locals are spilled/reloaded around the call); the 32-bit forms
// are generated inline via gen_xadd32.
pub fn instr16_0FC1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::WORD, &address_local, &|ref mut ctx| {
        ctx.builder.const_i32(r as i32);
        codegen::gen_move_registers_from_locals_to_memory(ctx);
        ctx.builder.call_fn2_ret("xadd16");
        codegen::gen_move_registers_from_memory_to_locals(ctx);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr16_0FC1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg16(ctx, r1);
    ctx.builder.const_i32(r2 as i32);
    codegen::gen_move_registers_from_locals_to_memory(ctx);
    ctx.builder.call_fn2_ret("xadd16");
    codegen::gen_move_registers_from_memory_to_locals(ctx);
    codegen::gen_set_reg16(ctx, r1);
}
pub fn instr32_0FC1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::DWORD, &address_local, &|ref mut ctx| {
        // The loaded value is captured in a local; gen_xadd32 consumes it and
        // the value left on the stack is written back by gen_safe_read_write.
        let dest_operand = ctx.builder.set_new_local();
        gen_xadd32(ctx, &dest_operand, r);
        ctx.builder.get_local(&dest_operand);
        ctx.builder.free_local(dest_operand);
    });
    ctx.builder.free_local(address_local);
}
pub fn instr32_0FC1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    let dest_operand = ctx.builder.set_new_local();
    gen_xadd32(ctx, &dest_operand, r2);
    ctx.builder.get_local(&dest_operand);
    codegen::gen_set_reg32(ctx, r1);
    ctx.builder.free_local(dest_operand);
}
// 0F C3 movnti m32, r32: non-temporal store, implemented as a plain 32-bit
// write (the caching hint has no effect in the emulator). The register form
// is invalid and raises #UD.
pub fn instr_0FC3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(
        ctx,
        &address_local,
        &ctx.register_locals[r as usize].unsafe_clone(),
    );
    ctx.builder.free_local(address_local);
}
pub fn instr_0FC3_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) { codegen::gen_trigger_ud(ctx) }
// 0F C7 /1 cmpxchg8b m64: compares EDX:EAX against the 64-bit memory
// operand. If equal, ZF is set and ECX:EBX is written back; otherwise ZF is
// cleared, EDX:EAX is loaded from memory, and the unchanged memory value is
// written back. The if/else block yields (as an i64) the value that
// gen_safe_read_write stores.
pub fn instr16_0FC7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    // cmpxchg8b
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read_write(ctx, BitSize::QWORD, &address_local, &|ref mut ctx| {
        let dest_operand = ctx.builder.tee_new_local_i64();
        // Build EDX:EAX as an i64 and compare against the loaded value
        codegen::gen_get_reg32(ctx, regs::EDX);
        ctx.builder.extend_unsigned_i32_to_i64();
        ctx.builder.const_i64(32);
        ctx.builder.shl_i64();
        codegen::gen_get_reg32(ctx, regs::EAX);
        ctx.builder.extend_unsigned_i32_to_i64();
        ctx.builder.or_i64();
        ctx.builder.eq_i64();
        ctx.builder.if_i64();
        {
            // Equal: set ZF and leave ECX:EBX as the value to store
            codegen::gen_set_flags_bits(ctx.builder, FLAG_ZERO);
            codegen::gen_get_reg32(ctx, regs::ECX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.const_i64(32);
            ctx.builder.shl_i64();
            codegen::gen_get_reg32(ctx, regs::EBX);
            ctx.builder.extend_unsigned_i32_to_i64();
            ctx.builder.or_i64();
        }
        ctx.builder.else_();
        {
            // Not equal: clear ZF, load EDX:EAX from the memory value, and
            // leave the unchanged memory value as the value to store
            codegen::gen_clear_flags_bits(ctx.builder, FLAG_ZERO);
            ctx.builder.get_local_i64(&dest_operand);
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EAX);
            ctx.builder.get_local_i64(&dest_operand);
            ctx.builder.const_i64(32);
            ctx.builder.shr_u_i64();
            ctx.builder.wrap_i64_to_i32();
            codegen::gen_set_reg32(ctx, regs::EDX);
            ctx.builder.get_local_i64(&dest_operand);
        }
        ctx.builder.block_end();
        // ZF is now definitively known: remove it from the lazy flag set
        codegen::gen_clear_flags_changed_bits(ctx.builder, FLAG_ZERO);
        ctx.builder.free_local_i64(dest_operand);
    });
    ctx.builder.free_local(address_local);
}
// The register form of cmpxchg8b is invalid: #UD.
pub fn instr16_0FC7_1_reg_jit(ctx: &mut JitContext, _r: u32) { codegen::gen_trigger_ud(ctx); }
// Operand size does not affect cmpxchg8b.
pub fn instr32_0FC7_1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte) {
    instr16_0FC7_1_mem_jit(ctx, modrm_byte);
}
pub fn instr32_0FC7_1_reg_jit(ctx: &mut JitContext, _r: u32) { codegen::gen_trigger_ud(ctx); }
// C6 /0 and C7 /0: mov r/m, imm for the 8/16/32-bit operand sizes.
pub fn instr_C6_0_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    // reg8[r] = imm;
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg8(ctx, r);
}
pub fn instr_C6_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    // mem8[modrm] = imm;
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write8(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr16_C7_0_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    // reg16[r] = imm;
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg16(ctx, r);
}
pub fn instr16_C7_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    // mem16[modrm] = imm;
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write16(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr32_C7_0_reg_jit(ctx: &mut JitContext, r: u32, imm: u32) {
    // reg32[r] = imm;
    ctx.builder.const_i32(imm as i32);
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr32_C7_0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm: u32) {
    // mem32[modrm] = imm;
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(imm as i32);
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
// 0F C8..CF bswap r32: one wrapper per register index.
pub fn instr_0FC8_jit(ctx: &mut JitContext) { gen_bswap(ctx, 0) }
pub fn instr_0FC9_jit(ctx: &mut JitContext) { gen_bswap(ctx, 1) }
pub fn instr_0FCA_jit(ctx: &mut JitContext) { gen_bswap(ctx, 2) }
pub fn instr_0FCB_jit(ctx: &mut JitContext) { gen_bswap(ctx, 3) }
pub fn instr_0FCC_jit(ctx: &mut JitContext) { gen_bswap(ctx, 4) }
pub fn instr_0FCD_jit(ctx: &mut JitContext) { gen_bswap(ctx, 5) }
pub fn instr_0FCE_jit(ctx: &mut JitContext) { gen_bswap(ctx, 6) }
pub fn instr_0FCF_jit(ctx: &mut JitContext) { gen_bswap(ctx, 7) }
// 0F AF imul r, r/m: the 16-bit form calls the named helper, the 32-bit
// form is generated inline.
define_instruction_write_reg16!("imul_reg16", instr16_0FAF_mem_jit, instr16_0FAF_reg_jit);
define_instruction_write_reg32!(gen_imul_reg32, instr32_0FAF_mem_jit, instr32_0FAF_reg_jit);
// Generates cmovcc handlers (0F 40..4F, 16-bit). Note that for the memory
// form the operand is always read — so address faults occur regardless of
// the condition, as on hardware — and only the register write is guarded by
// the condition.
macro_rules! define_cmovcc16(
    ($cond:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read16(ctx, modrm_byte);
            let value = ctx.builder.set_new_local();
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            ctx.builder.get_local(&value);
            codegen::gen_set_reg16(ctx, r);
            ctx.builder.block_end();
            ctx.builder.free_local(value);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            codegen::gen_get_reg16(ctx, r1);
            codegen::gen_set_reg16(ctx, r2);
            ctx.builder.block_end();
        }
    );
);
// 32-bit variant of the above.
macro_rules! define_cmovcc32(
    ($cond:expr, $name_mem:ident, $name_reg:ident) => (
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
            codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
            let value = ctx.builder.set_new_local();
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            ctx.builder.get_local(&value);
            codegen::gen_set_reg32(ctx, r);
            ctx.builder.block_end();
            ctx.builder.free_local(value);
        }
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, r2: u32) {
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.if_void();
            codegen::gen_get_reg32(ctx, r1);
            codegen::gen_set_reg32(ctx, r2);
            ctx.builder.block_end();
        }
    );
);
// cmovcc instantiations: the first argument is the x86 condition code
// (0x0 = o, 0x1 = no, ... 0xF = nle), matching the opcode's low nibble.
define_cmovcc16!(0x0, instr16_0F40_mem_jit, instr16_0F40_reg_jit);
define_cmovcc16!(0x1, instr16_0F41_mem_jit, instr16_0F41_reg_jit);
define_cmovcc16!(0x2, instr16_0F42_mem_jit, instr16_0F42_reg_jit);
define_cmovcc16!(0x3, instr16_0F43_mem_jit, instr16_0F43_reg_jit);
define_cmovcc16!(0x4, instr16_0F44_mem_jit, instr16_0F44_reg_jit);
define_cmovcc16!(0x5, instr16_0F45_mem_jit, instr16_0F45_reg_jit);
define_cmovcc16!(0x6, instr16_0F46_mem_jit, instr16_0F46_reg_jit);
define_cmovcc16!(0x7, instr16_0F47_mem_jit, instr16_0F47_reg_jit);
define_cmovcc16!(0x8, instr16_0F48_mem_jit, instr16_0F48_reg_jit);
define_cmovcc16!(0x9, instr16_0F49_mem_jit, instr16_0F49_reg_jit);
define_cmovcc16!(0xA, instr16_0F4A_mem_jit, instr16_0F4A_reg_jit);
define_cmovcc16!(0xB, instr16_0F4B_mem_jit, instr16_0F4B_reg_jit);
define_cmovcc16!(0xC, instr16_0F4C_mem_jit, instr16_0F4C_reg_jit);
define_cmovcc16!(0xD, instr16_0F4D_mem_jit, instr16_0F4D_reg_jit);
define_cmovcc16!(0xE, instr16_0F4E_mem_jit, instr16_0F4E_reg_jit);
define_cmovcc16!(0xF, instr16_0F4F_mem_jit, instr16_0F4F_reg_jit);
define_cmovcc32!(0x0, instr32_0F40_mem_jit, instr32_0F40_reg_jit);
define_cmovcc32!(0x1, instr32_0F41_mem_jit, instr32_0F41_reg_jit);
define_cmovcc32!(0x2, instr32_0F42_mem_jit, instr32_0F42_reg_jit);
define_cmovcc32!(0x3, instr32_0F43_mem_jit, instr32_0F43_reg_jit);
define_cmovcc32!(0x4, instr32_0F44_mem_jit, instr32_0F44_reg_jit);
define_cmovcc32!(0x5, instr32_0F45_mem_jit, instr32_0F45_reg_jit);
define_cmovcc32!(0x6, instr32_0F46_mem_jit, instr32_0F46_reg_jit);
define_cmovcc32!(0x7, instr32_0F47_mem_jit, instr32_0F47_reg_jit);
define_cmovcc32!(0x8, instr32_0F48_mem_jit, instr32_0F48_reg_jit);
define_cmovcc32!(0x9, instr32_0F49_mem_jit, instr32_0F49_reg_jit);
define_cmovcc32!(0xA, instr32_0F4A_mem_jit, instr32_0F4A_reg_jit);
define_cmovcc32!(0xB, instr32_0F4B_mem_jit, instr32_0F4B_reg_jit);
define_cmovcc32!(0xC, instr32_0F4C_mem_jit, instr32_0F4C_reg_jit);
define_cmovcc32!(0xD, instr32_0F4D_mem_jit, instr32_0F4D_reg_jit);
define_cmovcc32!(0xE, instr32_0F4E_mem_jit, instr32_0F4E_reg_jit);
define_cmovcc32!(0xF, instr32_0F4F_mem_jit, instr32_0F4F_reg_jit);
// SETcc (0F 90..0F 9F): expands to a pair of JIT handlers for one condition
// code. Both forms evaluate the condition via gen_condition_fn, normalize the
// result to exactly 0 or 1 with a `!= 0` comparison, and store that byte to
// the destination (memory or reg8).
macro_rules! define_setcc(
    ($cond:expr, $name_mem:ident, $name_reg:ident) => (
        // Memory form: resolve the modrm address first, then compute the
        // condition byte and emit a fault-checked 8-bit write.
        pub fn $name_mem(ctx: &mut JitContext, modrm_byte: ModrmByte, _r: u32) {
            codegen::gen_modrm_resolve(ctx, modrm_byte);
            let address_local = ctx.builder.set_new_local();
            codegen::gen_condition_fn(ctx, $cond);
            // Collapse any non-zero condition result to 1.
            ctx.builder.const_i32(0);
            ctx.builder.ne_i32();
            let value_local = ctx.builder.set_new_local();
            codegen::gen_safe_write8(ctx, &address_local, &value_local);
            ctx.builder.free_local(address_local);
            ctx.builder.free_local(value_local);
        }
        // Register form: condition byte goes straight into reg8 r1.
        pub fn $name_reg(ctx: &mut JitContext, r1: u32, _r2: u32) {
            codegen::gen_condition_fn(ctx, $cond);
            ctx.builder.const_i32(0);
            ctx.builder.ne_i32();
            codegen::gen_set_reg8(ctx, r1);
        }
    );
);
// Instantiate SETcc handlers for all 16 condition codes (0F 90..0F 9F).
define_setcc!(0x0, instr_0F90_mem_jit, instr_0F90_reg_jit);
define_setcc!(0x1, instr_0F91_mem_jit, instr_0F91_reg_jit);
define_setcc!(0x2, instr_0F92_mem_jit, instr_0F92_reg_jit);
define_setcc!(0x3, instr_0F93_mem_jit, instr_0F93_reg_jit);
define_setcc!(0x4, instr_0F94_mem_jit, instr_0F94_reg_jit);
define_setcc!(0x5, instr_0F95_mem_jit, instr_0F95_reg_jit);
define_setcc!(0x6, instr_0F96_mem_jit, instr_0F96_reg_jit);
define_setcc!(0x7, instr_0F97_mem_jit, instr_0F97_reg_jit);
define_setcc!(0x8, instr_0F98_mem_jit, instr_0F98_reg_jit);
define_setcc!(0x9, instr_0F99_mem_jit, instr_0F99_reg_jit);
define_setcc!(0xA, instr_0F9A_mem_jit, instr_0F9A_reg_jit);
define_setcc!(0xB, instr_0F9B_mem_jit, instr_0F9B_reg_jit);
define_setcc!(0xC, instr_0F9C_mem_jit, instr_0F9C_reg_jit);
define_setcc!(0xD, instr_0F9D_mem_jit, instr_0F9D_reg_jit);
define_setcc!(0xE, instr_0F9E_mem_jit, instr_0F9E_reg_jit);
define_setcc!(0xF, instr_0F9F_mem_jit, instr_0F9F_reg_jit);
// 0F 10 / 0F 11 move group (per the x86 opcode map: MOVUPS / MOVUPD / MOVSD
// depending on prefix). These encodings permit unaligned memory operands, so
// plain safe reads/writes are emitted with no alignment check.
pub fn instr_0F10_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVUPS xmm, m128: unaligned 128-bit load directly into xmm register r.
    let dest = global_pointers::get_reg_xmm_offset(r);
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_0F10_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
pub fn instr_660F10_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVUPD xmm, m128: identical codegen to the 0F 10 form.
    let dest = global_pointers::get_reg_xmm_offset(r);
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_660F10_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
pub fn instr_F20F10_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVSD xmm, m64: same semantics as the MOVQ load (F3 0F 7E) —
    // 64-bit load with the upper half of the xmm register zeroed.
    instr_F30F7E_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_F20F10_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // Register-register MOVSD merges only the low quadword (upper half of the
    // destination is preserved), handled by the out-of-line helper.
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_F20F10_reg");
}
pub fn instr_0F11_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVUPS m128, xmm: store form; reuses the (alignment-unchecked)
    // 128-bit store codegen of 0F 29.
    instr_0F29_mem_jit(ctx, modrm_byte, r)
}
// Store forms swap the operand order: the rm operand (r1) is the destination.
pub fn instr_0F11_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
pub fn instr_660F11_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVUPD m128, xmm.
    instr_660F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_660F11_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
pub fn instr_F20F11_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVSD m64, xmm: same 64-bit store as MOVQ (66 0F D6).
    instr_660FD6_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_F20F11_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_F20F11_reg");
}
// 0F 28: MOVAPS / MOVAPD (with 66 prefix) loads.
// NOTE(review): the architectural alignment check (#GP on unaligned m128) is
// not emitted here — same caveat as the "XXX: Aligned write or #gp" note on
// the 0F 29 store below.
pub fn instr_0F28_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    let dest = global_pointers::get_reg_xmm_offset(r);
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_0F28_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
pub fn instr_660F28_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    let dest = global_pointers::get_reg_xmm_offset(r);
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_660F28_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// 0F 29: MOVAPS / MOVAPD stores (xmm -> m128).
pub fn instr_0F29_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // XXX: Aligned write or #gp
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    // Load both 64-bit halves of xmm register r from its fixed offset,
    // then emit one fault-checked 128-bit store.
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r) as i32);
    ctx.builder.load_aligned_i64(0);
    let value_local_low = ctx.builder.set_new_local_i64();
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r) as i32 + 8);
    ctx.builder.load_aligned_i64(0);
    let value_local_high = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write128(ctx, &address_local, &value_local_low, &value_local_high);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local_low);
    ctx.builder.free_local_i64(value_local_high);
}
// Register forms: rm operand (r1) is the destination, hence the swapped args.
pub fn instr_0F29_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
pub fn instr_660F29_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVAPD store: identical codegen to MOVAPS.
    instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_660F29_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
// 0F 2B: MOVNTPS / MOVNTPD (non-temporal stores). The non-temporal hint is
// irrelevant for emulation, so these reuse the plain 128-bit store codegen.
// The register-operand encoding of MOVNT* is invalid -> #UD.
pub fn instr_0F2B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    instr_0F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_0F2B_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F2B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    instr_0F29_mem_jit(ctx, modrm_byte, r)
}
pub fn instr_660F2B_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
    codegen::gen_trigger_ud(ctx);
}
// Scalar float -> int32 conversions (0F 2C = truncating CVTT*, 0F 2D = rounding
// CVT*; F2 prefix = f64 source, F3 prefix = f32 source). The actual conversion
// (including rounding mode / #I behavior) is done by the called helper; the JIT
// only wires up the operand load and the reg32 store.
pub fn instr_F20F2C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // CVTTSD2SI r32, m64
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.reinterpret_i64_as_f64();
    ctx.builder
        .call_fn1_f64_ret("sse_convert_with_truncation_f64_to_i32");
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F20F2C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // CVTTSD2SI r32, xmm: read the low f64 of xmm r1 from its fixed offset.
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_f64(0);
    ctx.builder
        .call_fn1_f64_ret("sse_convert_with_truncation_f64_to_i32");
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr_F30F2C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // CVTTSS2SI r32, m32
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.reinterpret_i32_as_f32();
    ctx.builder
        .call_fn1_f32_ret("sse_convert_with_truncation_f32_to_i32");
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F30F2C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // CVTTSS2SI r32, xmm
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_f32(0);
    ctx.builder
        .call_fn1_f32_ret("sse_convert_with_truncation_f32_to_i32");
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr_F20F2D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // CVTSD2SI r32, m64 (rounds per MXCSR inside the helper)
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.reinterpret_i64_as_f64();
    ctx.builder.call_fn1_f64_ret("sse_convert_f64_to_i32");
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F20F2D_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // CVTSD2SI r32, xmm
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_f64(0);
    ctx.builder.call_fn1_f64_ret("sse_convert_f64_to_i32");
    codegen::gen_set_reg32(ctx, r2);
}
pub fn instr_F30F2D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // CVTSS2SI r32, m32
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.reinterpret_i32_as_f32();
    ctx.builder.call_fn1_f32_ret("sse_convert_f32_to_i32");
    codegen::gen_set_reg32(ctx, r);
}
pub fn instr_F30F2D_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // CVTSS2SI r32, xmm
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
    ctx.builder.load_aligned_f32(0);
    ctx.builder.call_fn1_f32_ret("sse_convert_f32_to_i32");
    codegen::gen_set_reg32(ctx, r2);
}
// MMX 0F 60..0F 6B (unpack/pack/compare group). All forward to generic
// mm-destination helpers that load the source operand and call the named
// interpreter function. 0F 60/61/62 (the low unpacks) only need the low
// 32 bits of the source and use the *_mm32 helpers; the rest read a full
// 64-bit source.
pub fn instr_0F60_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem32(ctx, "instr_0F60", modrm_byte, r);
}
pub fn instr_0F60_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm32(ctx, "instr_0F60", r1, r2);
}
pub fn instr_0F61_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem32(ctx, "instr_0F61", modrm_byte, r);
}
pub fn instr_0F61_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm32(ctx, "instr_0F61", r1, r2);
}
pub fn instr_0F62_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem32(ctx, "instr_0F62", modrm_byte, r);
}
pub fn instr_0F62_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm32(ctx, "instr_0F62", r1, r2);
}
pub fn instr_0F63_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F63", modrm_byte, r);
}
pub fn instr_0F63_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F63", r1, r2);
}
pub fn instr_0F64_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F64", modrm_byte, r);
}
pub fn instr_0F64_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F64", r1, r2);
}
pub fn instr_0F65_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F65", modrm_byte, r);
}
pub fn instr_0F65_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F65", r1, r2);
}
pub fn instr_0F66_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F66", modrm_byte, r);
}
pub fn instr_0F66_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F66", r1, r2);
}
pub fn instr_0F67_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F67", modrm_byte, r);
}
pub fn instr_0F67_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F67", r1, r2);
}
pub fn instr_0F68_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F68", modrm_byte, r);
}
pub fn instr_0F68_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F68", r1, r2);
}
pub fn instr_0F69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F69", modrm_byte, r);
}
pub fn instr_0F69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F69", r1, r2);
}
pub fn instr_0F6A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F6A", modrm_byte, r);
}
pub fn instr_0F6A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F6A", r1, r2);
}
pub fn instr_0F6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    mmx_read64_mm_mem(ctx, "instr_0F6B", modrm_byte, r);
}
pub fn instr_0F6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    mmx_read64_mm_mm(ctx, "instr_0F6B", r1, r2);
}
// SSE2 66 0F 60..0F 6D (xmm unpack/pack/compare group). All forward to generic
// xmm-destination helpers that load a 128-bit source and call the named
// interpreter function.
pub fn instr_660F60_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // Note: Only requires 64-bit read, but is allowed to do 128-bit read
    sse_read128_xmm_mem(ctx, "instr_660F60", modrm_byte, r);
}
pub fn instr_660F60_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F60", r1, r2);
}
pub fn instr_660F61_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // Note: Only requires 64-bit read, but is allowed to do 128-bit read
    sse_read128_xmm_mem(ctx, "instr_660F61", modrm_byte, r);
}
pub fn instr_660F61_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F61", r1, r2);
}
pub fn instr_660F62_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F62", modrm_byte, r);
}
pub fn instr_660F62_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F62", r1, r2);
}
pub fn instr_660F63_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F63", modrm_byte, r);
}
pub fn instr_660F63_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F63", r1, r2);
}
pub fn instr_660F64_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F64", modrm_byte, r);
}
pub fn instr_660F64_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F64", r1, r2);
}
pub fn instr_660F65_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F65", modrm_byte, r);
}
pub fn instr_660F65_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F65", r1, r2);
}
pub fn instr_660F66_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F66", modrm_byte, r);
}
pub fn instr_660F66_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F66", r1, r2);
}
pub fn instr_660F67_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F67", modrm_byte, r);
}
pub fn instr_660F67_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F67", r1, r2);
}
pub fn instr_660F68_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F68", modrm_byte, r);
}
pub fn instr_660F68_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F68", r1, r2);
}
pub fn instr_660F69_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F69", modrm_byte, r);
}
pub fn instr_660F69_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F69", r1, r2);
}
pub fn instr_660F6A_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F6A", modrm_byte, r);
}
pub fn instr_660F6A_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F6A", r1, r2);
}
pub fn instr_660F6B_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F6B", modrm_byte, r);
}
pub fn instr_660F6B_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F6B", r1, r2);
}
pub fn instr_660F6C_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F6C", modrm_byte, r);
}
pub fn instr_660F6C_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F6C", r1, r2);
}
pub fn instr_660F6D_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F6D", modrm_byte, r);
}
pub fn instr_660F6D_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F6D", r1, r2);
}
// 0F 6E: MOVD mm, r/m32 (and 66-prefixed xmm variant). Pushes the 32-bit
// source value and destination register index, then calls the interpreter
// helper to do the write (which defines the zero-extension behavior).
pub fn instr_0F6E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2("instr_0F6E")
}
pub fn instr_0F6E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_0F6E")
}
pub fn instr_660F6E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve_safe_read32(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2("instr_660F6E")
}
pub fn instr_660F6E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    codegen::gen_get_reg32(ctx, r1);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_660F6E")
}
// 0F 6F load group: MOVQ mm, mm/m64; 66-prefixed MOVDQA and F3-prefixed
// MOVDQU xmm, xmm/m128.
pub fn instr_0F6F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // XXX: Aligned read or #gp
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn2_i64_i32("instr_0F6F")
}
pub fn instr_0F6F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_0F6F_reg")
}
pub fn instr_660F6F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // XXX: Aligned read or #gp
    let dest = global_pointers::get_reg_xmm_offset(r);
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_660F6F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
pub fn instr_F30F6F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVDQU: unaligned access is architecturally allowed here.
    let dest = global_pointers::get_reg_xmm_offset(r);
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
}
pub fn instr_F30F6F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r1, r2) }
// 0F 70 shuffle group: PSHUFW (MMX), and 66/F2/F3-prefixed PSHUFD/PSHUFLW/
// PSHUFHW (xmm). The xmm variants stage the 128-bit source operand in
// sse_scratch_register and pass its address plus the destination index and
// imm8 control byte to the interpreter helper.
pub fn instr_0F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
    // PSHUFW mm, m64, imm8
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3_i64_i32_i32("instr_0F70");
}
pub fn instr_0F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
    // PSHUFW mm, mm, imm8: load source mm register from its fixed offset.
    ctx.builder
        .const_i32(global_pointers::get_reg_mmx_offset(r1) as i32);
    ctx.builder.load_aligned_i64(0);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3_i64_i32_i32("instr_0F70");
}
pub fn instr_660F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
    // PSHUFD xmm, m128, imm8
    let dest = global_pointers::sse_scratch_register as u32;
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_660F70");
}
pub fn instr_660F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
    // PSHUFD xmm, xmm, imm8: copy source xmm into scratch first, which also
    // makes the r1 == r2 case safe.
    codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
    let dest = global_pointers::sse_scratch_register;
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_660F70");
}
pub fn instr_F20F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
    // PSHUFLW xmm, m128, imm8
    let dest = global_pointers::sse_scratch_register as u32;
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_F20F70");
}
pub fn instr_F20F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
    codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
    let dest = global_pointers::sse_scratch_register;
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_F20F70");
}
pub fn instr_F30F70_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32, imm8: u32) {
    // PSHUFHW xmm, m128, imm8
    let dest = global_pointers::sse_scratch_register as u32;
    codegen::gen_modrm_resolve_safe_read128(ctx, modrm_byte, dest);
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_F30F70");
}
pub fn instr_F30F70_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32, imm8: u32) {
    codegen::gen_read_reg_xmm128_into_scratch(ctx, r1);
    let dest = global_pointers::sse_scratch_register;
    ctx.builder.const_i32(dest as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn3("instr_F30F70");
}
// 0F 71 / 0F 72 / 0F 73 shift-by-immediate groups (MMX; 66-prefixed = xmm).
// Per the x86 opcode map: /2 = PSRL*, /4 = PSRA*, /6 = PSLL*, and for
// 66 0F 73 additionally /3 = PSRLDQ and /7 = PSLLDQ. These encodings are only
// defined with a register operand; the memory-operand (_mem) forms raise #UD.
// All reg forms just pass the register index and imm8 to the interpreter.
pub fn instr_0F71_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F71_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F71_2_reg");
}
pub fn instr_0F71_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F71_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F71_4_reg");
}
pub fn instr_0F71_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F71_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F71_6_reg");
}
pub fn instr_0F72_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F72_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F72_2_reg");
}
pub fn instr_0F72_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F72_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F72_4_reg");
}
pub fn instr_0F72_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F72_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F72_6_reg");
}
pub fn instr_0F73_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F73_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F73_2_reg");
}
pub fn instr_0F73_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_0F73_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_0F73_6_reg");
}
pub fn instr_660F71_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F71_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F71_2_reg");
}
pub fn instr_660F71_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F71_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F71_4_reg");
}
pub fn instr_660F71_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F71_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F71_6_reg");
}
pub fn instr_660F72_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F72_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F72_2_reg");
}
pub fn instr_660F72_4_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F72_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F72_4_reg");
}
pub fn instr_660F72_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F72_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F72_6_reg");
}
pub fn instr_660F73_2_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_2_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F73_2_reg");
}
pub fn instr_660F73_3_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_3_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F73_3_reg");
}
pub fn instr_660F73_6_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F73_6_reg");
}
pub fn instr_660F73_7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _imm: u32) {
    codegen::gen_trigger_ud(ctx);
}
pub fn instr_660F73_7_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    ctx.builder.const_i32(r as i32);
    ctx.builder.const_i32(imm8 as i32);
    ctx.builder.call_fn2("instr_660F73_7_reg");
}
// 66 0F 74: PCMPEQB xmm, xmm/m128 — forwarded to the generic 128-bit-source
// helpers with the interpreter function name.
pub fn instr_660F74_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    sse_read128_xmm_mem(ctx, "instr_660F74", modrm_byte, r);
}
pub fn instr_660F74_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    sse_read128_xmm_xmm(ctx, "instr_660F74", r1, r2);
}
// 0F 7E / 0F 7F move group: MOVD r/m32, mm (0F 7E); MOVD r/m32, xmm
// (66 0F 7E); MOVQ xmm, xmm/m64 (F3 0F 7E); MOVQ mm/m64, mm (0F 7F);
// MOVDQA/MOVDQU stores (66 / F3 0F 7F).
pub fn instr_0F7E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVD m32, mm: interpreter helper returns the low 32 bits of mm r,
    // which are then stored with a fault-checked 32-bit write.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1_ret("instr_0F7E");
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_0F7E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    // MOVD r32, mm: r2 is the mm source, r1 the destination reg32.
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn1_ret("instr_0F7E");
    codegen::gen_set_reg32(ctx, r1);
}
pub fn instr_660F7E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVD m32, xmm: low 32 bits of the xmm register are read inline from
    // its fixed offset (no helper call needed).
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder
        .load_fixed_i32(global_pointers::get_reg_xmm_offset(r));
    let value_local = ctx.builder.set_new_local();
    codegen::gen_safe_write32(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local(value_local);
}
pub fn instr_660F7E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder
        .load_fixed_i32(global_pointers::get_reg_xmm_offset(r2));
    codegen::gen_set_reg32(ctx, r1);
}
pub fn instr_0F7F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVQ m64, mm: helper returns the full 64-bit mm value.
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    let address_local = ctx.builder.set_new_local();
    ctx.builder.const_i32(r as i32);
    ctx.builder.call_fn1_ret_i64("instr_0F7F");
    let value_local = ctx.builder.set_new_local_i64();
    codegen::gen_safe_write64(ctx, &address_local, &value_local);
    ctx.builder.free_local(address_local);
    ctx.builder.free_local_i64(value_local);
}
pub fn instr_0F7F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_0F7F_reg")
}
pub fn instr_F30F7E_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVQ xmm, m64: write the 64-bit value into the low half of xmm r
    // and explicitly zero the high half.
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r) as i32);
    codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
    ctx.builder.store_aligned_i64(0);
    ctx.builder
        .const_i32(global_pointers::get_reg_xmm_offset(r) as i32 + 8);
    ctx.builder.const_i64(0);
    ctx.builder.store_aligned_i64(0);
}
pub fn instr_F30F7E_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    ctx.builder.const_i32(r1 as i32);
    ctx.builder.const_i32(r2 as i32);
    ctx.builder.call_fn2("instr_F30F7E_reg");
}
pub fn instr_660F7F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVDQA store: reuses the (alignment-unchecked) 128-bit store of 0F 29.
    instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_660F7F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
pub fn instr_F30F7F_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    // MOVDQU store (unaligned allowed).
    instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_F30F7F_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) { sse_mov_xmm_xmm(ctx, r2, r1) }
// PUSH FS (0F A0) and PUSH GS (0F A8), in 16- and 32-bit operand-size
// variants: read the segment register value into a local, push it.
pub fn instr16_0FA0_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::FS);
    let sreg = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &sreg);
    ctx.builder.free_local(sreg);
}
pub fn instr32_0FA0_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::FS);
    let sreg = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &sreg);
    ctx.builder.free_local(sreg);
}
pub fn instr16_0FA8_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::GS);
    let sreg = ctx.builder.set_new_local();
    codegen::gen_push16(ctx, &sreg);
    ctx.builder.free_local(sreg);
}
pub fn instr32_0FA8_jit(ctx: &mut JitContext) {
    codegen::gen_get_sreg(ctx, regs::GS);
    let sreg = ctx.builder.set_new_local();
    codegen::gen_push32(ctx, &sreg);
    ctx.builder.free_local(sreg);
}
// 0F A3: BT r/m, r. Register forms test a bit in the register local directly
// (gen_bt masks the bit offset with 15 / 31). Memory forms implement the
// architectural "bit string" addressing: the effective address is displaced
// by the sign-extended bit offset divided by 8 (arithmetic shift, so negative
// offsets move the address down), then a single byte is read and bit
// (offset & 7) is tested.
pub fn instr16_0FA3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_bt(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        15,
    )
}
pub fn instr16_0FA3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    // 16-bit offsets are signed, so sign-extend before the >> 3.
    codegen::gen_get_reg16(ctx, r);
    codegen::sign_extend_i16(ctx.builder);
    ctx.builder.const_i32(3);
    ctx.builder.shr_s_i32();
    ctx.builder.add_i32();
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    let value = ctx.builder.set_new_local();
    gen_bt(
        &mut ctx.builder,
        &value,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
        7,
    );
    ctx.builder.free_local(value);
}
pub fn instr32_0FA3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_bt(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        31,
    )
}
pub fn instr32_0FA3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    codegen::gen_get_reg32(ctx, r);
    ctx.builder.const_i32(3);
    ctx.builder.shr_s_i32();
    ctx.builder.add_i32();
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    let value = ctx.builder.set_new_local();
    gen_bt(
        &mut ctx.builder,
        &value,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize]),
        7,
    );
    ctx.builder.free_local(value);
}
// 0F AB: BTS r/m, r (bit test and set). Register forms operate on the
// register local in place; memory forms go through the generic
// read-modify-write bit helper with the operand size in bits.
pub fn instr16_0FAB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_bts(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        15,
    )
}
pub fn instr16_0FAB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_bts,
        // unsafe_clone: gen_bit_rmw needs ctx mutably at the same time as
        // this borrowed register local.
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
        16,
    );
}
pub fn instr32_0FAB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_bts(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        31,
    )
}
pub fn instr32_0FAB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_bts,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
        32,
    );
}
// 0F B3: BTR r/m, r (bit test and reset). Same structure as BTS above,
// with gen_btr as the bit operation.
pub fn instr16_0FB3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_btr(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        15,
    )
}
pub fn instr16_0FB3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btr,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
        16,
    );
}
pub fn instr32_0FB3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_btr(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        31,
    )
}
pub fn instr32_0FB3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btr,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
        32,
    );
}
// 0F BB: BTC r/m, r (bit test and complement). Same structure as BTS/BTR,
// with gen_btc as the bit operation.
pub fn instr16_0FBB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_btc(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        15,
    )
}
pub fn instr16_0FBB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btc,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
        16,
    );
}
pub fn instr32_0FBB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
    gen_btc(
        &mut ctx.builder,
        &ctx.register_locals[r1 as usize],
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r2 as usize]),
        31,
    )
}
pub fn instr32_0FBB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
    gen_bit_rmw(
        ctx,
        modrm_byte,
        &gen_btc,
        &LocalOrImmediate::WasmLocal(&ctx.register_locals[r as usize].unsafe_clone()),
        32,
    );
}
// 0F BA /4: BT r/m, imm8. Unlike the register-offset BT (0F A3), the
// immediate bit offset is taken modulo the operand size, so the memory
// forms displace the address by only (imm8 & 15) >> 3 resp. (imm8 & 31) >> 3
// bytes before reading one byte and testing bit (imm8 & 7).
pub fn instr16_0FBA_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_bt(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        15,
    )
}
pub fn instr16_0FBA_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    ctx.builder.const_i32((imm8 as i32 & 15) >> 3);
    ctx.builder.add_i32();
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    let value = ctx.builder.set_new_local();
    gen_bt(
        &mut ctx.builder,
        &value,
        &LocalOrImmediate::Immediate(imm8 as i32),
        7,
    );
    ctx.builder.free_local(value);
}
pub fn instr32_0FBA_4_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
    gen_bt(
        &mut ctx.builder,
        &ctx.register_locals[r as usize],
        &LocalOrImmediate::Immediate(imm8 as i32),
        31,
    )
}
pub fn instr32_0FBA_4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
    codegen::gen_modrm_resolve(ctx, modrm_byte);
    ctx.builder.const_i32((imm8 as i32 & 31) >> 3);
    ctx.builder.add_i32();
    let address_local = ctx.builder.set_new_local();
    codegen::gen_safe_read8(ctx, &address_local);
    ctx.builder.free_local(address_local);
    let value = ctx.builder.set_new_local();
    gen_bt(
        &mut ctx.builder,
        &value,
        &LocalOrImmediate::Immediate(imm8 as i32),
        7,
    );
    ctx.builder.free_local(value);
}
// JIT code generators for BTS with an immediate bit index, opcode 0F BA /5.
// Unlike BT (/4), BTS writes the bit back, so the mem forms use gen_bit_rmw.
pub fn instr16_0FBA_5_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_bts(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
15,
)
}
pub fn instr16_0FBA_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_bts,
&LocalOrImmediate::Immediate(imm8 as i32),
16,
);
}
pub fn instr32_0FBA_5_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_bts(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
31,
)
}
pub fn instr32_0FBA_5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_bts,
&LocalOrImmediate::Immediate(imm8 as i32),
32,
);
}
// JIT code generators for BTR with an immediate bit index, opcode 0F BA /6.
pub fn instr16_0FBA_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_btr(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
15,
)
}
pub fn instr16_0FBA_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btr,
&LocalOrImmediate::Immediate(imm8 as i32),
16,
);
}
pub fn instr32_0FBA_6_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_btr(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
31,
)
}
pub fn instr32_0FBA_6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btr,
&LocalOrImmediate::Immediate(imm8 as i32),
32,
);
}
// JIT code generators for BTC with an immediate bit index, opcode 0F BA /7.
pub fn instr16_0FBA_7_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_btc(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
15,
)
}
pub fn instr16_0FBA_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btc,
&LocalOrImmediate::Immediate(imm8 as i32),
16,
);
}
pub fn instr32_0FBA_7_reg_jit(ctx: &mut JitContext, r: u32, imm8: u32) {
gen_btc(
&mut ctx.builder,
&ctx.register_locals[r as usize],
&LocalOrImmediate::Immediate(imm8 as i32),
31,
)
}
pub fn instr32_0FBA_7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, imm8: u32) {
gen_bit_rmw(
ctx,
modrm_byte,
&gen_btc,
&LocalOrImmediate::Immediate(imm8 as i32),
32,
);
}
// Opcode 0F AE /5: the memory form is unimplemented and generates #UD;
// the register form (LFENCE on real hardware — presumably a no-op for this
// emulator's memory model; confirm) emits no code at all.
pub fn instr_0FAE_5_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte) {
dbg_log!("Generating #ud for unimplemented instruction: instr_0FAE_5_mem_jit");
codegen::gen_trigger_ud(ctx);
}
pub fn instr_0FAE_5_reg_jit(_ctx: &mut JitContext, _r: u32) {
// For this instruction, the processor ignores the r/m field of the ModR/M byte.
}
// MMX group 0F D1 – 0F DF (packed shift/arithmetic/logic ops). Each opcode
// delegates to a non-JIT helper of the same name: mmx_read64_mm_mem emits a
// 64-bit memory read and a call with (value, mm register), mmx_read64_mm_mm
// the register-register equivalent. 0F D7 is the exception (see below).
pub fn instr_0FD1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD1", modrm_byte, r);
}
pub fn instr_0FD1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD1", r1, r2);
}
pub fn instr_0FD2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD2", modrm_byte, r);
}
pub fn instr_0FD2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD2", r1, r2);
}
pub fn instr_0FD3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD3", modrm_byte, r);
}
pub fn instr_0FD3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD3", r1, r2);
}
pub fn instr_0FD4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD4", modrm_byte, r);
}
pub fn instr_0FD4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD4", r1, r2);
}
pub fn instr_0FD5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD5", modrm_byte, r);
}
pub fn instr_0FD5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD5", r1, r2);
}
// 0F D7: the memory form is undefined (#UD); the register form calls a
// helper that returns a 32-bit result written to GPR r2 (presumably
// PMOVMSKB's byte mask — confirm against instr_0FD7).
pub fn instr_0FD7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
codegen::gen_trigger_ud(ctx)
}
pub fn instr_0FD7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.call_fn1_ret("instr_0FD7");
codegen::gen_set_reg32(ctx, r2);
}
pub fn instr_0FD8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD8", modrm_byte, r);
}
pub fn instr_0FD8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD8", r1, r2);
}
pub fn instr_0FD9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FD9", modrm_byte, r);
}
pub fn instr_0FD9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FD9", r1, r2);
}
pub fn instr_0FDA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FDA", modrm_byte, r);
}
pub fn instr_0FDA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FDA", r1, r2);
}
pub fn instr_0FDB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FDB", modrm_byte, r);
}
pub fn instr_0FDB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FDB", r1, r2);
}
pub fn instr_0FDC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FDC", modrm_byte, r);
}
pub fn instr_0FDC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FDC", r1, r2);
}
pub fn instr_0FDD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FDD", modrm_byte, r);
}
pub fn instr_0FDD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FDD", r1, r2);
}
pub fn instr_0FDE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FDE", modrm_byte, r);
}
pub fn instr_0FDE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FDE", r1, r2);
}
pub fn instr_0FDF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FDF", modrm_byte, r);
}
pub fn instr_0FDF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FDF", r1, r2);
}
// SSE2 group 66 0F D1 – 66 0F D6 (the 66-prefixed XMM counterparts of the
// MMX group above). sse_read128_xmm_mem/sse_read128_xmm_xmm emit a 128-bit
// operand read plus a call to the named non-JIT helper.
pub fn instr_660FD1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD1", modrm_byte, r);
}
pub fn instr_660FD1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD1", r1, r2);
}
pub fn instr_660FD2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD2", modrm_byte, r);
}
pub fn instr_660FD2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD2", r1, r2);
}
pub fn instr_660FD3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD3", modrm_byte, r);
}
pub fn instr_660FD3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD3", r1, r2);
}
pub fn instr_660FD4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD4", modrm_byte, r);
}
pub fn instr_660FD4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD4", r1, r2);
}
pub fn instr_660FD5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD5", modrm_byte, r);
}
pub fn instr_660FD5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD5", r1, r2);
}
// 66 0F D6 (MOVQ r/m64, xmm), memory form: store the low 64 bits of the
// source XMM register to memory. The value is loaded inline from the CPU
// state area (reg_xmm offset) and written with a page-fault-safe store.
pub fn instr_660FD6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve(ctx, modrm_byte);
let address_local = ctx.builder.set_new_local();
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r) as i32);
ctx.builder.load_aligned_i64(0);
let value_local = ctx.builder.set_new_local_i64();
codegen::gen_safe_write64(ctx, &address_local, &value_local);
ctx.builder.free_local(address_local);
ctx.builder.free_local_i64(value_local);
}
// Register form: delegated entirely to the non-JIT helper.
pub fn instr_660FD6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2("instr_660FD6_reg");
}
// SSE2 group 66 0F D7 – 66 0F DF. 66 0F D7's memory form is #UD; its
// register form calls a helper whose 32-bit result lands in GPR r2
// (presumably PMOVMSKB xmm — confirm against instr_660FD7). The rest are
// plain 128-bit read-and-call wrappers.
pub fn instr_660FD7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
codegen::gen_trigger_ud(ctx)
}
pub fn instr_660FD7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder.const_i32(r1 as i32);
ctx.builder.call_fn1_ret("instr_660FD7");
codegen::gen_set_reg32(ctx, r2);
}
pub fn instr_660FD8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD8", modrm_byte, r);
}
pub fn instr_660FD8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD8", r1, r2);
}
pub fn instr_660FD9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FD9", modrm_byte, r);
}
pub fn instr_660FD9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FD9", r1, r2);
}
pub fn instr_660FDA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FDA", modrm_byte, r);
}
pub fn instr_660FDA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FDA", r1, r2);
}
pub fn instr_660FDB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FDB", modrm_byte, r);
}
pub fn instr_660FDB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FDB", r1, r2);
}
pub fn instr_660FDC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FDC", modrm_byte, r);
}
pub fn instr_660FDC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FDC", r1, r2);
}
pub fn instr_660FDD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FDD", modrm_byte, r);
}
pub fn instr_660FDD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FDD", r1, r2);
}
pub fn instr_660FDE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FDE", modrm_byte, r);
}
pub fn instr_660FDE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FDE", r1, r2);
}
pub fn instr_660FDF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FDF", modrm_byte, r);
}
pub fn instr_660FDF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FDF", r1, r2);
}
// MMX group 0F E0 – 0F EF (0F E6 and 0F E7 are handled elsewhere):
// uniform 64-bit read-and-call wrappers, as in the 0F D1 group.
pub fn instr_0FE0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE0", modrm_byte, r);
}
pub fn instr_0FE0_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE0", r1, r2);
}
pub fn instr_0FE1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE1", modrm_byte, r);
}
pub fn instr_0FE1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE1", r1, r2);
}
pub fn instr_0FE2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE2", modrm_byte, r);
}
pub fn instr_0FE2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE2", r1, r2);
}
pub fn instr_0FE3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE3", modrm_byte, r);
}
pub fn instr_0FE3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE3", r1, r2);
}
pub fn instr_0FE4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE4", modrm_byte, r);
}
pub fn instr_0FE4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE4", r1, r2);
}
pub fn instr_0FE5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE5", modrm_byte, r);
}
pub fn instr_0FE5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE5", r1, r2);
}
pub fn instr_0FE8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE8", modrm_byte, r);
}
pub fn instr_0FE8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE8", r1, r2);
}
pub fn instr_0FE9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FE9", modrm_byte, r);
}
pub fn instr_0FE9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FE9", r1, r2);
}
pub fn instr_0FEA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FEA", modrm_byte, r);
}
pub fn instr_0FEA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FEA", r1, r2);
}
pub fn instr_0FEB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FEB", modrm_byte, r);
}
pub fn instr_0FEB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FEB", r1, r2);
}
pub fn instr_0FEC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FEC", modrm_byte, r);
}
pub fn instr_0FEC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FEC", r1, r2);
}
pub fn instr_0FED_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FED", modrm_byte, r);
}
pub fn instr_0FED_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FED", r1, r2);
}
pub fn instr_0FEE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FEE", modrm_byte, r);
}
pub fn instr_0FEE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FEE", r1, r2);
}
pub fn instr_0FEF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FEF", modrm_byte, r);
}
pub fn instr_0FEF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FEF", r1, r2);
}
// SSE2 group 66 0F E0 – 66 0F E7 plus the F2/F3-prefixed 0F E6 variants.
// Most are 128-bit read-and-call wrappers; F3 0F E6 and 66 0F E7 are
// special-cased below.
pub fn instr_660FE0_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE0", modrm_byte, r);
}
pub fn instr_660FE0_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE0", r1, r2);
}
pub fn instr_660FE1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE1", modrm_byte, r);
}
pub fn instr_660FE1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE1", r1, r2);
}
pub fn instr_660FE2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE2", modrm_byte, r);
}
pub fn instr_660FE2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE2", r1, r2);
}
pub fn instr_660FE3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE3", modrm_byte, r);
}
pub fn instr_660FE3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE3", r1, r2);
}
pub fn instr_660FE4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE4", modrm_byte, r);
}
pub fn instr_660FE4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE4", r1, r2);
}
pub fn instr_660FE5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE5", modrm_byte, r);
}
pub fn instr_660FE5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE5", r1, r2);
}
pub fn instr_660FE6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE6", modrm_byte, r);
}
pub fn instr_660FE6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE6", r1, r2);
}
pub fn instr_F20FE6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_F20FE6", modrm_byte, r);
}
pub fn instr_F20FE6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_F20FE6", r1, r2);
}
// F3 0F E6 only consumes a 64-bit source, so it uses a 64-bit read (mem
// form) or an inline load of the low half of the source XMM (reg form)
// instead of the 128-bit helpers.
pub fn instr_F30FE6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
codegen::gen_modrm_resolve_safe_read64(ctx, modrm_byte);
ctx.builder.const_i32(r as i32);
ctx.builder.call_fn2_i64_i32("instr_F30FE6")
}
pub fn instr_F30FE6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
ctx.builder
.const_i32(global_pointers::get_reg_xmm_offset(r1) as i32);
ctx.builder.load_aligned_i64(0);
ctx.builder.const_i32(r2 as i32);
ctx.builder.call_fn2_i64_i32("instr_F30FE6")
}
// 66 0F E7 (non-temporal 128-bit store): the memory form reuses the
// 0F 29 (MOVAPS-store) code path; the register form is #UD.
pub fn instr_660FE7_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
instr_0F29_mem_jit(ctx, modrm_byte, r);
}
pub fn instr_660FE7_reg_jit(ctx: &mut JitContext, _r1: u32, _r2: u32) {
codegen::gen_trigger_ud(ctx);
}
// SSE2 group 66 0F E8 – 66 0F EF: uniform 128-bit read-and-call wrappers.
pub fn instr_660FE8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE8", modrm_byte, r);
}
pub fn instr_660FE8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE8", r1, r2);
}
pub fn instr_660FE9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FE9", modrm_byte, r);
}
pub fn instr_660FE9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FE9", r1, r2);
}
pub fn instr_660FEA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FEA", modrm_byte, r);
}
pub fn instr_660FEA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FEA", r1, r2);
}
pub fn instr_660FEB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FEB", modrm_byte, r);
}
pub fn instr_660FEB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FEB", r1, r2);
}
pub fn instr_660FEC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FEC", modrm_byte, r);
}
pub fn instr_660FEC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FEC", r1, r2);
}
pub fn instr_660FED_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FED", modrm_byte, r);
}
pub fn instr_660FED_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FED", r1, r2);
}
pub fn instr_660FEE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FEE", modrm_byte, r);
}
pub fn instr_660FEE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FEE", r1, r2);
}
pub fn instr_660FEF_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FEF", modrm_byte, r);
}
pub fn instr_660FEF_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FEF", r1, r2);
}
// MMX group 0F F1 – 0F F6: uniform 64-bit read-and-call wrappers.
pub fn instr_0FF1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF1", modrm_byte, r);
}
pub fn instr_0FF1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF1", r1, r2);
}
pub fn instr_0FF2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF2", modrm_byte, r);
}
pub fn instr_0FF2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF2", r1, r2);
}
pub fn instr_0FF3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF3", modrm_byte, r);
}
pub fn instr_0FF3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF3", r1, r2);
}
pub fn instr_0FF4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF4", modrm_byte, r);
}
pub fn instr_0FF4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF4", r1, r2);
}
pub fn instr_0FF5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF5", modrm_byte, r);
}
pub fn instr_0FF5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF5", r1, r2);
}
pub fn instr_0FF6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF6", modrm_byte, r);
}
pub fn instr_0FF6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF6", r1, r2);
}
// 0F F7 (MASKMOVQ, per the "maskmovq" call below): the memory form is #UD;
// the register form performs a byte-masked store to [DS:(E)DI].
pub fn instr_0FF7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
codegen::gen_trigger_ud(ctx)
}
pub fn instr_0FF7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
// Record the instruction's EIP so a fault inside the helper reports the
// correct address.
codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
ctx.builder,
ctx.start_of_current_instruction as i32 & 0xFFF,
);
// The helper works on the in-memory register file, so spill the WASM
// register locals first and reload them afterwards.
codegen::gen_move_registers_from_locals_to_memory(ctx);
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
// Destination address: (E)DI depending on address size, plus the DS
// segment base.
if ctx.cpu.asize_32() {
codegen::gen_get_reg32(ctx, regs::EDI);
}
else {
codegen::gen_get_reg16(ctx, regs::DI);
}
jit_add_seg_offset(ctx, regs::DS);
ctx.builder.call_fn3("maskmovq");
codegen::gen_move_registers_from_memory_to_locals(ctx);
// If the helper raised a page fault, bail out of the generated code.
codegen::gen_get_page_fault(ctx.builder);
ctx.builder.if_void();
codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
codegen::gen_move_registers_from_locals_to_memory(ctx);
ctx.builder.return_();
ctx.builder.block_end();
}
// MMX group 0F F8 – 0F FE: uniform 64-bit read-and-call wrappers.
pub fn instr_0FF8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF8", modrm_byte, r);
}
pub fn instr_0FF8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF8", r1, r2);
}
pub fn instr_0FF9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FF9", modrm_byte, r);
}
pub fn instr_0FF9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FF9", r1, r2);
}
pub fn instr_0FFA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FFA", modrm_byte, r);
}
pub fn instr_0FFA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FFA", r1, r2);
}
pub fn instr_0FFB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FFB", modrm_byte, r);
}
pub fn instr_0FFB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FFB", r1, r2);
}
pub fn instr_0FFC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FFC", modrm_byte, r);
}
pub fn instr_0FFC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FFC", r1, r2);
}
pub fn instr_0FFD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FFD", modrm_byte, r);
}
pub fn instr_0FFD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FFD", r1, r2);
}
pub fn instr_0FFE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
mmx_read64_mm_mem(ctx, "instr_0FFE", modrm_byte, r);
}
pub fn instr_0FFE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
mmx_read64_mm_mm(ctx, "instr_0FFE", r1, r2);
}
// SSE2 group 66 0F F1 – 66 0F F6: uniform 128-bit read-and-call wrappers.
pub fn instr_660FF1_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF1", modrm_byte, r);
}
pub fn instr_660FF1_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF1", r1, r2);
}
pub fn instr_660FF2_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF2", modrm_byte, r);
}
pub fn instr_660FF2_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF2", r1, r2);
}
pub fn instr_660FF3_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF3", modrm_byte, r);
}
pub fn instr_660FF3_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF3", r1, r2);
}
pub fn instr_660FF4_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF4", modrm_byte, r);
}
pub fn instr_660FF4_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF4", r1, r2);
}
pub fn instr_660FF5_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF5", modrm_byte, r);
}
pub fn instr_660FF5_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF5", r1, r2);
}
pub fn instr_660FF6_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF6", modrm_byte, r);
}
pub fn instr_660FF6_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF6", r1, r2);
}
// 66 0F F7 (MASKMOVDQU, per the "maskmovdqu" call below): XMM analogue of
// 0F F7 — memory form is #UD, register form does a byte-masked store to
// [DS:(E)DI]. Structure is identical to instr_0FF7_reg_jit.
pub fn instr_660FF7_mem_jit(ctx: &mut JitContext, _modrm_byte: ModrmByte, _r: u32) {
codegen::gen_trigger_ud(ctx)
}
pub fn instr_660FF7_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
// Record this instruction's EIP for accurate fault reporting.
codegen::gen_set_previous_eip_offset_from_eip_with_low_bits(
ctx.builder,
ctx.start_of_current_instruction as i32 & 0xFFF,
);
// Spill WASM register locals: the helper reads the in-memory registers.
codegen::gen_move_registers_from_locals_to_memory(ctx);
ctx.builder.const_i32(r1 as i32);
ctx.builder.const_i32(r2 as i32);
if ctx.cpu.asize_32() {
codegen::gen_get_reg32(ctx, regs::EDI);
}
else {
codegen::gen_get_reg16(ctx, regs::DI);
}
jit_add_seg_offset(ctx, regs::DS);
ctx.builder.call_fn3("maskmovdqu");
codegen::gen_move_registers_from_memory_to_locals(ctx);
// On page fault inside the helper, exit the generated code cleanly.
codegen::gen_get_page_fault(ctx.builder);
ctx.builder.if_void();
codegen::gen_debug_track_jit_exit(ctx.builder, ctx.start_of_current_instruction);
codegen::gen_move_registers_from_locals_to_memory(ctx);
ctx.builder.return_();
ctx.builder.block_end();
}
// SSE2 group 66 0F F8 – 66 0F FE: uniform 128-bit read-and-call wrappers.
pub fn instr_660FF8_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF8", modrm_byte, r);
}
pub fn instr_660FF8_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF8", r1, r2);
}
pub fn instr_660FF9_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FF9", modrm_byte, r);
}
pub fn instr_660FF9_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FF9", r1, r2);
}
pub fn instr_660FFA_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FFA", modrm_byte, r);
}
pub fn instr_660FFA_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FFA", r1, r2);
}
pub fn instr_660FFB_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FFB", modrm_byte, r);
}
pub fn instr_660FFB_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FFB", r1, r2);
}
pub fn instr_660FFC_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FFC", modrm_byte, r);
}
pub fn instr_660FFC_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FFC", r1, r2);
}
pub fn instr_660FFD_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FFD", modrm_byte, r);
}
pub fn instr_660FFD_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FFD", r1, r2);
}
pub fn instr_660FFE_mem_jit(ctx: &mut JitContext, modrm_byte: ModrmByte, r: u32) {
sse_read128_xmm_mem(ctx, "instr_660FFE", modrm_byte, r);
}
pub fn instr_660FFE_reg_jit(ctx: &mut JitContext, r1: u32, r2: u32) {
sse_read128_xmm_xmm(ctx, "instr_660FFE", r1, r2);
}
/*!
How to use an external rendering API with the NWG ExternalCanvas.
Also show how NWG controls can be subclassed
Requires the following features: `cargo run --example opengl_canvas --features "color-dialog extern-canvas"`
*/
extern crate glutin;
extern crate gl;
#[macro_use] extern crate native_windows_gui as nwg;
use std::cell::RefCell;
use crate::glutin::{
ContextBuilder, GlRequest, GlProfile, PossiblyCurrent, RawContext, Api,
dpi::PhysicalSize,
os::windows::RawContextExt
};
use crate::nwg::NativeUi;
type Ctx = RawContext<PossiblyCurrent>;
/**
Specialize the canvas.
To register a custom struct as a NWG control with full support you need to implement 4 traits:
* Deref
* DerefMut
* Into<nwg::ControlHandle>
* PartialEq<SubclassControl> for nwg::ControlHandle
You can either do it manually or use the `subclass_control!(type, base_type, field)` macro.
*/
/// An OpenGL-capable control: an NWG `ExternCanvas` paired with the raw
/// glutin context that renders into its window handle.
#[derive(Default)]
pub struct OpenGlCanvas {
// Created lazily by `create_context`; `None` until then, hence the Option.
// RefCell allows creating/using the context through `&self` (NWG passes
// shared references to controls).
ctx: RefCell<Option<Ctx>>,
canvas: nwg::ExternCanvas,
}
impl OpenGlCanvas {
/// Create an opengl canvas with glutin & gl
pub fn create_context(&self) {
use std::ffi::c_void;
use std::{mem, ptr};
unsafe {
let ctx = ContextBuilder::new()
.with_gl(GlRequest::Specific(Api::OpenGl, (3, 3)))
.with_gl_profile(GlProfile::Core)
.build_raw_context(self.canvas.handle.hwnd().unwrap() as *mut c_void)
.expect("Failed to build opengl context")
.make_current()
.expect("Failed to set opengl context as current");
// Load the function pointers
gl::Clear::load_with(|s| ctx.get_proc_address(s) as *const _);
gl::ClearColor::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::CreateShader::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::ShaderSource::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::CompileShader::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::CreateProgram::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::AttachShader::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::LinkProgram::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::UseProgram::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::GenBuffers::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BindBuffer::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BufferData::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::GetAttribLocation::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::VertexAttribPointer::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::EnableVertexAttribArray::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::GenVertexArrays::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BindVertexArray::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::DrawArrays::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::Viewport::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BufferSubData::load_with(|s| ctx.get_proc_address(s) as *const _ );
// Init default state
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
let vs = gl::CreateShader(gl::VERTEX_SHADER);
gl::ShaderSource(vs, 1, [VS_SRC.as_ptr() as *const _].as_ptr(), ptr::null());
gl::CompileShader(vs);
let fs = gl::CreateShader(gl::FRAGMENT_SHADER);
gl::ShaderSource(fs, 1, [FS_SRC.as_ptr() as *const _].as_ptr(), ptr::null());
gl::CompileShader(fs);
let program = gl::CreateProgram();
gl::AttachShader(program, vs);
gl::AttachShader(program, fs);
gl::LinkProgram(program);
gl::UseProgram(program);
let vertex_data: &[f32] = &[
0.0, 1.0, 1.0, 1.0, 1.0,
-1.0, -1.0, 1.0, 1.0, 1.0,
1.0, -1.0, 1.0, 1.0, 1.0,
];
let vertex_size = vertex_data.len() * mem::size_of::<f32>();
let mut vb = mem::zeroed();
gl::GenBuffers(1, &mut vb);
gl::BindBuffer(gl::ARRAY_BUFFER, vb);
gl::BufferData(
gl::ARRAY_BUFFER,
vertex_size as gl::types::GLsizeiptr,
vertex_data.as_ptr() as *const _,
gl::STATIC_DRAW,
);
let mut vao = mem::zeroed();
gl::GenVertexArrays(1, &mut vao);
gl::BindVertexArray(vao);
gl::EnableVertexAttribArray(0);
gl::EnableVertexAttribArray(1);
let stride = mem::size_of::<f32>() * 5;
let color_offset = 8 as *const c_void;
gl::VertexAttribPointer(0, 2, gl::FLOAT, 0, stride as i32, ptr::null());
gl::VertexAttribPointer(1, 4, gl::FLOAT, 0, stride as i32, color_offset);
*self.ctx.borrow_mut() = Some(ctx);
}
}
/// Draw one frame: clear, draw the triangle, and present the back buffer.
/// Does nothing until `create_context` has stored a GL context.
pub fn render(&self) {
    // `if let` instead of `Option::map` for a purely side-effecting call
    // (clippy::option_map_unit_fn).
    if let Some(ctx) = self.ctx.borrow().as_ref() {
        unsafe {
            gl::Clear(gl::COLOR_BUFFER_BIT);
            gl::DrawArrays(gl::TRIANGLES, 0, 3);
        }
        ctx.swap_buffers().unwrap();
    }
}
/// Match the GL viewport and the swap chain to the canvas size, then redraw.
/// Does nothing until `create_context` has stored a GL context.
pub fn resize(&self) {
    // `if let` instead of `Option::map` for a purely side-effecting call
    // (clippy::option_map_unit_fn).
    if let Some(ctx) = self.ctx.borrow().as_ref() {
        let (w, h) = self.canvas.physical_size();
        unsafe {
            gl::Viewport(0, 0, w as _, h as _);
        }
        ctx.resize(PhysicalSize::new(w as f64, h as f64));
    }
    self.render();
}
}
// Generates Deref/DerefMut to nwg::ExternCanvas, Into<nwg::ControlHandle> and
// the ControlHandle PartialEq impl, so OpenGlCanvas can be used anywhere a
// plain nwg::ExternCanvas is expected (see the expansion sketched below).
subclass_control!(OpenGlCanvas, ExternCanvas, canvas);
// subclass_control generates the following code
/*
use std::ops::{Deref, DerefMut};
impl Deref for OpenGlCanvas {
type Target = nwg::ExternCanvas;
fn deref(&self) -> &nwg::ExternCanvas { &self.canvas }
}
impl DerefMut for OpenGlCanvas {
fn deref_mut(&mut self) -> &mut Self::Target {&mut self.canvas }
}
impl Into<nwg::ControlHandle> for &OpenGlCanvas {
fn into(self) -> nwg::ControlHandle { self.canvas.handle.clone() }
}
impl PartialEq<OpenGlCanvas> for nwg::ControlHandle {
fn eq(&self, other: &OpenGlCanvas) -> bool {
*self == other.handle
}
}*/
/**
The Ui application. Spoiler alert, there's nothing much different from the other examples.
*/
/// Top-level application state: the main window, its grid layout, the
/// subclassed OpenGL canvas, and a shared color dialog with the two buttons
/// that trigger it.
#[derive(Default)]
pub struct ExternCanvas {
window: nwg::Window,
layout: nwg::GridLayout,
canvas: OpenGlCanvas,
// Single dialog reused by both color buttons.
color_dialog: nwg::ColorDialog,
// "Background color" button (see build_ui).
choose_color_btn1: nwg::Button,
// "Triangle color" button (see build_ui).
choose_color_btn2: nwg::Button,
}
impl ExternCanvas {
    /// Show the main window and give it keyboard focus.
    pub fn show(&self) {
        self.window.set_visible(true);
        self.window.set_focus();
    }

    /// Stop the NWG event dispatch loop, ending the application.
    pub fn exit(&self) {
        nwg::stop_thread_dispatch();
    }

    /// Forward a window resize to the OpenGL canvas.
    pub fn resize_canvas(&self) {
        self.canvas.resize();
    }

    /// Ask the user for a color and install it as the GL clear color.
    pub fn select_bg_color(&self) {
        if self.color_dialog.run(Some(&self.window)) {
            let [r, g, b] = self.color_dialog.color();
            // Dialog channels are 8-bit (0-255); normalize to the 0.0-1.0
            // range OpenGL expects. (Was `/ 225.0`, which skewed every
            // channel ~13% too bright and could exceed 1.0.)
            let [r, g, b] = [r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0];
            unsafe {
                gl::ClearColor(r, g, b, 1.0);
            }
        }
        self.window.invalidate();
    }

    /// Ask the user for a color and re-upload the triangle's vertex colors.
    pub fn select_tri_color(&self) {
        use std::mem;
        if self.color_dialog.run(Some(&self.window)) {
            let [r, g, b] = self.color_dialog.color();
            // Normalize 8-bit channels to 0.0-1.0 (was `/ 225.0`, a typo).
            let [r, g, b] = [r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0];
            // Interleaved layout: x, y, r, g, b per vertex — must match the
            // layout uploaded by create_context.
            let vertex_data: &[f32] = &[
                0.0, 1.0, r, g, b,
                -1.0, -1.0, r, g, b,
                1.0, -1.0, r, g, b,
            ];
            let vertex_size = vertex_data.len() * mem::size_of::<f32>();
            unsafe {
                // Overwrite the VBO in place; it is still bound to
                // ARRAY_BUFFER from create_context.
                gl::BufferSubData(gl::ARRAY_BUFFER, 0, vertex_size as gl::types::GLsizeiptr, vertex_data.as_ptr() as *const _);
            }
        }
        self.window.invalidate();
    }
}
mod extern_canvas_ui {
    use native_windows_gui as nwg;
    use super::*;
    use std::rc::Rc;
    use std::ops::Deref;

    /// Owns the application state together with the event handlers bound to it.
    pub struct ExternCanvasUi {
        inner: ExternCanvas,
        default_handler: RefCell<Vec<nwg::EventHandler>>
    }

    impl nwg::NativeUi<ExternCanvas, Rc<ExternCanvasUi>> for ExternCanvas {
        /// Builds every control, binds the event handlers and lays out the window.
        fn build_ui(mut data: ExternCanvas) -> Result<Rc<ExternCanvasUi>, nwg::NwgError> {
            use nwg::Event as E;

            // Resources
            nwg::ColorDialog::builder()
                .build(&mut data.color_dialog)?;

            // Controls
            nwg::Window::builder()
                .flags(nwg::WindowFlags::MAIN_WINDOW)
                .size((600, 500))
                .position((300, 300))
                .title("Native windows GUI / OpenGL")
                .build(&mut data.window)?;

            nwg::ExternCanvas::builder()
                .parent(Some(&data.window))
                .build(&mut data.canvas)?;

            nwg::Button::builder()
                .text("Background color")
                .parent(&data.window)
                .build(&mut data.choose_color_btn1)?;

            nwg::Button::builder()
                .text("Triangle color")
                .parent(&data.window)
                .build(&mut data.choose_color_btn2)?;

            // Wrap-up
            let ui = Rc::new(ExternCanvasUi {
                inner: data,
                default_handler: RefCell::default(),
            });

            // Events
            // NOTE(review): the closure keeps a strong Rc to the UI while being
            // stored inside that same UI, so this is a reference cycle;
            // `destroy()` must be called (as `main` does) or the UI leaks.
            // A Weak reference would avoid the cycle — confirm intent.
            let window_handles = [&ui.window.handle];
            for handle in window_handles.iter() {
                let evt_ui = ui.clone();
                let handle_events = move |evt, _evt_data, handle| {
                    match evt {
                        E::OnResize => {
                            if &handle == &evt_ui.canvas {
                                ExternCanvas::resize_canvas(&evt_ui.inner);
                            }
                        },
                        E::OnButtonClick => {
                            if &handle == &evt_ui.choose_color_btn1 {
                                ExternCanvas::select_bg_color(&evt_ui.inner);
                            } else if &handle == &evt_ui.choose_color_btn2 {
                                ExternCanvas::select_tri_color(&evt_ui.inner);
                            }
                        },
                        E::OnWindowClose => {
                            if &handle == &evt_ui.window {
                                ExternCanvas::exit(&evt_ui.inner);
                            }
                        },
                        E::OnInit => {
                            if &handle == &evt_ui.window {
                                ExternCanvas::show(&evt_ui.inner);
                            }
                        },
                        _ => {}
                    }
                };
                ui.default_handler.borrow_mut().push(
                    nwg::full_bind_event_handler(handle, handle_events)
                );
            }

            // Layouts
            // Fix: propagate layout construction errors instead of silently
            // discarding the Result (the call was missing `?`).
            nwg::GridLayout::builder()
                .parent(&ui.window)
                .max_column(Some(4))
                .max_row(Some(8))
                .child_item(nwg::GridLayoutItem::new(&ui.canvas, 0, 0, 3, 8))
                .child(3, 0, &ui.choose_color_btn1)
                .child(3, 1, &ui.choose_color_btn2)
                .build(&ui.layout)?;

            Ok(ui)
        }
    }

    impl ExternCanvasUi {
        /// To make sure that everything is freed without issues, the default handler must be unbound.
        pub fn destroy(&self) {
            let mut handlers = self.default_handler.borrow_mut();
            for handler in handlers.drain(0..) {
                nwg::unbind_event_handler(&handler);
            }
        }
    }

    impl Deref for ExternCanvasUi {
        type Target = ExternCanvas;

        fn deref(&self) -> &ExternCanvas {
            &self.inner
        }
    }
}
/// Program entry point: build the UI, create the GL context, then pump events
/// while redrawing from the dispatch callback.
pub fn main() {
unsafe {
nwg::set_dpi_awareness();
};
nwg::init().expect("Failed to init Native Windows GUI");
let app = ExternCanvas::build_ui(Default::default()).expect("Failed to build UI");
// Make sure to render everything at least once before showing the window to remove weird artifacts.
app.canvas.create_context();
app.canvas.render();
// Here we use the `with_callback` version of dispatch_thread_events
// Internally the callback will be executed almost as fast as `loop { callback() }`
let callback_app = app.clone();
nwg::dispatch_thread_events_with_callback(move || {
callback_app.canvas.render();
});
// Unbinds the event handlers, breaking the Rc cycle between the UI and the
// handlers stored inside it; without this the UI would never be freed.
app.destroy();
}
// Vertex shader: passes a 2D position through and forwards the per-vertex
// color to the fragment stage. The trailing `\0` is required because the
// bytes are handed to glShaderSource as a C string.
const VS_SRC: &'static [u8] = b"#version 330
layout (location=0) in vec2 a_position;
layout (location=1) in vec4 a_color;
out vec4 color;
void main() {
color = a_color;
gl_Position = vec4(a_position, 0.0, 1.0);
}
\0";
// Fragment shader: writes the interpolated vertex color unchanged.
const FS_SRC: &'static [u8] = b"#version 330
precision mediump float;
in vec4 color;
out vec4 outColor;
void main() {
outColor = color;
}
\0";
Fixed opengl-canvas example
/*!
How to use an external rendering API with the NWG ExternalCanvas.
Also show how NWG controls can be subclassed
Requires the following features: `cargo run --example opengl_canvas --features "color-dialog extern-canvas"`
*/
extern crate glutin;
extern crate gl;
#[macro_use] extern crate native_windows_gui as nwg;
use std::cell::RefCell;
use crate::glutin::{
ContextBuilder, GlRequest, GlProfile, PossiblyCurrent, RawContext, Api,
dpi::PhysicalSize,
os::windows::RawContextExt
};
use crate::nwg::NativeUi;
// A raw (window-less glutin) OpenGL context that is known to be current.
type Ctx = RawContext<PossiblyCurrent>;
/**
Specialize the canvas.
To register a custom struct as a NWG control with full support you need to implement 4 traits:
* Deref
* DerefMut
* Into<nwg::ControlHandle>
* PartialEq<SubclassControl> for nwg::ControlHandle
You can either to it manually or the `subclass_control!(type, base_type, field)` macro.
*/
/// An nwg::ExternCanvas subclassed with an OpenGL rendering context.
#[derive(Default)]
pub struct OpenGlCanvas {
// The GL context; None until create_context() succeeds.
ctx: RefCell<Option<Ctx>>,
canvas: nwg::ExternCanvas,
}
impl OpenGlCanvas {
/// Create an opengl canvas with glutin & gl
pub fn create_context(&self) {
use std::ffi::c_void;
use std::{mem, ptr};
unsafe {
let ctx = ContextBuilder::new()
.with_gl(GlRequest::Specific(Api::OpenGl, (3, 3)))
.with_gl_profile(GlProfile::Core)
.build_raw_context(self.canvas.handle.hwnd().unwrap() as *mut c_void)
.expect("Failed to build opengl context")
.make_current()
.expect("Failed to set opengl context as current");
// Load the function pointers
gl::Clear::load_with(|s| ctx.get_proc_address(s) as *const _);
gl::ClearColor::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::CreateShader::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::ShaderSource::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::CompileShader::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::CreateProgram::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::AttachShader::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::LinkProgram::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::UseProgram::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::GenBuffers::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BindBuffer::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BufferData::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::GetAttribLocation::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::VertexAttribPointer::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::EnableVertexAttribArray::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::GenVertexArrays::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BindVertexArray::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::DrawArrays::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::Viewport::load_with(|s| ctx.get_proc_address(s) as *const _ );
gl::BufferSubData::load_with(|s| ctx.get_proc_address(s) as *const _ );
// Init default state
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
let vs = gl::CreateShader(gl::VERTEX_SHADER);
gl::ShaderSource(vs, 1, [VS_SRC.as_ptr() as *const _].as_ptr(), ptr::null());
gl::CompileShader(vs);
let fs = gl::CreateShader(gl::FRAGMENT_SHADER);
gl::ShaderSource(fs, 1, [FS_SRC.as_ptr() as *const _].as_ptr(), ptr::null());
gl::CompileShader(fs);
let program = gl::CreateProgram();
gl::AttachShader(program, vs);
gl::AttachShader(program, fs);
gl::LinkProgram(program);
gl::UseProgram(program);
let vertex_data: &[f32] = &[
0.0, 1.0, 1.0, 1.0, 1.0,
-1.0, -1.0, 1.0, 1.0, 1.0,
1.0, -1.0, 1.0, 1.0, 1.0,
];
let vertex_size = vertex_data.len() * mem::size_of::<f32>();
let mut vb = mem::zeroed();
gl::GenBuffers(1, &mut vb);
gl::BindBuffer(gl::ARRAY_BUFFER, vb);
gl::BufferData(
gl::ARRAY_BUFFER,
vertex_size as gl::types::GLsizeiptr,
vertex_data.as_ptr() as *const _,
gl::STATIC_DRAW,
);
let mut vao = mem::zeroed();
gl::GenVertexArrays(1, &mut vao);
gl::BindVertexArray(vao);
gl::EnableVertexAttribArray(0);
gl::EnableVertexAttribArray(1);
let stride = mem::size_of::<f32>() * 5;
let color_offset = 8 as *const c_void;
gl::VertexAttribPointer(0, 2, gl::FLOAT, 0, stride as i32, ptr::null());
gl::VertexAttribPointer(1, 4, gl::FLOAT, 0, stride as i32, color_offset);
*self.ctx.borrow_mut() = Some(ctx);
}
}
/// Our render function
pub fn render(&self) {
self.ctx.borrow().as_ref().map(|ctx| unsafe {
gl::Clear(gl::COLOR_BUFFER_BIT);
gl::DrawArrays(gl::TRIANGLES, 0, 3);
ctx.swap_buffers().unwrap();
});
}
pub fn resize(&self) {
self.ctx.borrow().as_ref().map(|ctx| unsafe {
let (w, h) = self.canvas.physical_size();
gl::Viewport(0, 0, w as _, h as _);
ctx.resize(PhysicalSize::new(w as f64, h as f64));
});
self.render();
}
}
// Generates Deref/DerefMut to nwg::ExternCanvas, Into<nwg::ControlHandle> and
// the ControlHandle PartialEq impl, so OpenGlCanvas can be used anywhere a
// plain nwg::ExternCanvas is expected (see the expansion sketched below).
subclass_control!(OpenGlCanvas, ExternCanvas, canvas);
// subclass_control generates the following code
/*
use std::ops::{Deref, DerefMut};
impl Deref for OpenGlCanvas {
type Target = nwg::ExternCanvas;
fn deref(&self) -> &nwg::ExternCanvas { &self.canvas }
}
impl DerefMut for OpenGlCanvas {
fn deref_mut(&mut self) -> &mut Self::Target {&mut self.canvas }
}
impl Into<nwg::ControlHandle> for &OpenGlCanvas {
fn into(self) -> nwg::ControlHandle { self.canvas.handle.clone() }
}
impl PartialEq<OpenGlCanvas> for nwg::ControlHandle {
fn eq(&self, other: &OpenGlCanvas) -> bool {
*self == other.handle
}
}*/
/**
The Ui application. Spoiler alert, there's nothing much different from the other examples.
*/
/// Top-level application state: the main window, its grid layout, the
/// subclassed OpenGL canvas, and a shared color dialog with the two buttons
/// that trigger it.
#[derive(Default)]
pub struct ExternCanvas {
window: nwg::Window,
layout: nwg::GridLayout,
canvas: OpenGlCanvas,
// Single dialog reused by both color buttons.
color_dialog: nwg::ColorDialog,
// "Background color" button (see build_ui).
choose_color_btn1: nwg::Button,
// "Triangle color" button (see build_ui).
choose_color_btn2: nwg::Button,
}
impl ExternCanvas {
    /// Show the main window and give it keyboard focus.
    pub fn show(&self) {
        self.window.set_visible(true);
        self.window.set_focus();
    }

    /// Stop the NWG event dispatch loop, ending the application.
    pub fn exit(&self) {
        nwg::stop_thread_dispatch();
    }

    /// Forward a window resize to the OpenGL canvas.
    pub fn resize_canvas(&self) {
        self.canvas.resize();
    }

    /// Ask the user for a color and install it as the GL clear color.
    pub fn select_bg_color(&self) {
        if self.color_dialog.run(Some(&self.window)) {
            let [r, g, b] = self.color_dialog.color();
            // Dialog channels are 8-bit (0-255); normalize to the 0.0-1.0
            // range OpenGL expects. (Was `/ 225.0`, which skewed every
            // channel ~13% too bright and could exceed 1.0.)
            let [r, g, b] = [r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0];
            unsafe {
                gl::ClearColor(r, g, b, 1.0);
            }
        }
        self.window.invalidate();
    }

    /// Ask the user for a color and re-upload the triangle's vertex colors.
    pub fn select_tri_color(&self) {
        use std::mem;
        if self.color_dialog.run(Some(&self.window)) {
            let [r, g, b] = self.color_dialog.color();
            // Normalize 8-bit channels to 0.0-1.0 (was `/ 225.0`, a typo).
            let [r, g, b] = [r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0];
            // Interleaved layout: x, y, r, g, b per vertex — must match the
            // layout uploaded by create_context.
            let vertex_data: &[f32] = &[
                0.0, 1.0, r, g, b,
                -1.0, -1.0, r, g, b,
                1.0, -1.0, r, g, b,
            ];
            let vertex_size = vertex_data.len() * mem::size_of::<f32>();
            unsafe {
                // Overwrite the VBO in place; it is still bound to
                // ARRAY_BUFFER from create_context.
                gl::BufferSubData(gl::ARRAY_BUFFER, 0, vertex_size as gl::types::GLsizeiptr, vertex_data.as_ptr() as *const _);
            }
        }
        self.window.invalidate();
    }
}
mod extern_canvas_ui {
use native_windows_gui as nwg;
use super::*;
use std::rc::Rc;
use std::ops::Deref;
/// Owns the application state together with the event handlers bound to it.
pub struct ExternCanvasUi {
inner: Rc<ExternCanvas>,
default_handler: RefCell<Vec<nwg::EventHandler>>
}
impl nwg::NativeUi<ExternCanvasUi> for ExternCanvas {
/// Builds every control, binds the event handlers and lays out the window.
fn build_ui(mut data: ExternCanvas) -> Result<ExternCanvasUi, nwg::NwgError> {
use nwg::Event as E;
// Resources
nwg::ColorDialog::builder()
.build(&mut data.color_dialog)?;
// Controls
nwg::Window::builder()
.flags(nwg::WindowFlags::MAIN_WINDOW)
.size((600, 500))
.position((300, 300))
.title("Native windows GUI / OpenGL")
.build(&mut data.window)?;
nwg::ExternCanvas::builder()
.parent(Some(&data.window))
.build(&mut data.canvas)?;
nwg::Button::builder()
.text("Background color")
.parent(&data.window)
.build(&mut data.choose_color_btn1)?;
nwg::Button::builder()
.text("Triangle color")
.parent(&data.window)
.build(&mut data.choose_color_btn2)?;
// Wrap-up
let ui = ExternCanvasUi {
inner: Rc::new(data),
default_handler: RefCell::default(),
};
// Events
let window_handles = [&ui.window.handle];
for handle in window_handles.iter() {
// A Weak reference avoids an Rc cycle between the UI and the handler
// stored inside it; the upgrade below is a no-op once the UI is gone.
let evt_ui = Rc::downgrade(&ui.inner);
let handle_events = move |evt, _evt_data, handle| {
if let Some(evt_ui) = evt_ui.upgrade() {
match evt {
E::OnResize => {
if &handle == &evt_ui.canvas {
ExternCanvas::resize_canvas(&evt_ui);
}
},
E::OnButtonClick => {
if &handle == &evt_ui.choose_color_btn1 {
ExternCanvas::select_bg_color(&evt_ui);
} else if &handle == &evt_ui.choose_color_btn2 {
ExternCanvas::select_tri_color(&evt_ui);
}
},
E::OnWindowClose => {
if &handle == &evt_ui.window {
ExternCanvas::exit(&evt_ui);
}
},
E::OnInit => {
if &handle == &evt_ui.window {
ExternCanvas::show(&evt_ui);
}
},
_ => {}
}
}
};
ui.default_handler.borrow_mut().push(
nwg::full_bind_event_handler(handle, handle_events)
);
}
// Layouts
nwg::GridLayout::builder()
.parent(&ui.window)
.max_column(Some(4))
.max_row(Some(8))
.child_item(nwg::GridLayoutItem::new(&ui.canvas, 0, 0, 3, 8))
.child(3, 0, &ui.choose_color_btn1)
.child(3, 1, &ui.choose_color_btn2)
.build(&ui.layout)?;
return Ok(ui);
}
}
impl Drop for ExternCanvasUi {
/// To make sure that everything is freed without issues, the default handler must be unbound.
fn drop(&mut self) {
let mut handlers = self.default_handler.borrow_mut();
for handler in handlers.drain(0..) {
nwg::unbind_event_handler(&handler);
}
}
}
// Lets callers reach the inner ExternCanvas fields/methods directly.
impl Deref for ExternCanvasUi {
type Target = ExternCanvas;
fn deref(&self) -> &ExternCanvas {
&self.inner
}
}
}
/// Program entry point: build the UI, create the GL context, then pump events
/// while redrawing from the dispatch callback.
pub fn main() {
nwg::init().expect("Failed to init Native Windows GUI");
let app = ExternCanvas::build_ui(Default::default()).expect("Failed to build UI");
// Make sure to render everything at least once before showing the window to remove weird artifacts.
app.canvas.create_context();
app.canvas.render();
// Here we use the `with_callback` version of dispatch_thread_events
// Internally the callback will be executed almost as fast as `loop { callback() }`
// `app` is moved into the closure; when dispatch returns the closure is
// dropped and ExternCanvasUi's Drop impl unbinds the event handlers.
nwg::dispatch_thread_events_with_callback(move || {
app.canvas.render();
});
}
// Vertex shader: passes a 2D position through and forwards the per-vertex
// color to the fragment stage. The trailing `\0` is required because the
// bytes are handed to glShaderSource as a C string.
const VS_SRC: &'static [u8] = b"#version 330
layout (location=0) in vec2 a_position;
layout (location=1) in vec4 a_color;
out vec4 color;
void main() {
color = a_color;
gl_Position = vec4(a_position, 0.0, 1.0);
}
\0";
// Fragment shader: writes the interpolated vertex color unchanged.
const FS_SRC: &'static [u8] = b"#version 330
precision mediump float;
in vec4 color;
out vec4 outColor;
void main() {
outColor = color;
}
\0";
|
use std::collections::HashSet;
use std::io::{Read, Write, Error};
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use super::super::utils::DEFAULT_BUF_SIZE;
use super::super::io::binary::*;
/// One preprocessor directive recognized by the filter. Each variant carries
/// the raw bytes of the whole directive so it can be copied to the output
/// verbatim.
#[derive(Debug)]
enum Directive {
// raw, file
Line(Vec<u8>, String),
// raw
HdrStop(Vec<u8>),
// raw
Unknown(Vec<u8>)
}
/// Filters a preprocessed C/C++ stream for precompiled-header handling.
///
/// Scans `reader` byte by byte, tracking `#line` and `#pragma hdrstop`
/// directives. The header section ends either at an explicit `#pragma
/// hdrstop` or when a `#line` directive returns to the entry file after the
/// `marker` header was seen (in which case `#pragma hdrstop` is injected).
/// The rest of the stream is copied through verbatim.
///
/// * `base` - when given, every collected header path is joined onto it.
/// * `marker` - header file name that terminates the precompiled section.
/// * `keep_headers` - when false, bytes of the header section are dropped
///   from the output instead of copied.
///
/// Returns the set of header paths referenced by `#line` directives.
pub fn filter_preprocessed(base: &Option<PathBuf>, reader: &mut Read, writer: &mut Write, marker: &Option<String>, keep_headers: bool) -> Result<Vec<PathBuf>, Error> {
    let mut line_begin = true;
    // Entry file: the file named by the very first #line directive.
    let mut entry_file: Option<String> = None;
    let mut header_found: bool = false;
    let mut header_files: HashSet<String> = HashSet::new();
    loop {
        let c = try!(read_u8(reader));
        match c {
            b'\n' | b'\r' => {
                if keep_headers {
                    try!(write_u8(writer, c));
                }
                line_begin = true;
            }
            b'\t' | b' ' => {
                if keep_headers {
                    try!(write_u8(writer, c));
                }
            }
            b'#' if line_begin => {
                let directive = try!(read_directive(c, reader));
                match directive {
                    Directive::Line(raw, raw_file) => {
                        let file = raw_file.replace("\\", "/");
                        entry_file = match entry_file {
                            Some(path) => {
                                // Returning to the entry file after the marker
                                // header was seen ends the header section.
                                if header_found && (path == file) {
                                    try!(writer.write_all(b"#pragma hdrstop\n"));
                                    try!(writer.write_all(&raw));
                                    break;
                                }
                                match *marker {
                                    Some(ref raw_path) => {
                                        let path = raw_path.replace("\\", "/");
                                        if file == path || Path::new(&file).ends_with(&Path::new(&path)) {
                                            header_found = true;
                                        }
                                    }
                                    None => {}
                                }
                                Some(path)
                            }
                            None => Some(file.clone())
                        };
                        header_files.insert(file);
                        if keep_headers {
                            try!(writer.write_all(&raw));
                        }
                    }
                    Directive::HdrStop(raw) => {
                        // An explicit `#pragma hdrstop` always ends the section.
                        try!(writer.write_all(&raw));
                        break;
                    }
                    Directive::Unknown(raw) => {
                        if keep_headers {
                            try!(writer.write_all(&raw));
                        }
                    }
                }
            }
            _ => {
                if keep_headers {
                    try!(write_u8(writer, c));
                }
                line_begin = false;
            }
        }
    }
    // Copy end of stream.
    let mut buf: [u8; DEFAULT_BUF_SIZE] = [0; DEFAULT_BUF_SIZE];
    loop {
        let size = try!(reader.read(&mut buf));
        // `Read::read` returns usize, which can never be negative: compare
        // against 0 directly instead of the misleading `size <= 0`.
        if size == 0 {
            break;
        }
        try!(writer.write_all(&buf[0..size]));
    }
    Ok(Vec::from_iter(header_files.into_iter().map(|arg: String| -> PathBuf {
        match base {
            &Some(ref path) => path.join(arg),
            &None => Path::new(&arg).to_path_buf(),
        }
    })))
}
/// Reads one preprocessor directive, starting from the already-consumed `#`.
///
/// Dispatches on the directive keyword; anything other than `line` or
/// `pragma` is consumed up to end-of-line and reported as `Unknown`.
fn read_directive(first: u8, reader: &mut Read) -> Result<Directive, Error> {
    // Every byte of the directive is collected verbatim into `bytes` so the
    // caller can copy the whole directive to the output unchanged.
    let mut bytes = vec![first];
    let (lookahead, keyword) = try!(read_token(None, reader, &mut bytes));
    match &keyword[..] {
        b"line" => read_directive_line(lookahead, reader, bytes),
        b"pragma" => read_directive_pragma(lookahead, reader, bytes),
        _ => {
            try!(skip_line(lookahead, reader, &mut bytes));
            Ok(Directive::Unknown(bytes))
        }
    }
}
/// Reads one token: either a bare alphanumeric word or a double-quoted,
/// backslash-escaped string (quotes stripped, escapes decoded).
///
/// Every consumed byte is appended to `raw` unmodified. Returns the first
/// byte *after* the token (already consumed, so the caller must pass it on)
/// together with the token bytes; `(None, empty)` means the line ended before
/// a token started.
fn read_token(first: Option<u8>, reader: &mut Read, raw: &mut Vec<u8>) -> Result<(Option<u8>, Vec<u8>), Error> {
match try! (skip_spaces(first, reader, raw)) {
Some(first_char) => {
let mut token: Vec<u8> = Vec::new();
let mut escape = false;
let quote: bool;
if first_char == b'"' {
quote = true;
} else {
token.push(first_char);
quote = false;
}
loop {
let c = try! (read_u8(reader));
raw.push(c);
if quote {
if escape {
// Decode the common C escapes; any other escaped byte is literal.
match c {
b'n' => token.push(b'\n'),
b'r' => token.push(b'\r'),
b't' => token.push(b'\t'),
v => token.push(v)
}
escape = false;
} else if c == ('\\' as u8) {
escape = true;
} else if c == b'"' {
// Closing quote: consume one lookahead byte so both the quoted and
// unquoted paths return with the byte after the token already read.
let n = try! (read_u8(reader));
raw.push(n);
return Ok((Some(n), token));
} else {
token.push(c);
}
} else {
match c {
b'a' ... b'z' | b'A' ... b'Z' | b'0' ... b'9' => {
token.push(c);
}
_ => {
return Ok((Some(c), token));
}
}
}
}
}
None => {
return Ok((None, Vec::new()));
}
}
}
/// Parses the remainder of a `#line` directive: `#line <number> "<file>"`.
fn read_directive_line(first: Option<u8>, reader: &mut Read, mut raw: Vec<u8>) -> Result<Directive, Error> {
    // The first token is the line number; it is consumed but never used.
    let (after_number, _) = try!(read_token(first, reader, &mut raw));
    // The second token is the (usually quoted) file name.
    let (after_file, file_bytes) = try!(read_token(after_number, reader, &mut raw));
    try!(skip_line(after_file, reader, &mut raw));
    let file_name = String::from_utf8_lossy(&file_bytes).to_string();
    Ok(Directive::Line(raw, file_name))
}
/// Parses the remainder of a `#pragma` directive, recognizing `hdrstop`.
fn read_directive_pragma(first: Option<u8>, reader: &mut Read, mut raw: Vec<u8>) -> Result<Directive, Error> {
    let (lookahead, name) = try!(read_token(first, reader, &mut raw));
    try!(skip_line(lookahead, reader, &mut raw));
    let directive = if name == b"hdrstop" {
        Directive::HdrStop(raw)
    } else {
        Directive::Unknown(raw)
    };
    Ok(directive)
}
/// Skips tabs and spaces, echoing every newly read byte into `raw`.
///
/// `first` is a byte the caller already consumed (not echoed again). Returns
/// the first non-blank byte, or `None` if the line ended first.
fn skip_spaces(first: Option<u8>, reader: &mut Read, raw: &mut Vec<u8>) -> Result<Option<u8>, Error> {
    if let Some(c) = first {
        match c {
            b'\n' | b'\r' => return Ok(None),
            b'\t' | b' ' => {}
            _ => return Ok(Some(c)),
        }
    }
    loop {
        let c = try!(read_u8(reader));
        try!(write_u8(raw, c));
        match c {
            b'\n' | b'\r' => return Ok(None),
            b'\t' | b' ' => {}
            _ => return Ok(Some(c)),
        }
    }
}
/// Consumes bytes through the end of the current line, echoing every newly
/// read byte into `raw`. `first` is an already-consumed lookahead byte.
fn skip_line(first: Option<u8>, reader: &mut Read, raw: &mut Vec<u8>) -> Result<(), Error> {
    if let Some(c) = first {
        if c == b'\n' || c == b'\r' {
            return Ok(());
        }
    }
    loop {
        let c = try!(read_u8(reader));
        try!(write_u8(raw, c));
        if c == b'\n' || c == b'\r' {
            return Ok(());
        }
    }
}
#[cfg(test)]
mod test {
extern crate test;
use std::io::{Read, Write, Cursor};
use std::fs::File;
use self::test::Bencher;
// Runs filter_preprocessed over `original` and asserts the produced output
// equals `expected`.
fn check_filter(original: &str, expected: &str, marker: Option<String>, keep_headers: bool) {
let mut writer: Vec<u8> = Vec::new();
let mut stream: Vec<u8> = Vec::new();
stream.write(&original.as_bytes()[..]).unwrap();
match super::filter_preprocessed(&None, &mut Cursor::new(stream), &mut writer, &marker, keep_headers) {
Ok(_) => {assert_eq! (String::from_utf8_lossy(&writer), expected)}
// NOTE(review): `panic!(e)` with a non-string payload is deprecated (a hard
// error since Rust 2021); `panic!("{:?}", e)` is the modern form.
Err(e) => {panic! (e);}
}
}
// Marker header present, headers kept: `#pragma hdrstop` is injected when
// the stream returns to the entry file.
#[test]
fn test_filter_precompiled_keep() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
# pragma once
void hello();
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
# pragma once
void hello();
#pragma hdrstop
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, Some("sample header.h".to_string()), true)
}
// Same scenario with keep_headers = false: the header section is dropped.
#[test]
fn test_filter_precompiled_remove() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
# pragma once
void hello1();
void hello2();
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"#pragma hdrstop
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, Some("sample header.h".to_string()), false);
}
// An explicit `# pragma hdrstop` ends the header section even without a marker.
#[test]
fn test_filter_precompiled_hdrstop() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
void hello();
# pragma hdrstop
void data();
# pragma once
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"# pragma hdrstop
void data();
# pragma once
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, None, false);
}
// Backslash-separated Windows paths in #line directives must match too.
#[test]
fn test_filter_precompiled_winpath() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:\\work\\octobuild\\test_cl\\sample header.h"
# pragma once
void hello();
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"#line 1 "sample.cpp"
#line 1 "e:\\work\\octobuild\\test_cl\\sample header.h"
# pragma once
void hello();
#pragma hdrstop
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, Some("e:\\work\\octobuild\\test_cl\\sample header.h".to_string()), true);
}
// Benchmark helper: reads a fixture from disk and filters it repeatedly.
// NOTE(review): the writer is a throwaway `Vec::new()` and the Ok value is
// discarded, so the optimizer could in principle elide part of the measured
// work; returning the buffer from the closure would be safer.
fn bench_filter(b: &mut Bencher, path: &str, marker: Option<String>, keep_headers: bool) {
let mut source = Vec::new();
File::open(path).unwrap().read_to_end(&mut source).unwrap();
b.iter(|| {
super::filter_preprocessed(&None, &mut Cursor::new(source.clone()), &mut Vec::new(), &marker, keep_headers).unwrap();
});
}
#[bench]
fn bench_check_filter(b: &mut Bencher) {
bench_filter(b, "tests/filter_preprocessed.i", Some("c:\\bozaro\\github\\octobuild\\test_cl\\sample.h".to_string()), false)
}
}
Fix benchmark
use std::collections::HashSet;
use std::io::{Read, Write, Error};
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use super::super::utils::DEFAULT_BUF_SIZE;
use super::super::io::binary::*;
/// One preprocessor directive recognized by the filter. Each variant carries
/// the raw bytes of the whole directive so it can be copied to the output
/// verbatim.
#[derive(Debug)]
enum Directive {
// raw, file
Line(Vec<u8>, String),
// raw
HdrStop(Vec<u8>),
// raw
Unknown(Vec<u8>)
}
/// Filters a preprocessed C/C++ stream for precompiled-header handling.
///
/// Scans `reader` byte by byte, tracking `#line` and `#pragma hdrstop`
/// directives. The header section ends either at an explicit `#pragma
/// hdrstop` or when a `#line` directive returns to the entry file after the
/// `marker` header was seen (in which case `#pragma hdrstop` is injected).
/// The rest of the stream is copied through verbatim.
///
/// * `base` - when given, every collected header path is joined onto it.
/// * `marker` - header file name that terminates the precompiled section.
/// * `keep_headers` - when false, bytes of the header section are dropped
///   from the output instead of copied.
///
/// Returns the set of header paths referenced by `#line` directives.
pub fn filter_preprocessed(base: &Option<PathBuf>, reader: &mut Read, writer: &mut Write, marker: &Option<String>, keep_headers: bool) -> Result<Vec<PathBuf>, Error> {
    let mut line_begin = true;
    // Entry file: the file named by the very first #line directive.
    let mut entry_file: Option<String> = None;
    let mut header_found: bool = false;
    let mut header_files: HashSet<String> = HashSet::new();
    loop {
        let c = try!(read_u8(reader));
        match c {
            b'\n' | b'\r' => {
                if keep_headers {
                    try!(write_u8(writer, c));
                }
                line_begin = true;
            }
            b'\t' | b' ' => {
                if keep_headers {
                    try!(write_u8(writer, c));
                }
            }
            b'#' if line_begin => {
                let directive = try!(read_directive(c, reader));
                match directive {
                    Directive::Line(raw, raw_file) => {
                        let file = raw_file.replace("\\", "/");
                        entry_file = match entry_file {
                            Some(path) => {
                                // Returning to the entry file after the marker
                                // header was seen ends the header section.
                                if header_found && (path == file) {
                                    try!(writer.write_all(b"#pragma hdrstop\n"));
                                    try!(writer.write_all(&raw));
                                    break;
                                }
                                match *marker {
                                    Some(ref raw_path) => {
                                        let path = raw_path.replace("\\", "/");
                                        if file == path || Path::new(&file).ends_with(&Path::new(&path)) {
                                            header_found = true;
                                        }
                                    }
                                    None => {}
                                }
                                Some(path)
                            }
                            None => Some(file.clone())
                        };
                        header_files.insert(file);
                        if keep_headers {
                            try!(writer.write_all(&raw));
                        }
                    }
                    Directive::HdrStop(raw) => {
                        // An explicit `#pragma hdrstop` always ends the section.
                        try!(writer.write_all(&raw));
                        break;
                    }
                    Directive::Unknown(raw) => {
                        if keep_headers {
                            try!(writer.write_all(&raw));
                        }
                    }
                }
            }
            _ => {
                if keep_headers {
                    try!(write_u8(writer, c));
                }
                line_begin = false;
            }
        }
    }
    // Copy end of stream.
    let mut buf: [u8; DEFAULT_BUF_SIZE] = [0; DEFAULT_BUF_SIZE];
    loop {
        let size = try!(reader.read(&mut buf));
        // `Read::read` returns usize, which can never be negative: compare
        // against 0 directly instead of the misleading `size <= 0`.
        if size == 0 {
            break;
        }
        try!(writer.write_all(&buf[0..size]));
    }
    Ok(Vec::from_iter(header_files.into_iter().map(|arg: String| -> PathBuf {
        match base {
            &Some(ref path) => path.join(arg),
            &None => Path::new(&arg).to_path_buf(),
        }
    })))
}
/// Reads one preprocessor directive, starting from the already-consumed `#`.
///
/// Dispatches on the directive keyword; anything other than `line` or
/// `pragma` is consumed up to end-of-line and reported as `Unknown`.
fn read_directive(first: u8, reader: &mut Read) -> Result<Directive, Error> {
    // Every byte of the directive is collected verbatim into `bytes` so the
    // caller can copy the whole directive to the output unchanged.
    let mut bytes = vec![first];
    let (lookahead, keyword) = try!(read_token(None, reader, &mut bytes));
    match &keyword[..] {
        b"line" => read_directive_line(lookahead, reader, bytes),
        b"pragma" => read_directive_pragma(lookahead, reader, bytes),
        _ => {
            try!(skip_line(lookahead, reader, &mut bytes));
            Ok(Directive::Unknown(bytes))
        }
    }
}
/// Reads one token: either a bare alphanumeric word or a double-quoted,
/// backslash-escaped string (quotes stripped, escapes decoded).
///
/// Every consumed byte is appended to `raw` unmodified. Returns the first
/// byte *after* the token (already consumed, so the caller must pass it on)
/// together with the token bytes; `(None, empty)` means the line ended before
/// a token started.
fn read_token(first: Option<u8>, reader: &mut Read, raw: &mut Vec<u8>) -> Result<(Option<u8>, Vec<u8>), Error> {
match try! (skip_spaces(first, reader, raw)) {
Some(first_char) => {
let mut token: Vec<u8> = Vec::new();
let mut escape = false;
let quote: bool;
if first_char == b'"' {
quote = true;
} else {
token.push(first_char);
quote = false;
}
loop {
let c = try! (read_u8(reader));
raw.push(c);
if quote {
if escape {
// Decode the common C escapes; any other escaped byte is literal.
match c {
b'n' => token.push(b'\n'),
b'r' => token.push(b'\r'),
b't' => token.push(b'\t'),
v => token.push(v)
}
escape = false;
} else if c == ('\\' as u8) {
escape = true;
} else if c == b'"' {
// Closing quote: consume one lookahead byte so both the quoted and
// unquoted paths return with the byte after the token already read.
let n = try! (read_u8(reader));
raw.push(n);
return Ok((Some(n), token));
} else {
token.push(c);
}
} else {
match c {
b'a' ... b'z' | b'A' ... b'Z' | b'0' ... b'9' => {
token.push(c);
}
_ => {
return Ok((Some(c), token));
}
}
}
}
}
None => {
return Ok((None, Vec::new()));
}
}
}
/// Parses the remainder of a `#line` directive: `#line <number> "<file>"`.
fn read_directive_line(first: Option<u8>, reader: &mut Read, mut raw: Vec<u8>) -> Result<Directive, Error> {
    // The first token is the line number; it is consumed but never used.
    let (after_number, _) = try!(read_token(first, reader, &mut raw));
    // The second token is the (usually quoted) file name.
    let (after_file, file_bytes) = try!(read_token(after_number, reader, &mut raw));
    try!(skip_line(after_file, reader, &mut raw));
    let file_name = String::from_utf8_lossy(&file_bytes).to_string();
    Ok(Directive::Line(raw, file_name))
}
/// Parses the remainder of a `#pragma` directive, recognizing `hdrstop`.
fn read_directive_pragma(first: Option<u8>, reader: &mut Read, mut raw: Vec<u8>) -> Result<Directive, Error> {
    let (lookahead, name) = try!(read_token(first, reader, &mut raw));
    try!(skip_line(lookahead, reader, &mut raw));
    let directive = if name == b"hdrstop" {
        Directive::HdrStop(raw)
    } else {
        Directive::Unknown(raw)
    };
    Ok(directive)
}
/// Skips tabs and spaces, echoing every newly read byte into `raw`.
///
/// `first` is a byte the caller already consumed (not echoed again). Returns
/// the first non-blank byte, or `None` if the line ended first.
fn skip_spaces(first: Option<u8>, reader: &mut Read, raw: &mut Vec<u8>) -> Result<Option<u8>, Error> {
    if let Some(c) = first {
        match c {
            b'\n' | b'\r' => return Ok(None),
            b'\t' | b' ' => {}
            _ => return Ok(Some(c)),
        }
    }
    loop {
        let c = try!(read_u8(reader));
        try!(write_u8(raw, c));
        match c {
            b'\n' | b'\r' => return Ok(None),
            b'\t' | b' ' => {}
            _ => return Ok(Some(c)),
        }
    }
}
/// Consumes bytes through the end of the current line, echoing every newly
/// read byte into `raw`. `first` is an already-consumed lookahead byte.
fn skip_line(first: Option<u8>, reader: &mut Read, raw: &mut Vec<u8>) -> Result<(), Error> {
    if let Some(c) = first {
        if c == b'\n' || c == b'\r' {
            return Ok(());
        }
    }
    loop {
        let c = try!(read_u8(reader));
        try!(write_u8(raw, c));
        if c == b'\n' || c == b'\r' {
            return Ok(());
        }
    }
}
#[cfg(test)]
mod test {
extern crate test;
use std::io::{Read, Write, Cursor};
use std::fs::File;
use self::test::Bencher;
// Runs filter_preprocessed over `original` and asserts the produced output
// equals `expected`.
fn check_filter(original: &str, expected: &str, marker: Option<String>, keep_headers: bool) {
let mut writer: Vec<u8> = Vec::new();
let mut stream: Vec<u8> = Vec::new();
stream.write(&original.as_bytes()[..]).unwrap();
match super::filter_preprocessed(&None, &mut Cursor::new(stream), &mut writer, &marker, keep_headers) {
Ok(_) => {assert_eq! (String::from_utf8_lossy(&writer), expected)}
// NOTE(review): `panic!(e)` with a non-string payload is deprecated (a hard
// error since Rust 2021); `panic!("{:?}", e)` is the modern form.
Err(e) => {panic! (e);}
}
}
// Marker header present, headers kept: `#pragma hdrstop` is injected when
// the stream returns to the entry file.
#[test]
fn test_filter_precompiled_keep() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
# pragma once
void hello();
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
# pragma once
void hello();
#pragma hdrstop
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, Some("sample header.h".to_string()), true)
}
// Same scenario with keep_headers = false: the header section is dropped.
#[test]
fn test_filter_precompiled_remove() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
# pragma once
void hello1();
void hello2();
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"#pragma hdrstop
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, Some("sample header.h".to_string()), false);
}
// An explicit `# pragma hdrstop` ends the header section even without a marker.
#[test]
fn test_filter_precompiled_hdrstop() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:/work/octobuild/test_cl/sample header.h"
void hello();
# pragma hdrstop
void data();
# pragma once
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"# pragma hdrstop
void data();
# pragma once
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, None, false);
}
// Backslash-separated Windows paths in #line directives must match too.
#[test]
fn test_filter_precompiled_winpath() {
check_filter(
r#"#line 1 "sample.cpp"
#line 1 "e:\\work\\octobuild\\test_cl\\sample header.h"
# pragma once
void hello();
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#,
r#"#line 1 "sample.cpp"
#line 1 "e:\\work\\octobuild\\test_cl\\sample header.h"
# pragma once
void hello();
#pragma hdrstop
#line 2 "sample.cpp"
int main(int argc, char **argv) {
return 0;
}
"#, Some("e:\\work\\octobuild\\test_cl\\sample header.h".to_string()), true);
}
// Benchmark helper: reads a fixture from disk and filters it repeatedly.
// The filtered output is returned from the closure so the optimizer cannot
// discard the measured work.
fn bench_filter(b: &mut Bencher, path: &str, marker: Option<String>, keep_headers: bool) {
let mut source = Vec::new();
File::open(path).unwrap().read_to_end(&mut source).unwrap();
b.iter(|| {
let mut result = Vec::new();
super::filter_preprocessed(&None, &mut Cursor::new(source.clone()), &mut result, &marker, keep_headers).unwrap();
result
});
}
#[bench]
fn bench_check_filter(b: &mut Bencher) {
bench_filter(b, "tests/filter_preprocessed.i", Some("c:\\bozaro\\github\\octobuild\\test_cl\\sample.h".to_string()), false)
}
}
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Traps and notifies UNIX signals.
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
use std::sync::{Once, ONCE_INIT};
use os::process::{OsSignal, Signal, SignalCode};
use super::SignalEvent;
// Guards one-time installation of the signal handlers (see `init`).
static INIT: Once = ONCE_INIT;
// True when we have caught a signal
static CAUGHT: AtomicBool = ATOMIC_BOOL_INIT;
// Stores the value of the signal we caught.
// NOTE(review): only one signal value fits here, so a second signal arriving
// before `check_for_signal` runs overwrites the first.
static SIGNAL: AtomicUsize = ATOMIC_USIZE_INIT;
// Functions from POSIX libc.
extern "C" {
fn signal(
sig: SignalCode,
cb: unsafe extern "C" fn(SignalCode),
) -> unsafe extern "C" fn(SignalCode);
}
/// Signal handler installed for every trapped signal: records that a signal
/// arrived and which one it was, using only atomic stores.
///
/// NOTE(review): because a single slot holds the last signal, signals that
/// arrive in quick succession can be lost (the newer value overwrites the
/// older before `check_for_signal` consumes it).
unsafe extern "C" fn handle_signal(signal: SignalCode) {
CAUGHT.store(true, Ordering::SeqCst);
SIGNAL.store(signal as usize, Ordering::SeqCst);
}
/// Installs the process signal handlers and resets the caught-signal state.
/// Runs at most once per process; later calls are no-ops.
pub fn init() {
INIT.call_once(|| {
self::set_signal_handlers();
// NOTE(review): these stores run after the handlers are installed, so a
// signal delivered in between would be cleared here — confirm acceptable.
CAUGHT.store(false, Ordering::SeqCst);
SIGNAL.store(0 as usize, Ordering::SeqCst);
});
}
/// Returns the pending signal (if any) translated into a `SignalEvent`:
/// INT/TERM become `Shutdown`, CHLD becomes `WaitForChild`, anything else
/// recognized becomes `Passthrough`; unknown codes are logged and dropped.
pub fn check_for_signal() -> Option<SignalEvent> {
if CAUGHT.load(Ordering::SeqCst) {
let code = SIGNAL.load(Ordering::SeqCst) as SignalCode;
let event = match Signal::from_signal_code(code) {
Some(Signal::INT) | Some(Signal::TERM) => Some(SignalEvent::Shutdown),
Some(Signal::CHLD) => Some(SignalEvent::WaitForChild),
Some(signal) => Some(SignalEvent::Passthrough(signal)),
None => {
println!("Received invalid signal: #{}", code);
None
}
};
// Clear out a signal that has been caught so we don't end up
// processing it again.
// NOTE(review): a signal delivered between the load above and these
// stores would be erased without being processed.
CAUGHT.store(false, Ordering::SeqCst);
SIGNAL.store(0 as usize, Ordering::SeqCst);
event
} else {
None
}
}
/// Registers `handle_signal` for every UNIX signal this module traps.
/// `signal(2)` is inherently unsafe FFI; the handler itself performs only
/// atomic stores.
fn set_signal_handlers() {
unsafe {
signal(Signal::HUP.os_signal(), handle_signal);
signal(Signal::INT.os_signal(), handle_signal);
signal(Signal::QUIT.os_signal(), handle_signal);
signal(Signal::ALRM.os_signal(), handle_signal);
signal(Signal::TERM.os_signal(), handle_signal);
signal(Signal::USR1.os_signal(), handle_signal);
signal(Signal::USR2.os_signal(), handle_signal);
signal(Signal::CHLD.os_signal(), handle_signal);
}
}
When multiple signals are received, return them in order; don't drop any
Resolves https://github.com/habitat-sh/core/issues/11
Signed-off-by: Jon Bauman <e6c6a4db54ad11b4de69fed67139594b59f3444f@users.noreply.github.com>
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Traps and notifies UNIX signals.
use std::collections::VecDeque;
use std::sync::{Mutex, Once, ONCE_INIT};
use os::process::{OsSignal, Signal, SignalCode};
use super::SignalEvent;
// Guards one-time installation of the signal handlers (see `init`).
static INIT: Once = ONCE_INIT;
lazy_static! {
// FIFO queue of raw signal codes received but not yet handed to the
// consumer, so multiple signals are preserved in arrival order.
static ref CAUGHT_SIGNALS: Mutex<VecDeque<SignalCode>> = Mutex::new(VecDeque::new());
}
// Functions from POSIX libc.
extern "C" {
fn signal(
sig: SignalCode,
cb: unsafe extern "C" fn(SignalCode),
) -> unsafe extern "C" fn(SignalCode);
}
/// Signal handler: appends the received signal code to the shared queue so
/// that no signal is dropped and arrival order is preserved.
///
/// NOTE(review): taking a `Mutex` inside a signal handler is not
/// async-signal-safe; if the interrupted thread already holds this lock the
/// process can deadlock. Confirm against the expected signal-delivery model.
unsafe extern "C" fn handle_signal(signal: SignalCode) {
CAUGHT_SIGNALS
.lock()
.expect("Signal mutex poisoned")
.push_back(signal);
}
pub fn init() {
INIT.call_once(|| {
self::set_signal_handlers();
});
}
/// Pops at most one queued signal and translates it into a `SignalEvent`.
///
/// Consumers are expected to call this frequently; since there is usually at
/// most one signal pending, a single event is returned per call. When several
/// signals have queued up, they are delivered one per call, in arrival order.
pub fn check_for_signal() -> Option<SignalEvent> {
    // Guard clause: take the oldest queued code, or bail out when idle.
    let code = match CAUGHT_SIGNALS.lock().expect("Signal mutex poisoned").pop_front() {
        Some(code) => code,
        None => return None,
    };
    match Signal::from_signal_code(code) {
        Some(Signal::INT) | Some(Signal::TERM) => Some(SignalEvent::Shutdown),
        Some(Signal::CHLD) => Some(SignalEvent::WaitForChild),
        Some(signal) => Some(SignalEvent::Passthrough(signal)),
        None => {
            println!("Received invalid signal: #{}", code);
            None
        }
    }
}
/// Registers `handle_signal` for every UNIX signal this module traps.
/// `signal(2)` is inherently unsafe FFI; the handler only pushes onto a queue.
fn set_signal_handlers() {
    let trapped = [Signal::HUP,
                   Signal::INT,
                   Signal::QUIT,
                   Signal::ALRM,
                   Signal::TERM,
                   Signal::USR1,
                   Signal::USR2,
                   Signal::CHLD];
    for sig in trapped.iter() {
        unsafe {
            signal(sig.os_signal(), handle_signal);
        }
    }
}
|
use rustc_serialize::json;
use ::xflow::errors::*;
/// Directed edge between two node ids: `(source, target)`
/// (see `get_out_edges`/`get_in_edges`, which match on `.0`/`.1`).
pub type XFlowEdge = (i32, i32);
// Automatically generate `RustcDecodable` and `RustcEncodable` trait
// implementations
/// Top-level flow document: metadata plus nodes, edges and branches.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowStruct {
pub id: String,
pub version: i32,
pub name: String,
pub requirements: Vec<XFlowRequirement>,
pub variables: XFlowVariables,
pub nodes: Vec<XFlowNode>,
pub edges: Vec<XFlowEdge>,
pub branches: Vec<XFlowBranch>,
}
/// A capability the flow requires, by type name and version.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowRequirement {
pub xtype: String,
pub version: i32,
}
/// A variable declaration (name and type) without a value.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowVariableDefinition {
pub name: String,
pub vtype: String,
}
/// A variable with a concrete (string-encoded) value.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowVariable {
pub name: String,
pub vtype: String,
pub value: String,
}
/// The flow's variable scopes: inputs, locals and (value-less) outputs.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowVariables {
pub input: Vec<XFlowVariable>,
pub local: Vec<XFlowVariable>,
pub output: Vec<XFlowVariableDefinition>,
}
/// A single node in the flow graph, addressed by numeric `id`.
#[derive(RustcDecodable, RustcEncodable, Debug, Eq, PartialEq)]
pub struct XFlowNode {
pub id: i32,
pub nodetype: String,
pub label: String,
pub action: String,
}
/// A named branch attached to a specific edge.
#[derive(RustcDecodable, RustcEncodable, Debug, Eq, PartialEq)]
pub struct XFlowBranch {
pub name: String,
pub edge: XFlowEdge,
}
impl XFlowStruct {
/// Constructs a new `XFlowStruct`
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// println!("XFlow version {}", xfs.id);
/// ```
pub fn new() -> XFlowStruct {
XFlowStruct {
id: "".to_string(),
name: "".to_string(),
version: 1,
requirements: Vec::<XFlowRequirement>::new(),
variables: XFlowVariables {
input: Vec::<XFlowVariable>::new(),
local: Vec::<XFlowVariable>::new(),
output: Vec::<XFlowVariableDefinition>::new(),
},
nodes: Vec::<XFlowNode>::new(),
edges: Vec::<XFlowEdge>::new(),
branches: Vec::<XFlowBranch>::new(),
}
}
/// Return a string representation of the XFlowStruct
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// xfs.to_string();
/// ```
pub fn to_string(&self) -> String {
format!("xflow {}", self.id)
}
/// Get `XFlowNode`s of `nodetype` and `action`
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// xfs.get_nodes_by("flow", "start");
/// ```
pub fn get_nodes_by(&self, nodetype:&str, action:&str) -> Vec<&XFlowNode> {
let res:Vec<&XFlowNode> = self.nodes.iter().filter({|node|
node.nodetype == nodetype &&
node.action == action
}).collect();
res
}
/// Get `XFlowNode`s of `nodetype`
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// xfs.get_nodes_of_type("flow");
/// ```
pub fn get_nodes_of_type(&self, nodetype:&str) -> Vec<&XFlowNode> {
let res:Vec<&XFlowNode> = self.nodes.iter().filter({|node|
node.nodetype == nodetype
}).collect();
res
}
/// Return a JSON representation of the XFlowStruct
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// xfs.to_json();
/// ```
pub fn to_json(&self) -> String {
json::encode(&self).unwrap()
}
/// Initialize a XFlowStruct from a JSON string
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
///
/// let empty_flow = "{\"id\":\"empty\",\"name\":\"empty\",\"version\":1,\"requirements\":[{\"xtype\":\"flow\",\"version\":1},{\"xtype\":\"flox\",\"version\":1}],\"variables\":{\"input\":[],\"output\":[],\"local\":[]},\"nodes\":[],\"edges\":[],\"branches\":[]}";
///
/// let xfs = XFlowStruct::from_json(empty_flow);
/// println!("XFlow has version {}", xfs.version);
/// ```
pub fn from_json(json_string:&str) -> XFlowStruct {
// NOTE(review): panics (unwrap) on malformed JSON.
let xfs:XFlowStruct = json::decode(json_string).unwrap();
xfs
}
/// All edges that terminate at `node` (edge target `.1` matches `node.id`).
pub fn get_in_edges(&self, node:&XFlowNode) -> Vec<&XFlowEdge> {
let res:Vec<&XFlowEdge> = self.edges.iter().filter({|edge|
edge.1 == node.id
}).collect();
res
}
/// All edges that originate at `node` (edge source `.0` matches `node.id`).
pub fn get_out_edges(&self, node:&XFlowNode) -> Vec<&XFlowEdge> {
let res:Vec<&XFlowEdge> = self.edges.iter().filter({|edge|
edge.0 == node.id
}).collect();
res
}
/// All branches attached to exactly this `edge` (both endpoints equal).
pub fn get_branches_for(&self, edge:&XFlowEdge) -> Vec<&XFlowBranch> {
let res:Vec<&XFlowBranch> = self.branches.iter().filter({|branch|
edge.0 == branch.edge.0 &&
edge.1 == branch.edge.1
}).collect();
res
}
/// The unique entry node (`nodetype == "flow"`, `action == "start"`), or an
/// error when there is none or more than one.
pub fn get_entry_node(&self) -> Result<&XFlowNode, XFlowError> {
let res = self.get_nodes_by("flow", "start");
match res.len() {
0 => Err(XFlowError::NoEntryNode),
1 => Ok(res[0]),
_ => Err(XFlowError::MultipleEntryNodes),
}
}
/// All terminal nodes (`nodetype == "flow"`, `action == "end"`); an error
/// when there are none.
pub fn get_terminal_nodes(&self) -> Result<Vec<&XFlowNode>, XFlowError> {
let res = self.get_nodes_by("flow", "end");
match res.len() {
0 => Err(XFlowError::NoTerminalNode),
_ => Ok(res),
}
}
}
Add minor doctests
use rustc_serialize::json;
use ::xflow::errors::*;
/// Directed edge between two node ids: `(source, target)`
/// (see `get_out_edges`/`get_in_edges`, which match on `.0`/`.1`).
pub type XFlowEdge = (i32, i32);
// Automatically generate `RustcDecodable` and `RustcEncodable` trait
// implementations
/// Top-level flow document: metadata plus nodes, edges and branches.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowStruct {
pub id: String,
pub version: i32,
pub name: String,
pub requirements: Vec<XFlowRequirement>,
pub variables: XFlowVariables,
pub nodes: Vec<XFlowNode>,
pub edges: Vec<XFlowEdge>,
pub branches: Vec<XFlowBranch>,
}
/// A capability the flow requires, by type name and version.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowRequirement {
pub xtype: String,
pub version: i32,
}
/// A variable declaration (name and type) without a value.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowVariableDefinition {
pub name: String,
pub vtype: String,
}
/// A variable with a concrete (string-encoded) value.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowVariable {
pub name: String,
pub vtype: String,
pub value: String,
}
/// The flow's variable scopes: inputs, locals and (value-less) outputs.
#[derive(RustcDecodable, RustcEncodable, Debug)]
pub struct XFlowVariables {
pub input: Vec<XFlowVariable>,
pub local: Vec<XFlowVariable>,
pub output: Vec<XFlowVariableDefinition>,
}
/// A single node in the flow graph, addressed by numeric `id`.
#[derive(RustcDecodable, RustcEncodable, Debug, Eq, PartialEq)]
pub struct XFlowNode {
pub id: i32,
pub nodetype: String,
pub label: String,
pub action: String,
}
/// A named branch attached to a specific edge.
#[derive(RustcDecodable, RustcEncodable, Debug, Eq, PartialEq)]
pub struct XFlowBranch {
pub name: String,
pub edge: XFlowEdge,
}
impl XFlowStruct {
/// Constructs a new `XFlowStruct`
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// println!("XFlow version {}", xfs.id);
/// ```
pub fn new() -> XFlowStruct {
XFlowStruct {
id: "".to_string(),
name: "".to_string(),
version: 1,
requirements: Vec::<XFlowRequirement>::new(),
variables: XFlowVariables {
input: Vec::<XFlowVariable>::new(),
local: Vec::<XFlowVariable>::new(),
output: Vec::<XFlowVariableDefinition>::new(),
},
nodes: Vec::<XFlowNode>::new(),
edges: Vec::<XFlowEdge>::new(),
branches: Vec::<XFlowBranch>::new(),
}
}
/// Return a string representation of the XFlowStruct
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// xfs.to_string();
/// ```
pub fn to_string(&self) -> String {
format!("xflow {}", self.id)
}
/// Get `XFlowNode`s of `nodetype` and `action`
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// let nodes = xfs.get_nodes_by("flow", "start");
/// assert_eq!(nodes.len(), 0);
/// ```
pub fn get_nodes_by(&self, nodetype:&str, action:&str) -> Vec<&XFlowNode> {
let res:Vec<&XFlowNode> = self.nodes.iter().filter({|node|
node.nodetype == nodetype &&
node.action == action
}).collect();
res
}
/// Get `XFlowNode`s of `nodetype`
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// let nodes = xfs.get_nodes_of_type("flow");
/// assert_eq!(nodes.len(), 0);
/// ```
pub fn get_nodes_of_type(&self, nodetype:&str) -> Vec<&XFlowNode> {
let res:Vec<&XFlowNode> = self.nodes.iter().filter({|node|
node.nodetype == nodetype
}).collect();
res
}
/// Return a JSON representation of the XFlowStruct
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
/// let xfs = XFlowStruct::new();
/// xfs.to_json();
/// ```
pub fn to_json(&self) -> String {
json::encode(&self).unwrap()
}
/// Initialize a XFlowStruct from a JSON string
///
/// # Example
/// ```
/// use xfdocs::xflow::xfstruct::{XFlowStruct};
///
/// let empty_flow = "{\"id\":\"empty\",\"name\":\"empty\",\"version\":1,\"requirements\":[{\"xtype\":\"flow\",\"version\":1},{\"xtype\":\"flox\",\"version\":1}],\"variables\":{\"input\":[],\"output\":[],\"local\":[]},\"nodes\":[],\"edges\":[],\"branches\":[]}";
///
/// let xfs = XFlowStruct::from_json(empty_flow);
/// println!("XFlow has version {}", xfs.version);
/// ```
pub fn from_json(json_string:&str) -> XFlowStruct {
let xfs:XFlowStruct = json::decode(json_string).unwrap();
xfs
}
pub fn get_in_edges(&self, node:&XFlowNode) -> Vec<&XFlowEdge> {
let res:Vec<&XFlowEdge> = self.edges.iter().filter({|edge|
edge.1 == node.id
}).collect();
res
}
pub fn get_out_edges(&self, node:&XFlowNode) -> Vec<&XFlowEdge> {
let res:Vec<&XFlowEdge> = self.edges.iter().filter({|edge|
edge.0 == node.id
}).collect();
res
}
pub fn get_branches_for(&self, edge:&XFlowEdge) -> Vec<&XFlowBranch> {
let res:Vec<&XFlowBranch> = self.branches.iter().filter({|branch|
edge.0 == branch.edge.0 &&
edge.1 == branch.edge.1
}).collect();
res
}
pub fn get_entry_node(&self) -> Result<&XFlowNode, XFlowError> {
let res = self.get_nodes_by("flow", "start");
match res.len() {
0 => Err(XFlowError::NoEntryNode),
1 => Ok(res[0]),
_ => Err(XFlowError::MultipleEntryNodes),
}
}
pub fn get_terminal_nodes(&self) -> Result<Vec<&XFlowNode>, XFlowError> {
let res = self.get_nodes_by("flow", "end");
match res.len() {
0 => Err(XFlowError::NoTerminalNode),
_ => Ok(res),
}
}
}
|
// Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deny(missing_docs)]
extern crate gfx_core;
extern crate gfx_device_gl;
extern crate glutin;
use gfx_core::{format, handle};
use gfx_core::tex::Size;
/// Initialize with a window builder.
/// Generically parametrized version over the main framebuffer format.
///
/// Delegates to `init_raw` with the formats derived from `Cf`/`Df`, then
/// wraps the raw target views in their typed (`Phantom`) equivalents.
pub fn init<Cf, Df>(builder: glutin::WindowBuilder) ->
(glutin::Window, gfx_device_gl::Device, gfx_device_gl::Factory,
handle::RenderTargetView<gfx_device_gl::Resources, Cf>,
handle::DepthStencilView<gfx_device_gl::Resources, Df>)
where
Cf: format::RenderFormat,
Df: format::DepthFormat,
{
use gfx_core::factory::Phantom;
let (window, device, factory, color_view, ds_view) = init_raw(builder, Cf::get_format(), Df::get_format());
(window, device, factory, Phantom::new(color_view), Phantom::new(ds_view))
}
/// Initialize with a window builder. Raw version.
///
/// Derives the window's pixel/depth/stencil configuration from the requested
/// color and depth-stencil formats, builds the window, makes its GL context
/// current, creates the GL device/factory, and builds the main render targets
/// sized to the window.
pub fn init_raw(builder: glutin::WindowBuilder,
color_format: format::Format, ds_format: format::Format) ->
(glutin::Window, gfx_device_gl::Device, gfx_device_gl::Factory,
handle::RawRenderTargetView<gfx_device_gl::Resources>,
handle::RawDepthStencilView<gfx_device_gl::Resources>)
{
let window = {
// Split the total bit counts into color/alpha and depth/stencil parts.
let color_total_bits = color_format.0.get_total_bits();
let alpha_bits = color_format.0.get_alpha_stencil_bits();
let depth_total_bits = ds_format.0.get_total_bits();
let stencil_bits = ds_format.0.get_alpha_stencil_bits();
builder
.with_depth_buffer(depth_total_bits - stencil_bits)
.with_stencil_buffer(stencil_bits)
.with_pixel_format(color_total_bits - alpha_bits, alpha_bits)
.with_srgb(Some(color_format.1 == format::ChannelType::Srgb))
.build()
}.unwrap()
;
unsafe { window.make_current().unwrap() };
let (device, factory) = gfx_device_gl::create(|s|
window.get_proc_address(s) as *const std::os::raw::c_void);
// create the main color/depth targets
let (width, height) = window.get_inner_size().unwrap();
let aa = window.get_pixel_format().multisampling
.unwrap_or(0) as gfx_core::tex::NumSamples;
let dim = (width as Size, height as Size, 1, aa.into());
let (color_view, ds_view) = gfx_device_gl::create_main_targets(dim, color_format.0, ds_format.0);
// done
(window, device, factory, color_view, ds_view)
}
Glutin - implemented update_views
// Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deny(missing_docs)]
extern crate gfx_core;
extern crate gfx_device_gl;
extern crate glutin;
use gfx_core::{format, handle, tex};
use gfx_device_gl::Resources as R;
/// Initialize with a window builder.
/// Generically parametrized version over the main framebuffer format.
///
/// Delegates to `init_raw` with the formats derived from `Cf`/`Df`, then
/// wraps the raw target views in their typed (`Phantom`) equivalents.
pub fn init<Cf, Df>(builder: glutin::WindowBuilder) ->
(glutin::Window, gfx_device_gl::Device, gfx_device_gl::Factory,
handle::RenderTargetView<R, Cf>, handle::DepthStencilView<R, Df>)
where
Cf: format::RenderFormat,
Df: format::DepthFormat,
{
use gfx_core::factory::Phantom;
let (window, device, factory, color_view, ds_view) = init_raw(builder, Cf::get_format(), Df::get_format());
(window, device, factory, Phantom::new(color_view), Phantom::new(ds_view))
}
/// Queries the window's current inner size and multisampling level, packed as
/// `tex::Dimensions`: (width, height, depth = 1, AA samples).
///
/// # Panics
/// Panics if the window reports no inner size (e.g. it has been closed).
fn get_window_dimensions(window: &glutin::Window) -> tex::Dimensions {
    // `expect` instead of a bare `unwrap` so a closed window gives a
    // diagnosable panic message.
    let (width, height) = window.get_inner_size()
        .expect("window has no inner size (has it been closed?)");
    let aa = window.get_pixel_format().multisampling
        .unwrap_or(0) as tex::NumSamples;
    (width as tex::Size, height as tex::Size, 1, aa.into())
}
/// Initialize with a window builder. Raw version.
///
/// Derives the window's pixel/depth/stencil configuration from the requested
/// color and depth-stencil formats, builds the window, makes its GL context
/// current, creates the GL device/factory, and builds the main render targets
/// sized to the window.
pub fn init_raw(builder: glutin::WindowBuilder,
color_format: format::Format, ds_format: format::Format) ->
(glutin::Window, gfx_device_gl::Device, gfx_device_gl::Factory,
handle::RawRenderTargetView<R>, handle::RawDepthStencilView<R>)
{
let window = {
// Split the total bit counts into color/alpha and depth/stencil parts.
let color_total_bits = color_format.0.get_total_bits();
let alpha_bits = color_format.0.get_alpha_stencil_bits();
let depth_total_bits = ds_format.0.get_total_bits();
let stencil_bits = ds_format.0.get_alpha_stencil_bits();
builder
.with_depth_buffer(depth_total_bits - stencil_bits)
.with_stencil_buffer(stencil_bits)
.with_pixel_format(color_total_bits - alpha_bits, alpha_bits)
.with_srgb(Some(color_format.1 == format::ChannelType::Srgb))
.build()
}.unwrap();
unsafe { window.make_current().unwrap() };
let (device, factory) = gfx_device_gl::create(|s|
window.get_proc_address(s) as *const std::os::raw::c_void);
// create the main color/depth targets
let dim = get_window_dimensions(&window);
let (color_view, ds_view) = gfx_device_gl::create_main_targets(dim, color_format.0, ds_format.0);
// done
(window, device, factory, color_view, ds_view)
}
/// Update the internal dimensions of the main framebuffer targets. Generic version over the format.
///
/// The color and depth views must currently agree on their dimensions
/// (asserted). If the window size has changed, both views are replaced with
/// freshly created, correctly sized targets; otherwise they are untouched.
pub fn update_views<Cf, Df>(window: &glutin::Window, color_view: &mut handle::RenderTargetView<R, Cf>,
ds_view: &mut handle::DepthStencilView<R, Df>)
where
Cf: format::RenderFormat,
Df: format::DepthFormat,
{
use gfx_core::factory::Phantom;
let dim = color_view.get_dimensions();
assert_eq!(dim, ds_view.get_dimensions());
if let Some((cv, dv)) = update_views_raw(window, dim, Cf::get_format(), Df::get_format()) {
*color_view = Phantom::new(cv);
*ds_view = Phantom::new(dv);
}
}
/// Return new main target views if the window resolution has changed from the
/// old dimensions; `None` when the size is unchanged.
pub fn update_views_raw(window: &glutin::Window, old_dimensions: tex::Dimensions,
                        color_format: format::Format, ds_format: format::Format)
                        -> Option<(handle::RawRenderTargetView<R>, handle::RawDepthStencilView<R>)>
{
    let current = get_window_dimensions(window);
    if current == old_dimensions {
        None
    } else {
        Some(gfx_device_gl::create_main_targets(current, color_format.0, ds_format.0))
    }
}
|
use std::fmt;
use std::io;
use lmdb_zero::{self, ConstAccessor, Database, EnvBuilder, Environment, ReadTransaction,
WriteTransaction, error, open, put};
use hex_slice::AsHex;
use tempdir::TempDir;
use data::Seqno;
use replica::Log;
use time::PreciseTime;
use std::sync::Arc;
use std::iter;
// Approximate structure of the log.
//
// The fun part is that each sequence number is Some(n) where n points to
// the *next* unwritten log slot. Unless it's unset, in which case we use
// None. This is ... suspect, and what happens when you just bang out code. It
// originally originated though, because at the time, the `Log#seqno` method
// was used solely to figure out the next slot that we should be writing to.
// However, we also use this very same logic to understand where we are in the
// replication stream. When we attempt to write say, two entries, we end up
// publishing the following messages:
//
// Prepare(0, "a")
// Prepare(1, "b")
// CommitTo(2)
//
// Which seems great, but we've just told the downstream replica to commit to
// an entry that hasn't been written yet. So we've just told the downstream to
// commit an unwritten slot, which triggers an `assert!` (the nearest I get to Hoare
// triples), and the whole castle comes tumbling down.
// Log Entries
//
// R R R Replicas
// ┌──┐ 0 1 2
// │ │ ◀─┐ 0 Head
// │00│ X X O │ 1 Middle
// │ 1│ X X O └───R2.Commit 2 Tail
// │ 2│ X X O
// │ 3│ X X O
// │ 4│ X X ◀─────R2.Prepare
// │ 5│ X X
// │ 6│ X X
// │ 7│ X X
// │ 8│ X O ◀─────R1.Commit
// │ 9│ X O
// │10│ X O
// │ 1│ X O
// │ 2│ O O ◀─────R0.Commit
// │ 3│ O O
// │ 4│ O O
// │ 5│ O O
// │ 6│ O ◀─────R1.Prepare
// │ 7│ O
// │ 8│ O
// │ 9│ O
// └──┘ ◀─────R0.Prepare
// Error plumbing generated by `error_chain!`: wraps I/O and LMDB failures and
// defines the log-specific error variants below.
error_chain! {
types {
Error, ErrorKind, ChainErr, Result;
}
links {}
foreign_links {
io::Error, Io, "I/O error";
lmdb_zero::Error, Lmdb, "Db error";
}
errors {
// A prepare arrived at an unexpected sequence number.
BadSequence(saw: Seqno, expected: Seqno)
// A commit was requested beyond the highest prepared sequence number.
BadCommit(saw: Seqno, expected: Seqno)
// A named database/column family was missing.
MissingCf(cf: String)
}
}
// Fixed LMDB map size. NOTE(review): the original comment said "1TGB", but
// 1 << 24 is 16 MiB — confirm which size was intended. (Constant name also
// misspells "ARBITRARILY"; kept as-is since it is referenced below.)
const ARBITARILY_LARGE: usize = 1 << 24;
/// Durable replication log backed by an LMDB environment living in a
/// temporary directory. (The name predates the move from RocksDB to LMDB.)
pub struct RocksdbLog {
dir: TempDir,
env: Arc<Environment>,
// Cached highest prepared seqno, if any; kept in sync by `prepare`.
seqno_prepared: Option<Seqno>,
}
// Database names and metadata keys used inside the LMDB environment.
const META: &'static str = "meta";
const DATA: &'static str = "data";
const META_PREPARED: &'static str = "prepared";
const META_COMMITTED: &'static str = "committed";
// Manual Debug: only the directory path is shown; the LMDB environment and
// cached seqno are omitted.
impl fmt::Debug for RocksdbLog {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("RocksdbLog").field("dir", &self.dir.path()).finish()
}
}
fn open_db<'a>(env: &'a Environment, name: &str) -> Result<Database<'a>> {
let db = try!(Database::open(env,
Some(name),
&lmdb_zero::DatabaseOptions::new(lmdb_zero::db::CREATE)));
Ok(db)
}
impl RocksdbLog {
/// Creates a fresh log in a new temporary directory with a fixed-size LMDB
/// map, ensures both databases exist, and recovers the prepared watermark.
pub fn new() -> Result<RocksdbLog> {
let d = try!(TempDir::new("rocksdb-log"));
info!("DB path: {:?}", d.path());
let mut b = try!(EnvBuilder::new());
try!(b.set_maxdbs(3));
try!(b.set_mapsize(ARBITARILY_LARGE));
let env = unsafe {
try!(b.open(d.path().to_str().expect("string"),
open::Flags::empty(),
0o777))
};
let seqno_prepared = {
// Opening both databases up front also creates them if absent.
let meta = try!(open_db(&env, META));
let _ = try!(open_db(&env, DATA));
let txn = try!(ReadTransaction::new(&env));
try!(Self::do_read_seqno(&meta, &txn.access(), META_PREPARED))
};
Ok(RocksdbLog {
dir: d,
env: Arc::new(env),
seqno_prepared: seqno_prepared,
})
}
// Handle to the metadata database.
fn meta(env: &Environment) -> Result<Database> {
Ok(try!(open_db(&env, META)))
}
// Handle to the data (log entry) database.
fn data(env: &Environment) -> Result<Database> {
Ok(try!(open_db(&env, DATA)))
}
// Reads the seqno stored under `name`; a missing key maps to `Ok(None)`.
fn do_read_seqno(meta: &Database, txn: &ConstAccessor, name: &str) -> Result<Option<Seqno>> {
let val = {
let val = match txn.get(meta, name) {
Ok(val) => Some(Seqno::fromkey(val)),
Err(e) if e.code == error::NOTFOUND => None,
Err(e) => return Err(e.into()),
};
val
};
Ok(val)
}
// Convenience: read a metadata seqno inside a fresh read transaction.
fn read_seqno(&self, name: &str) -> Result<Option<Seqno>> {
let meta = Self::meta(&self.env);
let txn = try!(ReadTransaction::new(&self.env));
Self::do_read_seqno(&try!(meta), &txn.access(), name)
}
// Advances the committed watermark to `commit_seqno` inside one write
// transaction, rejecting commits past the prepared watermark and skipping
// the write when already up to date.
fn do_commit_to(env: &Environment, commit_seqno: Seqno) -> Result<()> {
debug!("Commit {:?}", commit_seqno);
let meta = try!(Self::meta(&env));
let key = Seqno::tokey(&commit_seqno);
let t0 = PreciseTime::now();
let txn = try!(WriteTransaction::new(&env));
{
let mut accessor = txn.access();
let prepared = try!(Self::do_read_seqno(&meta, &accessor, META_PREPARED));
let committed = try!(Self::do_read_seqno(&meta, &accessor, META_COMMITTED));
// NOTE(review): `committed` is passed twice below; the first argument
// was presumably meant to be `commit_seqno` — confirm before changing.
debug!("Committing: {:?}, committed, {:?}, prepared: {:?}",
committed,
committed,
prepared);
if let Some(p) = prepared {
if p < commit_seqno {
return Err(ErrorKind::BadCommit(p, commit_seqno).into());
}
}
if committed == Some(commit_seqno) {
debug!("Skipping, commits up to date");
return Ok(());
}
try!(accessor.put(&meta, META_COMMITTED.as_bytes(), &key, put::Flags::empty()));
}
try!(txn.commit());
let t1 = PreciseTime::now();
debug!("Committed {:?} in: {}", commit_seqno, t0.to(t1));
Ok(())
}
// Lifecycle hooks; nothing to do for this backend.
pub fn stop(self) {}
pub fn quiesce(&self) {}
}
impl Log for RocksdbLog {
type Cursor = RocksdbCursor;
type Error = Error;
// Next unwritten slot: successor of the prepared watermark, or zero when
// nothing has been prepared yet.
fn seqno(&self) -> Result<Seqno> {
let prepared = try!(self.read_prepared());
Ok(prepared.as_ref().map(Seqno::succ).unwrap_or_else(Seqno::zero))
}
// Served from the in-memory cache rather than the database.
fn read_prepared(&self) -> Result<Option<Seqno>> {
Ok(self.seqno_prepared.clone())
}
fn read_committed(&self) -> Result<Option<Seqno>> {
self.read_seqno(META_COMMITTED)
}
// Cursor starting at `seqno`, sharing this log's environment handle.
fn read_from(&self, seqno: Seqno) -> Result<RocksdbCursor> {
Ok(RocksdbCursor(self.env.clone(), seqno))
}
// Appends `data_bytes` at exactly the next expected slot; both the entry
// and the prepared watermark are written in one transaction.
fn prepare(&mut self, seqno: Seqno, data_bytes: &[u8]) -> Result<()> {
trace!("Prepare {:?}", seqno);
let key = Seqno::tokey(&seqno);
let next = try!(self.seqno());
if seqno != next {
return Err(ErrorKind::BadSequence(seqno, next).into());
}
// NOTE(review): leftover RocksDB-era duplicate-entry check, kept disabled.
// match self.db.get_cf(Self::data(&db), &key.as_ref()) {
// Ok(None) => (),
// Err(e) => panic!("Unexpected error reading index: {:?}: {:?}", seqno, e),
// Ok(_) => panic!("Unexpected entry at seqno: {:?}", seqno),
// };
let t0 = PreciseTime::now();
let data = try!(Self::data(&self.env));
let meta = try!(Self::meta(&self.env));
let txn = try!(WriteTransaction::new(&self.env));
{
let mut access = txn.access();
// NOOVERWRITE makes a duplicate prepare fail instead of clobbering.
try!(access.put(&data, &key, data_bytes, put::NOOVERWRITE));
try!(access.put(&meta, META_PREPARED, &key, put::Flags::empty()));
}
try!(txn.commit());
drop(meta);
drop(data);
let t1 = PreciseTime::now();
trace!("Prepare: {}", t0.to(t1));
// Keep the in-memory cache in step with what was just persisted.
self.seqno_prepared = Some(seqno);
trace!("Watermarks: prepared: {:?}; committed: {:?}",
self.read_seqno(META_PREPARED),
self.read_seqno(META_COMMITTED));
Ok(())
}
// Advances the committed watermark; returns whether anything changed.
fn commit_to(&mut self, seqno: Seqno) -> Result<bool> {
trace!("Request commit upto: {:?}", seqno);
let committed = try!(self.read_seqno(META_COMMITTED));
if committed.map(|c| c < seqno).unwrap_or(true) {
trace!("Request to commit {:?} -> {:?}", committed, seqno);
try!(Self::do_commit_to(&self.env, seqno));
Ok(true)
} else {
trace!("Request to commit {:?} -> {:?}; no-op", committed, seqno);
Ok(false)
}
}
}
/// Cursor over log entries: shared environment handle plus the next seqno to read.
#[derive(Debug)]
pub struct RocksdbCursor(Arc<Environment>, Seqno);
impl RocksdbCursor {
    /// Reads the next entry at or after the cursor's current seqno.
    ///
    /// Uses a range seek, so gaps in the sequence are skipped. On a
    /// successful read the cursor advances to the successor of the seqno
    /// actually found; `Ok(None)` signals the end of the log.
    fn read_next(&mut self) -> Result<Option<(Seqno, Vec<u8>)>> {
        let &mut RocksdbCursor(ref env, ref mut seqno) = self;
        let data = try!(RocksdbLog::data(&env));
        let txn = try!(ReadTransaction::new(&env));
        let key = Seqno::tokey(&seqno);
        let mut cursor = try!(txn.cursor(&data).chain_err(|| "get cursor"));
        debug!("Attempt read from: {:?}/{:x}", seqno, key.as_hex());
        let ret = match try!(mdb_maybe(cursor.seek_range_k::<[u8], [u8]>(&txn.access(), &key))) {
            Some((k, v)) => {
                let read_seq = Seqno::fromkey(k);
                debug!("Read from: {:?}/{:x}", read_seq, key.as_hex());
                // Advance past the entry we just returned.
                *seqno = read_seq.succ();
                (read_seq, v.to_vec())
            }
            None => return Ok(None),
        };
        Ok(Some(ret))
    }
}
impl iter::Iterator for RocksdbCursor {
    type Item = Result<(Seqno, Vec<u8>)>;

    /// Adapts `read_next` to iterator shape: failures become `Some(Err(..))`,
    /// end of log becomes `None`.
    fn next(&mut self) -> Option<Self::Item> {
        match self.read_next() {
            Ok(Some(entry)) => Some(Ok(entry)),
            Ok(None) => None,
            Err(e) => Some(Err(e)),
        }
    }
}
fn mdb_maybe<T>(res: ::std::result::Result<T, lmdb_zero::Error>)
-> ::std::result::Result<Option<T>, lmdb_zero::Error> {
match res {
Ok(kv) => Ok(Some(kv)),
Err(e) if e.code == error::NOTFOUND => Ok(None),
Err(e) => Err(e),
}
}
Mostly implement auto-resizing the lmdb map allocation.
use std::fmt;
use std::io;
use lmdb_zero::{self, ConstAccessor, Database, EnvBuilder, Environment, ReadTransaction,
WriteTransaction, error, open, put};
use hex_slice::AsHex;
use tempdir::TempDir;
use data::Seqno;
use replica::Log;
use time::PreciseTime;
use std::sync::Arc;
use std::{iter,result};
// Approximate structure of the log.
//
// The fun part is that each sequence number is Some(n) where n points to
// the *next* unwritten log slot. Unless it's unset, in which case we use
// None. This is ... suspect, and what happens when you just bang out code. It
// originally originated though, because at the time, the `Log#seqno` method
// was used solely to figure out the next slot that we should be writing to.
// However, we also use this very same logic to understand where we are in the
// replication stream. When we attempt to write say, two entries, we end up
// publishing the following messages:
//
// Prepare(0, "a")
// Prepare(1, "b")
// CommitTo(2)
//
// Which seems great, but we've just told the downstream replica to commit to
// an entry that hasn't been written yet. So we've just told the downstream to
// commit an unwritten slot, which triggers an `assert!` (the nearest I get to Hoare
// triples), and the whole castle comes tumbling down.
// Log Entries
//
// R R R Replicas
// ┌──┐ 0 1 2
// │ │ ◀─┐ 0 Head
// │00│ X X O │ 1 Middle
// │ 1│ X X O └───R2.Commit 2 Tail
// │ 2│ X X O
// │ 3│ X X O
// │ 4│ X X ◀─────R2.Prepare
// │ 5│ X X
// │ 6│ X X
// │ 7│ X X
// │ 8│ X O ◀─────R1.Commit
// │ 9│ X O
// │10│ X O
// │ 1│ X O
// │ 2│ O O ◀─────R0.Commit
// │ 3│ O O
// │ 4│ O O
// │ 5│ O O
// │ 6│ O ◀─────R1.Prepare
// │ 7│ O
// │ 8│ O
// │ 9│ O
// └──┘ ◀─────R0.Prepare
// Error plumbing generated by `error_chain!`: wraps I/O and LMDB failures and
// defines the log-specific error variants below.
error_chain! {
types {
Error, ErrorKind, ChainErr, Result;
}
links {}
foreign_links {
io::Error, Io, "I/O error";
lmdb_zero::Error, Lmdb, "Db error";
}
errors {
// A prepare arrived at an unexpected sequence number.
BadSequence(saw: Seqno, expected: Seqno)
// A commit was requested beyond the highest prepared sequence number.
BadCommit(saw: Seqno, expected: Seqno)
// A named database/column family was missing.
MissingCf(cf: String)
}
}
// Initial LMDB map size (1 << 10 = 1 KiB); deliberately tiny, since the map
// is doubled on demand by `auto_expand_map` whenever LMDB reports MAP_FULL.
const INITIAL_MAP_SIZE: usize = 1 << 10;
/// Durable replication log backed by an LMDB environment living in a
/// temporary directory. (The name predates the move from RocksDB to LMDB.)
pub struct RocksdbLog {
dir: TempDir,
env: Arc<Environment>,
// Cached highest prepared seqno, if any.
seqno_prepared: Option<Seqno>,
}
// Database names and metadata keys used inside the LMDB environment.
const META: &'static str = "meta";
const DATA: &'static str = "data";
const META_PREPARED: &'static str = "prepared";
const META_COMMITTED: &'static str = "committed";
// Manual Debug: only the directory path is shown; the LMDB environment and
// cached seqno are omitted.
impl fmt::Debug for RocksdbLog {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("RocksdbLog").field("dir", &self.dir.path()).finish()
}
}
// Result alias for operations that can only fail with a raw LMDB error
// (as opposed to the error_chain `Result` used at the public API).
type LmdbResult<T> = result::Result<T, lmdb_zero::Error>;
/// Opens (creating on first use) the named database inside `env`.
fn open_db<'a>(env: &'a Environment, name: &str) -> LmdbResult<Database<'a>> {
let db = try!(Database::open(env,
Some(name),
&lmdb_zero::DatabaseOptions::new(lmdb_zero::db::CREATE)));
Ok(db)
}
// Build an LMDB environment at `path` with the given map size, allowing up to
// three named databases.
fn open_env(path: &str, map_size: usize) -> LmdbResult<Environment> {
    let mut b = try!(EnvBuilder::new());
    try!(b.set_maxdbs(3));
    try!(b.set_mapsize(map_size));
    // `EnvBuilder::open` is `unsafe` in lmdb-zero; see its documentation for
    // the invariants the caller must uphold — TODO confirm they hold here.
    let env = unsafe {
        try!(b.open(path,
                    open::Flags::empty(),
                    0o777))
    };
    Ok(env)
}
/// Run `f` against `env`, transparently growing the LMDB map when it fails
/// with `MAP_FULL`: the environment is re-opened at double the previous map
/// size and `f` is retried until it succeeds or fails for another reason.
fn auto_expand_map<R, F: FnMut(&Arc<Environment>) -> LmdbResult<R>>(env: &mut Arc<Environment>, mut f: F) -> LmdbResult<R> {
    loop {
        match f(&*env) {
            // Out of space: fall through and expand the map below.
            Err(e) if e.code == lmdb_zero::error::MAP_FULL => (),
            // Success, or any other error, is returned as-is.
            other => return other,
        }
        let info = try!(env.info());
        // We assume that because the path is passed in as a &str, it's
        // already correct utf8.
        let path = try!(env.path()).to_str().expect("db path to_str").to_string();
        let next_size = info.mapsize * 2;
        info!("Re-opening {:?} with map-size {:?}b", path, next_size);
        let new = Arc::new(try!(open_env(&path, next_size)));
        // Swap in the enlarged environment *before* logging it, so the retry
        // message describes the environment we are about to use (previously
        // this logged the old, full environment).
        *env = new;
        info!("Retrying... with: {:?}", env);
    }
}
impl RocksdbLog {
    /// Create a fresh log in a new temporary directory, recovering the
    /// prepared watermark from the `meta` database if one already exists.
    pub fn new() -> Result<RocksdbLog> {
        let d = try!(TempDir::new("rocksdb-log"));
        info!("DB path: {:?}", d.path());
        let mut env = Arc::new(try!(open_env(d.path().to_str().expect("string"), INITIAL_MAP_SIZE)));
        let seqno_prepared = try!(auto_expand_map(&mut env, |env| {
            // Opening with db::CREATE ensures both databases exist up front.
            let meta = try!(open_db(&env, META));
            let _ = try!(open_db(&env, DATA));
            let txn = try!(ReadTransaction::new(&env));
            Self::do_read_seqno(&meta, &txn.access(), META_PREPARED)
        }));
        Ok(RocksdbLog {
            dir: d,
            env: env,
            seqno_prepared: seqno_prepared,
        })
    }
    /// Handle to the `meta` (watermark) database.
    fn meta(env: &Environment) -> LmdbResult<Database> {
        Ok(try!(open_db(&env, META)))
    }
    /// Handle to the `data` (entry) database.
    fn data(env: &Environment) -> LmdbResult<Database> {
        Ok(try!(open_db(&env, DATA)))
    }
    /// Read the watermark stored under `name`; a missing key is `Ok(None)`,
    /// not an error.
    fn do_read_seqno(meta: &Database, txn: &ConstAccessor, name: &str) -> result::Result<Option<Seqno>, lmdb_zero::Error> {
        let val = {
            let val = match txn.get(meta, name) {
                Ok(val) => Some(Seqno::fromkey(val)),
                Err(e) if e.code == error::NOTFOUND => None,
                Err(e) => return Err(e.into()),
            };
            val
        };
        Ok(val)
    }
    /// Read the watermark `name` under a fresh read transaction.
    fn read_seqno(&self, name: &str) -> Result<Option<Seqno>> {
        let meta = Self::meta(&self.env);
        let txn = try!(ReadTransaction::new(&self.env));
        Ok(try!(Self::do_read_seqno(&try!(meta), &txn.access(), name)))
    }
    /// Advance the committed watermark to `commit_seqno`.
    ///
    /// Refuses (with `ErrorKind::BadCommit`) to commit past the prepared
    /// watermark: committing an unwritten slot would break downstream
    /// replicas (see the commentary at the top of this module).
    fn do_commit_to(env: &mut Arc<Environment>, commit_seqno: Seqno) -> Result<()> {
        debug!("Commit {:?}", commit_seqno);
        let key = Seqno::tokey(&commit_seqno);
        let t0 = PreciseTime::now();
        // `auto_expand_map`'s closure must return an LmdbResult, so domain
        // errors are smuggled out through this slot instead.
        let mut err = None;
        try!(auto_expand_map(env, |env| {
            let meta = try!(Self::meta(&env));
            let txn = try!(WriteTransaction::new(&env));
            {
                let mut accessor = txn.access();
                let prepared = try!(Self::do_read_seqno(&meta, &accessor, META_PREPARED));
                let committed = try!(Self::do_read_seqno(&meta, &accessor, META_COMMITTED));
                // BUG FIX: this previously passed `committed` twice; the
                // first placeholder is the seqno we are committing to.
                debug!("Committing: {:?}, committed, {:?}, prepared: {:?}",
                       commit_seqno,
                       committed,
                       prepared);
                if let Some(p) = prepared {
                    if p < commit_seqno {
                        err = Some(ErrorKind::BadCommit(p, commit_seqno));
                        return Ok(());
                    }
                }
                if committed == Some(commit_seqno) {
                    debug!("Skipping, commits up to date");
                    return Ok(());
                }
                try!(accessor.put(&meta, META_COMMITTED.as_bytes(), &key, put::Flags::empty()));
            }
            try!(txn.commit());
            Ok(())
        }));
        let t1 = PreciseTime::now();
        if let Some(e) = err {
            return Err(e.into())
        }
        debug!("Committed {:?} in: {}", commit_seqno, t0.to(t1));
        Ok(())
    }
    pub fn stop(self) {}
    pub fn quiesce(&self) {}
}
impl Log for RocksdbLog {
    type Cursor = RocksdbCursor;
    type Error = Error;
    /// Next free slot: the successor of the prepared watermark, or zero for
    /// an empty log.
    fn seqno(&self) -> Result<Seqno> {
        let prepared = try!(self.read_prepared());
        Ok(prepared.as_ref().map(Seqno::succ).unwrap_or_else(Seqno::zero))
    }
    /// Highest prepared seqno, served from the in-memory cache that
    /// `prepare` maintains (seeded from disk in `new`).
    fn read_prepared(&self) -> Result<Option<Seqno>> {
        Ok(self.seqno_prepared.clone())
    }
    /// Highest committed seqno, read from the `meta` database.
    fn read_committed(&self) -> Result<Option<Seqno>> {
        self.read_seqno(META_COMMITTED)
    }
    /// Cursor over entries starting at `seqno`.
    fn read_from(&self, seqno: Seqno) -> Result<RocksdbCursor> {
        Ok(RocksdbCursor(self.env.clone(), seqno))
    }
    /// Append `data_bytes` at `seqno`, which must be exactly the next free
    /// slot (`ErrorKind::BadSequence` otherwise); advances the prepared
    /// watermark on success.
    fn prepare(&mut self, seqno: Seqno, data_bytes: &[u8]) -> Result<()> {
        let key = Seqno::tokey(&seqno);
        debug!("Prepare {:?}", seqno);
        let next = try!(self.seqno());
        if seqno != next {
            return Err(ErrorKind::BadSequence(seqno, next).into());
        }
        // match self.db.get_cf(Self::data(&db), &key.as_ref()) {
        // Ok(None) => (),
        // Err(e) => panic!("Unexpected error reading index: {:?}: {:?}", seqno, e),
        // Ok(_) => panic!("Unexpected entry at seqno: {:?}", seqno),
        // };
        let t0 = PreciseTime::now();
        try!(auto_expand_map(&mut self.env, |env| {
            debug!("Write {:?}", seqno);
            let data = try!(Self::data(&env));
            let meta = try!(Self::meta(&env));
            let txn = try!(WriteTransaction::new(&env));
            {
                let mut access = txn.access();
                // NOOVERWRITE makes a double-write of the same slot an error.
                try!(access.put(&data, &key, data_bytes, put::NOOVERWRITE));
                try!(access.put(&meta, META_PREPARED, &key, put::Flags::empty()));
            }
            try!(txn.commit());
            Ok(())
        }));
        let t1 = PreciseTime::now();
        trace!("Prepared in: {}", t0.to(t1));
        self.seqno_prepared = Some(seqno);
        trace!("Watermarks: prepared: {:?}; committed: {:?}",
               self.read_seqno(META_PREPARED),
               self.read_seqno(META_COMMITTED));
        Ok(())
    }
    /// Advance the committed watermark; returns `true` if it moved forward,
    /// `false` if `seqno` was already covered.
    fn commit_to(&mut self, seqno: Seqno) -> Result<bool> {
        trace!("Request commit upto: {:?}", seqno);
        let committed = try!(self.read_seqno(META_COMMITTED));
        if committed.map(|c| c < seqno).unwrap_or(true) {
            trace!("Request to commit {:?} -> {:?}", committed, seqno);
            try!(Self::do_commit_to(&mut self.env, seqno));
            Ok(true)
        } else {
            trace!("Request to commit {:?} -> {:?}; no-op", committed, seqno);
            Ok(false)
        }
    }
}
// Cursor over log entries: the shared environment plus the next seqno to read.
#[derive(Debug)]
pub struct RocksdbCursor(Arc<Environment>, Seqno);
impl RocksdbCursor {
    /// Read the entry at-or-after the cursor position, advancing the cursor
    /// past whatever was found; `Ok(None)` signals end of log.
    fn read_next(&mut self) -> Result<Option<(Seqno, Vec<u8>)>> {
        let &mut RocksdbCursor(ref env, ref mut seqno) = self;
        let data = try!(RocksdbLog::data(&env));
        let txn = try!(ReadTransaction::new(&env));
        let key = Seqno::tokey(&seqno);
        let mut cursor = try!(txn.cursor(&data).chain_err(|| "get cursor"));
        debug!("Attempt read from: {:?}/{:x}", seqno, key.as_hex());
        // seek_range_k positions at the first key >= ours, so any gaps in
        // the sequence are skipped rather than reported as missing.
        let ret = match try!(mdb_maybe(cursor.seek_range_k::<[u8], [u8]>(&txn.access(), &key))) {
            Some((k, v)) => {
                let read_seq = Seqno::fromkey(k);
                debug!("Read from: {:?}/{:x}", read_seq, key.as_hex());
                *seqno = read_seq.succ();
                (read_seq, v.to_vec())
            }
            None => return Ok(None),
        };
        Ok(Some(ret))
        // if let Some((key, val)) = iter.next() {
        // let seqno = Seqno::fromkey(&key);
        // Some((seqno, val.to_vec()))
        // } else {
        // None
        // }
        //
    }
}
/// Streaming iterator over log entries; a read failure is yielded once as
/// `Some(Err(..))` rather than terminating silently.
impl iter::Iterator for RocksdbCursor {
    type Item = Result<(Seqno, Vec<u8>)>;
    fn next(&mut self) -> Option<Self::Item> {
        match self.read_next() {
            Ok(Some(entry)) => Some(Ok(entry)),
            Ok(None) => None,
            Err(e) => Some(Err(e)),
        }
    }
}
/// Map LMDB's NOTFOUND error to `Ok(None)`; every other outcome passes
/// through (successes wrapped in `Some`).
fn mdb_maybe<T>(res: ::std::result::Result<T, lmdb_zero::Error>)
                -> ::std::result::Result<Option<T>, lmdb_zero::Error> {
    match res {
        Err(e) if e.code == error::NOTFOUND => Ok(None),
        other => other.map(Some),
    }
}
|
use nom::{self, IResult, Err, Needed};
use frame::{self, ChannelAssignment};
use subframe::{self, Subframe, CodingMethod, PartitionedRiceContents};
use utility::{ErrorKind, power_of_two};
// Parser used to parse unary notation. Naming the parser `leading_zeros`
// felt clearer in the code: it tells the caller what the parser is actually
// doing, since unary can -- and more commonly does -- use leading ones.
pub fn leading_zeros(input: (&[u8], usize)) -> IResult<(&[u8], usize), u32> {
    let (bytes, mut offset) = input;
    let mut index = 0;
    let mut count = 0;
    let mut is_parsed = false;
    let bytes_len = bytes.len();
    for i in 0..bytes_len {
        // Clear the number of offset bits
        let byte = bytes[i] << offset;
        let zeros = byte.leading_zeros() as usize;
        index = i;
        if byte > 0 {
            // Found the terminating one bit: count the zeros before it and
            // consume the terminator too.
            is_parsed = true;
            count += zeros;
            offset += zeros + 1;
            if offset >= 8 {
                // The terminator was the final bit of this byte.
                index += 1;
                offset -= 8;
            }
            break;
        } else {
            // Whole remainder of the byte is zeros; subtract `offset` so the
            // bits already skipped at the start aren't counted twice.
            count += zeros - offset;
            offset = 0;
        }
    }
    if is_parsed {
        IResult::Done((&bytes[index..], offset), count as u32)
    } else if index + 2 > bytes_len {
        // Ran out of input without seeing a one bit.
        IResult::Incomplete(Needed::Size(index + 2))
    } else {
        IResult::Error(Err::Position(nom::ErrorKind::TakeUntil, (bytes, offset)))
    }
}
// The adjusted bits per sample applies to the side channel of the
// `LeftSide`, `MidpointSide`, and `RightSide` assignments, which carries one
// extra bit; `Independent` channels are never adjusted.
pub fn adjust_bits_per_sample(frame_header: &frame::Header,
                              channel: usize)
                              -> usize {
    let bits_per_sample = frame_header.bits_per_sample;
    let is_side_channel = match frame_header.channel_assignment {
        ChannelAssignment::Independent => false,
        ChannelAssignment::LeftSide |
        ChannelAssignment::MidpointSide => channel == 1,
        ChannelAssignment::RightSide => channel == 0,
    };
    if is_side_channel {
        bits_per_sample + 1
    } else {
        bits_per_sample
    }
}
/// Parse a single channel of audio data.
///
/// Reads the subframe header, an optional wasted-bits unary count, and the
/// subframe payload into `channel`'s slice of `buffer`; bumps `*channel` on
/// success.
pub fn subframe_parser<'a>(input: (&'a [u8], usize),
                           frame_header: &frame::Header,
                           channel: &mut usize,
                           buffer: &mut [i32])
                           -> IResult<(&'a [u8], usize), Subframe,
                                      ErrorKind> {
    let block_size = frame_header.block_size as usize;
    // Side channels carry one extra bit per sample.
    let bits_per_sample = adjust_bits_per_sample(frame_header, *channel);
    // Each channel owns a `block_size`-long window of the shared buffer.
    let start = *channel * block_size;
    let end = (*channel + 1) * block_size;
    let buffer_slice = &mut buffer[start..end];
    chain!(input,
        subframe_header: header ~
        // The header's flag bit (`subframe_header.1`) says whether a unary
        // wasted-bits count follows; when present the count is zeros + 1.
        wasted_bits: map!(
            cond!(subframe_header.1,
                  to_custom_error!(leading_zeros, LeadingZerosParser)),
            |option: Option<u32>| option.map_or(0, |zeros| zeros + 1)
        ) ~
        subframe_data: apply!(data,
                              bits_per_sample - (wasted_bits as usize),
                              block_size, subframe_header.0,
                              buffer_slice),
        || {
            // Iterate over the current channel being parsed. This probably
            // should be abstracted away, but for now this is the solution.
            *channel += 1;
            Subframe {
                data: subframe_data,
                wasted_bits: wasted_bits,
            }
        }
    )
}
// Parses the first byte of the subframe. The first bit must be zero to
// prevent sync-fooling, next six bits determines the subframe data type.
// Last bit is is there is wasted bits per sample, value one being true.
pub fn header(input: (&[u8], usize))
-> IResult<(&[u8], usize), (usize, bool), ErrorKind> {
let (i, byte) = try_parser! {
to_custom_error!(input, take_bits!(u8, 8), SubframeHeaderParser)
};
let is_valid = (byte >> 7) == 0;
let subframe_type = (byte >> 1) & 0b111111;
let has_wasted_bits = (byte & 0b01) == 1;
if is_valid {
IResult::Done(i, (subframe_type as usize, has_wasted_bits))
} else {
IResult::Error(Err::Position(
nom::ErrorKind::Custom(ErrorKind::InvalidSubframeHeader), input))
}
}
// Dispatch on the subframe type code from the header and parse the matching
// payload, tagging each parser's failures with its custom error kind.
fn data<'a>(input: (&'a [u8], usize),
            bits_per_sample: usize,
            block_size: usize,
            subframe_type: usize,
            buffer: &mut [i32])
            -> IResult<(&'a [u8], usize), subframe::Data, ErrorKind> {
    match subframe_type {
        0b000000 => constant(input, bits_per_sample)
            .map_err(to_custom_error!(ConstantParser)),
        0b000001 => verbatim(input, bits_per_sample, block_size)
            .map_err(to_custom_error!(VerbatimParser)),
        // Fixed predictor: the low three bits encode the order.
        0b001000...0b001100 => fixed(input, subframe_type & 0b0111,
                                     bits_per_sample, block_size, buffer)
            .map_err(to_custom_error!(FixedParser)),
        // LPC: the low five bits encode the order minus one.
        0b100000...0b111111 => lpc(input, (subframe_type & 0b011111) + 1,
                                   bits_per_sample, block_size, buffer)
            .map_err(to_custom_error!(LPCParser)),
        _ => IResult::Error(Err::Position(
            nom::ErrorKind::Custom(ErrorKind::Unknown),
            input))
    }
}
// Constant subframe: a single signed sample value for the whole block.
pub fn constant(input: (&[u8], usize), bits_per_sample: usize)
                -> IResult<(&[u8], usize), subframe::Data> {
    map!(input, take_signed_bits!(bits_per_sample), subframe::Data::Constant)
}
/// Fixed-predictor subframe: `order` warm-up samples followed by the
/// Rice-coded residual.
pub fn fixed<'a>(input: (&'a [u8], usize),
                 order: usize,
                 bits_per_sample: usize,
                 block_size: usize,
                 buffer: &mut [i32])
                 -> IResult<(&'a [u8], usize), subframe::Data> {
    let mut warmup = [0; subframe::MAX_FIXED_ORDER];
    chain!(input,
        count_slice!(take_signed_bits!(bits_per_sample), &mut warmup[0..order]) ~
        entropy_coding_method: apply!(residual, order, block_size, buffer),
        || {
            subframe::Data::Fixed(subframe::Fixed {
                entropy_coding_method: entropy_coding_method,
                order: order as u8,
                warmup: warmup,
                // Residuals are decoded into `buffer`, not stored here.
                residual: Vec::new(),
            })
        }
    )
}
// This parser finds the bit length for each quantized linear predictor
// coefficient. To preven sync fooling, four bit value cant be all onces.
fn qlp_coefficient_precision(input: (&[u8], usize))
-> IResult<(&[u8], usize), u8> {
let (i, precision) = try_parse!(input, take_bits!(u8, 4));
if precision == 0b1111 {
IResult::Error(Err::Position(nom::ErrorKind::Digit, input))
} else {
IResult::Done(i, precision + 1)
}
}
/// LPC subframe: warm-up samples, coefficient precision, quantization level,
/// the quantized coefficients, then the Rice-coded residual.
pub fn lpc<'a>(input: (&'a [u8], usize),
               order: usize,
               bits_per_sample: usize,
               block_size: usize,
               buffer: &mut [i32])
               -> IResult<(&'a [u8], usize), subframe::Data> {
    let mut warmup = [0; subframe::MAX_LPC_ORDER];
    let mut qlp_coefficients = [0; subframe::MAX_LPC_ORDER];
    chain!(input,
        count_slice!(take_signed_bits!(bits_per_sample), &mut warmup[0..order]) ~
        qlp_coeff_precision: qlp_coefficient_precision ~
        quantization_level: take_signed_bits!(i8, 5) ~
        count_slice!(
            take_signed_bits!(qlp_coeff_precision as usize),
            &mut qlp_coefficients[0..order]
        ) ~
        entropy_coding_method: apply!(residual, order, block_size, buffer),
        || {
            subframe::Data::LPC(subframe::LPC {
                entropy_coding_method: entropy_coding_method,
                order: order as u8,
                qlp_coeff_precision: qlp_coeff_precision,
                quantization_level: quantization_level,
                qlp_coefficients: qlp_coefficients,
                warmup: warmup,
                // Residuals are decoded into `buffer`, not stored here.
                residual: Vec::new(),
            })
        }
    )
}
/// Verbatim subframe: `block_size` uncompressed signed samples.
pub fn verbatim(input: (&[u8], usize),
                bits_per_sample: usize,
                block_size: usize)
                -> IResult<(&[u8], usize), subframe::Data> {
    map!(input, count!(take_signed_bits!(bits_per_sample), block_size),
         subframe::Data::Verbatim)
}
// Parser for figuring out the partitioned Rice coding, which there are only
// two, and the parser with fail when value is greater than one.
fn coding_method(input: (&[u8], usize))
-> IResult<(&[u8], usize), CodingMethod> {
let (i, method) = try_parse!(input, take_bits!(u8, 2));
match method {
0 => IResult::Done(i, CodingMethod::PartitionedRice),
1 => IResult::Done(i, CodingMethod::PartitionedRice2),
_ => IResult::Error(Err::Position(nom::ErrorKind::Alt, input)),
}
}
/// Parse a residual section: the coding method (2 bits) and partition order
/// (4 bits), then the partitioned Rice data itself.
fn residual<'a>(input: (&'a [u8], usize),
                predictor_order: usize,
                block_size: usize,
                buffer: &mut [i32])
                -> IResult<(&'a [u8], usize), subframe::EntropyCodingMethod> {
    let (i, (method, order)) = try_parse!(input,
        pair!(coding_method, take_bits!(u32, 4)));
    rice_partition(i, order, predictor_order, block_size, method, buffer)
}
// Parse all `2^partition_order` Rice partitions of a residual, decoding the
// samples into `buffer` and collecting the per-partition parameters.
fn rice_partition<'a>(input: (&'a [u8], usize),
                      partition_order: u32,
                      predictor_order: usize,
                      block_size: usize,
                      method: CodingMethod,
                      buffer: &mut [i32])
                      -> IResult<(&'a [u8], usize),
                                 subframe::EntropyCodingMethod> {
    // Rice2 uses 5-bit parameters; the all-ones escape code switches a
    // partition to verbatim (unencoded) storage.
    let (param_size, escape_code) = match method {
        CodingMethod::PartitionedRice => (4, 0b1111),
        CodingMethod::PartitionedRice2 => (5, 0b11111),
    };
    // Adjust block size to not include allocation for warm up samples
    let partitions = power_of_two(partition_order) as usize;
    let residual = &mut buffer[predictor_order..];
    let mut mut_input = input;
    let mut sample = 0;
    let mut contents = PartitionedRiceContents::new(partitions);
    for partition in 0..partitions {
        // The first partition is shorter: the warm-up samples have already
        // been consumed from the block.
        let offset = if partition_order == 0 {
            block_size - predictor_order
        } else if partition > 0 {
            block_size / partitions
        } else {
            (block_size / partitions) - predictor_order
        };
        let start = sample;
        let end = sample + offset;
        let result = chain!(mut_input,
            rice_parameter: take_bits!(u32, param_size) ~
            // Escaped partition: the next 5 bits give the raw sample size.
            size: cond!(rice_parameter == escape_code, take_bits!(usize, 5)) ~
            apply!(residual_data,
                   size, rice_parameter,
                   &mut contents.raw_bits()[partition],
                   &mut residual[start..end]
            ),
            || { rice_parameter }
        );
        match result {
            IResult::Done(i, parameter) => {
                mut_input = i;
                sample = end;
                contents.parameters()[partition] = parameter;
            }
            IResult::Error(error) => return IResult::Error(error),
            IResult::Incomplete(need) => return IResult::Incomplete(need),
        }
    }
    let entropy_coding_method = subframe::EntropyCodingMethod {
        method_type: method,
        data: subframe::PartitionedRice {
            order: partition_order,
            contents: contents,
        },
    };
    IResult::Done(mut_input, entropy_coding_method)
}
/// Route one partition to the right decoder: a `Some` size means the
/// partition escaped to verbatim storage, otherwise it is Rice-coded.
fn residual_data<'a>(input: (&'a [u8], usize),
                     option: Option<usize>,
                     rice_parameter: u32,
                     raw_bit: &mut u32,
                     samples: &mut [i32])
                     -> IResult<(&'a [u8], usize), ()> {
    match option {
        Some(size) => unencoded_residuals(input, size, raw_bit, samples),
        None => encoded_residuals(input, rice_parameter, raw_bit, samples),
    }
}
// Escaped partition: samples are stored verbatim at `bits_per_sample` bits.
fn unencoded_residuals<'a>(input: (&'a [u8], usize),
                           bits_per_sample: usize,
                           raw_bit: &mut u32,
                           samples: &mut [i32])
                           -> IResult<(&'a [u8], usize), ()> {
    *raw_bit = bits_per_sample as u32;
    count_slice!(input, take_signed_bits!(bits_per_sample), &mut samples[..])
}
// Rice-coded partition: each sample is a unary quotient plus a
// `parameter`-bit remainder, zigzag-decoded back to a signed value.
fn encoded_residuals<'a>(input: (&'a [u8], usize),
                         parameter: u32,
                         raw_bit: &mut u32,
                         samples: &mut [i32])
                         -> IResult<(&'a [u8], usize), ()> {
    let length = samples.len();
    let modulus = power_of_two(parameter);
    let mut count = 0;
    let mut is_error = false;
    let mut mut_input = input;
    *raw_bit = 0;
    for sample in samples {
        let result = chain!(mut_input,
            quotient: leading_zeros ~
            // TODO: Figure out the varied remainder bit size
            remainder: take_bits!(u32, parameter as usize),
            || {
                let value = quotient * modulus + remainder;
                // Zigzag decode: even -> non-negative, odd -> negative.
                ((value as i32) >> 1) ^ -((value as i32) & 1)
            });
        match result {
            IResult::Done(i, value) => {
                mut_input = i;
                count += 1;
                *sample = value
            }
            IResult::Error(_) => {
                is_error = true;
                break;
            }
            IResult::Incomplete(_) => break,
        }
    }
    if is_error {
        // Report the failure against the start of the partition.
        IResult::Error(Err::Position(nom::ErrorKind::Count, input))
    } else if count == length {
        IResult::Done(mut_input, ())
    } else {
        IResult::Incomplete(Needed::Unknown)
    }
}
// Unit tests for the subframe parsers. Inputs are (byte slice, bit offset)
// pairs as consumed by the nom bit-level combinators.
#[cfg(test)]
mod tests {
    use super::*;
    use nom::{self, IResult, Err, Needed};
    use frame::{self, ChannelAssignment, NumberType};
    use utility::ErrorKind;
    use subframe::{
        Data,
        Fixed, LPC,
        EntropyCodingMethod, CodingMethod, PartitionedRice,
        PartitionedRiceContents,
    };
    #[test]
    fn test_leading_zeros() {
        // Each expected `Done` pairs the remaining (slice, offset) with the
        // decoded unary count.
        let inputs = [ (&[0b10000000][..], 0)
                     , (&[0b11000000][..], 1)
                     , (&[0b00000001][..], 0)
                     , (&[0b11111111][..], 7)
                     , (&[0b00000000, 0b10000000][..], 0)
                     , (&[0b10000000, 0b10000000][..], 1)
                     , (&[0b00000000, 0b00000001][..], 0)
                     , (&[0b11111110, 0b00000010][..], 7)
                     , (&[0b10101010, 0b00000000][..], 7)
                     ];
        let results = [ IResult::Done((&inputs[0].0[..], 1), 0)
                      , IResult::Done((&inputs[1].0[..], 2), 0)
                      , IResult::Done((&[][..], 0), 7)
                      , IResult::Done((&[][..], 0), 0)
                      , IResult::Done((&inputs[4].0[1..], 1), 8)
                      , IResult::Done((&inputs[5].0[1..], 1), 7)
                      , IResult::Done((&[][..], 0), 15)
                      , IResult::Done((&inputs[7].0[1..], 7), 7)
                      , IResult::Incomplete(Needed::Size(3))
                      ];
        assert_eq!(leading_zeros(inputs[0]), results[0]);
        assert_eq!(leading_zeros(inputs[1]), results[1]);
        assert_eq!(leading_zeros(inputs[2]), results[2]);
        assert_eq!(leading_zeros(inputs[3]), results[3]);
        assert_eq!(leading_zeros(inputs[4]), results[4]);
        assert_eq!(leading_zeros(inputs[5]), results[5]);
        assert_eq!(leading_zeros(inputs[6]), results[6]);
        assert_eq!(leading_zeros(inputs[7]), results[7]);
        assert_eq!(leading_zeros(inputs[8]), results[8]);
    }
    #[test]
    fn test_header() {
        // The last input has its top bit set, which must be rejected.
        let inputs = [ (&[0b01010100][..], 0)
                     , (&[0b00011111][..], 0)
                     , (&[0b00000000][..], 0)
                     , (&[0b10000000][..], 0)
                     ];
        let results = [ IResult::Done((&[][..], 0), (0b101010, false))
                      , IResult::Done((&[][..], 0), (0b001111, true))
                      , IResult::Done((&[][..], 0), (0b000000, false))
                      , IResult::Error(Err::Position(
                          nom::ErrorKind::Custom(
                            ErrorKind::InvalidSubframeHeader), inputs[3]))
                      ];
        assert_eq!(header(inputs[0]), results[0]);
        assert_eq!(header(inputs[1]), results[1]);
        assert_eq!(header(inputs[2]), results[2]);
        assert_eq!(header(inputs[3]), results[3]);
    }
    #[test]
    fn test_adjust_bits_per_sample() {
        let mut frame_header = frame::Header {
            block_size: 512,
            sample_rate: 41000,
            channels: 2,
            channel_assignment: ChannelAssignment::Independent,
            bits_per_sample: 16,
            number: NumberType::Sample(40),
            crc: 0xc4,
        };
        assert_eq!(adjust_bits_per_sample(&frame_header, 0), 16);
        assert_eq!(adjust_bits_per_sample(&frame_header, 1), 16);
        frame_header.channel_assignment = ChannelAssignment::LeftSide;
        assert_eq!(adjust_bits_per_sample(&frame_header, 0), 16);
        assert_eq!(adjust_bits_per_sample(&frame_header, 1), 17);
        frame_header.channel_assignment = ChannelAssignment::RightSide;
        assert_eq!(adjust_bits_per_sample(&frame_header, 0), 17);
        assert_eq!(adjust_bits_per_sample(&frame_header, 1), 16);
        frame_header.channel_assignment = ChannelAssignment::MidpointSide;
        assert_eq!(adjust_bits_per_sample(&frame_header, 0), 16);
        assert_eq!(adjust_bits_per_sample(&frame_header, 1), 17);
    }
    #[test]
    fn test_constant() {
        let inputs = [ (&b"\0\x80"[..], 0)
                     , (&b"\x18"[..], 3)
                     ];
        let results = [ IResult::Done((&[][..], 0), Data::Constant(128))
                      , IResult::Done((&[][..], 0), Data::Constant(-8))
                      ];
        assert_eq!(constant(inputs[0], 16), results[0]);
        assert_eq!(constant(inputs[1], 5), results[1]);
    }
    #[test]
    fn test_verbatim() {
        let inputs = [ (&b"\xff\x80\0\x0a\xff\x65\0\0\x04\x28\xff\x28\
                          \0\0\xff\xe7"[..], 0)
                     , (&b"\xe2\x81\x07\x80\x89"[..], 0)
                     ];
        let results = [ IResult::Done((&[][..], 0), Data::Verbatim(vec![
                          -128, 10, -155, 0, 1064, -216, 0, -25]))
                      , IResult::Done((&[][..], 0), Data::Verbatim(vec![
                          -4, 10, 0, -16, 15, 0, 4, 9]))
                      ];
        assert_eq!(verbatim(inputs[0], 16, 8), results[0]);
        assert_eq!(verbatim(inputs[1], 5, 8), results[1]);
    }
    #[test]
    fn test_fixed() {
        let inputs = [ (&b"\xe8\0\x40\xaf\x02\x01\x04\x80\x42\x92\x84\x65\
                          \x64"[..], 0)
                     , (&b"\xf5\x47\xf0\xff\xdc\0\x42\0\x8e\xf9\xbc\x08\x08\
                          \x10"[..], 0)
                     ];
        let results = [ IResult::Done((&[][..], 0), Data::Fixed(Fixed {
                          entropy_coding_method: EntropyCodingMethod {
                            method_type: CodingMethod::PartitionedRice,
                            data: PartitionedRice {
                              order: 0,
                              contents: PartitionedRiceContents {
                                capacity: 1,
                                data: vec![8, 0],
                              },
                            },
                          },
                          order: 4,
                          warmup: [-24, 0, 64, -81],
                          residual: Vec::new(),
                        }))
                      , IResult::Done((&[][..], 0), Data::Fixed(Fixed {
                          entropy_coding_method: EntropyCodingMethod {
                            method_type: CodingMethod::PartitionedRice2,
                            data: PartitionedRice {
                              order: 1,
                              contents: PartitionedRiceContents {
                                capacity: 2,
                                data: vec![31, 31, 16, 6],
                              },
                            },
                          },
                          order: 2,
                          warmup: [-1, 5, 0, 0],
                          residual: Vec::new(),
                        }))
                      ];
        // Decoded residuals land in `buffer` after the warm-up samples.
        let mut buffer = [0; 10];
        let residuals = [ &[642, 0, 5, 148, -141, 178][..]
                        , &[-36, 66, 142, -4, 2, 0, -32, 16][..]
                        ];
        assert_eq!(fixed(inputs[0], 4, 8, 10, &mut buffer), results[0]);
        assert_eq!(&buffer[4..10], residuals[0]);
        assert_eq!(fixed(inputs[1], 2, 4, 10, &mut buffer), results[1]);
        assert_eq!(&buffer[2..10], residuals[1]);
    }
    #[test]
    fn test_lpc() {
        let inputs = [ (&b"\xe8\0\x40\xaf\x74\x73\x19\0\x75\x81\xe8\x16\0\x05\
                          \x18\xef\x36"[..], 0)
                     , (&b"\x84\x01\xb6\xc2\x37\xf9\xd3\x82\x4a\xa2\x3b\xe9\xfc\
                          \x2b\x66\xea\x36\xcb\x85\x72\xc5\x13\x14\xed\x1b\
                          \x3f"[..], 0)
                     ];
        let slice = (&[27, 63][..], 2);
        let results = [ IResult::Done((&[][..], 0), Data::LPC(LPC {
                          entropy_coding_method: EntropyCodingMethod {
                            method_type: CodingMethod::PartitionedRice,
                            data: PartitionedRice {
                              order: 0,
                              contents: PartitionedRiceContents {
                                capacity: 1,
                                data: vec![15, 8],
                              },
                            },
                          },
                          order: 4,
                          warmup: [ -24, 0, 64, -81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                                  , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                                  , 0, 0
                                  ],
                          qlp_coeff_precision: 8,
                          quantization_level: 8,
                          qlp_coefficients: [ -26, 50, 0, -21, 0, 0, 0, 0, 0, 0, 0
                                            , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                                            , 0, 0, 0, 0, 0, 0, 0, 0, 0
                                            ],
                          residual: Vec::new(),
                        }))
                      , IResult::Done(slice, Data::LPC(LPC {
                          entropy_coding_method: EntropyCodingMethod {
                            method_type: CodingMethod::PartitionedRice2,
                            data: PartitionedRice {
                              order: 1,
                              contents: PartitionedRiceContents {
                                capacity: 2,
                                data: vec![3, 5, 0, 0],
                              },
                            },
                          },
                          order: 8,
                          warmup: [ -8, 4, 0, 1, -5, 6, -4, 2, 0, 0, 0, 0, 0, 0
                                  , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                                  , 0, 0
                                  ],
                          qlp_coeff_precision: 4,
                          quantization_level: 15,
                          qlp_coefficients: [ -1, 3, -6, 7, 0, 4, -7, 5, 0, 0, 0
                                            , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
                                            , 0, 0, 0, 0, 0, 0, 0, 0, 0
                                            ],
                          residual: Vec::new(),
                        }))
                      ];
        let mut buffer = [0; 26];
        let residuals = [ &[22, 0, 5, 24, -17, 54][..],
                          &[ -2, 3, -1, -4, 2, 27, -28, 20, 11, 9, 12, -22, -3, 1
                           , 1, -25, -20, 26
                           ][..]
                        ];
        assert_eq!(lpc(inputs[0], 4, 8, 10, &mut buffer), results[0]);
        assert_eq!(&buffer[4..10], residuals[0]);
        assert_eq!(lpc(inputs[1], 8, 4, 26, &mut buffer), results[1]);
        assert_eq!(&buffer[8..26], residuals[1]);
    }
}
Change `constant` to the new `to_custom_error` interface
use nom::{self, IResult, Err, Needed};
use frame::{self, ChannelAssignment};
use subframe::{self, Subframe, CodingMethod, PartitionedRiceContents};
use utility::{ErrorKind, power_of_two};
// Parser used to parse unary notation. Naming the parser `leading_zeros`
// felt clearer in the code: it tells the caller what the parser is actually
// doing, since unary can -- and more commonly does -- use leading ones.
pub fn leading_zeros(input: (&[u8], usize)) -> IResult<(&[u8], usize), u32> {
    let (bytes, mut offset) = input;
    let mut index = 0;
    let mut count = 0;
    let mut is_parsed = false;
    let bytes_len = bytes.len();
    for i in 0..bytes_len {
        // Clear the number of offset bits
        let byte = bytes[i] << offset;
        let zeros = byte.leading_zeros() as usize;
        index = i;
        if byte > 0 {
            // Found the terminating one bit: count the zeros before it and
            // consume the terminator too.
            is_parsed = true;
            count += zeros;
            offset += zeros + 1;
            if offset >= 8 {
                // The terminator was the final bit of this byte.
                index += 1;
                offset -= 8;
            }
            break;
        } else {
            // Whole remainder of the byte is zeros; subtract `offset` so the
            // bits already skipped at the start aren't counted twice.
            count += zeros - offset;
            offset = 0;
        }
    }
    if is_parsed {
        IResult::Done((&bytes[index..], offset), count as u32)
    } else if index + 2 > bytes_len {
        // Ran out of input without seeing a one bit.
        IResult::Incomplete(Needed::Size(index + 2))
    } else {
        IResult::Error(Err::Position(nom::ErrorKind::TakeUntil, (bytes, offset)))
    }
}
// The adjusted bits per sample applies to the side channel of the
// `LeftSide`, `MidpointSide`, and `RightSide` assignments, which carries one
// extra bit; `Independent` channels are never adjusted.
pub fn adjust_bits_per_sample(frame_header: &frame::Header,
                              channel: usize)
                              -> usize {
    let bits_per_sample = frame_header.bits_per_sample;
    let is_side_channel = match frame_header.channel_assignment {
        ChannelAssignment::Independent => false,
        ChannelAssignment::LeftSide |
        ChannelAssignment::MidpointSide => channel == 1,
        ChannelAssignment::RightSide => channel == 0,
    };
    if is_side_channel {
        bits_per_sample + 1
    } else {
        bits_per_sample
    }
}
/// Parse a single channel of audio data.
///
/// Reads the subframe header, an optional wasted-bits unary count, and the
/// subframe payload into `channel`'s slice of `buffer`; bumps `*channel` on
/// success.
pub fn subframe_parser<'a>(input: (&'a [u8], usize),
                           frame_header: &frame::Header,
                           channel: &mut usize,
                           buffer: &mut [i32])
                           -> IResult<(&'a [u8], usize), Subframe,
                                      ErrorKind> {
    let block_size = frame_header.block_size as usize;
    // Side channels carry one extra bit per sample.
    let bits_per_sample = adjust_bits_per_sample(frame_header, *channel);
    // Each channel owns a `block_size`-long window of the shared buffer.
    let start = *channel * block_size;
    let end = (*channel + 1) * block_size;
    let buffer_slice = &mut buffer[start..end];
    chain!(input,
        subframe_header: header ~
        // The header's flag bit (`subframe_header.1`) says whether a unary
        // wasted-bits count follows; when present the count is zeros + 1.
        wasted_bits: map!(
            cond!(subframe_header.1,
                  to_custom_error!(leading_zeros, LeadingZerosParser)),
            |option: Option<u32>| option.map_or(0, |zeros| zeros + 1)
        ) ~
        subframe_data: apply!(data,
                              bits_per_sample - (wasted_bits as usize),
                              block_size, subframe_header.0,
                              buffer_slice),
        || {
            // Iterate over the current channel being parsed. This probably
            // should be abstracted away, but for now this is the solution.
            *channel += 1;
            Subframe {
                data: subframe_data,
                wasted_bits: wasted_bits,
            }
        }
    )
}
// Parses the first byte of the subframe. The first bit must be zero to
// prevent sync-fooling, next six bits determines the subframe data type.
// Last bit is is there is wasted bits per sample, value one being true.
pub fn header(input: (&[u8], usize))
-> IResult<(&[u8], usize), (usize, bool), ErrorKind> {
let (i, byte) = try_parser! {
to_custom_error!(input, take_bits!(u8, 8), SubframeHeaderParser)
};
let is_valid = (byte >> 7) == 0;
let subframe_type = (byte >> 1) & 0b111111;
let has_wasted_bits = (byte & 0b01) == 1;
if is_valid {
IResult::Done(i, (subframe_type as usize, has_wasted_bits))
} else {
IResult::Error(Err::Position(
nom::ErrorKind::Custom(ErrorKind::InvalidSubframeHeader), input))
}
}
// Dispatch on the subframe type code from the header and parse the matching
// payload. `constant` now carries its own custom error kind, so its branch
// needs no `map_err`; the remaining branches still tag their failures here.
fn data<'a>(input: (&'a [u8], usize),
            bits_per_sample: usize,
            block_size: usize,
            subframe_type: usize,
            buffer: &mut [i32])
            -> IResult<(&'a [u8], usize), subframe::Data, ErrorKind> {
    match subframe_type {
        0b000000 => constant(input, bits_per_sample),
        0b000001 => verbatim(input, bits_per_sample, block_size)
            .map_err(to_custom_error!(VerbatimParser)),
        // Fixed predictor: the low three bits encode the order.
        0b001000...0b001100 => fixed(input, subframe_type & 0b0111,
                                     bits_per_sample, block_size, buffer)
            .map_err(to_custom_error!(FixedParser)),
        // LPC: the low five bits encode the order minus one.
        0b100000...0b111111 => lpc(input, (subframe_type & 0b011111) + 1,
                                   bits_per_sample, block_size, buffer)
            .map_err(to_custom_error!(LPCParser)),
        _ => IResult::Error(Err::Position(
            nom::ErrorKind::Custom(ErrorKind::Unknown),
            input))
    }
}
// Constant subframe: a single signed sample value for the whole block;
// failures are tagged with the `ConstantParser` error kind.
pub fn constant(input: (&[u8], usize), bits_per_sample: usize)
                -> IResult<(&[u8], usize), subframe::Data, ErrorKind> {
    to_custom_error!(input,
        map!(take_signed_bits!(bits_per_sample), subframe::Data::Constant),
        ConstantParser)
}
/// Fixed-predictor subframe: `order` warm-up samples followed by the
/// Rice-coded residual.
pub fn fixed<'a>(input: (&'a [u8], usize),
                 order: usize,
                 bits_per_sample: usize,
                 block_size: usize,
                 buffer: &mut [i32])
                 -> IResult<(&'a [u8], usize), subframe::Data> {
    let mut warmup = [0; subframe::MAX_FIXED_ORDER];
    chain!(input,
        count_slice!(take_signed_bits!(bits_per_sample), &mut warmup[0..order]) ~
        entropy_coding_method: apply!(residual, order, block_size, buffer),
        || {
            subframe::Data::Fixed(subframe::Fixed {
                entropy_coding_method: entropy_coding_method,
                order: order as u8,
                warmup: warmup,
                // Residuals are decoded into `buffer`, not stored here.
                residual: Vec::new(),
            })
        }
    )
}
// This parser finds the bit length for each quantized linear predictor
// coefficient. To preven sync fooling, four bit value cant be all onces.
fn qlp_coefficient_precision(input: (&[u8], usize))
-> IResult<(&[u8], usize), u8> {
let (i, precision) = try_parse!(input, take_bits!(u8, 4));
if precision == 0b1111 {
IResult::Error(Err::Position(nom::ErrorKind::Digit, input))
} else {
IResult::Done(i, precision + 1)
}
}
/// LPC subframe: warm-up samples, coefficient precision, quantization level,
/// the quantized coefficients, then the Rice-coded residual.
pub fn lpc<'a>(input: (&'a [u8], usize),
               order: usize,
               bits_per_sample: usize,
               block_size: usize,
               buffer: &mut [i32])
               -> IResult<(&'a [u8], usize), subframe::Data> {
    let mut warmup = [0; subframe::MAX_LPC_ORDER];
    let mut qlp_coefficients = [0; subframe::MAX_LPC_ORDER];
    chain!(input,
        count_slice!(take_signed_bits!(bits_per_sample), &mut warmup[0..order]) ~
        qlp_coeff_precision: qlp_coefficient_precision ~
        quantization_level: take_signed_bits!(i8, 5) ~
        count_slice!(
            take_signed_bits!(qlp_coeff_precision as usize),
            &mut qlp_coefficients[0..order]
        ) ~
        entropy_coding_method: apply!(residual, order, block_size, buffer),
        || {
            subframe::Data::LPC(subframe::LPC {
                entropy_coding_method: entropy_coding_method,
                order: order as u8,
                qlp_coeff_precision: qlp_coeff_precision,
                quantization_level: quantization_level,
                qlp_coefficients: qlp_coefficients,
                warmup: warmup,
                // Residuals are decoded into `buffer`, not stored here.
                residual: Vec::new(),
            })
        }
    )
}
/// Verbatim subframe: `block_size` uncompressed signed samples.
pub fn verbatim(input: (&[u8], usize),
                bits_per_sample: usize,
                block_size: usize)
                -> IResult<(&[u8], usize), subframe::Data> {
    map!(input, count!(take_signed_bits!(bits_per_sample), block_size),
         subframe::Data::Verbatim)
}
// Parser for figuring out the partitioned Rice coding, which there are only
// two, and the parser with fail when value is greater than one.
fn coding_method(input: (&[u8], usize))
-> IResult<(&[u8], usize), CodingMethod> {
let (i, method) = try_parse!(input, take_bits!(u8, 2));
match method {
0 => IResult::Done(i, CodingMethod::PartitionedRice),
1 => IResult::Done(i, CodingMethod::PartitionedRice2),
_ => IResult::Error(Err::Position(nom::ErrorKind::Alt, input)),
}
}
/// Parse a residual section: the coding method (2 bits) and partition order
/// (4 bits), then the partitioned Rice data itself.
fn residual<'a>(input: (&'a [u8], usize),
                predictor_order: usize,
                block_size: usize,
                buffer: &mut [i32])
                -> IResult<(&'a [u8], usize), subframe::EntropyCodingMethod> {
    let (i, (method, order)) = try_parse!(input,
        pair!(coding_method, take_bits!(u32, 4)));
    rice_partition(i, order, predictor_order, block_size, method, buffer)
}
// Parse all `2^partition_order` Rice partitions of a residual, decoding the
// samples into `buffer` and collecting the per-partition parameters.
fn rice_partition<'a>(input: (&'a [u8], usize),
                      partition_order: u32,
                      predictor_order: usize,
                      block_size: usize,
                      method: CodingMethod,
                      buffer: &mut [i32])
                      -> IResult<(&'a [u8], usize),
                                 subframe::EntropyCodingMethod> {
    // Rice2 uses 5-bit parameters; the all-ones escape code switches a
    // partition to verbatim (unencoded) storage.
    let (param_size, escape_code) = match method {
        CodingMethod::PartitionedRice => (4, 0b1111),
        CodingMethod::PartitionedRice2 => (5, 0b11111),
    };
    // Adjust block size to not include allocation for warm up samples
    let partitions = power_of_two(partition_order) as usize;
    let residual = &mut buffer[predictor_order..];
    let mut mut_input = input;
    let mut sample = 0;
    let mut contents = PartitionedRiceContents::new(partitions);
    for partition in 0..partitions {
        // The first partition is shorter: the warm-up samples have already
        // been consumed from the block.
        let offset = if partition_order == 0 {
            block_size - predictor_order
        } else if partition > 0 {
            block_size / partitions
        } else {
            (block_size / partitions) - predictor_order
        };
        let start = sample;
        let end = sample + offset;
        let result = chain!(mut_input,
            rice_parameter: take_bits!(u32, param_size) ~
            // Escaped partition: the next 5 bits give the raw sample size.
            size: cond!(rice_parameter == escape_code, take_bits!(usize, 5)) ~
            apply!(residual_data,
                   size, rice_parameter,
                   &mut contents.raw_bits()[partition],
                   &mut residual[start..end]
            ),
            || { rice_parameter }
        );
        match result {
            IResult::Done(i, parameter) => {
                mut_input = i;
                sample = end;
                contents.parameters()[partition] = parameter;
            }
            IResult::Error(error) => return IResult::Error(error),
            IResult::Incomplete(need) => return IResult::Incomplete(need),
        }
    }
    let entropy_coding_method = subframe::EntropyCodingMethod {
        method_type: method,
        data: subframe::PartitionedRice {
            order: partition_order,
            contents: contents,
        },
    };
    IResult::Done(mut_input, entropy_coding_method)
}
fn residual_data<'a>(input: (&'a [u8], usize),
                     option: Option<usize>,
                     rice_parameter: u32,
                     raw_bit: &mut u32,
                     samples: &mut [i32])
                     -> IResult<(&'a [u8], usize), ()> {
  match option {
    // An escape size was parsed: the partition stores raw, unencoded
    // samples of `size` bits each.
    Some(size) => unencoded_residuals(input, size, raw_bit, samples),
    // Usual case: the partition is Rice-encoded with `rice_parameter`.
    None => encoded_residuals(input, rice_parameter, raw_bit, samples),
  }
}
/// Fills `samples` with raw (escaped) residuals, each read as a signed,
/// `bits_per_sample`-bit integer.
///
/// Records the raw bit width in `raw_bit` so the partition can later be
/// identified as unencoded (a non-zero raw-bit value).
fn unencoded_residuals<'a>(input: (&'a [u8], usize),
                           bits_per_sample: usize,
                           raw_bit: &mut u32,
                           samples: &mut [i32])
                           -> IResult<(&'a [u8], usize), ()> {
  *raw_bit = bits_per_sample as u32;

  count_slice!(input, take_signed_bits!(bits_per_sample), &mut samples[..])
}
/// Decodes Rice-encoded residuals into `samples`.
///
/// Each sample is a unary quotient (count of leading zeros up to a one bit)
/// followed by a `parameter`-bit binary remainder; the combined unsigned
/// value is zigzag-decoded back to a signed integer. `raw_bit` is reset to
/// zero because this partition is not escaped/unencoded.
fn encoded_residuals<'a>(input: (&'a [u8], usize),
                         parameter: u32,
                         raw_bit: &mut u32,
                         samples: &mut [i32])
                         -> IResult<(&'a [u8], usize), ()> {
  let length = samples.len();
  // modulus == 2^parameter, the weight of one unary quotient step.
  let modulus = power_of_two(parameter);

  let mut count = 0;
  let mut is_error = false;
  let mut mut_input = input;

  *raw_bit = 0;

  for sample in samples {
    let result = chain!(mut_input,
      quotient: leading_zeros ~
      // TODO: Figure out the varied remainder bit size
      remainder: take_bits!(u32, parameter as usize),
      || {
        let value = quotient * modulus + remainder;

        // Zigzag decode: even values map to non-negative, odd to negative.
        ((value as i32) >> 1) ^ -((value as i32) & 1)
      });

    match result {
      IResult::Done(i, value) => {
        mut_input = i;
        count += 1;
        *sample = value
      }
      IResult::Error(_) => {
        is_error = true;
        break;
      }
      // Ran out of input: report Incomplete below rather than an error.
      IResult::Incomplete(_) => break,
    }
  }

  if is_error {
    IResult::Error(Err::Position(nom::ErrorKind::Count, input))
  } else if count == length {
    IResult::Done(mut_input, ())
  } else {
    IResult::Incomplete(Needed::Unknown)
  }
}
#[cfg(test)]
mod tests {
use super::*;
use nom::{self, IResult, Err, Needed};
use frame::{self, ChannelAssignment, NumberType};
use utility::ErrorKind;
use subframe::{
Data,
Fixed, LPC,
EntropyCodingMethod, CodingMethod, PartitionedRice,
PartitionedRiceContents,
};
#[test]
fn test_leading_zeros() {
let inputs = [ (&[0b10000000][..], 0)
, (&[0b11000000][..], 1)
, (&[0b00000001][..], 0)
, (&[0b11111111][..], 7)
, (&[0b00000000, 0b10000000][..], 0)
, (&[0b10000000, 0b10000000][..], 1)
, (&[0b00000000, 0b00000001][..], 0)
, (&[0b11111110, 0b00000010][..], 7)
, (&[0b10101010, 0b00000000][..], 7)
];
let results = [ IResult::Done((&inputs[0].0[..], 1), 0)
, IResult::Done((&inputs[1].0[..], 2), 0)
, IResult::Done((&[][..], 0), 7)
, IResult::Done((&[][..], 0), 0)
, IResult::Done((&inputs[4].0[1..], 1), 8)
, IResult::Done((&inputs[5].0[1..], 1), 7)
, IResult::Done((&[][..], 0), 15)
, IResult::Done((&inputs[7].0[1..], 7), 7)
, IResult::Incomplete(Needed::Size(3))
];
assert_eq!(leading_zeros(inputs[0]), results[0]);
assert_eq!(leading_zeros(inputs[1]), results[1]);
assert_eq!(leading_zeros(inputs[2]), results[2]);
assert_eq!(leading_zeros(inputs[3]), results[3]);
assert_eq!(leading_zeros(inputs[4]), results[4]);
assert_eq!(leading_zeros(inputs[5]), results[5]);
assert_eq!(leading_zeros(inputs[6]), results[6]);
assert_eq!(leading_zeros(inputs[7]), results[7]);
assert_eq!(leading_zeros(inputs[8]), results[8]);
}
#[test]
fn test_header() {
let inputs = [ (&[0b01010100][..], 0)
, (&[0b00011111][..], 0)
, (&[0b00000000][..], 0)
, (&[0b10000000][..], 0)
];
let results = [ IResult::Done((&[][..], 0), (0b101010, false))
, IResult::Done((&[][..], 0), (0b001111, true))
, IResult::Done((&[][..], 0), (0b000000, false))
, IResult::Error(Err::Position(
nom::ErrorKind::Custom(
ErrorKind::InvalidSubframeHeader), inputs[3]))
];
assert_eq!(header(inputs[0]), results[0]);
assert_eq!(header(inputs[1]), results[1]);
assert_eq!(header(inputs[2]), results[2]);
assert_eq!(header(inputs[3]), results[3]);
}
#[test]
fn test_adjust_bits_per_sample() {
let mut frame_header = frame::Header {
block_size: 512,
sample_rate: 41000,
channels: 2,
channel_assignment: ChannelAssignment::Independent,
bits_per_sample: 16,
number: NumberType::Sample(40),
crc: 0xc4,
};
assert_eq!(adjust_bits_per_sample(&frame_header, 0), 16);
assert_eq!(adjust_bits_per_sample(&frame_header, 1), 16);
frame_header.channel_assignment = ChannelAssignment::LeftSide;
assert_eq!(adjust_bits_per_sample(&frame_header, 0), 16);
assert_eq!(adjust_bits_per_sample(&frame_header, 1), 17);
frame_header.channel_assignment = ChannelAssignment::RightSide;
assert_eq!(adjust_bits_per_sample(&frame_header, 0), 17);
assert_eq!(adjust_bits_per_sample(&frame_header, 1), 16);
frame_header.channel_assignment = ChannelAssignment::MidpointSide;
assert_eq!(adjust_bits_per_sample(&frame_header, 0), 16);
assert_eq!(adjust_bits_per_sample(&frame_header, 1), 17);
}
#[test]
fn test_constant() {
let inputs = [ (&b"\0\x80"[..], 0)
, (&b"\x18"[..], 3)
];
let results = [ IResult::Done((&[][..], 0), Data::Constant(128))
, IResult::Done((&[][..], 0), Data::Constant(-8))
];
assert_eq!(constant(inputs[0], 16), results[0]);
assert_eq!(constant(inputs[1], 5), results[1]);
}
#[test]
fn test_verbatim() {
let inputs = [ (&b"\xff\x80\0\x0a\xff\x65\0\0\x04\x28\xff\x28\
\0\0\xff\xe7"[..], 0)
, (&b"\xe2\x81\x07\x80\x89"[..], 0)
];
let results = [ IResult::Done((&[][..], 0), Data::Verbatim(vec![
-128, 10, -155, 0, 1064, -216, 0, -25]))
, IResult::Done((&[][..], 0), Data::Verbatim(vec![
-4, 10, 0, -16, 15, 0, 4, 9]))
];
assert_eq!(verbatim(inputs[0], 16, 8), results[0]);
assert_eq!(verbatim(inputs[1], 5, 8), results[1]);
}
#[test]
fn test_fixed() {
let inputs = [ (&b"\xe8\0\x40\xaf\x02\x01\x04\x80\x42\x92\x84\x65\
\x64"[..], 0)
, (&b"\xf5\x47\xf0\xff\xdc\0\x42\0\x8e\xf9\xbc\x08\x08\
\x10"[..], 0)
];
let results = [ IResult::Done((&[][..], 0), Data::Fixed(Fixed {
entropy_coding_method: EntropyCodingMethod {
method_type: CodingMethod::PartitionedRice,
data: PartitionedRice {
order: 0,
contents: PartitionedRiceContents {
capacity: 1,
data: vec![8, 0],
},
},
},
order: 4,
warmup: [-24, 0, 64, -81],
residual: Vec::new(),
}))
, IResult::Done((&[][..], 0), Data::Fixed(Fixed {
entropy_coding_method: EntropyCodingMethod {
method_type: CodingMethod::PartitionedRice2,
data: PartitionedRice {
order: 1,
contents: PartitionedRiceContents {
capacity: 2,
data: vec![31, 31, 16, 6],
},
},
},
order: 2,
warmup: [-1, 5, 0, 0],
residual: Vec::new(),
}))
];
let mut buffer = [0; 10];
let residuals = [ &[642, 0, 5, 148, -141, 178][..]
, &[-36, 66, 142, -4, 2, 0, -32, 16][..]
];
assert_eq!(fixed(inputs[0], 4, 8, 10, &mut buffer), results[0]);
assert_eq!(&buffer[4..10], residuals[0]);
assert_eq!(fixed(inputs[1], 2, 4, 10, &mut buffer), results[1]);
assert_eq!(&buffer[2..10], residuals[1]);
}
#[test]
fn test_lpc() {
let inputs = [ (&b"\xe8\0\x40\xaf\x74\x73\x19\0\x75\x81\xe8\x16\0\x05\
\x18\xef\x36"[..], 0)
, (&b"\x84\x01\xb6\xc2\x37\xf9\xd3\x82\x4a\xa2\x3b\xe9\xfc\
\x2b\x66\xea\x36\xcb\x85\x72\xc5\x13\x14\xed\x1b\
\x3f"[..], 0)
];
let slice = (&[27, 63][..], 2);
let results = [ IResult::Done((&[][..], 0), Data::LPC(LPC {
entropy_coding_method: EntropyCodingMethod {
method_type: CodingMethod::PartitionedRice,
data: PartitionedRice {
order: 0,
contents: PartitionedRiceContents {
capacity: 1,
data: vec![15, 8],
},
},
},
order: 4,
warmup: [ -24, 0, 64, -81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0
],
qlp_coeff_precision: 8,
quantization_level: 8,
qlp_coefficients: [ -26, 50, 0, -21, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0, 0
],
residual: Vec::new(),
}))
, IResult::Done(slice, Data::LPC(LPC {
entropy_coding_method: EntropyCodingMethod {
method_type: CodingMethod::PartitionedRice2,
data: PartitionedRice {
order: 1,
contents: PartitionedRiceContents {
capacity: 2,
data: vec![3, 5, 0, 0],
},
},
},
order: 8,
warmup: [ -8, 4, 0, 1, -5, 6, -4, 2, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0
],
qlp_coeff_precision: 4,
quantization_level: 15,
qlp_coefficients: [ -1, 3, -6, 7, 0, 4, -7, 5, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
, 0, 0, 0, 0, 0, 0, 0, 0, 0
],
residual: Vec::new(),
}))
];
let mut buffer = [0; 26];
let residuals = [ &[22, 0, 5, 24, -17, 54][..],
&[ -2, 3, -1, -4, 2, 27, -28, 20, 11, 9, 12, -22, -3, 1
, 1, -25, -20, 26
][..]
];
assert_eq!(lpc(inputs[0], 4, 8, 10, &mut buffer), results[0]);
assert_eq!(&buffer[4..10], residuals[0]);
assert_eq!(lpc(inputs[1], 8, 4, 26, &mut buffer), results[1]);
assert_eq!(&buffer[8..26], residuals[1]);
}
}
|
use alloc::sync::Arc;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::alloc::{GlobalAlloc, Layout};
use core::{intrinsics, mem};
use core::ops::DerefMut;
use spin::Mutex;
use crate::context::file::FileDescriptor;
use crate::context::{ContextId, WaitpidKey};
use crate::context;
#[cfg(not(feature="doc"))]
use crate::elf::{self, program_header};
use crate::interrupt;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::allocate_frames;
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE};
use crate::{ptrace, syscall};
use crate::scheme::FileHandle;
use crate::start::usermode;
use crate::syscall::data::{SigAction, Stat};
use crate::syscall::error::*;
use crate::syscall::flag::{wifcontinued, wifstopped, AT_ENTRY, AT_NULL, AT_PHDR, CloneFlags,
CLONE_FILES, CLONE_FS, CLONE_SIGHAND, CLONE_STACK, CLONE_VFORK, CLONE_VM,
MapFlags, PROT_EXEC, PROT_READ, PROT_WRITE, PTRACE_EVENT_CLONE,
PTRACE_STOP_EXIT, SigActionFlags, SIG_BLOCK, SIG_DFL, SIG_SETMASK, SIG_UNBLOCK,
SIGCONT, SIGTERM, WaitFlags, WCONTINUED, WNOHANG, WUNTRACED};
use crate::syscall::ptrace_event;
use crate::syscall::validate::{validate_slice, validate_slice_mut};
pub fn brk(address: usize) -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
//println!("{}: {}: BRK {:X}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) },
// context.id.into(), address);
let current = if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
heap.start_address().get() + heap.size()
})
} else {
panic!("user heap not initialized");
};
if address == 0 {
//println!("Brk query {:X}", current);
Ok(current)
} else if address >= crate::USER_HEAP_OFFSET {
//TODO: out of memory errors
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
heap.resize(address - crate::USER_HEAP_OFFSET, true);
});
} else {
panic!("user heap not initialized");
}
//println!("Brk resize {:X}", address);
Ok(address)
} else {
//println!("Brk no mem");
Err(Error::new(ENOMEM))
}
}
/// Creates a new context (process or thread) as a copy of the caller.
///
/// `flags` selects what is shared versus copied: `CLONE_VM` shares the
/// address space, `CLONE_FS` the working directory, `CLONE_FILES` the file
/// table, `CLONE_SIGHAND` the signal actions, `CLONE_STACK` the user stack,
/// and `CLONE_VFORK` blocks the parent until the child execs or exits.
/// `stack_base` is the parent's kernel stack base pointer, used to locate
/// the return address to rewrite in the child's kernel stack.
///
/// Works in three phases: (1) snapshot/copy everything from the parent
/// under its read lock, (2) perform lock-free fixups (fd duplication,
/// grant handling, vfork blocking), (3) build the child context and its
/// page table. Returns the child's `ContextId`.
pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
    let ppid;
    let pid;
    {
        let pgid;
        let ruid;
        let rgid;
        let rns;
        let euid;
        let egid;
        let ens;
        let umask;
        let sigmask;
        let cpu_id_opt = None;
        let arch;
        let vfork;
        let mut kfx_opt = None;
        let mut kstack_opt = None;
        let mut offset = 0;
        let mut image = vec![];
        let mut heap_opt = None;
        let mut stack_opt = None;
        let mut sigstack_opt = None;
        let mut tls_opt = None;
        let grants;
        let name;
        let cwd;
        let files;
        let actions;

        // Copy from old process
        {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
            let context = context_lock.read();

            ppid = context.id;
            pgid = context.pgid;
            ruid = context.ruid;
            rgid = context.rgid;
            rns = context.rns;
            euid = context.euid;
            egid = context.egid;
            ens = context.ens;
            sigmask = context.sigmask;
            umask = context.umask;

            // Uncomment to disable threads on different CPUs
            // if flags.contains(CLONE_VM) {
            //     cpu_id_opt = context.cpu_id;
            // }

            arch = context.arch.clone();

            // Deep-copy the 512-byte FPU/SSE (FX) save area, 16-byte aligned.
            if let Some(ref fx) = context.kfx {
                let mut new_fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
                for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
                    *new_b = *b;
                }
                kfx_opt = Some(new_fx);
            }

            if let Some(ref stack) = context.kstack {
                // Get the relative offset to the return address of the function
                // obtaining `stack_base`.
                //
                // (base pointer - start of stack) - one
                offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
                let mut new_stack = stack.clone();

                unsafe {
                    // Set clone's return value to zero. This is done because
                    // the clone won't return like normal, which means the value
                    // would otherwise never get set.
                    if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) {
                        (*regs).scratch.rax = 0;
                    }

                    // Change the return address of the child (previously
                    // syscall) to the arch-specific clone_ret callback
                    let func_ptr = new_stack.as_mut_ptr().add(offset);
                    *(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize;
                }

                kstack_opt = Some(new_stack);
            }

            if flags.contains(CLONE_VM) {
                // Shared address space: just bump the refcounts.
                for memory_shared in context.image.iter() {
                    image.push(memory_shared.clone());
                }

                if let Some(ref heap_shared) = context.heap {
                    heap_opt = Some(heap_shared.clone());
                }
            } else {
                // Private address space: byte-copy image and heap into the
                // USER_TMP_* staging region; moved into place in phase 3.
                for memory_shared in context.image.iter() {
                    memory_shared.with(|memory| {
                        let mut new_memory = context::memory::Memory::new(
                            VirtualAddress::new(memory.start_address().get() + crate::USER_TMP_OFFSET),
                            memory.size(),
                            EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                            false
                        );

                        unsafe {
                            intrinsics::copy(memory.start_address().get() as *const u8,
                                             new_memory.start_address().get() as *mut u8,
                                             memory.size());
                        }

                        new_memory.remap(memory.flags());
                        image.push(new_memory.to_shared());
                    });
                }

                if let Some(ref heap_shared) = context.heap {
                    heap_shared.with(|heap| {
                        let mut new_heap = context::memory::Memory::new(
                            VirtualAddress::new(crate::USER_TMP_HEAP_OFFSET),
                            heap.size(),
                            EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                            false
                        );

                        unsafe {
                            intrinsics::copy(heap.start_address().get() as *const u8,
                                             new_heap.start_address().get() as *mut u8,
                                             heap.size());
                        }

                        new_heap.remap(heap.flags());
                        heap_opt = Some(new_heap.to_shared());
                    });
                }
            }

            // User stack: shared for CLONE_STACK, otherwise copied via staging.
            if let Some(ref stack_shared) = context.stack {
                if flags.contains(CLONE_STACK) {
                    stack_opt = Some(stack_shared.clone());
                } else {
                    stack_shared.with(|stack| {
                        let mut new_stack = context::memory::Memory::new(
                            VirtualAddress::new(crate::USER_TMP_STACK_OFFSET),
                            stack.size(),
                            EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                            false
                        );

                        unsafe {
                            intrinsics::copy(stack.start_address().get() as *const u8,
                                             new_stack.start_address().get() as *mut u8,
                                             stack.size());
                        }

                        new_stack.remap(stack.flags());
                        stack_opt = Some(new_stack.to_shared());
                    });
                }
            }

            // Signal stack is always copied, never shared.
            if let Some(ref sigstack) = context.sigstack {
                let mut new_sigstack = context::memory::Memory::new(
                    VirtualAddress::new(crate::USER_TMP_SIGSTACK_OFFSET),
                    sigstack.size(),
                    EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                    false
                );

                unsafe {
                    intrinsics::copy(sigstack.start_address().get() as *const u8,
                                     new_sigstack.start_address().get() as *mut u8,
                                     sigstack.size());
                }

                new_sigstack.remap(sigstack.flags());
                sigstack_opt = Some(new_sigstack);
            }

            if let Some(ref tls) = context.tls {
                let mut new_tls = context::memory::Tls {
                    master: tls.master,
                    file_size: tls.file_size,
                    mem: context::memory::Memory::new(
                        VirtualAddress::new(crate::USER_TMP_TLS_OFFSET),
                        tls.mem.size(),
                        EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                        true
                    ),
                    offset: tls.offset,
                };

                if flags.contains(CLONE_VM) {
                    // Fresh TLS image from the master copy for the new thread.
                    unsafe {
                        new_tls.load();
                    }
                } else {
                    // Fork: preserve the parent's current TLS contents.
                    unsafe {
                        intrinsics::copy(tls.mem.start_address().get() as *const u8,
                                         new_tls.mem.start_address().get() as *mut u8,
                                         tls.mem.size());
                    }
                }

                new_tls.mem.remap(tls.mem.flags());
                tls_opt = Some(new_tls);
            }

            if flags.contains(CLONE_VM) {
                grants = Arc::clone(&context.grants);
            } else {
                let mut grants_vec = Vec::new();
                for grant in context.grants.lock().iter() {
                    let start = VirtualAddress::new(grant.start_address().get() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
                    grants_vec.push(grant.secret_clone(start));
                }
                grants = Arc::new(Mutex::new(grants_vec));
            }

            if flags.contains(CLONE_VM) {
                name = Arc::clone(&context.name);
            } else {
                name = Arc::new(Mutex::new(context.name.lock().clone()));
            }

            if flags.contains(CLONE_FS) {
                cwd = Arc::clone(&context.cwd);
            } else {
                cwd = Arc::new(Mutex::new(context.cwd.lock().clone()));
            }

            if flags.contains(CLONE_FILES) {
                files = Arc::clone(&context.files);
            } else {
                files = Arc::new(Mutex::new(context.files.lock().clone()));
            }

            if flags.contains(CLONE_SIGHAND) {
                actions = Arc::clone(&context.actions);
            } else {
                actions = Arc::new(Mutex::new(context.actions.lock().clone()));
            }
        }

        // If not cloning files, dup to get a new number from scheme
        // This has to be done outside the context lock to prevent deadlocks
        if !flags.contains(CLONE_FILES) {
            for (_fd, file_opt) in files.lock().iter_mut().enumerate() {
                let new_file_opt = if let Some(ref file) = *file_opt {
                    Some(FileDescriptor {
                        description: Arc::clone(&file.description),
                        cloexec: file.cloexec,
                    })
                } else {
                    None
                };

                *file_opt = new_file_opt;
            }
        }

        // If not cloning virtual memory, use fmap to re-obtain every grant where possible
        // NOTE(review): `remove` is never set to true, so grants are currently
        // only logged, never dropped — the fmap re-obtain is still a TODO.
        if !flags.contains(CLONE_VM) {
            let mut i = 0;
            while i < grants.lock().len() {
                let remove = false;
                if let Some(grant) = grants.lock().get(i) {
                    if let Some(ref _desc) = grant.desc_opt {
                        println!("todo: clone grant {} using fmap: {:?}", i, grant);
                    }
                }

                if remove {
                    grants.lock().remove(i);
                } else {
                    i += 1;
                }
            }
        }

        // If vfork, block the current process
        // This has to be done after the operations that may require context switches
        if flags.contains(CLONE_VFORK) {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
            let mut context = context_lock.write();
            context.block("vfork");
            vfork = true;
        } else {
            vfork = false;
        }

        // Set up new process
        {
            let mut contexts = context::contexts_mut();
            let context_lock = contexts.new_context()?;
            let mut context = context_lock.write();

            pid = context.id;

            context.pgid = pgid;
            context.ppid = ppid;
            context.ruid = ruid;
            context.rgid = rgid;
            context.rns = rns;
            context.euid = euid;
            context.egid = egid;
            context.ens = ens;
            context.sigmask = sigmask;
            context.umask = umask;

            //TODO: Better CPU balancing
            if let Some(cpu_id) = cpu_id_opt {
                context.cpu_id = Some(cpu_id);
            } else {
                context.cpu_id = Some(pid.into() % crate::cpu_count());
            }

            context.status = context::Status::Runnable;

            context.vfork = vfork;

            context.arch = arch;

            // Build the child's page table, copying kernel-level mappings and
            // installing (shared or moved) user mappings below.
            let mut active_table = unsafe { ActivePageTable::new() };

            let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));

            let mut new_table = {
                let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
                InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
            };

            context.arch.set_page_table(unsafe { new_table.address() });

            // Copy kernel image mapping
            {
                let frame = active_table.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
                let flags = active_table.p4()[crate::KERNEL_PML4].flags();
                active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                    mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags);
                });
            }

            // Copy kernel heap mapping
            {
                let frame = active_table.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
                let flags = active_table.p4()[crate::KERNEL_HEAP_PML4].flags();
                active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                    mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
                });
            }

            if let Some(fx) = kfx_opt.take() {
                context.arch.set_fx(fx.as_ptr() as usize);
                context.kfx = Some(fx);
            }

            // Set kernel stack
            if let Some(stack) = kstack_opt.take() {
                context.arch.set_stack(stack.as_ptr() as usize + offset);
                context.kstack = Some(stack);
            }

            // TODO: Clone ksig?

            // Setup image, heap, and grants
            if flags.contains(CLONE_VM) {
                // Shared VM: alias the parent's top-level (PML4) entries.
                // Copy user image mapping, if found
                if ! image.is_empty() {
                    let frame = active_table.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
                    let flags = active_table.p4()[crate::USER_PML4].flags();
                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                        mapper.p4_mut()[crate::USER_PML4].set(frame, flags);
                    });
                }
                context.image = image;

                // Copy user heap mapping, if found
                if let Some(heap_shared) = heap_opt {
                    let frame = active_table.p4()[crate::USER_HEAP_PML4].pointed_frame().expect("user heap not mapped");
                    let flags = active_table.p4()[crate::USER_HEAP_PML4].flags();
                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                        mapper.p4_mut()[crate::USER_HEAP_PML4].set(frame, flags);
                    });
                    context.heap = Some(heap_shared);
                }

                // Copy grant mapping
                if ! grants.lock().is_empty() {
                    let frame = active_table.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
                    let flags = active_table.p4()[crate::USER_GRANT_PML4].flags();
                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                        mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
                    });
                }
                context.grants = grants;
            } else {
                // Copy percpu mapping
                for cpu_id in 0..crate::cpu_count() {
                    extern {
                        // The starting byte of the thread data segment
                        static mut __tdata_start: u8;
                        // The ending byte of the thread BSS segment
                        static mut __tbss_end: u8;
                    }

                    let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };

                    let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
                    let end = start + size;

                    let start_page = Page::containing_address(VirtualAddress::new(start));
                    let end_page = Page::containing_address(VirtualAddress::new(end - 1));
                    for page in Page::range_inclusive(start_page, end_page) {
                        let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
                        active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                            let result = mapper.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
                            // Ignore result due to operating on inactive table
                            unsafe { result.ignore(); }
                        });
                    }
                }

                // Move copy of image
                for memory_shared in image.iter_mut() {
                    memory_shared.with(|memory| {
                        let start = VirtualAddress::new(memory.start_address().get() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
                        memory.move_to(start, &mut new_table, &mut temporary_page);
                    });
                }
                context.image = image;

                // Move copy of heap
                if let Some(heap_shared) = heap_opt {
                    heap_shared.with(|heap| {
                        heap.move_to(VirtualAddress::new(crate::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
                    });
                    context.heap = Some(heap_shared);
                }

                // Move grants
                for grant in grants.lock().iter_mut() {
                    let start = VirtualAddress::new(grant.start_address().get() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
                    grant.move_to(start, &mut new_table, &mut temporary_page);
                }
                context.grants = grants;
            }

            // Setup user stack
            if let Some(stack_shared) = stack_opt {
                if flags.contains(CLONE_STACK) {
                    let frame = active_table.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
                    let flags = active_table.p4()[crate::USER_STACK_PML4].flags();
                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                        mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
                    });
                } else {
                    stack_shared.with(|stack| {
                        stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
                    });
                }
                context.stack = Some(stack_shared);
            }

            // Setup user sigstack
            if let Some(mut sigstack) = sigstack_opt {
                sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_table, &mut temporary_page);
                context.sigstack = Some(sigstack);
            }

            // Set up TCB
            let tcb_addr = crate::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
            let mut tcb = context::memory::Memory::new(
                VirtualAddress::new(tcb_addr),
                PAGE_SIZE,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            );

            // Setup user TLS
            if let Some(mut tls) = tls_opt {
                // Copy TLS mapping
                {
                    let frame = active_table.p4()[crate::USER_TLS_PML4].pointed_frame().expect("user tls not mapped");
                    let flags = active_table.p4()[crate::USER_TLS_PML4].flags();
                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
                        mapper.p4_mut()[crate::USER_TLS_PML4].set(frame, flags);
                    });
                }

                // TODO: Make sure size is not greater than USER_TLS_SIZE
                let tls_addr = crate::USER_TLS_OFFSET + context.id.into() * crate::USER_TLS_SIZE;
                //println!("{}: Copy TLS: address 0x{:x}, size 0x{:x}", context.id.into(), tls_addr, tls.mem.size());
                tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_table, &mut temporary_page);
                // First TCB word points past the end of the TLS block.
                unsafe {
                    *(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
                }
                context.tls = Some(tls);
            } else {
                //println!("{}: Copy TCB", context.id.into());
                // No TLS: inherit the parent's TCB contents verbatim.
                let parent_tcb_addr = crate::USER_TCB_OFFSET + ppid.into() * PAGE_SIZE;
                unsafe {
                    intrinsics::copy(parent_tcb_addr as *const u8,
                                     tcb_addr as *mut u8,
                                     tcb.size());
                }
            }

            tcb.move_to(VirtualAddress::new(tcb_addr), &mut new_table, &mut temporary_page);
            context.image.push(tcb.to_shared());

            context.name = name;

            context.cwd = cwd;

            context.files = files;

            context.actions = actions;
        }
    }

    if ptrace::send_event(ptrace_event!(PTRACE_EVENT_CLONE, pid.into())).is_some() {
        // Freeze the clone, allow ptrace to put breakpoints
        // to it before it starts
        let contexts = context::contexts();
        let context = contexts.get(pid).expect("Newly created context doesn't exist??");
        let mut context = context.write();
        context.ptrace_stop = true;
    }

    // Race to pick up the new process!
    ipi(IpiKind::Switch, IpiTarget::Other);

    let _ = unsafe { context::switch() };

    Ok(pid)
}
/// Releases a context's user memory: image, heap, stack, signal stack, TLS,
/// and — if this context holds the last reference to its grant list — the
/// grants as well.
///
/// `reaping` is true when invoked while reaping an already-exited context:
/// its memory must already be gone (asserted), and lingering grants are
/// unmapped through the context's own (inactive) page table instead of the
/// currently active one.
fn empty(context: &mut context::Context, reaping: bool) {
    if reaping {
        // Memory should already be unmapped
        assert!(context.image.is_empty());
        assert!(context.heap.is_none());
        assert!(context.stack.is_none());
        assert!(context.sigstack.is_none());
        assert!(context.tls.is_none());
    } else {
        // Unmap previous image, heap, grants, stack, and tls
        context.image.clear();
        drop(context.heap.take());
        drop(context.stack.take());
        drop(context.sigstack.take());
        drop(context.tls.take());
    }

    // Only tear down grants when no other context shares the list
    // (strong_count == 1 means this is the sole owner).
    let mut grants = context.grants.lock();
    if Arc::strong_count(&context.grants) == 1 {
        for grant in grants.drain(..) {
            if reaping {
                println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant);

                let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
                let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));

                grant.unmap_inactive(&mut new_table, &mut temporary_page);
            } else {
                grant.unmap();
            }
        }
    }
}
/// RAII wrapper around the executable's file handle: closing happens
/// automatically on drop, so every exit path of the exec code releases it.
struct ExecFile(FileHandle);

impl Drop for ExecFile {
    fn drop(&mut self) {
        // Best effort — a failed close cannot be reported from drop.
        let _ = syscall::close(self.0);
    }
}
/// Replaces the current context's user image with the ELF in `data` and
/// jumps to usermode; never returns.
///
/// Loads PT_LOAD and PT_TLS segments, rebuilds the heap/stack/sigstack and
/// TCB, pushes auxv, environment, and argument vectors onto the new user
/// stack (System V style: argc, argv…, NULL, envp…, NULL, auxv), applies
/// optional setuid/setgid, closes close-on-exec files, and unblocks a
/// vforked parent before entering usermode at the ELF entry point.
fn fexec_noreturn(
    setuid: Option<u32>,
    setgid: Option<u32>,
    name: Box<[u8]>,
    data: Box<[u8]>,
    args: Box<[Box<[u8]>]>,
    vars: Box<[Box<[u8]>]>,
    auxv: Box<[usize]>,
) -> ! {
    let entry;
    let singlestep;
    // Initial user stack pointer, leaving 256 bytes of headroom at the top.
    let mut sp = crate::USER_STACK_OFFSET + crate::USER_STACK_SIZE - 256;

    {
        let (vfork, ppid, files) = {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::new(ESRCH)).expect("exec_noreturn pid not found");
            let mut context = context_lock.write();

            // Preserve single-step tracing across the exec.
            singlestep = unsafe {
                ptrace::regs_for(&context).map(|s| s.is_singlestep()).unwrap_or(false)
            };

            context.name = Arc::new(Mutex::new(name));

            // Tear down the old image/heap/stack/TLS before mapping the new ones.
            empty(&mut context, false);

            if let Some(uid) = setuid {
                context.euid = uid;
            }

            if let Some(gid) = setgid {
                context.egid = gid;
            }

            // Map and copy new segments
            let mut tls_opt = None;
            {
                let elf = elf::Elf::from(&data).unwrap();
                entry = elf.entry();

                // Always map TCB
                let tcb_addr = crate::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
                let tcb_mem = context::memory::Memory::new(
                    VirtualAddress::new(tcb_addr),
                    PAGE_SIZE,
                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                    true
                );

                for segment in elf.segments() {
                    match segment.p_type {
                        program_header::PT_LOAD => {
                            // Map page-aligned; voff is the in-page offset of p_vaddr.
                            let voff = segment.p_vaddr as usize % PAGE_SIZE;
                            let vaddr = segment.p_vaddr as usize - voff;

                            let mut memory = context::memory::Memory::new(
                                VirtualAddress::new(vaddr),
                                segment.p_memsz as usize + voff,
                                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                                true
                            );

                            unsafe {
                                // Copy file data
                                intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
                                                 segment.p_vaddr as *mut u8,
                                                 segment.p_filesz as usize);
                            }

                            // Translate ELF segment flags to page-table flags.
                            let mut flags = EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;

                            if segment.p_flags & program_header::PF_R == program_header::PF_R {
                                flags.insert(EntryFlags::PRESENT);
                            }

                            // W ^ X. If it is executable, do not allow it to be writable, even if requested
                            if segment.p_flags & program_header::PF_X == program_header::PF_X {
                                flags.remove(EntryFlags::NO_EXECUTE);
                            } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
                                flags.insert(EntryFlags::WRITABLE);
                            }

                            memory.remap(flags);

                            context.image.push(memory.to_shared());
                        },
                        program_header::PT_TLS => {
                            // Round the TLS block up to its alignment, then to
                            // whole pages; the slack becomes the TLS offset.
                            let aligned_size = if segment.p_align > 0 {
                                ((segment.p_memsz + (segment.p_align - 1))/segment.p_align) * segment.p_align
                            } else {
                                segment.p_memsz
                            } as usize;
                            let rounded_size = ((aligned_size + PAGE_SIZE - 1)/PAGE_SIZE) * PAGE_SIZE;
                            let rounded_offset = rounded_size - aligned_size;

                            // TODO: Make sure size is not greater than USER_TLS_SIZE
                            let tls_addr = crate::USER_TLS_OFFSET + context.id.into() * crate::USER_TLS_SIZE;
                            let tls = context::memory::Tls {
                                master: VirtualAddress::new(segment.p_vaddr as usize),
                                file_size: segment.p_filesz as usize,
                                mem: context::memory::Memory::new(
                                    VirtualAddress::new(tls_addr),
                                    rounded_size as usize,
                                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                                    true
                                ),
                                offset: rounded_offset as usize,
                            };

                            // First TCB word points past the end of the TLS block.
                            unsafe {
                                *(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
                            }

                            tls_opt = Some(tls);
                        },
                        _ => (),
                    }
                }

                context.image.push(tcb_mem.to_shared());
            }

            // Data no longer required, can deallocate
            drop(data);

            // Map heap
            context.heap = Some(context::memory::Memory::new(
                VirtualAddress::new(crate::USER_HEAP_OFFSET),
                0,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            ).to_shared());

            // Map stack
            context.stack = Some(context::memory::Memory::new(
                VirtualAddress::new(crate::USER_STACK_OFFSET),
                crate::USER_STACK_SIZE,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            ).to_shared());

            // Map signal stack
            context.sigstack = Some(context::memory::Memory::new(
                VirtualAddress::new(crate::USER_SIGSTACK_OFFSET),
                crate::USER_SIGSTACK_SIZE,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            ));

            // Map TLS
            if let Some(mut tls) = tls_opt {
                unsafe {
                    tls.load();
                }
                context.tls = Some(tls);
            }

            // Pushes one machine word onto the new user stack.
            let mut push = |arg| {
                sp -= mem::size_of::<usize>();
                unsafe { *(sp as *mut usize) = arg; }
            };

            // Push auxiliary vector
            push(AT_NULL);
            for &arg in auxv.iter().rev() {
                push(arg);
            }
            drop(auxv); // no longer required

            let mut arg_size = 0;

            // Push environment variables and arguments
            for iter in &[&vars, &args] {
                // Push null-terminator
                push(0);
                // Push pointer to content
                for arg in iter.iter().rev() {
                    push(crate::USER_ARG_OFFSET + arg_size);
                    arg_size += arg.len() + 1;
                }
            }

            // For some reason, Linux pushes the argument count here (in
            // addition to being null-terminated), but not the environment
            // variable count.
            // TODO: Push more counts? Less? Stop having null-termination?
            push(args.len());

            // Write environment and argument pointers to USER_ARG_OFFSET
            if arg_size > 0 {
                let mut memory = context::memory::Memory::new(
                    VirtualAddress::new(crate::USER_ARG_OFFSET),
                    arg_size,
                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                    true
                );

                // Copy each string NUL-terminated, in the same (reversed)
                // order the pointers were pushed above.
                let mut arg_offset = 0;
                for arg in vars.iter().rev().chain(args.iter().rev()) {
                    unsafe {
                        intrinsics::copy(arg.as_ptr(),
                                         (crate::USER_ARG_OFFSET + arg_offset) as *mut u8,
                                         arg.len());
                    }
                    arg_offset += arg.len();

                    unsafe {
                        *((crate::USER_ARG_OFFSET + arg_offset) as *mut u8) = 0;
                    }
                    arg_offset += 1;
                }

                // Make the argument area read-only for userspace.
                memory.remap(EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE);
                context.image.push(memory.to_shared());
            }

            // Args and vars no longer required, can deallocate
            drop(args);
            drop(vars);

            // Reset all 128 signal actions to the default disposition.
            context.actions = Arc::new(Mutex::new(vec![(
                SigAction {
                    sa_handler: unsafe { mem::transmute(SIG_DFL) },
                    sa_mask: [0; 2],
                    sa_flags: SigActionFlags::empty(),
                },
                0
            ); 128]));

            let vfork = context.vfork;
            context.vfork = false;

            let files = Arc::clone(&context.files);

            (vfork, context.ppid, files)
        };

        // Close all close-on-exec file descriptors (outside the context lock).
        for (_fd, file_opt) in files.lock().iter_mut().enumerate() {
            let mut cloexec = false;
            if let Some(ref file) = *file_opt {
                if file.cloexec {
                    cloexec = true;
                }
            }

            if cloexec {
                let _ = file_opt.take().unwrap().close();
            }
        }

        // Exec completes the vfork contract: wake the blocked parent.
        if vfork {
            let contexts = context::contexts();
            if let Some(context_lock) = contexts.get(ppid) {
                let mut context = context_lock.write();
                if ! context.unblock() {
                    println!("{} not blocked for exec vfork unblock", ppid.into());
                }
            } else {
                println!("{} not found for exec vfork unblock", ppid.into());
            }
        }
    }

    // Go to usermode
    unsafe { usermode(entry, sp, 0, singlestep) }
}
/// Kernel-side program execution: read the executable open on `fd`, check
/// execute permission, resolve any ELF interpreter, and replace the current
/// process image.
///
/// `args`/`vars` are argument and environment strings already copied into
/// kernel memory. `name_override_opt` carries the original program name when
/// recursing for an interpreter; `auxv` carries the auxiliary vector built by
/// the first invocation. On success this function does not return to the
/// caller — it tail-calls `fexec_noreturn`. It returns only on error.
pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>]>, name_override_opt: Option<Box<[u8]>>, auxv: Option<Vec<usize>>) -> Result<usize> {
    // Effective IDs of the caller, used for the execute-permission check below.
    let (uid, gid) = {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();
        (context.euid, context.egid)
    };

    let mut stat: Stat;
    let mut name: Vec<u8>;
    let mut data: Vec<u8>;
    {
        // NOTE(review): ExecFile is defined elsewhere in this file; it appears
        // to wrap the handle for the duration of the metadata/read calls.
        let file = ExecFile(fd);

        stat = Stat::default();
        syscall::file_op_mut_slice(syscall::number::SYS_FSTAT, file.0, &mut stat)?;

        // Build the caller's effective permission bits: start with the
        // "other" bits, add owner/group bits when the IDs match, and grant
        // everything to root.
        let mut perm = stat.st_mode & 0o7;
        if stat.st_uid == uid {
            perm |= (stat.st_mode >> 6) & 0o7;
        }
        if stat.st_gid == gid {
            perm |= (stat.st_mode >> 3) & 0o7;
        }
        if uid == 0 {
            perm |= 0o7;
        }

        // Executing requires the execute bit.
        if perm & 0o1 != 0o1 {
            return Err(Error::new(EACCES));
        }

        // The displayed name is either inherited from the outer invocation
        // (interpreter recursion) or fetched from the scheme via FPATH.
        if let Some(name_override) = name_override_opt {
            name = Vec::from(name_override);
        } else {
            name = vec![0; 4096];
            let len = syscall::file_op_mut_slice(syscall::number::SYS_FPATH, file.0, &mut name)?;
            name.truncate(len);
        }

        //TODO: Only read elf header, not entire file. Then read required segments
        data = vec![0; stat.st_size as usize];
        syscall::file_op_mut_slice(syscall::number::SYS_READ, file.0, &mut data)?;
        drop(file);
    }

    // Set UID and GID are determined after resolving any hashbangs
    let setuid = if stat.st_mode & syscall::flag::MODE_SETUID == syscall::flag::MODE_SETUID {
        Some(stat.st_uid)
    } else {
        None
    };

    let setgid = if stat.st_mode & syscall::flag::MODE_SETGID == syscall::flag::MODE_SETGID {
        Some(stat.st_gid)
    } else {
        None
    };

    // The argument list is limited to avoid using too much userspace stack
    // This check is done last to allow all hashbangs to be resolved
    //
    // This should be based on the size of the userspace stack, divided
    // by the cost of each argument, which should be usize * 2, with
    // one additional argument added to represent the total size of the
    // argument pointer array and potential padding
    //
    // A limit of 4095 would mean a stack of (4095 + 1) * 8 * 2 = 65536, or 64KB
    if (args.len() + vars.len()) > 4095 {
        return Err(Error::new(E2BIG));
    }

    // Anything that does not parse as ELF is not executable here.
    let elf = match elf::Elf::from(&data) {
        Ok(elf) => elf,
        Err(err) => {
            println!("fexec: failed to execute {}: {}", fd.into(), err);
            return Err(Error::new(ENOEXEC));
        }
    };

    // `fexec_kernel` can recurse if an interpreter is found. We get the
    // auxiliery vector from the first invocation, which is passed via an
    // argument, or if this is the first one we create it.
    let auxv = if let Some(auxv) = auxv {
        auxv
    } else {
        let mut auxv = Vec::with_capacity(3);

        auxv.push(AT_ENTRY);
        auxv.push(elf.entry());
        auxv.push(AT_PHDR);
        auxv.push(elf.program_headers());

        auxv
    };

    // We check the validity of all loadable sections here
    for segment in elf.segments() {
        match segment.p_type {
            program_header::PT_INTERP => {
                //TODO: length restraint, parse interp earlier
                // Copy out the (NUL-terminated) interpreter path.
                let mut interp = vec![0; segment.p_memsz as usize];
                unsafe {
                    intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
                                     interp.as_mut_ptr(),
                                     segment.p_filesz as usize);
                }

                // Truncate at the first NUL byte.
                let mut i = 0;
                while i < interp.len() {
                    if interp[i] == 0 {
                        break;
                    }
                    i += 1;
                }
                interp.truncate(i);

                println!("  interpreter: {:?}", ::core::str::from_utf8(&interp));

                let interp_fd = super::fs::open(&interp, super::flag::O_RDONLY | super::flag::O_CLOEXEC)?;

                // Recurse with the interpreter as the executable; argv[0]
                // becomes the original program name so the interpreter can
                // find it.
                let mut args_vec = Vec::from(args);
                //TODO: pass file handle in auxv
                let name_override = name.into_boxed_slice();
                args_vec[0] = name_override.clone();

                // Drop variables, since fexec_kernel probably won't return
                drop(elf);
                drop(interp);

                return fexec_kernel(
                    interp_fd,
                    args_vec.into_boxed_slice(),
                    vars,
                    Some(name_override),
                    Some(auxv),
                );
            },
            program_header::PT_LOAD => {
                let voff = segment.p_vaddr as usize % PAGE_SIZE;
                let vaddr = segment.p_vaddr as usize - voff;

                // Due to the Userspace and kernel TLS bases being located right above 2GB,
                // limit any loadable sections to lower than that. Eventually we will need
                // to replace this with a more intelligent TLS address
                if vaddr >= 0x8000_0000 {
                    println!("exec: invalid section address {:X}", segment.p_vaddr);
                    return Err(Error::new(ENOEXEC));
                }
            },
            _ => (),
        }
    }

    // This is the point of no return, quite literaly. Any checks for validity need
    // to be done before, and appropriate errors returned. Otherwise, we have nothing
    // to return to.
    fexec_noreturn(setuid, setgid, name.into_boxed_slice(), data.into_boxed_slice(), args, vars, auxv.into_boxed_slice());
}
/// Userspace entry point for exec: validate and copy the caller's argument
/// and environment pointer lists into kernel memory, then hand off to
/// `fexec_kernel`. Each entry of `arg_ptrs`/`var_ptrs` is a `[pointer, len]`
/// pair in the caller's address space.
pub fn fexec(fd: FileHandle, arg_ptrs: &[[usize; 2]], var_ptrs: &[[usize; 2]]) -> Result<usize> {
    // Arguments must be moved into kernel space before exec unmaps all
    // user memory.
    let mut args = Vec::with_capacity(arg_ptrs.len());
    for &[ptr, len] in arg_ptrs {
        let arg = validate_slice(ptr as *const u8, len)?;
        args.push(arg.to_vec().into_boxed_slice());
    }

    // Environment variables are copied for the same reason.
    let mut vars = Vec::with_capacity(var_ptrs.len());
    for &[ptr, len] in var_ptrs {
        let var = validate_slice(ptr as *const u8, len)?;
        vars.push(var.to_vec().into_boxed_slice());
    }

    // From here on the kernel owns independent copies; the user pointers are
    // no longer touched.
    fexec_kernel(fd, args.into_boxed_slice(), vars.into_boxed_slice(), None, None)
}
/// Terminate the current context with `status`. Never returns: after
/// teardown the scheduler switches away from this (now Exited) context.
pub fn exit(status: usize) -> ! {
    // Give any attached tracer a chance to observe the exit first.
    ptrace::breakpoint_callback(PTRACE_STOP_EXIT, Some(ptrace_event!(PTRACE_STOP_EXIT, status)));

    {
        let context_lock = {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::new(ESRCH)).expect("exit failed to find context");
            Arc::clone(&context_lock)
        };

        let mut close_files = Vec::new();
        let pid = {
            let mut context = context_lock.write();
            {
                let mut lock = context.files.lock();
                // Only take ownership of the file table if nobody else shares
                // it (e.g. via CLONE_FILES); otherwise the files stay open.
                if Arc::strong_count(&context.files) == 1 {
                    mem::swap(lock.deref_mut(), &mut close_files);
                }
            }
            // Detach this context from the (possibly shared) table.
            context.files = Arc::new(Mutex::new(Vec::new()));
            context.id
        };

        // Files must be closed while context is valid so that messages can be passed
        for (_fd, file_opt) in close_files.drain(..).enumerate() {
            if let Some(file) = file_opt {
                let _ = file.close();
            }
        }

        // PGID and PPID must be grabbed after close, as context switches could change PGID or PPID if parent exits
        let (pgid, ppid) = {
            let context = context_lock.read();
            (context.pgid, context.ppid)
        };

        // Transfer child processes to parent (reparenting), and clear their
        // vfork flag since this process can no longer be unblocked by them.
        {
            let contexts = context::contexts();
            for (_id, context_lock) in contexts.iter() {
                let mut context = context_lock.write();
                if context.ppid == pid {
                    context.ppid = ppid;
                    context.vfork = false;
                }
            }
        }

        // Release this context's memory, mark it Exited, and collect any
        // wait statuses from children it never reaped.
        let (vfork, children) = {
            let mut context = context_lock.write();

            empty(&mut context, false);

            let vfork = context.vfork;
            context.vfork = false;

            context.status = context::Status::Exited(status);

            let children = context.waitpid.receive_all();

            (vfork, children)
        };

        {
            let contexts = context::contexts();
            if let Some(parent_lock) = contexts.get(ppid) {
                let waitpid = {
                    let mut parent = parent_lock.write();

                    // A vforked parent is blocked until the child exits/execs.
                    if vfork && ! parent.unblock() {
                        println!("{}: {} not blocked for exit vfork unblock", pid.into(), ppid.into());
                    }

                    Arc::clone(&parent.waitpid)
                };

                // Forward unreaped child statuses to the parent, then report
                // this process's own exit status.
                for (c_pid, c_status) in children {
                    waitpid.send(c_pid, c_status);
                }

                waitpid.send(WaitpidKey {
                    pid: Some(pid),
                    pgid: Some(pgid)
                }, (pid, status));
            } else {
                println!("{}: {} not found for exit vfork unblock", pid.into(), ppid.into());
            }
        }

        // Alert any tracers waiting of this process
        ptrace::close_tracee(pid);

        // PID 1 exiting means the whole system goes down: SIGTERM status
        // triggers a reset, anything else a stop.
        if pid == ContextId::from(1) {
            println!("Main kernel thread exited with status {:X}", status);

            extern {
                fn kreset() -> !;
                fn kstop() -> !;
            }

            if status == SIGTERM {
                unsafe { kreset(); }
            } else {
                unsafe { kstop(); }
            }
        }
    }

    // Switch away for good; this context is Exited and never resumes.
    let _ = unsafe { context::switch() };

    unreachable!();
}
pub fn getpid() -> Result<ContextId> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.id)
}
/// Return the process group ID of `pid`, or of the caller when `pid` is 0.
pub fn getpgid(pid: ContextId) -> Result<ContextId> {
    let contexts = context::contexts();
    // A pid of 0 means "the calling process".
    let target = if pid.into() == 0 {
        contexts.current().ok_or(Error::new(ESRCH))?
    } else {
        contexts.get(pid).ok_or(Error::new(ESRCH))?
    };
    Ok(target.read().pgid)
}
pub fn getppid() -> Result<ContextId> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.ppid)
}
/// Send signal `sig` to the process(es) selected by `pid`, following
/// kill(2)-style targeting: a positive pid targets that one process, -1
/// targets every process the caller may signal (ids > 2), and 0 or another
/// negative value targets a process group (the caller's own, or `-pid`).
///
/// Returns ESRCH if no matching process exists, EPERM if matches were found
/// but none could be signalled, EINVAL for out-of-range signal numbers.
pub fn kill(pid: ContextId, sig: usize) -> Result<usize> {
    // Caller credentials used for the permission check in `send`.
    let (ruid, euid, current_pgid) = {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();
        (context.ruid, context.euid, context.pgid)
    };

    // Only signal numbers below 0x7F are accepted.
    if sig < 0x7F {
        let mut found = 0;
        let mut sent = 0;

        {
            let contexts = context::contexts();

            // Permission-checked delivery to one context. Returns true when
            // the caller is allowed to signal it (root, or matching real uid).
            let send = |context: &mut context::Context| -> bool {
                if euid == 0
                || euid == context.ruid
                || ruid == context.ruid
                {
                    // If sig = 0, test that process exists and can be
                    // signalled, but don't send any signal.
                    if sig != 0 {
                        //TODO: sigprocmask
                        context.pending.push_back(sig as u8);
                        // Convert stopped processes to blocked if sending SIGCONT
                        if sig == SIGCONT {
                            if let context::Status::Stopped(_sig) = context.status {
                                context.status = context::Status::Blocked;
                            }
                        }
                    }
                    true
                } else {
                    false
                }
            };

            if pid.into() as isize > 0 {
                // Send to a single process
                if let Some(context_lock) = contexts.get(pid) {
                    let mut context = context_lock.write();

                    found += 1;
                    if send(&mut context) {
                        sent += 1;
                    }
                }
            } else if pid.into() as isize == -1 {
                // Send to every process with permission, except for init
                for (_id, context_lock) in contexts.iter() {
                    let mut context = context_lock.write();

                    if context.id.into() > 2 {
                        found += 1;

                        if send(&mut context) {
                            sent += 1;
                        }
                    }
                }
            } else {
                // pid 0 means the caller's own process group; other negative
                // values name the group directly.
                let pgid = if pid.into() == 0 {
                    current_pgid
                } else {
                    ContextId::from(-(pid.into() as isize) as usize)
                };

                // Send to every process in the process group whose ID
                for (_id, context_lock) in contexts.iter() {
                    let mut context = context_lock.write();

                    if context.pgid == pgid {
                        found += 1;

                        if send(&mut context) {
                            sent += 1;
                        }
                    }
                }
            }
        }

        if found == 0 {
            Err(Error::new(ESRCH))
        } else if sent == 0 {
            Err(Error::new(EPERM))
        } else {
            // Switch to ensure delivery to self
            unsafe { context::switch(); }

            Ok(0)
        }
    } else {
        Err(Error::new(EINVAL))
    }
}
/// Change the protection flags of the pages covering `[address, address+size)`.
///
/// Returns EFAULT for a zero/overflowing range or if any page in the range is
/// not currently mapped and present. Write permission can be dropped but not
/// gained (see the TODO below); read permission has no corresponding flag.
pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
    // println!("mprotect {:#X}, {}, {:#X}", address, size, flags);

    // size == 0 underflows here, yielding EFAULT; checked_add guards the
    // end-of-range overflow.
    let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?;
    let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?;

    let mut active_table = unsafe { ActivePageTable::new() };

    // Collect all remaps and flush the TLB once at the end (or on the error
    // paths, for any pages already remapped).
    let mut flush_all = MapperFlushAll::new();

    let start_page = Page::containing_address(VirtualAddress::new(address));
    let end_page = Page::containing_address(VirtualAddress::new(end_address));
    for page in Page::range_inclusive(start_page, end_page) {
        // Check if the page is actually mapped before trying to change the flags.
        // FIXME can other processes change if a page is mapped beneath our feet?
        let mut page_flags = if let Some(page_flags) = active_table.translate_page_flags(page) {
            page_flags
        } else {
            flush_all.flush(&mut active_table);
            return Err(Error::new(EFAULT));
        };
        if !page_flags.contains(EntryFlags::PRESENT) {
            flush_all.flush(&mut active_table);
            return Err(Error::new(EFAULT));
        }

        // Execute permission is expressed via the NO_EXECUTE bit, inverted.
        if flags.contains(PROT_EXEC) {
            page_flags.remove(EntryFlags::NO_EXECUTE);
        } else {
            page_flags.insert(EntryFlags::NO_EXECUTE);
        }

        if flags.contains(PROT_WRITE) {
            //TODO: Not allowing gain of write privileges
        } else {
            page_flags.remove(EntryFlags::WRITABLE);
        }

        if flags.contains(PROT_READ) {
            //TODO: No flags for readable pages
        } else {
            //TODO: No flags for readable pages
        }

        let flush = active_table.remap(page, page_flags);
        flush_all.consume(flush);
    }

    flush_all.flush(&mut active_table);

    Ok(0)
}
pub fn setpgid(pid: ContextId, pgid: ContextId) -> Result<usize> {
let contexts = context::contexts();
let current_pid = {
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.id
};
let context_lock = if pid.into() == 0 {
contexts.current().ok_or(Error::new(ESRCH))?
} else {
contexts.get(pid).ok_or(Error::new(ESRCH))?
};
let mut context = context_lock.write();
if context.id == current_pid || context.ppid == current_pid {
if pgid.into() == 0 {
context.pgid = context.id;
} else {
context.pgid = pgid;
}
Ok(0)
} else {
Err(Error::new(ESRCH))
}
}
pub fn sigaction(sig: usize, act_opt: Option<&SigAction>, oldact_opt: Option<&mut SigAction>, restorer: usize) -> Result<usize> {
if sig > 0 && sig <= 0x7F {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut actions = context.actions.lock();
if let Some(oldact) = oldact_opt {
*oldact = actions[sig].0;
}
if let Some(act) = act_opt {
actions[sig] = (*act, restorer);
}
Ok(0)
} else {
Err(Error::new(EINVAL))
}
}
pub fn sigprocmask(how: usize, mask_opt: Option<&[u64; 2]>, oldmask_opt: Option<&mut [u64; 2]>) -> Result<usize> {
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
if let Some(oldmask) = oldmask_opt {
*oldmask = context.sigmask;
}
if let Some(mask) = mask_opt {
match how {
SIG_BLOCK => {
context.sigmask[0] |= mask[0];
context.sigmask[1] |= mask[1];
},
SIG_UNBLOCK => {
context.sigmask[0] &= !mask[0];
context.sigmask[1] &= !mask[1];
},
SIG_SETMASK => {
context.sigmask[0] = mask[0];
context.sigmask[1] = mask[1];
},
_ => {
return Err(Error::new(EINVAL));
}
}
}
}
Ok(0)
}
pub fn sigreturn() -> Result<usize> {
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
context.ksig_restore = true;
context.block("sigreturn");
}
let _ = unsafe { context::switch() };
unreachable!();
}
pub fn umask(mask: usize) -> Result<usize> {
let previous;
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
previous = context.umask;
context.umask = mask;
}
Ok(previous)
}
/// Remove an exited context from the context table, releasing its memory.
/// Spins until the context is no longer running on any CPU before removal.
fn reap(pid: ContextId) -> Result<ContextId> {
    // Spin until the target is observed not running. The pause hint is
    // issued after every observation, matching the original loop's trace.
    loop {
        let still_running = {
            let contexts = context::contexts();
            let context_lock = contexts.get(pid).ok_or(Error::new(ESRCH))?;
            context_lock.read().running
        };

        interrupt::pause();

        if !still_running {
            break;
        }
    }

    // Remove the context from the table while holding the table write lock,
    // then release its remaining resources.
    let mut contexts = context::contexts_mut();
    let context_lock = contexts.remove(pid).ok_or(Error::new(ESRCH))?;
    {
        let mut context = context_lock.write();
        empty(&mut context, true);
    }
    drop(context_lock);

    Ok(pid)
}
/// Wait for a state change in a child: `pid` > 0 waits for that child,
/// 0 for any child, negative for any child in process group `-pid`.
/// Writes the raw status to `status_ptr` (if non-null) and reaps exited
/// children. With WNOHANG returns pid 0 instead of blocking; WUNTRACED /
/// WCONTINUED also report stopped / continued children respectively.
pub fn waitpid(pid: ContextId, status_ptr: usize, flags: WaitFlags) -> Result<ContextId> {
    // Caller's own ID (children have ppid == this) and its waitpid channel.
    let (ppid, waitpid) = {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();
        (context.id, Arc::clone(&context.waitpid))
    };

    // A null status pointer writes into a throwaway buffer instead.
    let mut tmp = [0];
    let status_slice = if status_ptr != 0 {
        validate_slice_mut(status_ptr as *mut usize, 1)?
    } else {
        &mut tmp
    };

    // Decide what to do with a received (pid, status) pair: continued and
    // stopped statuses are only reported when the matching flag is set
    // (None = keep waiting); an exit status is reported and the child reaped.
    let mut grim_reaper = |w_pid: ContextId, status: usize| -> Option<Result<ContextId>> {
        if wifcontinued(status) {
            if flags & WCONTINUED == WCONTINUED {
                status_slice[0] = status;
                Some(Ok(w_pid))
            } else {
                None
            }
        } else if wifstopped(status) {
            if flags & WUNTRACED == WUNTRACED {
                status_slice[0] = status;
                Some(Ok(w_pid))
            } else {
                None
            }
        } else {
            status_slice[0] = status;
            Some(reap(w_pid))
        }
    };

    // Loop until grim_reaper produces a result (a status we may report).
    loop {
        let res_opt = if pid.into() == 0 {
            // Check for existence of child
            {
                let mut found = false;

                let contexts = context::contexts();
                for (_id, context_lock) in contexts.iter() {
                    let context = context_lock.read();
                    if context.ppid == ppid {
                        found = true;
                        break;
                    }
                }

                if ! found {
                    return Err(Error::new(ECHILD));
                }
            }

            if flags & WNOHANG == WNOHANG {
                // Non-blocking: report pid 0 when nothing is pending.
                if let Some((_wid, (w_pid, status))) = waitpid.receive_any_nonblock() {
                    grim_reaper(w_pid, status)
                } else {
                    Some(Ok(ContextId::from(0)))
                }
            } else {
                let (_wid, (w_pid, status)) = waitpid.receive_any("waitpid any");
                grim_reaper(w_pid, status)
            }
        } else if (pid.into() as isize) < 0 {
            let pgid = ContextId::from(-(pid.into() as isize) as usize);

            // Check for existence of child in process group PGID
            {
                let mut found = false;

                let contexts = context::contexts();
                for (_id, context_lock) in contexts.iter() {
                    let context = context_lock.read();
                    if context.pgid == pgid {
                        found = true;
                        break;
                    }
                }

                if ! found {
                    return Err(Error::new(ECHILD));
                }
            }

            if flags & WNOHANG == WNOHANG {
                if let Some((w_pid, status)) = waitpid.receive_nonblock(&WaitpidKey {
                    pid: None,
                    pgid: Some(pgid)
                }) {
                    grim_reaper(w_pid, status)
                } else {
                    Some(Ok(ContextId::from(0)))
                }
            } else {
                let (w_pid, status) = waitpid.receive(&WaitpidKey {
                    pid: None,
                    pgid: Some(pgid)
                }, "waitpid pgid");
                grim_reaper(w_pid, status)
            }
        } else {
            // Waiting for one specific pid. The target is adopted if it is
            // not currently our child (workaround, see println below); an
            // already-exited adoptee is handled immediately.
            let hack_status = {
                let contexts = context::contexts();
                let context_lock = contexts.get(pid).ok_or(Error::new(ECHILD))?;
                let mut context = context_lock.write();
                if context.ppid != ppid {
                    println!("TODO: Hack for rustc - changing ppid of {} from {} to {}", context.id.into(), context.ppid.into(), ppid.into());
                    context.ppid = ppid;
                    //return Err(Error::new(ECHILD));
                    Some(context.status)
                } else {
                    None
                }
            };

            if let Some(context::Status::Exited(status)) = hack_status {
                // Drain any queued entry for this pid, then report directly.
                let _ = waitpid.receive_nonblock(&WaitpidKey {
                    pid: Some(pid),
                    pgid: None
                });
                grim_reaper(pid, status)
            } else if flags & WNOHANG == WNOHANG {
                if let Some((w_pid, status)) = waitpid.receive_nonblock(&WaitpidKey {
                    pid: Some(pid),
                    pgid: None
                }) {
                    grim_reaper(w_pid, status)
                } else {
                    Some(Ok(ContextId::from(0)))
                }
            } else {
                let (w_pid, status) = waitpid.receive(&WaitpidKey {
                    pid: Some(pid),
                    pgid: None
                }, "waitpid pid");
                grim_reaper(w_pid, status)
            }
        };

        if let Some(res) = res_opt {
            return res;
        }
    }
}
Apply suggestion to src/syscall/process.rs
use alloc::sync::Arc;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::alloc::{GlobalAlloc, Layout};
use core::{intrinsics, mem};
use core::ops::DerefMut;
use spin::Mutex;
use crate::context::file::FileDescriptor;
use crate::context::{ContextId, WaitpidKey};
use crate::context;
#[cfg(not(feature="doc"))]
use crate::elf::{self, program_header};
use crate::interrupt;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::allocate_frames;
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE};
use crate::{ptrace, syscall};
use crate::scheme::FileHandle;
use crate::start::usermode;
use crate::syscall::data::{SigAction, Stat};
use crate::syscall::error::*;
use crate::syscall::flag::{wifcontinued, wifstopped, AT_ENTRY, AT_NULL, AT_PHDR, CloneFlags,
CLONE_FILES, CLONE_FS, CLONE_SIGHAND, CLONE_STACK, CLONE_VFORK, CLONE_VM,
MapFlags, PROT_EXEC, PROT_READ, PROT_WRITE, PTRACE_EVENT_CLONE,
PTRACE_STOP_EXIT, SigActionFlags, SIG_BLOCK, SIG_DFL, SIG_SETMASK, SIG_UNBLOCK,
SIGCONT, SIGTERM, WaitFlags, WCONTINUED, WNOHANG, WUNTRACED};
use crate::syscall::ptrace_event;
use crate::syscall::validate::{validate_slice, validate_slice_mut};
/// Query or move the caller's program break. `address == 0` returns the
/// current break; an address at or above `USER_HEAP_OFFSET` resizes the heap
/// to end there and returns it; anything lower is ENOMEM.
pub fn brk(address: usize) -> Result<usize> {
    let contexts = context::contexts();
    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
    let context = context_lock.read();

    // Current end of the heap mapping (the break).
    let current = match context.heap {
        Some(ref heap_shared) => heap_shared.with(|heap| {
            heap.start_address().get() + heap.size()
        }),
        None => panic!("user heap not initialized"),
    };

    if address == 0 {
        // brk(0) is a pure query.
        Ok(current)
    } else if address >= crate::USER_HEAP_OFFSET {
        //TODO: out of memory errors
        match context.heap {
            Some(ref heap_shared) => heap_shared.with(|heap| {
                heap.resize(address - crate::USER_HEAP_OFFSET, true);
            }),
            None => panic!("user heap not initialized"),
        }
        Ok(address)
    } else {
        Err(Error::new(ENOMEM))
    }
}
pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
let ppid;
let pid;
{
let pgid;
let ruid;
let rgid;
let rns;
let euid;
let egid;
let ens;
let umask;
let sigmask;
let cpu_id_opt = None;
let arch;
let vfork;
let mut kfx_opt = None;
let mut kstack_opt = None;
let mut offset = 0;
let mut image = vec![];
let mut heap_opt = None;
let mut stack_opt = None;
let mut sigstack_opt = None;
let mut tls_opt = None;
let grants;
let name;
let cwd;
let files;
let actions;
// Copy from old process
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
ppid = context.id;
pgid = context.pgid;
ruid = context.ruid;
rgid = context.rgid;
rns = context.rns;
euid = context.euid;
egid = context.egid;
ens = context.ens;
sigmask = context.sigmask;
umask = context.umask;
// Uncomment to disable threads on different CPUs
// if flags.contains(CLONE_VM) {
// cpu_id_opt = context.cpu_id;
// }
arch = context.arch.clone();
if let Some(ref fx) = context.kfx {
let mut new_fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
*new_b = *b;
}
kfx_opt = Some(new_fx);
}
if let Some(ref stack) = context.kstack {
// Get the relative offset to the return address of the function
// obtaining `stack_base`.
//
// (base pointer - start of stack) - one
offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
let mut new_stack = stack.clone();
unsafe {
// Set clone's return value to zero. This is done because
// the clone won't return like normal, which means the value
// would otherwise never get set.
if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) {
(*regs).scratch.rax = 0;
}
// Change the return address of the child (previously
// syscall) to the arch-specific clone_ret callback
let func_ptr = new_stack.as_mut_ptr().add(offset);
*(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize;
}
kstack_opt = Some(new_stack);
}
if flags.contains(CLONE_VM) {
for memory_shared in context.image.iter() {
image.push(memory_shared.clone());
}
if let Some(ref heap_shared) = context.heap {
heap_opt = Some(heap_shared.clone());
}
} else {
for memory_shared in context.image.iter() {
memory_shared.with(|memory| {
let mut new_memory = context::memory::Memory::new(
VirtualAddress::new(memory.start_address().get() + crate::USER_TMP_OFFSET),
memory.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
);
unsafe {
intrinsics::copy(memory.start_address().get() as *const u8,
new_memory.start_address().get() as *mut u8,
memory.size());
}
new_memory.remap(memory.flags());
image.push(new_memory.to_shared());
});
}
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
let mut new_heap = context::memory::Memory::new(
VirtualAddress::new(crate::USER_TMP_HEAP_OFFSET),
heap.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
);
unsafe {
intrinsics::copy(heap.start_address().get() as *const u8,
new_heap.start_address().get() as *mut u8,
heap.size());
}
new_heap.remap(heap.flags());
heap_opt = Some(new_heap.to_shared());
});
}
}
if let Some(ref stack_shared) = context.stack {
if flags.contains(CLONE_STACK) {
stack_opt = Some(stack_shared.clone());
} else {
stack_shared.with(|stack| {
let mut new_stack = context::memory::Memory::new(
VirtualAddress::new(crate::USER_TMP_STACK_OFFSET),
stack.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
);
unsafe {
intrinsics::copy(stack.start_address().get() as *const u8,
new_stack.start_address().get() as *mut u8,
stack.size());
}
new_stack.remap(stack.flags());
stack_opt = Some(new_stack.to_shared());
});
}
}
if let Some(ref sigstack) = context.sigstack {
let mut new_sigstack = context::memory::Memory::new(
VirtualAddress::new(crate::USER_TMP_SIGSTACK_OFFSET),
sigstack.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
);
unsafe {
intrinsics::copy(sigstack.start_address().get() as *const u8,
new_sigstack.start_address().get() as *mut u8,
sigstack.size());
}
new_sigstack.remap(sigstack.flags());
sigstack_opt = Some(new_sigstack);
}
if let Some(ref tls) = context.tls {
let mut new_tls = context::memory::Tls {
master: tls.master,
file_size: tls.file_size,
mem: context::memory::Memory::new(
VirtualAddress::new(crate::USER_TMP_TLS_OFFSET),
tls.mem.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
true
),
offset: tls.offset,
};
if flags.contains(CLONE_VM) {
unsafe {
new_tls.load();
}
} else {
unsafe {
intrinsics::copy(tls.mem.start_address().get() as *const u8,
new_tls.mem.start_address().get() as *mut u8,
tls.mem.size());
}
}
new_tls.mem.remap(tls.mem.flags());
tls_opt = Some(new_tls);
}
if flags.contains(CLONE_VM) {
grants = Arc::clone(&context.grants);
} else {
let mut grants_vec = Vec::new();
for grant in context.grants.lock().iter() {
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
grants_vec.push(grant.secret_clone(start));
}
grants = Arc::new(Mutex::new(grants_vec));
}
if flags.contains(CLONE_VM) {
name = Arc::clone(&context.name);
} else {
name = Arc::new(Mutex::new(context.name.lock().clone()));
}
if flags.contains(CLONE_FS) {
cwd = Arc::clone(&context.cwd);
} else {
cwd = Arc::new(Mutex::new(context.cwd.lock().clone()));
}
if flags.contains(CLONE_FILES) {
files = Arc::clone(&context.files);
} else {
files = Arc::new(Mutex::new(context.files.lock().clone()));
}
if flags.contains(CLONE_SIGHAND) {
actions = Arc::clone(&context.actions);
} else {
actions = Arc::new(Mutex::new(context.actions.lock().clone()));
}
}
// If not cloning files, dup to get a new number from scheme
// This has to be done outside the context lock to prevent deadlocks
if !flags.contains(CLONE_FILES) {
for (_fd, file_opt) in files.lock().iter_mut().enumerate() {
let new_file_opt = if let Some(ref file) = *file_opt {
Some(FileDescriptor {
description: Arc::clone(&file.description),
cloexec: file.cloexec,
})
} else {
None
};
*file_opt = new_file_opt;
}
}
// If not cloning virtual memory, use fmap to re-obtain every grant where possible
if !flags.contains(CLONE_VM) {
let mut i = 0;
while i < grants.lock().len() {
let remove = false;
if let Some(grant) = grants.lock().get(i) {
if let Some(ref _desc) = grant.desc_opt {
println!("todo: clone grant {} using fmap: {:?}", i, grant);
}
}
if remove {
grants.lock().remove(i);
} else {
i += 1;
}
}
}
// If vfork, block the current process
// This has to be done after the operations that may require context switches
if flags.contains(CLONE_VFORK) {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
context.block("vfork");
vfork = true;
} else {
vfork = false;
}
// Set up new process
{
let mut contexts = context::contexts_mut();
let context_lock = contexts.new_context()?;
let mut context = context_lock.write();
pid = context.id;
context.pgid = pgid;
context.ppid = ppid;
context.ruid = ruid;
context.rgid = rgid;
context.rns = rns;
context.euid = euid;
context.egid = egid;
context.ens = ens;
context.sigmask = sigmask;
context.umask = umask;
//TODO: Better CPU balancing
if let Some(cpu_id) = cpu_id_opt {
context.cpu_id = Some(cpu_id);
} else {
context.cpu_id = Some(pid.into() % crate::cpu_count());
}
context.status = context::Status::Runnable;
context.vfork = vfork;
context.arch = arch;
let mut active_table = unsafe { ActivePageTable::new() };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
context.arch.set_page_table(unsafe { new_table.address() });
// Copy kernel image mapping
{
let frame = active_table.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
let flags = active_table.p4()[crate::KERNEL_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags);
});
}
// Copy kernel heap mapping
{
let frame = active_table.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
let flags = active_table.p4()[crate::KERNEL_HEAP_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
});
}
if let Some(fx) = kfx_opt.take() {
context.arch.set_fx(fx.as_ptr() as usize);
context.kfx = Some(fx);
}
// Set kernel stack
if let Some(stack) = kstack_opt.take() {
context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kstack = Some(stack);
}
// TODO: Clone ksig?
// Setup image, heap, and grants
if flags.contains(CLONE_VM) {
// Copy user image mapping, if found
if ! image.is_empty() {
let frame = active_table.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
let flags = active_table.p4()[crate::USER_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::USER_PML4].set(frame, flags);
});
}
context.image = image;
// Copy user heap mapping, if found
if let Some(heap_shared) = heap_opt {
let frame = active_table.p4()[crate::USER_HEAP_PML4].pointed_frame().expect("user heap not mapped");
let flags = active_table.p4()[crate::USER_HEAP_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::USER_HEAP_PML4].set(frame, flags);
});
context.heap = Some(heap_shared);
}
// Copy grant mapping
if ! grants.lock().is_empty() {
let frame = active_table.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
let flags = active_table.p4()[crate::USER_GRANT_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
});
}
context.grants = grants;
} else {
// Copy percpu mapping
for cpu_id in 0..crate::cpu_count() {
extern {
// The starting byte of the thread data segment
static mut __tdata_start: u8;
// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
}
let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let result = mapper.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
// Ignore result due to operating on inactive table
unsafe { result.ignore(); }
});
}
}
// Move copy of image
for memory_shared in image.iter_mut() {
memory_shared.with(|memory| {
let start = VirtualAddress::new(memory.start_address().get() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
memory.move_to(start, &mut new_table, &mut temporary_page);
});
}
context.image = image;
// Move copy of heap
if let Some(heap_shared) = heap_opt {
heap_shared.with(|heap| {
heap.move_to(VirtualAddress::new(crate::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
});
context.heap = Some(heap_shared);
}
// Move grants
for grant in grants.lock().iter_mut() {
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
grant.move_to(start, &mut new_table, &mut temporary_page);
}
context.grants = grants;
}
// Setup user stack
if let Some(stack_shared) = stack_opt {
if flags.contains(CLONE_STACK) {
let frame = active_table.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
let flags = active_table.p4()[crate::USER_STACK_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
});
} else {
stack_shared.with(|stack| {
stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
});
}
context.stack = Some(stack_shared);
}
// Setup user sigstack
if let Some(mut sigstack) = sigstack_opt {
sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_table, &mut temporary_page);
context.sigstack = Some(sigstack);
}
// Set up TCB
let tcb_addr = crate::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
let mut tcb = context::memory::Memory::new(
VirtualAddress::new(tcb_addr),
PAGE_SIZE,
EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
true
);
// Setup user TLS
if let Some(mut tls) = tls_opt {
// Copy TLS mapping
{
let frame = active_table.p4()[crate::USER_TLS_PML4].pointed_frame().expect("user tls not mapped");
let flags = active_table.p4()[crate::USER_TLS_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[crate::USER_TLS_PML4].set(frame, flags);
});
}
// TODO: Make sure size is not greater than USER_TLS_SIZE
let tls_addr = crate::USER_TLS_OFFSET + context.id.into() * crate::USER_TLS_SIZE;
//println!("{}: Copy TLS: address 0x{:x}, size 0x{:x}", context.id.into(), tls_addr, tls.mem.size());
tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_table, &mut temporary_page);
unsafe {
*(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
}
context.tls = Some(tls);
} else {
//println!("{}: Copy TCB", context.id.into());
let parent_tcb_addr = crate::USER_TCB_OFFSET + ppid.into() * PAGE_SIZE;
unsafe {
intrinsics::copy(parent_tcb_addr as *const u8,
tcb_addr as *mut u8,
tcb.size());
}
}
tcb.move_to(VirtualAddress::new(tcb_addr), &mut new_table, &mut temporary_page);
context.image.push(tcb.to_shared());
context.name = name;
context.cwd = cwd;
context.files = files;
context.actions = actions;
}
}
if ptrace::send_event(ptrace_event!(PTRACE_EVENT_CLONE, pid.into())).is_some() {
// Freeze the clone, allow ptrace to put breakpoints
// to it before it starts
let contexts = context::contexts();
let context = contexts.get(pid).expect("Newly created context doesn't exist??");
let mut context = context.write();
context.ptrace_stop = true;
}
// Race to pick up the new process!
ipi(IpiKind::Switch, IpiTarget::Other);
let _ = unsafe { context::switch() };
Ok(pid)
}
/// Tear down a context's userspace memory.
///
/// * `reaping == false`: called on a live context (from exec/exit); drops the
///   image, heap, stack, sigstack, and TLS so their mappings are released.
/// * `reaping == true`: called from `reap` on an already-exited context; the
///   mappings must already be gone, so this only asserts that and cleans up
///   any leaked grants against the dead context's inactive page table.
fn empty(context: &mut context::Context, reaping: bool) {
    if reaping {
        // Memory should already be unmapped
        assert!(context.image.is_empty());
        assert!(context.heap.is_none());
        assert!(context.stack.is_none());
        assert!(context.sigstack.is_none());
        assert!(context.tls.is_none());
    } else {
        // Unmap previous image, heap, grants, stack, and tls
        context.image.clear();
        drop(context.heap.take());
        drop(context.stack.take());
        drop(context.sigstack.take());
        drop(context.tls.take());
    }

    // Grants may be shared with other contexts (e.g. via CLONE_VM); only the
    // last holder actually unmaps them.
    let mut grants = context.grants.lock();
    if Arc::strong_count(&context.grants) == 1 {
        for grant in grants.drain(..) {
            if reaping {
                // A grant still alive at reap time is a leak: report it and
                // unmap it from the dead context's (inactive) page table.
                println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant);

                let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
                let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));

                grant.unmap_inactive(&mut new_table, &mut temporary_page);
            } else {
                grant.unmap();
            }
        }
    }
}
/// RAII wrapper for the file handle being executed: guarantees the handle is
/// closed on every exit path of `fexec_kernel`, including early `?` returns.
struct ExecFile(FileHandle);

impl Drop for ExecFile {
    fn drop(&mut self) {
        // Best effort: a failed close cannot be reported from drop.
        let _ = syscall::close(self.0);
    }
}
/// Final, non-returning stage of exec: replaces the current context's address
/// space with the ELF image in `data`, builds the initial userspace stack
/// (auxv, then env pointers, then arg pointers, then argc), and jumps to
/// usermode at the ELF entry point.
///
/// All validation must have been done by the caller (`fexec_kernel`) — once
/// the old image is emptied there is nothing to return to.
fn fexec_noreturn(
    setuid: Option<u32>,
    setgid: Option<u32>,
    name: Box<[u8]>,
    data: Box<[u8]>,
    args: Box<[Box<[u8]>]>,
    vars: Box<[Box<[u8]>]>,
    auxv: Box<[usize]>,
) -> ! {
    let entry;
    let singlestep;
    // Initial user stack pointer: top of the stack minus 256 bytes of headroom.
    let mut sp = crate::USER_STACK_OFFSET + crate::USER_STACK_SIZE - 256;

    {
        let (vfork, ppid, files) = {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::new(ESRCH)).expect("exec_noreturn pid not found");
            let mut context = context_lock.write();

            // Preserve single-step tracing across the exec.
            singlestep = unsafe {
                ptrace::regs_for(&context).map(|s| s.is_singlestep()).unwrap_or(false)
            };

            context.name = Arc::new(Mutex::new(name));

            // Drop the old image, heap, stack, sigstack, and TLS.
            empty(&mut context, false);

            if let Some(uid) = setuid {
                context.euid = uid;
            }

            if let Some(gid) = setgid {
                context.egid = gid;
            }

            // Map and copy new segments
            let mut tls_opt = None;
            {
                // Validity was already checked in fexec_kernel, so unwrap is safe.
                let elf = elf::Elf::from(&data).unwrap();
                entry = elf.entry();

                // Always map TCB
                let tcb_addr = crate::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
                let tcb_mem = context::memory::Memory::new(
                    VirtualAddress::new(tcb_addr),
                    PAGE_SIZE,
                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                    true
                );

                for segment in elf.segments() {
                    match segment.p_type {
                        program_header::PT_LOAD => {
                            let voff = segment.p_vaddr as usize % PAGE_SIZE;
                            let vaddr = segment.p_vaddr as usize - voff;

                            // Map writable first so the file contents can be copied in;
                            // remapped with the final flags below.
                            let mut memory = context::memory::Memory::new(
                                VirtualAddress::new(vaddr),
                                segment.p_memsz as usize + voff,
                                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                                true
                            );

                            unsafe {
                                // Copy file data
                                intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
                                                 segment.p_vaddr as *mut u8,
                                                 segment.p_filesz as usize);
                            }

                            let mut flags = EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;

                            if segment.p_flags & program_header::PF_R == program_header::PF_R {
                                flags.insert(EntryFlags::PRESENT);
                            }

                            // W ^ X. If it is executable, do not allow it to be writable, even if requested
                            if segment.p_flags & program_header::PF_X == program_header::PF_X {
                                flags.remove(EntryFlags::NO_EXECUTE);
                            } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
                                flags.insert(EntryFlags::WRITABLE);
                            }

                            memory.remap(flags);
                            context.image.push(memory.to_shared());
                        },
                        program_header::PT_TLS => {
                            // Round the TLS block up to its alignment, then to whole pages.
                            let aligned_size = if segment.p_align > 0 {
                                ((segment.p_memsz + (segment.p_align - 1))/segment.p_align) * segment.p_align
                            } else {
                                segment.p_memsz
                            } as usize;
                            let rounded_size = ((aligned_size + PAGE_SIZE - 1)/PAGE_SIZE) * PAGE_SIZE;
                            let rounded_offset = rounded_size - aligned_size;

                            // TODO: Make sure size is not greater than USER_TLS_SIZE
                            let tls_addr = crate::USER_TLS_OFFSET + context.id.into() * crate::USER_TLS_SIZE;
                            let tls = context::memory::Tls {
                                master: VirtualAddress::new(segment.p_vaddr as usize),
                                file_size: segment.p_filesz as usize,
                                mem: context::memory::Memory::new(
                                    VirtualAddress::new(tls_addr),
                                    rounded_size as usize,
                                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                                    true
                                ),
                                offset: rounded_offset as usize,
                            };

                            // First TCB slot points one past the end of the TLS block
                            // — NOTE(review): presumably the x86 TLS ABI self-pointer; confirm.
                            unsafe {
                                *(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
                            }

                            tls_opt = Some(tls);
                        },
                        _ => (),
                    }
                }

                context.image.push(tcb_mem.to_shared());
            }

            // Data no longer required, can deallocate
            drop(data);

            // Map heap (starts empty; grown on demand)
            context.heap = Some(context::memory::Memory::new(
                VirtualAddress::new(crate::USER_HEAP_OFFSET),
                0,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            ).to_shared());

            // Map stack
            context.stack = Some(context::memory::Memory::new(
                VirtualAddress::new(crate::USER_STACK_OFFSET),
                crate::USER_STACK_SIZE,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            ).to_shared());

            // Map signal stack
            context.sigstack = Some(context::memory::Memory::new(
                VirtualAddress::new(crate::USER_SIGSTACK_OFFSET),
                crate::USER_SIGSTACK_SIZE,
                EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
                true
            ));

            // Map TLS
            if let Some(mut tls) = tls_opt {
                unsafe {
                    tls.load();
                }
                context.tls = Some(tls);
            }

            // Push one usize onto the (downward-growing) user stack.
            let mut push = |arg| {
                sp -= mem::size_of::<usize>();
                unsafe { *(sp as *mut usize) = arg; }
            };

            // Push auxiliary vector (reversed so it reads in order from sp upward)
            push(AT_NULL);
            for &arg in auxv.iter().rev() {
                push(arg);
            }
            drop(auxv); // no longer required

            let mut arg_size = 0;

            // Push environment variables and arguments
            for iter in &[&vars, &args] {
                // Push null-terminator
                push(0);

                // Push pointer to content
                for arg in iter.iter().rev() {
                    push(crate::USER_ARG_OFFSET + arg_size);
                    arg_size += arg.len() + 1;
                }
            }

            // For some reason, Linux pushes the argument count here (in
            // addition to being null-terminated), but not the environment
            // variable count.
            // TODO: Push more counts? Less? Stop having null-termination?
            push(args.len());

            // Write environment and argument pointers to USER_ARG_OFFSET
            if arg_size > 0 {
                let mut memory = context::memory::Memory::new(
                    VirtualAddress::new(crate::USER_ARG_OFFSET),
                    arg_size,
                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
                    true
                );

                let mut arg_offset = 0;
                // Strings are laid out in the same order their pointers were
                // pushed above: vars first, then args, each reversed.
                for arg in vars.iter().rev().chain(args.iter().rev()) {
                    unsafe {
                        intrinsics::copy(arg.as_ptr(),
                                         (crate::USER_ARG_OFFSET + arg_offset) as *mut u8,
                                         arg.len());
                    }
                    arg_offset += arg.len();

                    // NUL-terminate each string.
                    unsafe {
                        *((crate::USER_ARG_OFFSET + arg_offset) as *mut u8) = 0;
                    }
                    arg_offset += 1;
                }

                // Drop write permission now that the strings are in place.
                memory.remap(EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE);
                context.image.push(memory.to_shared());
            }

            // Args and vars no longer required, can deallocate
            drop(args);
            drop(vars);

            // Reset all 128 signal actions to the default handler.
            context.actions = Arc::new(Mutex::new(vec![(
                SigAction {
                    sa_handler: unsafe { mem::transmute(SIG_DFL) },
                    sa_mask: [0; 2],
                    sa_flags: SigActionFlags::empty(),
                },
                0
            ); 128]));

            let vfork = context.vfork;
            context.vfork = false;

            let files = Arc::clone(&context.files);

            (vfork, context.ppid, files)
        };

        // Close any file descriptors marked close-on-exec.
        for (_fd, file_opt) in files.lock().iter_mut().enumerate() {
            let mut cloexec = false;
            if let Some(ref file) = *file_opt {
                if file.cloexec {
                    cloexec = true;
                }
            }

            if cloexec {
                let _ = file_opt.take().unwrap().close();
            }
        }

        // A vforked parent is blocked until the child execs; wake it now.
        if vfork {
            let contexts = context::contexts();
            if let Some(context_lock) = contexts.get(ppid) {
                let mut context = context_lock.write();
                if ! context.unblock() {
                    println!("{} not blocked for exec vfork unblock", ppid.into());
                }
            } else {
                println!("{} not found for exec vfork unblock", ppid.into());
            }
        }
    }

    // Go to usermode
    unsafe { usermode(entry, sp, 0, singlestep) }
}
/// Load and validate an executable from `fd`, then hand off to
/// `fexec_noreturn` (which does not return).
///
/// Checks execute permission against the caller's euid/egid, reads the whole
/// file, validates the ELF, and recurses for a PT_INTERP interpreter if one
/// is present. Only returns on error (e.g. EACCES, ENOEXEC, E2BIG).
pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>]>, name_override_opt: Option<Box<[u8]>>, auxv: Option<Vec<usize>>) -> Result<usize> {
    let (uid, gid) = {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();
        (context.euid, context.egid)
    };

    let mut stat: Stat;
    let mut name: Vec<u8>;
    let mut data: Vec<u8>;
    {
        // ExecFile closes fd on every exit path, including `?` returns.
        let file = ExecFile(fd);

        stat = Stat::default();
        syscall::file_op_mut_slice(syscall::number::SYS_FSTAT, file.0, &mut stat)?;

        // Accumulate the caller's applicable permission bits:
        // other, plus owner/group bits when uid/gid match, plus all for root.
        let mut perm = stat.st_mode & 0o7;
        if stat.st_uid == uid {
            perm |= (stat.st_mode >> 6) & 0o7;
        }
        if stat.st_gid == gid {
            perm |= (stat.st_mode >> 3) & 0o7;
        }
        if uid == 0 {
            perm |= 0o7;
        }

        // Require execute permission.
        if perm & 0o1 != 0o1 {
            return Err(Error::new(EACCES));
        }

        if let Some(name_override) = name_override_opt {
            name = Vec::from(name_override);
        } else {
            name = vec![0; 4096];
            let len = syscall::file_op_mut_slice(syscall::number::SYS_FPATH, file.0, &mut name)?;
            name.truncate(len);
        }

        //TODO: Only read elf header, not entire file. Then read required segments
        data = vec![0; stat.st_size as usize];
        syscall::file_op_mut_slice(syscall::number::SYS_READ, file.0, &mut data)?;
        drop(file);
    }

    // Set UID and GID are determined after resolving any hashbangs
    let setuid = if stat.st_mode & syscall::flag::MODE_SETUID == syscall::flag::MODE_SETUID {
        Some(stat.st_uid)
    } else {
        None
    };

    let setgid = if stat.st_mode & syscall::flag::MODE_SETGID == syscall::flag::MODE_SETGID {
        Some(stat.st_gid)
    } else {
        None
    };

    // The argument list is limited to avoid using too much userspace stack
    // This check is done last to allow all hashbangs to be resolved
    //
    // This should be based on the size of the userspace stack, divided
    // by the cost of each argument, which should be usize * 2, with
    // one additional argument added to represent the total size of the
    // argument pointer array and potential padding
    //
    // A limit of 4095 would mean a stack of (4095 + 1) * 8 * 2 = 65536, or 64KB
    if (args.len() + vars.len()) > 4095 {
        return Err(Error::new(E2BIG));
    }

    let elf = match elf::Elf::from(&data) {
        Ok(elf) => elf,
        Err(err) => {
            println!("fexec: failed to execute {}: {}", fd.into(), err);
            return Err(Error::new(ENOEXEC));
        }
    };

    // `fexec_kernel` can recurse if an interpreter is found. We get the
    // auxiliery vector from the first invocation, which is passed via an
    // argument, or if this is the first one we create it.
    let auxv = if let Some(auxv) = auxv {
        auxv
    } else {
        let mut auxv = Vec::with_capacity(3);

        auxv.push(AT_ENTRY);
        auxv.push(elf.entry());
        auxv.push(AT_PHDR);
        auxv.push(elf.program_headers());

        auxv
    };

    // We check the validity of all loadable sections here
    for segment in elf.segments() {
        match segment.p_type {
            program_header::PT_INTERP => {
                //TODO: length restraint, parse interp earlier
                // Extract the NUL-terminated interpreter path from the segment.
                let mut interp = vec![0; segment.p_memsz as usize];
                unsafe {
                    intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
                                     interp.as_mut_ptr(),
                                     segment.p_filesz as usize);
                }

                // Truncate at the first NUL byte.
                let mut i = 0;
                while i < interp.len() {
                    if interp[i] == 0 {
                        break;
                    }
                    i += 1;
                }
                interp.truncate(i);

                println!(" interpreter: {:?}", ::core::str::from_utf8(&interp));

                let interp_fd = super::fs::open(&interp, super::flag::O_RDONLY | super::flag::O_CLOEXEC)?;

                let mut args_vec = Vec::from(args);
                //TODO: pass file handle in auxv
                // argv[0] becomes the original program's name.
                let name_override = name.into_boxed_slice();
                args_vec[0] = name_override.clone();

                // Drop variables, since fexec_kernel probably won't return
                drop(elf);
                drop(interp);

                // Recurse to execute the interpreter instead; the original
                // program's auxv carries the real entry point and phdrs.
                return fexec_kernel(
                    interp_fd,
                    args_vec.into_boxed_slice(),
                    vars,
                    Some(name_override),
                    Some(auxv),
                );
            },
            program_header::PT_LOAD => {
                let voff = segment.p_vaddr as usize % PAGE_SIZE;
                let vaddr = segment.p_vaddr as usize - voff;

                // Due to the Userspace and kernel TLS bases being located right above 2GB,
                // limit any loadable sections to lower than that. Eventually we will need
                // to replace this with a more intelligent TLS address
                if vaddr >= 0x8000_0000 {
                    println!("exec: invalid section address {:X}", segment.p_vaddr);
                    return Err(Error::new(ENOEXEC));
                }
            },
            _ => (),
        }
    }

    // This is the point of no return, quite literaly. Any checks for validity need
    // to be done before, and appropriate errors returned. Otherwise, we have nothing
    // to return to.
    fexec_noreturn(setuid, setgid, name.into_boxed_slice(), data.into_boxed_slice(), args, vars, auxv.into_boxed_slice());
}
/// Userspace entry point for exec: copies the argument and environment
/// (pointer, length) pairs out of userspace into owned kernel buffers, then
/// delegates to `fexec_kernel`.
///
/// The copies must happen before exec unmaps all of the caller's memory;
/// neither `arg_ptrs` nor `var_ptrs` may be touched afterwards.
pub fn fexec(fd: FileHandle, arg_ptrs: &[[usize; 2]], var_ptrs: &[[usize; 2]]) -> Result<usize> {
    // Validate one userspace (ptr, len) pair and copy it into kernel space.
    let copy_in = |pair: &[usize; 2]| -> Result<Box<[u8]>> {
        let slice = validate_slice(pair[0] as *const u8, pair[1])?;
        Ok(slice.to_vec().into_boxed_slice())
    };

    let args = arg_ptrs.iter().map(copy_in).collect::<Result<Vec<_>>>()?;
    let vars = var_ptrs.iter().map(copy_in).collect::<Result<Vec<_>>>()?;

    fexec_kernel(fd, args.into_boxed_slice(), vars.into_boxed_slice(), None, None)
}
/// Terminate the current context with `status`. Never returns.
///
/// Order matters here: files are closed first (while the context can still
/// pass messages), children are reparented to our parent, the address space
/// is emptied, and the parent is notified through its waitpid queue. If
/// PID 1 exits, the machine is reset (SIGTERM) or stopped.
pub fn exit(status: usize) -> ! {
    ptrace::breakpoint_callback(PTRACE_STOP_EXIT, Some(ptrace_event!(PTRACE_STOP_EXIT, status)));

    {
        let context_lock = {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::new(ESRCH)).expect("exit failed to find context");
            Arc::clone(&context_lock)
        };

        let mut close_files = Vec::new();
        let pid = {
            let mut context = context_lock.write();
            {
                let mut lock = context.files.lock();
                // Only take ownership of the file table if nobody else shares it.
                if Arc::strong_count(&context.files) == 1 {
                    mem::swap(lock.deref_mut(), &mut close_files);
                }
            }
            context.files = Arc::new(Mutex::new(Vec::new()));
            context.id
        };

        // Files must be closed while context is valid so that messages can be passed
        for (_fd, file_opt) in close_files.drain(..).enumerate() {
            if let Some(file) = file_opt {
                let _ = file.close();
            }
        }

        // PGID and PPID must be grabbed after close, as context switches could change PGID or PPID if parent exits
        let (pgid, ppid) = {
            let context = context_lock.read();
            (context.pgid, context.ppid)
        };

        // Transfer child processes to parent
        {
            let contexts = context::contexts();
            for (_id, context_lock) in contexts.iter() {
                let mut context = context_lock.write();
                if context.ppid == pid {
                    context.ppid = ppid;
                    context.vfork = false;
                }
            }
        }

        let (vfork, children) = {
            let mut context = context_lock.write();

            // Release the address space and mark this context exited.
            empty(&mut context, false);

            let vfork = context.vfork;
            context.vfork = false;

            context.status = context::Status::Exited(status);

            // Drain any exit notifications queued by our own children.
            let children = context.waitpid.receive_all();

            (vfork, children)
        };

        {
            let contexts = context::contexts();
            if let Some(parent_lock) = contexts.get(ppid) {
                let waitpid = {
                    let mut parent = parent_lock.write();

                    // A vforked parent is still blocked waiting on us.
                    if vfork && ! parent.unblock() {
                        println!("{}: {} not blocked for exit vfork unblock", pid.into(), ppid.into());
                    }

                    Arc::clone(&parent.waitpid)
                };

                // Forward our children's pending notifications to the parent,
                // then report our own exit keyed by both pid and pgid.
                for (c_pid, c_status) in children {
                    waitpid.send(c_pid, c_status);
                }

                waitpid.send(WaitpidKey {
                    pid: Some(pid),
                    pgid: Some(pgid)
                }, (pid, status));
            } else {
                println!("{}: {} not found for exit vfork unblock", pid.into(), ppid.into());
            }
        }

        // Alert any tracers waiting of this process
        ptrace::close_tracee(pid);

        // PID 1 going away means the system itself is done.
        if pid == ContextId::from(1) {
            println!("Main kernel thread exited with status {:X}", status);

            extern {
                fn kreset() -> !;
                fn kstop() -> !;
            }

            if status == SIGTERM {
                unsafe { kreset(); }
            } else {
                unsafe { kstop(); }
            }
        }
    }

    // Switch away for good; this context is Exited and never scheduled again.
    let _ = unsafe { context::switch() };

    unreachable!();
}
pub fn getpid() -> Result<ContextId> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.id)
}
pub fn getpgid(pid: ContextId) -> Result<ContextId> {
let contexts = context::contexts();
let context_lock = if pid.into() == 0 {
contexts.current().ok_or(Error::new(ESRCH))?
} else {
contexts.get(pid).ok_or(Error::new(ESRCH))?
};
let context = context_lock.read();
Ok(context.pgid)
}
pub fn getppid() -> Result<ContextId> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.ppid)
}
/// Send signal `sig` to one or more contexts, POSIX-kill style:
/// * `pid > 0`  — that single process;
/// * `pid == -1` — every process the caller may signal (except low pids);
/// * `pid == 0` / `pid < -1` — the caller's / the given process group.
///
/// `sig == 0` probes for existence/permission without delivering anything.
/// Valid signal numbers are below 0x7F. Returns ESRCH if no target exists,
/// EPERM if targets exist but none could be signalled.
pub fn kill(pid: ContextId, sig: usize) -> Result<usize> {
    let (ruid, euid, current_pgid) = {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();
        (context.ruid, context.euid, context.pgid)
    };

    if sig < 0x7F {
        // found: processes matched by the pid selector;
        // sent: subset we had permission to signal.
        let mut found = 0;
        let mut sent = 0;

        {
            let contexts = context::contexts();

            // Permission check + delivery for one candidate context.
            let send = |context: &mut context::Context| -> bool {
                if euid == 0
                    || euid == context.ruid
                    || ruid == context.ruid
                {
                    // If sig = 0, test that process exists and can be
                    // signalled, but don't send any signal.
                    if sig != 0 {
                        //TODO: sigprocmask
                        context.pending.push_back(sig as u8);
                        // Convert stopped processes to blocked if sending SIGCONT
                        if sig == SIGCONT {
                            if let context::Status::Stopped(_sig) = context.status {
                                context.status = context::Status::Blocked;
                            }
                        }
                    }
                    true
                } else {
                    false
                }
            };

            if pid.into() as isize > 0 {
                // Send to a single process
                if let Some(context_lock) = contexts.get(pid) {
                    let mut context = context_lock.write();

                    found += 1;
                    if send(&mut context) {
                        sent += 1;
                    }
                }
            } else if pid.into() as isize == -1 {
                // Send to every process with permission, except for init
                for (_id, context_lock) in contexts.iter() {
                    let mut context = context_lock.write();

                    if context.id.into() > 2 {
                        found += 1;

                        if send(&mut context) {
                            sent += 1;
                        }
                    }
                }
            } else {
                // pid == 0 targets the caller's own process group;
                // pid < -1 targets group |pid|.
                let pgid = if pid.into() == 0 {
                    current_pgid
                } else {
                    ContextId::from(-(pid.into() as isize) as usize)
                };

                // Send to every process in the process group whose ID
                for (_id, context_lock) in contexts.iter() {
                    let mut context = context_lock.write();

                    if context.pgid == pgid {
                        found += 1;

                        if send(&mut context) {
                            sent += 1;
                        }
                    }
                }
            }
        }

        if found == 0 {
            Err(Error::new(ESRCH))
        } else if sent == 0 {
            Err(Error::new(EPERM))
        } else {
            // Switch to ensure delivery to self
            unsafe { context::switch(); }

            Ok(0)
        }
    } else {
        Err(Error::new(EINVAL))
    }
}
/// Change the protection flags of every page covering `[address, address+size)`.
///
/// Fails with EFAULT if `size == 0`, the range overflows, or any page in the
/// range is unmapped/non-present. Write permission can only be removed, never
/// gained, and the flags changed so far are flushed even on the error paths.
pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
    // println!("mprotect {:#X}, {}, {:#X}", address, size, flags);

    // Reject size == 0 and address-range overflow up front.
    let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?;
    let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?;

    let mut active_table = unsafe { ActivePageTable::new() };

    // Accumulates TLB flushes for all remapped pages; flushed once at the end
    // (or before any early error return).
    let mut flush_all = MapperFlushAll::new();

    let start_page = Page::containing_address(VirtualAddress::new(address));
    let end_page = Page::containing_address(VirtualAddress::new(end_address));
    for page in Page::range_inclusive(start_page, end_page) {
        // Check if the page is actually mapped before trying to change the flags.
        // FIXME can other processes change if a page is mapped beneath our feet?
        let mut page_flags = if let Some(page_flags) = active_table.translate_page_flags(page) {
            page_flags
        } else {
            // Flush whatever was already remapped before bailing out.
            flush_all.flush(&mut active_table);
            return Err(Error::new(EFAULT));
        };

        if !page_flags.contains(EntryFlags::PRESENT) {
            flush_all.flush(&mut active_table);
            return Err(Error::new(EFAULT));
        }

        if flags.contains(PROT_EXEC) {
            page_flags.remove(EntryFlags::NO_EXECUTE);
        } else {
            page_flags.insert(EntryFlags::NO_EXECUTE);
        }

        if flags.contains(PROT_WRITE) {
            //TODO: Not allowing gain of write privileges
        } else {
            page_flags.remove(EntryFlags::WRITABLE);
        }

        if flags.contains(PROT_READ) {
            //TODO: No flags for readable pages
        } else {
            //TODO: No flags for readable pages
        }

        let flush = active_table.remap(page, page_flags);
        flush_all.consume(flush);
    }

    flush_all.flush(&mut active_table);

    Ok(0)
}
pub fn setpgid(pid: ContextId, pgid: ContextId) -> Result<usize> {
let contexts = context::contexts();
let current_pid = {
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.id
};
let context_lock = if pid.into() == 0 {
contexts.current().ok_or(Error::new(ESRCH))?
} else {
contexts.get(pid).ok_or(Error::new(ESRCH))?
};
let mut context = context_lock.write();
if context.id == current_pid || context.ppid == current_pid {
if pgid.into() == 0 {
context.pgid = context.id;
} else {
context.pgid = pgid;
}
Ok(0)
} else {
Err(Error::new(ESRCH))
}
}
pub fn sigaction(sig: usize, act_opt: Option<&SigAction>, oldact_opt: Option<&mut SigAction>, restorer: usize) -> Result<usize> {
if sig > 0 && sig <= 0x7F {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut actions = context.actions.lock();
if let Some(oldact) = oldact_opt {
*oldact = actions[sig].0;
}
if let Some(act) = act_opt {
actions[sig] = (*act, restorer);
}
Ok(0)
} else {
Err(Error::new(EINVAL))
}
}
pub fn sigprocmask(how: usize, mask_opt: Option<&[u64; 2]>, oldmask_opt: Option<&mut [u64; 2]>) -> Result<usize> {
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
if let Some(oldmask) = oldmask_opt {
*oldmask = context.sigmask;
}
if let Some(mask) = mask_opt {
match how {
SIG_BLOCK => {
context.sigmask[0] |= mask[0];
context.sigmask[1] |= mask[1];
},
SIG_UNBLOCK => {
context.sigmask[0] &= !mask[0];
context.sigmask[1] &= !mask[1];
},
SIG_SETMASK => {
context.sigmask[0] = mask[0];
context.sigmask[1] = mask[1];
},
_ => {
return Err(Error::new(EINVAL));
}
}
}
}
Ok(0)
}
/// Return from a userspace signal handler.
///
/// Flags the context for kernel-signal-state restore and blocks it, then
/// switches away; execution resumes in the pre-signal context, so this
/// function never actually returns to its caller.
pub fn sigreturn() -> Result<usize> {
    {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let mut context = context_lock.write();
        // NOTE(review): presumably consumed by the context-switch path (defined
        // elsewhere) to restore the saved pre-signal register state — confirm.
        context.ksig_restore = true;
        context.block("sigreturn");
    }

    let _ = unsafe { context::switch() };

    unreachable!();
}
pub fn umask(mask: usize) -> Result<usize> {
let previous;
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
previous = context.umask;
context.umask = mask;
}
Ok(previous)
}
/// Remove an exited context from the context table.
///
/// Spins until the context is no longer marked running (it may still be
/// mid-switch on another CPU), then removes it and verifies — via
/// `empty(.., true)` — that all of its memory was already unmapped.
fn reap(pid: ContextId) -> Result<ContextId> {
    // Spin until not running
    let mut running = true;
    while running {
        {
            let contexts = context::contexts();
            let context_lock = contexts.get(pid).ok_or(Error::new(ESRCH))?;
            let context = context_lock.read();
            running = context.running;
        }

        // Hint to the CPU that we are in a spin-wait loop.
        interrupt::pause();
    }

    let mut contexts = context::contexts_mut();
    let context_lock = contexts.remove(pid).ok_or(Error::new(ESRCH))?;
    {
        let mut context = context_lock.write();
        // Assert the mappings are gone and clean up any leaked grants.
        empty(&mut context, true);
    }
    drop(context_lock);

    Ok(pid)
}
/// Wait for a child's state change, POSIX-waitpid style:
/// * `pid == 0`  — any child;
/// * `pid < 0`   — any child in process group |pid|;
/// * `pid > 0`   — that specific child.
///
/// `flags` supports WNOHANG (return pid 0 instead of blocking), WUNTRACED
/// (report stops) and WCONTINUED (report continues). The wait status is
/// written through `status_ptr` when it is non-null. Exited children are
/// reaped (removed from the context table) before returning.
pub fn waitpid(pid: ContextId, status_ptr: usize, flags: WaitFlags) -> Result<ContextId> {
    let (ppid, waitpid) = {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();
        (context.id, Arc::clone(&context.waitpid))
    };

    // When the caller passed no status pointer, write into a local dummy.
    let mut tmp = [0];
    let status_slice = if status_ptr != 0 {
        validate_slice_mut(status_ptr as *mut usize, 1)?
    } else {
        &mut tmp
    };

    // Decide what to do with one received (pid, status) pair:
    // Some(result) ends the wait; None means "not interesting, wait again".
    let mut grim_reaper = |w_pid: ContextId, status: usize| -> Option<Result<ContextId>> {
        if wifcontinued(status) {
            if flags & WCONTINUED == WCONTINUED {
                status_slice[0] = status;
                Some(Ok(w_pid))
            } else {
                None
            }
        } else if wifstopped(status) {
            if flags & WUNTRACED == WUNTRACED {
                status_slice[0] = status;
                Some(Ok(w_pid))
            } else {
                None
            }
        } else {
            // Normal exit: report it and reap the dead context.
            status_slice[0] = status;
            Some(reap(w_pid))
        }
    };

    loop {
        let res_opt = if pid.into() == 0 {
            // Check for existence of child
            {
                let mut found = false;

                let contexts = context::contexts();
                for (_id, context_lock) in contexts.iter() {
                    let context = context_lock.read();
                    if context.ppid == ppid {
                        found = true;
                        break;
                    }
                }

                if ! found {
                    return Err(Error::new(ECHILD));
                }
            }

            if flags & WNOHANG == WNOHANG {
                if let Some((_wid, (w_pid, status))) = waitpid.receive_any_nonblock() {
                    grim_reaper(w_pid, status)
                } else {
                    // WNOHANG with nothing pending: report "no child yet".
                    Some(Ok(ContextId::from(0)))
                }
            } else {
                let (_wid, (w_pid, status)) = waitpid.receive_any("waitpid any");
                grim_reaper(w_pid, status)
            }
        } else if (pid.into() as isize) < 0 {
            let pgid = ContextId::from(-(pid.into() as isize) as usize);

            // Check for existence of child in process group PGID
            {
                let mut found = false;

                let contexts = context::contexts();
                for (_id, context_lock) in contexts.iter() {
                    let context = context_lock.read();
                    if context.pgid == pgid {
                        found = true;
                        break;
                    }
                }

                if ! found {
                    return Err(Error::new(ECHILD));
                }
            }

            if flags & WNOHANG == WNOHANG {
                if let Some((w_pid, status)) = waitpid.receive_nonblock(&WaitpidKey {
                    pid: None,
                    pgid: Some(pgid)
                }) {
                    grim_reaper(w_pid, status)
                } else {
                    Some(Ok(ContextId::from(0)))
                }
            } else {
                let (w_pid, status) = waitpid.receive(&WaitpidKey {
                    pid: None,
                    pgid: Some(pgid)
                }, "waitpid pgid");
                grim_reaper(w_pid, status)
            }
        } else {
            // Waiting on a specific pid. A workaround below adopts contexts
            // that are not currently our children instead of failing.
            let hack_status = {
                let contexts = context::contexts();
                let context_lock = contexts.get(pid).ok_or(Error::new(ECHILD))?;
                let mut context = context_lock.write();
                if context.ppid != ppid {
                    println!("TODO: Hack for rustc - changing ppid of {} from {} to {}", context.id.into(), context.ppid.into(), ppid.into());
                    context.ppid = ppid;
                    //return Err(Error::new(ECHILD));
                    Some(context.status)
                } else {
                    None
                }
            };

            if let Some(context::Status::Exited(status)) = hack_status {
                // Adopted and already exited: consume any stale queue entry
                // and reap it directly.
                let _ = waitpid.receive_nonblock(&WaitpidKey {
                    pid: Some(pid),
                    pgid: None
                });
                grim_reaper(pid, status)
            } else if flags & WNOHANG == WNOHANG {
                if let Some((w_pid, status)) = waitpid.receive_nonblock(&WaitpidKey {
                    pid: Some(pid),
                    pgid: None
                }) {
                    grim_reaper(w_pid, status)
                } else {
                    Some(Ok(ContextId::from(0)))
                }
            } else {
                let (w_pid, status) = waitpid.receive(&WaitpidKey {
                    pid: Some(pid),
                    pgid: None
                }, "waitpid pid");
                grim_reaper(w_pid, status)
            }
        };

        if let Some(res) = res_opt {
            return res;
        }
    }
}
|
//
use std::thread;
use std::time::Duration;
use std::rc::Rc;
use std::cell::RefCell;
use std::io::{self, Read, Write, Stdout};
//
extern crate termion;
use self::termion::screen::{AlternateScreen, ToMainScreen};
use self::termion::input::MouseTerminal;
use self::termion::raw::IntoRawMode;
use self::termion::terminal_size;
use self::termion::async_stdin;
use self::termion::event::parse_event;
//
use core::view::{View, build_screen_layout, screen_putstr};
use core::screen::Screen;
use core::event::InputEvent;
use core::event::Key;
use core::editor::Editor;
//
/// Mutable state of the terminal UI, threaded through the main-loop helpers.
struct UiState {
    // Input events decoded but not yet processed.
    keys: Vec<InputEvent>,
    // Set to true to leave the main loop.
    quit: bool,
    // Text for the status line.
    status: String,
    // Whether the status line is drawn each frame.
    display_status: bool,
    // Whether the focused view is drawn each frame.
    display_view: bool,
    // Id of the currently focused view (key into the editor's view map).
    vid: u64,
    // Number of views currently registered.
    nb_view: usize,
    // Offset of the last character rendered (set from the view's end offset).
    last_offset: u64,
    // Offset of the most recently highlighted mark.
    mark_offset: u64,
    // Input poll wait time in ms — presumably consumed by get_input_event
    // (defined elsewhere); confirm.
    input_wait_time_ms: u64,
}
impl UiState {
    /// Create the initial UI state: no pending input, status line hidden,
    /// view rendering enabled, focus on view 0, 20 ms input poll interval.
    fn new() -> UiState {
        UiState {
            keys: Vec::new(),
            quit: false,
            status: String::new(),
            // Originally written as the obscure literal `!true`;
            // `false` states the same value directly.
            display_status: false,
            display_view: true,
            vid: 0,
            nb_view: 0,
            last_offset: 0,
            mark_offset: 0,
            input_wait_time_ms: 20,
        }
    }
}
/// Terminal frontend main loop: repeatedly draw the focused view and status
/// line, then translate and dispatch pending input events, until a processed
/// event sets `quit`. Restores the main screen and cursor before returning.
pub fn main_loop(mut editor: &mut Editor) {
    let mut ui_state = UiState::new();

    let (width, height) = terminal_size().unwrap();

    setup_views(editor, width as usize, height as usize);

    //
    // Raw mode on an alternate screen, with mouse reporting and the cursor
    // hidden; all of this is undone at the bottom of the function.
    let stdout = MouseTerminal::from(io::stdout().into_raw_mode().unwrap());
    let mut stdout = AlternateScreen::from(stdout);
    write!(stdout, "{}{}", termion::cursor::Hide, termion::clear::All).unwrap();
    stdout.flush().unwrap();

    // Non-blocking byte stream over stdin.
    let mut stdin = async_stdin().bytes();

    while !ui_state.quit {
        ui_state.nb_view = editor.view_map.len();

        // NOTE(review): always renders view `ui_state.vid`; the unwraps below
        // assume that view exists in the map — confirm `vid` is kept valid.
        let mut view = editor.view_map.get_mut(&ui_state.vid);

        let status_line_y = height;

        if ui_state.display_view == true {
            draw_view(&mut ui_state,
                      &mut view.as_mut().unwrap().borrow_mut(),
                      &mut stdout);
        }

        if ui_state.display_status == true {
            display_status_line(&ui_state,
                                &mut view.as_mut().unwrap().borrow_mut(),
                                status_line_y,
                                width,
                                &mut stdout);
        }

        let vec_evt = get_input_event(&mut stdin, &mut ui_state);
        for evt in vec_evt {
            process_input_events(&mut ui_state, &mut view.as_mut().unwrap().borrow_mut(), evt);
        }
    }

    // quit
    // clear, restore cursor
    write!(stdout, "{}{}", termion::clear::All, termion::cursor::Show).unwrap();
    write!(stdout, "{}{}", ToMainScreen, termion::cursor::Show).unwrap();
    stdout.flush().unwrap();
}
/// Create one full-size `View` per open document and register it in the
/// editor's view map, assigning sequential view ids starting at 0.
fn setup_views(editor: &mut Editor, width: usize, height: usize) {
    // Build the views first: `editor.document_map` is borrowed immutably
    // here while `editor.view_map` is borrowed mutably below.
    let mut views = Vec::new();
    for (vid, (_, b)) in editor.document_map.iter().enumerate() {
        let view = View::new(vid as u64,
                             0,
                             width,
                             height,
                             Some(b.clone()));
        views.push(view);
    }

    for view in views {
        // The original code took a useless `&` reference to the result of
        // `insert` (a no-op statement); the plain call is what was meant.
        editor
            .view_map
            .insert(view.id, Rc::new(RefCell::new(view)));
    }
}
/// Re-render `view`'s document into its screen buffer and flag as selected
/// any cells whose offset matches a moving mark inside the visible range.
/// Updates `ui_state.last_offset` (and `mark_offset` when a mark is hit).
///
/// Removed: an unreachable `if 0 == 1 { ... }` debug branch from the
/// original (it rendered a banner line and could never execute).
fn fill_screen(ui_state: &mut UiState, view: &mut View) {
    if let Some(ref buf) = view.document {
        let mut screen = &mut view.screen;
        screen.clear();

        let data = &buf.borrow().buffer.data;
        let len = data.len();
        let max_offset = buf.borrow().buffer.size as u64;

        view.end_offset =
            build_screen_layout(&data[0..len], view.start_offset, max_offset, &mut screen);

        ui_state.last_offset = view.end_offset;

        // render marks
        // brute force for now
        for m in view.moving_marks.borrow().iter() {
            // TODO: screen.find_line_by_offset(m.offset) -> Option<&mut Line>
            if m.offset >= view.start_offset && m.offset <= view.end_offset {
                for l in 0..screen.height {
                    let line = screen.get_mut_line(l).unwrap();
                    for c in 0..line.nb_cells {
                        let mut cpi = line.get_mut_cpi(c).unwrap();

                        // Cells are in offset order; past the mark means done
                        // with this line.
                        if cpi.offset > m.offset {
                            break;
                        }

                        if cpi.offset == m.offset {
                            cpi.is_selected = true;
                            ui_state.mark_offset = m.offset;
                        }
                    }
                }
            }
        }
    }
}
/// Flushes the contents of `screen` to the terminal, rendering selected
/// cells with inverted video. Emits exactly the same escape-sequence
/// stream as before: reset, then per cell an optional Invert, the
/// codepoint, and a Reset.
fn draw_screen(screen: &mut Screen, mut stdout: &mut Stdout) {
    write!(stdout, "{}", termion::cursor::Goto(1, 1)).unwrap();
    write!(stdout, "{}", termion::style::Reset).unwrap();
    for row in 0..screen.height {
        terminal_cursor_to(&mut stdout, 1, (row + 1) as u16);
        let line = screen.get_line(row).unwrap();
        for col in 0..line.width {
            let cell = line.get_cpi(col).unwrap();
            if cell.is_selected {
                write!(stdout, "{}", termion::style::Invert).unwrap();
            }
            write!(stdout, "{}", cell.displayed_cp).unwrap();
            write!(stdout, "{}", termion::style::Reset).unwrap();
        }
    }
    stdout.flush().unwrap();
}
/*
TODO:
1 : be explicit
2 : create editor internal result type Result<>
3 : use idomatic func()? style
*/
/// Renders `view` into its screen buffer, then pushes the result to the
/// terminal.
fn draw_view(mut ui_state: &mut UiState, mut view: &mut View, mut stdout: &mut Stdout) {
    fill_screen(&mut ui_state, &mut view);
    let screen = &mut view.screen;
    draw_screen(screen, &mut stdout);
}
/// Overwrites `line_width` cells with spaces at the current cursor
/// position, clearing the current terminal line.
fn terminal_clear_current_line(mut stdout: &mut Stdout, line_width: u16) {
    // Build the blank run once and issue a single write instead of one
    // formatted write per column.
    write!(stdout, "{}", " ".repeat(line_width as usize)).unwrap();
}
/// Moves the terminal cursor to the 1-based position (`x`, `y`).
fn terminal_cursor_to(mut stdout: &mut Stdout, x: u16, y: u16) {
    let goto = termion::cursor::Goto(x, y);
    write!(stdout, "{}", goto).unwrap();
}
/// Converts a raw termion event into the editor's internal `InputEvent`.
///
/// Key and mouse events translate one-to-one. Arms that only mutate
/// `ui_state` (F1/F2 view switching, mouse Hold, Unsupported) fall through
/// and return `NoInputEvent`. Every arm also records a human-readable
/// trace of the event in `ui_state.status` for the status line.
fn translate_termion_event(evt: self::termion::event::Event, ui_state: &mut UiState) -> InputEvent {
// Maps termion's mouse-button enum to the editor's numeric button ids:
// 0 = left, 1 = right, 2 = middle, 3 = wheel-up, 4 = wheel-down.
fn termion_mouse_button_to_u32(mb: self::termion::event::MouseButton) -> u32 {
match mb {
self::termion::event::MouseButton::Left => 0,
self::termion::event::MouseButton::Right => 1,
self::termion::event::MouseButton::Middle => 2,
self::termion::event::MouseButton::WheelUp => 3,
self::termion::event::MouseButton::WheelDown => 4,
}
}
// translate termion event
match evt {
self::termion::event::Event::Key(k) => {
match k {
// Ctrl-c is matched before the generic Ctrl(c) arm (same payload,
// kept explicit for the quit sequence).
self::termion::event::Key::Ctrl('c') => {
ui_state.status = format!("Ctrl-c");
return InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('c'),
};
}
self::termion::event::Key::Char('\n') => {
ui_state.status = format!("{}", "<newline>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::UNICODE('\n'),
};
}
self::termion::event::Key::Char(c) => {
ui_state.status = format!("{}", c);
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::UNICODE(c),
};
}
self::termion::event::Key::Alt(c) => {
ui_state.status = format!("Alt-{}", c);
return InputEvent::KeyPress {
ctrl: false,
alt: true,
shift: false,
key: Key::UNICODE(c),
};
}
self::termion::event::Key::Ctrl(c) => {
ui_state.status = format!("Ctrl-{}", c);
return InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE(c),
};
}
// F1/F2 switch the active view in place and deliberately do NOT
// produce an InputEvent (they fall through to NoInputEvent).
self::termion::event::Key::F(1) => {
if ui_state.vid > 0 {
ui_state.vid -= 1;
}
}
// NOTE(review): if nb_view is 0, `nb_view - 1` underflows usize
// before the cast — presumably at least one view always exists;
// verify against setup_views.
self::termion::event::Key::F(2) => {
ui_state.vid = ::std::cmp::min(ui_state.vid + 1, (ui_state.nb_view - 1) as u64);
}
self::termion::event::Key::F(f) => ui_state.status = format!("F{:?}", f),
self::termion::event::Key::Left => {
ui_state.status = format!("<left>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Left,
};
}
self::termion::event::Key::Right => {
ui_state.status = format!("<right>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Right,
};
}
self::termion::event::Key::Up => {
ui_state.status = format!("<up>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Up,
};
}
self::termion::event::Key::Down => {
ui_state.status = format!("<down>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Down,
};
}
self::termion::event::Key::Backspace => {
ui_state.status = format!("<backspc>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::BackSpace,
};
}
self::termion::event::Key::Home => {
ui_state.status = format!("<Home>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Home,
};
}
self::termion::event::Key::End => {
ui_state.status = format!("<End>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::End,
};
}
self::termion::event::Key::PageUp => {
ui_state.status = format!("<PageUp>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageUp,
};
}
self::termion::event::Key::PageDown => {
ui_state.status = format!("<PageDown>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageDown,
};
}
self::termion::event::Key::Delete => {
ui_state.status = format!("<Delete>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Delete,
};
}
self::termion::event::Key::Insert => {
ui_state.status = format!("<Insert>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Insert,
};
}
self::termion::event::Key::Esc => {
ui_state.status = format!("<Esc>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Escape,
};
}
_ => ui_state.status = format!("Other"),
}
}
self::termion::event::Event::Mouse(m) => {
match m {
// Terminal mouse coordinates are 1-based; the editor uses 0-based.
self::termion::event::MouseEvent::Press(mb, x, y) => {
ui_state.status =
format!("MouseEvent::Press => MouseButton {:?} @ ({}, {})", mb, x, y);
let button = termion_mouse_button_to_u32(mb);
return InputEvent::ButtonPress {
ctrl: false,
alt: false,
shift: false,
x: (x - 1) as i32,
y: (y - 1) as i32,
button,
};
}
// termion does not report which button was released: 0xff = "any".
self::termion::event::MouseEvent::Release(x, y) => {
ui_state.status = format!("MouseEvent::Release => @ ({}, {})", x, y);
return InputEvent::ButtonRelease {
ctrl: false,
alt: false,
shift: false,
x: (x - 1) as i32,
y: (y - 1) as i32,
button: 0xff,
};
}
self::termion::event::MouseEvent::Hold(x, y) => {
ui_state.status = format!("MouseEvent::Hold => @ ({}, {})", x, y);
}
};
}
self::termion::event::Event::Unsupported(e) => {
ui_state.status = format!("Event::Unsupported {:?}", e);
}
}
// Reached by arms that only update ui_state (F1/F2, Hold, Unsupported).
::core::event::InputEvent::NoInputEvent
}
/// Polls the async stdin reader and returns all input events currently
/// available, translated into editor `InputEvent`s.
///
/// Blocks (sleeping in `input_wait_time_ms` steps) until at least one
/// event has been collected. The wait time resets to 0 whenever an event
/// arrives and is capped while idle, so latency stays bounded.
fn get_input_event(mut stdin: &mut ::std::io::Bytes<self::termion::AsyncReader>,
ui_state: &mut UiState)
-> Vec<InputEvent> {
let mut v = Vec::<InputEvent>::new();
// prepare async read
loop {
let b = stdin.next();
match b {
Some(b) => {
match b {
Ok(val) => {
match parse_event(val, &mut stdin) {
Err(_) => {
// unparsable byte sequence: drop it silently
}
Ok(evt) => {
let evt = translate_termion_event(evt, ui_state);
v.push(evt);
// input is flowing: poll again immediately
ui_state.input_wait_time_ms = 0;
}
}
}
Err(_) => {
// read error: ignore and keep polling
}
}
}
None => {
if v.len() != 0 {
// no more pending bytes and we have events: deliver them
break;
}
// TODO: use last input event time
ui_state.status = format!(" async no event");
ui_state.input_wait_time_ms += 10;
// Fix: the original used `cmp::max(wait, 20)`, which let the sleep
// grow by 10 ms on every idle iteration without bound, making the
// editor progressively unresponsive. `min` caps the poll interval
// at 20 ms instead.
ui_state.input_wait_time_ms = ::std::cmp::min(ui_state.input_wait_time_ms, 20);
thread::sleep(Duration::from_millis(ui_state.input_wait_time_ms));
}
}
}
v
}
/// Applies one translated `InputEvent` to the active view.
///
/// Events are appended to `ui_state.keys` so that two-key sequences can be
/// recognized: ctrl-x followed by ctrl-c sets `ui_state.quit`. The
/// `clear_keys` flag stays false only while a prefix key (ctrl-x) is
/// pending; otherwise the history is cleared after each event.
fn process_input_events(ui_state: &mut UiState, mut view: &mut View, ev: InputEvent) {
if ev == ::core::event::InputEvent::NoInputEvent {
// ignore no input event event :-)
return;
}
ui_state.keys.push(ev.clone());
let mut clear_keys = true;
match ev {
// ctrl+c: quits only when the previous key was ctrl+x
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('c'),
} => {
if ui_state.keys.len() > 1 {
let prev_ev = &ui_state.keys[ui_state.keys.len() - 2];
match *prev_ev {
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('x'),
} => {
ui_state.quit = true;
clear_keys = false;
}
_ => {}
}
} else {
clear_keys = true;
}
}
// ctrl+x: prefix key — keep it in the history for the next event
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('x'),
} => {
clear_keys = false;
}
// ctrl+a
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('a'),
} => {
view.move_marks_to_beginning_of_line();
}
// ctrl+e
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('e'),
} => {
view.move_marks_to_end_of_line();
}
// ctrl+d
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('d'),
} => {
view.remove_codepoint();
}
// ctrl+s
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('s'),
} => {
view.save_document();
}
// ctrl+? — any other ctrl combination is ignored
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE(_),
} => {}
// left
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Left,
} => {
view.move_marks_backward();
ui_state.status = format!("<left>");
}
// up
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Up,
} => {
view.move_marks_to_previous_line();
ui_state.status = format!("<up>");
}
// down
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Down,
} => {
view.move_marks_to_next_line();
ui_state.status = format!("<down>");
}
// right
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Right,
} => {
view.move_marks_forward();
ui_state.status = format!("<right>");
}
// page_up
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageUp,
} => {
view.scroll_to_previous_screen();
ui_state.status = format!("<page_up>");
}
// page_down
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageDown,
} => {
view.scroll_to_next_screen();
ui_state.status = format!("<page_down>");
}
// delete
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Delete,
} => {
view.remove_codepoint();
ui_state.status = format!("<del>");
}
// backspace
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::BackSpace,
} => {
view.remove_previous_codepoint();
ui_state.status = format!("<backspace>");
}
// insert text — must come after the special-key arms above
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::UNICODE(cp),
} => {
view.insert_codepoint(cp);
ui_state.status = format!("<insert [0x{:x}]>", cp as u32);
}
// mouse button pressed
InputEvent::ButtonPress {
ctrl: false,
alt: false,
shift: false,
x,
y,
button,
} => {
view.button_press(button, x, y);
ui_state.status = format!("<click({},@({},{}))]>", button, x, y);
}
// mouse button released
InputEvent::ButtonRelease {
ctrl: false,
alt: false,
shift: false,
x,
y,
button,
} => {
view.button_release(button, x, y);
ui_state.status = format!("<unclick({},@({},{}))]>", button, x, y);
}
_ => {}
}
if clear_keys {
ui_state.keys.clear();
}
}
/// Renders the one-line status bar at terminal row `line`, showing the
/// document name, file name, last event trace, mark position and pending
/// key count. Does nothing if the view has no document.
fn display_status_line(ui_state: &UiState,
view: &View,
line: u16,
width: u16,
mut stdout: &mut Stdout) {
let doc = match view.document {
Some(ref d) => d.borrow(),
None => return,
};
let name = doc.name.as_str();
let file_name = doc.buffer.file_name.as_str();
// select/clear last line
terminal_cursor_to(&mut stdout, 1, line);
terminal_clear_current_line(&mut stdout, width);
terminal_cursor_to(&mut stdout, 1, line);
let (_, x, y) = view.screen.find_cpi_by_offset(ui_state.mark_offset);
let status_str = format!("line {} document_name '{}' \
, file('{}'), event('{}') \
mark(({},{})@{}) keys({})",
line,
name,
file_name,
ui_state.status,
x,
y,
ui_state.mark_offset,
ui_state.keys.len());
// Fix: the original used `print!`, which writes through std's own
// line-buffered stdout rather than the raw-mode handle flushed below,
// so the status text could appear late or out of order. Write through
// `stdout` like every other draw call.
write!(stdout, "{}", status_str).unwrap();
stdout.flush().unwrap();
}
// reserve the last line for status line (when enabled)
//
use std::thread;
use std::time::Duration;
use std::rc::Rc;
use std::cell::RefCell;
use std::io::{self, Read, Write, Stdout};
//
extern crate termion;
use self::termion::screen::{AlternateScreen, ToMainScreen};
use self::termion::input::MouseTerminal;
use self::termion::raw::IntoRawMode;
use self::termion::terminal_size;
use self::termion::async_stdin;
use self::termion::event::parse_event;
//
use core::view::{View, build_screen_layout, screen_putstr};
use core::screen::Screen;
use core::event::InputEvent;
use core::event::Key;
use core::editor::Editor;
//
// Per-session UI state shared by the main loop, the event translator and
// the renderer.
struct UiState {
// history of recent input events; used to detect the ctrl-x ctrl-c quit sequence
keys: Vec<InputEvent>,
// set by process_input_events to terminate the main loop
quit: bool,
// human-readable trace of the last event, shown in the status line
status: String,
// whether the status line is rendered (reserves the last terminal row)
display_status: bool,
// whether the document view is rendered
display_view: bool,
// id of the currently focused view
vid: u64,
// number of views, refreshed every main-loop iteration
nb_view: usize,
// offset of the last character rendered on screen
last_offset: u64,
// offset of the mark found on screen during the last fill_screen pass
mark_offset: u64,
// idle poll interval for async stdin, in milliseconds
input_wait_time_ms: u64,
}
impl UiState {
    /// Builds the initial UI state: first view focused, document view
    /// shown, status line hidden, empty key history, and a 20 ms input
    /// poll interval.
    fn new() -> UiState {
        UiState {
            keys: Vec::new(),
            quit: false,
            status: String::new(),
            // the original spelled this `!true` as a quick toggle hack;
            // written out as the value it evaluates to
            display_status: false,
            display_view: true,
            vid: 0,
            nb_view: 0,
            last_offset: 0,
            mark_offset: 0,
            input_wait_time_ms: 20,
        }
    }
}
/// Runs the terminal UI: sets up raw mode on an alternate screen, then
/// loops drawing the focused view and processing input until the quit
/// sequence (ctrl-x ctrl-c) is entered, and finally restores the terminal.
pub fn main_loop(mut editor: &mut Editor) {
let mut ui_state = UiState::new();
// When the status line is enabled, reserve the last terminal row for it.
let (width, height) = if ui_state.display_status == true {
let (width, height) = terminal_size().unwrap();
(width, height - 1)
} else {
terminal_size().unwrap()
};
setup_views(editor, width as usize, height as usize);
//
// Enter raw mode with mouse reporting on the alternate screen, then hide
// the cursor and clear everything before the first frame.
let stdout = MouseTerminal::from(io::stdout().into_raw_mode().unwrap());
let mut stdout = AlternateScreen::from(stdout);
write!(stdout, "{}{}", termion::cursor::Hide, termion::clear::All).unwrap();
stdout.flush().unwrap();
let mut stdin = async_stdin().bytes();
while !ui_state.quit {
ui_state.nb_view = editor.view_map.len();
// Re-fetch the focused view each iteration: F1/F2 may change vid.
let mut view = editor.view_map.get_mut(&ui_state.vid);
// Terminal rows are 1-based, so the reserved row is height + 1.
let status_line_y = height + 1;
if ui_state.display_view == true {
draw_view(&mut ui_state,
&mut view.as_mut().unwrap().borrow_mut(),
&mut stdout);
}
if ui_state.display_status == true {
display_status_line(&ui_state,
&mut view.as_mut().unwrap().borrow_mut(),
status_line_y,
width,
&mut stdout);
}
let vec_evt = get_input_event(&mut stdin, &mut ui_state);
for evt in vec_evt {
process_input_events(&mut ui_state, &mut view.as_mut().unwrap().borrow_mut(), evt);
}
}
// quit
// clear, restore cursor
write!(stdout, "{}{}", termion::clear::All, termion::cursor::Show).unwrap();
// Switch back from the alternate screen before the raw-mode guard drops.
write!(stdout, "{}{}", ToMainScreen, termion::cursor::Show).unwrap();
stdout.flush().unwrap();
}
/// Creates one `View` per open document and registers it in the editor's
/// view map. View ids are assigned sequentially starting at 0, and every
/// view starts at offset 0 with the full terminal width/height.
fn setup_views(editor: &mut Editor, width: usize, height: usize) {
    let mut views = Vec::new();
    let mut vid = 0;
    for (_, b) in &editor.document_map {
        let view = View::new(vid,
                             0 as u64,
                             width as usize,
                             height as usize,
                             Some(b.clone()));
        views.push(view);
        vid += 1;
    }
    for view in views {
        // Fix: the original wrote `&editor.view_map.insert(...)`, taking a
        // reference to the unit-ish result of `insert` (an unused-value
        // smell flagged by the compiler). Call `insert` directly instead.
        editor
            .view_map
            .insert(view.id, Rc::new(RefCell::new(view)));
    }
}
/// Renders the view's document into its off-screen `Screen` buffer and
/// flags the cells carrying a moving mark as selected.
///
/// Side effects on `ui_state`: records the last rendered offset
/// (`last_offset`) and the offset of the mark found on screen
/// (`mark_offset`) so the status line can display them.
fn fill_screen(mut ui_state: &mut UiState, mut view: &mut View) {
    match view.document {
        Some(ref buf) => {
            let mut screen = &mut view.screen;
            screen.clear();
            // NOTE(review): removed a dead `if 0 == 1 { ... }` block that
            // rendered a version banner — it could never execute.
            let data = &buf.borrow().buffer.data;
            let len = data.len();
            let max_offset = buf.borrow().buffer.size as u64;
            view.end_offset =
                build_screen_layout(&data[0..len], view.start_offset, max_offset, &mut screen);
            ui_state.last_offset = view.end_offset;
            // render marks
            // brute force for now: scan every on-screen cell per mark
            for m in view.moving_marks.borrow().iter() {
                // TODO: screen.find_line_by_offset(m.offset) -> Option<&mut Line>
                if m.offset >= view.start_offset && m.offset <= view.end_offset {
                    for l in 0..screen.height {
                        let line = screen.get_mut_line(l).unwrap();
                        for c in 0..line.nb_cells {
                            let mut cpi = line.get_mut_cpi(c).unwrap();
                            if cpi.offset > m.offset {
                                // cells are offset-ordered: past the mark
                                break;
                            }
                            if cpi.offset == m.offset {
                                cpi.is_selected = true;
                                ui_state.mark_offset = m.offset;
                            }
                        }
                    }
                }
            }
        }
        None => {}
    }
}
/// Flushes the contents of `screen` to the terminal, rendering selected
/// cells with inverted video. Emits exactly the same escape-sequence
/// stream as before: reset, then per cell an optional Invert, the
/// codepoint, and a Reset.
fn draw_screen(screen: &mut Screen, mut stdout: &mut Stdout) {
    write!(stdout, "{}", termion::cursor::Goto(1, 1)).unwrap();
    write!(stdout, "{}", termion::style::Reset).unwrap();
    for row in 0..screen.height {
        terminal_cursor_to(&mut stdout, 1, (row + 1) as u16);
        let line = screen.get_line(row).unwrap();
        for col in 0..line.width {
            let cell = line.get_cpi(col).unwrap();
            if cell.is_selected {
                write!(stdout, "{}", termion::style::Invert).unwrap();
            }
            write!(stdout, "{}", cell.displayed_cp).unwrap();
            write!(stdout, "{}", termion::style::Reset).unwrap();
        }
    }
    stdout.flush().unwrap();
}
/*
TODO:
1 : be explicit
2 : create editor internal result type Result<>
3 : use idomatic func()? style
*/
/// Renders `view` into its screen buffer, then pushes the result to the
/// terminal.
fn draw_view(mut ui_state: &mut UiState, mut view: &mut View, mut stdout: &mut Stdout) {
    fill_screen(&mut ui_state, &mut view);
    let screen = &mut view.screen;
    draw_screen(screen, &mut stdout);
}
/// Overwrites `line_width` cells with spaces at the current cursor
/// position, clearing the current terminal line.
fn terminal_clear_current_line(mut stdout: &mut Stdout, line_width: u16) {
    // Build the blank run once and issue a single write instead of one
    // formatted write per column.
    write!(stdout, "{}", " ".repeat(line_width as usize)).unwrap();
}
/// Moves the terminal cursor to the 1-based position (`x`, `y`).
fn terminal_cursor_to(mut stdout: &mut Stdout, x: u16, y: u16) {
    let goto = termion::cursor::Goto(x, y);
    write!(stdout, "{}", goto).unwrap();
}
/// Converts a raw termion event into the editor's internal `InputEvent`.
///
/// Key and mouse events translate one-to-one. Arms that only mutate
/// `ui_state` (F1/F2 view switching, mouse Hold, Unsupported) fall through
/// and return `NoInputEvent`. Every arm also records a human-readable
/// trace of the event in `ui_state.status` for the status line.
fn translate_termion_event(evt: self::termion::event::Event, ui_state: &mut UiState) -> InputEvent {
// Maps termion's mouse-button enum to the editor's numeric button ids:
// 0 = left, 1 = right, 2 = middle, 3 = wheel-up, 4 = wheel-down.
fn termion_mouse_button_to_u32(mb: self::termion::event::MouseButton) -> u32 {
match mb {
self::termion::event::MouseButton::Left => 0,
self::termion::event::MouseButton::Right => 1,
self::termion::event::MouseButton::Middle => 2,
self::termion::event::MouseButton::WheelUp => 3,
self::termion::event::MouseButton::WheelDown => 4,
}
}
// translate termion event
match evt {
self::termion::event::Event::Key(k) => {
match k {
// Ctrl-c is matched before the generic Ctrl(c) arm (same payload,
// kept explicit for the quit sequence).
self::termion::event::Key::Ctrl('c') => {
ui_state.status = format!("Ctrl-c");
return InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('c'),
};
}
self::termion::event::Key::Char('\n') => {
ui_state.status = format!("{}", "<newline>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::UNICODE('\n'),
};
}
self::termion::event::Key::Char(c) => {
ui_state.status = format!("{}", c);
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::UNICODE(c),
};
}
self::termion::event::Key::Alt(c) => {
ui_state.status = format!("Alt-{}", c);
return InputEvent::KeyPress {
ctrl: false,
alt: true,
shift: false,
key: Key::UNICODE(c),
};
}
self::termion::event::Key::Ctrl(c) => {
ui_state.status = format!("Ctrl-{}", c);
return InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE(c),
};
}
// F1/F2 switch the active view in place and deliberately do NOT
// produce an InputEvent (they fall through to NoInputEvent).
self::termion::event::Key::F(1) => {
if ui_state.vid > 0 {
ui_state.vid -= 1;
}
}
// NOTE(review): if nb_view is 0, `nb_view - 1` underflows usize
// before the cast — presumably at least one view always exists;
// verify against setup_views.
self::termion::event::Key::F(2) => {
ui_state.vid = ::std::cmp::min(ui_state.vid + 1, (ui_state.nb_view - 1) as u64);
}
self::termion::event::Key::F(f) => ui_state.status = format!("F{:?}", f),
self::termion::event::Key::Left => {
ui_state.status = format!("<left>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Left,
};
}
self::termion::event::Key::Right => {
ui_state.status = format!("<right>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Right,
};
}
self::termion::event::Key::Up => {
ui_state.status = format!("<up>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Up,
};
}
self::termion::event::Key::Down => {
ui_state.status = format!("<down>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Down,
};
}
self::termion::event::Key::Backspace => {
ui_state.status = format!("<backspc>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::BackSpace,
};
}
self::termion::event::Key::Home => {
ui_state.status = format!("<Home>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Home,
};
}
self::termion::event::Key::End => {
ui_state.status = format!("<End>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::End,
};
}
self::termion::event::Key::PageUp => {
ui_state.status = format!("<PageUp>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageUp,
};
}
self::termion::event::Key::PageDown => {
ui_state.status = format!("<PageDown>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageDown,
};
}
self::termion::event::Key::Delete => {
ui_state.status = format!("<Delete>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Delete,
};
}
self::termion::event::Key::Insert => {
ui_state.status = format!("<Insert>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Insert,
};
}
self::termion::event::Key::Esc => {
ui_state.status = format!("<Esc>");
return InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Escape,
};
}
_ => ui_state.status = format!("Other"),
}
}
self::termion::event::Event::Mouse(m) => {
match m {
// Terminal mouse coordinates are 1-based; the editor uses 0-based.
self::termion::event::MouseEvent::Press(mb, x, y) => {
ui_state.status =
format!("MouseEvent::Press => MouseButton {:?} @ ({}, {})", mb, x, y);
let button = termion_mouse_button_to_u32(mb);
return InputEvent::ButtonPress {
ctrl: false,
alt: false,
shift: false,
x: (x - 1) as i32,
y: (y - 1) as i32,
button,
};
}
// termion does not report which button was released: 0xff = "any".
self::termion::event::MouseEvent::Release(x, y) => {
ui_state.status = format!("MouseEvent::Release => @ ({}, {})", x, y);
return InputEvent::ButtonRelease {
ctrl: false,
alt: false,
shift: false,
x: (x - 1) as i32,
y: (y - 1) as i32,
button: 0xff,
};
}
self::termion::event::MouseEvent::Hold(x, y) => {
ui_state.status = format!("MouseEvent::Hold => @ ({}, {})", x, y);
}
};
}
self::termion::event::Event::Unsupported(e) => {
ui_state.status = format!("Event::Unsupported {:?}", e);
}
}
// Reached by arms that only update ui_state (F1/F2, Hold, Unsupported).
::core::event::InputEvent::NoInputEvent
}
/// Polls the async stdin reader and returns all input events currently
/// available, translated into editor `InputEvent`s.
///
/// Blocks (sleeping in `input_wait_time_ms` steps) until at least one
/// event has been collected. The wait time resets to 0 whenever an event
/// arrives and is capped while idle, so latency stays bounded.
fn get_input_event(mut stdin: &mut ::std::io::Bytes<self::termion::AsyncReader>,
ui_state: &mut UiState)
-> Vec<InputEvent> {
let mut v = Vec::<InputEvent>::new();
// prepare async read
loop {
let b = stdin.next();
match b {
Some(b) => {
match b {
Ok(val) => {
match parse_event(val, &mut stdin) {
Err(_) => {
// unparsable byte sequence: drop it silently
}
Ok(evt) => {
let evt = translate_termion_event(evt, ui_state);
v.push(evt);
// input is flowing: poll again immediately
ui_state.input_wait_time_ms = 0;
}
}
}
Err(_) => {
// read error: ignore and keep polling
}
}
}
None => {
if v.len() != 0 {
// no more pending bytes and we have events: deliver them
break;
}
// TODO: use last input event time
ui_state.status = format!(" async no event");
ui_state.input_wait_time_ms += 10;
// Fix: the original used `cmp::max(wait, 20)`, which let the sleep
// grow by 10 ms on every idle iteration without bound, making the
// editor progressively unresponsive. `min` caps the poll interval
// at 20 ms instead.
ui_state.input_wait_time_ms = ::std::cmp::min(ui_state.input_wait_time_ms, 20);
thread::sleep(Duration::from_millis(ui_state.input_wait_time_ms));
}
}
}
v
}
/// Applies one translated `InputEvent` to the active view.
///
/// Events are appended to `ui_state.keys` so that two-key sequences can be
/// recognized: ctrl-x followed by ctrl-c sets `ui_state.quit`. The
/// `clear_keys` flag stays false only while a prefix key (ctrl-x) is
/// pending; otherwise the history is cleared after each event.
fn process_input_events(ui_state: &mut UiState, mut view: &mut View, ev: InputEvent) {
if ev == ::core::event::InputEvent::NoInputEvent {
// ignore no input event event :-)
return;
}
ui_state.keys.push(ev.clone());
let mut clear_keys = true;
match ev {
// ctrl+c: quits only when the previous key was ctrl+x
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('c'),
} => {
if ui_state.keys.len() > 1 {
let prev_ev = &ui_state.keys[ui_state.keys.len() - 2];
match *prev_ev {
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('x'),
} => {
ui_state.quit = true;
clear_keys = false;
}
_ => {}
}
} else {
clear_keys = true;
}
}
// ctrl+x: prefix key — keep it in the history for the next event
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('x'),
} => {
clear_keys = false;
}
// ctrl+a
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('a'),
} => {
view.move_marks_to_beginning_of_line();
}
// ctrl+e
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('e'),
} => {
view.move_marks_to_end_of_line();
}
// ctrl+d
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('d'),
} => {
view.remove_codepoint();
}
// ctrl+s
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE('s'),
} => {
view.save_document();
}
// ctrl+? — any other ctrl combination is ignored
InputEvent::KeyPress {
ctrl: true,
alt: false,
shift: false,
key: Key::UNICODE(_),
} => {}
// left
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Left,
} => {
view.move_marks_backward();
ui_state.status = format!("<left>");
}
// up
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Up,
} => {
view.move_marks_to_previous_line();
ui_state.status = format!("<up>");
}
// down
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Down,
} => {
view.move_marks_to_next_line();
ui_state.status = format!("<down>");
}
// right
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Right,
} => {
view.move_marks_forward();
ui_state.status = format!("<right>");
}
// page_up
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageUp,
} => {
view.scroll_to_previous_screen();
ui_state.status = format!("<page_up>");
}
// page_down
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::PageDown,
} => {
view.scroll_to_next_screen();
ui_state.status = format!("<page_down>");
}
// delete
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::Delete,
} => {
view.remove_codepoint();
ui_state.status = format!("<del>");
}
// backspace
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::BackSpace,
} => {
view.remove_previous_codepoint();
ui_state.status = format!("<backspace>");
}
// insert text — must come after the special-key arms above
InputEvent::KeyPress {
ctrl: false,
alt: false,
shift: false,
key: Key::UNICODE(cp),
} => {
view.insert_codepoint(cp);
ui_state.status = format!("<insert [0x{:x}]>", cp as u32);
}
// mouse button pressed
InputEvent::ButtonPress {
ctrl: false,
alt: false,
shift: false,
x,
y,
button,
} => {
view.button_press(button, x, y);
ui_state.status = format!("<click({},@({},{}))]>", button, x, y);
}
// mouse button released
InputEvent::ButtonRelease {
ctrl: false,
alt: false,
shift: false,
x,
y,
button,
} => {
view.button_release(button, x, y);
ui_state.status = format!("<unclick({},@({},{}))]>", button, x, y);
}
_ => {}
}
if clear_keys {
ui_state.keys.clear();
}
}
/// Renders the one-line status bar at terminal row `line`, showing the
/// document name, file name, last event trace, mark position and pending
/// key count. Does nothing if the view has no document.
fn display_status_line(ui_state: &UiState,
view: &View,
line: u16,
width: u16,
mut stdout: &mut Stdout) {
let doc = match view.document {
Some(ref d) => d.borrow(),
None => return,
};
let name = doc.name.as_str();
let file_name = doc.buffer.file_name.as_str();
// select/clear last line
terminal_cursor_to(&mut stdout, 1, line);
terminal_clear_current_line(&mut stdout, width);
terminal_cursor_to(&mut stdout, 1, line);
let (_, x, y) = view.screen.find_cpi_by_offset(ui_state.mark_offset);
let status_str = format!("line {} document_name '{}' \
, file('{}'), event('{}') \
mark(({},{})@{}) keys({})",
line,
name,
file_name,
ui_state.status,
x,
y,
ui_state.mark_offset,
ui_state.keys.len());
// Fix: the original used `print!`, which writes through std's own
// line-buffered stdout rather than the raw-mode handle flushed below,
// so the status text could appear late or out of order. Write through
// `stdout` like every other draw call.
write!(stdout, "{}", status_str).unwrap();
stdout.flush().unwrap();
}
|
//! A Rust interface for Objective-C blocks.
//!
//! For more information on the specifics of the block implementation, see
//! Clang's documentation: http://clang.llvm.org/docs/Block-ABI-Apple.html
use std::mem;
use std::ptr;
use libc::{c_int, c_ulong};
use runtime::Class;
use {Id, Message};
#[allow(improper_ctypes)]
#[link(name = "Foundation", kind = "framework")]
extern {
// Class object installed as the `isa` of stack-allocated blocks, per the
// Clang block ABI.
static _NSConcreteStackBlock: Class;
}
/// An invoke function for a `Block`; this is the raw variadic C function
/// pointer called by the Objective-C runtime when the block is invoked.
type BlockInvoke<A, R> = unsafe extern fn(*mut Block<A, R>, ...) -> R;
/// An invoke function for a `ConcreteBlock`; this is the raw variadic C
/// function pointer called by the Objective-C runtime.
pub type ConcreteBlockInvoke<A, R, C> =
unsafe extern fn(*mut ConcreteBlock<A, R, C>, ...) -> R;
/// Types that may be used as the arguments to an Objective-C block.
/// Types that may be used as the arguments to an Objective-C block.
/// Implemented below (via `block_args_impl!`) for tuples of up to 12
/// elements.
pub trait BlockArguments {
/// Calls the given `Block` with self as the arguments.
fn call_block<R>(self, block: &Block<Self, R>) -> R;
/// Returns an invoke function for a `ConcreteBlock` that takes this type
/// of arguments.
fn invoke_for_concrete_block<R, C: Clone>() -> ConcreteBlockInvoke<Self, R, C>;
}
// Implements `BlockArguments` for a tuple type. `$f` names the generated
// extern invoke function; each `$a: $t` pair is one tuple element
// (binding name and generic type parameter).
macro_rules! block_args_impl(
($f:ident $(, $a:ident : $t:ident)*) => (
impl<$($t),*> BlockArguments for ($($t,)*) {
fn call_block<R>(self, block: &Block<($($t,)*), R>) -> R {
// Reinterpret the variadic invoke pointer as one with the exact
// fixed arity for this tuple before calling it.
let invoke: unsafe extern fn(*mut Block<($($t,)*), R> $(, $t)*) -> R = unsafe {
mem::transmute(block.invoke)
};
let ($($a,)*) = self;
let block_ptr = block as *const _ as *mut _;
unsafe {
invoke(block_ptr $(, $a)*)
}
}
fn invoke_for_concrete_block<R, X: Clone>() ->
ConcreteBlockInvoke<($($t,)*), R, X> {
// Extern shim: regroups the C-style argument list back into a
// tuple and forwards to the block's stored Rust closure.
unsafe extern fn $f<R, X: Clone $(, $t)*>(
block_ptr: *mut ConcreteBlock<($($t,)*), R, X>
$(, $a: $t)*) -> R {
let args = ($($a,)*);
let block = &*block_ptr;
(block.rust_invoke)(&block.context, args)
}
unsafe {
mem::transmute($f::<R, X $(, $t)*>)
}
}
}
);
);
// Instantiate BlockArguments for tuples of arity 0 through 12.
block_args_impl!(concrete_block_invoke_args0);
block_args_impl!(concrete_block_invoke_args1, a: A);
block_args_impl!(concrete_block_invoke_args2, a: A, b: B);
block_args_impl!(concrete_block_invoke_args3, a: A, b: B, c: C);
block_args_impl!(concrete_block_invoke_args4, a: A, b: B, c: C, d: D);
block_args_impl!(concrete_block_invoke_args5, a: A, b: B, c: C, d: D, e: E);
block_args_impl!(concrete_block_invoke_args6, a: A, b: B, c: C, d: D, e: E, f: F);
block_args_impl!(concrete_block_invoke_args7, a: A, b: B, c: C, d: D, e: E, f: F, g: G);
block_args_impl!(concrete_block_invoke_args8, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H);
block_args_impl!(concrete_block_invoke_args9, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I);
block_args_impl!(concrete_block_invoke_args10, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J);
block_args_impl!(concrete_block_invoke_args11, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K);
block_args_impl!(concrete_block_invoke_args12, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K, l: L);
/// An Objective-C block that takes arguments of `A` when called and
/// returns a value of `R`.
///
/// The field layout mirrors the block header defined by the Clang block
/// ABI, so a `*mut Block` can be handed directly to the ObjC runtime.
#[repr(C)]
pub struct Block<A: BlockArguments, R> {
// class pointer; _NSConcreteStackBlock for stack blocks
isa: *const Class,
// ABI flag bits (e.g. BLOCK_HAS_COPY_DISPOSE)
flags: c_int,
_reserved: c_int,
// raw C entry point invoked by the runtime
invoke: BlockInvoke<A, R>,
}
impl<A: BlockArguments, R> Block<A, R> {
/// Copy self onto the heap.
// NOTE(review): sends the ObjC `copy` message (old pre-comma `msg_send!`
// syntax) and assumes the returned pointer is retained.
pub fn copy(&self) -> Id<Block<A, R>> {
unsafe {
let block = msg_send![self copy] as *mut Block<A, R>;
Id::from_retained_ptr(block)
}
}
/// Call self with the given arguments.
pub fn call(&self, args: A) -> R {
args.call_block(self)
}
}
// Blocks are Objective-C objects, so they can receive messages.
impl<A: BlockArguments, R> Message for Block<A, R> { }
/// An Objective-C block whose size is known at compile time and may be
/// constructed on the stack.
#[repr(C)]
pub struct ConcreteBlock<A: BlockArguments, R, C: Clone> {
// standard block header (must be first for ABI compatibility)
base: Block<A, R>,
// per-instance descriptor holding size plus copy/dispose helpers
descriptor: Box<BlockDescriptor<ConcreteBlock<A, R, C>>>,
// the Rust function the extern invoke shim forwards to
rust_invoke: fn (&C, A) -> R,
// captured state passed to rust_invoke on every call
context: C,
}
impl<A: BlockArguments, R, C: Clone> ConcreteBlock<A, R, C> {
/// Constructs a `ConcreteBlock` with the given invoke function and context.
/// When the block is called, it will return the value that results from
/// calling the invoke function with a reference to its context.
pub fn new(invoke: fn (&C, A) -> R, context: C) -> ConcreteBlock<A, R, C> {
// Shim with the right C calling convention for this argument tuple.
let extern_invoke: ConcreteBlockInvoke<A, R, C> =
BlockArguments::invoke_for_concrete_block();
ConcreteBlock {
base: Block {
isa: &_NSConcreteStackBlock,
// 1 << 25 = BLOCK_HAS_COPY_DISPOSE
flags: 1 << 25,
_reserved: 0,
// Erase the concrete-block pointer type back to the variadic ABI form.
invoke: unsafe { mem::transmute(extern_invoke) },
},
descriptor: box BlockDescriptor::<A, R, C>::new(),
rust_invoke: invoke,
context: context,
}
}
}
impl<A: BlockArguments, R, C: Clone> Clone for ConcreteBlock<A, R, C> {
// A clone is simply a fresh block over the same invoke fn and a cloned context.
fn clone(&self) -> ConcreteBlock<A, R, C> {
ConcreteBlock::new(self.rust_invoke, self.context.clone())
}
}
// Lets a ConcreteBlock be used wherever a plain Block is expected.
// (Old pre-1.0 `Deref<Target>` syntax with the target as a type parameter.)
impl<A: BlockArguments, R, C: Clone> Deref<Block<A, R>> for ConcreteBlock<A, R, C> {
fn deref(&self) -> &Block<A, R> {
&self.base
}
}
// Dispose helper installed in the block descriptor; the runtime calls it
// when a copied block is released.
unsafe extern fn block_context_dispose<A: BlockArguments, R, C: Clone>(
block: &mut ConcreteBlock<A, R, C>) {
// Read the block onto the stack and let it drop
ptr::read(block);
}
// Copy helper installed in the block descriptor; the runtime calls it
// when the block is copied off the stack to the heap.
unsafe extern fn block_context_copy<A: BlockArguments, R, C: Clone>(
dst: &mut ConcreteBlock<A, R, C>, src: &ConcreteBlock<A, R, C>) {
// Doesn't seem like the descriptor is supposed to change in this function,
// but our descriptor isn't static (that's hard), so we just clone the box.
ptr::write(&mut dst.descriptor, src.descriptor.clone());
// The src block actually gets memmoved to the destination beforehand,
// but we'll set the function pointer, too, to be safe.
ptr::write(&mut dst.rust_invoke, src.rust_invoke);
ptr::write(&mut dst.context, src.context.clone());
}
// Block descriptor as defined by the Clang block ABI: size plus the
// copy/dispose helpers the runtime calls for blocks with captured state.
#[repr(C)]
struct BlockDescriptor<B> {
_reserved: c_ulong,
// total size of the block structure in bytes
block_size: c_ulong,
copy_helper: unsafe extern fn(&mut B, &B),
dispose_helper: unsafe extern fn(&mut B),
}
impl<A: BlockArguments, R, C: Clone> BlockDescriptor<ConcreteBlock<A, R, C>> {
// Builds a descriptor wired to the generic copy/dispose helpers above.
fn new() -> BlockDescriptor<ConcreteBlock<A, R, C>> {
BlockDescriptor {
_reserved: 0,
block_size: mem::size_of::<ConcreteBlock<A, R, C>>() as c_ulong,
copy_helper: block_context_copy::<A, R, C>,
dispose_helper: block_context_dispose::<A, R, C>,
}
}
}
// The descriptor is plain-old-data: bitwise copy is its clone.
impl<B> Copy for BlockDescriptor<B> { }
impl<B> Clone for BlockDescriptor<B> {
fn clone(&self) -> BlockDescriptor<B> {
*self
}
}
#[cfg(test)]
mod tests {
use Id;
use objc_test_utils;
use super::{Block, ConcreteBlock};
fn get_int_block() -> &'static Block<(), int> {
unsafe {
&*(objc_test_utils::get_int_block() as *const _)
}
}
fn get_int_block_with(i: int) -> Id<Block<(), int>> {
unsafe {
let ptr = objc_test_utils::get_int_block_with(i);
Id::from_retained_ptr(ptr as *mut _)
}
}
fn get_add_block() -> &'static Block<(int,), int> {
unsafe {
&*(objc_test_utils::get_add_block() as *const _)
}
}
fn get_add_block_with(i: int) -> Id<Block<(int,), int>> {
unsafe {
let ptr = objc_test_utils::get_add_block_with(i);
Id::from_retained_ptr(ptr as *mut _)
}
}
fn invoke_int_block(block: &Block<(), int>) -> int {
let ptr = block as *const _;
unsafe {
objc_test_utils::invoke_int_block(ptr as *const _)
}
}
fn invoke_add_block(block: &Block<(int,), int>, a: int) -> int {
let ptr = block as *const _;
unsafe {
objc_test_utils::invoke_add_block(ptr as *const _, a)
}
}
#[test]
fn test_call_block() {
let block = get_int_block();
assert!(block.call(()) == 7);
let block = get_int_block_with(13);
assert!(block.call(()) == 13);
}
#[test]
fn test_call_block_args() {
let block = get_add_block();
assert!(block.call((2,)) == 9);
let block = get_add_block_with(13);
assert!(block.call((2,)) == 15);
}
#[test]
fn test_create_block() {
fn block_get_int(context: &int, _args: ()) -> int {
*context
}
let block = ConcreteBlock::new(block_get_int, 13);
let result = invoke_int_block(&*block);
assert!(result == 13);
}
#[test]
fn test_create_block_args() {
fn block_add_int(context: &int, (a,): (int,)) -> int {
a + *context
}
let block = ConcreteBlock::new(block_add_int, 5);
let result = invoke_add_block(&*block, 6);
assert!(result == 11);
}
#[test]
fn test_concrete_block_clone() {
fn block_get_string_len(context: &String, _args: ()) -> uint {
context.len()
}
let s = "Hello!".into_string();
let expected_len = s.len();
let block = ConcreteBlock::new(block_get_string_len, s);
assert!(block.call(()) == expected_len);
let cloned = block.clone();
assert!(cloned.call(()) == expected_len);
drop(block);
assert!(cloned.call(()) == expected_len);
}
#[test]
fn test_concrete_block_copy() {
fn block_get_string_len(context: &String, _args: ()) -> uint {
context.len()
}
let s = "Hello!".into_string();
let expected_len = s.len();
let block = ConcreteBlock::new(block_get_string_len, s);
assert!(block.call(()) == expected_len);
let copied = block.copy();
assert!(copied.call(()) == expected_len);
drop(block);
assert!(copied.call(()) == expected_len);
}
}
Relax some ConcreteBlock type bounds.
//! A Rust interface for Objective-C blocks.
//!
//! For more information on the specifics of the block implementation, see
//! Clang's documentation: http://clang.llvm.org/docs/Block-ABI-Apple.html
use std::mem;
use std::ptr;
use libc::{c_int, c_ulong};
use runtime::Class;
use {Id, Message};
#[allow(improper_ctypes)]
#[link(name = "Foundation", kind = "framework")]
extern {
static _NSConcreteStackBlock: Class;
}
/// An invoke function for a `Block`; this is the raw C function called by the
/// Objective-C runtime.
type BlockInvoke<A, R> = unsafe extern fn(*mut Block<A, R>, ...) -> R;
/// An invoke function for a `ConcreteBlock`; this is the raw C function called
/// by the Objective-C runtime.
pub type ConcreteBlockInvoke<A, R, C> =
unsafe extern fn(*mut ConcreteBlock<A, R, C>, ...) -> R;
/// Types that may be used as the arguments to an Objective-C block.
///
/// Implemented for tuples of 0 to 12 elements by `block_args_impl!` below.
pub trait BlockArguments {
    /// Calls the given `Block` with self as the arguments.
    fn call_block<R>(self, block: &Block<Self, R>) -> R;
    /// Returns an invoke function for a `ConcreteBlock` that takes this type
    /// of arguments.
    ///
    /// The returned trampoline unpacks the C-level arguments and forwards
    /// them, with the captured context, to the block's stored Rust function.
    fn invoke_for_concrete_block<R, C: Clone>() -> ConcreteBlockInvoke<Self, R, C>;
}
// Generates a `BlockArguments` impl for one tuple arity.
//
// `call_block` transmutes the block's variadic invoke pointer into a
// non-variadic fn pointer with the exact argument types and calls it with
// the block pointer as the first argument, as the Block ABI passes it.
//
// `invoke_for_concrete_block` defines a per-arity `unsafe extern`
// trampoline ($f) that packs its C arguments into a tuple and dispatches to
// the block's stored `rust_invoke` with the captured context.
macro_rules! block_args_impl(
    ($f:ident $(, $a:ident : $t:ident)*) => (
        impl<$($t),*> BlockArguments for ($($t,)*) {
            fn call_block<R>(self, block: &Block<($($t,)*), R>) -> R {
                let invoke: unsafe extern fn(*mut Block<($($t,)*), R> $(, $t)*) -> R = unsafe {
                    mem::transmute(block.invoke)
                };
                let ($($a,)*) = self;
                let block_ptr = block as *const _ as *mut _;
                unsafe {
                    invoke(block_ptr $(, $a)*)
                }
            }
            // The context parameter is named `X` here so it cannot collide
            // with the `C` tuple-element identifier used by 3+-arity impls.
            fn invoke_for_concrete_block<R, X: Clone>() ->
                    ConcreteBlockInvoke<($($t,)*), R, X> {
                unsafe extern fn $f<R, X: Clone $(, $t)*>(
                        block_ptr: *mut ConcreteBlock<($($t,)*), R, X>
                        $(, $a: $t)*) -> R {
                    let args = ($($a,)*);
                    let block = &*block_ptr;
                    (block.rust_invoke)(&block.context, args)
                }
                unsafe {
                    mem::transmute($f::<R, X $(, $t)*>)
                }
            }
        }
    );
);
block_args_impl!(concrete_block_invoke_args0);
block_args_impl!(concrete_block_invoke_args1, a: A);
block_args_impl!(concrete_block_invoke_args2, a: A, b: B);
block_args_impl!(concrete_block_invoke_args3, a: A, b: B, c: C);
block_args_impl!(concrete_block_invoke_args4, a: A, b: B, c: C, d: D);
block_args_impl!(concrete_block_invoke_args5, a: A, b: B, c: C, d: D, e: E);
block_args_impl!(concrete_block_invoke_args6, a: A, b: B, c: C, d: D, e: E, f: F);
block_args_impl!(concrete_block_invoke_args7, a: A, b: B, c: C, d: D, e: E, f: F, g: G);
block_args_impl!(concrete_block_invoke_args8, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H);
block_args_impl!(concrete_block_invoke_args9, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I);
block_args_impl!(concrete_block_invoke_args10, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J);
block_args_impl!(concrete_block_invoke_args11, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K);
block_args_impl!(concrete_block_invoke_args12, a: A, b: B, c: C, d: D, e: E, f: F, g: G, h: H, i: I, j: J, k: K, l: L);
/// An Objective-C block that takes arguments of `A` when called and
/// returns a value of `R`.
///
/// Field order matches the Block ABI header: isa, flags, reserved, invoke.
#[repr(C)]
pub struct Block<A: BlockArguments, R> {
    isa: *const Class,
    flags: c_int,
    _reserved: c_int,
    invoke: BlockInvoke<A, R>,
}
impl<A: BlockArguments, R> Block<A, R> {
    /// Copy self onto the heap.
    ///
    /// Sends the Objective-C `copy` message and takes ownership of the
    /// returned retained pointer.
    pub fn copy(&self) -> Id<Block<A, R>> {
        unsafe {
            let block = msg_send![self copy] as *mut Block<A, R>;
            Id::from_retained_ptr(block)
        }
    }
    /// Call self with the given arguments.
    pub fn call(&self, args: A) -> R {
        args.call_block(self)
    }
}
// Blocks are Objective-C objects and may be sent messages (used by `copy`).
impl<A: BlockArguments, R> Message for Block<A, R> { }
/// An Objective-C block whose size is known at compile time and may be
/// constructed on the stack.
#[repr(C)]
pub struct ConcreteBlock<A: BlockArguments, R, C> {
    // Standard block header; must be the first field for the ABI.
    base: Block<A, R>,
    // Boxed because this descriptor is per-instance rather than static.
    descriptor: Box<BlockDescriptor<ConcreteBlock<A, R, C>>>,
    // Rust function invoked with the captured context and the call arguments.
    rust_invoke: fn (&C, A) -> R,
    // Captured state, playing the role of a block's captured variables.
    context: C,
}
impl<A: BlockArguments, R, C: Clone> ConcreteBlock<A, R, C> {
    /// Constructs a `ConcreteBlock` with the given invoke function and context.
    /// When the block is called, it will return the value that results from
    /// calling the invoke function with a reference to its context.
    pub fn new(invoke: fn (&C, A) -> R, context: C) -> ConcreteBlock<A, R, C> {
        // C-compatible trampoline for this argument arity.
        let extern_invoke: ConcreteBlockInvoke<A, R, C> =
            BlockArguments::invoke_for_concrete_block();
        ConcreteBlock {
            base: Block {
                isa: &_NSConcreteStackBlock,
                // 1 << 25 = BLOCK_HAS_COPY_DISPOSE
                flags: 1 << 25,
                _reserved: 0,
                invoke: unsafe { mem::transmute(extern_invoke) },
            },
            descriptor: box BlockDescriptor::<A, R, C>::new(),
            rust_invoke: invoke,
            context: context,
        }
    }
}
// Clone rebuilds the block via `new` (fresh header and descriptor) with a
// clone of the captured context.
impl<A: BlockArguments, R, C: Clone> Clone for ConcreteBlock<A, R, C> {
    fn clone(&self) -> ConcreteBlock<A, R, C> {
        ConcreteBlock::new(self.rust_invoke, self.context.clone())
    }
}
// Note: no `Clone` bound on `C` here — deref never touches the context.
// (Old pre-1.0 `Deref` syntax: the target is a trait type parameter.)
impl<A: BlockArguments, R, C> Deref<Block<A, R>> for ConcreteBlock<A, R, C> {
    fn deref(&self) -> &Block<A, R> {
        &self.base
    }
}
// Dispose helper installed in the block descriptor: invoked by the
// Objective-C runtime when a copied block is released for the last time.
// (No `Clone` bound needed — dropping never clones.)
unsafe extern fn block_context_dispose<A: BlockArguments, R, C>(
        block: &mut ConcreteBlock<A, R, C>) {
    // Read the block onto the stack and let it drop
    ptr::read(block);
}
// Copy helper installed in the block descriptor: invoked by the runtime
// after it has moved the stack block's bytes to the heap, to fix up the
// non-POD fields. Uses `ptr::write` so the destination fields are not
// dropped before being overwritten.
unsafe extern fn block_context_copy<A: BlockArguments, R, C: Clone>(
        dst: &mut ConcreteBlock<A, R, C>, src: &ConcreteBlock<A, R, C>) {
    // Doesn't seem like the descriptor is supposed to change in this function,
    // but our descriptor isn't static (that's hard), so we just clone the box.
    ptr::write(&mut dst.descriptor, src.descriptor.clone());
    // The src block actually gets memmoved to the destination beforehand,
    // but we'll set the function pointer, too, to be safe.
    ptr::write(&mut dst.rust_invoke, src.rust_invoke);
    ptr::write(&mut dst.context, src.context.clone());
}
// Block descriptor as laid out by the Block ABI: size plus the copy and
// dispose helpers (present because the block's flags set
// BLOCK_HAS_COPY_DISPOSE).
#[repr(C)]
struct BlockDescriptor<B> {
    _reserved: c_ulong,
    block_size: c_ulong,
    copy_helper: unsafe extern fn(&mut B, &B),
    dispose_helper: unsafe extern fn(&mut B),
}
impl<A: BlockArguments, R, C: Clone> BlockDescriptor<ConcreteBlock<A, R, C>> {
    // Builds a descriptor wired to the helper functions monomorphized for
    // this concrete block type.
    fn new() -> BlockDescriptor<ConcreteBlock<A, R, C>> {
        BlockDescriptor {
            _reserved: 0,
            block_size: mem::size_of::<ConcreteBlock<A, R, C>>() as c_ulong,
            copy_helper: block_context_copy::<A, R, C>,
            dispose_helper: block_context_dispose::<A, R, C>,
        }
    }
}
// The descriptor is plain data (two counters and two fn pointers), so a
// bitwise copy is a valid clone.
impl<B> Copy for BlockDescriptor<B> { }
impl<B> Clone for BlockDescriptor<B> {
    fn clone(&self) -> BlockDescriptor<B> {
        *self
    }
}
#[cfg(test)]
mod tests {
    use Id;
    use objc_test_utils;
    use super::{Block, ConcreteBlock};
    // Borrow the statically-allocated no-argument int block from the C test
    // fixtures (the tests below expect it to return 7).
    fn get_int_block() -> &'static Block<(), int> {
        unsafe {
            &*(objc_test_utils::get_int_block() as *const _)
        }
    }
    // Retained int block capturing `i`; ownership is transferred into `Id`.
    fn get_int_block_with(i: int) -> Id<Block<(), int>> {
        unsafe {
            let ptr = objc_test_utils::get_int_block_with(i);
            Id::from_retained_ptr(ptr as *mut _)
        }
    }
    // Statically-allocated one-int-argument block from the fixtures.
    fn get_add_block() -> &'static Block<(int,), int> {
        unsafe {
            &*(objc_test_utils::get_add_block() as *const _)
        }
    }
    // Retained add block capturing `i`.
    fn get_add_block_with(i: int) -> Id<Block<(int,), int>> {
        unsafe {
            let ptr = objc_test_utils::get_add_block_with(i);
            Id::from_retained_ptr(ptr as *mut _)
        }
    }
    // Invoke through the C side rather than `Block::call`, so that blocks we
    // build in Rust are exercised across the real ABI boundary.
    fn invoke_int_block(block: &Block<(), int>) -> int {
        let ptr = block as *const _;
        unsafe {
            objc_test_utils::invoke_int_block(ptr as *const _)
        }
    }
    fn invoke_add_block(block: &Block<(int,), int>, a: int) -> int {
        let ptr = block as *const _;
        unsafe {
            objc_test_utils::invoke_add_block(ptr as *const _, a)
        }
    }
    // Calling foreign blocks from Rust.
    #[test]
    fn test_call_block() {
        let block = get_int_block();
        assert!(block.call(()) == 7);
        let block = get_int_block_with(13);
        assert!(block.call(()) == 13);
    }
    #[test]
    fn test_call_block_args() {
        let block = get_add_block();
        assert!(block.call((2,)) == 9);
        let block = get_add_block_with(13);
        assert!(block.call((2,)) == 15);
    }
    // Building blocks in Rust and invoking them from the C side.
    #[test]
    fn test_create_block() {
        fn block_get_int(context: &int, _args: ()) -> int {
            *context
        }
        let block = ConcreteBlock::new(block_get_int, 13);
        let result = invoke_int_block(&*block);
        assert!(result == 13);
    }
    #[test]
    fn test_create_block_args() {
        fn block_add_int(context: &int, (a,): (int,)) -> int {
            a + *context
        }
        let block = ConcreteBlock::new(block_add_int, 5);
        let result = invoke_add_block(&*block, 6);
        assert!(result == 11);
    }
    // A clone must own an independent copy of the context: it stays valid
    // after the original block is dropped.
    #[test]
    fn test_concrete_block_clone() {
        fn block_get_string_len(context: &String, _args: ()) -> uint {
            context.len()
        }
        let s = "Hello!".into_string();
        let expected_len = s.len();
        let block = ConcreteBlock::new(block_get_string_len, s);
        assert!(block.call(()) == expected_len);
        let cloned = block.clone();
        assert!(cloned.call(()) == expected_len);
        drop(block);
        assert!(cloned.call(()) == expected_len);
    }
    // `copy` sends the Objective-C `copy` message, which runs the
    // descriptor's copy helper; the copy must outlive the original.
    #[test]
    fn test_concrete_block_copy() {
        fn block_get_string_len(context: &String, _args: ()) -> uint {
            context.len()
        }
        let s = "Hello!".into_string();
        let expected_len = s.len();
        let block = ConcreteBlock::new(block_get_string_len, s);
        assert!(block.call(()) == expected_len);
        let copied = block.copy();
        assert!(copied.call(()) == expected_len);
        drop(block);
        assert!(copied.call(()) == expected_len);
    }
}
|
extern crate cranelift_codegen;
extern crate cranelift_entity;
extern crate cranelift_frontend;
extern crate cranelift_module;
extern crate cranelift_simplejit;
use cranelift_codegen::ir::*;
use cranelift_codegen::settings::*;
use cranelift_codegen::Context;
use cranelift_entity::EntityRef;
use cranelift_frontend::*;
use cranelift_module::*;
use cranelift_simplejit::*;
// Re-declaring a function under the same name with a different signature
// must be rejected by the module.
#[test]
fn error_on_incompatible_sig_in_declare_function() {
    let mut module: Module<SimpleJITBackend> = Module::new(SimpleJITBuilder::new());
    let mut sig = Signature {
        params: vec![AbiParam::new(types::I64)],
        returns: vec![],
        call_conv: CallConv::SystemV,
    };
    module
        .declare_function("abc", Linkage::Local, &sig)
        .unwrap();
    // Same name, but the parameter type changed from I64 to I32.
    sig.params[0] = AbiParam::new(types::I32);
    module
        .declare_function("abc", Linkage::Local, &sig)
        .err()
        .unwrap(); // Make sure this is an error
}
// Declares and defines a trivial `fn()` named "abc" in `module` — its body
// is a single empty return — and hands back the function's id.
fn define_simple_function(module: &mut Module<SimpleJITBackend>) -> FuncId {
    let sig = Signature {
        params: vec![],
        returns: vec![],
        call_conv: CallConv::SystemV,
    };
    let func_id = module
        .declare_function("abc", Linkage::Local, &sig)
        .unwrap();
    let mut ctx = Context::new();
    ctx.func = Function::with_name_signature(ExternalName::user(0, func_id.index() as u32), sig);
    let mut func_ctx = FunctionBuilderContext::new();
    {
        // Build the single-block body: just `return`.
        let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
        let ebb = bcx.create_ebb();
        bcx.switch_to_block(ebb);
        bcx.ins().return_(&[]);
    }
    module.define_function(func_id, &mut ctx).unwrap();
    func_id
}
// `finalize_definitions` must be idempotent when nothing new was defined.
#[test]
fn double_finalize() {
    let mut module: Module<SimpleJITBackend> = Module::new(SimpleJITBuilder::new());
    // The FuncId is not needed here; discarding it avoids an
    // unused-variable warning.
    define_simple_function(&mut module);
    module.finalize_definitions();
    // Calling `finalize_definitions` a second time without any new definitions
    // should have no effect.
    module.finalize_definitions();
}
// Defining the same function a second time (after finalization) must fail
// with a DuplicateDefinition error, surfaced here as a panic from unwrap.
#[test]
#[should_panic(expected = "Result::unwrap()` on an `Err` value: DuplicateDefinition(\"abc\")")]
fn panic_on_define_after_finalize() {
    let mut module: Module<SimpleJITBackend> = Module::new(SimpleJITBuilder::new());
    // Discard the FuncId — it is unused and would otherwise warn.
    define_simple_function(&mut module);
    module.finalize_definitions();
    define_simple_function(&mut module);
}
Fix unused variable warnings.
extern crate cranelift_codegen;
extern crate cranelift_entity;
extern crate cranelift_frontend;
extern crate cranelift_module;
extern crate cranelift_simplejit;
use cranelift_codegen::ir::*;
use cranelift_codegen::settings::*;
use cranelift_codegen::Context;
use cranelift_entity::EntityRef;
use cranelift_frontend::*;
use cranelift_module::*;
use cranelift_simplejit::*;
#[test]
fn error_on_incompatible_sig_in_declare_function() {
let mut module: Module<SimpleJITBackend> = Module::new(SimpleJITBuilder::new());
let mut sig = Signature {
params: vec![AbiParam::new(types::I64)],
returns: vec![],
call_conv: CallConv::SystemV,
};
module
.declare_function("abc", Linkage::Local, &sig)
.unwrap();
sig.params[0] = AbiParam::new(types::I32);
module
.declare_function("abc", Linkage::Local, &sig)
.err()
.unwrap(); // Make sure this is an error
}
fn define_simple_function(module: &mut Module<SimpleJITBackend>) -> FuncId {
let sig = Signature {
params: vec![],
returns: vec![],
call_conv: CallConv::SystemV,
};
let func_id = module
.declare_function("abc", Linkage::Local, &sig)
.unwrap();
let mut ctx = Context::new();
ctx.func = Function::with_name_signature(ExternalName::user(0, func_id.index() as u32), sig);
let mut func_ctx = FunctionBuilderContext::new();
{
let mut bcx: FunctionBuilder = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
let ebb = bcx.create_ebb();
bcx.switch_to_block(ebb);
bcx.ins().return_(&[]);
}
module.define_function(func_id, &mut ctx).unwrap();
func_id
}
// `finalize_definitions` must be idempotent when nothing new was defined.
#[test]
fn double_finalize() {
    let mut module: Module<SimpleJITBackend> = Module::new(SimpleJITBuilder::new());
    define_simple_function(&mut module);
    module.finalize_definitions();
    // Calling `finalize_definitions` a second time without any new definitions
    // should have no effect.
    module.finalize_definitions();
}
// Defining the same function a second time (after finalization) must fail
// with a DuplicateDefinition error, surfaced here as a panic from unwrap.
#[test]
#[should_panic(expected = "Result::unwrap()` on an `Err` value: DuplicateDefinition(\"abc\")")]
fn panic_on_define_after_finalize() {
    let mut module: Module<SimpleJITBackend> = Module::new(SimpleJITBuilder::new());
    define_simple_function(&mut module);
    module.finalize_definitions();
    define_simple_function(&mut module);
}
|
use rand::prelude::*;
use std::iter::{repeat, FromIterator};
use test::{black_box, Bencher};
#[bench]
fn bench_new(b: &mut Bencher) {
    // Construct an empty Vec each iteration and check that it neither holds
    // elements nor allocates; return it so construction isn't optimized out.
    b.iter(|| {
        let empty = Vec::<u32>::new();
        assert_eq!(empty.len(), 0);
        assert_eq!(empty.capacity(), 0);
        empty
    })
}
// Benchmarks `Vec::with_capacity(src_len)`: the result must be empty but
// already hold exactly `src_len` of capacity.
fn do_bench_with_capacity(b: &mut Bencher, src_len: usize) {
    b.bytes = src_len as u64;
    b.iter(|| {
        let v: Vec<u32> = Vec::with_capacity(src_len);
        assert_eq!(v.len(), 0);
        assert_eq!(v.capacity(), src_len);
        v
    })
}
#[bench]
fn bench_with_capacity_0000(b: &mut Bencher) {
do_bench_with_capacity(b, 0)
}
#[bench]
fn bench_with_capacity_0010(b: &mut Bencher) {
do_bench_with_capacity(b, 10)
}
#[bench]
fn bench_with_capacity_0100(b: &mut Bencher) {
do_bench_with_capacity(b, 100)
}
#[bench]
fn bench_with_capacity_1000(b: &mut Bencher) {
do_bench_with_capacity(b, 1000)
}
// Benchmarks collecting a range iterator into a Vec; verifies each slot
// holds its own index.
fn do_bench_from_fn(b: &mut Bencher, src_len: usize) {
    b.bytes = src_len as u64;
    b.iter(|| {
        let dst = (0..src_len).collect::<Vec<_>>();
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    })
}
#[bench]
fn bench_from_fn_0000(b: &mut Bencher) {
do_bench_from_fn(b, 0)
}
#[bench]
fn bench_from_fn_0010(b: &mut Bencher) {
do_bench_from_fn(b, 10)
}
#[bench]
fn bench_from_fn_0100(b: &mut Bencher) {
do_bench_from_fn(b, 100)
}
#[bench]
fn bench_from_fn_1000(b: &mut Bencher) {
do_bench_from_fn(b, 1000)
}
// Benchmarks filling a Vec with `src_len` copies of a single element via
// `repeat(..).take(..).collect()`.
fn do_bench_from_elem(b: &mut Bencher, src_len: usize) {
    b.bytes = src_len as u64;
    b.iter(|| {
        let dst: Vec<usize> = repeat(5).take(src_len).collect();
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().all(|x| *x == 5));
        dst
    })
}
#[bench]
fn bench_from_elem_0000(b: &mut Bencher) {
do_bench_from_elem(b, 0)
}
#[bench]
fn bench_from_elem_0010(b: &mut Bencher) {
do_bench_from_elem(b, 10)
}
#[bench]
fn bench_from_elem_0100(b: &mut Bencher) {
do_bench_from_elem(b, 100)
}
#[bench]
fn bench_from_elem_1000(b: &mut Bencher) {
do_bench_from_elem(b, 1000)
}
// Benchmarks `to_vec` on a slice of length `src_len`.
fn do_bench_from_slice(b: &mut Bencher, src_len: usize) {
    let src: Vec<_> = FromIterator::from_iter(0..src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        // NOTE(review): `src.clone()` adds an extra full copy of `src` to
        // the measured work on top of the `to_vec`; `src[..].to_vec()` alone
        // would isolate `to_vec` — confirm the clone is intentional.
        let dst = src.clone()[..].to_vec();
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
#[bench]
fn bench_from_slice_0000(b: &mut Bencher) {
do_bench_from_slice(b, 0)
}
#[bench]
fn bench_from_slice_0010(b: &mut Bencher) {
do_bench_from_slice(b, 10)
}
#[bench]
fn bench_from_slice_0100(b: &mut Bencher) {
do_bench_from_slice(b, 100)
}
#[bench]
fn bench_from_slice_1000(b: &mut Bencher) {
do_bench_from_slice(b, 1000)
}
// Benchmarks `FromIterator::from_iter` over an owned Vec (the clone feeds
// the iterator each iteration).
fn do_bench_from_iter(b: &mut Bencher, src_len: usize) {
    let src: Vec<_> = FromIterator::from_iter(0..src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let dst: Vec<_> = FromIterator::from_iter(src.clone());
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
#[bench]
fn bench_from_iter_0000(b: &mut Bencher) {
do_bench_from_iter(b, 0)
}
#[bench]
fn bench_from_iter_0010(b: &mut Bencher) {
do_bench_from_iter(b, 10)
}
#[bench]
fn bench_from_iter_0100(b: &mut Bencher) {
do_bench_from_iter(b, 100)
}
#[bench]
fn bench_from_iter_1000(b: &mut Bencher) {
do_bench_from_iter(b, 1000)
}
// Benchmarks `Vec::extend` from an owned Vec: a `dst_len`-element
// destination extended by `src_len` elements. Values are chosen so the
// final vec is exactly 0..dst_len+src_len, which the assert checks.
fn do_bench_extend(b: &mut Bencher, dst_len: usize, src_len: usize) {
    let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
    let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let mut dst = dst.clone();
        dst.extend(src.clone());
        assert_eq!(dst.len(), dst_len + src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
#[bench]
fn bench_extend_0000_0000(b: &mut Bencher) {
do_bench_extend(b, 0, 0)
}
#[bench]
fn bench_extend_0000_0010(b: &mut Bencher) {
do_bench_extend(b, 0, 10)
}
#[bench]
fn bench_extend_0000_0100(b: &mut Bencher) {
do_bench_extend(b, 0, 100)
}
#[bench]
fn bench_extend_0000_1000(b: &mut Bencher) {
do_bench_extend(b, 0, 1000)
}
#[bench]
fn bench_extend_0010_0010(b: &mut Bencher) {
do_bench_extend(b, 10, 10)
}
#[bench]
fn bench_extend_0100_0100(b: &mut Bencher) {
do_bench_extend(b, 100, 100)
}
#[bench]
fn bench_extend_1000_1000(b: &mut Bencher) {
do_bench_extend(b, 1000, 1000)
}
// Benchmarks `Vec::extend_from_slice` (borrowing path), mirroring
// `do_bench_extend` which measures the owning `extend` path.
fn do_bench_extend_from_slice(b: &mut Bencher, dst_len: usize, src_len: usize) {
    let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
    let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let mut dst = dst.clone();
        dst.extend_from_slice(&src);
        assert_eq!(dst.len(), dst_len + src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
// Measures `extend` when the source buffer is moved out of and back into a
// long-lived binding, so its allocation can be recycled across iterations.
#[bench]
fn bench_extend_recycle(b: &mut Bencher) {
    let mut data = vec![0; 1000];
    b.iter(|| {
        // `mem::take` is the idiomatic spelling of
        // `mem::replace(&mut data, Vec::new())` for Default types.
        let tmp = std::mem::take(&mut data);
        let mut to_extend = black_box(Vec::new());
        to_extend.extend(tmp.into_iter());
        data = black_box(to_extend);
    });
    black_box(data);
}
#[bench]
fn bench_extend_from_slice_0000_0000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 0)
}
#[bench]
fn bench_extend_from_slice_0000_0010(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 10)
}
#[bench]
fn bench_extend_from_slice_0000_0100(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 100)
}
#[bench]
fn bench_extend_from_slice_0000_1000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 1000)
}
#[bench]
fn bench_extend_from_slice_0010_0010(b: &mut Bencher) {
do_bench_extend_from_slice(b, 10, 10)
}
#[bench]
fn bench_extend_from_slice_0100_0100(b: &mut Bencher) {
do_bench_extend_from_slice(b, 100, 100)
}
#[bench]
fn bench_extend_from_slice_1000_1000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 1000, 1000)
}
// Benchmarks `Vec::clone` for a `src_len`-element vector.
fn do_bench_clone(b: &mut Bencher, src_len: usize) {
    let src: Vec<usize> = FromIterator::from_iter(0..src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let dst = src.clone();
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
#[bench]
fn bench_clone_0000(b: &mut Bencher) {
do_bench_clone(b, 0)
}
#[bench]
fn bench_clone_0010(b: &mut Bencher) {
do_bench_clone(b, 10)
}
#[bench]
fn bench_clone_0100(b: &mut Bencher) {
do_bench_clone(b, 100)
}
#[bench]
fn bench_clone_1000(b: &mut Bencher) {
do_bench_clone(b, 1000)
}
// Benchmarks `Vec::clone_from` repeated `times` times into the same
// destination, so allocation reuse across calls is part of what's measured.
fn do_bench_clone_from(b: &mut Bencher, times: usize, dst_len: usize, src_len: usize) {
    // NOTE(review): the initial destination is built from `0..src_len`, not
    // `0..dst_len`, so `dst_len` only shifts the source values rather than
    // sizing the destination — confirm this matches the bench names
    // (e.g. `_0010_0100` suggesting dst 10 / src 100).
    let dst: Vec<_> = FromIterator::from_iter(0..src_len);
    let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
    b.bytes = (times * src_len) as u64;
    b.iter(|| {
        let mut dst = dst.clone();
        for _ in 0..times {
            dst.clone_from(&src);
            assert_eq!(dst.len(), src_len);
            assert!(dst.iter().enumerate().all(|(i, x)| dst_len + i == *x));
        }
        dst
    });
}
#[bench]
fn bench_clone_from_01_0000_0000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 0)
}
#[bench]
fn bench_clone_from_01_0000_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 10)
}
#[bench]
fn bench_clone_from_01_0000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 100)
}
#[bench]
fn bench_clone_from_01_0000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 10)
}
#[bench]
fn bench_clone_from_01_0100_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 100)
}
#[bench]
fn bench_clone_from_01_1000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 1000, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 100)
}
#[bench]
fn bench_clone_from_01_0100_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 0)
}
#[bench]
fn bench_clone_from_01_0100_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 10)
}
#[bench]
fn bench_clone_from_01_1000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 1000, 100)
}
#[bench]
fn bench_clone_from_10_0000_0000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 0)
}
#[bench]
fn bench_clone_from_10_0000_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 10)
}
#[bench]
fn bench_clone_from_10_0000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 100)
}
#[bench]
fn bench_clone_from_10_0000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 10)
}
#[bench]
fn bench_clone_from_10_0100_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 100)
}
#[bench]
fn bench_clone_from_10_1000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 1000, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 100)
}
#[bench]
fn bench_clone_from_10_0100_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 0)
}
#[bench]
fn bench_clone_from_10_0100_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 10)
}
#[bench]
fn bench_clone_from_10_1000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 1000, 100)
}
// Generates one #[bench] per `name, type, count, init` row: collects a Vec
// of `$count` `$init` values through enumerate+map back into a Vec of the
// same element type — a shape intended to hit the in-place collect path.
macro_rules! bench_in_place {
    ($($fname:ident, $type:ty, $count:expr, $init:expr);*) => {
        $(
            #[bench]
            fn $fname(b: &mut Bencher) {
                b.iter(|| {
                    let src: Vec<$type> = black_box(vec![$init; $count]);
                    let mut sink = src.into_iter()
                        .enumerate()
                        .map(|(idx, e)| idx as $type ^ e)
                        .collect::<Vec<$type>>();
                    // Return the data pointer so the optimizer cannot
                    // discard the collect.
                    black_box(sink.as_mut_ptr())
                });
            }
        )+
    };
}
bench_in_place![
bench_in_place_xxu8_0010_i0, u8, 10, 0;
bench_in_place_xxu8_0100_i0, u8, 100, 0;
bench_in_place_xxu8_1000_i0, u8, 1000, 0;
bench_in_place_xxu8_0010_i1, u8, 10, 1;
bench_in_place_xxu8_0100_i1, u8, 100, 1;
bench_in_place_xxu8_1000_i1, u8, 1000, 1;
bench_in_place_xu32_0010_i0, u32, 10, 0;
bench_in_place_xu32_0100_i0, u32, 100, 0;
bench_in_place_xu32_1000_i0, u32, 1000, 0;
bench_in_place_xu32_0010_i1, u32, 10, 1;
bench_in_place_xu32_0100_i1, u32, 100, 1;
bench_in_place_xu32_1000_i1, u32, 1000, 1;
bench_in_place_u128_0010_i0, u128, 10, 0;
bench_in_place_u128_0100_i0, u128, 100, 0;
bench_in_place_u128_1000_i0, u128, 1000, 0;
bench_in_place_u128_0010_i1, u128, 10, 1;
bench_in_place_u128_0100_i1, u128, 100, 1;
bench_in_place_u128_1000_i1, u128, 1000, 1
];
// Collect an enumerate/map/fuse/peekable pipeline back into a Vec of the
// same element type, moving the buffer out of and back into a long-lived
// binding each iteration so its allocation can be recycled.
#[bench]
fn bench_in_place_recycle(b: &mut Bencher) {
    let mut data = vec![0; 1000];
    b.iter(|| {
        // `mem::take` is the idiomatic spelling of
        // `mem::replace(&mut data, Vec::new())` for Default types.
        let tmp = std::mem::take(&mut data);
        data = black_box(
            tmp.into_iter()
                .enumerate()
                .map(|(idx, e)| idx.wrapping_add(e))
                .fuse()
                .peekable()
                .collect::<Vec<usize>>(),
        );
    });
}
// Like bench_in_place_recycle, but the pipeline zips the owned buffer with
// a borrowed substitution table before mapping.
#[bench]
fn bench_in_place_zip_recycle(b: &mut Bencher) {
    let mut data = vec![0u8; 1000];
    let mut rng = rand::thread_rng();
    let mut subst = vec![0u8; 1000];
    rng.fill_bytes(&mut subst[..]);
    b.iter(|| {
        // `mem::take` is the idiomatic spelling of
        // `mem::replace(&mut data, Vec::new())` for Default types.
        let tmp = std::mem::take(&mut data);
        let mangled = tmp
            .into_iter()
            .zip(subst.iter().copied())
            .enumerate()
            .map(|(i, (d, s))| d.wrapping_add(i as u8) ^ s)
            .collect::<Vec<_>>();
        assert_eq!(mangled.len(), 1000);
        data = black_box(mangled);
    });
}
// Mutates the buffer through `iter_mut` instead of an owning collect
// pipeline, as a comparison point for bench_in_place_zip_recycle.
#[bench]
fn bench_in_place_zip_iter_mut(b: &mut Bencher) {
    let mut data = vec![0u8; 256];
    let mut rng = rand::thread_rng();
    // NOTE(review): `subst` holds 1000 bytes but `data` only 256, so
    // `subst[256..]` is never read — confirm the size mismatch (vs. the
    // matched 1000/1000 in the zip_recycle bench) is intentional.
    let mut subst = vec![0u8; 1000];
    rng.fill_bytes(&mut subst[..]);
    b.iter(|| {
        data.iter_mut().enumerate().for_each(|(i, d)| {
            *d = d.wrapping_add(i as u8) ^ subst[i];
        });
    });
    black_box(data);
}
// Element type with a non-trivial Drop, so collecting cannot treat the
// elements as plain bytes.
#[derive(Clone)]
struct Droppable(usize);
impl Drop for Droppable {
    fn drop(&mut self) {
        black_box(self);
    }
}
// Collect with Drop-bearing elements where `skip(100)` forces the pipeline
// to drop 100 source elements while mapping the remaining 900 into the
// result.
#[bench]
fn bench_in_place_collect_droppable(b: &mut Bencher) {
    let v: Vec<Droppable> = std::iter::repeat_with(|| Droppable(0)).take(1000).collect();
    b.iter(|| {
        v.clone()
            .into_iter()
            .skip(100)
            .enumerate()
            .map(|(i, e)| Droppable(i ^ e.0))
            .collect::<Vec<_>>()
    })
}
#[bench]
fn bench_chain_collect(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| data.iter().cloned().chain([1].iter().cloned()).collect::<Vec<_>>());
}
#[bench]
fn bench_chain_chain_collect(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
data.iter()
.cloned()
.chain([1].iter().cloned())
.chain([2].iter().cloned())
.collect::<Vec<_>>()
});
}
#[bench]
fn bench_nest_chain_chain_collect(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
data.iter().cloned().chain([1].iter().chain([2].iter()).cloned()).collect::<Vec<_>>()
});
}
/// Returns a reversed copy of `l`; baseline implementation measured by
/// `bench_rev_2` (pre-sized buffer, bulk-extended from the reversed
/// iterator).
pub fn example_plain_slow(l: &[u32]) -> Vec<u32> {
    let mut reversed = Vec::with_capacity(l.len());
    reversed.extend(l.iter().rev());
    reversed
}
pub fn map_fast(l: &[(u32, u32)]) -> Vec<u32> {
let mut result = Vec::with_capacity(l.len());
for i in 0..l.len() {
unsafe {
*result.get_unchecked_mut(i) = l[i].0;
result.set_len(i);
}
}
result
}
const LEN: usize = 16384;
#[bench]
fn bench_range_map_collect(b: &mut Bencher) {
b.iter(|| (0..LEN).map(|_| u32::default()).collect::<Vec<_>>());
}
#[bench]
fn bench_chain_extend_ref(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::with_capacity(data.len() + 1);
v.extend(data.iter().chain([1].iter()));
v
});
}
#[bench]
fn bench_chain_extend_value(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::with_capacity(data.len() + 1);
v.extend(data.iter().cloned().chain(Some(1)));
v
});
}
#[bench]
fn bench_rev_1(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| {
let mut v = Vec::<u32>::new();
v.extend(data.iter().rev());
v
});
}
#[bench]
fn bench_rev_2(b: &mut Bencher) {
let data = black_box([0; LEN]);
b.iter(|| example_plain_slow(&data));
}
#[bench]
fn bench_map_regular(b: &mut Bencher) {
let data = black_box([(0, 0); LEN]);
b.iter(|| {
let mut v = Vec::<u32>::new();
v.extend(data.iter().map(|t| t.1));
v
});
}
#[bench]
fn bench_map_fast(b: &mut Bencher) {
let data = black_box([(0, 0); LEN]);
b.iter(|| map_fast(&data));
}
Rollup merge of #77044 - pickfire:patch-4, r=jyn514
Liballoc bench vec use mem take not replace
use rand::prelude::*;
use std::iter::{repeat, FromIterator};
use test::{black_box, Bencher};
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| {
let v: Vec<u32> = Vec::new();
assert_eq!(v.len(), 0);
assert_eq!(v.capacity(), 0);
v
})
}
fn do_bench_with_capacity(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| {
let v: Vec<u32> = Vec::with_capacity(src_len);
assert_eq!(v.len(), 0);
assert_eq!(v.capacity(), src_len);
v
})
}
#[bench]
fn bench_with_capacity_0000(b: &mut Bencher) {
do_bench_with_capacity(b, 0)
}
#[bench]
fn bench_with_capacity_0010(b: &mut Bencher) {
do_bench_with_capacity(b, 10)
}
#[bench]
fn bench_with_capacity_0100(b: &mut Bencher) {
do_bench_with_capacity(b, 100)
}
#[bench]
fn bench_with_capacity_1000(b: &mut Bencher) {
do_bench_with_capacity(b, 1000)
}
fn do_bench_from_fn(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| {
let dst = (0..src_len).collect::<Vec<_>>();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
dst
})
}
#[bench]
fn bench_from_fn_0000(b: &mut Bencher) {
do_bench_from_fn(b, 0)
}
#[bench]
fn bench_from_fn_0010(b: &mut Bencher) {
do_bench_from_fn(b, 10)
}
#[bench]
fn bench_from_fn_0100(b: &mut Bencher) {
do_bench_from_fn(b, 100)
}
#[bench]
fn bench_from_fn_1000(b: &mut Bencher) {
do_bench_from_fn(b, 1000)
}
fn do_bench_from_elem(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| {
let dst: Vec<usize> = repeat(5).take(src_len).collect();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().all(|x| *x == 5));
dst
})
}
#[bench]
fn bench_from_elem_0000(b: &mut Bencher) {
do_bench_from_elem(b, 0)
}
#[bench]
fn bench_from_elem_0010(b: &mut Bencher) {
do_bench_from_elem(b, 10)
}
#[bench]
fn bench_from_elem_0100(b: &mut Bencher) {
do_bench_from_elem(b, 100)
}
#[bench]
fn bench_from_elem_1000(b: &mut Bencher) {
do_bench_from_elem(b, 1000)
}
fn do_bench_from_slice(b: &mut Bencher, src_len: usize) {
let src: Vec<_> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| {
let dst = src.clone()[..].to_vec();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
dst
});
}
#[bench]
fn bench_from_slice_0000(b: &mut Bencher) {
do_bench_from_slice(b, 0)
}
#[bench]
fn bench_from_slice_0010(b: &mut Bencher) {
do_bench_from_slice(b, 10)
}
#[bench]
fn bench_from_slice_0100(b: &mut Bencher) {
do_bench_from_slice(b, 100)
}
#[bench]
fn bench_from_slice_1000(b: &mut Bencher) {
do_bench_from_slice(b, 1000)
}
/// Benchmarks `FromIterator::from_iter` fed with an owned `Vec` (i.e. a
/// `vec::IntoIter` source). The `src.clone()` is unavoidable inside the timed
/// closure because `from_iter` consumes its argument, so that clone's cost is
/// included in the measurement.
fn do_bench_from_iter(b: &mut Bencher, src_len: usize) {
    let src: Vec<_> = FromIterator::from_iter(0..src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let dst: Vec<_> = FromIterator::from_iter(src.clone());
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
// Size ladder: 0, 10, 100 and 1000 elements.
#[bench]
fn bench_from_iter_0000(b: &mut Bencher) {
    do_bench_from_iter(b, 0)
}
#[bench]
fn bench_from_iter_0010(b: &mut Bencher) {
    do_bench_from_iter(b, 10)
}
#[bench]
fn bench_from_iter_0100(b: &mut Bencher) {
    do_bench_from_iter(b, 100)
}
#[bench]
fn bench_from_iter_1000(b: &mut Bencher) {
    do_bench_from_iter(b, 1000)
}
/// Benchmarks `Extend::extend` with an owned `Vec` source, appending `src_len`
/// elements to a vector that already holds `dst_len`. The clones of `dst` and
/// `src` happen inside the timed closure and are part of the measurement.
fn do_bench_extend(b: &mut Bencher, dst_len: usize, src_len: usize) {
    let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
    // `src` continues the sequence so the combined vector is 0..dst_len+src_len.
    let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let mut dst = dst.clone();
        dst.extend(src.clone());
        assert_eq!(dst.len(), dst_len + src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
// Naming scheme: bench_extend_<dst_len>_<src_len>.
#[bench]
fn bench_extend_0000_0000(b: &mut Bencher) {
    do_bench_extend(b, 0, 0)
}
#[bench]
fn bench_extend_0000_0010(b: &mut Bencher) {
    do_bench_extend(b, 0, 10)
}
#[bench]
fn bench_extend_0000_0100(b: &mut Bencher) {
    do_bench_extend(b, 0, 100)
}
#[bench]
fn bench_extend_0000_1000(b: &mut Bencher) {
    do_bench_extend(b, 0, 1000)
}
#[bench]
fn bench_extend_0010_0010(b: &mut Bencher) {
    do_bench_extend(b, 10, 10)
}
#[bench]
fn bench_extend_0100_0100(b: &mut Bencher) {
    do_bench_extend(b, 100, 100)
}
#[bench]
fn bench_extend_1000_1000(b: &mut Bencher) {
    do_bench_extend(b, 1000, 1000)
}
/// Same shape as `do_bench_extend`, but uses the specialized
/// `extend_from_slice` (borrowing) path instead of the generic `extend`.
fn do_bench_extend_from_slice(b: &mut Bencher, dst_len: usize, src_len: usize) {
    let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
    let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let mut dst = dst.clone();
        dst.extend_from_slice(&src);
        assert_eq!(dst.len(), dst_len + src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
// Measures extending an empty Vec with an owned 1000-element Vec each
// iteration; `mem::take` moves the buffer out (leaving an empty Vec behind)
// and `black_box` keeps the optimizer from seeing through the recycling.
#[bench]
fn bench_extend_recycle(b: &mut Bencher) {
    let mut data = vec![0; 1000];
    b.iter(|| {
        let tmp = std::mem::take(&mut data);
        let mut to_extend = black_box(Vec::new());
        to_extend.extend(tmp.into_iter());
        data = black_box(to_extend);
    });
    black_box(data);
}
// Naming scheme: bench_extend_from_slice_<dst_len>_<src_len>.
#[bench]
fn bench_extend_from_slice_0000_0000(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 0, 0)
}
#[bench]
fn bench_extend_from_slice_0000_0010(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 0, 10)
}
#[bench]
fn bench_extend_from_slice_0000_0100(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 0, 100)
}
#[bench]
fn bench_extend_from_slice_0000_1000(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 0, 1000)
}
#[bench]
fn bench_extend_from_slice_0010_0010(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 10, 10)
}
#[bench]
fn bench_extend_from_slice_0100_0100(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 100, 100)
}
#[bench]
fn bench_extend_from_slice_1000_1000(b: &mut Bencher) {
    do_bench_extend_from_slice(b, 1000, 1000)
}
/// Benchmarks `Vec::clone` for a vector of `src_len` sequential integers.
fn do_bench_clone(b: &mut Bencher, src_len: usize) {
    let src: Vec<usize> = FromIterator::from_iter(0..src_len);
    b.bytes = src_len as u64;
    b.iter(|| {
        let dst = src.clone();
        assert_eq!(dst.len(), src_len);
        assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
        dst
    });
}
// Size ladder: 0, 10, 100 and 1000 elements.
#[bench]
fn bench_clone_0000(b: &mut Bencher) {
    do_bench_clone(b, 0)
}
#[bench]
fn bench_clone_0010(b: &mut Bencher) {
    do_bench_clone(b, 10)
}
#[bench]
fn bench_clone_0100(b: &mut Bencher) {
    do_bench_clone(b, 100)
}
#[bench]
fn bench_clone_1000(b: &mut Bencher) {
    do_bench_clone(b, 1000)
}
/// Benchmarks `Vec::clone_from`, repeated `times` per iteration so allocation
/// reuse across successive `clone_from` calls is exercised.
fn do_bench_clone_from(b: &mut Bencher, times: usize, dst_len: usize, src_len: usize) {
    // NOTE(review): `dst` is built from `src_len`, not `dst_len` — the
    // destination always starts with `src_len` elements and `dst_len` only
    // offsets the source values. Looks unintended given the parameter names;
    // confirm against upstream intent before "fixing", since changing it
    // changes what the benchmark measures.
    let dst: Vec<_> = FromIterator::from_iter(0..src_len);
    let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
    b.bytes = (times * src_len) as u64;
    b.iter(|| {
        let mut dst = dst.clone();
        for _ in 0..times {
            dst.clone_from(&src);
            assert_eq!(dst.len(), src_len);
            assert!(dst.iter().enumerate().all(|(i, x)| dst_len + i == *x));
        }
        dst
    });
}
// Naming scheme: bench_clone_from_<times>_<dst_len>_<src_len>.
#[bench]
fn bench_clone_from_01_0000_0000(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 0, 0)
}
#[bench]
fn bench_clone_from_01_0000_0010(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 0, 10)
}
#[bench]
fn bench_clone_from_01_0000_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 0, 100)
}
#[bench]
fn bench_clone_from_01_0000_1000(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 0, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0010(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 10, 10)
}
#[bench]
fn bench_clone_from_01_0100_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 100, 100)
}
#[bench]
fn bench_clone_from_01_1000_1000(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 1000, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 10, 100)
}
#[bench]
fn bench_clone_from_01_0100_1000(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 100, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0000(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 10, 0)
}
#[bench]
fn bench_clone_from_01_0100_0010(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 100, 10)
}
#[bench]
fn bench_clone_from_01_1000_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 1, 1000, 100)
}
#[bench]
fn bench_clone_from_10_0000_0000(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 0, 0)
}
#[bench]
fn bench_clone_from_10_0000_0010(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 0, 10)
}
#[bench]
fn bench_clone_from_10_0000_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 0, 100)
}
#[bench]
fn bench_clone_from_10_0000_1000(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 0, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0010(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 10, 10)
}
#[bench]
fn bench_clone_from_10_0100_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 100, 100)
}
#[bench]
fn bench_clone_from_10_1000_1000(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 1000, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 10, 100)
}
#[bench]
fn bench_clone_from_10_0100_1000(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 100, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0000(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 10, 0)
}
#[bench]
fn bench_clone_from_10_0100_0010(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 100, 10)
}
#[bench]
fn bench_clone_from_10_1000_0100(b: &mut Bencher) {
    do_bench_clone_from(b, 10, 1000, 100)
}
// Generates one `#[bench]` per `(name, element type, length, seed value)`
// tuple. Each generated benchmark collects `vec -> into_iter -> enumerate ->
// map -> collect`, the shape eligible for Vec's in-place collection
// specialization; `black_box` on the input and on the result pointer keeps
// the round trip from being optimized away.
macro_rules! bench_in_place {
    ($($fname:ident, $type:ty, $count:expr, $init:expr);*) => {
        $(
            #[bench]
            fn $fname(b: &mut Bencher) {
                b.iter(|| {
                    let src: Vec<$type> = black_box(vec![$init; $count]);
                    let mut sink = src.into_iter()
                        .enumerate()
                        .map(|(idx, e)| idx as $type ^ e)
                        .collect::<Vec<$type>>();
                    black_box(sink.as_mut_ptr())
                });
            }
        )+
    };
}
// Instantiations: u8/u32/u128 element sizes at lengths 10/100/1000,
// seeded with 0 (`i0`) or 1 (`i1`).
bench_in_place![
    bench_in_place_xxu8_0010_i0, u8, 10, 0;
    bench_in_place_xxu8_0100_i0, u8, 100, 0;
    bench_in_place_xxu8_1000_i0, u8, 1000, 0;
    bench_in_place_xxu8_0010_i1, u8, 10, 1;
    bench_in_place_xxu8_0100_i1, u8, 100, 1;
    bench_in_place_xxu8_1000_i1, u8, 1000, 1;
    bench_in_place_xu32_0010_i0, u32, 10, 0;
    bench_in_place_xu32_0100_i0, u32, 100, 0;
    bench_in_place_xu32_1000_i0, u32, 1000, 0;
    bench_in_place_xu32_0010_i1, u32, 10, 1;
    bench_in_place_xu32_0100_i1, u32, 100, 1;
    bench_in_place_xu32_1000_i1, u32, 1000, 1;
    bench_in_place_u128_0010_i0, u128, 10, 0;
    bench_in_place_u128_0100_i0, u128, 100, 0;
    bench_in_place_u128_1000_i0, u128, 1000, 0;
    bench_in_place_u128_0010_i1, u128, 10, 1;
    bench_in_place_u128_0100_i1, u128, 100, 1;
    bench_in_place_u128_1000_i1, u128, 1000, 1
];
// In-place collect through a longer adapter stack (`fuse` + `peekable`),
// recycling the same 1000-element buffer every iteration via `mem::take`.
#[bench]
fn bench_in_place_recycle(b: &mut Bencher) {
    let mut data = vec![0; 1000];
    b.iter(|| {
        let tmp = std::mem::take(&mut data);
        data = black_box(
            tmp.into_iter()
                .enumerate()
                .map(|(idx, e)| idx.wrapping_add(e))
                .fuse()
                .peekable()
                .collect::<Vec<usize>>(),
        );
    });
}
// In-place collect with a `zip` against a random substitution table; the
// table is filled once up front so RNG cost stays out of the timed closure.
#[bench]
fn bench_in_place_zip_recycle(b: &mut Bencher) {
    let mut data = vec![0u8; 1000];
    let mut rng = rand::thread_rng();
    let mut subst = vec![0u8; 1000];
    rng.fill_bytes(&mut subst[..]);
    b.iter(|| {
        let tmp = std::mem::take(&mut data);
        let mangled = tmp
            .into_iter()
            .zip(subst.iter().copied())
            .enumerate()
            .map(|(i, (d, s))| d.wrapping_add(i as u8) ^ s)
            .collect::<Vec<_>>();
        assert_eq!(mangled.len(), 1000);
        data = black_box(mangled);
    });
}
// Baseline for the zip benchmark above: mutate the buffer in place through
// `iter_mut` instead of collecting a fresh Vec.
// NOTE(review): `data` is 256 elements while `subst` is 1000, so only the
// first 256 substitution bytes are ever read — looks like a size mismatch
// with `bench_in_place_zip_recycle`; confirm whether 256 is intentional.
#[bench]
fn bench_in_place_zip_iter_mut(b: &mut Bencher) {
    let mut data = vec![0u8; 256];
    let mut rng = rand::thread_rng();
    let mut subst = vec![0u8; 1000];
    rng.fill_bytes(&mut subst[..]);
    b.iter(|| {
        data.iter_mut().enumerate().for_each(|(i, d)| {
            *d = d.wrapping_add(i as u8) ^ subst[i];
        });
    });
    black_box(data);
}
// Element type with a non-trivial `Drop`, used by
// `bench_in_place_collect_droppable` to exercise drop handling during
// in-place collection.
#[derive(Clone)]
struct Droppable(usize);
impl Drop for Droppable {
    fn drop(&mut self) {
        // `black_box` keeps the optimizer from proving the drop is a no-op.
        black_box(self);
    }
}
// In-place collect over drop-bearing elements; `skip(100)` forces 100 source
// elements to be dropped without ever reaching the output Vec.
#[bench]
fn bench_in_place_collect_droppable(b: &mut Bencher) {
    let v: Vec<Droppable> = std::iter::repeat_with(|| Droppable(0)).take(1000).collect();
    b.iter(|| {
        v.clone()
            .into_iter()
            .skip(100)
            .enumerate()
            .map(|(i, e)| Droppable(i ^ e.0))
            .collect::<Vec<_>>()
    })
}
// Collecting from one, two, and nested `chain` adapters over a LEN-element
// array, to compare how chaining affects `collect`'s size hints and codegen.
#[bench]
fn bench_chain_collect(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| data.iter().cloned().chain([1].iter().cloned()).collect::<Vec<_>>());
}
#[bench]
fn bench_chain_chain_collect(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| {
        data.iter()
            .cloned()
            .chain([1].iter().cloned())
            .chain([2].iter().cloned())
            .collect::<Vec<_>>()
    });
}
#[bench]
fn bench_nest_chain_chain_collect(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| {
        data.iter().cloned().chain([1].iter().chain([2].iter()).cloned()).collect::<Vec<_>>()
    });
}
/// Returns a reversed copy of `l`; the out-of-line baseline measured by
/// `bench_rev_2` (same work as the inline body of `bench_rev_1`).
pub fn example_plain_slow(l: &[u32]) -> Vec<u32> {
    l.iter().rev().copied().collect()
}
pub fn map_fast(l: &[(u32, u32)]) -> Vec<u32> {
let mut result = Vec::with_capacity(l.len());
for i in 0..l.len() {
unsafe {
*result.get_unchecked_mut(i) = l[i].0;
result.set_len(i);
}
}
result
}
// Element count shared by the chain/extend/rev/map benchmarks below.
const LEN: usize = 16384;
// Collecting from a mapped range (exact size hint, trivial element).
#[bench]
fn bench_range_map_collect(b: &mut Bencher) {
    b.iter(|| (0..LEN).map(|_| u32::default()).collect::<Vec<_>>());
}
// `extend` from a chained iterator of references into a pre-sized Vec.
#[bench]
fn bench_chain_extend_ref(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| {
        let mut v = Vec::<u32>::with_capacity(data.len() + 1);
        v.extend(data.iter().chain([1].iter()));
        v
    });
}
// Same, but chaining an owned value via `Option`'s IntoIterator.
#[bench]
fn bench_chain_extend_value(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| {
        let mut v = Vec::<u32>::with_capacity(data.len() + 1);
        v.extend(data.iter().cloned().chain(Some(1)));
        v
    });
}
// Reverse-copy via inline `extend` (no preallocation)...
#[bench]
fn bench_rev_1(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| {
        let mut v = Vec::<u32>::new();
        v.extend(data.iter().rev());
        v
    });
}
// ...versus the out-of-line `example_plain_slow` (with preallocation).
#[bench]
fn bench_rev_2(b: &mut Bencher) {
    let data = black_box([0; LEN]);
    b.iter(|| example_plain_slow(&data));
}
// Projecting one tuple field via safe `extend`...
#[bench]
fn bench_map_regular(b: &mut Bencher) {
    let data = black_box([(0, 0); LEN]);
    b.iter(|| {
        let mut v = Vec::<u32>::new();
        v.extend(data.iter().map(|t| t.1));
        v
    });
}
// ...versus the unchecked-write `map_fast` above.
#[bench]
fn bench_map_fast(b: &mut Bencher) {
    let data = black_box([(0, 0); LEN]);
    b.iter(|| map_fast(&data));
}
|
#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
#![doc(hidden)]
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
#[cfg(test)]
mod tests;
/// Whether a fresh allocation's bytes are left as-is or zero-filled.
#[cfg(not(no_global_oom_handling))]
enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub struct RawVec<T, A: Allocator = Global> {
    // Pointer to the buffer; dangling when `cap == 0` or `T` is zero-sized.
    ptr: Unique<T>,
    // Capacity in *elements*, not bytes; `0` means "unallocated".
    cap: usize,
    // Allocator that owns the buffer's memory.
    alloc: A,
}
impl<T> RawVec<T, Global> {
    /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
    /// they cannot call `Self::new()`.
    ///
    /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
    /// that would truly const-call something unstable.
    pub const NEW: Self = Self::new();
    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    pub const fn new() -> Self {
        Self::new_in(Global)
    }
    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }
    /// Reconstitutes a `RawVec` from a pointer and capacity.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
    #[inline]
    pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
        // SAFETY: forwarded to `from_raw_parts_in`; the caller upholds the contract above.
        unsafe { Self::from_raw_parts_in(ptr, capacity, Global) }
    }
}
impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocator is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };
    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[rustc_allow_const_fn_unstable(const_fn)]
    pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self { ptr: Unique::dangling(), cap: 0, alloc }
    }
    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }
    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
    }
    /// Converts a `Box<[T]>` into a `RawVec<T>`.
    pub fn from_box(slice: Box<[T], A>) -> Self {
        unsafe {
            // SAFETY: the pointer, length, and allocator all come straight
            // from a live `Box`, which satisfies `from_raw_parts_in`'s contract.
            let (slice, alloc) = Box::into_raw_with_allocator(slice);
            RawVec::from_raw_parts_in(slice.as_mut_ptr(), slice.len(), alloc)
        }
    }
    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note, that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );
        // `ManuallyDrop` prevents this `RawVec`'s `Drop` from freeing the
        // buffer — ownership of the memory moves into the returned `Box`.
        let me = ManuallyDrop::new(self);
        unsafe {
            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::from_raw_in(slice, ptr::read(&me.alloc))
        }
    }
    // Allocates a fresh buffer for `capacity` elements (or nothing for ZSTs),
    // with the requested initialization policy.
    #[cfg(not(no_global_oom_handling))]
    fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
        if mem::size_of::<T>() == 0 {
            Self::new_in(alloc)
        } else {
            // We avoid `unwrap_or_else` here because it bloats the amount of
            // LLVM IR generated.
            let layout = match Layout::array::<T>(capacity) {
                Ok(layout) => layout,
                Err(_) => capacity_overflow(),
            };
            match alloc_guard(layout.size()) {
                Ok(_) => {}
                Err(_) => capacity_overflow(),
            }
            let result = match init {
                AllocInit::Uninitialized => alloc.allocate(layout),
                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
            };
            let ptr = match result {
                Ok(ptr) => ptr,
                Err(_) => handle_alloc_error(layout),
            };
            Self {
                // SAFETY: a successful allocation never returns a null pointer.
                ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
                // Use the (possibly over-sized) block the allocator actually returned.
                cap: Self::capacity_from_bytes(ptr.len()),
                alloc,
            }
        }
    }
    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline]
    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
    }
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }
    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
    }
    /// Returns a shared reference to the allocator backing this `RawVec`.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }
    // Returns the live allocation's pointer and layout, or `None` when nothing
    // was allocated (ZST element type, or `cap == 0`).
    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if mem::size_of::<T>() == 0 || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                let layout = Layout::from_size_align_unchecked(size, align);
                Some((self.ptr.cast().into(), layout))
            }
        }
    }
    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(raw_vec_internals)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vector = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that the this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        #[cold]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }
        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }
    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)
        } else {
            Ok(())
        }
    }
    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }
    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
    }
    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn shrink_to_fit(&mut self, amount: usize) {
        handle_reserve(self.shrink(amount));
    }
}
impl<T, A: Allocator> RawVec<T, A> {
    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        // `wrapping_sub`: if `len > capacity()` this wraps to a huge value and
        // the comparison is `false`, so no growth is attempted — consistent
        // with the caveat documented on `reserve`/`reserve_exact`.
        additional > self.capacity().wrapping_sub(len)
    }
    // Converts a byte count returned by the allocator into a capacity in elements.
    fn capacity_from_bytes(excess: usize) -> usize {
        debug_assert_ne!(mem::size_of::<T>(), 0);
        excess / mem::size_of::<T>()
    }
    // Records a (possibly over-sized) block returned by the allocator as the
    // new buffer, deriving the element capacity from the block's byte length.
    fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
        self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = Self::capacity_from_bytes(ptr.len());
    }
    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }
        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
        let new_layout = Layout::array::<T>(cap);
        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr(ptr);
        Ok(())
    }
    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }
        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);
        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr(ptr);
        Ok(())
    }
    // Shrinks the buffer to hold exactly `amount` elements; fallible helper
    // behind `shrink_to_fit`.
    fn shrink(&mut self, amount: usize) -> Result<(), TryReserveError> {
        assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
        let new_size = amount * mem::size_of::<T>();
        let ptr = unsafe {
            // SAFETY: `new_size <= layout.size()` (enforced by the assert
            // above) and the alignment is unchanged, so the layout is valid
            // for the existing allocation.
            let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
            self.alloc
                .shrink(ptr, layout, new_layout)
                .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
        };
        self.set_ptr(ptr);
        Ok(())
    }
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
//
// `new_layout` arrives as a `Result` so the `Layout::array` error path is also
// centralized here instead of being duplicated in every `grow_*` instantiation.
#[inline(never)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
    alloc_guard(new_layout.size())?;
    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            intrinsics::assume(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        alloc.allocate(new_layout)
    };
    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        // `current_memory` is `None` for ZST elements and unallocated buffers,
        // so only real allocations reach `deallocate`.
        if let Some((ptr, layout)) = self.current_memory() {
            unsafe { self.alloc.deallocate(ptr, layout) }
        }
    }
}
// Central function for reserve error handling: capacity overflows panic,
// allocator failures go through `handle_alloc_error`.
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
    match result.map_err(|e| e.kind()) {
        Err(CapacityOverflow) => capacity_overflow(),
        Err(AllocError { layout, .. }) => handle_alloc_error(layout),
        Ok(()) => { /* yay */ }
    }
}
// Invariants enforced here:
// * no single allocation may exceed `isize::MAX` bytes;
// * `usize::MAX` must not be overflowed into allocating too little.
//
// On 64-bit targets the overflow check alone suffices, because any request
// above `isize::MAX` bytes is certain to fail anyway. On 16/32-bit targets
// (which may expose the full 4 GiB address space to user code, e.g. PAE or
// x32) the explicit size guard is required as well.
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
    if usize::BITS >= 64 || alloc_size <= isize::MAX as usize {
        Ok(())
    } else {
        Err(CapacityOverflow.into())
    }
}
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
/// Panics with the module's single "capacity overflow" message; never returns.
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}
Rollup merge of #88432 - terrarier2111:patch-1, r=joshtriplett
Fix a typo in raw_vec
#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
#![doc(hidden)]
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
#[cfg(test)]
mod tests;
/// Whether a fresh allocation's bytes are left as-is or zero-filled.
#[cfg(not(no_global_oom_handling))]
enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub struct RawVec<T, A: Allocator = Global> {
    // Pointer to the buffer; dangling when `cap == 0` or `T` is zero-sized.
    ptr: Unique<T>,
    // Capacity in *elements*, not bytes; `0` means "unallocated".
    cap: usize,
    // Allocator that owns the buffer's memory.
    alloc: A,
}
impl<T> RawVec<T, Global> {
    /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
    /// they cannot call `Self::new()`.
    ///
    /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
    /// that would truly const-call something unstable.
    pub const NEW: Self = Self::new();

    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    pub const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }

    /// Reconstitutes a `RawVec` from a pointer and capacity.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
    #[inline]
    pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
        // SAFETY: forwarded to the caller's contract above, with `Global` as the allocator.
        unsafe { Self::from_raw_parts_in(ptr, capacity, Global) }
    }
}
impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocators is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[rustc_allow_const_fn_unstable(const_fn)]
    pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self { ptr: Unique::dangling(), cap: 0, alloc }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
    }

    /// Converts a `Box<[T]>` into a `RawVec<T>`.
    pub fn from_box(slice: Box<[T], A>) -> Self {
        unsafe {
            // Take ownership of the box's buffer and allocator without freeing either.
            let (slice, alloc) = Box::into_raw_with_allocator(slice);
            RawVec::from_raw_parts_in(slice.as_mut_ptr(), slice.len(), alloc)
        }
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note, that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );
        // Prevent `self`'s Drop from freeing the buffer we are about to hand to the Box.
        let me = ManuallyDrop::new(self);
        unsafe {
            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            // `ptr::read` moves the allocator out of the ManuallyDrop wrapper.
            Box::from_raw_in(slice, ptr::read(&me.alloc))
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
        // ZSTs never allocate: capacity is conceptually infinite (see `capacity()`).
        if mem::size_of::<T>() == 0 {
            Self::new_in(alloc)
        } else {
            // We avoid `unwrap_or_else` here because it bloats the amount of
            // LLVM IR generated.
            let layout = match Layout::array::<T>(capacity) {
                Ok(layout) => layout,
                Err(_) => capacity_overflow(),
            };
            match alloc_guard(layout.size()) {
                Ok(_) => {}
                Err(_) => capacity_overflow(),
            }
            let result = match init {
                AllocInit::Uninitialized => alloc.allocate(layout),
                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
            };
            let ptr = match result {
                Ok(ptr) => ptr,
                Err(_) => handle_alloc_error(layout),
            };
            Self {
                ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
                // Use the allocator's actual returned size, which may exceed the request.
                cap: Self::capacity_from_bytes(ptr.len()),
                alloc,
            }
        }
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline]
    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    // Returns the pointer and layout of the live allocation, or `None` when
    // nothing was allocated (ZST or `cap == 0`).
    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if mem::size_of::<T>() == 0 || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                let layout = Layout::from_size_align_unchecked(size, align);
                Some((self.ptr.cast().into(), layout))
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(raw_vec_internals)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vector = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        #[cold]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }
        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)
        } else {
            Ok(())
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn shrink_to_fit(&mut self, amount: usize) {
        handle_reserve(self.shrink(amount));
    }
}
impl<T, A: Allocator> RawVec<T, A> {
    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        // `wrapping_sub` keeps this branch-free even when `len > capacity()`;
        // in that case the huge wrapped value still makes the comparison true.
        additional > self.capacity().wrapping_sub(len)
    }

    // Converts a byte size returned by the allocator into an element capacity.
    fn capacity_from_bytes(excess: usize) -> usize {
        // Division by the element size is only meaningful for non-ZSTs.
        debug_assert_ne!(mem::size_of::<T>(), 0);
        excess / mem::size_of::<T>()
    }

    // Records a (possibly over-sized) allocation returned by the allocator.
    fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
        self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = Self::capacity_from_bytes(ptr.len());
    }

    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }
        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
        let new_layout = Layout::array::<T>(cap);
        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr(ptr);
        Ok(())
    }

    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }
        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);
        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr(ptr);
        Ok(())
    }

    // Shrinks the buffer to hold exactly `amount` elements; a failure from the
    // allocator is surfaced as an `AllocError` for `handle_reserve` to report.
    fn shrink(&mut self, amount: usize) -> Result<(), TryReserveError> {
        assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
        // Nothing allocated (ZST or `cap == 0`): shrinking is a no-op.
        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
        let new_size = amount * mem::size_of::<T>();
        let ptr = unsafe {
            // SAFETY: `new_size <= layout.size()` (asserted above), same alignment.
            let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
            self.alloc
                .shrink(ptr, layout, new_layout)
                .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
        };
        self.set_ptr(ptr);
        Ok(())
    }
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
//
// `new_layout` is the (possibly failed) layout for the requested capacity;
// `current_memory` is the existing allocation, if any, as returned by
// `RawVec::current_memory`. On success returns the (possibly over-sized)
// new allocation.
#[inline(never)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
    alloc_guard(new_layout.size())?;
    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            intrinsics::assume(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        // First allocation: nothing to grow from.
        alloc.allocate(new_layout)
    };
    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        // `current_memory()` is `None` for ZSTs and unallocated buffers, so we
        // never attempt to deallocate a dangling pointer.
        if let Some((ptr, layout)) = self.current_memory() {
            // SAFETY: this allocation came from `self.alloc` with exactly this layout.
            unsafe { self.alloc.deallocate(ptr, layout) }
        }
    }
}
// Central function for reserve error handling.
//
// Success is a no-op; a capacity overflow panics via `capacity_overflow`, and
// an allocator failure is reported through `handle_alloc_error`.
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
    if let Err(err) = result {
        match err.kind() {
            CapacityOverflow => capacity_overflow(),
            AllocError { layout, .. } => handle_alloc_error(layout),
        }
    }
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
    // On 64-bit targets the first operand is true at compile time, so the
    // whole guard folds away to `Ok(())`.
    if usize::BITS >= 64 || alloc_size <= isize::MAX as usize {
        Ok(())
    } else {
        Err(CapacityOverflow.into())
    }
}
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
    // Diverges: callers rely on this never returning.
    panic!("capacity overflow");
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
/// Worker contains all workers that do the expensive job in background.
mod metrics;
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle, Builder};
use std::io;
use std::fmt::{self, Formatter, Display, Debug};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Sender, Receiver, SendError};
use std::error::Error;
use util::SlowTimer;
use self::metrics::*;
/// Error returned by `Scheduler::schedule` when the worker's channel is
/// closed; carries back the task that could not be delivered.
pub struct Stopped<T>(pub T);
impl<T> Display for Stopped<T> {
    /// Human-readable description; the carried task is intentionally omitted.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.write_str("channel has been closed")
    }
}
impl<T> Debug for Stopped<T> {
    /// Same text as `Display`; the task itself may not be `Debug`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.write_str("channel has been closed")
    }
}
impl<T> From<Stopped<T>> for Box<Error + Sync + Send + 'static> {
    // Allows `try!`/`?` to convert a scheduling failure into a boxed error;
    // the undelivered task is dropped in the conversion.
    fn from(_: Stopped<T>) -> Box<Error + Sync + Send + 'static> {
        box_err!("channel has been closed")
    }
}
/// A handler that processes one task at a time on the worker thread.
pub trait Runnable<T: Display> {
    /// Handle a single task.
    fn run(&mut self, t: T);
}
/// A handler that processes tasks in batches on the worker thread.
pub trait BatchRunnable<T: Display> {
    /// run a batch of tasks.
    ///
    /// Please note that `ts` will be cleared after invoking this method.
    fn run_batch(&mut self, ts: &mut Vec<T>);
}
// Every `Runnable` is trivially a `BatchRunnable`: run the batch one task at a
// time, logging any task that takes suspiciously long.
impl<T: Display, R: Runnable<T>> BatchRunnable<T> for R {
    fn run_batch(&mut self, ts: &mut Vec<T>) {
        for t in ts.drain(..) {
            // Render the task up front: `run` consumes `t`.
            let task_str = format!("{}", t);
            let timer = SlowTimer::new();
            self.run(t);
            slow_log!(timer, "handle task {}", task_str);
        }
    }
}
/// Scheduler provides interface to schedule task to underlying workers.
pub struct Scheduler<T> {
    // Worker name, shared with clones; used for logging and metrics labels.
    name: Arc<String>,
    // Number of scheduled-but-not-yet-dequeued tasks (see `is_busy`).
    counter: Arc<AtomicUsize>,
    // `Some(task)` schedules a task; `None` asks the worker thread to exit.
    sender: Sender<Option<T>>,
}
impl<T: Display> Scheduler<T> {
    // Private: schedulers are only handed out by `Worker` (or `dummy_scheduler`).
    fn new<S: Into<String>>(name: S,
                            counter: AtomicUsize,
                            sender: Sender<Option<T>>)
                            -> Scheduler<T> {
        Scheduler {
            name: Arc::new(name.into()),
            counter: Arc::new(counter),
            sender: sender,
        }
    }
    /// Schedule a task to run.
    ///
    /// If the worker is stopped, an error will return.
    pub fn schedule(&self, task: T) -> Result<(), Stopped<T>> {
        debug!("scheduling task {}", task);
        // A failed send gives the task back inside the error so the caller
        // can recover it.
        if let Err(SendError(Some(t))) = self.sender.send(Some(task)) {
            return Err(Stopped(t));
        }
        self.counter.fetch_add(1, Ordering::SeqCst);
        PENDING_TASKS.with_label_values(&[&self.name]).inc();
        Ok(())
    }
    /// Check if underlying worker can't handle task immediately.
    pub fn is_busy(&self) -> bool {
        self.counter.load(Ordering::SeqCst) > 0
    }
}
// Hand-written because `derive(Clone)` would demand `T: Clone`, which tasks
// need not satisfy; all three fields are cheaply clonable handles.
impl<T: Display> Clone for Scheduler<T> {
    fn clone(&self) -> Scheduler<T> {
        let name = self.name.clone();
        let counter = self.counter.clone();
        let sender = self.sender.clone();
        Scheduler {
            name: name,
            counter: counter,
            sender: sender,
        }
    }
}
/// Create a scheduler that can't be scheduled any task.
///
/// Useful for test purpose.
#[cfg(test)]
pub fn dummy_scheduler<T: Display>() -> Scheduler<T> {
    // The receiver is dropped immediately, so every `schedule` call fails
    // with `Stopped`.
    let (tx, _) = mpsc::channel();
    Scheduler::new("dummy scheduler", AtomicUsize::new(0), tx)
}
/// A worker that can schedule time consuming tasks.
pub struct Worker<T: Display> {
    // Handle used (and handed out) to enqueue tasks.
    scheduler: Scheduler<T>,
    // Receiving end of the channel; taken by the background thread on `start`.
    receiver: Mutex<Option<Receiver<Option<T>>>>,
    // Join handle of the background thread; `None` until started or after `stop`.
    handle: Option<JoinHandle<()>>,
}
// Background-thread loop: block for one task, then opportunistically drain up
// to `batch_size` tasks before handing the batch to the runner. Receiving
// `None` (or a disconnected channel) terminates the loop.
fn poll<R, T>(mut runner: R, rx: Receiver<Option<T>>, counter: Arc<AtomicUsize>, batch_size: usize)
    where R: BatchRunnable<T> + Send + 'static,
          T: Display + Send + 'static
{
    // Safe to unwrap: the spawning side always names this thread via `Builder`.
    let name = thread::current().name().unwrap().to_owned();
    let mut keep_going = true;
    let mut buffer = Vec::with_capacity(batch_size);
    while keep_going {
        // Block until at least one task arrives.
        let t = rx.recv();
        match t {
            Ok(Some(t)) => buffer.push(t),
            // `Ok(None)` (stop request) or `Err` (all senders gone): exit now.
            _ => return,
        }
        // Non-blocking drain to fill the rest of the batch.
        while buffer.len() < batch_size {
            match rx.try_recv() {
                Ok(None) => {
                    // Stop requested: process what we have, then exit the outer loop.
                    keep_going = false;
                    break;
                }
                Ok(Some(t)) => buffer.push(t),
                _ => break,
            }
        }
        // Dequeued tasks no longer count as pending for `is_busy`/metrics,
        // even though they have not been run yet.
        counter.fetch_sub(buffer.len(), Ordering::SeqCst);
        PENDING_TASKS.with_label_values(&[&name]).sub(buffer.len() as f64);
        runner.run_batch(&mut buffer);
        buffer.clear();
    }
}
impl<T: Display + Send + 'static> Worker<T> {
    /// Create a worker.
    ///
    /// The worker does nothing until `start` (or `start_batch`) is called.
    pub fn new<S: Into<String>>(name: S) -> Worker<T> {
        let (tx, rx) = mpsc::channel();
        Worker {
            scheduler: Scheduler::new(name, AtomicUsize::new(0), tx),
            receiver: Mutex::new(Some(rx)),
            handle: None,
        }
    }
    /// Start the worker.
    pub fn start<R: Runnable<T> + Send + 'static>(&mut self, runner: R) -> Result<(), io::Error> {
        // A plain `Runnable` is just a batch of size 1.
        self.start_batch(runner, 1)
    }
    /// Start the worker with a runner that handles tasks in batches of at
    /// most `batch_size`.
    ///
    /// Starting an already started worker is a no-op (a warning is logged).
    pub fn start_batch<R>(&mut self, runner: R, batch_size: usize) -> Result<(), io::Error>
        where R: BatchRunnable<T> + Send + 'static
    {
        let mut receiver = self.receiver.lock().unwrap();
        info!("starting working thread: {}", self.scheduler.name);
        if receiver.is_none() {
            warn!("worker {} has been started.", self.scheduler.name);
            return Ok(());
        }
        let rx = receiver.take().unwrap();
        let counter = self.scheduler.counter.clone();
        let h = try!(Builder::new()
            .name(thd_name!(self.scheduler.name.as_ref()))
            .spawn(move || poll(runner, rx, counter, batch_size)));
        self.handle = Some(h);
        Ok(())
    }
    /// Get a scheduler to schedule task.
    pub fn scheduler(&self) -> Scheduler<T> {
        self.scheduler.clone()
    }
    /// Schedule a task to run.
    ///
    /// If the worker is stopped, an error will return.
    pub fn schedule(&self, task: T) -> Result<(), Stopped<T>> {
        self.scheduler.schedule(task)
    }
    /// Check if underlying worker can't handle task immediately.
    pub fn is_busy(&self) -> bool {
        // A worker that was never started (or was stopped) is always "busy".
        self.handle.is_none() || self.scheduler.is_busy()
    }
    /// Name of the worker, as passed to `new`.
    pub fn name(&self) -> &str {
        self.scheduler.name.as_str()
    }
    /// Stop the worker thread.
    ///
    /// Returns the join handle of the background thread, if it was started.
    pub fn stop(&mut self) -> Option<thread::JoinHandle<()>> {
        // close sender explicitly so the background thread will exit.
        // (Typo fix: was "stoping".)
        info!("stopping {}", self.scheduler.name);
        if self.handle.is_none() {
            return None;
        }
        if let Err(e) = self.scheduler.sender.send(None) {
            warn!("failed to stop worker thread: {:?}", e);
        }
        self.handle.take()
    }
}
#[cfg(test)]
mod test {
    use std::thread;
    use std::sync::*;
    use std::sync::mpsc::*;
    use std::time::Duration;
    use super::*;

    // Echoes each task back over a channel, then sleeps `step` millis so the
    // worker stays busy long enough for `is_busy` assertions.
    struct StepRunner {
        ch: Sender<u64>,
    }
    impl Runnable<u64> for StepRunner {
        fn run(&mut self, step: u64) {
            self.ch.send(step).unwrap();
            thread::sleep(Duration::from_millis(step));
        }
    }

    // Echoes each whole batch back over a channel.
    struct BatchRunner {
        ch: Sender<Vec<u64>>,
    }
    impl BatchRunnable<u64> for BatchRunner {
        fn run_batch(&mut self, ms: &mut Vec<u64>) {
            self.ch.send(ms.to_vec()).unwrap();
        }
    }

    #[test]
    fn test_worker() {
        let mut worker = Worker::new("test-worker");
        let (tx, rx) = mpsc::channel();
        worker.start(StepRunner { ch: tx }).unwrap();
        assert!(!worker.is_busy());
        worker.schedule(60).unwrap();
        worker.schedule(40).unwrap();
        worker.schedule(50).unwrap();
        assert!(worker.is_busy());
        // Tasks are handled in FIFO order.
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 60);
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 40);
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 50);
        assert!(!worker.is_busy());
        worker.stop().unwrap().join().unwrap();
        // now worker can't handle any task
        assert!(worker.is_busy());
    }

    #[test]
    fn test_threaded() {
        // Scheduling from another thread via a cloned scheduler must work.
        let mut worker = Worker::new("test-worker-threaded");
        let (tx, rx) = mpsc::channel();
        worker.start(StepRunner { ch: tx }).unwrap();
        let scheduler = worker.scheduler();
        thread::spawn(move || {
            scheduler.schedule(90).unwrap();
            scheduler.schedule(110).unwrap();
        });
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 90);
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 110);
        worker.stop().unwrap().join().unwrap();
    }

    #[test]
    fn test_batch() {
        let mut worker = Worker::new("test-worker-batch");
        let (tx, rx) = mpsc::channel();
        worker.start_batch(BatchRunner { ch: tx }, 10).unwrap();
        for _ in 0..20 {
            worker.schedule(50).unwrap();
        }
        worker.stop().unwrap().join().unwrap();
        // Sum whatever batching happened; only the total is deterministic.
        let mut sum = 0;
        loop {
            match rx.recv_timeout(Duration::from_secs(3)) {
                Ok(v) => sum += v.into_iter().fold(0, |a, b| a + b),
                Err(RecvTimeoutError::Timeout) => panic!("unexpected timeout"),
                Err(RecvTimeoutError::Disconnected) => break,
            }
        }
        assert_eq!(sum, 50 * 20);
    }
}
worker: support shutdown (#1586)
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
/// Worker contains all workers that do the expensive job in background.
mod metrics;
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle, Builder};
use std::io;
use std::fmt::{self, Formatter, Display, Debug};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Sender, Receiver, SendError};
use std::error::Error;
use util::SlowTimer;
use self::metrics::*;
/// Error returned by `Scheduler::schedule` when the worker's channel is
/// closed; carries back the task that could not be delivered.
pub struct Stopped<T>(pub T);
impl<T> Display for Stopped<T> {
    /// Human-readable description; the carried task is intentionally omitted.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.write_str("channel has been closed")
    }
}
impl<T> Debug for Stopped<T> {
    /// Same text as `Display`; the task itself may not be `Debug`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.write_str("channel has been closed")
    }
}
impl<T> From<Stopped<T>> for Box<Error + Sync + Send + 'static> {
    // Allows `try!`/`?` to convert a scheduling failure into a boxed error;
    // the undelivered task is dropped in the conversion.
    fn from(_: Stopped<T>) -> Box<Error + Sync + Send + 'static> {
        box_err!("channel has been closed")
    }
}
/// A handler that processes one task at a time on the worker thread.
pub trait Runnable<T: Display> {
    /// Handle a single task.
    fn run(&mut self, t: T);
    /// Called once when the worker thread is shutting down. Default: no-op.
    fn shutdown(&mut self) {}
}
/// A handler that processes tasks in batches on the worker thread.
pub trait BatchRunnable<T: Display> {
    /// run a batch of tasks.
    ///
    /// Please note that `ts` will be cleared after invoking this method.
    fn run_batch(&mut self, ts: &mut Vec<T>);
    /// Called once when the worker thread is shutting down. Default: no-op.
    fn shutdown(&mut self) {}
}
// Every `Runnable` is trivially a `BatchRunnable`: run the batch one task at a
// time, logging any task that takes suspiciously long.
impl<T: Display, R: Runnable<T>> BatchRunnable<T> for R {
    fn run_batch(&mut self, ts: &mut Vec<T>) {
        for t in ts.drain(..) {
            // Render the task up front: `run` consumes `t`.
            let task_str = format!("{}", t);
            let timer = SlowTimer::new();
            self.run(t);
            slow_log!(timer, "handle task {}", task_str);
        }
    }
    fn shutdown(&mut self) {
        // Delegate to the Runnable's own shutdown hook; qualified call to
        // avoid infinitely recursing into this blanket impl.
        Runnable::shutdown(self)
    }
}
/// Scheduler provides interface to schedule task to underlying workers.
pub struct Scheduler<T> {
    // Worker name, shared with clones; used for logging and metrics labels.
    name: Arc<String>,
    // Number of scheduled-but-not-yet-dequeued tasks (see `is_busy`).
    counter: Arc<AtomicUsize>,
    // `Some(task)` schedules a task; `None` asks the worker thread to exit.
    sender: Sender<Option<T>>,
}
impl<T: Display> Scheduler<T> {
    // Private: schedulers are only handed out by `Worker` (or `dummy_scheduler`).
    fn new<S: Into<String>>(name: S,
                            counter: AtomicUsize,
                            sender: Sender<Option<T>>)
                            -> Scheduler<T> {
        Scheduler {
            name: Arc::new(name.into()),
            counter: Arc::new(counter),
            sender: sender,
        }
    }
    /// Schedule a task to run.
    ///
    /// If the worker is stopped, an error will return.
    pub fn schedule(&self, task: T) -> Result<(), Stopped<T>> {
        debug!("scheduling task {}", task);
        // A failed send gives the task back inside the error so the caller
        // can recover it.
        if let Err(SendError(Some(t))) = self.sender.send(Some(task)) {
            return Err(Stopped(t));
        }
        self.counter.fetch_add(1, Ordering::SeqCst);
        PENDING_TASKS.with_label_values(&[&self.name]).inc();
        Ok(())
    }
    /// Check if underlying worker can't handle task immediately.
    pub fn is_busy(&self) -> bool {
        self.counter.load(Ordering::SeqCst) > 0
    }
}
// Hand-written because `derive(Clone)` would demand `T: Clone`, which tasks
// need not satisfy; all three fields are cheaply clonable handles.
impl<T: Display> Clone for Scheduler<T> {
    fn clone(&self) -> Scheduler<T> {
        let name = self.name.clone();
        let counter = self.counter.clone();
        let sender = self.sender.clone();
        Scheduler {
            name: name,
            counter: counter,
            sender: sender,
        }
    }
}
/// Create a scheduler that can't be scheduled any task.
///
/// Useful for test purpose.
#[cfg(test)]
pub fn dummy_scheduler<T: Display>() -> Scheduler<T> {
    // The receiver is dropped immediately, so every `schedule` call fails
    // with `Stopped`.
    let (tx, _) = mpsc::channel();
    Scheduler::new("dummy scheduler", AtomicUsize::new(0), tx)
}
/// A worker that can schedule time consuming tasks.
pub struct Worker<T: Display> {
    // Handle used (and handed out) to enqueue tasks.
    scheduler: Scheduler<T>,
    // Receiving end of the channel; taken by the background thread on `start`.
    receiver: Mutex<Option<Receiver<Option<T>>>>,
    // Join handle of the background thread; `None` until started or after `stop`.
    handle: Option<JoinHandle<()>>,
}
// Background-thread loop: block for one task, then opportunistically drain up
// to `batch_size` tasks before handing the batch to the runner. Unlike the
// pre-shutdown version, every exit path falls through to `runner.shutdown()`
// (note the `break` instead of `return` in the first match).
fn poll<R, T>(mut runner: R, rx: Receiver<Option<T>>, counter: Arc<AtomicUsize>, batch_size: usize)
    where R: BatchRunnable<T> + Send + 'static,
          T: Display + Send + 'static
{
    // Safe to unwrap: the spawning side always names this thread via `Builder`.
    let name = thread::current().name().unwrap().to_owned();
    let mut keep_going = true;
    let mut buffer = Vec::with_capacity(batch_size);
    while keep_going {
        // Block until at least one task arrives.
        let t = rx.recv();
        match t {
            Ok(Some(t)) => buffer.push(t),
            // `Ok(None)` (stop request) or `Err` (all senders gone):
            // `break`, not `return`, so `shutdown` still runs below.
            _ => break,
        }
        // Non-blocking drain to fill the rest of the batch.
        while buffer.len() < batch_size {
            match rx.try_recv() {
                Ok(None) => {
                    // Stop requested: process what we have, then exit the outer loop.
                    keep_going = false;
                    break;
                }
                Ok(Some(t)) => buffer.push(t),
                _ => break,
            }
        }
        // Dequeued tasks no longer count as pending for `is_busy`/metrics,
        // even though they have not been run yet.
        counter.fetch_sub(buffer.len(), Ordering::SeqCst);
        PENDING_TASKS.with_label_values(&[&name]).sub(buffer.len() as f64);
        runner.run_batch(&mut buffer);
        buffer.clear();
    }
    // Give the runner a chance to release resources before the thread exits.
    runner.shutdown();
}
impl<T: Display + Send + 'static> Worker<T> {
    /// Create a worker.
    ///
    /// The worker does nothing until `start` (or `start_batch`) is called.
    pub fn new<S: Into<String>>(name: S) -> Worker<T> {
        let (tx, rx) = mpsc::channel();
        Worker {
            scheduler: Scheduler::new(name, AtomicUsize::new(0), tx),
            receiver: Mutex::new(Some(rx)),
            handle: None,
        }
    }
    /// Start the worker.
    pub fn start<R: Runnable<T> + Send + 'static>(&mut self, runner: R) -> Result<(), io::Error> {
        // A plain `Runnable` is just a batch of size 1.
        self.start_batch(runner, 1)
    }
    /// Start the worker with a runner that handles tasks in batches of at
    /// most `batch_size`.
    ///
    /// Starting an already started worker is a no-op (a warning is logged).
    pub fn start_batch<R>(&mut self, runner: R, batch_size: usize) -> Result<(), io::Error>
        where R: BatchRunnable<T> + Send + 'static
    {
        let mut receiver = self.receiver.lock().unwrap();
        info!("starting working thread: {}", self.scheduler.name);
        if receiver.is_none() {
            warn!("worker {} has been started.", self.scheduler.name);
            return Ok(());
        }
        let rx = receiver.take().unwrap();
        let counter = self.scheduler.counter.clone();
        let h = try!(Builder::new()
            .name(thd_name!(self.scheduler.name.as_ref()))
            .spawn(move || poll(runner, rx, counter, batch_size)));
        self.handle = Some(h);
        Ok(())
    }
    /// Get a scheduler to schedule task.
    pub fn scheduler(&self) -> Scheduler<T> {
        self.scheduler.clone()
    }
    /// Schedule a task to run.
    ///
    /// If the worker is stopped, an error will return.
    pub fn schedule(&self, task: T) -> Result<(), Stopped<T>> {
        self.scheduler.schedule(task)
    }
    /// Check if underlying worker can't handle task immediately.
    pub fn is_busy(&self) -> bool {
        // A worker that was never started (or was stopped) is always "busy".
        self.handle.is_none() || self.scheduler.is_busy()
    }
    /// Name of the worker, as passed to `new`.
    pub fn name(&self) -> &str {
        self.scheduler.name.as_str()
    }
    /// Stop the worker thread.
    ///
    /// Returns the join handle of the background thread, if it was started.
    pub fn stop(&mut self) -> Option<thread::JoinHandle<()>> {
        // close sender explicitly so the background thread will exit.
        // (Typo fix: was "stoping".)
        info!("stopping {}", self.scheduler.name);
        if self.handle.is_none() {
            return None;
        }
        if let Err(e) = self.scheduler.sender.send(None) {
            warn!("failed to stop worker thread: {:?}", e);
        }
        self.handle.take()
    }
}
#[cfg(test)]
mod test {
    use std::thread;
    use std::sync::*;
    use std::sync::mpsc::*;
    use std::time::Duration;
    use super::*;

    // Echoes each task back over a channel, then sleeps `step` millis so the
    // worker stays busy long enough for `is_busy` assertions. Sends a sentinel
    // `0` on shutdown.
    struct StepRunner {
        ch: Sender<u64>,
    }
    impl Runnable<u64> for StepRunner {
        fn run(&mut self, step: u64) {
            self.ch.send(step).unwrap();
            thread::sleep(Duration::from_millis(step));
        }
        fn shutdown(&mut self) {
            self.ch.send(0).unwrap();
        }
    }

    // Echoes each whole batch back over a channel; sends an empty vector as
    // the shutdown sentinel.
    struct BatchRunner {
        ch: Sender<Vec<u64>>,
    }
    impl BatchRunnable<u64> for BatchRunner {
        fn run_batch(&mut self, ms: &mut Vec<u64>) {
            self.ch.send(ms.to_vec()).unwrap();
        }
        fn shutdown(&mut self) {
            self.ch.send(vec![]).unwrap();
        }
    }

    #[test]
    fn test_worker() {
        let mut worker = Worker::new("test-worker");
        let (tx, rx) = mpsc::channel();
        worker.start(StepRunner { ch: tx }).unwrap();
        assert!(!worker.is_busy());
        worker.schedule(60).unwrap();
        worker.schedule(40).unwrap();
        worker.schedule(50).unwrap();
        assert!(worker.is_busy());
        // Tasks are handled in FIFO order.
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 60);
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 40);
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 50);
        assert!(!worker.is_busy());
        worker.stop().unwrap().join().unwrap();
        // now worker can't handle any task
        assert!(worker.is_busy());
        // when shutdown, StepRunner should send back a 0.
        assert_eq!(0, rx.recv().unwrap());
    }

    #[test]
    fn test_threaded() {
        // Scheduling from another thread via a cloned scheduler must work.
        let mut worker = Worker::new("test-worker-threaded");
        let (tx, rx) = mpsc::channel();
        worker.start(StepRunner { ch: tx }).unwrap();
        let scheduler = worker.scheduler();
        thread::spawn(move || {
            scheduler.schedule(90).unwrap();
            scheduler.schedule(110).unwrap();
        });
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 90);
        assert_eq!(rx.recv_timeout(Duration::from_secs(3)).unwrap(), 110);
        worker.stop().unwrap().join().unwrap();
        // Shutdown sentinel arrives after the thread is joined.
        assert_eq!(0, rx.recv().unwrap());
    }

    #[test]
    fn test_batch() {
        let mut worker = Worker::new("test-worker-batch");
        let (tx, rx) = mpsc::channel();
        worker.start_batch(BatchRunner { ch: tx }, 10).unwrap();
        for _ in 0..20 {
            worker.schedule(50).unwrap();
        }
        worker.stop().unwrap().join().unwrap();
        // Sum whatever batching happened; only the total is deterministic.
        let mut sum = 0;
        loop {
            let v = rx.recv_timeout(Duration::from_secs(3)).unwrap();
            // when runner is shutdown, it will send back an empty vector.
            if v.is_empty() {
                break;
            }
            sum += v.into_iter().fold(0, |a, b| a + b);
        }
        assert_eq!(sum, 50 * 20);
        // Sender was dropped with the runner; nothing more can arrive.
        assert!(rx.recv().is_err());
    }

    #[test]
    fn test_autowired_batch() {
        // A plain Runnable used through start_batch: the blanket impl must
        // forward both run_batch and shutdown.
        let mut worker = Worker::new("test-worker-batch");
        let (tx, rx) = mpsc::channel();
        worker.start_batch(StepRunner { ch: tx }, 10).unwrap();
        for _ in 0..20 {
            worker.schedule(50).unwrap();
        }
        worker.stop().unwrap().join().unwrap();
        for _ in 0..20 {
            rx.recv_timeout(Duration::from_secs(3)).unwrap();
        }
        assert_eq!(rx.recv().unwrap(), 0);
    }
}
|
// Compile-fail fixture for clippy's `print_with_newline` lint: a `print!`
// whose format string ends in "\n" should be `println!` instead. The
// `//~ERROR` annotations mark the lines the lint is expected to flag.
#![feature(plugin)]
#![plugin(clippy)]
#![deny(print_with_newline)]

fn main() {
    print!("Hello");
    print!("Hello\n"); //~ERROR using `print!()` with a format string
    print!("Hello {}\n", "world"); //~ERROR using `print!()` with a format string
    print!("Hello {} {}\n\n", "world", "#2"); //~ERROR using `print!()` with a format string

    // these are all fine
    println!("Hello");
    println!("Hello\n");
    println!("Hello {}\n", "world");
}
chmod -x tests/compile-fail/print_with_newline.rs
// Compile-fail fixture for clippy's `print_with_newline` lint: a `print!`
// whose format string ends in "\n" should be `println!` instead. The
// `//~ERROR` annotations mark the lines the lint is expected to flag.
#![feature(plugin)]
#![plugin(clippy)]
#![deny(print_with_newline)]

fn main() {
    print!("Hello");
    print!("Hello\n"); //~ERROR using `print!()` with a format string
    print!("Hello {}\n", "world"); //~ERROR using `print!()` with a format string
    print!("Hello {} {}\n\n", "world", "#2"); //~ERROR using `print!()` with a format string

    // these are all fine
    println!("Hello");
    println!("Hello\n");
    println!("Hello {}\n", "world");
}
|
use std::ptr::null;
use libc::{c_char, c_int, c_double};
use std::ffi::CString;
use std::cell::RefCell;
use utils::_string;
use vector::ogr;
use geo;
/// OGR Geometry
pub struct Geometry {
    // Lazily-populated pointer to the underlying OGR geometry; `None` until
    // `set_c_geometry` is called (see `lazy_feature_geometry`).
    c_geometry_ref: RefCell<Option<*const ()>>,
    // When `true`, `Drop` frees the C geometry via `OGR_G_DestroyGeometry`;
    // when `false`, the memory is owned elsewhere (e.g. by a GDAL feature).
    owned: bool,
}
impl Geometry {
    /// Create an unbound `Geometry` that will later be attached to a
    /// feature's geometry via `set_c_geometry`.
    ///
    /// # Safety
    /// The object is unusable until `set_c_geometry` has been called with a
    /// pointer that stays valid for this object's entire lifetime.
    pub unsafe fn lazy_feature_geometry() -> Geometry {
        // Geometry objects created with this method map to a Feature's
        // geometry whose memory is managed by the GDAL feature.
        // This object has a tricky lifecycle:
        //
        // * Initially it's created with a null c_geometry
        // * The first time `Feature::geometry` is called, it gets
        //   c_geometry from GDAL and calls `set_c_geometry` with it.
        // * When the Feature is destroyed, this object is also destroyed,
        //   which is good, because that's when c_geometry (which is managed
        //   by the GDAL feature) becomes invalid. Because `self.owned` is
        //   `false`, we don't call `OGR_G_DestroyGeometry`.
        return Geometry{c_geometry_ref: RefCell::new(None), owned: false};
    }

    /// Whether the underlying C geometry pointer has been set.
    pub fn has_gdal_ptr(&self) -> bool {
        return self.c_geometry_ref.borrow().is_some();
    }

    /// Attach an externally-owned OGR geometry pointer to this object.
    ///
    /// # Safety
    /// `c_geometry` must stay valid for the lifetime of `self`; this object
    /// will not free it (the assertions enforce it is unset and non-owning).
    pub unsafe fn set_c_geometry(&self, c_geometry: *const ()) {
        assert!(! self.has_gdal_ptr());
        assert_eq!(self.owned, false);
        *(self.c_geometry_ref.borrow_mut()) = Some(c_geometry);
    }

    // Wrap a C geometry pointer, taking ownership: `Drop` will call
    // `OGR_G_DestroyGeometry` on it.
    unsafe fn with_c_geometry(c_geom: *const()) -> Geometry {
        return Geometry{
            c_geometry_ref: RefCell::new(Some(c_geom)),
            owned: true,
        };
    }

    /// Create an empty geometry of the given WKB type
    /// (e.g. `ogr::WKB_POINT`). Panics if GDAL fails to allocate it.
    pub fn empty(wkb_type: c_int) -> Geometry {
        let c_geom = unsafe { ogr::OGR_G_CreateGeometry(wkb_type) };
        assert!(c_geom != null());
        return unsafe { Geometry::with_c_geometry(c_geom) };
    }

    /// Create a geometry by parsing a
    /// [WKT](https://en.wikipedia.org/wiki/Well-known_text) string.
    ///
    /// Panics if the string does not parse.
    pub fn from_wkt(wkt: &str) -> Geometry {
        let c_wkt = CString::new(wkt.as_bytes()).unwrap();
        // OGR advances this pointer while parsing; `c_wkt` keeps the buffer
        // alive for the duration of the call.
        let mut c_wkt_ptr: *const c_char = c_wkt.as_ptr();
        let mut c_geom: *const () = null();
        let rv = unsafe { ogr::OGR_G_CreateFromWkt(&mut c_wkt_ptr, null(), &mut c_geom) };
        assert_eq!(rv, ogr::OGRERR_NONE);
        return unsafe { Geometry::with_c_geometry(c_geom) };
    }

    /// Create a rectangular geometry from West, South, East and North values.
    pub fn bbox(w: f64, s: f64, e: f64, n: f64) -> Geometry {
        // Closed ring: the first vertex (w, n) is repeated at the end.
        Geometry::from_wkt(&format!(
            "POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}))",
            w, n,
            e, n,
            e, s,
            w, s,
            w, n,
        ))
    }

    /// Serialize the geometry as JSON.
    pub fn json(&self) -> String {
        let c_json = unsafe { ogr::OGR_G_ExportToJson(self.c_geometry()) };
        let rv = _string(c_json);
        // The C string was allocated by GDAL; release it with VSIFree.
        unsafe { ogr::VSIFree(c_json as *mut ()) };
        return rv;
    }

    /// Serialize the geometry as WKT.
    pub fn wkt(&self) -> String {
        let mut c_wkt: *const c_char = null();
        let _err = unsafe { ogr::OGR_G_ExportToWkt(self.c_geometry(), &mut c_wkt) };
        assert_eq!(_err, ogr::OGRERR_NONE);
        let wkt = _string(c_wkt);
        // The C string was allocated by GDAL; release it with OGRFree.
        unsafe { ogr::OGRFree(c_wkt as *mut ()) };
        return wkt;
    }

    /// Raw pointer to the underlying OGR geometry.
    ///
    /// # Safety
    /// Panics if the pointer has not been set yet; the returned pointer must
    /// not outlive `self`.
    pub unsafe fn c_geometry(&self) -> *const () {
        return self.c_geometry_ref.borrow().unwrap();
    }

    /// Set vertex `i` to the 2D position `p` = (x, y).
    pub fn set_point_2d(&mut self, i: usize, p: (f64, f64)) {
        let (x, y) = p;
        unsafe { ogr::OGR_G_SetPoint_2D(
            self.c_geometry(),
            i as c_int,
            x as c_double,
            y as c_double,
        ) };
    }

    /// Fetch vertex `i` as an (x, y, z) triple.
    pub fn get_point(&self, i: i32) -> (f64, f64, f64) {
        let mut x: c_double = 0.;
        let mut y: c_double = 0.;
        let mut z: c_double = 0.;
        unsafe { ogr::OGR_G_GetPoint(self.c_geometry(), i, &mut x, &mut y, &mut z) };
        return (x as f64, y as f64, z as f64);
    }

    /// Collect all vertices of the geometry as (x, y, z) triples.
    pub fn get_point_vec(&self) -> Vec<(f64, f64, f64)> {
        let length = unsafe{ ogr::OGR_G_GetPointCount(self.c_geometry()) };
        return (0..length).map(|i| self.get_point(i)).collect();
    }

    /// Compute the convex hull of this geometry.
    pub fn convex_hull(&self) -> Geometry {
        let c_geom = unsafe { ogr::OGR_G_ConvexHull(self.c_geometry()) };
        return unsafe { Geometry::with_c_geometry(c_geom) };
    }
}
impl geo::ToGeo for Geometry {
    /// Convert this OGR geometry to a pure-Rust `geo::Geometry`.
    ///
    /// Supports `WKB_POINT` and `WKB_LINESTRING`; panics on any other
    /// geometry type. The z coordinate is discarded.
    fn to_geo(&self) -> geo::Geometry {
        let geometry_type = unsafe { ogr::OGR_G_GetGeometryType(self.c_geometry()) };
        match geometry_type {
            ogr::WKB_POINT => {
                let (x, y, _) = self.get_point(0);
                geo::Geometry::Point(geo::Point(geo::Coordinate{x: x, y: y}))
            },
            ogr::WKB_LINESTRING => {
                let coords = self.get_point_vec().iter()
                    .map(|&(x, y, _)| geo::Point(geo::Coordinate{x: x, y: y}))
                    .collect();
                geo::Geometry::LineString(geo::LineString(coords))
            },
            _ => panic!("Unknown geometry type")
        }
    }
}
impl Drop for Geometry {
    fn drop(&mut self) {
        // Only free geometries we own; non-owned pointers (e.g. a feature's
        // geometry) are released by their actual owner.
        if self.owned {
            let c_geometry = self.c_geometry_ref.borrow();
            unsafe { ogr::OGR_G_DestroyGeometry(c_geometry.unwrap() as *mut ()) };
        }
    }
}
/// Conversion of pure-Rust `geo` types into OGR-backed `Geometry` objects.
pub trait ToGdal {
    fn to_gdal(&self) -> Geometry;
}
impl ToGdal for geo::Point {
    /// Build a single-vertex OGR point geometry from this `geo::Point`.
    fn to_gdal(&self) -> Geometry {
        let geo::Point(ref coord) = *self;
        let mut point = Geometry::empty(ogr::WKB_POINT);
        point.set_point_2d(0, (coord.x, coord.y));
        point
    }
}
impl ToGdal for geo::LineString {
    /// Build an OGR linestring geometry from this `geo::LineString`,
    /// copying every vertex in order.
    fn to_gdal(&self) -> Geometry {
        let mut line = Geometry::empty(ogr::WKB_LINESTRING);
        let geo::LineString(ref points) = *self;
        for (idx, point) in points.iter().enumerate() {
            let geo::Point(ref coord) = *point;
            line.set_point_2d(idx, (coord.x, coord.y));
        }
        line
    }
}
impl ToGdal for geo::Geometry {
    /// Dispatch to the concrete variant's conversion.
    ///
    /// Panics for variants other than `Point` and `LineString`.
    fn to_gdal(&self) -> Geometry {
        match self {
            &geo::Geometry::Point(ref point) => point.to_gdal(),
            &geo::Geometry::LineString(ref line) => line.to_gdal(),
            _ => panic!("Unknown geometry type"),
        }
    }
}
#[cfg(test)]
mod tests {
    use vector::{Geometry, ToGdal};
    use geo;
    use geo::ToGeo;

    // Round-trips a point: WKT -> OGR Geometry -> geo::Geometry, and
    // geo::Geometry -> OGR Geometry -> WKT.
    #[test]
    fn test_import_export_point() {
        let wkt = "POINT (1 2)";
        let coord = geo::Coordinate{x: 1., y: 2.};
        let geo = geo::Geometry::Point(geo::Point(coord));
        assert_eq!(Geometry::from_wkt(wkt).to_geo(), geo);
        assert_eq!(geo.to_gdal().wkt(), wkt);
    }

    // Same round trip for a three-vertex linestring.
    #[test]
    fn test_import_export_linestring() {
        let wkt = "LINESTRING (0 0,0 1,1 2)";
        let coord = vec!(
            geo::Point(geo::Coordinate{x: 0., y: 0.}),
            geo::Point(geo::Coordinate{x: 0., y: 1.}),
            geo::Point(geo::Coordinate{x: 1., y: 2.}),
        );
        let geo = geo::Geometry::LineString(geo::LineString(coord));
        assert_eq!(Geometry::from_wkt(wkt).to_geo(), geo);
        assert_eq!(geo.to_gdal().wkt(), wkt);
    }
}
get a geometry wrapper for a sub-geometry
use std::ptr::null;
use libc::{c_char, c_int, c_double};
use std::ffi::CString;
use std::cell::RefCell;
use utils::_string;
use vector::ogr;
use geo;
/// OGR Geometry
pub struct Geometry {
    // Lazily-populated pointer to the underlying OGR geometry; `None` until
    // `set_c_geometry` is called (see `lazy_feature_geometry`).
    c_geometry_ref: RefCell<Option<*const ()>>,
    // When `true`, `Drop` frees the C geometry via `OGR_G_DestroyGeometry`;
    // when `false`, the memory is owned elsewhere (e.g. by a GDAL feature).
    owned: bool,
}
impl Geometry {
    /// Create an unbound `Geometry` that will later be attached to a
    /// feature's geometry via `set_c_geometry`.
    ///
    /// # Safety
    /// The object is unusable until `set_c_geometry` has been called with a
    /// pointer that stays valid for this object's entire lifetime.
    pub unsafe fn lazy_feature_geometry() -> Geometry {
        // Geometry objects created with this method map to a Feature's
        // geometry whose memory is managed by the GDAL feature.
        // This object has a tricky lifecycle:
        //
        // * Initially it's created with a null c_geometry
        // * The first time `Feature::geometry` is called, it gets
        //   c_geometry from GDAL and calls `set_c_geometry` with it.
        // * When the Feature is destroyed, this object is also destroyed,
        //   which is good, because that's when c_geometry (which is managed
        //   by the GDAL feature) becomes invalid. Because `self.owned` is
        //   `false`, we don't call `OGR_G_DestroyGeometry`.
        return Geometry{c_geometry_ref: RefCell::new(None), owned: false};
    }

    /// Whether the underlying C geometry pointer has been set.
    pub fn has_gdal_ptr(&self) -> bool {
        return self.c_geometry_ref.borrow().is_some();
    }

    /// Attach an externally-owned OGR geometry pointer to this object.
    ///
    /// # Safety
    /// `c_geometry` must stay valid for the lifetime of `self`; this object
    /// will not free it (the assertions enforce it is unset and non-owning).
    pub unsafe fn set_c_geometry(&self, c_geometry: *const ()) {
        assert!(! self.has_gdal_ptr());
        assert_eq!(self.owned, false);
        *(self.c_geometry_ref.borrow_mut()) = Some(c_geometry);
    }

    // Wrap a C geometry pointer. When `owned` is true, `Drop` will call
    // `OGR_G_DestroyGeometry` on it; otherwise the caller's owner frees it.
    unsafe fn with_c_geometry(c_geom: *const(), owned: bool) -> Geometry {
        return Geometry{
            c_geometry_ref: RefCell::new(Some(c_geom)),
            owned: owned,
        };
    }

    /// Create an empty geometry of the given WKB type
    /// (e.g. `ogr::WKB_POINT`). Panics if GDAL fails to allocate it.
    pub fn empty(wkb_type: c_int) -> Geometry {
        let c_geom = unsafe { ogr::OGR_G_CreateGeometry(wkb_type) };
        assert!(c_geom != null());
        return unsafe { Geometry::with_c_geometry(c_geom, true) };
    }

    /// Create a geometry by parsing a
    /// [WKT](https://en.wikipedia.org/wiki/Well-known_text) string.
    ///
    /// Panics if the string does not parse.
    pub fn from_wkt(wkt: &str) -> Geometry {
        let c_wkt = CString::new(wkt.as_bytes()).unwrap();
        // OGR advances this pointer while parsing; `c_wkt` keeps the buffer
        // alive for the duration of the call.
        let mut c_wkt_ptr: *const c_char = c_wkt.as_ptr();
        let mut c_geom: *const () = null();
        let rv = unsafe { ogr::OGR_G_CreateFromWkt(&mut c_wkt_ptr, null(), &mut c_geom) };
        assert_eq!(rv, ogr::OGRERR_NONE);
        return unsafe { Geometry::with_c_geometry(c_geom, true) };
    }

    /// Create a rectangular geometry from West, South, East and North values.
    pub fn bbox(w: f64, s: f64, e: f64, n: f64) -> Geometry {
        // Closed ring: the first vertex (w, n) is repeated at the end.
        Geometry::from_wkt(&format!(
            "POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}))",
            w, n,
            e, n,
            e, s,
            w, s,
            w, n,
        ))
    }

    /// Serialize the geometry as JSON.
    pub fn json(&self) -> String {
        let c_json = unsafe { ogr::OGR_G_ExportToJson(self.c_geometry()) };
        let rv = _string(c_json);
        // The C string was allocated by GDAL; release it with VSIFree.
        unsafe { ogr::VSIFree(c_json as *mut ()) };
        return rv;
    }

    /// Serialize the geometry as WKT.
    pub fn wkt(&self) -> String {
        let mut c_wkt: *const c_char = null();
        let _err = unsafe { ogr::OGR_G_ExportToWkt(self.c_geometry(), &mut c_wkt) };
        assert_eq!(_err, ogr::OGRERR_NONE);
        let wkt = _string(c_wkt);
        // The C string was allocated by GDAL; release it with OGRFree.
        unsafe { ogr::OGRFree(c_wkt as *mut ()) };
        return wkt;
    }

    /// Raw pointer to the underlying OGR geometry.
    ///
    /// # Safety
    /// Panics if the pointer has not been set yet; the returned pointer must
    /// not outlive `self`.
    pub unsafe fn c_geometry(&self) -> *const () {
        return self.c_geometry_ref.borrow().unwrap();
    }

    /// Set vertex `i` to the 2D position `p` = (x, y).
    pub fn set_point_2d(&mut self, i: usize, p: (f64, f64)) {
        let (x, y) = p;
        unsafe { ogr::OGR_G_SetPoint_2D(
            self.c_geometry(),
            i as c_int,
            x as c_double,
            y as c_double,
        ) };
    }

    /// Fetch vertex `i` as an (x, y, z) triple.
    pub fn get_point(&self, i: i32) -> (f64, f64, f64) {
        let mut x: c_double = 0.;
        let mut y: c_double = 0.;
        let mut z: c_double = 0.;
        unsafe { ogr::OGR_G_GetPoint(self.c_geometry(), i, &mut x, &mut y, &mut z) };
        return (x as f64, y as f64, z as f64);
    }

    /// Collect all vertices of the geometry as (x, y, z) triples.
    pub fn get_point_vec(&self) -> Vec<(f64, f64, f64)> {
        let length = unsafe{ ogr::OGR_G_GetPointCount(self.c_geometry()) };
        return (0..length).map(|i| self.get_point(i)).collect();
    }

    /// Compute the convex hull of this geometry.
    pub fn convex_hull(&self) -> Geometry {
        let c_geom = unsafe { ogr::OGR_G_ConvexHull(self.c_geometry()) };
        return unsafe { Geometry::with_c_geometry(c_geom, true) };
    }

    unsafe fn _get_geometry(&self, n: usize) -> Geometry {
        // get the n-th sub-geometry as a non-owned Geometry; don't keep this
        // object for long.
        let c_geom = ogr::OGR_G_GetGeometryRef(self.c_geometry(), n as c_int);
        return Geometry::with_c_geometry(c_geom, false);
    }
}
impl geo::ToGeo for Geometry {
    /// Convert this OGR geometry to a pure-Rust `geo::Geometry`.
    ///
    /// Supports `WKB_POINT` and `WKB_LINESTRING`; panics on any other
    /// geometry type. The z coordinate is discarded.
    fn to_geo(&self) -> geo::Geometry {
        let geometry_type = unsafe { ogr::OGR_G_GetGeometryType(self.c_geometry()) };
        match geometry_type {
            ogr::WKB_POINT => {
                let (x, y, _) = self.get_point(0);
                geo::Geometry::Point(geo::Point(geo::Coordinate{x: x, y: y}))
            },
            ogr::WKB_LINESTRING => {
                let coords = self.get_point_vec().iter()
                    .map(|&(x, y, _)| geo::Point(geo::Coordinate{x: x, y: y}))
                    .collect();
                geo::Geometry::LineString(geo::LineString(coords))
            },
            _ => panic!("Unknown geometry type")
        }
    }
}
impl Drop for Geometry {
    fn drop(&mut self) {
        // Only free geometries we own; non-owned pointers (e.g. a feature's
        // geometry or a sub-geometry ref) are released by their owner.
        if self.owned {
            let c_geometry = self.c_geometry_ref.borrow();
            unsafe { ogr::OGR_G_DestroyGeometry(c_geometry.unwrap() as *mut ()) };
        }
    }
}
/// Conversion of pure-Rust `geo` types into OGR-backed `Geometry` objects.
pub trait ToGdal {
    fn to_gdal(&self) -> Geometry;
}
impl ToGdal for geo::Point {
    /// Build a single-vertex OGR point geometry from this `geo::Point`.
    fn to_gdal(&self) -> Geometry {
        let geo::Point(ref coord) = *self;
        let mut point = Geometry::empty(ogr::WKB_POINT);
        point.set_point_2d(0, (coord.x, coord.y));
        point
    }
}
impl ToGdal for geo::LineString {
    /// Build an OGR linestring geometry from this `geo::LineString`,
    /// copying every vertex in order.
    fn to_gdal(&self) -> Geometry {
        let mut line = Geometry::empty(ogr::WKB_LINESTRING);
        let geo::LineString(ref points) = *self;
        for (idx, point) in points.iter().enumerate() {
            let geo::Point(ref coord) = *point;
            line.set_point_2d(idx, (coord.x, coord.y));
        }
        line
    }
}
impl ToGdal for geo::Geometry {
    /// Dispatch to the concrete variant's conversion.
    ///
    /// Panics for variants other than `Point` and `LineString`.
    fn to_gdal(&self) -> Geometry {
        match self {
            &geo::Geometry::Point(ref point) => point.to_gdal(),
            &geo::Geometry::LineString(ref line) => line.to_gdal(),
            _ => panic!("Unknown geometry type"),
        }
    }
}
#[cfg(test)]
mod tests {
    use vector::{Geometry, ToGdal};
    use geo;
    use geo::ToGeo;

    // Round-trips a point: WKT -> OGR Geometry -> geo::Geometry, and
    // geo::Geometry -> OGR Geometry -> WKT.
    #[test]
    fn test_import_export_point() {
        let wkt = "POINT (1 2)";
        let coord = geo::Coordinate{x: 1., y: 2.};
        let geo = geo::Geometry::Point(geo::Point(coord));
        assert_eq!(Geometry::from_wkt(wkt).to_geo(), geo);
        assert_eq!(geo.to_gdal().wkt(), wkt);
    }

    // Same round trip for a three-vertex linestring.
    #[test]
    fn test_import_export_linestring() {
        let wkt = "LINESTRING (0 0,0 1,1 2)";
        let coord = vec!(
            geo::Point(geo::Coordinate{x: 0., y: 0.}),
            geo::Point(geo::Coordinate{x: 0., y: 1.}),
            geo::Point(geo::Coordinate{x: 1., y: 2.}),
        );
        let geo = geo::Geometry::LineString(geo::LineString(coord));
        assert_eq!(Geometry::from_wkt(wkt).to_geo(), geo);
        assert_eq!(geo.to_gdal().wkt(), wkt);
    }
}
|
//! Streams
//!
//! This module contains a number of functions for working with `Stream`s
//! that return `Result`s, allowing for short-circuiting computations.
use core::pin::Pin;
use futures_core::future::{Future, TryFuture};
use futures_core::stream::TryStream;
use futures_core::task::{Context, Poll};
#[cfg(feature = "compat")]
use crate::compat::Compat;
mod and_then;
pub use self::and_then::AndThen;
mod err_into;
pub use self::err_into::ErrInto;
mod inspect_ok;
pub use self::inspect_ok::InspectOk;
mod inspect_err;
pub use self::inspect_err::InspectErr;
mod into_stream;
pub use self::into_stream::IntoStream;
mod map_ok;
pub use self::map_ok::MapOk;
mod map_err;
pub use self::map_err::MapErr;
mod or_else;
pub use self::or_else::OrElse;
mod try_next;
pub use self::try_next::TryNext;
mod try_for_each;
pub use self::try_for_each::TryForEach;
mod try_filter;
pub use self::try_filter::TryFilter;
mod try_filter_map;
pub use self::try_filter_map::TryFilterMap;
mod try_collect;
pub use self::try_collect::TryCollect;
mod try_concat;
pub use self::try_concat::TryConcat;
mod try_fold;
pub use self::try_fold::TryFold;
mod try_skip_while;
pub use self::try_skip_while::TrySkipWhile;
cfg_target_has_atomic! {
#[cfg(feature = "alloc")]
mod try_buffer_unordered;
#[cfg(feature = "alloc")]
pub use self::try_buffer_unordered::TryBufferUnordered;
#[cfg(feature = "alloc")]
mod try_for_each_concurrent;
#[cfg(feature = "alloc")]
pub use self::try_for_each_concurrent::TryForEachConcurrent;
}
#[cfg(feature = "std")]
mod into_async_read;
#[cfg(feature = "std")]
pub use self::into_async_read::IntoAsyncRead;
// Blanket implementation: every `TryStream` automatically gets the
// `TryStreamExt` combinators defined below.
impl<S: ?Sized + TryStream> TryStreamExt for S {}
/// Adapters specific to `Result`-returning streams
pub trait TryStreamExt: TryStream {
/// Wraps the current stream in a new stream which converts the error type
/// into the one provided.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(()), Err(5i32)])
/// .err_into::<i64>();
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(())));
/// assert_eq!(await!(stream.try_next()), Err(5i64));
/// # })
/// ```
fn err_into<E>(self) -> ErrInto<Self, E>
where
Self: Sized,
Self::Error: Into<E>
{
ErrInto::new(self)
}
/// Wraps the current stream in a new stream which maps the success value
/// using the provided closure.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(5), Err(0)])
/// .map_ok(|x| x + 2);
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(7)));
/// assert_eq!(await!(stream.try_next()), Err(0));
/// # })
/// ```
fn map_ok<T, F>(self, f: F) -> MapOk<Self, F>
where
Self: Sized,
F: FnMut(Self::Ok) -> T,
{
MapOk::new(self, f)
}
/// Wraps the current stream in a new stream which maps the error value
/// using the provided closure.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(5), Err(0)])
/// .map_err(|x| x + 2);
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(5)));
/// assert_eq!(await!(stream.try_next()), Err(2));
/// # })
/// ```
fn map_err<E, F>(self, f: F) -> MapErr<Self, F>
where
Self: Sized,
F: FnMut(Self::Error) -> E,
{
MapErr::new(self, f)
}
/// Chain on a computation for when a value is ready, passing the successful
/// results to the provided closure `f`.
///
/// This function can be used to run a unit of work when the next successful
/// value on a stream is ready. The closure provided will be yielded a value
/// when ready, and the returned future will then be run to completion to
/// produce the next value on this stream.
///
/// Any errors produced by this stream will not be passed to the closure,
/// and will be passed through.
///
/// The returned value of the closure must implement the `TryFuture` trait
/// and can represent some more work to be done before the composed stream
/// is finished.
///
/// Note that this function consumes the receiving stream and returns a
/// wrapped version of it.
///
/// To process the entire stream and return a single future representing
/// success or error, use `try_for_each` instead.
///
/// # Examples
///
/// ```
/// use futures::channel::mpsc;
/// use futures::future;
/// use futures::stream::TryStreamExt;
///
/// let (_tx, rx) = mpsc::channel::<Result<i32, ()>>(1);
///
/// let rx = rx.and_then(|result| {
/// future::ok(if result % 2 == 0 {
/// Some(result)
/// } else {
/// None
/// })
/// });
/// ```
fn and_then<Fut, F>(self, f: F) -> AndThen<Self, Fut, F>
where F: FnMut(Self::Ok) -> Fut,
Fut: TryFuture<Error = Self::Error>,
Self: Sized,
{
AndThen::new(self, f)
}
/// Chain on a computation for when an error happens, passing the
/// erroneous result to the provided closure `f`.
///
/// This function can be used to run a unit of work and attempt to recover from
/// an error if one happens. The closure provided will be yielded an error
/// when one appears, and the returned future will then be run to completion
/// to produce the next value on this stream.
///
/// Any successful values produced by this stream will not be passed to the
/// closure, and will be passed through.
///
/// The returned value of the closure must implement the [`TryFuture`] trait
/// and can represent some more work to be done before the composed stream
/// is finished.
///
/// Note that this function consumes the receiving stream and returns a
/// wrapped version of it.
fn or_else<Fut, F>(self, f: F) -> OrElse<Self, Fut, F>
where F: FnMut(Self::Error) -> Fut,
Fut: TryFuture<Ok = Self::Ok>,
Self: Sized,
{
OrElse::new(self, f)
}
/// Do something with the success value of this stream, afterwards passing
/// it on.
///
/// This is similar to the `StreamExt::inspect` method where it allows
/// easily inspecting the success value as it passes through the stream, for
/// example to debug what's going on.
fn inspect_ok<F>(self, f: F) -> InspectOk<Self, F>
where F: FnMut(&Self::Ok),
Self: Sized,
{
InspectOk::new(self, f)
}
/// Do something with the error value of this stream, afterwards passing it on.
///
/// This is similar to the `StreamExt::inspect` method where it allows
/// easily inspecting the error value as it passes through the stream, for
/// example to debug what's going on.
fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
where F: FnMut(&Self::Error),
Self: Sized,
{
InspectErr::new(self, f)
}
/// Wraps a [`TryStream`] into a type that implements
/// [`Stream`](futures_core::Stream)
///
/// [`TryStream`]s currently do not implement the
/// [`Stream`](futures_core::Stream) trait because of limitations
/// of the compiler.
///
/// # Examples
///
/// ```
/// use futures::stream::{Stream, TryStream, TryStreamExt};
///
/// # type T = i32;
/// # type E = ();
/// fn make_try_stream() -> impl TryStream<Ok = T, Error = E> { // ... }
/// # futures::stream::empty()
/// # }
/// fn take_stream(stream: impl Stream<Item = Result<T, E>>) { /* ... */ }
///
/// take_stream(make_try_stream().into_stream());
/// ```
fn into_stream(self) -> IntoStream<Self>
where Self: Sized,
{
IntoStream::new(self)
}
/// Creates a future that attempts to resolve the next item in the stream.
/// If an error is encountered before the next item, the error is returned
/// instead.
///
/// This is similar to the `Stream::next` combinator, but returns a
/// `Result<Option<T>, E>` rather than an `Option<Result<T, E>>`, making
/// for easy use with the `?` operator.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream = stream::iter(vec![Ok(()), Err(())]);
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(())));
/// assert_eq!(await!(stream.try_next()), Err(()));
/// # })
/// ```
fn try_next(&mut self) -> TryNext<'_, Self>
where Self: Sized + Unpin,
{
TryNext::new(self)
}
/// Attempts to run this stream to completion, executing the provided
/// asynchronous closure for each element on the stream.
///
/// The provided closure will be called for each item this stream produces,
/// yielding a future. That future will then be executed to completion
/// before moving on to the next item.
///
/// The returned value is a [`Future`](futures_core::Future) where the
/// [`Output`](futures_core::Future::Output) type is
/// `Result<(), Self::Error>`. If any of the intermediate
/// futures or the stream returns an error, this future will return
/// immediately with an error.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let mut x = 0i32;
///
/// {
/// let fut = stream::repeat(Ok(1)).try_for_each(|item| {
/// x += item;
/// future::ready(if x == 3 { Err(()) } else { Ok(()) })
/// });
/// assert_eq!(await!(fut), Err(()));
/// }
///
/// assert_eq!(x, 3);
/// # })
/// ```
fn try_for_each<Fut, F>(self, f: F) -> TryForEach<Self, Fut, F>
where F: FnMut(Self::Ok) -> Fut,
Fut: TryFuture<Ok = (), Error=Self::Error>,
Self: Sized
{
TryForEach::new(self, f)
}
/// Skip elements on this stream while the provided asynchronous predicate
/// resolves to `true`.
///
/// This function is similar to [`StreamExt::skip_while`](crate::stream::StreamExt::skip_while)
/// but exits early if an error occurs.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(3), Ok(2)]);
/// let mut stream = stream.try_skip_while(|x| future::ready(Ok(*x < 3)));
///
/// let output: Result<Vec<i32>, i32> = await!(stream.try_collect());
/// assert_eq!(output, Ok(vec![3, 2]));
/// # })
/// ```
fn try_skip_while<Fut, F>(self, f: F) -> TrySkipWhile<Self, Fut, F>
where F: FnMut(&Self::Ok) -> Fut,
Fut: TryFuture<Ok = bool, Error = Self::Error>,
Self: Sized
{
TrySkipWhile::new(self, f)
}
/// Attempts to run this stream to completion, executing the provided asynchronous
/// closure for each element on the stream concurrently as elements become
/// available, exiting as soon as an error occurs.
///
/// This is similar to
/// [`StreamExt::for_each_concurrent`](super::StreamExt::for_each_concurrent),
/// but will resolve to an error immediately if the underlying stream or the provided
/// closure return an error.
///
/// This method is only available when the `std` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::oneshot;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let (tx1, rx1) = oneshot::channel();
/// let (tx2, rx2) = oneshot::channel();
/// let (_tx3, rx3) = oneshot::channel();
///
/// let stream = stream::iter(vec![rx1, rx2, rx3]);
/// let fut = stream.map(Ok).try_for_each_concurrent(
/// /* limit */ 2,
/// async move |rx| {
/// let res: Result<(), oneshot::Canceled> = await!(rx);
/// res
/// }
/// );
///
/// tx1.send(()).unwrap();
/// // Drop the second sender so that `rx2` resolves to `Canceled`.
/// drop(tx2);
///
/// // The final result is an error because the second future
/// // resulted in an error.
/// assert_eq!(Err(oneshot::Canceled), await!(fut));
/// # })
/// ```
#[cfg_attr(
feature = "cfg-target-has-atomic",
cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr"))
)]
#[cfg(feature = "alloc")]
fn try_for_each_concurrent<Fut, F>(
self,
limit: impl Into<Option<usize>>,
f: F,
) -> TryForEachConcurrent<Self, Fut, F>
where F: FnMut(Self::Ok) -> Fut,
Fut: Future<Output = Result<(), Self::Error>>,
Self: Sized,
{
TryForEachConcurrent::new(self, limit.into(), f)
}
/// Attempt to Collect all of the values of this stream into a vector,
/// returning a future representing the result of that computation.
///
/// This combinator will collect all successful results of this stream and
/// collect them into a `Vec<Self::Item>`. If an error happens then all
/// collected elements will be dropped and the error will be returned.
///
/// The returned future will be resolved when the stream terminates.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::executor::block_on;
/// use futures::stream::TryStreamExt;
/// use std::thread;
///
/// let (mut tx, rx) = mpsc::unbounded();
///
/// thread::spawn(move || {
/// for i in (1..=5) {
/// tx.unbounded_send(Ok(i)).unwrap();
/// }
/// tx.unbounded_send(Err(6)).unwrap();
/// });
///
/// let output: Result<Vec<i32>, i32> = await!(rx.try_collect());
/// assert_eq!(output, Err(6));
/// # })
/// ```
fn try_collect<C: Default + Extend<Self::Ok>>(self) -> TryCollect<Self, C>
where Self: Sized
{
TryCollect::new(self)
}
/// Attempt to filter the values produced by this stream according to the
/// provided asynchronous closure.
///
/// As values of this stream are made available, the provided predicate `f`
/// will be run on them. If the predicate returns a `Future` which resolves
/// to `true`, then the stream will yield the value, but if the predicate
/// return a `Future` which resolves to `false`, then the value will be
/// discarded and the next value will be produced.
///
/// All errors are passed through without filtering in this combinator.
///
/// Note that this function consumes the stream passed into it and returns a
/// wrapped version of it, similar to the existing `filter` methods in
/// the standard library.
///
/// # Examples
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::executor::block_on;
/// use futures::future;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok(1i32), Ok(2i32), Ok(3i32), Err("error")]);
/// let mut evens = stream.try_filter(|x| {
/// future::ready(x % 2 == 0)
/// });
///
/// assert_eq!(await!(evens.next()), Some(Ok(2)));
/// assert_eq!(await!(evens.next()), Some(Err("error")));
/// # })
/// ```
fn try_filter<Fut, F>(self, f: F) -> TryFilter<Self, Fut, F>
where Fut: Future<Output = bool>,
F: FnMut(&Self::Ok) -> Fut,
Self: Sized
{
TryFilter::new(self, f)
}
/// Attempt to filter the values produced by this stream while
/// simultaneously mapping them to a different type according to the
/// provided asynchronous closure.
///
/// As values of this stream are made available, the provided function will
/// be run on them. If the future returned by the predicate `f` resolves to
/// [`Some(item)`](Some) then the stream will yield the value `item`, but if
/// it resolves to [`None`] then the next value will be produced.
///
/// All errors are passed through without filtering in this combinator.
///
/// Note that this function consumes the stream passed into it and returns a
/// wrapped version of it, similar to the existing `filter_map` methods in
/// the standard library.
///
/// # Examples
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::executor::block_on;
/// use futures::future;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok(1i32), Ok(6i32), Err("error")]);
/// let mut halves = stream.try_filter_map(|x| {
/// let ret = if x % 2 == 0 { Some(x / 2) } else { None };
/// future::ready(Ok(ret))
/// });
///
/// assert_eq!(await!(halves.next()), Some(Ok(3)));
/// assert_eq!(await!(halves.next()), Some(Err("error")));
/// # })
/// ```
fn try_filter_map<Fut, F, T>(self, f: F) -> TryFilterMap<Self, Fut, F>
where Fut: TryFuture<Ok = Option<T>, Error = Self::Error>,
F: FnMut(Self::Ok) -> Fut,
Self: Sized
{
TryFilterMap::new(self, f)
}
/// Attempt to execute an accumulating asynchronous computation over a
/// stream, collecting all the values into one final result.
///
/// This combinator will accumulate all values returned by this stream
/// according to the closure provided. The initial state is also provided to
/// this method and then is returned again by each execution of the closure.
/// Once the entire stream has been exhausted the returned future will
/// resolve to this value.
///
/// This method is similar to [`fold`](super::StreamExt::fold), but will
/// exit early if an error is encountered in either the stream or the
/// provided closure.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let number_stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2)]);
/// let sum = number_stream.try_fold(0, |acc, x| future::ready(Ok(acc + x)));
/// assert_eq!(await!(sum), Ok(3));
///
/// let number_stream_with_err = stream::iter(vec![Ok::<i32, i32>(1), Err(2), Ok(1)]);
/// let sum = number_stream_with_err.try_fold(0, |acc, x| future::ready(Ok(acc + x)));
/// assert_eq!(await!(sum), Err(2));
/// # })
/// ```
fn try_fold<T, Fut, F>(self, init: T, f: F) -> TryFold<Self, Fut, T, F>
where
    Self: Sized,
    F: FnMut(T, Self::Ok) -> Fut,
    Fut: TryFuture<Ok = T, Error = Self::Error>,
{
    TryFold::new(self, f, init)
}
/// Attempt to concatenate all items of a stream into a single
/// extendable destination, returning a future representing the end result.
///
/// This combinator will extend the first item with the contents of all
/// the subsequent successful results of the stream. If the stream is empty,
/// the default value will be returned.
///
/// Works with all collections that implement the [`Extend`](std::iter::Extend) trait.
///
/// This method is similar to [`concat`](super::StreamExt::concat), but will
/// exit early if an error is encountered in the stream.
///
/// # Examples
///
/// ```
/// use futures::channel::mpsc;
/// use futures::executor::block_on;
/// use futures::stream::TryStreamExt;
/// use std::thread;
///
/// let (mut tx, rx) = mpsc::unbounded::<Result<Vec<i32>, ()>>();
///
/// thread::spawn(move || {
/// for i in (0..3).rev() {
/// let n = i * 3;
/// tx.unbounded_send(Ok(vec![n + 1, n + 2, n + 3])).unwrap();
/// }
/// });
///
/// let result = block_on(rx.try_concat());
///
/// assert_eq!(result, Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
/// ```
fn try_concat(self) -> TryConcat<Self>
where
    Self: Sized,
    Self::Ok: IntoIterator + Default,
    Self::Ok: Extend<<<Self as TryStream>::Ok as IntoIterator>::Item>,
{
    TryConcat::new(self)
}
/// Attempt to execute several futures from a stream concurrently.
///
/// This stream's `Ok` type must be a [`TryFuture`] with an `Error` type
/// that matches the stream's `Error` type.
///
/// This adaptor will buffer up to `n` futures and then return their
/// outputs in the order in which they complete. If the underlying stream
/// returns an error, it will be immediately propagated.
///
/// The returned stream will be a stream of results, each containing either
/// an error or a future's output. An error can be produced either by the
/// underlying stream itself or by one of the futures it yielded.
///
/// This method is only available when the `std` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// Results are returned in the order of completion:
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::oneshot;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let (send_one, recv_one) = oneshot::channel();
/// let (send_two, recv_two) = oneshot::channel();
///
/// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]);
///
/// let mut buffered = stream_of_futures.try_buffer_unordered(10);
///
/// send_two.send(2i32);
/// assert_eq!(await!(buffered.next()), Some(Ok(2i32)));
///
/// send_one.send(1i32);
/// assert_eq!(await!(buffered.next()), Some(Ok(1i32)));
///
/// assert_eq!(await!(buffered.next()), None);
/// # })
/// ```
///
/// Errors from the underlying stream itself are propagated:
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::future;
/// use futures::stream::{StreamExt, TryStreamExt};
///
/// let (sink, stream_of_futures) = mpsc::unbounded();
/// let mut buffered = stream_of_futures.try_buffer_unordered(10);
///
/// sink.unbounded_send(Ok(future::ready(Ok(7i32))));
/// assert_eq!(await!(buffered.next()), Some(Ok(7i32)));
///
/// sink.unbounded_send(Err("error in the stream"));
/// assert_eq!(await!(buffered.next()), Some(Err("error in the stream")));
/// # })
/// ```
#[cfg_attr(
    feature = "cfg-target-has-atomic",
    cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr"))
)]
#[cfg(feature = "alloc")]
fn try_buffer_unordered(self, n: usize) -> TryBufferUnordered<Self>
where
    Self: Sized,
    Self::Ok: TryFuture<Error = Self::Error>,
{
    TryBufferUnordered::new(self, n)
}
// TODO: false positive warning from rustdoc. Verify once #43466 settles
//
/// A convenience method for calling [`TryStream::try_poll_next`] on [`Unpin`]
/// stream types.
fn try_poll_next_unpin(
    &mut self,
    cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Ok, Self::Error>>>
where
    Self: Unpin,
{
    // `Unpin` makes `Pin::new` on a plain `&mut self` sound; delegate to the pinned form.
    Pin::new(self).try_poll_next(cx)
}
/// Wraps a [`TryStream`] into a stream compatible with libraries using
/// futures 0.1 `Stream`. Requires the `compat` feature to be enabled.
///
/// # Examples
/// ```
/// #![feature(async_await, await_macro)]
/// use futures::future::{FutureExt, TryFutureExt};
/// # let (tx, rx) = futures::channel::oneshot::channel();
///
/// let future03 = async {
/// println!("Running on the pool");
/// tx.send(42).unwrap();
/// };
///
/// let future01 = future03
/// .unit_error() // Make it a TryFuture
/// .boxed() // Make it Unpin
/// .compat();
///
/// tokio::run(future01);
/// # assert_eq!(42, futures::executor::block_on(rx).unwrap());
/// ```
#[cfg(feature = "compat")]
fn compat(self) -> Compat<Self>
where
    Self: Unpin + Sized,
{
    Compat::new(self)
}
/// Adapter that converts this stream into an [`AsyncRead`](crate::io::AsyncRead).
///
/// Note that because `into_async_read` moves the stream, the [`Stream`](futures_core::stream::Stream) type must be
/// [`Unpin`]. If you want to use `into_async_read` with a [`!Unpin`](Unpin) stream, you'll
/// first have to pin the stream. This can be done by boxing the stream using [`Box::pin`]
/// or pinning it to the stack using the `pin_mut!` macro from the `pin_utils` crate.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::executor::block_on;
/// use futures::future::lazy;
/// use futures::stream::{self, StreamExt, TryStreamExt};
/// use futures::io::{AsyncRead, AsyncReadExt};
/// use std::io::Error;
///
/// let stream = stream::iter(vec![Ok(vec![1, 2, 3, 4, 5])]);
/// let mut reader = stream.into_async_read();
/// let mut buf = Vec::new();
///
/// assert!(await!(reader.read_to_end(&mut buf)).is_ok());
/// assert_eq!(buf, &[1, 2, 3, 4, 5]);
/// # })
/// ```
#[cfg(feature = "std")]
fn into_async_read(self) -> IntoAsyncRead<Self>
where
    Self: Sized + Unpin + TryStreamExt<Error = std::io::Error>,
    Self::Ok: AsRef<[u8]>,
{
    IntoAsyncRead::new(self)
}
}
// Remove links to html files
//! Streams
//!
//! This module contains a number of functions for working with `Stream`s
//! that return `Result`s, allowing for short-circuiting computations.
use core::pin::Pin;
use futures_core::future::{Future, TryFuture};
use futures_core::stream::TryStream;
use futures_core::task::{Context, Poll};
#[cfg(feature = "compat")]
use crate::compat::Compat;
mod and_then;
pub use self::and_then::AndThen;
mod err_into;
pub use self::err_into::ErrInto;
mod inspect_ok;
pub use self::inspect_ok::InspectOk;
mod inspect_err;
pub use self::inspect_err::InspectErr;
mod into_stream;
pub use self::into_stream::IntoStream;
mod map_ok;
pub use self::map_ok::MapOk;
mod map_err;
pub use self::map_err::MapErr;
mod or_else;
pub use self::or_else::OrElse;
mod try_next;
pub use self::try_next::TryNext;
mod try_for_each;
pub use self::try_for_each::TryForEach;
mod try_filter;
pub use self::try_filter::TryFilter;
mod try_filter_map;
pub use self::try_filter_map::TryFilterMap;
mod try_collect;
pub use self::try_collect::TryCollect;
mod try_concat;
pub use self::try_concat::TryConcat;
mod try_fold;
pub use self::try_fold::TryFold;
mod try_skip_while;
pub use self::try_skip_while::TrySkipWhile;
cfg_target_has_atomic! {
#[cfg(feature = "alloc")]
mod try_buffer_unordered;
#[cfg(feature = "alloc")]
pub use self::try_buffer_unordered::TryBufferUnordered;
#[cfg(feature = "alloc")]
mod try_for_each_concurrent;
#[cfg(feature = "alloc")]
pub use self::try_for_each_concurrent::TryForEachConcurrent;
}
#[cfg(feature = "std")]
mod into_async_read;
#[cfg(feature = "std")]
pub use self::into_async_read::IntoAsyncRead;
impl<S: ?Sized + TryStream> TryStreamExt for S {}
/// Adapters specific to `Result`-returning streams
pub trait TryStreamExt: TryStream {
/// Wraps the current stream in a new stream which converts the error type
/// into the one provided.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(()), Err(5i32)])
/// .err_into::<i64>();
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(())));
/// assert_eq!(await!(stream.try_next()), Err(5i64));
/// # })
/// ```
fn err_into<E>(self) -> ErrInto<Self, E>
where
Self: Sized,
Self::Error: Into<E>
{
ErrInto::new(self)
}
/// Wraps the current stream in a new stream which maps the success value
/// using the provided closure.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(5), Err(0)])
/// .map_ok(|x| x + 2);
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(7)));
/// assert_eq!(await!(stream.try_next()), Err(0));
/// # })
/// ```
fn map_ok<T, F>(self, f: F) -> MapOk<Self, F>
where
    F: FnMut(Self::Ok) -> T,
    Self: Sized,
{
    MapOk::new(self, f)
}
/// Wraps the current stream in a new stream which maps the error value
/// using the provided closure.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream =
/// stream::iter(vec![Ok(5), Err(0)])
/// .map_err(|x| x + 2);
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(5)));
/// assert_eq!(await!(stream.try_next()), Err(2));
/// # })
/// ```
fn map_err<E, F>(self, f: F) -> MapErr<Self, F>
where
    F: FnMut(Self::Error) -> E,
    Self: Sized,
{
    MapErr::new(self, f)
}
/// Chain on a computation for when a value is ready, passing the successful
/// results to the provided closure `f`.
///
/// This function can be used to run a unit of work when the next successful
/// value on a stream is ready. The closure provided will be yielded a value
/// when ready, and the returned future will then be run to completion to
/// produce the next value on this stream.
///
/// Any errors produced by this stream will not be passed to the closure,
/// and will be passed through.
///
/// The returned value of the closure must implement the `TryFuture` trait
/// and can represent some more work to be done before the composed stream
/// is finished.
///
/// Note that this function consumes the receiving stream and returns a
/// wrapped version of it.
///
/// To process the entire stream and return a single future representing
/// success or error, use `try_for_each` instead.
///
/// # Examples
///
/// ```
/// use futures::channel::mpsc;
/// use futures::future;
/// use futures::stream::TryStreamExt;
///
/// let (_tx, rx) = mpsc::channel::<Result<i32, ()>>(1);
///
/// let rx = rx.and_then(|result| {
/// future::ok(if result % 2 == 0 {
/// Some(result)
/// } else {
/// None
/// })
/// });
/// ```
fn and_then<Fut, F>(self, f: F) -> AndThen<Self, Fut, F>
where
    Self: Sized,
    F: FnMut(Self::Ok) -> Fut,
    Fut: TryFuture<Error = Self::Error>,
{
    AndThen::new(self, f)
}
/// Chain on a computation for when an error happens, passing the
/// erroneous result to the provided closure `f`.
///
/// This function can be used to run a unit of work and attempt to recover from
/// an error if one happens. The closure provided will be yielded an error
/// when one appears, and the returned future will then be run to completion
/// to produce the next value on this stream.
///
/// Any successful values produced by this stream will not be passed to the
/// closure, and will be passed through.
///
/// The returned value of the closure must implement the [`TryFuture`] trait
/// and can represent some more work to be done before the composed stream
/// is finished.
///
/// Note that this function consumes the receiving stream and returns a
/// wrapped version of it.
fn or_else<Fut, F>(self, f: F) -> OrElse<Self, Fut, F>
where F: FnMut(Self::Error) -> Fut,
Fut: TryFuture<Ok = Self::Ok>,
Self: Sized,
{
OrElse::new(self, f)
}
/// Do something with the success value of this stream, afterwards passing
/// it on.
///
/// This is similar to the `StreamExt::inspect` method where it allows
/// easily inspecting the success value as it passes through the stream, for
/// example to debug what's going on.
fn inspect_ok<F>(self, f: F) -> InspectOk<Self, F>
where
    Self: Sized,
    F: FnMut(&Self::Ok),
{
    InspectOk::new(self, f)
}
/// Do something with the error value of this stream, afterwards passing it on.
///
/// This is similar to the `StreamExt::inspect` method where it allows
/// easily inspecting the error value as it passes through the stream, for
/// example to debug what's going on.
fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
where F: FnMut(&Self::Error),
Self: Sized,
{
InspectErr::new(self, f)
}
/// Wraps a [`TryStream`] into a type that implements
/// [`Stream`](futures_core::Stream)
///
/// [`TryStream`]s currently do not implement the
/// [`Stream`](futures_core::Stream) trait because of limitations
/// of the compiler.
///
/// # Examples
///
/// ```
/// use futures::stream::{Stream, TryStream, TryStreamExt};
///
/// # type T = i32;
/// # type E = ();
/// fn make_try_stream() -> impl TryStream<Ok = T, Error = E> { // ... }
/// # futures::stream::empty()
/// # }
/// fn take_stream(stream: impl Stream<Item = Result<T, E>>) { /* ... */ }
///
/// take_stream(make_try_stream().into_stream());
/// ```
fn into_stream(self) -> IntoStream<Self>
where
    Self: Sized,
{
    IntoStream::new(self)
}
/// Creates a future that attempts to resolve the next item in the stream.
/// If an error is encountered before the next item, the error is returned
/// instead.
///
/// This is similar to the `Stream::next` combinator, but returns a
/// `Result<Option<T>, E>` rather than an `Option<Result<T, E>>`, making
/// for easy use with the `?` operator.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::stream::{self, TryStreamExt};
///
/// let mut stream = stream::iter(vec![Ok(()), Err(())]);
///
/// assert_eq!(await!(stream.try_next()), Ok(Some(())));
/// assert_eq!(await!(stream.try_next()), Err(()));
/// # })
/// ```
fn try_next(&mut self) -> TryNext<'_, Self>
where
    Self: Unpin + Sized,
{
    TryNext::new(self)
}
/// Attempts to run this stream to completion, executing the provided
/// asynchronous closure for each element on the stream.
///
/// The provided closure will be called for each item this stream produces,
/// yielding a future. That future will then be executed to completion
/// before moving on to the next item.
///
/// The returned value is a [`Future`](futures_core::Future) where the
/// [`Output`](futures_core::Future::Output) type is
/// `Result<(), Self::Error>`. If any of the intermediate
/// futures or the stream returns an error, this future will return
/// immediately with an error.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let mut x = 0i32;
///
/// {
/// let fut = stream::repeat(Ok(1)).try_for_each(|item| {
/// x += item;
/// future::ready(if x == 3 { Err(()) } else { Ok(()) })
/// });
/// assert_eq!(await!(fut), Err(()));
/// }
///
/// assert_eq!(x, 3);
/// # })
/// ```
fn try_for_each<Fut, F>(self, f: F) -> TryForEach<Self, Fut, F>
where F: FnMut(Self::Ok) -> Fut,
Fut: TryFuture<Ok = (), Error=Self::Error>,
Self: Sized
{
TryForEach::new(self, f)
}
/// Skip elements on this stream while the provided asynchronous predicate
/// resolves to `true`.
///
/// This function is similar to [`StreamExt::skip_while`](crate::stream::StreamExt::skip_while)
/// but exits early if an error occurs.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(3), Ok(2)]);
/// let mut stream = stream.try_skip_while(|x| future::ready(Ok(*x < 3)));
///
/// let output: Result<Vec<i32>, i32> = await!(stream.try_collect());
/// assert_eq!(output, Ok(vec![3, 2]));
/// # })
/// ```
fn try_skip_while<Fut, F>(self, f: F) -> TrySkipWhile<Self, Fut, F>
where
    Self: Sized,
    F: FnMut(&Self::Ok) -> Fut,
    Fut: TryFuture<Ok = bool, Error = Self::Error>,
{
    TrySkipWhile::new(self, f)
}
/// Attempts to run this stream to completion, executing the provided asynchronous
/// closure for each element on the stream concurrently as elements become
/// available, exiting as soon as an error occurs.
///
/// This is similar to
/// [`StreamExt::for_each_concurrent`](super::StreamExt::for_each_concurrent),
/// but will resolve to an error immediately if the underlying stream or the provided
/// closure return an error.
///
/// This method is only available when the `std` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::oneshot;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let (tx1, rx1) = oneshot::channel();
/// let (tx2, rx2) = oneshot::channel();
/// let (_tx3, rx3) = oneshot::channel();
///
/// let stream = stream::iter(vec![rx1, rx2, rx3]);
/// let fut = stream.map(Ok).try_for_each_concurrent(
/// /* limit */ 2,
/// async move |rx| {
/// let res: Result<(), oneshot::Canceled> = await!(rx);
/// res
/// }
/// );
///
/// tx1.send(()).unwrap();
/// // Drop the second sender so that `rx2` resolves to `Canceled`.
/// drop(tx2);
///
/// // The final result is an error because the second future
/// // resulted in an error.
/// assert_eq!(Err(oneshot::Canceled), await!(fut));
/// # })
/// ```
#[cfg_attr(
    feature = "cfg-target-has-atomic",
    cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr"))
)]
#[cfg(feature = "alloc")]
fn try_for_each_concurrent<Fut, F>(
    self,
    limit: impl Into<Option<usize>>,
    f: F,
) -> TryForEachConcurrent<Self, Fut, F>
where
    Self: Sized,
    F: FnMut(Self::Ok) -> Fut,
    Fut: Future<Output = Result<(), Self::Error>>,
{
    // `None` means the number of concurrently running futures is unbounded.
    let limit = limit.into();
    TryForEachConcurrent::new(self, limit, f)
}
/// Attempt to Collect all of the values of this stream into a vector,
/// returning a future representing the result of that computation.
///
/// This combinator will collect all successful results of this stream and
/// collect them into a `Vec<Self::Item>`. If an error happens then all
/// collected elements will be dropped and the error will be returned.
///
/// The returned future will be resolved when the stream terminates.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::executor::block_on;
/// use futures::stream::TryStreamExt;
/// use std::thread;
///
/// let (mut tx, rx) = mpsc::unbounded();
///
/// thread::spawn(move || {
/// for i in (1..=5) {
/// tx.unbounded_send(Ok(i)).unwrap();
/// }
/// tx.unbounded_send(Err(6)).unwrap();
/// });
///
/// let output: Result<Vec<i32>, i32> = await!(rx.try_collect());
/// assert_eq!(output, Err(6));
/// # })
/// ```
fn try_collect<C: Default + Extend<Self::Ok>>(self) -> TryCollect<Self, C>
where
    Self: Sized,
{
    TryCollect::new(self)
}
/// Attempt to filter the values produced by this stream according to the
/// provided asynchronous closure.
///
/// As values of this stream are made available, the provided predicate `f`
/// will be run on them. If the predicate returns a `Future` which resolves
/// to `true`, then the stream will yield the value, but if the predicate
/// return a `Future` which resolves to `false`, then the value will be
/// discarded and the next value will be produced.
///
/// All errors are passed through without filtering in this combinator.
///
/// Note that this function consumes the stream passed into it and returns a
/// wrapped version of it, similar to the existing `filter` methods in
/// the standard library.
///
/// # Examples
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::executor::block_on;
/// use futures::future;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok(1i32), Ok(2i32), Ok(3i32), Err("error")]);
/// let mut evens = stream.try_filter(|x| {
/// future::ready(x % 2 == 0)
/// });
///
/// assert_eq!(await!(evens.next()), Some(Ok(2)));
/// assert_eq!(await!(evens.next()), Some(Err("error")));
/// # })
/// ```
fn try_filter<Fut, F>(self, f: F) -> TryFilter<Self, Fut, F>
where
    Self: Sized,
    F: FnMut(&Self::Ok) -> Fut,
    Fut: Future<Output = bool>,
{
    TryFilter::new(self, f)
}
/// Attempt to filter the values produced by this stream while
/// simultaneously mapping them to a different type according to the
/// provided asynchronous closure.
///
/// As values of this stream are made available, the provided function will
/// be run on them. If the future returned by the predicate `f` resolves to
/// [`Some(item)`](Some) then the stream will yield the value `item`, but if
/// it resolves to [`None`] then the next value will be produced.
///
/// All errors are passed through without filtering in this combinator.
///
/// Note that this function consumes the stream passed into it and returns a
/// wrapped version of it, similar to the existing `filter_map` methods in
/// the standard library.
///
/// # Examples
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::executor::block_on;
/// use futures::future;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let stream = stream::iter(vec![Ok(1i32), Ok(6i32), Err("error")]);
/// let mut halves = stream.try_filter_map(|x| {
/// let ret = if x % 2 == 0 { Some(x / 2) } else { None };
/// future::ready(Ok(ret))
/// });
///
/// assert_eq!(await!(halves.next()), Some(Ok(3)));
/// assert_eq!(await!(halves.next()), Some(Err("error")));
/// # })
/// ```
fn try_filter_map<Fut, F, T>(self, f: F) -> TryFilterMap<Self, Fut, F>
where Fut: TryFuture<Ok = Option<T>, Error = Self::Error>,
F: FnMut(Self::Ok) -> Fut,
Self: Sized
{
TryFilterMap::new(self, f)
}
/// Attempt to execute an accumulating asynchronous computation over a
/// stream, collecting all the values into one final result.
///
/// This combinator will accumulate all values returned by this stream
/// according to the closure provided. The initial state is also provided to
/// this method and then is returned again by each execution of the closure.
/// Once the entire stream has been exhausted the returned future will
/// resolve to this value.
///
/// This method is similar to [`fold`](super::StreamExt::fold), but will
/// exit early if an error is encountered in either the stream or the
/// provided closure.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::future;
/// use futures::stream::{self, TryStreamExt};
///
/// let number_stream = stream::iter(vec![Ok::<i32, i32>(1), Ok(2)]);
/// let sum = number_stream.try_fold(0, |acc, x| future::ready(Ok(acc + x)));
/// assert_eq!(await!(sum), Ok(3));
///
/// let number_stream_with_err = stream::iter(vec![Ok::<i32, i32>(1), Err(2), Ok(1)]);
/// let sum = number_stream_with_err.try_fold(0, |acc, x| future::ready(Ok(acc + x)));
/// assert_eq!(await!(sum), Err(2));
/// # })
/// ```
fn try_fold<T, Fut, F>(self, init: T, f: F) -> TryFold<Self, Fut, T, F>
where
    Self: Sized,
    F: FnMut(T, Self::Ok) -> Fut,
    Fut: TryFuture<Ok = T, Error = Self::Error>,
{
    TryFold::new(self, f, init)
}
/// Attempt to concatenate all items of a stream into a single
/// extendable destination, returning a future representing the end result.
///
/// This combinator will extend the first item with the contents of all
/// the subsequent successful results of the stream. If the stream is empty,
/// the default value will be returned.
///
/// Works with all collections that implement the [`Extend`](std::iter::Extend) trait.
///
/// This method is similar to [`concat`](super::StreamExt::concat), but will
/// exit early if an error is encountered in the stream.
///
/// # Examples
///
/// ```
/// use futures::channel::mpsc;
/// use futures::executor::block_on;
/// use futures::stream::TryStreamExt;
/// use std::thread;
///
/// let (mut tx, rx) = mpsc::unbounded::<Result<Vec<i32>, ()>>();
///
/// thread::spawn(move || {
/// for i in (0..3).rev() {
/// let n = i * 3;
/// tx.unbounded_send(Ok(vec![n + 1, n + 2, n + 3])).unwrap();
/// }
/// });
///
/// let result = block_on(rx.try_concat());
///
/// assert_eq!(result, Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
/// ```
fn try_concat(self) -> TryConcat<Self>
where
    Self: Sized,
    Self::Ok: IntoIterator + Default,
    Self::Ok: Extend<<<Self as TryStream>::Ok as IntoIterator>::Item>,
{
    TryConcat::new(self)
}
/// Attempt to execute several futures from a stream concurrently.
///
/// This stream's `Ok` type must be a [`TryFuture`] with an `Error` type
/// that matches the stream's `Error` type.
///
/// This adaptor will buffer up to `n` futures and then return their
/// outputs in the order in which they complete. If the underlying stream
/// returns an error, it will be immediately propagated.
///
/// The returned stream will be a stream of results, each containing either
/// an error or a future's output. An error can be produced either by the
/// underlying stream itself or by one of the futures it yielded.
///
/// This method is only available when the `std` feature of this
/// library is activated, and it is activated by default.
///
/// # Examples
///
/// Results are returned in the order of completion:
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::oneshot;
/// use futures::stream::{self, StreamExt, TryStreamExt};
///
/// let (send_one, recv_one) = oneshot::channel();
/// let (send_two, recv_two) = oneshot::channel();
///
/// let stream_of_futures = stream::iter(vec![Ok(recv_one), Ok(recv_two)]);
///
/// let mut buffered = stream_of_futures.try_buffer_unordered(10);
///
/// send_two.send(2i32);
/// assert_eq!(await!(buffered.next()), Some(Ok(2i32)));
///
/// send_one.send(1i32);
/// assert_eq!(await!(buffered.next()), Some(Ok(1i32)));
///
/// assert_eq!(await!(buffered.next()), None);
/// # })
/// ```
///
/// Errors from the underlying stream itself are propagated:
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::channel::mpsc;
/// use futures::future;
/// use futures::stream::{StreamExt, TryStreamExt};
///
/// let (sink, stream_of_futures) = mpsc::unbounded();
/// let mut buffered = stream_of_futures.try_buffer_unordered(10);
///
/// sink.unbounded_send(Ok(future::ready(Ok(7i32))));
/// assert_eq!(await!(buffered.next()), Some(Ok(7i32)));
///
/// sink.unbounded_send(Err("error in the stream"));
/// assert_eq!(await!(buffered.next()), Some(Err("error in the stream")));
/// # })
/// ```
#[cfg_attr(
    feature = "cfg-target-has-atomic",
    cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr"))
)]
#[cfg(feature = "alloc")]
fn try_buffer_unordered(self, n: usize) -> TryBufferUnordered<Self>
where
    Self: Sized,
    Self::Ok: TryFuture<Error = Self::Error>,
{
    TryBufferUnordered::new(self, n)
}
// TODO: false positive warning from rustdoc. Verify once #43466 settles
//
/// A convenience method for calling [`TryStream::try_poll_next`] on [`Unpin`]
/// stream types.
fn try_poll_next_unpin(
    &mut self,
    cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Ok, Self::Error>>>
where
    Self: Unpin,
{
    // `Unpin` makes `Pin::new` on a plain `&mut self` sound; delegate to the pinned form.
    Pin::new(self).try_poll_next(cx)
}
/// Wraps a [`TryStream`] into a stream compatible with libraries using
/// futures 0.1 `Stream`. Requires the `compat` feature to be enabled.
///
/// # Examples
/// ```
/// #![feature(async_await, await_macro)]
/// use futures::future::{FutureExt, TryFutureExt};
/// # let (tx, rx) = futures::channel::oneshot::channel();
///
/// let future03 = async {
/// println!("Running on the pool");
/// tx.send(42).unwrap();
/// };
///
/// let future01 = future03
/// .unit_error() // Make it a TryFuture
/// .boxed() // Make it Unpin
/// .compat();
///
/// tokio::run(future01);
/// # assert_eq!(42, futures::executor::block_on(rx).unwrap());
/// ```
#[cfg(feature = "compat")]
fn compat(self) -> Compat<Self>
where
    Self: Unpin + Sized,
{
    Compat::new(self)
}
/// Adapter that converts this stream into an [`AsyncRead`](crate::io::AsyncRead).
///
/// Note that because `into_async_read` moves the stream, the [`Stream`](futures_core::stream::Stream) type must be
/// [`Unpin`]. If you want to use `into_async_read` with a [`!Unpin`](Unpin) stream, you'll
/// first have to pin the stream. This can be done by boxing the stream using [`Box::pin`]
/// or pinning it to the stack using the `pin_mut!` macro from the `pin_utils` crate.
///
/// # Examples
///
/// ```
/// #![feature(async_await, await_macro)]
/// # futures::executor::block_on(async {
/// use futures::executor::block_on;
/// use futures::future::lazy;
/// use futures::stream::{self, StreamExt, TryStreamExt};
/// use futures::io::{AsyncRead, AsyncReadExt};
/// use std::io::Error;
///
/// let stream = stream::iter(vec![Ok(vec![1, 2, 3, 4, 5])]);
/// let mut reader = stream.into_async_read();
/// let mut buf = Vec::new();
///
/// assert!(await!(reader.read_to_end(&mut buf)).is_ok());
/// assert_eq!(buf, &[1, 2, 3, 4, 5]);
/// # })
/// ```
#[cfg(feature = "std")]
fn into_async_read(self) -> IntoAsyncRead<Self>
where
    Self: Sized + Unpin + TryStreamExt<Error = std::io::Error>,
    Self::Ok: AsRef<[u8]>,
{
    IntoAsyncRead::new(self)
}
}
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provides parts of crosvm as a library to communicate with running crosvm instances.
//!
//! This crate is a programmatic alternative to invoking crosvm with subcommands that produce the
//! result on stdout.
use std::convert::{TryFrom, TryInto};
use std::ffi::CStr;
use std::panic::catch_unwind;
use std::path::{Path, PathBuf};
use libc::{c_char, ssize_t};
use vm_control::{
client::*, BalloonControlCommand, BalloonStats, DiskControlCommand, UsbControlAttachedDevice,
UsbControlResult, VmRequest, VmResponse,
};
/// Converts a raw C string into a `PathBuf`.
///
/// Returns `None` when the pointer is null or the bytes are not valid UTF-8.
fn validate_socket_path(socket_path: *const c_char) -> Option<PathBuf> {
    if socket_path.is_null() {
        return None;
    }
    // SAFETY: the pointer was checked for null above; the FFI caller is expected to
    // pass a valid NUL-terminated C string — TODO confirm this contract at call sites.
    let c_str = unsafe { CStr::from_ptr(socket_path) };
    c_str.to_str().ok().map(PathBuf::from)
}
/// Stops the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_stop_vm(socket_path: *const c_char) -> bool {
    // Panics must not cross the FFI boundary; treat any unwind as failure.
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::Exit, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Suspends the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_suspend_vm(socket_path: *const c_char) -> bool {
    // Panics must not cross the FFI boundary; treat any unwind as failure.
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::Suspend, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Resumes the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_resume_vm(socket_path: *const c_char) -> bool {
    // Panics must not cross the FFI boundary; treat any unwind as failure.
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::Resume, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Creates an RT vCPU for the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_make_rt_vm(socket_path: *const c_char) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::MakeRT, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Adjusts the balloon size of the crosvm instance whose control socket is
/// listening on `socket_path`.
///
/// `num_bytes` is the requested balloon size; the function returns true on
/// success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_balloon_vms(socket_path: *const c_char, num_bytes: u64) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| {
                let command = BalloonControlCommand::Adjust { num_bytes };
                vms_request(&VmRequest::BalloonCommand(command), &socket_path).is_ok()
            })
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Represents an individual attached USB device.
///
/// `#[repr(C)]` so the layout matches the caller's C definition; instances are
/// written into caller-provided arrays by `crosvm_client_usb_list`.
#[repr(C)]
pub struct UsbDeviceEntry {
    /// Internal port index used for identifying this individual device.
    port: u8,
    /// USB vendor ID
    vendor_id: u16,
    /// USB product ID
    product_id: u16,
}
impl From<&UsbControlAttachedDevice> for UsbDeviceEntry {
fn from(other: &UsbControlAttachedDevice) -> Self {
Self {
port: other.port,
vendor_id: other.vendor_id,
product_id: other.product_id,
}
}
}
/// Returns all USB devices passed through the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns the amount of entries written, or -1 on error.
/// # Arguments
///
/// * `socket_path` - Path to the crosvm control socket
/// * `entries` - Pointer to an array of `UsbDeviceEntry` where the details about the attached
///   devices will be written to
/// * `entries_length` - Amount of entries in the array specified by `entries`
///
/// Crosvm supports passing through up to 255 devices, so passing an array with 255 entries will
/// guarantee to return all entries.
#[no_mangle]
pub extern "C" fn crosvm_client_usb_list(
    socket_path: *const c_char,
    entries: *mut UsbDeviceEntry,
    entries_length: ssize_t,
) -> ssize_t {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return -1,
        };
        match do_usb_list(&socket_path) {
            Ok(UsbControlResult::Devices(devices)) => {
                let mut written: ssize_t = 0;
                for device in devices.iter().filter(|d| d.valid()) {
                    if written >= entries_length {
                        break;
                    }
                    // SAFETY: the caller guarantees `entries` points to at least
                    // `entries_length` elements, and `written` is below that bound.
                    unsafe {
                        *entries.offset(written) = device.into();
                    }
                    written += 1;
                }
                written
            }
            // Any error or unexpected response variant is reported as -1.
            _ => -1,
        }
    })
    .unwrap_or(-1)
}
/// Attaches an USB device to crosvm instance whose control socket is listening on `socket_path`.
///
/// # Arguments
///
/// * `socket_path` - Path to the crosvm control socket
/// * `bus` - USB device bus ID (unused)
/// * `addr` - USB device address (unused)
/// * `vid` - USB device vendor ID (unused)
/// * `pid` - USB device product ID (unused)
/// * `dev_path` - Path to the USB device (Most likely `/dev/bus/usb/<bus>/<addr>`).
/// * `out_port` - (optional) internal port will be written here if provided.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_usb_attach(
    socket_path: *const c_char,
    _bus: u8,
    _addr: u8,
    _vid: u16,
    _pid: u16,
    dev_path: *const c_char,
    out_port: *mut u8,
) -> bool {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return false,
        };
        if dev_path.is_null() {
            return false;
        }
        // SAFETY: `dev_path` was null-checked above; the caller guarantees it
        // addresses a NUL-terminated string. Invalid UTF-8 degrades to "".
        let dev_path = Path::new(unsafe { CStr::from_ptr(dev_path) }.to_str().unwrap_or(""));
        match do_usb_attach(&socket_path, dev_path) {
            Ok(UsbControlResult::Ok { port }) => {
                if !out_port.is_null() {
                    // SAFETY: the caller guarantees a non-null `out_port` is writable.
                    unsafe { *out_port = port };
                }
                true
            }
            _ => false,
        }
    })
    .unwrap_or(false)
}
/// Detaches an USB device from crosvm instance whose control socket is listening on `socket_path`.
/// `port` determines device to be detached.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_usb_detach(socket_path: *const c_char, port: u8) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| do_usb_detach(&socket_path, port).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Modifies the battery status of crosvm instance whose control socket is listening on
/// `socket_path`.
///
/// All three string arguments must be non-null, NUL-terminated, valid UTF-8.
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_modify_battery(
    socket_path: *const c_char,
    battery_type: *const c_char,
    property: *const c_char,
    target: *const c_char,
) -> bool {
    catch_unwind(|| {
        if let Some(socket_path) = validate_socket_path(socket_path) {
            if battery_type.is_null() || property.is_null() || target.is_null() {
                return false;
            }
            // SAFETY: all three pointers were null-checked above; the caller
            // guarantees they address NUL-terminated strings.
            let battery_type = unsafe { CStr::from_ptr(battery_type) };
            let property = unsafe { CStr::from_ptr(property) };
            let target = unsafe { CStr::from_ptr(target) };
            // Reject non-UTF-8 input explicitly instead of panicking: the
            // previous `unwrap()` relied on `catch_unwind` to convert the
            // panic into `false`, which also unwound through this closure.
            match (battery_type.to_str(), property.to_str(), target.to_str()) {
                (Ok(battery_type), Ok(property), Ok(target)) => {
                    do_modify_battery(&socket_path, battery_type, property, target).is_ok()
                }
                _ => false,
            }
        } else {
            false
        }
    })
    .unwrap_or(false)
}
/// Resizes the disk of the crosvm instance whose control socket is listening on `socket_path`.
///
/// `disk_index` selects the disk; `new_size` is the requested size in bytes.
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_resize_disk(
    socket_path: *const c_char,
    disk_index: u64,
    new_size: u64,
) -> bool {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return false,
        };
        // Reject indices that do not fit in usize on this platform.
        match usize::try_from(disk_index) {
            Ok(disk_index) => {
                let request = VmRequest::DiskCommand {
                    disk_index,
                    command: DiskControlCommand::Resize { new_size },
                };
                vms_request(&request, &socket_path).is_ok()
            }
            Err(_) => false,
        }
    })
    .unwrap_or(false)
}
/// Similar to internally used `BalloonStats` but using i64 instead of
/// Option<u64>. `None` (or values bigger than i64::max) will be encoded as -1.
///
/// `#[repr(C)]` so the struct can be written directly into caller-provided
/// storage by `crosvm_client_balloon_stats`.
#[repr(C)]
pub struct BalloonStatsFfi {
    swap_in: i64,
    swap_out: i64,
    major_faults: i64,
    minor_faults: i64,
    free_memory: i64,
    total_memory: i64,
    available_memory: i64,
    disk_caches: i64,
    hugetlb_allocations: i64,
    hugetlb_failures: i64,
}
impl From<&BalloonStats> for BalloonStatsFfi {
fn from(other: &BalloonStats) -> Self {
let convert = |x: Option<u64>| -> i64 { x.and_then(|y| y.try_into().ok()).unwrap_or(-1) };
Self {
swap_in: convert(other.swap_in),
swap_out: convert(other.swap_out),
major_faults: convert(other.major_faults),
minor_faults: convert(other.minor_faults),
free_memory: convert(other.free_memory),
total_memory: convert(other.total_memory),
available_memory: convert(other.available_memory),
disk_caches: convert(other.disk_caches),
hugetlb_allocations: convert(other.hugetlb_allocations),
hugetlb_failures: convert(other.hugetlb_failures),
}
}
}
/// Returns balloon stats of the crosvm instance whose control socket is listening on `socket_path`.
///
/// The parameters `stats` and `actual` are optional and will only be written to if they are
/// non-null.
///
/// The function returns true on success or false if an error occurred.
///
/// # Note
///
/// Entries in `BalloonStatsFfi` that are not available will be set to `-1`.
#[no_mangle]
pub extern "C" fn crosvm_client_balloon_stats(
    socket_path: *const c_char,
    stats: *mut BalloonStatsFfi,
    actual: *mut u64,
) -> bool {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return false,
        };
        let request = &VmRequest::BalloonCommand(BalloonControlCommand::Stats {});
        match handle_request(request, &socket_path) {
            Ok(VmResponse::BalloonStats {
                stats: ref balloon_stats,
                balloon_actual,
            }) => {
                if !stats.is_null() {
                    // SAFETY: the caller guarantees a non-null `stats` is writable.
                    unsafe {
                        *stats = balloon_stats.into();
                    }
                }
                if !actual.is_null() {
                    // SAFETY: the caller guarantees a non-null `actual` is writable.
                    unsafe {
                        *actual = balloon_actual;
                    }
                }
                true
            }
            _ => false,
        }
    })
    .unwrap_or(false)
}
crosvm_control: Add shared and unevictable memory to BalloonStats
Adds the shared_memory and unevictable_memory fields to the BalloonStatsFfi
struct and also returns them from crosvm_client_balloon_stats.
BUG=b:188858559
TEST=cq
Change-Id: I9ff52b77cd19e16a860596f24ff45407a2acdd1e
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3713864
Tested-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
Reviewed-by: Noah Gold <2d4550d8af93bfc6e6fbe1fb1b0cc97a9ee7c3ce@google.com>
Commit-Queue: Kameron Lutes <d812b9f24eeb5acef9cc7e601b6b603be1696a90@chromium.org>
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provides parts of crosvm as a library to communicate with running crosvm instances.
//!
//! This crate is a programmatic alternative to invoking crosvm with subcommands that produce the
//! result on stdout.
use std::convert::{TryFrom, TryInto};
use std::ffi::CStr;
use std::panic::catch_unwind;
use std::path::{Path, PathBuf};
use libc::{c_char, ssize_t};
use vm_control::{
client::*, BalloonControlCommand, BalloonStats, DiskControlCommand, UsbControlAttachedDevice,
UsbControlResult, VmRequest, VmResponse,
};
/// Converts a caller-supplied C string into a `PathBuf`.
///
/// Returns `None` when the pointer is null or the bytes are not valid UTF-8.
fn validate_socket_path(socket_path: *const c_char) -> Option<PathBuf> {
    if socket_path.is_null() {
        return None;
    }
    // SAFETY: null was ruled out above; the caller guarantees the pointer
    // addresses a NUL-terminated string.
    let socket_path = unsafe { CStr::from_ptr(socket_path) };
    socket_path.to_str().ok().map(PathBuf::from)
}
/// Stops the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_stop_vm(socket_path: *const c_char) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::Exit, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Suspends the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_suspend_vm(socket_path: *const c_char) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::Suspend, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Resumes the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_resume_vm(socket_path: *const c_char) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::Resume, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Creates an RT vCPU for the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_make_rt_vm(socket_path: *const c_char) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| vms_request(&VmRequest::MakeRT, &socket_path).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Adjusts the balloon size of the crosvm instance whose control socket is
/// listening on `socket_path`.
///
/// `num_bytes` is the requested balloon size; the function returns true on
/// success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_balloon_vms(socket_path: *const c_char, num_bytes: u64) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| {
                let command = BalloonControlCommand::Adjust { num_bytes };
                vms_request(&VmRequest::BalloonCommand(command), &socket_path).is_ok()
            })
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Represents an individual attached USB device.
///
/// `#[repr(C)]` so the layout matches the caller's C definition; instances are
/// written into caller-provided arrays by `crosvm_client_usb_list`.
#[repr(C)]
pub struct UsbDeviceEntry {
    /// Internal port index used for identifying this individual device.
    port: u8,
    /// USB vendor ID
    vendor_id: u16,
    /// USB product ID
    product_id: u16,
}
impl From<&UsbControlAttachedDevice> for UsbDeviceEntry {
fn from(other: &UsbControlAttachedDevice) -> Self {
Self {
port: other.port,
vendor_id: other.vendor_id,
product_id: other.product_id,
}
}
}
/// Returns all USB devices passed through the crosvm instance whose control socket is listening on `socket_path`.
///
/// The function returns the amount of entries written, or -1 on error.
/// # Arguments
///
/// * `socket_path` - Path to the crosvm control socket
/// * `entries` - Pointer to an array of `UsbDeviceEntry` where the details about the attached
///   devices will be written to
/// * `entries_length` - Amount of entries in the array specified by `entries`
///
/// Crosvm supports passing through up to 255 devices, so passing an array with 255 entries will
/// guarantee to return all entries.
#[no_mangle]
pub extern "C" fn crosvm_client_usb_list(
    socket_path: *const c_char,
    entries: *mut UsbDeviceEntry,
    entries_length: ssize_t,
) -> ssize_t {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return -1,
        };
        match do_usb_list(&socket_path) {
            Ok(UsbControlResult::Devices(devices)) => {
                let mut written: ssize_t = 0;
                for device in devices.iter().filter(|d| d.valid()) {
                    if written >= entries_length {
                        break;
                    }
                    // SAFETY: the caller guarantees `entries` points to at least
                    // `entries_length` elements, and `written` is below that bound.
                    unsafe {
                        *entries.offset(written) = device.into();
                    }
                    written += 1;
                }
                written
            }
            // Any error or unexpected response variant is reported as -1.
            _ => -1,
        }
    })
    .unwrap_or(-1)
}
/// Attaches an USB device to crosvm instance whose control socket is listening on `socket_path`.
///
/// # Arguments
///
/// * `socket_path` - Path to the crosvm control socket
/// * `bus` - USB device bus ID (unused)
/// * `addr` - USB device address (unused)
/// * `vid` - USB device vendor ID (unused)
/// * `pid` - USB device product ID (unused)
/// * `dev_path` - Path to the USB device (Most likely `/dev/bus/usb/<bus>/<addr>`).
/// * `out_port` - (optional) internal port will be written here if provided.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_usb_attach(
    socket_path: *const c_char,
    _bus: u8,
    _addr: u8,
    _vid: u16,
    _pid: u16,
    dev_path: *const c_char,
    out_port: *mut u8,
) -> bool {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return false,
        };
        if dev_path.is_null() {
            return false;
        }
        // SAFETY: `dev_path` was null-checked above; the caller guarantees it
        // addresses a NUL-terminated string. Invalid UTF-8 degrades to "".
        let dev_path = Path::new(unsafe { CStr::from_ptr(dev_path) }.to_str().unwrap_or(""));
        match do_usb_attach(&socket_path, dev_path) {
            Ok(UsbControlResult::Ok { port }) => {
                if !out_port.is_null() {
                    // SAFETY: the caller guarantees a non-null `out_port` is writable.
                    unsafe { *out_port = port };
                }
                true
            }
            _ => false,
        }
    })
    .unwrap_or(false)
}
/// Detaches an USB device from crosvm instance whose control socket is listening on `socket_path`.
/// `port` determines device to be detached.
///
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_usb_detach(socket_path: *const c_char, port: u8) -> bool {
    catch_unwind(|| {
        validate_socket_path(socket_path)
            .map(|socket_path| do_usb_detach(&socket_path, port).is_ok())
            .unwrap_or(false)
    })
    .unwrap_or(false)
}
/// Modifies the battery status of crosvm instance whose control socket is listening on
/// `socket_path`.
///
/// All three string arguments must be non-null, NUL-terminated, valid UTF-8.
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_modify_battery(
    socket_path: *const c_char,
    battery_type: *const c_char,
    property: *const c_char,
    target: *const c_char,
) -> bool {
    catch_unwind(|| {
        if let Some(socket_path) = validate_socket_path(socket_path) {
            if battery_type.is_null() || property.is_null() || target.is_null() {
                return false;
            }
            // SAFETY: all three pointers were null-checked above; the caller
            // guarantees they address NUL-terminated strings.
            let battery_type = unsafe { CStr::from_ptr(battery_type) };
            let property = unsafe { CStr::from_ptr(property) };
            let target = unsafe { CStr::from_ptr(target) };
            // Reject non-UTF-8 input explicitly instead of panicking: the
            // previous `unwrap()` relied on `catch_unwind` to convert the
            // panic into `false`, which also unwound through this closure.
            match (battery_type.to_str(), property.to_str(), target.to_str()) {
                (Ok(battery_type), Ok(property), Ok(target)) => {
                    do_modify_battery(&socket_path, battery_type, property, target).is_ok()
                }
                _ => false,
            }
        } else {
            false
        }
    })
    .unwrap_or(false)
}
/// Resizes the disk of the crosvm instance whose control socket is listening on `socket_path`.
///
/// `disk_index` selects the disk; `new_size` is the requested size in bytes.
/// The function returns true on success or false if an error occurred.
#[no_mangle]
pub extern "C" fn crosvm_client_resize_disk(
    socket_path: *const c_char,
    disk_index: u64,
    new_size: u64,
) -> bool {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return false,
        };
        // Reject indices that do not fit in usize on this platform.
        match usize::try_from(disk_index) {
            Ok(disk_index) => {
                let request = VmRequest::DiskCommand {
                    disk_index,
                    command: DiskControlCommand::Resize { new_size },
                };
                vms_request(&request, &socket_path).is_ok()
            }
            Err(_) => false,
        }
    })
    .unwrap_or(false)
}
/// Similar to internally used `BalloonStats` but using i64 instead of
/// Option<u64>. `None` (or values bigger than i64::max) will be encoded as -1.
///
/// `#[repr(C)]` so the struct can be written directly into caller-provided
/// storage by `crosvm_client_balloon_stats`.
#[repr(C)]
pub struct BalloonStatsFfi {
    swap_in: i64,
    swap_out: i64,
    major_faults: i64,
    minor_faults: i64,
    free_memory: i64,
    total_memory: i64,
    available_memory: i64,
    disk_caches: i64,
    hugetlb_allocations: i64,
    hugetlb_failures: i64,
    shared_memory: i64,
    unevictable_memory: i64,
}
impl From<&BalloonStats> for BalloonStatsFfi {
fn from(other: &BalloonStats) -> Self {
let convert = |x: Option<u64>| -> i64 { x.and_then(|y| y.try_into().ok()).unwrap_or(-1) };
Self {
swap_in: convert(other.swap_in),
swap_out: convert(other.swap_out),
major_faults: convert(other.major_faults),
minor_faults: convert(other.minor_faults),
free_memory: convert(other.free_memory),
total_memory: convert(other.total_memory),
available_memory: convert(other.available_memory),
disk_caches: convert(other.disk_caches),
hugetlb_allocations: convert(other.hugetlb_allocations),
hugetlb_failures: convert(other.hugetlb_failures),
shared_memory: convert(other.shared_memory),
unevictable_memory: convert(other.unevictable_memory),
}
}
}
/// Returns balloon stats of the crosvm instance whose control socket is listening on `socket_path`.
///
/// The parameters `stats` and `actual` are optional and will only be written to if they are
/// non-null.
///
/// The function returns true on success or false if an error occurred.
///
/// # Note
///
/// Entries in `BalloonStatsFfi` that are not available will be set to `-1`.
#[no_mangle]
pub extern "C" fn crosvm_client_balloon_stats(
    socket_path: *const c_char,
    stats: *mut BalloonStatsFfi,
    actual: *mut u64,
) -> bool {
    catch_unwind(|| {
        let socket_path = match validate_socket_path(socket_path) {
            Some(path) => path,
            None => return false,
        };
        let request = &VmRequest::BalloonCommand(BalloonControlCommand::Stats {});
        match handle_request(request, &socket_path) {
            Ok(VmResponse::BalloonStats {
                stats: ref balloon_stats,
                balloon_actual,
            }) => {
                if !stats.is_null() {
                    // SAFETY: the caller guarantees a non-null `stats` is writable.
                    unsafe {
                        *stats = balloon_stats.into();
                    }
                }
                if !actual.is_null() {
                    // SAFETY: the caller guarantees a non-null `actual` is writable.
                    unsafe {
                        *actual = balloon_actual;
                    }
                }
                true
            }
            _ => false,
        }
    })
    .unwrap_or(false)
}
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use crate::{atomic_histogram::*, cluster::Cluster, instance::Instance};
use std::{
env, fmt, slice,
sync::Arc,
time::{Duration, Instant},
};
use anyhow::{format_err, Result};
use diem_crypto::{
ed25519::{Ed25519PrivateKey, Ed25519PublicKey},
test_utils::KeyPair,
traits::Uniform,
};
use diem_logger::*;
use diem_types::{
account_address::AccountAddress,
account_config::{self, testnet_dd_account_address, XUS_NAME},
chain_id::ChainId,
transaction::{
authenticator::AuthenticationKey, helpers::create_user_txn, Script, TransactionPayload,
},
};
use itertools::zip;
use rand::{
prelude::ThreadRng,
rngs::{OsRng, StdRng},
seq::{IteratorRandom, SliceRandom},
Rng, SeedableRng,
};
use tokio::runtime::Handle;
use diem_client::{views::AmountView, Client as JsonRpcClient, MethodRequest};
use diem_types::{
account_config::{diem_root_address, treasury_compliance_account_address},
transaction::SignedTransaction,
};
use futures::future::{try_join_all, FutureExt};
use once_cell::sync::Lazy;
use std::{
cmp::{max, min},
ops::Sub,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
};
use tokio::{task::JoinHandle, time};
const MAX_TXN_BATCH_SIZE: usize = 100; // Max transactions per account in mempool
// Please keep 'MAX_CHILD_VASP_NUM' consistent with the 'MAX_CHILD_ACCOUNTS' constant in VASP.move
const MAX_CHILD_VASP_NUM: usize = 65536;
// Upper bound on the number of VASP seed accounts loaded from "vaspN.key" files.
const MAX_VASP_ACCOUNT_NUM: usize = 16;
// File name of the designated-dealer key read by `load_dd_account`.
const DD_KEY: &str = "dd.key";
/// Kinds of deliberately invalid transactions the emitter can produce.
#[derive(Debug)]
pub enum InvalidTxType {
    /// Invalid tx with a wrong chain id.
    ChainId,
    /// Invalid tx whose sender is not on chain.
    Sender,
    /// Invalid tx whose receiver is not on chain.
    Receiver,
    /// Last element of enum, please add new cases above.
    MaxValue,
}
/// Drives transaction submission against a cluster.
pub struct TxEmitter {
    // Accounts cached between jobs and handed out to submission workers.
    accounts: Vec<AccountData>,
    // Key pair used for minting; cloned from the cluster.
    mint_key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>,
    chain_id: ChainId,
    // When true, accounts are created via the VASP flow instead of the faucet.
    vasp: bool,
}
/// Handle to a running emit job: its workers, a stop flag, and shared stats.
pub struct EmitJob {
    workers: Vec<Worker>,
    // Set to true by `stop_job` to make workers exit.
    stop: Arc<AtomicBool>,
    stats: Arc<StatsAccumulator>,
}
/// Atomic counters shared between all submission workers of a job.
#[derive(Default)]
struct StatsAccumulator {
    submitted: AtomicU64,
    committed: AtomicU64,
    expired: AtomicU64,
    // Total latency; per-bucket distribution lives in `latencies`.
    latency: AtomicU64,
    latencies: Arc<AtomicHistogramAccumulator>,
}
/// Snapshot of accumulated transaction statistics for a job.
#[derive(Debug, Default)]
pub struct TxStats {
    pub submitted: u64,
    pub committed: u64,
    pub expired: u64,
    pub latency: u64,
    pub latency_buckets: AtomicHistogramSnapshot,
}
/// Per-second rates derived from a `TxStats` snapshot over a time window.
#[derive(Debug, Default)]
pub struct TxStatsRate {
    pub submitted: u64,
    pub committed: u64,
    pub expired: u64,
    pub latency: u64,
    pub p99_latency: u64,
}
/// Per-worker pacing parameters.
#[derive(Clone)]
pub struct EmitThreadParams {
    // Delay inserted between submissions, in milliseconds.
    pub wait_millis: u64,
    // Whether the worker waits for each transaction batch to commit.
    pub wait_committed: bool,
}
impl Default for EmitThreadParams {
fn default() -> Self {
Self {
wait_millis: 0,
wait_committed: true,
}
}
}
/// Configuration for starting an emit job.
#[derive(Clone)]
pub struct EmitJobRequest {
    pub instances: Vec<Instance>,
    pub accounts_per_client: usize,
    // Workers per instance; `None` lets `start_job` derive a value.
    pub workers_per_ac: Option<usize>,
    pub thread_params: EmitThreadParams,
    pub gas_price: u64,
    // Non-zero selects deliberate invalid-transaction emission.
    pub invalid_tx: u64,
}
// True when the REUSE_ACC environment variable is set; forwarded to account
// creation (presumably to reuse deterministic accounts — confirm against
// `create_new_accounts`).
pub static REUSE_ACC: Lazy<bool> = Lazy::new(|| env::var("REUSE_ACC").is_ok());
impl EmitJobRequest {
    /// Builds a request for `instances`, inheriting the account/worker settings
    /// from `global_emit_job_request` when one is provided.
    ///
    /// When `invalid_tx` is non-zero, workers do not wait for commits (invalid
    /// transactions never commit).
    pub fn for_instances(
        instances: Vec<Instance>,
        global_emit_job_request: &Option<EmitJobRequest>,
        gas_price: u64,
        invalid_tx: u64,
    ) -> Self {
        let mut req = match global_emit_job_request {
            Some(global_emit_job_request) => EmitJobRequest {
                instances,
                accounts_per_client: global_emit_job_request.accounts_per_client,
                workers_per_ac: global_emit_job_request.workers_per_ac,
                thread_params: global_emit_job_request.thread_params.clone(),
                gas_price,
                invalid_tx,
            },
            None => Self {
                instances,
                accounts_per_client: 15,
                workers_per_ac: None,
                thread_params: EmitThreadParams::default(),
                gas_price,
                invalid_tx,
            },
        };
        if invalid_tx != 0 {
            req.thread_params.wait_committed = false;
        }
        req
    }
    /// Returns `(workers_per_instance, wait_millis)` needed to reach `tps`
    /// across `instance_count` instances.
    ///
    /// # Panics
    ///
    /// Panics when `tps` is zero.
    pub fn fixed_tps_params(instance_count: usize, tps: u64) -> (usize, u64) {
        // `tps` is unsigned, so `< 1` is exactly `== 0`; the panic message
        // grammar is also fixed here ("can not less than").
        if tps == 0 {
            panic!("Target tps {} cannot be less than 1", tps)
        }
        let num_workers = tps as usize / instance_count + 1;
        let wait_time = (instance_count * num_workers * 1000_usize / tps as usize) as u64;
        (num_workers, wait_time)
    }
    /// Builds a request tuned to emit a fixed `tps` with one account per client.
    pub fn fixed_tps(instances: Vec<Instance>, tps: u64, gas_price: u64, invalid_tx: u64) -> Self {
        let (num_workers, wait_time) = EmitJobRequest::fixed_tps_params(instances.len(), tps);
        Self {
            instances,
            accounts_per_client: 1,
            workers_per_ac: Some(num_workers),
            thread_params: EmitThreadParams {
                wait_millis: wait_time,
                wait_committed: invalid_tx == 0,
            },
            gas_price,
            invalid_tx,
        }
    }
}
impl TxEmitter {
/// Creates an emitter minting with `cluster`'s mint key; `vasp` selects the
/// VASP account-creation flow.
pub fn new(cluster: &Cluster, vasp: bool) -> Self {
    Self {
        accounts: vec![],
        mint_key_pair: cluster.mint_key_pair().clone(),
        chain_id: cluster.chain_id,
        vasp,
    }
}
/// Removes and returns the first cached account.
///
/// Panics when no accounts are cached; `Vec::remove(0)` is O(n).
pub fn take_account(&mut self) -> AccountData {
    self.accounts.remove(0)
}
/// Drops all cached accounts.
pub fn clear(&mut self) {
    self.accounts.clear();
}
/// Picks a random instance to use for minting.
fn pick_mint_instance<'a, 'b>(&'a self, instances: &'b [Instance]) -> &'b Instance {
    let mut rng = ThreadRng::default();
    instances
        .choose(&mut rng)
        .expect("Instances can not be empty")
}
/// JSON-RPC client for a randomly chosen instance.
fn pick_mint_client(&self, instances: &[Instance]) -> JsonRpcClient {
    self.pick_mint_instance(instances).json_rpc_client()
}
/// Submits one transfer from `sender` to `receiver` via `instance` and
/// returns the deadline by which the transaction should be awaited.
pub async fn submit_single_transaction(
    &self,
    instance: &Instance,
    sender: &mut AccountData,
    receiver: &AccountAddress,
    num_coins: u64,
) -> Result<Instant> {
    let client = instance.json_rpc_client();
    client
        .submit(&gen_transfer_txn_request(
            sender,
            receiver,
            num_coins,
            self.chain_id,
            0, // gas price
        ))
        .await?;
    let deadline = Instant::now() + TXN_MAX_WAIT;
    Ok(deadline)
}
/// Starts an emit job: ensures enough accounts exist, then spawns
/// `workers_per_ac` submission workers per instance and returns a handle.
pub async fn start_job(&mut self, req: EmitJobRequest) -> Result<EmitJob> {
    let workers_per_ac = match req.workers_per_ac {
        Some(x) => x,
        None => {
            let target_threads = 300;
            // Trying to create somewhere between target_threads/2..target_threads threads.
            // We want equal numbers of threads per AC so they are equally loaded;
            // otherwise flamegraph/perf will show different numbers depending on which AC is chosen.
            // Also capped at 10 per AC for very small clusters or --peers runs.
            min(10, max(1, target_threads / req.instances.len()))
        }
    };
    let num_clients = req.instances.len() * workers_per_ac;
    info!(
        "Will use {} workers per AC with total {} AC clients",
        workers_per_ac, num_clients
    );
    let num_accounts = req.accounts_per_client * num_clients;
    if self.vasp {
        // The VASP flow can only create a bounded number of child accounts.
        assert!(
            num_accounts <= MAX_VASP_ACCOUNT_NUM * MAX_CHILD_VASP_NUM,
            "VASP only supports to create max {} child accounts, but try to create {} accounts",
            MAX_VASP_ACCOUNT_NUM * MAX_CHILD_VASP_NUM,
            num_accounts
        );
    }
    info!(
        "Will create {} accounts_per_client with total {} accounts",
        req.accounts_per_client, num_accounts
    );
    self.mint_accounts(&req, num_accounts).await?;
    let all_accounts = self.accounts.split_off(self.accounts.len() - num_accounts);
    let mut workers = vec![];
    let all_addresses: Vec<_> = all_accounts.iter().map(|d| d.address).collect();
    let all_addresses = Arc::new(all_addresses);
    let mut all_accounts = all_accounts.into_iter();
    let stop = Arc::new(AtomicBool::new(false));
    let stats = Arc::new(StatsAccumulator::default());
    let tokio_handle = Handle::current();
    for instance in &req.instances {
        for _ in 0..workers_per_ac {
            let client = instance.json_rpc_client();
            // Each worker receives its own disjoint slice of the accounts.
            let accounts = (&mut all_accounts).take(req.accounts_per_client).collect();
            let all_addresses = all_addresses.clone();
            let stop = stop.clone();
            let params = req.thread_params.clone();
            let stats = Arc::clone(&stats);
            let worker = SubmissionWorker {
                accounts,
                client,
                all_addresses,
                stop,
                params,
                stats,
                chain_id: self.chain_id,
                invalid_tx: req.invalid_tx,
            };
            let join_handle = tokio_handle.spawn(worker.run(req.gas_price).boxed());
            workers.push(Worker { join_handle });
        }
    }
    info!("Tx emitter workers started");
    Ok(EmitJob {
        workers,
        stop,
        stats,
    })
}
/// Loads the on-chain sequence number for `address` and pairs it with the
/// emitter's mint key.
async fn load_account_with_mint_key(
    &self,
    client: &JsonRpcClient,
    address: AccountAddress,
) -> Result<AccountData> {
    let sequence_number = query_sequence_numbers(&client, &[address])
        .await
        .map_err(|e| {
            format_err!(
                "query_sequence_numbers on {:?} for account {} failed: {}",
                client,
                address,
                e
            )
        })?[0];
    Ok(AccountData {
        address,
        key_pair: self.mint_key_pair.clone(),
        sequence_number,
    })
}
/// Loads the diem root account using the mint key.
pub async fn load_diem_root_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    self.load_account_with_mint_key(client, diem_root_address())
        .await
}
/// Loads the testnet designated-dealer (faucet) account using the mint key.
pub async fn load_faucet_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    self.load_account_with_mint_key(client, testnet_dd_account_address())
        .await
}
/// Loads the treasury compliance account using the mint key.
pub async fn load_tc_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    self.load_account_with_mint_key(client, treasury_compliance_account_address())
        .await
}
/// Loads the designated-dealer account from the `dd.key` file on disk.
pub async fn load_dd_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    let mint_key: Ed25519PrivateKey = generate_key::load_key(DD_KEY);
    let mint_key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey> = KeyPair::from(mint_key);
    let address = diem_types::account_address::from_public_key(&mint_key_pair.public_key);
    let sequence_number = query_sequence_numbers(&client, &[address])
        .await
        .map_err(|e| {
            format_err!(
                "query_sequence_numbers on {:?} for dd account failed: {}",
                client,
                e
            )
        })?[0];
    Ok(AccountData {
        address,
        key_pair: mint_key_pair.clone(),
        sequence_number,
    })
}
/// Loads the `index`-th VASP account from its "vasp<index>.key" file.
pub async fn load_vasp_account(
    &self,
    client: &JsonRpcClient,
    index: usize,
) -> Result<AccountData> {
    let file = "vasp".to_owned() + index.to_string().as_str() + ".key";
    let mint_key: Ed25519PrivateKey = generate_key::load_key(file);
    let mint_key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey> = KeyPair::from(mint_key);
    let address = diem_types::account_address::from_public_key(&mint_key_pair.public_key);
    let sequence_number = query_sequence_numbers(&client, &[address])
        .await
        .map_err(|e| {
            // NOTE(review): this message says "dd account" but the lookup is
            // for a VASP account — consider correcting the text.
            format_err!(
                "query_sequence_numbers on {:?} for dd account failed: {}",
                client,
                e
            )
        })?[0];
    Ok(AccountData {
        address,
        key_pair: mint_key_pair.clone(),
        sequence_number,
    })
}
/// Returns the account that funds everything: the faucet account (minted up
/// to `coins_total`) in the normal flow, or the DD account in the VASP flow.
pub async fn get_money_source(
    &self,
    instances: &[Instance],
    coins_total: u64,
) -> Result<AccountData> {
    let client = self.pick_mint_instance(instances).json_rpc_client();
    let faucet_account = if !self.vasp {
        info!("Creating and minting faucet account");
        let mut account = self.load_faucet_account(&client).await?;
        let mint_txn = gen_mint_request(&mut account, coins_total, self.chain_id);
        execute_and_wait_transactions(
            &mut self.pick_mint_client(instances),
            &mut account,
            vec![mint_txn],
        )
        .await
        .map_err(|e| format_err!("Failed to mint into faucet account: {}", e))?;
        account
    } else {
        info!("Loading faucet account from DD account");
        self.load_dd_account(&client).await?
    };
    // Log the XUS balance of the funding account for visibility.
    let balance = retrieve_account_balance(&client, faucet_account.address).await?;
    for b in balance {
        if b.currency.eq(XUS_NAME) {
            info!(
                "DD account current balances are {}, requested {} coins",
                b.amount, coins_total
            );
            break;
        }
    }
    Ok(faucet_account)
}
/// Returns seed accounts used to fan out account creation: freshly created
/// ones in the normal flow, or VASP accounts loaded from key files.
pub async fn get_seed_accounts(
    &self,
    instances: &[Instance],
    seed_account_num: usize,
) -> Result<Vec<AccountData>> {
    let client = self.pick_mint_instance(instances).json_rpc_client();
    let seed_accounts = if !self.vasp {
        info!("Creating and minting seeds accounts");
        let mut account = self.load_tc_account(&client).await?;
        let seed_accounts = create_seed_accounts(
            &mut account,
            seed_account_num,
            100, // batch size
            self.pick_mint_client(instances),
            self.chain_id,
        )
        .await
        .map_err(|e| format_err!("Failed to create seed accounts: {}", e))?;
        info!("Completed creating seed accounts");
        seed_accounts
    } else {
        let mut seed_accounts = vec![];
        info!("Loading VASP account as seed accounts");
        // Only MAX_VASP_ACCOUNT_NUM key files exist, so clamp the count.
        let load_account_num = min(seed_account_num, MAX_VASP_ACCOUNT_NUM);
        for i in 0..load_account_num {
            let account = self.load_vasp_account(&client, i).await?;
            seed_accounts.push(account);
        }
        info!("Loaded {} VASP accounts", seed_accounts.len());
        seed_accounts
    };
    Ok(seed_accounts)
}
/// Ensures at least `requested_accounts` funded accounts are cached, minting
/// new ones via seed accounts when needed.
pub async fn mint_accounts(
    &mut self,
    req: &EmitJobRequest,
    requested_accounts: usize,
) -> Result<()> {
    if self.accounts.len() >= requested_accounts {
        info!("Not minting accounts");
        return Ok(()); // Early return to skip printing 'Minting ...' logs
    }
    // One seed account per instance unless that would exceed the per-seed
    // child-account limit, in which case add seeds to stay under it.
    let expected_num_seed_accounts =
        if requested_accounts / req.instances.len() > MAX_CHILD_VASP_NUM {
            requested_accounts / MAX_CHILD_VASP_NUM + 1
        } else {
            req.instances.len()
        };
    let num_accounts = requested_accounts - self.accounts.len(); // Only minting extra accounts
    let coins_per_account = (SEND_AMOUNT + req.gas_price) * MAX_TXNS;
    let coins_total = coins_per_account * num_accounts as u64;
    let mut faucet_account = self.get_money_source(&req.instances, coins_total).await?;
    // Create seed accounts with which we can create actual accounts concurrently
    let seed_accounts = self
        .get_seed_accounts(&req.instances, expected_num_seed_accounts)
        .await?;
    let actual_num_seed_accounts = seed_accounts.len();
    // Ceiling division: children per seed account.
    let num_new_child_accounts =
        (num_accounts + actual_num_seed_accounts - 1) / actual_num_seed_accounts;
    let coins_per_seed_account = coins_per_account * num_new_child_accounts as u64;
    mint_to_new_accounts(
        &mut faucet_account,
        &seed_accounts,
        coins_per_seed_account as u64,
        100, // batch size
        self.pick_mint_client(&req.instances),
        self.chain_id,
    )
    .await
    .map_err(|e| format_err!("Failed to mint seed_accounts: {}", e))?;
    info!("Completed minting seed accounts");
    info!("Minting additional {} accounts", num_accounts);
    let seed_rngs = gen_rng_for_reusable_account(actual_num_seed_accounts);
    // For each seed account, create a future and transfer diem from that seed account to new accounts
    let account_futures = seed_accounts
        .into_iter()
        .enumerate()
        .map(|(i, seed_account)| {
            // Spread seed accounts round-robin over the instances.
            let index = i % req.instances.len();
            let instance = req.instances[index].clone();
            let client = instance.json_rpc_client();
            create_new_accounts(
                seed_account,
                num_new_child_accounts,
                coins_per_account,
                20, // batch size
                client,
                self.chain_id,
                self.vasp || *REUSE_ACC,
                seed_rngs[i].clone(),
            )
        });
    let mut minted_accounts = try_join_all(account_futures)
        .await
        .map_err(|e| format_err!("Failed to mint accounts {}", e))?
        .into_iter()
        .flatten()
        .collect();
    self.accounts.append(&mut minted_accounts);
    assert!(
        self.accounts.len() >= num_accounts,
        "Something wrong in mint_account, wanted to mint {}, only have {}",
        requested_accounts,
        self.accounts.len()
    );
    info!("Mint is done");
    Ok(())
}
/// Snapshot the job's transaction statistics without stopping it.
pub fn peek_job_stats(&self, job: &EmitJob) -> TxStats {
    let accumulator = &job.stats;
    accumulator.accumulate()
}
pub async fn stop_job(&mut self, job: EmitJob) -> TxStats {
job.stop.store(true, Ordering::Relaxed);
for worker in job.workers {
let mut accounts = worker
.join_handle
.await
.expect("TxEmitter worker thread failed");
self.accounts.append(&mut accounts);
}
job.stats.accumulate()
}
/// Log the job's transaction rate every `interval_secs` until `duration`
/// has elapsed.
pub async fn periodic_stat(&mut self, job: &EmitJob, duration: Duration, interval_secs: u64) {
    // The sampling window is constant, so compute it once up front.
    let window = Duration::from_secs(interval_secs);
    let deadline = Instant::now() + duration;
    let mut prev_stats: Option<TxStats> = None;
    while Instant::now() < deadline {
        tokio::time::sleep(window).await;
        let stats = self.peek_job_stats(job);
        let delta = &stats - &prev_stats.unwrap_or_default();
        prev_stats = Some(stats);
        info!("{}", delta.rate(window));
    }
}
/// Run an emit job for `duration`, then stop it and return the stats.
pub async fn emit_txn_for(
    &mut self,
    duration: Duration,
    emit_job_request: EmitJobRequest,
) -> Result<TxStats> {
    let job = self.start_job(emit_job_request).await?;
    tokio::time::sleep(duration).await;
    Ok(self.stop_job(job).await)
}
/// Run an emit job for `duration`, logging periodic stats every
/// `interval_secs`, then stop it and return the final stats.
pub async fn emit_txn_for_with_stats(
    &mut self,
    duration: Duration,
    emit_job_request: EmitJobRequest,
    interval_secs: u64,
) -> Result<TxStats> {
    let job = self.start_job(emit_job_request).await?;
    self.periodic_stat(&job, duration, interval_secs).await;
    Ok(self.stop_job(job).await)
}
/// Fetch the on-chain sequence number of `address` through `instance`'s
/// JSON-RPC endpoint. Errors if the RPC fails or the account is absent.
pub async fn query_sequence_numbers(
    &self,
    instance: &Instance,
    address: &AccountAddress,
) -> Result<u64> {
    let client = instance.json_rpc_client();
    let account_view = client
        .get_account(*address)
        .await
        .map_err(|e| format_err!("[{:?}] get_accounts failed: {:?} ", client, e))?
        .into_inner();
    match account_view {
        Some(view) => Ok(view.sequence_number),
        None => Err(format_err!("account does not exist")),
    }
}
}
/// Handle to a spawned submission worker; joining it yields the accounts the
/// worker owned so they can be returned to the emitter.
struct Worker {
    join_handle: JoinHandle<Vec<AccountData>>,
}
/// Per-client worker state: the sender accounts it owns, the JSON-RPC client
/// it submits through, and shared control/stats handles.
struct SubmissionWorker {
    accounts: Vec<AccountData>,
    client: JsonRpcClient,
    // Receiver candidates: every account address participating in the job.
    all_addresses: Arc<Vec<AccountAddress>>,
    // Set by the job owner to request shutdown.
    stop: Arc<AtomicBool>,
    params: EmitThreadParams,
    stats: Arc<StatsAccumulator>,
    chain_id: ChainId,
    // Percentage (0-100) of invalid transactions to mix in; 0 disables.
    invalid_tx: u64,
}
/// Randomly pick which kind of invalid transaction to generate.
///
/// `gen_range(0, MaxValue as usize)` draws from 0..4, but only indexes 1 and
/// 2 map to dedicated variants; both 0 and 3 fall through to `ChainId`.
/// NOTE(review): the `Duplication` variant (index 3) is therefore never
/// returned here — confirm whether that is intentional.
fn get_invalid_type() -> InvalidTxType {
    let mut rng = rand::thread_rng();
    match rng.gen_range(0, InvalidTxType::MaxValue as usize) {
        1 => InvalidTxType::Receiver,
        2 => InvalidTxType::Sender,
        _ => InvalidTxType::ChainId,
    }
}
/// Build a transfer transaction that is invalid in one randomly chosen way:
/// unknown receiver, unknown (unfunded) sender, or wrong chain id.
///
/// Note: the `Receiver`/`ChainId` cases still advance `sender`'s local
/// sequence number through `gen_transfer_txn_request`.
fn invalid_tx(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    chain_id: ChainId,
    gas_price: u64,
) -> SignedTransaction {
    // Fresh random account/address that does not exist on chain.
    let seed: [u8; 32] = OsRng.gen();
    let mut rng = StdRng::from_seed(seed);
    let mut invalid_account = gen_random_account(&mut rng);
    let invalid_address = gen_random_account(&mut rng).address;
    match get_invalid_type() {
        InvalidTxType::Receiver => {
            gen_transfer_txn_request(sender, &invalid_address, SEND_AMOUNT, chain_id, gas_price)
        }
        InvalidTxType::Sender => gen_transfer_txn_request(
            &mut invalid_account,
            receiver,
            SEND_AMOUNT,
            chain_id,
            gas_price,
        ),
        InvalidTxType::ChainId => {
            // Chain id 255 does not match the target network.
            gen_transfer_txn_request(sender, receiver, SEND_AMOUNT, ChainId::new(255), gas_price)
        }
        // get_invalid_type never returns Duplication/MaxValue today.
        _ => panic!("wrong invalid type"),
    }
}
/// Worker task that owns a slice of accounts and drives transaction
/// submission against a single JSON-RPC client.
impl SubmissionWorker {
    /// Main loop: each iteration generates one batch of requests, submits
    /// them all, optionally waits for commitment (recording latency/expiry
    /// stats), then sleeps out the remainder of `params.wait_millis`.
    /// Returns the owned accounts once `stop` is observed.
    #[allow(clippy::collapsible_if)]
    async fn run(mut self, gas_price: u64) -> Vec<AccountData> {
        let wait = Duration::from_millis(self.params.wait_millis);
        while !self.stop.load(Ordering::Relaxed) {
            let requests = self.gen_requests(gas_price);
            let num_requests = requests.len();
            let start_time = Instant::now();
            let wait_util = start_time + wait;
            // Sum of per-request submission offsets (ms since batch start);
            // its mean is subtracted below to approximate per-tx latency.
            let mut tx_offset_time = 0u64;
            for request in requests {
                let cur_time = Instant::now();
                tx_offset_time += (cur_time - start_time).as_millis() as u64;
                self.stats.submitted.fetch_add(1, Ordering::Relaxed);
                let resp = self.client.submit(&request).await;
                if let Err(e) = resp {
                    // Submission failures are logged, not retried here.
                    warn!("[{:?}] Failed to submit request: {:?}", self.client, e);
                }
            }
            if self.params.wait_committed {
                if let Err(uncommitted) =
                    wait_for_accounts_sequence(&self.client, &mut self.accounts).await
                {
                    // Timed out: credit only the committed subset.
                    let end_time = (Instant::now() - start_time).as_millis() as u64;
                    let num_committed = (num_requests - uncommitted.len()) as u64;
                    let latency = end_time - tx_offset_time / num_requests as u64;
                    self.stats
                        .committed
                        .fetch_add(num_committed, Ordering::Relaxed);
                    self.stats
                        .expired
                        .fetch_add(uncommitted.len() as u64, Ordering::Relaxed);
                    self.stats.latency.fetch_add(
                        // To avoid negative result caused by uncommitted tx occur
                        // Simplified from:
                        // end_time * num_committed - (tx_offset_time/num_requests) * num_committed
                        // to
                        // (end_time - tx_offset_time / num_requests) * num_committed
                        latency * num_committed as u64,
                        Ordering::Relaxed,
                    );
                    self.stats
                        .latencies
                        .record_data_point(latency, num_committed);
                    info!(
                        "[{:?}] Transactions were not committed before expiration: {:?}",
                        self.client, uncommitted
                    );
                } else {
                    // Everything committed: credit the full batch with one latency.
                    let end_time = (Instant::now() - start_time).as_millis() as u64;
                    let latency = end_time - tx_offset_time / num_requests as u64;
                    self.stats
                        .committed
                        .fetch_add(num_requests as u64, Ordering::Relaxed);
                    self.stats
                        .latency
                        .fetch_add(latency * num_requests as u64, Ordering::Relaxed);
                    self.stats
                        .latencies
                        .record_data_point(latency, num_requests as u64);
                }
            }
            // Pace the loop so iterations are at least `wait` apart.
            let now = Instant::now();
            if wait_util > now {
                time::sleep(wait_util - now).await;
            }
        }
        self.accounts
    }
    /// Build one batch of signed transactions: every selected sender sends
    /// SEND_AMOUNT to a random receiver. When `invalid_tx` is non-zero, a
    /// proportional prefix of the batch (at least one) is intentionally
    /// invalid.
    fn gen_requests(&mut self, gas_price: u64) -> Vec<SignedTransaction> {
        let mut rng = ThreadRng::default();
        // NOTE: `max` (not `min`) — the requested sample size is at least
        // MAX_TXN_BATCH_SIZE; `choose_multiple` returns at most the number
        // of accounts actually available.
        let batch_size = max(MAX_TXN_BATCH_SIZE, self.accounts.len());
        let accounts = self
            .accounts
            .iter_mut()
            .choose_multiple(&mut rng, batch_size);
        let mut requests = Vec::with_capacity(accounts.len());
        let mut invalid_size = if self.invalid_tx != 0 {
            // if enable mix invalid tx, at least 1 invalid tx per batch
            max(1, accounts.len() * self.invalid_tx as usize / 100)
        } else {
            0
        };
        for sender in accounts {
            let receiver = self
                .all_addresses
                .choose(&mut rng)
                .expect("all_addresses can't be empty");
            if invalid_size > 0 {
                let request = invalid_tx(sender, receiver, self.chain_id, gas_price);
                requests.push(request);
                invalid_size -= 1;
            } else {
                let request = gen_transfer_txn_request(
                    sender,
                    receiver,
                    SEND_AMOUNT,
                    self.chain_id,
                    gas_price,
                );
                requests.push(request);
            }
        }
        requests
    }
}
/// Poll the chain until every account's on-chain sequence number matches its
/// locally tracked one, or `TXN_MAX_WAIT` elapses.
///
/// On timeout, returns `Err` with the (address, on-chain sequence) pairs
/// that did not catch up, and resets each lagging account's local sequence
/// number to the on-chain value so subsequent batches stay consistent.
async fn wait_for_accounts_sequence(
    client: &JsonRpcClient,
    accounts: &mut [AccountData],
) -> Result<(), Vec<(AccountAddress, u64)>> {
    let deadline = Instant::now() + TXN_MAX_WAIT;
    let addresses: Vec<_> = accounts.iter().map(|d| d.address).collect();
    loop {
        match query_sequence_numbers(client, &addresses).await {
            Err(e) => {
                // Query failures are treated as transient: log and retry.
                info!(
                    "Failed to query ledger info on accounts {:?} for instance {:?} : {:?}",
                    addresses, client, e
                );
                time::sleep(Duration::from_millis(300)).await;
            }
            Ok(sequence_numbers) => {
                if is_sequence_equal(accounts, &sequence_numbers) {
                    break;
                }
                let mut uncommitted = vec![];
                if Instant::now() > deadline {
                    for (account, sequence_number) in zip(accounts, &sequence_numbers) {
                        if account.sequence_number != *sequence_number {
                            warn!("Wait deadline exceeded for account {}, expected sequence {}, got from server: {}", account.address, account.sequence_number, sequence_number);
                            uncommitted.push((account.address, *sequence_number));
                            // Resync local state with the chain.
                            account.sequence_number = *sequence_number;
                        }
                    }
                    return Err(uncommitted);
                }
            }
        }
        time::sleep(Duration::from_millis(100)).await;
    }
    Ok(())
}
/// True when every account's locally tracked sequence number equals the
/// corresponding entry of `sequence_numbers` (compared pairwise, in order).
fn is_sequence_equal(accounts: &[AccountData], sequence_numbers: &[u64]) -> bool {
    zip(accounts, sequence_numbers)
        .all(|(account, sequence_number)| account.sequence_number == *sequence_number)
}
/// Fetch sequence numbers for `addresses`, batching JSON-RPC `get_account`
/// calls 20 at a time. Results come back in input order; errors if any RPC
/// fails or any queried account does not exist.
async fn query_sequence_numbers(
    client: &JsonRpcClient,
    addresses: &[AccountAddress],
) -> Result<Vec<u64>> {
    let mut result = vec![];
    for addresses_batch in addresses.chunks(20) {
        let resp = client
            .batch(
                addresses_batch
                    .iter()
                    .map(|a| MethodRequest::get_account(*a))
                    .collect(),
            )
            .await?
            .into_iter()
            // Short-circuit on the first per-request error in the batch.
            .map(|r| r.map_err(anyhow::Error::new))
            .map(|r| r.map(|response| response.into_inner().unwrap_get_account()))
            .collect::<Result<Vec<_>>>()
            .map_err(|e| format_err!("[{:?}] get_accounts failed: {:?} ", client, e))?;
        for item in resp.into_iter() {
            result.push(
                item.ok_or_else(|| format_err!("account does not exist"))?
                    .sequence_number,
            );
        }
    }
    Ok(result)
}
// Gas/transaction parameters shared by all generated transactions.
const MAX_GAS_AMOUNT: u64 = 1_000_000;
const GAS_CURRENCY_CODE: &str = XUS_NAME;
const TXN_EXPIRATION_SECONDS: i64 = 50;
// Wait a bit past expiration before declaring transactions uncommitted.
const TXN_MAX_WAIT: Duration = Duration::from_secs(TXN_EXPIRATION_SECONDS as u64 + 30);
// Number of transactions each account is funded for.
const MAX_TXNS: u64 = 1_000_000;
// Coins moved per transfer transaction.
const SEND_AMOUNT: u64 = 1;
/// Return the balances (one per currency) of `address`, erroring if the RPC
/// fails or the account does not exist.
async fn retrieve_account_balance(
    client: &JsonRpcClient,
    address: AccountAddress,
) -> Result<Vec<AmountView>> {
    let account_view = client
        .get_account(address)
        .await
        .map_err(|e| format_err!("[{:?}] get_accounts failed: {:?} ", client, e))?
        .into_inner();
    match account_view {
        Some(view) => Ok(view.balances),
        None => Err(format_err!("account does not exist")),
    }
}
/// Sign `script` as a user transaction from `sender_account` and bump the
/// account's local sequence number.
///
/// Side effect: `sender_account.sequence_number` is incremented even though
/// the transaction has not been submitted yet — callers must resynchronize
/// on failure (see `wait_for_accounts_sequence`).
pub fn gen_submit_transaction_request(
    script: Script,
    sender_account: &mut AccountData,
    chain_id: ChainId,
    gas_price: u64,
) -> SignedTransaction {
    let transaction = create_user_txn(
        &sender_account.key_pair,
        TransactionPayload::Script(script),
        sender_account.address,
        sender_account.sequence_number,
        MAX_GAS_AMOUNT,
        gas_price,
        GAS_CURRENCY_CODE.to_owned(),
        TXN_EXPIRATION_SECONDS,
        chain_id,
    )
    .expect("Failed to create signed transaction");
    sender_account.sequence_number += 1;
    transaction
}
/// Build a funding transaction that sends `num_coins` XUS from
/// `faucet_account` to its own address, at gas price 0.
/// NOTE(review): the self-transfer appears intentional for the testnet
/// designated-dealer mint flow — confirm against the faucet design.
fn gen_mint_request(
    faucet_account: &mut AccountData,
    num_coins: u64,
    chain_id: ChainId,
) -> SignedTransaction {
    // Capture the address before the mutable borrow below.
    let receiver = faucet_account.address;
    gen_submit_transaction_request(
        transaction_builder::encode_peer_to_peer_with_metadata_script(
            account_config::xus_tag(),
            receiver,
            num_coins,
            vec![],
            vec![],
        ),
        faucet_account,
        chain_id,
        0,
    )
}
/// Build a peer-to-peer XUS transfer of `num_coins` from `sender` to
/// `receiver` at the given `gas_price` (advances `sender`'s local sequence
/// number).
pub fn gen_transfer_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    num_coins: u64,
    chain_id: ChainId,
    gas_price: u64,
) -> SignedTransaction {
    let script = transaction_builder::encode_peer_to_peer_with_metadata_script(
        account_config::xus_tag(),
        *receiver,
        num_coins,
        vec![],
        vec![],
    );
    gen_submit_transaction_request(script, sender, chain_id, gas_price)
}
/// Build a transaction creating a child VASP account at `receiver`, funded
/// with `num_coins` XUS, at gas price 0.
fn gen_create_child_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    receiver_auth_key_prefix: Vec<u8>,
    num_coins: u64,
    chain_id: ChainId,
) -> SignedTransaction {
    let script = transaction_builder::encode_create_child_vasp_account_script(
        account_config::xus_tag(),
        *receiver,
        receiver_auth_key_prefix,
        false, // add_all_currencies
        num_coins,
    );
    gen_submit_transaction_request(script, sender, chain_id, 0)
}
/// Build a transaction creating a parent VASP account at `receiver`, at gas
/// price 0.
fn gen_create_account_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    auth_key_prefix: Vec<u8>,
    chain_id: ChainId,
) -> SignedTransaction {
    let script = transaction_builder::encode_create_parent_vasp_account_script(
        account_config::xus_tag(),
        0,
        *receiver,
        auth_key_prefix,
        vec![],
        false,
    );
    gen_submit_transaction_request(script, sender, chain_id, 0)
}
fn gen_mint_txn_request(
sender: &mut AccountData,
receiver: &AccountAddress,
num_coins: u64,
chain_id: ChainId,
) -> SignedTransaction {
gen_submit_transaction_request(
transaction_builder::encode_peer_to_peer_with_metadata_script(
account_config::xus_tag(),
*receiver,
num_coins,
vec![],
vec![],
),
sender,
chain_id,
0,
)
}
/// Create a fresh local account (sequence number 0) with a key pair drawn
/// from `rng`; the address is derived from the public key.
fn gen_random_account(rng: &mut StdRng) -> AccountData {
    let key_pair = KeyPair::generate(rng);
    let address = diem_types::account_address::from_public_key(&key_pair.public_key);
    AccountData {
        address,
        key_pair,
        sequence_number: 0,
    }
}
/// Create `num_accounts` fresh local accounts from a single OS-seeded RNG.
fn gen_random_accounts(num_accounts: usize) -> Vec<AccountData> {
    let seed: [u8; 32] = OsRng.gen();
    let mut rng = StdRng::from_seed(seed);
    let mut accounts = Vec::with_capacity(num_accounts);
    for _ in 0..num_accounts {
        accounts.push(gen_random_account(&mut rng));
    }
    accounts
}
/// Build one deterministic RNG per seed account so that "reusable" accounts
/// derive from identical seeds on every run (creation and reuse must
/// generate the same keys). The seed bytes must not change, or previously
/// created reusable accounts become unreachable.
///
/// NOTE(review): only the last seed byte varies (`i as u8`), so RNGs repeat
/// every 256 seed accounts — confirm `count` stays below 256 in practice.
fn gen_rng_for_reusable_account(count: usize) -> Vec<StdRng> {
    // use same seed for reuse account creation and reuse
    let mut seed = [
        0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
        0, 0,
    ];
    let mut rngs = vec![];
    for i in 0..count {
        seed[31] = i as u8;
        rngs.push(StdRng::from_seed(seed));
    }
    rngs
}
/// Derive a deterministic account from `rng` and load its current on-chain
/// sequence number; if the account does not exist yet (or the query fails),
/// start from sequence number 0 so it can be created.
async fn gen_reusable_account(client: &JsonRpcClient, rng: &mut StdRng) -> Result<AccountData> {
    let mint_key_pair = KeyPair::generate(rng);
    let address = diem_types::account_address::from_public_key(&mint_key_pair.public_key);
    // Best effort: a missing account is expected on first use, so any error
    // falls back to a fresh sequence number rather than propagating.
    let sequence_number = match query_sequence_numbers(client, &[address]).await {
        Ok(v) => v[0],
        Err(_) => 0,
    };
    Ok(AccountData {
        address,
        // Moved in directly; the previous `.clone()` was redundant (the
        // local was never used again) and the `&client` borrow was needless.
        key_pair: mint_key_pair,
        sequence_number,
    })
}
/// Derive `num_accounts` reusable accounts sequentially from `rng`.
/// Sequential (not concurrent) so the RNG stream — and thus the derived
/// keys — is reproducible across runs.
async fn gen_reusable_accounts(
    client: &JsonRpcClient,
    num_accounts: usize,
    rng: &mut StdRng,
) -> Result<Vec<AccountData>> {
    // Idiom: `for` over a range replaces the manual `while` counter, and the
    // final size is known up front, so preallocate.
    let mut vasp_accounts = Vec::with_capacity(num_accounts);
    for _ in 0..num_accounts {
        vasp_accounts.push(gen_reusable_account(client, rng).await?);
    }
    Ok(vasp_accounts)
}
/// Build one create-child-VASP transaction per entry of `accounts`, all
/// signed by `source_account` (whose sequence number advances per request).
fn gen_create_child_txn_requests(
    source_account: &mut AccountData,
    accounts: &[AccountData],
    amount: u64,
    chain_id: ChainId,
) -> Vec<SignedTransaction> {
    let mut requests = Vec::with_capacity(accounts.len());
    for account in accounts {
        requests.push(gen_create_child_txn_request(
            source_account,
            &account.address,
            account.auth_key_prefix(),
            amount,
            chain_id,
        ));
    }
    requests
}
/// Build one create-parent-VASP transaction per entry of `accounts`, all
/// signed by `sending_account`.
fn gen_account_creation_txn_requests(
    sending_account: &mut AccountData,
    accounts: &[AccountData],
    chain_id: ChainId,
) -> Vec<SignedTransaction> {
    let mut requests = Vec::with_capacity(accounts.len());
    for account in accounts {
        requests.push(gen_create_account_txn_request(
            sending_account,
            &account.address,
            account.auth_key_prefix(),
            chain_id,
        ));
    }
    requests
}
/// Build one funding transfer of `amount` per entry of `accounts`, all
/// signed by `sending_account`.
fn gen_mint_txn_requests(
    sending_account: &mut AccountData,
    accounts: &[AccountData],
    amount: u64,
    chain_id: ChainId,
) -> Vec<SignedTransaction> {
    let mut requests = Vec::with_capacity(accounts.len());
    for account in accounts {
        requests.push(gen_mint_txn_request(
            sending_account,
            &account.address,
            amount,
            chain_id,
        ));
    }
    requests
}
/// Submit `txn` one by one (each with retries) through `client`, then block
/// until `account`'s on-chain sequence number catches up to the local one.
///
/// Uses `fixed_retry_strategy(5_000, 20)` per transaction — presumably a 5s
/// interval with 20 attempts; verify against diem_retrier.
/// NOTE(review): `account.sequence_number - txn.len()` assumes the local
/// sequence number was already advanced for every element of `txn`; it
/// underflows (panics in debug) otherwise.
pub async fn execute_and_wait_transactions(
    client: &mut JsonRpcClient,
    account: &mut AccountData,
    txn: Vec<SignedTransaction>,
) -> Result<()> {
    debug!(
        "[{:?}] Submitting transactions {} - {} for {}",
        client,
        account.sequence_number - txn.len() as u64,
        account.sequence_number,
        account.address
    );
    for request in txn {
        diem_retrier::retry_async(diem_retrier::fixed_retry_strategy(5_000, 20), || {
            // Clone per attempt: the closure may run multiple times.
            let request = request.clone();
            let c = client.clone();
            let client_name = format!("{:?}", client);
            Box::pin(async move {
                let txn_str = format!("{}::{}", request.sender(), request.sequence_number());
                debug!("Submitting txn {}", txn_str);
                let resp = c.submit(&request).await;
                debug!("txn {} status: {:?}", txn_str, resp);
                resp.map_err(|e| format_err!("[{}] Failed to submit request: {:?}", client_name, e))
            })
        })
        .await?;
    }
    let r = wait_for_accounts_sequence(client, slice::from_mut(account))
        .await
        .map_err(|_| format_err!("Mint transactions were not committed before expiration"));
    debug!(
        "[{:?}] Account {} is at sequence number {} now",
        client, account.address, account.sequence_number
    );
    r
}
/// Create `num_new_accounts` by transferring diem from `source_account`. Return Vec of created
/// accounts
///
/// Works in batches bounded by `max_num_accounts_per_batch` and
/// `MAX_TXN_BATCH_SIZE`. When `reuse_account` is set, batch keys are derived
/// deterministically from `rng`, so accounts already on chain are reloaded
/// (with their current sequence numbers) instead of duplicated.
async fn create_new_accounts(
    mut source_account: AccountData,
    num_new_accounts: usize,
    diem_per_new_account: u64,
    max_num_accounts_per_batch: u64,
    mut client: JsonRpcClient,
    chain_id: ChainId,
    reuse_account: bool,
    mut rng: StdRng,
) -> Result<Vec<AccountData>> {
    let mut i = 0;
    let mut accounts = vec![];
    while i < num_new_accounts {
        let batch_size = min(
            max_num_accounts_per_batch as usize,
            min(MAX_TXN_BATCH_SIZE, num_new_accounts - i),
        );
        let mut batch = if reuse_account {
            info!("loading {} accounts if they exist", batch_size);
            gen_reusable_accounts(&client, batch_size, &mut rng).await?
        } else {
            gen_random_accounts(batch_size)
        };
        let requests = gen_create_child_txn_requests(
            &mut source_account,
            &batch,
            diem_per_new_account,
            chain_id,
        );
        execute_and_wait_transactions(&mut client, &mut source_account, requests).await?;
        i += batch.len();
        accounts.append(&mut batch);
    }
    Ok(accounts)
}
/// Create `num_new_accounts`. Return Vec of created accounts
///
/// Seed accounts are parent VASP accounts created in batches by
/// `creation_account` (bounded by `max_num_accounts_per_batch` and
/// `MAX_TXN_BATCH_SIZE`).
async fn create_seed_accounts(
    creation_account: &mut AccountData,
    num_new_accounts: usize,
    max_num_accounts_per_batch: u64,
    mut client: JsonRpcClient,
    chain_id: ChainId,
) -> Result<Vec<AccountData>> {
    let mut i = 0;
    let mut accounts = vec![];
    while i < num_new_accounts {
        let mut batch = gen_random_accounts(min(
            max_num_accounts_per_batch as usize,
            min(MAX_TXN_BATCH_SIZE, num_new_accounts - i),
        ));
        let create_requests = gen_account_creation_txn_requests(creation_account, &batch, chain_id);
        execute_and_wait_transactions(&mut client, creation_account, create_requests).await?;
        i += batch.len();
        accounts.append(&mut batch);
    }
    Ok(accounts)
}
/// Mint `diem_per_new_account` from `minting_account` to each account in `accounts`.
///
/// Batch sizes are randomized: `gen::<usize>() % bound` yields 0..bound and
/// `split_at(batch_size + 1)` consumes 1..=bound accounts per round, so the
/// loop always terminates. The bound never reaches zero while `left` is
/// non-empty because `num_accounts - i == left.len() >= 1`.
async fn mint_to_new_accounts(
    minting_account: &mut AccountData,
    accounts: &[AccountData],
    diem_per_new_account: u64,
    max_num_accounts_per_batch: u64,
    mut client: JsonRpcClient,
    chain_id: ChainId,
) -> Result<()> {
    let mut left = accounts;
    let mut i = 0;
    let num_accounts = accounts.len();
    while !left.is_empty() {
        let batch_size = OsRng.gen::<usize>()
            % min(
                max_num_accounts_per_batch as usize,
                min(MAX_TXN_BATCH_SIZE, num_accounts - i),
            );
        let (to_batch, rest) = left.split_at(batch_size + 1);
        let mint_requests =
            gen_mint_txn_requests(minting_account, to_batch, diem_per_new_account, chain_id);
        execute_and_wait_transactions(&mut client, minting_account, mint_requests).await?;
        i += to_batch.len();
        left = rest;
    }
    Ok(())
}
/// Locally tracked account state: on-chain address, signing key pair, and
/// the next sequence number this process believes the account will use.
#[derive(Clone)]
pub struct AccountData {
    pub address: AccountAddress,
    pub key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>,
    pub sequence_number: u64,
}
impl AccountData {
    /// Authentication-key prefix derived from this account's Ed25519 public
    /// key, as required by the account-creation transaction scripts.
    pub fn auth_key_prefix(&self) -> Vec<u8> {
        let auth_key = AuthenticationKey::ed25519(&self.key_pair.public_key);
        auth_key.prefix().to_vec()
    }
}
impl StatsAccumulator {
    /// Snapshot the counters (each loaded individually with relaxed
    /// ordering) into an owned `TxStats`.
    pub fn accumulate(&self) -> TxStats {
        TxStats {
            submitted: self.submitted.load(Ordering::Relaxed),
            committed: self.committed.load(Ordering::Relaxed),
            expired: self.expired.load(Ordering::Relaxed),
            latency: self.latency.load(Ordering::Relaxed),
            latency_buckets: self.latencies.snapshot(),
        }
    }
}
impl TxStats {
    /// Convert absolute counters into per-second rates over `window`, plus
    /// average latency (ms) and the 99th-percentile latency.
    ///
    /// NOTE(review): divides by `window.as_secs()` — a sub-second window
    /// would divide by zero; confirm callers always pass windows >= 1s.
    pub fn rate(&self, window: Duration) -> TxStatsRate {
        TxStatsRate {
            submitted: self.submitted / window.as_secs(),
            committed: self.committed / window.as_secs(),
            expired: self.expired / window.as_secs(),
            // Guard the average: nothing committed means no latency data.
            latency: if self.committed == 0 {
                0u64
            } else {
                self.latency / self.committed
            },
            p99_latency: self.latency_buckets.percentile(99, 100),
        }
    }
}
/// Pointwise difference of two snapshots (used for interval deltas).
/// Assumes `self` is a later snapshot than `other`; the counters only grow,
/// so the unsigned subtractions do not underflow in that case.
impl Sub for &TxStats {
    type Output = TxStats;
    fn sub(self, other: &TxStats) -> TxStats {
        TxStats {
            submitted: self.submitted - other.submitted,
            committed: self.committed - other.committed,
            expired: self.expired - other.expired,
            latency: self.latency - other.latency,
            latency_buckets: &self.latency_buckets - &other.latency_buckets,
        }
    }
}
impl fmt::Display for TxStats {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"submitted: {}, committed: {}, expired: {}",
self.submitted, self.committed, self.expired,
)
}
}
impl fmt::Display for TxStatsRate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"submitted: {} txn/s, committed: {} txn/s, expired: {} txn/s, latency: {} ms, p99 latency: {} ms",
self.submitted, self.committed, self.expired, self.latency, self.p99_latency,
)
}
}
#[cfg(test)]
mod test {
    use crate::tx_emitter::EmitJobRequest;
    // Checks the worker/wait math:
    // num_workers = tps / instances + 1; wait = instances * workers * 1000 / tps.
    #[test]
    pub fn test_fixed_tps_params() {
        let inst_num = 30;
        let target_tps = 10;
        let (num_workers, wait_time) = EmitJobRequest::fixed_tps_params(inst_num, target_tps);
        assert_eq!(num_workers, 1usize);
        assert_eq!(wait_time, 3000u64);
        let target_tps = 30;
        let (num_workers, wait_time) = EmitJobRequest::fixed_tps_params(inst_num, target_tps);
        assert_eq!(num_workers, 2usize);
        assert_eq!(wait_time, 2000u64);
    }
}
[cluster-test] add a new invalid tx type for duplication
Closes: #7422
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use crate::{atomic_histogram::*, cluster::Cluster, instance::Instance};
use std::{
env, fmt, slice,
sync::Arc,
time::{Duration, Instant},
};
use anyhow::{format_err, Result};
use diem_crypto::{
ed25519::{Ed25519PrivateKey, Ed25519PublicKey},
test_utils::KeyPair,
traits::Uniform,
};
use diem_logger::*;
use diem_types::{
account_address::AccountAddress,
account_config::{self, testnet_dd_account_address, XUS_NAME},
chain_id::ChainId,
transaction::{
authenticator::AuthenticationKey, helpers::create_user_txn, Script, TransactionPayload,
},
};
use itertools::zip;
use rand::{
prelude::ThreadRng,
rngs::{OsRng, StdRng},
seq::{IteratorRandom, SliceRandom},
Rng, SeedableRng,
};
use tokio::runtime::Handle;
use diem_client::{views::AmountView, Client as JsonRpcClient, MethodRequest};
use diem_types::{
account_config::{diem_root_address, treasury_compliance_account_address},
transaction::SignedTransaction,
};
use futures::future::{try_join_all, FutureExt};
use once_cell::sync::Lazy;
use std::{
cmp::{max, min},
ops::Sub,
sync::atomic::{AtomicBool, AtomicU64, Ordering},
};
use tokio::{task::JoinHandle, time};
const MAX_TXN_BATCH_SIZE: usize = 100; // Max transactions per account in mempool
// Keep 'MAX_CHILD_VASP_NUM' consistent with the 'MAX_CHILD_ACCOUNTS' constant in VASP.move
const MAX_CHILD_VASP_NUM: usize = 65536;
// Upper bound on VASP (parent) seed accounts loadable from key files.
const MAX_VASP_ACCOUNT_NUM: usize = 16;
// File name of the designated-dealer private key on disk.
const DD_KEY: &str = "dd.key";
/// Kinds of intentionally invalid transactions that can be mixed into the
/// submission stream.
#[derive(Debug)]
pub enum InvalidTxType {
    /// invalid tx with wrong chain id
    ChainId,
    /// invalid tx with sender not on chain
    Sender,
    /// invalid tx with receiver not on chain
    Receiver,
    /// duplicate an existing tx
    Duplication,
    /// Last element of enum, please add new case above
    MaxValue,
}
/// Drives transaction generation and submission against a cluster: owns the
/// pool of funded accounts and the mint key used to create more.
pub struct TxEmitter {
    accounts: Vec<AccountData>,
    mint_key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>,
    chain_id: ChainId,
    // True when operating through VASP accounts instead of the testnet faucet.
    vasp: bool,
}
/// A running emission job: worker handles, the shared stop flag, and the
/// shared stats accumulator.
pub struct EmitJob {
    workers: Vec<Worker>,
    stop: Arc<AtomicBool>,
    stats: Arc<StatsAccumulator>,
}
/// Counters updated atomically by every worker; `latency` is a running sum
/// in milliseconds, `latencies` a histogram for percentile queries.
#[derive(Default)]
struct StatsAccumulator {
    submitted: AtomicU64,
    committed: AtomicU64,
    expired: AtomicU64,
    latency: AtomicU64,
    latencies: Arc<AtomicHistogramAccumulator>,
}
/// Point-in-time snapshot of the accumulator counters.
#[derive(Debug, Default)]
pub struct TxStats {
    pub submitted: u64,
    pub committed: u64,
    pub expired: u64,
    // Sum of latencies (ms) over committed transactions.
    pub latency: u64,
    pub latency_buckets: AtomicHistogramSnapshot,
}
/// Per-second rates derived from a `TxStats` snapshot over a time window.
#[derive(Debug, Default)]
pub struct TxStatsRate {
    pub submitted: u64,
    pub committed: u64,
    pub expired: u64,
    // Average latency (ms) over committed transactions.
    pub latency: u64,
    pub p99_latency: u64,
}
/// Per-worker pacing knobs.
#[derive(Clone)]
pub struct EmitThreadParams {
    // Minimum time (ms) between successive batches from one worker.
    pub wait_millis: u64,
    // Whether to block on commitment (and record latency) after each batch.
    pub wait_committed: bool,
}
impl Default for EmitThreadParams {
fn default() -> Self {
Self {
wait_millis: 0,
wait_committed: true,
}
}
}
/// Parameters describing an emission job before it is started.
#[derive(Clone)]
pub struct EmitJobRequest {
    pub instances: Vec<Instance>,
    pub accounts_per_client: usize,
    // Workers per instance; None lets `start_job` choose a default.
    pub workers_per_ac: Option<usize>,
    pub thread_params: EmitThreadParams,
    pub gas_price: u64,
    // Percentage (0-100) of invalid transactions to mix in; 0 disables.
    pub invalid_tx: u64,
}
/// True when the REUSE_ACC environment variable is set (to any value);
/// enables deterministic account derivation so accounts survive across runs.
pub static REUSE_ACC: Lazy<bool> = Lazy::new(|| env::var("REUSE_ACC").is_ok());
impl EmitJobRequest {
    /// Build a request for `instances`, inheriting worker/thread settings
    /// from `global_emit_job_request` when one is provided.
    ///
    /// A non-zero `invalid_tx` percentage disables waiting for commitment,
    /// since invalid transactions never commit.
    pub fn for_instances(
        instances: Vec<Instance>,
        global_emit_job_request: &Option<EmitJobRequest>,
        gas_price: u64,
        invalid_tx: u64,
    ) -> Self {
        let mut req = match global_emit_job_request {
            Some(global_emit_job_request) => EmitJobRequest {
                instances,
                accounts_per_client: global_emit_job_request.accounts_per_client,
                workers_per_ac: global_emit_job_request.workers_per_ac,
                thread_params: global_emit_job_request.thread_params.clone(),
                gas_price,
                invalid_tx,
            },
            None => Self {
                instances,
                accounts_per_client: 15,
                workers_per_ac: None,
                thread_params: EmitThreadParams::default(),
                gas_price,
                invalid_tx,
            },
        };
        if invalid_tx != 0 {
            req.thread_params.wait_committed = false;
        }
        req
    }
    /// Compute `(workers_per_instance, wait_millis)` so that
    /// `instance_count * num_workers` workers, each sending one transaction
    /// every `wait_millis`, approximate the target `tps`.
    ///
    /// # Panics
    /// Panics when `tps` is 0 (and divides by zero when `instance_count` is 0).
    pub fn fixed_tps_params(instance_count: usize, tps: u64) -> (usize, u64) {
        if tps < 1 {
            // Fixed message grammar: was "can not less than 1".
            panic!("Target tps {} cannot be less than 1", tps)
        }
        let num_workers = tps as usize / instance_count + 1;
        let wait_time = (instance_count * num_workers * 1000_usize / tps as usize) as u64;
        (num_workers, wait_time)
    }
    /// Build a request targeting a fixed `tps` using one account per client
    /// and a computed per-worker wait time.
    pub fn fixed_tps(instances: Vec<Instance>, tps: u64, gas_price: u64, invalid_tx: u64) -> Self {
        let (num_workers, wait_time) = EmitJobRequest::fixed_tps_params(instances.len(), tps);
        Self {
            instances,
            accounts_per_client: 1,
            workers_per_ac: Some(num_workers),
            thread_params: EmitThreadParams {
                wait_millis: wait_time,
                wait_committed: invalid_tx == 0,
            },
            gas_price,
            invalid_tx,
        }
    }
}
impl TxEmitter {
pub fn new(cluster: &Cluster, vasp: bool) -> Self {
Self {
accounts: vec![],
mint_key_pair: cluster.mint_key_pair().clone(),
chain_id: cluster.chain_id,
vasp,
}
}
/// Remove and return the first owned account.
/// Panics if no accounts are available; O(n) front removal is acceptable here.
pub fn take_account(&mut self) -> AccountData {
    self.accounts.remove(0)
}
/// Drop all owned accounts.
pub fn clear(&mut self) {
    self.accounts.clear();
}
/// Pick a random instance to serve minting traffic.
/// Panics if `instances` is empty.
fn pick_mint_instance<'a, 'b>(&'a self, instances: &'b [Instance]) -> &'b Instance {
    instances
        .choose(&mut ThreadRng::default())
        .expect("Instances can not be empty")
}
/// JSON-RPC client for a randomly chosen mint instance.
fn pick_mint_client(&self, instances: &[Instance]) -> JsonRpcClient {
    self.pick_mint_instance(instances).json_rpc_client()
}
/// Submit one transfer from `sender` to `receiver` via `instance` without
/// waiting for commitment; returns the deadline by which it should commit.
pub async fn submit_single_transaction(
    &self,
    instance: &Instance,
    sender: &mut AccountData,
    receiver: &AccountAddress,
    num_coins: u64,
) -> Result<Instant> {
    let client = instance.json_rpc_client();
    let request = gen_transfer_txn_request(sender, receiver, num_coins, self.chain_id, 0);
    client.submit(&request).await?;
    Ok(Instant::now() + TXN_MAX_WAIT)
}
/// Mint any missing accounts, partition them across submission workers, and
/// spawn the workers; returns the running job handle.
pub async fn start_job(&mut self, req: EmitJobRequest) -> Result<EmitJob> {
    let workers_per_ac = match req.workers_per_ac {
        Some(x) => x,
        None => {
            let target_threads = 300;
            // Trying to create somewhere between target_threads/2..target_threads threads
            // We want to have equal numbers of threads for each AC, so that they are equally loaded
            // Otherwise things like flamegrap/perf going to show different numbers depending on which AC is chosen
            // Also limiting number of threads as max 10 per AC for use cases with very small number of nodes or use --peers
            min(10, max(1, target_threads / req.instances.len()))
        }
    };
    let num_clients = req.instances.len() * workers_per_ac;
    info!(
        "Will use {} workers per AC with total {} AC clients",
        workers_per_ac, num_clients
    );
    let num_accounts = req.accounts_per_client * num_clients;
    if self.vasp {
        // VASP mode caps the total number of creatable child accounts.
        assert!(
            num_accounts <= MAX_VASP_ACCOUNT_NUM * MAX_CHILD_VASP_NUM,
            "VASP only supports to create max {} child accounts, but try to create {} accounts",
            MAX_VASP_ACCOUNT_NUM * MAX_CHILD_VASP_NUM,
            num_accounts
        );
    }
    info!(
        "Will create {} accounts_per_client with total {} accounts",
        req.accounts_per_client, num_accounts
    );
    self.mint_accounts(&req, num_accounts).await?;
    // Take exactly the accounts this job needs off the end of the pool.
    let all_accounts = self.accounts.split_off(self.accounts.len() - num_accounts);
    let mut workers = vec![];
    let all_addresses: Vec<_> = all_accounts.iter().map(|d| d.address).collect();
    let all_addresses = Arc::new(all_addresses);
    let mut all_accounts = all_accounts.into_iter();
    let stop = Arc::new(AtomicBool::new(false));
    let stats = Arc::new(StatsAccumulator::default());
    let tokio_handle = Handle::current();
    for instance in &req.instances {
        for _ in 0..workers_per_ac {
            let client = instance.json_rpc_client();
            // Each worker receives its own disjoint slice of accounts.
            let accounts = (&mut all_accounts).take(req.accounts_per_client).collect();
            let all_addresses = all_addresses.clone();
            let stop = stop.clone();
            let params = req.thread_params.clone();
            let stats = Arc::clone(&stats);
            let worker = SubmissionWorker {
                accounts,
                client,
                all_addresses,
                stop,
                params,
                stats,
                chain_id: self.chain_id,
                invalid_tx: req.invalid_tx,
            };
            let join_handle = tokio_handle.spawn(worker.run(req.gas_price).boxed());
            workers.push(Worker { join_handle });
        }
    }
    info!("Tx emitter workers started");
    Ok(EmitJob {
        workers,
        stop,
        stats,
    })
}
/// Build an `AccountData` for `address` signed with the cluster mint key,
/// fetching the current sequence number from the chain.
async fn load_account_with_mint_key(
    &self,
    client: &JsonRpcClient,
    address: AccountAddress,
) -> Result<AccountData> {
    let sequence_number = query_sequence_numbers(&client, &[address])
        .await
        .map_err(|e| {
            format_err!(
                "query_sequence_numbers on {:?} for account {} failed: {}",
                client,
                address,
                e
            )
        })?[0];
    Ok(AccountData {
        address,
        key_pair: self.mint_key_pair.clone(),
        sequence_number,
    })
}
/// Diem root account, signed with the cluster mint key.
pub async fn load_diem_root_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    self.load_account_with_mint_key(client, diem_root_address())
        .await
}
/// Testnet designated-dealer (faucet) account, signed with the mint key.
pub async fn load_faucet_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    self.load_account_with_mint_key(client, testnet_dd_account_address())
        .await
}
/// Treasury-compliance account, signed with the mint key.
pub async fn load_tc_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    self.load_account_with_mint_key(client, treasury_compliance_account_address())
        .await
}
/// Load the designated-dealer account from its on-disk key file (`dd.key`)
/// and fetch its current sequence number.
pub async fn load_dd_account(&self, client: &JsonRpcClient) -> Result<AccountData> {
    let mint_key: Ed25519PrivateKey = generate_key::load_key(DD_KEY);
    let mint_key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey> = KeyPair::from(mint_key);
    let address = diem_types::account_address::from_public_key(&mint_key_pair.public_key);
    let sequence_number = query_sequence_numbers(&client, &[address])
        .await
        .map_err(|e| {
            format_err!(
                "query_sequence_numbers on {:?} for dd account failed: {}",
                client,
                e
            )
        })?[0];
    Ok(AccountData {
        address,
        key_pair: mint_key_pair.clone(),
        sequence_number,
    })
}
/// Load the `index`-th VASP seed account from its on-disk key file
/// ("vasp<index>.key") and fetch its current sequence number.
pub async fn load_vasp_account(
    &self,
    client: &JsonRpcClient,
    index: usize,
) -> Result<AccountData> {
    let file = "vasp".to_owned() + index.to_string().as_str() + ".key";
    let mint_key: Ed25519PrivateKey = generate_key::load_key(file);
    let mint_key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey> = KeyPair::from(mint_key);
    let address = diem_types::account_address::from_public_key(&mint_key_pair.public_key);
    let sequence_number = query_sequence_numbers(&client, &[address])
        .await
        .map_err(|e| {
            // Fixed copy-pasted message: this loads the VASP account, not the
            // DD account.
            format_err!(
                "query_sequence_numbers on {:?} for vasp account failed: {}",
                client,
                e
            )
        })?[0];
    Ok(AccountData {
        address,
        // Moved in directly; the previous `.clone()` was redundant.
        key_pair: mint_key_pair,
        sequence_number,
    })
}
/// Pick the account that funds everything: the testnet faucet (minting
/// `coins_total` into it first) in the default mode, or the DD account
/// loaded from disk in VASP mode. Logs the XUS balance for visibility.
pub async fn get_money_source(
    &self,
    instances: &[Instance],
    coins_total: u64,
) -> Result<AccountData> {
    let client = self.pick_mint_instance(instances).json_rpc_client();
    let faucet_account = if !self.vasp {
        info!("Creating and minting faucet account");
        let mut account = self.load_faucet_account(&client).await?;
        let mint_txn = gen_mint_request(&mut account, coins_total, self.chain_id);
        execute_and_wait_transactions(
            &mut self.pick_mint_client(instances),
            &mut account,
            vec![mint_txn],
        )
        .await
        .map_err(|e| format_err!("Failed to mint into faucet account: {}", e))?;
        account
    } else {
        info!("Loading faucet account from DD account");
        self.load_dd_account(&client).await?
    };
    // Informational only: report the XUS balance against the requested total.
    let balance = retrieve_account_balance(&client, faucet_account.address).await?;
    for b in balance {
        if b.currency.eq(XUS_NAME) {
            info!(
                "DD account current balances are {}, requested {} coins",
                b.amount, coins_total
            );
            break;
        }
    }
    Ok(faucet_account)
}
/// Obtain seed accounts: freshly created via the treasury-compliance
/// account in the default mode, or loaded from VASP key files (capped at
/// `MAX_VASP_ACCOUNT_NUM`) in VASP mode.
pub async fn get_seed_accounts(
    &self,
    instances: &[Instance],
    seed_account_num: usize,
) -> Result<Vec<AccountData>> {
    let client = self.pick_mint_instance(instances).json_rpc_client();
    let seed_accounts = if !self.vasp {
        info!("Creating and minting seeds accounts");
        let mut account = self.load_tc_account(&client).await?;
        let seed_accounts = create_seed_accounts(
            &mut account,
            seed_account_num,
            100,
            self.pick_mint_client(instances),
            self.chain_id,
        )
        .await
        .map_err(|e| format_err!("Failed to create seed accounts: {}", e))?;
        info!("Completed creating seed accounts");
        seed_accounts
    } else {
        let mut seed_accounts = vec![];
        info!("Loading VASP account as seed accounts");
        let load_account_num = min(seed_account_num, MAX_VASP_ACCOUNT_NUM);
        for i in 0..load_account_num {
            let account = self.load_vasp_account(&client, i).await?;
            seed_accounts.push(account);
        }
        info!("Loaded {} VASP accounts", seed_accounts.len());
        seed_accounts
    };
    Ok(seed_accounts)
}
/// Ensures at least `requested_accounts` funded accounts exist, minting only
/// the shortfall. No-op when enough accounts are already loaded.
///
/// Flow: obtain a funded money source -> create/load seed accounts -> fund
/// each seed -> concurrently, each seed creates its share of child accounts
/// against one instance (round-robin).
pub async fn mint_accounts(
    &mut self,
    req: &EmitJobRequest,
    requested_accounts: usize,
) -> Result<()> {
    if self.accounts.len() >= requested_accounts {
        info!("Not minting accounts");
        return Ok(()); // Early return to skip printing 'Minting ...' logs
    }
    // One seed per instance, unless that would push a seed over the
    // MAX_CHILD_VASP_NUM children limit — then use just enough seeds.
    let expected_num_seed_accounts =
        if requested_accounts / req.instances.len() > MAX_CHILD_VASP_NUM {
            requested_accounts / MAX_CHILD_VASP_NUM + 1
        } else {
            req.instances.len()
        };
    let num_accounts = requested_accounts - self.accounts.len(); // Only minting extra accounts
    // Budget enough for MAX_TXNS transfers (amount + gas) per account.
    let coins_per_account = (SEND_AMOUNT + req.gas_price) * MAX_TXNS;
    let coins_total = coins_per_account * num_accounts as u64;
    let mut faucet_account = self.get_money_source(&req.instances, coins_total).await?;
    // Create seed accounts with which we can create actual accounts concurrently
    let seed_accounts = self
        .get_seed_accounts(&req.instances, expected_num_seed_accounts)
        .await?;
    let actual_num_seed_accounts = seed_accounts.len();
    // Ceiling division: children per seed so all seeds together cover num_accounts.
    let num_new_child_accounts =
        (num_accounts + actual_num_seed_accounts - 1) / actual_num_seed_accounts;
    let coins_per_seed_account = coins_per_account * num_new_child_accounts as u64;
    mint_to_new_accounts(
        &mut faucet_account,
        &seed_accounts,
        coins_per_seed_account as u64,
        100,
        self.pick_mint_client(&req.instances),
        self.chain_id,
    )
    .await
    .map_err(|e| format_err!("Failed to mint seed_accounts: {}", e))?;
    info!("Completed minting seed accounts");
    info!("Minting additional {} accounts", num_accounts);
    let seed_rngs = gen_rng_for_reusable_account(actual_num_seed_accounts);
    // For each seed account, create a future and transfer diem from that seed account to new accounts
    let account_futures = seed_accounts
        .into_iter()
        .enumerate()
        .map(|(i, seed_account)| {
            // Spawn new threads
            // Round-robin each seed over the available instances.
            let index = i % req.instances.len();
            let instance = req.instances[index].clone();
            let client = instance.json_rpc_client();
            create_new_accounts(
                seed_account,
                num_new_child_accounts,
                coins_per_account,
                20,
                client,
                self.chain_id,
                self.vasp || *REUSE_ACC,
                seed_rngs[i].clone(),
            )
        });
    let mut minted_accounts = try_join_all(account_futures)
        .await
        .map_err(|e| format_err!("Failed to mint accounts {}", e))?
        .into_iter()
        .flatten()
        .collect();
    self.accounts.append(&mut minted_accounts);
    assert!(
        self.accounts.len() >= num_accounts,
        "Something wrong in mint_account, wanted to mint {}, only have {}",
        requested_accounts,
        self.accounts.len()
    );
    info!("Mint is done");
    Ok(())
}
/// Snapshots the job's accumulated transaction stats without stopping it.
pub fn peek_job_stats(&self, job: &EmitJob) -> TxStats {
    job.stats.accumulate()
}
/// Signals all of the job's workers to stop, waits for each to finish,
/// reclaims their accounts into the emitter's pool, and returns final stats.
pub async fn stop_job(&mut self, job: EmitJob) -> TxStats {
    job.stop.store(true, Ordering::Relaxed);
    for worker in job.workers {
        let mut accounts = worker
            .join_handle
            .await
            .expect("TxEmitter worker thread failed");
        self.accounts.append(&mut accounts);
    }
    job.stats.accumulate()
}
/// Logs the job's per-window transaction rates every `interval_secs` until
/// `duration` elapses.
pub async fn periodic_stat(&mut self, job: &EmitJob, duration: Duration, interval_secs: u64) {
    let deadline = Instant::now() + duration;
    let mut prev_stats: Option<TxStats> = None;
    while Instant::now() < deadline {
        let window = Duration::from_secs(interval_secs);
        tokio::time::sleep(window).await;
        // Report the delta over this window, not the cumulative totals.
        let stats = self.peek_job_stats(job);
        let delta = &stats - &prev_stats.unwrap_or_default();
        prev_stats = Some(stats);
        info!("{}", delta.rate(window));
    }
}
/// Starts an emit job, lets it run for `duration`, then stops it and returns
/// the accumulated stats.
pub async fn emit_txn_for(
    &mut self,
    duration: Duration,
    emit_job_request: EmitJobRequest,
) -> Result<TxStats> {
    let job = self.start_job(emit_job_request).await?;
    tokio::time::sleep(duration).await;
    Ok(self.stop_job(job).await)
}
/// Like `emit_txn_for`, but additionally logs rate stats every
/// `interval_secs` while the job runs.
pub async fn emit_txn_for_with_stats(
    &mut self,
    duration: Duration,
    emit_job_request: EmitJobRequest,
    interval_secs: u64,
) -> Result<TxStats> {
    let job = self.start_job(emit_job_request).await?;
    self.periodic_stat(&job, duration, interval_secs).await;
    Ok(self.stop_job(job).await)
}
/// Fetches the on-chain sequence number of `address` through `instance`'s
/// JSON-RPC endpoint. Errors if the RPC fails or the account does not exist.
pub async fn query_sequence_numbers(
    &self,
    instance: &Instance,
    address: &AccountAddress,
) -> Result<u64> {
    let client = instance.json_rpc_client();
    let account_view = client
        .get_account(*address)
        .await
        .map_err(|e| format_err!("[{:?}] get_accounts failed: {:?} ", client, e))?
        .into_inner();
    match account_view {
        Some(view) => Ok(view.sequence_number),
        None => Err(format_err!("account does not exist")),
    }
}
}
/// Handle to one spawned submission task; joining it yields the accounts the
/// worker owned.
struct Worker {
    join_handle: JoinHandle<Vec<AccountData>>,
}
/// Per-task state for the transaction submission loop (`run`).
struct SubmissionWorker {
    // Accounts this worker signs from; returned to the pool when it stops.
    accounts: Vec<AccountData>,
    // JSON-RPC endpoint this worker submits to.
    client: JsonRpcClient,
    // Global address pool; transfer receivers are sampled from here.
    all_addresses: Arc<Vec<AccountAddress>>,
    // Cooperative shutdown flag, set by `stop_job`.
    stop: Arc<AtomicBool>,
    params: EmitThreadParams,
    // Shared submitted/committed/expired/latency counters.
    stats: Arc<StatsAccumulator>,
    chain_id: ChainId,
    // Percentage (0-100) of each batch emitted as deliberately invalid txns;
    // 0 disables invalid-tx mixing (see `gen_requests`).
    invalid_tx: u64,
}
/// Picks a random invalid-transaction flavor; any roll outside 1..=3
/// (including 0) falls back to `Duplication`.
fn get_invalid_type() -> InvalidTxType {
    let roll = rand::thread_rng().gen_range(0, InvalidTxType::MaxValue as usize);
    match roll {
        1 => InvalidTxType::Receiver,
        2 => InvalidTxType::Sender,
        3 => InvalidTxType::ChainId,
        _ => InvalidTxType::Duplication,
    }
}
/// Builds a deliberately invalid transaction of a random flavor: unknown
/// receiver, unknown sender, mismatched chain id, or a duplicate of an
/// already-generated valid request from `reqs`.
fn invalid_tx(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    chain_id: ChainId,
    gas_price: u64,
    reqs: &[SignedTransaction],
) -> SignedTransaction {
    let seed: [u8; 32] = OsRng.gen();
    let mut rng = StdRng::from_seed(seed);
    // Freshly generated random account, presumed absent on-chain.
    let mut invalid_account = gen_random_account(&mut rng);
    let invalid_address = invalid_account.address;
    match get_invalid_type() {
        // Transfer to an address with no corresponding account.
        InvalidTxType::Receiver => {
            gen_transfer_txn_request(sender, &invalid_address, SEND_AMOUNT, chain_id, gas_price)
        }
        // Sign with a key pair that has no funded account.
        InvalidTxType::Sender => gen_transfer_txn_request(
            &mut invalid_account,
            receiver,
            SEND_AMOUNT,
            chain_id,
            gas_price,
        ),
        // 255 is assumed not to be the target network's chain id.
        InvalidTxType::ChainId => {
            gen_transfer_txn_request(sender, receiver, SEND_AMOUNT, ChainId::new(255), gas_price)
        }
        InvalidTxType::Duplication => {
            // if this is the first tx, default to generate invalid tx with wrong chain id
            // otherwise, make a duplication of an exist valid tx
            if reqs.is_empty() {
                gen_transfer_txn_request(
                    sender,
                    receiver,
                    SEND_AMOUNT,
                    ChainId::new(255),
                    gas_price,
                )
            } else {
                let random_index = rng.gen_range(0, reqs.len() as usize);
                reqs[random_index].clone()
            }
        }
        // `get_invalid_type` only returns the four variants above.
        _ => panic!("wrong invalid type"),
    }
}
impl SubmissionWorker {
    /// Main loop: generate a batch of transfers, submit them all, optionally
    /// wait for commitment to record latency stats, then sleep out the rest
    /// of the pacing window. Returns the worker's accounts on shutdown.
    #[allow(clippy::collapsible_if)]
    async fn run(mut self, gas_price: u64) -> Vec<AccountData> {
        let wait = Duration::from_millis(self.params.wait_millis);
        while !self.stop.load(Ordering::Relaxed) {
            let requests = self.gen_requests(gas_price);
            let num_requests = requests.len();
            let start_time = Instant::now();
            let wait_util = start_time + wait;
            // Sum of each txn's submit offset from batch start (ms); divided
            // by num_requests below to get the average submission offset.
            let mut tx_offset_time = 0u64;
            for request in requests {
                let cur_time = Instant::now();
                tx_offset_time += (cur_time - start_time).as_millis() as u64;
                self.stats.submitted.fetch_add(1, Ordering::Relaxed);
                let resp = self.client.submit(&request).await;
                if let Err(e) = resp {
                    warn!("[{:?}] Failed to submit request: {:?}", self.client, e);
                }
            }
            if self.params.wait_committed {
                // Err carries the subset of txns that never committed.
                if let Err(uncommitted) =
                    wait_for_accounts_sequence(&self.client, &mut self.accounts).await
                {
                    let end_time = (Instant::now() - start_time).as_millis() as u64;
                    let num_committed = (num_requests - uncommitted.len()) as u64;
                    // Mean latency: now minus the average submission offset.
                    let latency = end_time - tx_offset_time / num_requests as u64;
                    self.stats
                        .committed
                        .fetch_add(num_committed, Ordering::Relaxed);
                    self.stats
                        .expired
                        .fetch_add(uncommitted.len() as u64, Ordering::Relaxed);
                    self.stats.latency.fetch_add(
                        // To avoid negative result caused by uncommitted tx occur
                        // Simplified from:
                        // end_time * num_committed - (tx_offset_time/num_requests) * num_committed
                        // to
                        // (end_time - tx_offset_time / num_requests) * num_committed
                        latency * num_committed as u64,
                        Ordering::Relaxed,
                    );
                    self.stats
                        .latencies
                        .record_data_point(latency, num_committed);
                    info!(
                        "[{:?}] Transactions were not committed before expiration: {:?}",
                        self.client, uncommitted
                    );
                } else {
                    // Everything committed: credit the whole batch.
                    let end_time = (Instant::now() - start_time).as_millis() as u64;
                    let latency = end_time - tx_offset_time / num_requests as u64;
                    self.stats
                        .committed
                        .fetch_add(num_requests as u64, Ordering::Relaxed);
                    self.stats
                        .latency
                        .fetch_add(latency * num_requests as u64, Ordering::Relaxed);
                    self.stats
                        .latencies
                        .record_data_point(latency, num_requests as u64);
                }
            }
            // Pace the loop: if the batch finished early, sleep until the end
            // of the wait window.
            let now = Instant::now();
            if wait_util > now {
                time::sleep(wait_util - now).await;
            }
        }
        self.accounts
    }

    /// Builds one batch of signed requests: senders are sampled from this
    /// worker's accounts, receivers from the global address pool. When
    /// `invalid_tx` > 0, the tail of the batch is replaced by invalid
    /// transactions (at least one per batch).
    fn gen_requests(&mut self, gas_price: u64) -> Vec<SignedTransaction> {
        let mut rng = ThreadRng::default();
        // NOTE(review): `max` makes the sample size always >= accounts.len(),
        // so choose_multiple effectively selects every account — confirm this
        // is intended rather than `min`.
        let batch_size = max(MAX_TXN_BATCH_SIZE, self.accounts.len());
        let accounts = self
            .accounts
            .iter_mut()
            .choose_multiple(&mut rng, batch_size);
        let mut requests = Vec::with_capacity(accounts.len());
        let invalid_size = if self.invalid_tx != 0 {
            // if enable mix invalid tx, at least 1 invalid tx per batch
            max(1, accounts.len() * self.invalid_tx as usize / 100)
        } else {
            0
        };
        let mut num_valid_tx = accounts.len() - invalid_size;
        for sender in accounts {
            let receiver = self
                .all_addresses
                .choose(&mut rng)
                .expect("all_addresses can't be empty");
            if num_valid_tx > 0 {
                let request = gen_transfer_txn_request(
                    sender,
                    receiver,
                    SEND_AMOUNT,
                    self.chain_id,
                    gas_price,
                );
                requests.push(request);
                num_valid_tx -= 1;
            } else {
                let request = invalid_tx(sender, receiver, self.chain_id, gas_price, &requests);
                requests.push(request);
            }
        }
        requests
    }
}
/// Polls the chain until every account's on-chain sequence number matches the
/// locally tracked one, or `TXN_MAX_WAIT` elapses.
///
/// On timeout, returns `Err` with the (address, on-chain sequence) pairs that
/// lagged, and resets each lagging local sequence number to the on-chain
/// value so subsequent batches sign with valid sequence numbers.
async fn wait_for_accounts_sequence(
    client: &JsonRpcClient,
    accounts: &mut [AccountData],
) -> Result<(), Vec<(AccountAddress, u64)>> {
    let deadline = Instant::now() + TXN_MAX_WAIT;
    let addresses: Vec<_> = accounts.iter().map(|d| d.address).collect();
    loop {
        match query_sequence_numbers(client, &addresses).await {
            Err(e) => {
                // Query failures are treated as transient: log and retry.
                info!(
                    "Failed to query ledger info on accounts {:?} for instance {:?} : {:?}",
                    addresses, client, e
                );
                time::sleep(Duration::from_millis(300)).await;
            }
            Ok(sequence_numbers) => {
                if is_sequence_equal(accounts, &sequence_numbers) {
                    break;
                }
                let mut uncommitted = vec![];
                if Instant::now() > deadline {
                    for (account, sequence_number) in zip(accounts, &sequence_numbers) {
                        if account.sequence_number != *sequence_number {
                            warn!("Wait deadline exceeded for account {}, expected sequence {}, got from server: {}", account.address, account.sequence_number, sequence_number);
                            uncommitted.push((account.address, *sequence_number));
                            // Re-sync so future txns don't reuse burned sequence numbers.
                            account.sequence_number = *sequence_number;
                        }
                    }
                    return Err(uncommitted);
                }
            }
        }
        time::sleep(Duration::from_millis(100)).await;
    }
    Ok(())
}
/// True iff every account's locally tracked sequence number equals the
/// corresponding queried on-chain value.
fn is_sequence_equal(accounts: &[AccountData], sequence_numbers: &[u64]) -> bool {
    zip(accounts, sequence_numbers).all(|(account, seq)| *seq == account.sequence_number)
}
/// Fetches the sequence numbers of `addresses` (in order), batching the
/// JSON-RPC requests 20 accounts at a time. Errors if any request fails or
/// any account does not exist.
async fn query_sequence_numbers(
    client: &JsonRpcClient,
    addresses: &[AccountAddress],
) -> Result<Vec<u64>> {
    let mut result = vec![];
    for addresses_batch in addresses.chunks(20) {
        let resp = client
            .batch(
                addresses_batch
                    .iter()
                    .map(|a| MethodRequest::get_account(*a))
                    .collect(),
            )
            .await?
            .into_iter()
            // Short-circuits on the first failed response in the batch.
            .map(|r| r.map_err(anyhow::Error::new))
            .map(|r| r.map(|response| response.into_inner().unwrap_get_account()))
            .collect::<Result<Vec<_>>>()
            .map_err(|e| format_err!("[{:?}] get_accounts failed: {:?} ", client, e))?;
        for item in resp.into_iter() {
            result.push(
                item.ok_or_else(|| format_err!("account does not exist"))?
                    .sequence_number,
            );
        }
    }
    Ok(result)
}
// Maximum gas units allotted to each generated transaction.
const MAX_GAS_AMOUNT: u64 = 1_000_000;
// Gas is paid in XUS.
const GAS_CURRENCY_CODE: &str = XUS_NAME;
// Transaction expiration window, in seconds from creation.
const TXN_EXPIRATION_SECONDS: i64 = 50;
// How long to poll for commitment: the expiration window plus slack.
const TXN_MAX_WAIT: Duration = Duration::from_secs(TXN_EXPIRATION_SECONDS as u64 + 30);
// Coin budgets assume each account sends at most this many transactions.
const MAX_TXNS: u64 = 1_000_000;
// Coins transferred per generated peer-to-peer transaction.
const SEND_AMOUNT: u64 = 1;
/// Fetches the currency balances of `address`. Errors if the RPC fails or
/// the account does not exist.
async fn retrieve_account_balance(
    client: &JsonRpcClient,
    address: AccountAddress,
) -> Result<Vec<AmountView>> {
    let account_view = client
        .get_account(address)
        .await
        .map_err(|e| format_err!("[{:?}] get_accounts failed: {:?} ", client, e))?
        .into_inner();
    match account_view {
        Some(view) => Ok(view.balances),
        None => Err(format_err!("account does not exist")),
    }
}
/// Signs `script` as `sender_account` at its current sequence number and
/// bumps the local sequence number by one.
///
/// The local bump happens whether or not the transaction later commits;
/// `wait_for_accounts_sequence` re-syncs local numbers on expiration.
pub fn gen_submit_transaction_request(
    script: Script,
    sender_account: &mut AccountData,
    chain_id: ChainId,
    gas_price: u64,
) -> SignedTransaction {
    let transaction = create_user_txn(
        &sender_account.key_pair,
        TransactionPayload::Script(script),
        sender_account.address,
        sender_account.sequence_number,
        MAX_GAS_AMOUNT,
        gas_price,
        GAS_CURRENCY_CODE.to_owned(),
        TXN_EXPIRATION_SECONDS,
        chain_id,
    )
    .expect("Failed to create signed transaction");
    sender_account.sequence_number += 1;
    transaction
}
/// Builds a self-addressed peer-to-peer transfer that mints `num_coins` XUS
/// into the faucet account itself (gas price 0).
fn gen_mint_request(
    faucet_account: &mut AccountData,
    num_coins: u64,
    chain_id: ChainId,
) -> SignedTransaction {
    let receiver = faucet_account.address;
    let script = transaction_builder::encode_peer_to_peer_with_metadata_script(
        account_config::xus_tag(),
        receiver,
        num_coins,
        vec![],
        vec![],
    );
    gen_submit_transaction_request(script, faucet_account, chain_id, 0)
}
/// Builds a signed XUS transfer of `num_coins` from `sender` to `receiver`,
/// bumping `sender`'s local sequence number.
pub fn gen_transfer_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    num_coins: u64,
    chain_id: ChainId,
    gas_price: u64,
) -> SignedTransaction {
    let script = transaction_builder::encode_peer_to_peer_with_metadata_script(
        account_config::xus_tag(),
        *receiver,
        num_coins,
        vec![],
        vec![],
    );
    gen_submit_transaction_request(script, sender, chain_id, gas_price)
}
/// Builds a transaction creating a child VASP account at `receiver`, funded
/// with `num_coins` XUS (single currency, gas price 0).
fn gen_create_child_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    receiver_auth_key_prefix: Vec<u8>,
    num_coins: u64,
    chain_id: ChainId,
) -> SignedTransaction {
    let add_all_currencies = false;
    let script = transaction_builder::encode_create_child_vasp_account_script(
        account_config::xus_tag(),
        *receiver,
        receiver_auth_key_prefix,
        add_all_currencies,
        num_coins,
    );
    gen_submit_transaction_request(script, sender, chain_id, 0)
}
/// Builds a transaction creating a parent VASP account at `receiver`
/// (unfunded, gas price 0).
fn gen_create_account_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    auth_key_prefix: Vec<u8>,
    chain_id: ChainId,
) -> SignedTransaction {
    let script = transaction_builder::encode_create_parent_vasp_account_script(
        account_config::xus_tag(),
        0,
        *receiver,
        auth_key_prefix,
        vec![],
        false,
    );
    gen_submit_transaction_request(script, sender, chain_id, 0)
}
/// Builds a peer-to-peer transfer of `num_coins` XUS from `sender` to
/// `receiver` at gas price 0 (used for funding new accounts).
fn gen_mint_txn_request(
    sender: &mut AccountData,
    receiver: &AccountAddress,
    num_coins: u64,
    chain_id: ChainId,
) -> SignedTransaction {
    let script = transaction_builder::encode_peer_to_peer_with_metadata_script(
        account_config::xus_tag(),
        *receiver,
        num_coins,
        vec![],
        vec![],
    );
    gen_submit_transaction_request(script, sender, chain_id, 0)
}
/// Generates a fresh key pair from `rng` and wraps it in an `AccountData`
/// starting at sequence number 0.
fn gen_random_account(rng: &mut StdRng) -> AccountData {
    let key_pair = KeyPair::generate(rng);
    let address = diem_types::account_address::from_public_key(&key_pair.public_key);
    AccountData {
        address,
        key_pair,
        sequence_number: 0,
    }
}
/// Generates `num_accounts` random accounts from an OS-seeded RNG.
fn gen_random_accounts(num_accounts: usize) -> Vec<AccountData> {
    let seed: [u8; 32] = OsRng.gen();
    let mut rng = StdRng::from_seed(seed);
    let mut accounts = Vec::with_capacity(num_accounts);
    for _ in 0..num_accounts {
        accounts.push(gen_random_account(&mut rng));
    }
    accounts
}
/// Builds `count` deterministic RNGs from a fixed base seed, varying only the
/// last seed byte per index, so reusable accounts derive identically across
/// runs.
fn gen_rng_for_reusable_account(count: usize) -> Vec<StdRng> {
    // use same seed for reuse account creation and reuse
    let mut seed = [
        0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
        0, 0,
    ];
    (0..count)
        .map(|i| {
            seed[31] = i as u8;
            StdRng::from_seed(seed)
        })
        .collect()
}
/// Derives a deterministic account from `rng` and queries its current
/// on-chain sequence number, defaulting to 0 when the account does not exist
/// yet (a query error is treated as "not created").
async fn gen_reusable_account(client: &JsonRpcClient, rng: &mut StdRng) -> Result<AccountData> {
    let key_pair = KeyPair::generate(rng);
    let address = diem_types::account_address::from_public_key(&key_pair.public_key);
    // `client` is already a reference; no extra borrow needed.
    let sequence_number = match query_sequence_numbers(client, &[address]).await {
        Ok(v) => v[0],
        Err(_) => 0,
    };
    // The key pair is not used again here, so move it instead of cloning.
    Ok(AccountData {
        address,
        key_pair,
        sequence_number,
    })
}
/// Sequentially derives `num_accounts` reusable accounts from `rng`.
async fn gen_reusable_accounts(
    client: &JsonRpcClient,
    num_accounts: usize,
    rng: &mut StdRng,
) -> Result<Vec<AccountData>> {
    let mut accounts = Vec::with_capacity(num_accounts);
    for _ in 0..num_accounts {
        accounts.push(gen_reusable_account(client, rng).await?);
    }
    Ok(accounts)
}
/// Builds one child-VASP-creation transaction per entry in `accounts`,
/// each funded with `amount` and signed by `source_account`.
fn gen_create_child_txn_requests(
    source_account: &mut AccountData,
    accounts: &[AccountData],
    amount: u64,
    chain_id: ChainId,
) -> Vec<SignedTransaction> {
    let mut requests = Vec::with_capacity(accounts.len());
    for account in accounts {
        requests.push(gen_create_child_txn_request(
            source_account,
            &account.address,
            account.auth_key_prefix(),
            amount,
            chain_id,
        ));
    }
    requests
}
/// Builds one parent-VASP-creation transaction per entry in `accounts`,
/// signed by `sending_account`.
fn gen_account_creation_txn_requests(
    sending_account: &mut AccountData,
    accounts: &[AccountData],
    chain_id: ChainId,
) -> Vec<SignedTransaction> {
    let mut requests = Vec::with_capacity(accounts.len());
    for account in accounts {
        requests.push(gen_create_account_txn_request(
            sending_account,
            &account.address,
            account.auth_key_prefix(),
            chain_id,
        ));
    }
    requests
}
/// Builds one funding transfer of `amount` per entry in `accounts`, signed by
/// `sending_account`.
fn gen_mint_txn_requests(
    sending_account: &mut AccountData,
    accounts: &[AccountData],
    amount: u64,
    chain_id: ChainId,
) -> Vec<SignedTransaction> {
    let mut requests = Vec::with_capacity(accounts.len());
    for account in accounts {
        requests.push(gen_mint_txn_request(
            sending_account,
            &account.address,
            amount,
            chain_id,
        ));
    }
    requests
}
/// Submits each transaction (retrying submissions on a fixed schedule — see
/// `diem_retrier::fixed_retry_strategy(5_000, 20)`), then blocks until
/// `account`'s on-chain sequence number catches up to the local one, i.e.
/// every transaction committed or expired.
pub async fn execute_and_wait_transactions(
    client: &mut JsonRpcClient,
    account: &mut AccountData,
    txn: Vec<SignedTransaction>,
) -> Result<()> {
    debug!(
        "[{:?}] Submitting transactions {} - {} for {}",
        client,
        // The local sequence number was already bumped once per generated txn,
        // so subtracting the count recovers the starting sequence number.
        account.sequence_number - txn.len() as u64,
        account.sequence_number,
        account.address
    );
    for request in txn {
        diem_retrier::retry_async(diem_retrier::fixed_retry_strategy(5_000, 20), || {
            // Clone per attempt: the retry closure may run multiple times.
            let request = request.clone();
            let c = client.clone();
            let client_name = format!("{:?}", client);
            Box::pin(async move {
                let txn_str = format!("{}::{}", request.sender(), request.sequence_number());
                debug!("Submitting txn {}", txn_str);
                let resp = c.submit(&request).await;
                debug!("txn {} status: {:?}", txn_str, resp);
                resp.map_err(|e| format_err!("[{}] Failed to submit request: {:?}", client_name, e))
            })
        })
        .await?;
    }
    let r = wait_for_accounts_sequence(client, slice::from_mut(account))
        .await
        .map_err(|_| format_err!("Mint transactions were not committed before expiration"));
    debug!(
        "[{:?}] Account {} is at sequence number {} now",
        client, account.address, account.sequence_number
    );
    r
}
/// Create `num_new_accounts` by transferring diem from `source_account`. Return Vec of created
/// accounts
///
/// Works in batches bounded by `max_num_accounts_per_batch` and
/// `MAX_TXN_BATCH_SIZE`. With `reuse_account`, account keys are derived
/// deterministically from `rng` so existing on-chain accounts are reused
/// instead of generating fresh random ones.
async fn create_new_accounts(
    mut source_account: AccountData,
    num_new_accounts: usize,
    diem_per_new_account: u64,
    max_num_accounts_per_batch: u64,
    mut client: JsonRpcClient,
    chain_id: ChainId,
    reuse_account: bool,
    mut rng: StdRng,
) -> Result<Vec<AccountData>> {
    let mut i = 0;
    let mut accounts = vec![];
    while i < num_new_accounts {
        let batch_size = min(
            max_num_accounts_per_batch as usize,
            min(MAX_TXN_BATCH_SIZE, num_new_accounts - i),
        );
        let mut batch = if reuse_account {
            info!("loading {} accounts if they exist", batch_size);
            gen_reusable_accounts(&client, batch_size, &mut rng).await?
        } else {
            gen_random_accounts(batch_size)
        };
        let requests = gen_create_child_txn_requests(
            &mut source_account,
            &batch,
            diem_per_new_account,
            chain_id,
        );
        // Blocks until the whole batch committed (or errors on expiration).
        execute_and_wait_transactions(&mut client, &mut source_account, requests).await?;
        i += batch.len();
        accounts.append(&mut batch);
    }
    Ok(accounts)
}
/// Create `num_new_accounts`. Return Vec of created accounts
///
/// Accounts are random (parent VASP) and are created in batches bounded by
/// `max_num_accounts_per_batch` and `MAX_TXN_BATCH_SIZE`.
async fn create_seed_accounts(
    creation_account: &mut AccountData,
    num_new_accounts: usize,
    max_num_accounts_per_batch: u64,
    mut client: JsonRpcClient,
    chain_id: ChainId,
) -> Result<Vec<AccountData>> {
    let mut i = 0;
    let mut accounts = vec![];
    while i < num_new_accounts {
        let mut batch = gen_random_accounts(min(
            max_num_accounts_per_batch as usize,
            min(MAX_TXN_BATCH_SIZE, num_new_accounts - i),
        ));
        let create_requests = gen_account_creation_txn_requests(creation_account, &batch, chain_id);
        execute_and_wait_transactions(&mut client, creation_account, create_requests).await?;
        i += batch.len();
        accounts.append(&mut batch);
    }
    Ok(accounts)
}
/// Mint `diem_per_new_account` from `minting_account` to each account in `accounts`.
///
/// Consumes the slice in randomly sized batches: the modulo yields
/// 0..cap-1 and `split_at(batch_size + 1)` then takes 1..=cap accounts,
/// where cap = min(max_num_accounts_per_batch, MAX_TXN_BATCH_SIZE, remaining).
async fn mint_to_new_accounts(
    minting_account: &mut AccountData,
    accounts: &[AccountData],
    diem_per_new_account: u64,
    max_num_accounts_per_batch: u64,
    mut client: JsonRpcClient,
    chain_id: ChainId,
) -> Result<()> {
    let mut left = accounts;
    let mut i = 0;
    let num_accounts = accounts.len();
    while !left.is_empty() {
        // Modulus is >= 1 while `left` is non-empty (remaining >= 1), so no
        // division by zero; +1 below keeps every batch non-empty.
        let batch_size = OsRng.gen::<usize>()
            % min(
                max_num_accounts_per_batch as usize,
                min(MAX_TXN_BATCH_SIZE, num_accounts - i),
            );
        let (to_batch, rest) = left.split_at(batch_size + 1);
        let mint_requests =
            gen_mint_txn_requests(minting_account, to_batch, diem_per_new_account, chain_id);
        execute_and_wait_transactions(&mut client, minting_account, mint_requests).await?;
        i += to_batch.len();
        left = rest;
    }
    Ok(())
}
/// Locally held account: its address, ed25519 signing key pair, and the next
/// sequence number to sign with.
#[derive(Clone)]
pub struct AccountData {
    pub address: AccountAddress,
    pub key_pair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>,
    pub sequence_number: u64,
}
impl AccountData {
    /// Prefix bytes of the ed25519 authentication key, as required by the
    /// account-creation transaction scripts.
    pub fn auth_key_prefix(&self) -> Vec<u8> {
        let auth_key = AuthenticationKey::ed25519(&self.key_pair.public_key);
        auth_key.prefix().to_vec()
    }
}
impl StatsAccumulator {
    /// Snapshots the live atomic counters into an immutable `TxStats`.
    pub fn accumulate(&self) -> TxStats {
        TxStats {
            submitted: self.submitted.load(Ordering::Relaxed),
            committed: self.committed.load(Ordering::Relaxed),
            expired: self.expired.load(Ordering::Relaxed),
            latency: self.latency.load(Ordering::Relaxed),
            latency_buckets: self.latencies.snapshot(),
        }
    }
}
impl TxStats {
    /// Converts absolute counters into per-second rates over `window`, plus
    /// mean and p99 latency.
    ///
    /// NOTE(review): divides by `window.as_secs()`, which is 0 for
    /// sub-second windows — callers appear to always pass >= 1s; confirm.
    pub fn rate(&self, window: Duration) -> TxStatsRate {
        TxStatsRate {
            submitted: self.submitted / window.as_secs(),
            committed: self.committed / window.as_secs(),
            expired: self.expired / window.as_secs(),
            // Mean latency per committed txn; guard against divide-by-zero.
            latency: if self.committed == 0 {
                0u64
            } else {
                self.latency / self.committed
            },
            p99_latency: self.latency_buckets.percentile(99, 100),
        }
    }
}
/// Pointwise difference of two snapshots, used for per-window deltas.
/// Assumes `other` is an earlier snapshot of the same counters (counters are
/// monotonically increasing), otherwise the unsigned subtractions underflow.
impl Sub for &TxStats {
    type Output = TxStats;
    fn sub(self, other: &TxStats) -> TxStats {
        TxStats {
            submitted: self.submitted - other.submitted,
            committed: self.committed - other.committed,
            expired: self.expired - other.expired,
            latency: self.latency - other.latency,
            latency_buckets: &self.latency_buckets - &other.latency_buckets,
        }
    }
}
/// Human-readable absolute counters (latency fields omitted).
impl fmt::Display for TxStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "submitted: {}, committed: {}, expired: {}",
            self.submitted, self.committed, self.expired,
        )
    }
}
/// Human-readable per-second rates plus mean and p99 latency.
impl fmt::Display for TxStatsRate {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "submitted: {} txn/s, committed: {} txn/s, expired: {} txn/s, latency: {} ms, p99 latency: {} ms",
            self.submitted, self.committed, self.expired, self.latency, self.p99_latency,
        )
    }
}
#[cfg(test)]
mod test {
    use crate::tx_emitter::EmitJobRequest;

    /// `fixed_tps_params` splits a target TPS across instances, returning
    /// (workers per instance, per-worker wait in ms).
    #[test]
    pub fn test_fixed_tps_params() {
        let inst_num = 30;
        let target_tps = 10;
        // Fewer TPS than instances: one worker, long wait.
        let (num_workers, wait_time) = EmitJobRequest::fixed_tps_params(inst_num, target_tps);
        assert_eq!(num_workers, 1usize);
        assert_eq!(wait_time, 3000u64);
        let target_tps = 30;
        // TPS equal to instance count: more workers, shorter wait.
        let (num_workers, wait_time) = EmitJobRequest::fixed_tps_params(inst_num, target_tps);
        assert_eq!(num_workers, 2usize);
        assert_eq!(wait_time, 2000u64);
    }
}
|
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A logger configured via an environment variable.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate env_logger;
//!
//! use log::LogLevel;
//!
//! fn main() {
//! env_logger::init().unwrap();
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//!
//! if log_enabled!(LogLevel::Info) {
//! let x = 3 * 4; // expensive computation
//! info!("the answer was: {}", x);
//! }
//! }
//! ```
//!
//! Assumes the binary is `main`:
//!
//! ```{.bash}
//! $ RUST_LOG=error ./main
//! ERROR:main: this is printed by default
//! ```
//!
//! ```{.bash}
//! $ RUST_LOG=info ./main
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! ```{.bash}
//! $ RUST_LOG=debug ./main
//! DEBUG:main: this is a debug message
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! You can also set the log level on a per module basis:
//!
//! ```{.bash}
//! $ RUST_LOG=main=info ./main
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! And enable all logging:
//!
//! ```{.bash}
//! $ RUST_LOG=main ./main
//! DEBUG:main: this is a debug message
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! See the documentation for the log crate for more information about its API.
//!
//! ## Enabling logging
//!
//! Log levels are controlled on a per-module basis, and by default all logging
//! is disabled. Logging is controlled via the `RUST_LOG` environment variable.
//! The value of this environment variable is a comma-separated list of logging
//! directives. A logging directive is of the form:
//!
//! ```text
//! path::to::module=log_level
//! ```
//!
//! The path to the module is rooted in the name of the crate it was compiled
//! for, so if your program is contained in a file `hello.rs`, for example, to
//! turn on logging for this file you would use a value of `RUST_LOG=hello`.
//! Furthermore, this path is a prefix-search, so all modules nested in the
//! specified module will also have logging enabled.
//!
//! The actual `log_level` is optional to specify. If omitted, all logging will
//! be enabled. If specified, it must be one of the strings `debug`, `error`,
//! `info`, `warn`, or `trace`.
//!
//! As the log level for a module is optional, the module to enable logging for
//! is also optional. If only a `log_level` is provided, then the global log
//! level for all modules is set to this value.
//!
//! Some examples of valid values of `RUST_LOG` are:
//!
//! * `hello` turns on all logging for the 'hello' module
//! * `info` turns on all info logging
//! * `hello=debug` turns on debug logging for 'hello'
//! * `hello,std::option` turns on hello, and std's option logging
//! * `error,hello=warn` turn on global error logging and also warn for hello
//!
//! ## Filtering results
//!
//! A RUST_LOG directive may include a regex filter. The syntax is to append `/`
//! followed by a regex. Each message is checked against the regex, and is only
//! logged if it matches. Note that the matching is done after formatting the
//! log string but before adding any logging meta-data. There is a single filter
//! for all modules.
//!
//! Some examples:
//!
//! * `hello/foo` turns on all logging for the 'hello' module where the log
//! message includes 'foo'.
//! * `info/f.o` turns on all info logging where the log message includes 'foo',
//! 'f1o', 'fao', etc.
//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log
//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc.
//! * `error,hello=warn/[0-9] scopes` turn on global error logging and also
//! warn for hello. In both cases the log message must include a single digit
//! number followed by 'scopes'.
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/env_logger/")]
#![cfg_attr(test, deny(warnings))]
extern crate regex;
extern crate log;
use regex::Regex;
use std::io::prelude::*;
use std::io::{self, Stderr};
use std::sync::Mutex;
use std::env;
use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata};
/// Logger configured from `RUST_LOG`: per-module level directives plus an
/// optional regex filter, writing records to stderr.
struct Logger {
    // Sorted by ascending name length in `init` (longest-match lookup).
    directives: Vec<LogDirective>,
    // Per module docs: a message is only logged if it matches this regex.
    filter: Option<Regex>,
    out: Mutex<Stderr>,
}
impl Logger {
    /// Whether a record at `level` for module path `target` is enabled.
    /// Directives are pre-sorted by ascending name length, so iterating in
    /// reverse finds the longest (most specific) matching prefix first.
    fn enabled(&self, level: LogLevel, target: &str) -> bool {
        // Search for the longest match, the vector is assumed to be pre-sorted.
        for directive in self.directives.iter().rev() {
            match directive.name {
                // Named directive that doesn't prefix-match `target`: keep looking.
                Some(ref name) if !target.starts_with(&**name) => {},
                // Longest matching directive (or a global one) decides.
                Some(..) | None => {
                    return level <= directive.level
                }
            }
        }
        // No directive applies: logging is disabled by default.
        false
    }
}
impl Log for Logger {
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        self.enabled(metadata.level(), metadata.target())
    }

    /// Writes the record to stderr if its level/target pass the directives
    /// and (when a `/regex` filter is configured) the formatted message
    /// matches the regex.
    fn log(&self, record: &LogRecord) {
        if !Log::enabled(self, record.metadata()) {
            return;
        }
        // Per the module docs, a message "is only logged if it matches" the
        // filter regex — so suppress the record when it does NOT match.
        // (The previous condition was inverted and suppressed matches.)
        if let Some(filter) = self.filter.as_ref() {
            if !filter.is_match(&*record.args().to_string()) {
                return;
            }
        }
        // Write failures (e.g. closed stderr) are deliberately ignored.
        let _ = writeln!(&mut *self.out.lock().unwrap(),
                         "{}:{}: {}",
                         record.level(),
                         record.location().module_path(),
                         record.args());
    }
}
/// One parsed `RUST_LOG` directive: an optional module-path prefix and the
/// maximum level enabled for it (`None` name = applies globally).
struct LogDirective {
    name: Option<String>,
    level: LogLevelFilter,
}
/// Initializes the global logger with an env logger.
///
/// This should be called early in the execution of a Rust program, and the
/// global logger may only be initialized once. Future initialization attempts
/// will return an error.
pub fn init() -> Result<(), SetLoggerError> {
    log::set_logger(|max_level| {
        // Unset/unreadable RUST_LOG means no directives: everything is off.
        let (mut directives, filter) = match env::var("RUST_LOG") {
            Ok(spec) => parse_logging_spec(&spec),
            Err(..) => (Vec::new(), None),
        };
        // Sort the provided directives by length of their name, this allows a
        // little more efficient lookup at runtime.
        directives.sort_by(|a, b| {
            let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0);
            let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0);
            alen.cmp(&blen)
        });
        // Advertise the most verbose level any directive enables, so the
        // `log` crate can filter cheaply before reaching this logger.
        let level = {
            let max = directives.iter().map(|d| d.level).max();
            max.unwrap_or(LogLevelFilter::Off)
        };
        max_level.set(level);
        Box::new(Logger {
            directives: directives,
            filter: filter,
            out: Mutex::new(io::stderr()),
        })
    })
}
/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo")
/// and return a vector with log directives.
///
/// The part after an optional `/` is compiled as a regex message filter;
/// invalid directives or an invalid regex produce a warning on stdout and
/// are dropped rather than aborting the parse.
fn parse_logging_spec(spec: &str) -> (Vec<LogDirective>, Option<Regex>) {
    let mut dirs = Vec::new();

    let mut parts = spec.split('/');
    let mods = parts.next();
    let filter = parts.next();
    if parts.next().is_some() {
        println!("warning: invalid logging spec '{}', \
                 ignoring it (too many '/'s)", spec);
        return (dirs, None);
    }
    // Idiomatic iteration over the Option instead of a side-effectful `.map()`.
    if let Some(m) = mods {
        for s in m.split(',') {
            if s.is_empty() { continue }
            let mut parts = s.split('=');
            let (log_level, name) =
                match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) {
                    (Some(part0), None, None) => {
                        // if the single argument is a log-level string or number,
                        // treat that as a global fallback
                        match part0.parse() {
                            Ok(num) => (num, None),
                            Err(_) => (LogLevelFilter::max(), Some(part0)),
                        }
                    }
                    // "module=" (empty level) means maximum verbosity for that module.
                    (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)),
                    (Some(part0), Some(part1), None) => {
                        match part1.parse() {
                            Ok(num) => (num, Some(part0)),
                            _ => {
                                println!("warning: invalid logging spec '{}', \
                                         ignoring it", part1);
                                continue
                            }
                        }
                    },
                    _ => {
                        println!("warning: invalid logging spec '{}', \
                                 ignoring it", s);
                        continue
                    }
                };
            dirs.push(LogDirective {
                name: name.map(|s| s.to_string()),
                level: log_level,
            });
        }
    }

    // `and_then` instead of `map_or(None, …)` (clippy: option_map_or_none).
    let filter = filter.and_then(|filter| {
        match Regex::new(filter) {
            Ok(re) => Some(re),
            Err(e) => {
                println!("warning: invalid regex filter - {}", e);
                None
            }
        }
    });

    (dirs, filter)
}
#[cfg(test)]
mod tests {
use std::io;
use std::sync::Mutex;
use log::{Log, LogLevel, LogLevelFilter};
use super::{Logger, LogDirective, parse_logging_spec};
// Builds a `Logger` with the given directives, no filter regex, and a
// mutex-guarded stderr sink for the tests below.
fn make_logger(dirs: Vec<LogDirective>) -> Logger {
    Logger {
        directives: dirs,
        filter: None,
        out: Mutex::new(io::stderr())
    }
}

// --- Logger::enabled: directive matching ---

#[test]
fn match_full_path() {
    // Exact-path directives control their own module only.
    let logger = make_logger(vec![
        LogDirective {
            name: Some("crate2".to_string()),
            level: LogLevelFilter::Info
        },
        LogDirective {
            name: Some("crate1::mod1".to_string()),
            level: LogLevelFilter::Warn
        }
    ]);
    assert!(logger.enabled(LogLevel::Warn, "crate1::mod1"));
    assert!(!logger.enabled(LogLevel::Info, "crate1::mod1"));
    assert!(logger.enabled(LogLevel::Info, "crate2"));
    assert!(!logger.enabled(LogLevel::Debug, "crate2"));
}

#[test]
fn no_match() {
    // Targets matching no directive are disabled at every level.
    let logger = make_logger(vec![
        LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info },
        LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
    ]);
    assert!(!logger.enabled(LogLevel::Warn, "crate3"));
}

#[test]
fn match_beginning() {
    // A directive name is a prefix match on the target path.
    let logger = make_logger(vec![
        LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info },
        LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
    ]);
    assert!(logger.enabled(LogLevel::Info, "crate2::mod1"));
}

#[test]
fn match_beginning_longest_match() {
    // When several prefixes match, the longest (most specific) one wins.
    let logger = make_logger(vec![
        LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info },
        LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug },
        LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
    ]);
    assert!(logger.enabled(LogLevel::Debug, "crate2::mod1"));
    assert!(!logger.enabled(LogLevel::Debug, "crate2"));
}

#[test]
fn match_default() {
    // A directive with no name acts as the global default level.
    let logger = make_logger(vec![
        LogDirective { name: None, level: LogLevelFilter::Info },
        LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
    ]);
    assert!(logger.enabled(LogLevel::Warn, "crate1::mod1"));
    assert!(logger.enabled(LogLevel::Info, "crate2::mod2"));
}

#[test]
fn zero_level() {
    // `Off` disables a module entirely, even for errors.
    let logger = make_logger(vec![
        LogDirective { name: None, level: LogLevelFilter::Info },
        LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off }
    ]);
    assert!(!logger.enabled(LogLevel::Error, "crate1::mod1"));
    assert!(logger.enabled(LogLevel::Info, "crate2::mod2"));
}

// --- parse_logging_spec: directive string parsing ---

#[test]
fn parse_logging_spec_valid() {
    // A full spec: explicit level, bare module (level defaults to max), and another level.
    let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug");
    assert_eq!(dirs.len(), 3);
    assert_eq!(dirs[0].name, Some("crate1::mod1".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::Error);
    assert_eq!(dirs[1].name, Some("crate1::mod2".to_string()));
    assert_eq!(dirs[1].level, LogLevelFilter::max());
    assert_eq!(dirs[2].name, Some("crate2".to_string()));
    assert_eq!(dirs[2].level, LogLevelFilter::Debug);
    assert!(filter.is_none());
}

#[test]
fn parse_logging_spec_invalid_crate() {
    // test parse_logging_spec with multiple = in specification
    let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug");
    assert_eq!(dirs.len(), 1);
    assert_eq!(dirs[0].name, Some("crate2".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::Debug);
    assert!(filter.is_none());
}

#[test]
fn parse_logging_spec_invalid_log_level() {
    // test parse_logging_spec with 'noNumber' as log level
    let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug");
    assert_eq!(dirs.len(), 1);
    assert_eq!(dirs[0].name, Some("crate2".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::Debug);
    assert!(filter.is_none());
}

#[test]
fn parse_logging_spec_string_log_level() {
    // test parse_logging_spec with 'warn' as log level
    let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn");
    assert_eq!(dirs.len(), 1);
    assert_eq!(dirs[0].name, Some("crate2".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::Warn);
    assert!(filter.is_none());
}

#[test]
fn parse_logging_spec_empty_log_level() {
    // test parse_logging_spec with '' as log level
    let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=");
    assert_eq!(dirs.len(), 1);
    assert_eq!(dirs[0].name, Some("crate2".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::max());
    assert!(filter.is_none());
}

#[test]
fn parse_logging_spec_global() {
    // test parse_logging_spec with no crate
    let (dirs, filter) = parse_logging_spec("warn,crate2=debug");
    assert_eq!(dirs.len(), 2);
    assert_eq!(dirs[0].name, None);
    assert_eq!(dirs[0].level, LogLevelFilter::Warn);
    assert_eq!(dirs[1].name, Some("crate2".to_string()));
    assert_eq!(dirs[1].level, LogLevelFilter::Debug);
    assert!(filter.is_none());
}

#[test]
fn parse_logging_spec_valid_filter() {
    // Everything after '/' becomes the message filter regex.
    let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc");
    assert_eq!(dirs.len(), 3);
    assert_eq!(dirs[0].name, Some("crate1::mod1".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::Error);
    assert_eq!(dirs[1].name, Some("crate1::mod2".to_string()));
    assert_eq!(dirs[1].level, LogLevelFilter::max());
    assert_eq!(dirs[2].name, Some("crate2".to_string()));
    assert_eq!(dirs[2].level, LogLevelFilter::Debug);
    assert!(filter.is_some() && filter.unwrap().to_string() == "abc");
}

#[test]
fn parse_logging_spec_invalid_crate_filter() {
    // An invalid directive is dropped but the filter is still parsed.
    let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c");
    assert_eq!(dirs.len(), 1);
    assert_eq!(dirs[0].name, Some("crate2".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::Debug);
    assert!(filter.is_some() && filter.unwrap().to_string() == "a.c");
}

#[test]
fn parse_logging_spec_empty_with_filter() {
    // A bare module with a filter defaults the level to max.
    let (dirs, filter) = parse_logging_spec("crate1/a*c");
    assert_eq!(dirs.len(), 1);
    assert_eq!(dirs[0].name, Some("crate1".to_string()));
    assert_eq!(dirs[0].level, LogLevelFilter::max());
    assert!(filter.is_some() && filter.unwrap().to_string() == "a*c");
}
}
Don't add an extra mutex around stderr

Stderr is synchronized internally, so there is no reason to wrap it in
another mutex.
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A logger configured via an environment variable.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate env_logger;
//!
//! use log::LogLevel;
//!
//! fn main() {
//! env_logger::init().unwrap();
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//!
//! if log_enabled!(LogLevel::Info) {
//! let x = 3 * 4; // expensive computation
//! info!("the answer was: {}", x);
//! }
//! }
//! ```
//!
//! Assumes the binary is `main`:
//!
//! ```{.bash}
//! $ RUST_LOG=error ./main
//! ERROR:main: this is printed by default
//! ```
//!
//! ```{.bash}
//! $ RUST_LOG=info ./main
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! ```{.bash}
//! $ RUST_LOG=debug ./main
//! DEBUG:main: this is a debug message
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! You can also set the log level on a per module basis:
//!
//! ```{.bash}
//! $ RUST_LOG=main=info ./main
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! And enable all logging:
//!
//! ```{.bash}
//! $ RUST_LOG=main ./main
//! DEBUG:main: this is a debug message
//! ERROR:main: this is printed by default
//! INFO:main: the answer was: 12
//! ```
//!
//! See the documentation for the log crate for more information about its API.
//!
//! ## Enabling logging
//!
//! Log levels are controlled on a per-module basis, and by default all logging
//! is disabled. Logging is controlled via the `RUST_LOG` environment variable.
//! The value of this environment variable is a comma-separated list of logging
//! directives. A logging directive is of the form:
//!
//! ```text
//! path::to::module=log_level
//! ```
//!
//! The path to the module is rooted in the name of the crate it was compiled
//! for, so if your program is contained in a file `hello.rs`, for example, to
//! turn on logging for this file you would use a value of `RUST_LOG=hello`.
//! Furthermore, this path is a prefix-search, so all modules nested in the
//! specified module will also have logging enabled.
//!
//! The actual `log_level` is optional to specify. If omitted, all logging will
//! be enabled. If specified, it must be one of the strings `debug`, `error`,
//! `info`, `warn`, or `trace`.
//!
//! As the log level for a module is optional, the module to enable logging for
//! is also optional. If only a `log_level` is provided, then the global log
//! level for all modules is set to this value.
//!
//! Some examples of valid values of `RUST_LOG` are:
//!
//! * `hello` turns on all logging for the 'hello' module
//! * `info` turns on all info logging
//! * `hello=debug` turns on debug logging for 'hello'
//! * `hello,std::option` turns on hello, and std's option logging
//! * `error,hello=warn` turn on global error logging and also warn for hello
//!
//! ## Filtering results
//!
//! A RUST_LOG directive may include a regex filter. The syntax is to append `/`
//! followed by a regex. Each message is checked against the regex, and is only
//! logged if it matches. Note that the matching is done after formatting the
//! log string but before adding any logging meta-data. There is a single filter
//! for all modules.
//!
//! Some examples:
//!
//! * `hello/foo` turns on all logging for the 'hello' module where the log
//! message includes 'foo'.
//! * `info/f.o` turns on all info logging where the log message includes 'foo',
//! 'f1o', 'fao', etc.
//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log
//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc.
//! * `error,hello=warn/[0-9] scopes` turn on global error logging and also
//! warn for hello. In both cases the log message must include a single digit
//! number followed by 'scopes'.
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/env_logger/")]
#![cfg_attr(test, deny(warnings))]
extern crate regex;
extern crate log;
use regex::Regex;
use std::io::prelude::*;
use std::io;
use std::env;
use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata};
/// The logger configured from the `RUST_LOG` environment variable, holding
/// the parsed directives and the optional message filter regex.
struct Logger {
    // Module-path directives, pre-sorted by name length (shortest first) so
    // that reverse iteration finds the longest, most specific match first.
    directives: Vec<LogDirective>,
    // Optional regex from the `/...` suffix of `RUST_LOG`; per the module
    // docs, a formatted message is only logged if it matches.
    filter: Option<Regex>,
}
impl Logger {
    /// Returns `true` if a message at `level` from module path `target`
    /// should be logged according to the configured directives.
    fn enabled(&self, level: LogLevel, target: &str) -> bool {
        // `directives` is pre-sorted by name length, so walking it in
        // reverse visits the most specific (longest) names first. The first
        // directive whose name is a prefix of `target` — or which has no
        // name at all, i.e. the global default — decides the outcome.
        self.directives
            .iter()
            .rev()
            .find(|directive| match directive.name {
                Some(ref name) => target.starts_with(&name[..]),
                None => true,
            })
            .map_or(false, |directive| level <= directive.level)
    }
}
impl Log for Logger {
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        self.enabled(metadata.level(), metadata.target())
    }

    fn log(&self, record: &LogRecord) {
        if !Log::enabled(self, record.metadata()) {
            return;
        }

        // Per the module docs, a message is only logged if it matches the
        // filter regex — so skip the record when it does NOT match. (The
        // previous condition was inverted and suppressed exactly the
        // records the filter selected.)
        if let Some(filter) = self.filter.as_ref() {
            if !filter.is_match(&*record.args().to_string()) {
                return;
            }
        }

        // stderr is internally synchronized, so no extra locking is needed.
        let _ = writeln!(&mut io::stderr(),
                         "{}:{}: {}",
                         record.level(),
                         record.location().module_path(),
                         record.args());
    }
}
/// A single logging directive parsed from `RUST_LOG`, e.g. `crate1::mod1=warn`.
struct LogDirective {
    // Module path prefix this directive applies to; `None` means it is the
    // global default and applies to every target.
    name: Option<String>,
    // Maximum log level enabled for matching targets.
    level: LogLevelFilter,
}
/// Initializes the global logger with an env logger.
///
/// This should be called early in the execution of a Rust program, and the
/// global logger may only be initialized once. Future initialization attempts
/// will return an error.
pub fn init() -> Result<(), SetLoggerError> {
    log::set_logger(|max_level| {
        // Read the RUST_LOG spec; an unset (or non-unicode) variable simply
        // means "no directives, no filter" rather than an error.
        let (mut directives, filter) = match env::var("RUST_LOG") {
            Ok(spec) => parse_logging_spec(&spec),
            Err(..) => (Vec::new(), None),
        };
        // Sort the provided directives by length of their name, this allows a
        // little more efficient lookup at runtime.
        directives.sort_by(|a, b| {
            let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0);
            let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0);
            alen.cmp(&blen)
        });
        // Tell the `log` crate the most verbose level any directive enables,
        // so level checks can short-circuit before reaching this logger.
        let level = {
            let max = directives.iter().map(|d| d.level).max();
            max.unwrap_or(LogLevelFilter::Off)
        };
        max_level.set(level);
        Box::new(Logger {
            directives: directives,
            filter: filter,
        })
    })
}
/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo")
/// and return a vector with log directives plus the optional regex filter.
///
/// Invalid directives are skipped with a warning printed to stdout; an
/// invalid overall shape (more than one '/') discards the whole spec.
fn parse_logging_spec(spec: &str) -> (Vec<LogDirective>, Option<Regex>) {
    let mut dirs = Vec::new();

    // The spec has the shape "<directives>[/<regex filter>]"; more than one
    // '/' is invalid.
    let mut parts = spec.split('/');
    let mods = parts.next();
    let filter = parts.next();
    if parts.next().is_some() {
        println!("warning: invalid logging spec '{}', \
                  ignoring it (too many '/'s)", spec);
        return (dirs, None);
    }

    // `if let` instead of `Option::map` used purely for side effects.
    if let Some(m) = mods {
        for s in m.split(',') {
            if s.is_empty() { continue }
            let mut parts = s.split('=');
            let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) {
                (Some(part0), None, None) => {
                    // if the single argument is a log-level string or number,
                    // treat that as a global fallback
                    match part0.parse() {
                        Ok(num) => (num, None),
                        Err(_) => (LogLevelFilter::max(), Some(part0)),
                    }
                }
                // "module=" (empty level) enables everything for that module
                (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)),
                (Some(part0), Some(part1), None) => {
                    match part1.parse() {
                        Ok(num) => (num, Some(part0)),
                        _ => {
                            println!("warning: invalid logging spec '{}', \
                                      ignoring it", part1);
                            continue
                        }
                    }
                },
                // more than one '=' in a directive is invalid
                _ => {
                    println!("warning: invalid logging spec '{}', \
                              ignoring it", s);
                    continue
                }
            };
            dirs.push(LogDirective {
                name: name.map(|s| s.to_string()),
                level: log_level,
            });
        }
    }

    // `and_then` replaces the `map_or(None, ...)` anti-pattern; a regex that
    // fails to compile is reported and dropped rather than propagated.
    let filter = filter.and_then(|filter| {
        match Regex::new(filter) {
            Ok(re) => Some(re),
            Err(e) => {
                println!("warning: invalid regex filter - {}", e);
                None
            }
        }
    });

    (dirs, filter)
}
#[cfg(test)]
mod tests {
    use log::{Log, LogLevel, LogLevelFilter};
    use super::{Logger, LogDirective, parse_logging_spec};

    // Builds a `Logger` with the given directives and no filter regex.
    fn make_logger(dirs: Vec<LogDirective>) -> Logger {
        Logger {
            directives: dirs,
            filter: None,
        }
    }

    // --- Logger::enabled: directive matching ---

    #[test]
    fn match_full_path() {
        // Exact-path directives control their own module only.
        let logger = make_logger(vec![
            LogDirective {
                name: Some("crate2".to_string()),
                level: LogLevelFilter::Info
            },
            LogDirective {
                name: Some("crate1::mod1".to_string()),
                level: LogLevelFilter::Warn
            }
        ]);
        assert!(logger.enabled(LogLevel::Warn, "crate1::mod1"));
        assert!(!logger.enabled(LogLevel::Info, "crate1::mod1"));
        assert!(logger.enabled(LogLevel::Info, "crate2"));
        assert!(!logger.enabled(LogLevel::Debug, "crate2"));
    }

    #[test]
    fn no_match() {
        // Targets matching no directive are disabled at every level.
        let logger = make_logger(vec![
            LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info },
            LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
        ]);
        assert!(!logger.enabled(LogLevel::Warn, "crate3"));
    }

    #[test]
    fn match_beginning() {
        // A directive name is a prefix match on the target path.
        let logger = make_logger(vec![
            LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info },
            LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
        ]);
        assert!(logger.enabled(LogLevel::Info, "crate2::mod1"));
    }

    #[test]
    fn match_beginning_longest_match() {
        // When several prefixes match, the longest (most specific) one wins.
        let logger = make_logger(vec![
            LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info },
            LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug },
            LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
        ]);
        assert!(logger.enabled(LogLevel::Debug, "crate2::mod1"));
        assert!(!logger.enabled(LogLevel::Debug, "crate2"));
    }

    #[test]
    fn match_default() {
        // A directive with no name acts as the global default level.
        let logger = make_logger(vec![
            LogDirective { name: None, level: LogLevelFilter::Info },
            LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn }
        ]);
        assert!(logger.enabled(LogLevel::Warn, "crate1::mod1"));
        assert!(logger.enabled(LogLevel::Info, "crate2::mod2"));
    }

    #[test]
    fn zero_level() {
        // `Off` disables a module entirely, even for errors.
        let logger = make_logger(vec![
            LogDirective { name: None, level: LogLevelFilter::Info },
            LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off }
        ]);
        assert!(!logger.enabled(LogLevel::Error, "crate1::mod1"));
        assert!(logger.enabled(LogLevel::Info, "crate2::mod2"));
    }

    // --- parse_logging_spec: directive string parsing ---

    #[test]
    fn parse_logging_spec_valid() {
        // Explicit level, bare module (level defaults to max), and another level.
        let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug");
        assert_eq!(dirs.len(), 3);
        assert_eq!(dirs[0].name, Some("crate1::mod1".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::Error);
        assert_eq!(dirs[1].name, Some("crate1::mod2".to_string()));
        assert_eq!(dirs[1].level, LogLevelFilter::max());
        assert_eq!(dirs[2].name, Some("crate2".to_string()));
        assert_eq!(dirs[2].level, LogLevelFilter::Debug);
        assert!(filter.is_none());
    }

    #[test]
    fn parse_logging_spec_invalid_crate() {
        // test parse_logging_spec with multiple = in specification
        let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug");
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::Debug);
        assert!(filter.is_none());
    }

    #[test]
    fn parse_logging_spec_invalid_log_level() {
        // test parse_logging_spec with 'noNumber' as log level
        let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug");
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::Debug);
        assert!(filter.is_none());
    }

    #[test]
    fn parse_logging_spec_string_log_level() {
        // test parse_logging_spec with 'warn' as log level
        let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn");
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::Warn);
        assert!(filter.is_none());
    }

    #[test]
    fn parse_logging_spec_empty_log_level() {
        // test parse_logging_spec with '' as log level
        let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=");
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::max());
        assert!(filter.is_none());
    }

    #[test]
    fn parse_logging_spec_global() {
        // test parse_logging_spec with no crate
        let (dirs, filter) = parse_logging_spec("warn,crate2=debug");
        assert_eq!(dirs.len(), 2);
        assert_eq!(dirs[0].name, None);
        assert_eq!(dirs[0].level, LogLevelFilter::Warn);
        assert_eq!(dirs[1].name, Some("crate2".to_string()));
        assert_eq!(dirs[1].level, LogLevelFilter::Debug);
        assert!(filter.is_none());
    }

    #[test]
    fn parse_logging_spec_valid_filter() {
        // Everything after '/' becomes the message filter regex.
        let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc");
        assert_eq!(dirs.len(), 3);
        assert_eq!(dirs[0].name, Some("crate1::mod1".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::Error);
        assert_eq!(dirs[1].name, Some("crate1::mod2".to_string()));
        assert_eq!(dirs[1].level, LogLevelFilter::max());
        assert_eq!(dirs[2].name, Some("crate2".to_string()));
        assert_eq!(dirs[2].level, LogLevelFilter::Debug);
        assert!(filter.is_some() && filter.unwrap().to_string() == "abc");
    }

    #[test]
    fn parse_logging_spec_invalid_crate_filter() {
        // An invalid directive is dropped but the filter is still parsed.
        let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c");
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::Debug);
        assert!(filter.is_some() && filter.unwrap().to_string() == "a.c");
    }

    #[test]
    fn parse_logging_spec_empty_with_filter() {
        // A bare module with a filter defaults the level to max.
        let (dirs, filter) = parse_logging_spec("crate1/a*c");
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate1".to_string()));
        assert_eq!(dirs[0].level, LogLevelFilter::max());
        assert!(filter.is_some() && filter.unwrap().to_string() == "a*c");
    }
}
|
use crate::api::ChannelError::AccessError;
use crate::api::{ChannelError, WriteError, Writer};
use crate::header::Header;
use crate::utils::{align, store_atomic_u64, CLOSE, REC_HEADER_LEN, WATERMARK};
use log::{debug, error, info};
use memmap::MmapMut;
use std::cmp::min;
use std::io::Write;
use std::ptr::copy_nonoverlapping;
use std::result::Result;
use std::sync::atomic::Ordering;
/// An implementation of the [Writer](trait.Writer.html) which access a persistent channel through
/// memory mapping. A `ShmWriter` must be created using the [shm_writer](fn.shm_writer.html) function.
/// Any `ShmWriter` exclusively holds the channel is bound to, and it is *not thread safe*.
/// If multiple threads must write into a channel they should be externally synchronized.
///
/// # Examples
///
/// ```
/// use kekbit_core::tick::TickUnit::Nanos;
/// use kekbit_core::shm::*;
/// use kekbit_core::header::Header;
/// use kekbit_core::api::Writer;
///
/// const FOREVER: u64 = 99_999_999_999;
/// let writer_id = 1850;
/// let channel_id = 42;
/// let capacity = 3000;
/// let max_msg_len = 100;
/// let header = Header::new(writer_id, channel_id, capacity, max_msg_len, FOREVER, Nanos);
/// let test_tmp_dir = tempdir::TempDir::new("kektest").unwrap();
/// let mut writer = shm_writer(&test_tmp_dir.path(), &header).unwrap();
/// writer.heartbeat().unwrap();
/// ```
pub struct ShmWriter {
    // Channel metadata read back from the memory-mapped file.
    header: Header,
    // Start of the data section (right after the header) inside the mapping.
    data_ptr: *mut u8,
    // Byte offset from `data_ptr` where the next record will be written.
    write_offset: u32,
    // The memory mapping itself; kept alive for the lifetime of the writer.
    mmap: MmapMut,
    // Reusable `io::Write` adapter that copies payload bytes into the mapping.
    write: KekWrite,
}
impl ShmWriter {
    /// Wraps an already-created memory mapping in a `ShmWriter`.
    ///
    /// Reads the channel `Header` back from the mapping, positions the data
    /// pointer just past it, and sends an initial heartbeat so readers can
    /// see the channel is alive.
    ///
    /// # Errors
    ///
    /// Returns a `ChannelError` if the header cannot be read or if the
    /// initial heartbeat cannot be written.
    #[allow(clippy::cast_ptr_alignment)]
    pub(super) fn new(mut mmap: MmapMut) -> Result<ShmWriter, ChannelError> {
        let buf = &mut mmap[..];
        let header = Header::read(buf)?;
        let header_ptr = buf.as_ptr() as *mut u64;
        let head_len = header.len();
        // NOTE(review): `header_ptr` is *mut u64, so `head_len` appears to be
        // measured in u64 words rather than bytes — confirm against Header.
        let data_ptr = unsafe { header_ptr.add(head_len) } as *mut u8;
        let write = KekWrite::new(data_ptr, header.max_msg_len() as usize);
        let mut writer = ShmWriter {
            header,
            data_ptr,
            write_offset: 0,
            mmap,
            write,
        };
        info!(
            "Kekbit channel writer created. Size is {}MB. Max msg size {}KB",
            writer.header.capacity() / 1_000_000,
            writer.header.max_msg_len() / 1_000
        );
        // Send the very first heartbeat; if it fails the channel is unusable.
        match writer.heartbeat() {
            Ok(_) => {
                info!("Initial hearbeat successfully sent!");
                Ok(writer)
            }
            Err(we) => Err(AccessError {
                reason: format!("Initial heartbeat failed!. Reason {:?}", we),
            }),
        }
    }

    /// Writes a record's trailing watermark and then its length word.
    ///
    /// `aligned_rec_len` is expressed in u64 words (the callers pass
    /// byte-length >> 3), so `write_ptr.add(aligned_rec_len)` lands just
    /// past the record's payload.
    #[inline(always)]
    fn write_metadata(&mut self, write_ptr: *mut u64, len: u64, aligned_rec_len: u32) {
        unsafe {
            // We should always have the 8 bytes required by WATERMARK as they
            // are accounted for in the Footer.
            store_atomic_u64(write_ptr.add(aligned_rec_len as usize), WATERMARK, Ordering::Release);
        }
        // Publish the length last so readers only observe a complete record.
        store_atomic_u64(write_ptr, len, Ordering::Release);
    }
}
impl Writer for ShmWriter {
/// Writes a message into the channel. This operation will copy the message into the channel storage.
/// While this is a non blocking operation, only one write should be executed at any given time.
///
/// Returns the total amount of bytes wrote into the channel which includes, the size of the message,
/// the size of the message header and the amount of padding add to that message.
///
/// # Arguments
///
/// *`data` - The buffer which contains the data which is going to be wrote into the channel.
/// * `len` - The amount of data which is going to be wrote into to he channel
///
/// # Errors
///
/// Two types of [failures](enum.WriteError.html) may occur: message size is larger than the maximum allowed,
/// or the there is not enough space in the channel to write that message. In the second case, a future write may succeed,
/// if the message has a smaller size that the current one.
///
////// # Examples
///
/// ```
/// use kekbit_core::tick::TickUnit::Nanos;
/// use kekbit_core::shm::*;
/// use kekbit_core::header::Header;
/// use kekbit_core::api::Writer;
///
/// const FOREVER: u64 = 99_999_999_999;
/// let writer_id = 1850;
/// let channel_id = 42;
/// let capacity = 30_000;
/// let max_msg_len = 100;
/// let header = Header::new(writer_id, channel_id, capacity, max_msg_len, FOREVER, Nanos);
/// let test_tmp_dir = tempdir::TempDir::new("kektest").unwrap();
/// let mut writer = shm_writer(&test_tmp_dir.path(), &header).unwrap();
/// let msg = "There are 10 kinds of people: those who know binary and those who don't";
/// let msg_data = msg.as_bytes();
/// writer.write(&msg_data, msg_data.len() as u32).unwrap();
/// ```
#[allow(clippy::cast_ptr_alignment)]
fn write(&mut self, data: &[u8], _len: u32) -> Result<u32, WriteError> {
let read_head_ptr = unsafe { self.data_ptr.add(self.write_offset as usize) };
let write_ptr = unsafe { read_head_ptr.add(REC_HEADER_LEN as usize) };
let available = self.available();
if available <= REC_HEADER_LEN {
return Err(WriteError::ChannelFull);
}
let alen = min(self.header.max_msg_len(), available - REC_HEADER_LEN) as usize;
//self.encoder.encode(data, self.write.reset(write_ptr, len));
self.write.reset(write_ptr, alen);
self.write.write(data).unwrap();
if !self.write.failed {
let aligned_rec_len = align(self.write.total as u32 + REC_HEADER_LEN);
self.write_metadata(read_head_ptr as *mut u64, self.write.total as u64, aligned_rec_len >> 3);
self.write_offset += aligned_rec_len;
Ok(aligned_rec_len)
} else {
Err(WriteError::NoSpaceForRecord)
}
}
    #[allow(clippy::cast_ptr_alignment)]
    fn heartbeat(&mut self) -> Result<u32, WriteError> {
        let read_head_ptr = unsafe { self.data_ptr.add(self.write_offset as usize) };
        let available = self.available();
        if available <= REC_HEADER_LEN {
            return Err(WriteError::ChannelFull);
        }
        let aligned_rec_len = REC_HEADER_LEN; //no need to align: REC_HEADER_LEN must already be aligned
        // A heartbeat is a zero-length record: just the header word (len 0)
        // plus the trailing watermark.
        self.write_metadata(read_head_ptr as *mut u64, 0u64, aligned_rec_len >> 3);
        self.write_offset += aligned_rec_len;
        Ok(aligned_rec_len)
    }
/// Flushes the channel's outstanding memory map modifications to disk. Calling this method explicitly
/// it is not encouraged as flushing does occur automatically and comes with a performance penalty.
/// It should be used only if for various reasons a writer wants to persist the channel data to the disk
/// at a higher rate than is done automatically.
///
/// Returns Ok(()) if the operation succeeds.
///
/// # Errors
///
/// If flushing fails an I/O error is returned.
///
/// # Examples
///
/// ```
/// use kekbit_core::tick::TickUnit::Nanos;
/// use kekbit_core::shm::*;
/// use kekbit_core::header::Header;
/// use kekbit_core::api::Writer;
///
/// const FOREVER: u64 = 99_999_999_999;
/// let writer_id = 1850;
/// let channel_id = 42;
/// let capacity = 30_000;
/// let max_msg_len = 100;
/// let header = Header::new(writer_id, channel_id, capacity, max_msg_len, FOREVER, Nanos);
/// let test_tmp_dir = tempdir::TempDir::new("kektest").unwrap();
/// let mut writer = shm_writer(&test_tmp_dir.path(), &header).unwrap();
/// let msg = "There are 10 kinds of people: those who know binary and those who don't";
/// let msg_data = msg.as_bytes();
/// writer.write(&msg_data, msg_data.len() as u32).unwrap();
/// writer.flush().unwrap();
/// ```
    #[inline]
    fn flush(&mut self) -> Result<(), std::io::Error> {
        debug!("Flushing the channel");
        // Delegate to the memory map, which flushes outstanding dirty pages
        // to the backing file.
        self.mmap.flush()
    }
}
impl Drop for ShmWriter {
    /// Marks this channel as `closed`, flushes the changes to the disk, and removes the memory mapping.
    fn drop(&mut self) {
        let write_index = self.write_offset;
        info!("Closing message queue..");
        unsafe {
            #[allow(clippy::cast_ptr_alignment)]
            // We should always have the 8 bytes required by CLOSE as they are
            // accounted for in the Footer.
            let write_ptr = self.data_ptr.offset(write_index as isize) as *mut u64;
            // Publish the CLOSE marker so readers know no more records follow.
            store_atomic_u64(write_ptr, CLOSE, Ordering::Release);
            info!("Closing message sent")
        }
        // Mark the channel as fully consumed from this writer's side.
        self.write_offset = self.mmap.len() as u32;
        if self.mmap.flush().is_ok() {
            info!("All changes flushed");
        } else {
            error!("Flush Failed");
        }
    }
}
impl ShmWriter {
    ///Returns the amount of space in this channel still available for write.
    #[inline]
    pub fn available(&self) -> u32 {
        // Mask off the low three bits to round down to the 8-byte alignment.
        let remaining = self.header.capacity() - self.write_offset;
        remaining & 0xFFFF_FFF8
    }

    ///Returns the amount of data written into this channel.
    #[inline]
    pub fn write_offset(&self) -> u32 {
        self.write_offset
    }

    ///Returns a reference to the [Header](struct.Header.html) associated with this channel.
    #[inline]
    pub fn header(&self) -> &Header {
        &self.header
    }
}
// An `io::Write` adapter that copies bytes into a fixed-capacity region of
// the memory-mapped channel, tracking overflow via a sticky `failed` flag.
#[derive(Debug)]
struct KekWrite {
    // Destination of the record's payload inside the mapping.
    write_ptr: *mut u8,
    // Maximum number of payload bytes that may be written before failing.
    max_size: usize,
    // Bytes written so far since the last `reset`.
    total: usize,
    // Sticky failure flag; set when a write would exceed `max_size`.
    failed: bool,
}
impl KekWrite {
#[inline]
fn new(write_ptr: *mut u8, max_size: usize) -> Self {
KekWrite {
write_ptr,
max_size,
total: 0,
failed: false,
}
}
#[inline]
fn reset(&mut self, write_ptr: *mut u8, max_size: usize) -> &mut Self {
self.write_ptr = write_ptr;
self.max_size = max_size;
self.total = 0;
self.failed = false;
self
}
}
impl Write for KekWrite {
    /// Copies `data` into the mapped record buffer.
    ///
    /// Returns `Ok(data.len())` on success and `Ok(0)` once the adapter is
    /// in the failed state (either because this call would exceed
    /// `max_size`, or because a previous one did); it never returns `Err`.
    #[inline]
    fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> {
        // Once a write has overflowed, stay failed until `reset` is called.
        if self.failed {
            return Ok(0);
        }
        let data_len = data.len();
        if self.total + data_len > self.max_size {
            self.failed = true;
            return Ok(0);
        }
        unsafe {
            // `ptr.add(0)` is the pointer itself, so the previous
            // `total > 0` special case was redundant; `add` also avoids the
            // `offset(x as isize)` cast.
            // SAFETY: total + data_len <= max_size was checked above, and
            // `data` cannot overlap the memory-mapped destination region.
            let crt_ptr = self.write_ptr.add(self.total);
            copy_nonoverlapping(data.as_ptr(), crt_ptr, data_len);
            self.total += data_len;
        }
        Ok(data_len)
    }

    /// No-op: bytes are copied straight into the mapping, nothing is buffered.
    #[inline]
    fn flush(&mut self) -> Result<(), std::io::Error> {
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Exercises KekWrite against a plain byte array: successful appends,
    // overflow failure, stickiness of the failure flag, and reset.
    #[test]
    fn test_write() {
        let mut raw_data: [u8; 1000] = [0; 1000];
        let write_ptr = raw_data.as_mut_ptr();
        let mut kw = KekWrite::new(write_ptr, 20);
        kw.flush().unwrap(); //should never crash as it does nothing
        let d1: [u8; 10] = [1; 10];
        let r1 = kw.write(&d1).unwrap();
        assert_eq!(kw.total, r1);
        assert!(!kw.failed);
        for i in 0..10 {
            assert_eq!(raw_data[i], 1);
        }
        kw.flush().unwrap(); //should never crash as it does nothing
        // A second write appends right after the first.
        let r2 = kw.write(&d1).unwrap();
        assert_eq!(kw.total, r1 + r2);
        assert!(!kw.failed);
        for i in 10..20 {
            assert_eq!(raw_data[i], 1);
        }
        // Writing past max_size fails and sets the sticky flag.
        let r3 = kw.write(&d1).unwrap();
        assert_eq!(0, r3);
        assert!(kw.failed);
        // reset clears the flag and starts over at the given pointer.
        kw.reset(write_ptr, 15);
        assert!(!kw.failed);
        let d2: [u8; 10] = [2; 10];
        let r4 = kw.write(&d2).unwrap();
        assert_eq!(kw.total, r4);
        assert!(!kw.failed);
        for i in 0..10 {
            assert_eq!(raw_data[i], 2);
        }
        assert_eq!(kw.total, 10);
        let r5 = kw.write(&d2).unwrap();
        assert_eq!(0, r5);
        assert!(kw.failed);
        assert_eq!(kw.total, 10);
        //once it fails it will never recover, even if it has enough space
        let r6 = kw.write(&d2[0..3]).unwrap();
        assert_eq!(0, r6);
        assert!(kw.failed);
        assert_eq!(kw.total, 10);
    }
}
Fix clippy complaints.
use crate::api::ChannelError::AccessError;
use crate::api::{ChannelError, WriteError, Writer};
use crate::header::Header;
use crate::utils::{align, store_atomic_u64, CLOSE, REC_HEADER_LEN, WATERMARK};
use log::{debug, error, info};
use memmap::MmapMut;
use std::cmp::min;
use std::io::Write;
use std::ptr::copy_nonoverlapping;
use std::result::Result;
use std::sync::atomic::Ordering;
/// An implementation of the [Writer](trait.Writer.html) which access a persistent channel through
/// memory mapping. A `ShmWriter` must be created using the [shm_writer](fn.shm_writer.html) function.
/// Any `ShmWriter` exclusively holds the channel is bound to, and it is *not thread safe*.
/// If multiple threads must write into a channel they should be externally synchronized.
///
/// # Examples
///
/// ```
/// use kekbit_core::tick::TickUnit::Nanos;
/// use kekbit_core::shm::*;
/// use kekbit_core::header::Header;
/// use kekbit_core::api::Writer;
///
/// const FOREVER: u64 = 99_999_999_999;
/// let writer_id = 1850;
/// let channel_id = 42;
/// let capacity = 3000;
/// let max_msg_len = 100;
/// let header = Header::new(writer_id, channel_id, capacity, max_msg_len, FOREVER, Nanos);
/// let test_tmp_dir = tempdir::TempDir::new("kektest").unwrap();
/// let mut writer = shm_writer(&test_tmp_dir.path(), &header).unwrap();
/// writer.heartbeat().unwrap();
/// ```
pub struct ShmWriter {
    // Channel metadata read back from the memory-mapped file.
    header: Header,
    // Start of the data section (right after the header) inside the mapping.
    data_ptr: *mut u8,
    // Byte offset from `data_ptr` where the next record will be written.
    write_offset: u32,
    // The memory mapping itself; kept alive for the lifetime of the writer.
    mmap: MmapMut,
    // Reusable `io::Write` adapter that copies payload bytes into the mapping.
    write: KekWrite,
}
impl ShmWriter {
    /// Wraps an already-created memory mapping in a `ShmWriter`.
    ///
    /// Reads the channel `Header` back from the mapping, positions the data
    /// pointer just past it, and sends an initial heartbeat so readers can
    /// see the channel is alive.
    ///
    /// # Errors
    ///
    /// Returns a `ChannelError` if the header cannot be read or if the
    /// initial heartbeat cannot be written.
    #[allow(clippy::cast_ptr_alignment)]
    pub(super) fn new(mut mmap: MmapMut) -> Result<ShmWriter, ChannelError> {
        let buf = &mut mmap[..];
        let header = Header::read(buf)?;
        let header_ptr = buf.as_ptr() as *mut u64;
        let head_len = header.len();
        // NOTE(review): `header_ptr` is *mut u64, so `head_len` appears to be
        // measured in u64 words rather than bytes — confirm against Header.
        let data_ptr = unsafe { header_ptr.add(head_len) } as *mut u8;
        let write = KekWrite::new(data_ptr, header.max_msg_len() as usize);
        let mut writer = ShmWriter {
            header,
            data_ptr,
            write_offset: 0,
            mmap,
            write,
        };
        info!(
            "Kekbit channel writer created. Size is {}MB. Max msg size {}KB",
            writer.header.capacity() / 1_000_000,
            writer.header.max_msg_len() / 1_000
        );
        // Send the very first heartbeat; if it fails the channel is unusable.
        match writer.heartbeat() {
            Ok(_) => {
                info!("Initial hearbeat successfully sent!");
                Ok(writer)
            }
            Err(we) => Err(AccessError {
                reason: format!("Initial heartbeat failed!. Reason {:?}", we),
            }),
        }
    }

    /// Writes a record's trailing watermark and then its length word.
    ///
    /// `aligned_rec_len` is expressed in u64 words (the callers pass
    /// byte-length >> 3), so `write_ptr.add(aligned_rec_len)` lands just
    /// past the record's payload.
    #[inline(always)]
    fn write_metadata(&mut self, write_ptr: *mut u64, len: u64, aligned_rec_len: u32) {
        unsafe {
            // We should always have the 8 bytes required by WATERMARK as they
            // are accounted for in the Footer.
            store_atomic_u64(write_ptr.add(aligned_rec_len as usize), WATERMARK, Ordering::Release);
        }
        // Publish the length last so readers only observe a complete record.
        store_atomic_u64(write_ptr, len, Ordering::Release);
    }
}
impl Writer for ShmWriter {
/// Writes a message into the channel. This operation will copy the message into the channel storage.
/// While this is a non blocking operation, only one write should be executed at any given time.
///
/// Returns the total amount of bytes written into the channel, which includes the size of the message,
/// the size of the message header and the amount of padding added to that message.
///
/// # Arguments
///
/// * `data` - The buffer which contains the data which is going to be written into the channel.
/// * `len` - The amount of data which is going to be written into the channel.
///   NOTE(review): this parameter is currently unused (`_len` below) — the whole
///   `data` slice is written; confirm whether `len` should cap the copy.
///
/// # Errors
///
/// Two types of [failures](enum.WriteError.html) may occur: message size is larger than the maximum allowed,
/// or there is not enough space in the channel to write that message. In the second case, a future write may succeed,
/// if the message has a smaller size than the current one.
///
/// # Examples
///
/// ```
/// use kekbit_core::tick::TickUnit::Nanos;
/// use kekbit_core::shm::*;
/// use kekbit_core::header::Header;
/// use kekbit_core::api::Writer;
///
/// const FOREVER: u64 = 99_999_999_999;
/// let writer_id = 1850;
/// let channel_id = 42;
/// let capacity = 30_000;
/// let max_msg_len = 100;
/// let header = Header::new(writer_id, channel_id, capacity, max_msg_len, FOREVER, Nanos);
/// let test_tmp_dir = tempdir::TempDir::new("kektest").unwrap();
/// let mut writer = shm_writer(&test_tmp_dir.path(), &header).unwrap();
/// let msg = "There are 10 kinds of people: those who know binary and those who don't";
/// let msg_data = msg.as_bytes();
/// writer.write(&msg_data, msg_data.len() as u32).unwrap();
/// ```
#[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::unused_io_amount)]
fn write(&mut self, data: &[u8], _len: u32) -> Result<u32, WriteError> {
// Record layout: [u64 length header][payload][padding to 8-byte alignment][WATERMARK].
// read_head_ptr is where the record header goes; payload starts REC_HEADER_LEN after it.
let read_head_ptr = unsafe { self.data_ptr.add(self.write_offset as usize) };
let write_ptr = unsafe { read_head_ptr.add(REC_HEADER_LEN as usize) };
let available = self.available();
if available <= REC_HEADER_LEN {
return Err(WriteError::ChannelFull);
}
// Writable payload bytes: capped by both max_msg_len and the remaining space.
let alen = min(self.header.max_msg_len(), available - REC_HEADER_LEN) as usize;
//self.encoder.encode(data, self.write.reset(write_ptr, len));
self.write.reset(write_ptr, alen);
let total = self.write.write(data).unwrap(); //this will change to an encoder
if total > 0 && !self.write.failed {
// Total record size (payload + header) rounded up to 8-byte alignment;
// `>> 3` converts bytes to u64 words as write_metadata expects.
let aligned_rec_len = align(self.write.total as u32 + REC_HEADER_LEN);
self.write_metadata(read_head_ptr as *mut u64, self.write.total as u64, aligned_rec_len >> 3);
self.write_offset += aligned_rec_len;
Ok(aligned_rec_len)
} else {
Err(WriteError::NoSpaceForRecord)
}
}
/// Writes a zero-length record which acts as a heartbeat marker.
#[allow(clippy::cast_ptr_alignment)]
fn heartbeat(&mut self) -> Result<u32, WriteError> {
let read_head_ptr = unsafe { self.data_ptr.add(self.write_offset as usize) };
let available = self.available();
if available <= REC_HEADER_LEN {
return Err(WriteError::ChannelFull);
}
let aligned_rec_len = REC_HEADER_LEN; //no alignment needed: REC_HEADER_LEN must already be 8-byte aligned
// A heartbeat is a header-only record with length 0.
self.write_metadata(read_head_ptr as *mut u64, 0u64, aligned_rec_len >> 3);
self.write_offset += aligned_rec_len;
Ok(aligned_rec_len)
}
/// Flushes the channel's outstanding memory map modifications to disk. Calling this method explicitly
/// it is not encouraged as flushing does occur automatically and comes with a performance penalty.
/// It should be used only if for various reasons a writer wants to persist the channel data to the disk
/// at a higher rate than is done automatically.
///
/// Returns Ok(()) if the operation succeeds.
///
/// # Errors
///
/// If flushing fails an I/O error is returned.
///
/// # Examples
///
/// ```
/// use kekbit_core::tick::TickUnit::Nanos;
/// use kekbit_core::shm::*;
/// use kekbit_core::header::Header;
/// use kekbit_core::api::Writer;
///
/// const FOREVER: u64 = 99_999_999_999;
/// let writer_id = 1850;
/// let channel_id = 42;
/// let capacity = 30_000;
/// let max_msg_len = 100;
/// let header = Header::new(writer_id, channel_id, capacity, max_msg_len, FOREVER, Nanos);
/// let test_tmp_dir = tempdir::TempDir::new("kektest").unwrap();
/// let mut writer = shm_writer(&test_tmp_dir.path(), &header).unwrap();
/// let msg = "There are 10 kinds of people: those who know binary and those who don't";
/// let msg_data = msg.as_bytes();
/// writer.write(&msg_data, msg_data.len() as u32).unwrap();
/// writer.flush().unwrap();
/// ```
#[inline]
fn flush(&mut self) -> Result<(), std::io::Error> {
debug!("Flushing the channel");
self.mmap.flush()
}
}
impl Drop for ShmWriter {
/// Marks this channel as `closed`, flushes the changes to the disk, and removes the memory mapping.
fn drop(&mut self) {
let write_index = self.write_offset;
info!("Closing message queue..");
unsafe {
#[allow(clippy::cast_ptr_alignment)]
//we should always have the 8 bytes required by CLOSE as they are accounted for in the Footer
let write_ptr = self.data_ptr.offset(write_index as isize) as *mut u64;
// Publish the CLOSE marker at the current write position so readers
// can see that no further records will follow.
store_atomic_u64(write_ptr, CLOSE, Ordering::Release);
info!("Closing message sent")
}
// Advance the offset to the end of the mapping; available() is 0 from now on.
self.write_offset = self.mmap.len() as u32;
// Drop must not panic — a failed flush is only logged.
if self.mmap.flush().is_ok() {
info!("All changes flushed");
} else {
error!("Flush Failed");
}
}
}
impl ShmWriter {
/// Remaining writable space in the channel, in bytes, rounded down to the
/// channel's 8-byte record alignment.
#[inline]
pub fn available(&self) -> u32 {
    let free = self.header.capacity() - self.write_offset;
    free & !7u32 // !7 == 0xFFFF_FFF8: drop the low three bits
}
/// Number of bytes already written into this channel.
#[inline]
pub fn write_offset(&self) -> u32 {
    self.write_offset
}
/// The [Header](struct.Header.html) associated with this channel.
#[inline]
pub fn header(&self) -> &Header {
    &self.header
}
}
#[derive(Debug)]
/// A minimal write sink that copies bytes into a raw buffer. Once a write
/// would overflow `max_size` the sink is poisoned (`failed`) until `reset`.
struct KekWrite {
// start of the destination buffer
write_ptr: *mut u8,
// capacity of the destination buffer, in bytes
max_size: usize,
// bytes written since the last reset
total: usize,
// set when a write did not fit; every later write is rejected with Ok(0)
failed: bool,
}
impl KekWrite {
#[inline]
fn new(write_ptr: *mut u8, max_size: usize) -> Self {
KekWrite {
write_ptr,
max_size,
total: 0,
failed: false,
}
}
#[inline]
fn reset(&mut self, write_ptr: *mut u8, max_size: usize) -> &mut Self {
self.write_ptr = write_ptr;
self.max_size = max_size;
self.total = 0;
self.failed = false;
self
}
}
impl Write for KekWrite {
/// Copies `data` into the record buffer at the current offset.
///
/// Returns `Ok(data.len())` on success. If the write would exceed `max_size`,
/// or a previous write already failed, the sink is (or stays) marked failed
/// and `Ok(0)` is returned; no partial data is ever copied.
#[inline]
fn write(&mut self, data: &[u8]) -> Result<usize, std::io::Error> {
    if self.failed {
        return Ok(0);
    }
    let data_len = data.len();
    if self.total + data_len > self.max_size {
        // Idiomatic assignment (was `self.failed |= true`). Once a write does
        // not fit the record is abandoned until the next `reset`, even if a
        // later, smaller write would fit.
        self.failed = true;
        return Ok(0);
    }
    unsafe {
        // SAFETY: total + data_len <= max_size was checked above, and the
        // caller of new/reset guarantees write_ptr points to >= max_size bytes.
        let crt_ptr = self.write_ptr.add(self.total);
        copy_nonoverlapping(data.as_ptr(), crt_ptr, data_len);
    }
    self.total += data_len;
    Ok(data_len)
}
/// No-op: bytes are copied straight into the target buffer.
#[inline]
fn flush(&mut self) -> Result<(), std::io::Error> {
    Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
/// Exercises the full KekWrite life cycle: successful writes, overflow
/// poisoning, reset, and the fact that failure is sticky until reset.
#[test]
fn test_write() {
    // Backing storage much larger than the sink limit, so an out-of-bounds
    // write would be observable.
    let mut backing: [u8; 1000] = [0; 1000];
    let buf_ptr = backing.as_mut_ptr();
    let mut sink = KekWrite::new(buf_ptr, 20);
    sink.flush().unwrap(); // flush is a no-op and must never fail
    let ones: [u8; 10] = [1; 10];
    let first = sink.write(&ones).unwrap();
    assert_eq!(sink.total, first);
    assert!(!sink.failed);
    assert!(backing[0..10].iter().all(|&b| b == 1));
    sink.flush().unwrap(); // still a no-op
    let second = sink.write(&ones).unwrap();
    assert_eq!(sink.total, first + second);
    assert!(!sink.failed);
    assert!(backing[10..20].iter().all(|&b| b == 1));
    // A third 10-byte write exceeds max_size: rejected, sink poisoned.
    let third = sink.write(&ones).unwrap();
    assert_eq!(0, third);
    assert!(sink.failed);
    // reset clears the failure and starts over at the buffer head.
    sink.reset(buf_ptr, 15);
    assert!(!sink.failed);
    let twos: [u8; 10] = [2; 10];
    let fourth = sink.write(&twos).unwrap();
    assert_eq!(sink.total, fourth);
    assert!(!sink.failed);
    assert!(backing[0..10].iter().all(|&b| b == 2));
    assert_eq!(sink.total, 10);
    let fifth = sink.write(&twos).unwrap();
    assert_eq!(0, fifth);
    assert!(sink.failed);
    assert_eq!(sink.total, 10);
    //once it fails it will never recover, even if it has enough space
    let sixth = sink.write(&twos[0..3]).unwrap();
    assert_eq!(0, sixth);
    assert!(sink.failed);
    assert_eq!(sink.total, 10);
}
}
|
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
mod aptos_debug_natives;
use crate::common::utils::{create_dir_if_not_exist, dir_default_to_current};
use crate::{
common::{
types::{
load_account_arg, AccountAddressWrapper, CliError, CliTypedResult, MovePackageDir,
PromptOptions, TransactionOptions, TransactionSummary,
},
utils::check_if_file_exists,
},
CliCommand, CliResult,
};
use aptos_module_verifier::module_init::verify_module_init_function;
use aptos_rest_client::aptos_api_types::MoveType;
use aptos_types::transaction::{ModuleBundle, ScriptFunction, TransactionPayload};
use async_trait::async_trait;
use clap::{Parser, Subcommand};
use move_deps::move_cli::base::test::UnitTestResult;
use move_deps::{
move_cli,
move_command_line_common::env::get_bytecode_version_from_env,
move_core_types::{
identifier::Identifier,
language_storage::{ModuleId, TypeTag},
},
move_package::{
compilation::compiled_package::CompiledPackage,
source_package::layout::SourcePackageLayout, BuildConfig,
},
move_unit_test::UnitTestingConfig,
};
use std::{
collections::BTreeMap,
convert::TryFrom,
io::Write,
path::{Path, PathBuf},
str::FromStr,
};
/// CLI tool for performing Move tasks
///
#[derive(Subcommand)]
pub enum MoveTool {
// Compile a package and list its module ids (plain `//` comments are used
// here on purpose: `///` would leak into clap's generated help text)
Compile(CompilePackage),
// Scaffold a new package (Move.toml + sources directory)
Init(InitPackage),
// Publish compiled modules as a ModuleBundle transaction
Publish(PublishPackage),
// Execute a script function on chain
Run(RunFunction),
// Run the package's Move unit tests
Test(TestPackage),
}
impl MoveTool {
/// Dispatches the selected subcommand and serializes its result for CLI output.
pub async fn execute(self) -> CliResult {
match self {
MoveTool::Compile(tool) => tool.execute_serialized().await,
// Init reports only success/failure, hence the `_success` variant
MoveTool::Init(tool) => tool.execute_serialized_success().await,
MoveTool::Publish(tool) => tool.execute_serialized().await,
MoveTool::Run(tool) => tool.execute_serialized().await,
MoveTool::Test(tool) => tool.execute_serialized().await,
}
}
}
/// Creates a new Move package at the given location
#[derive(Parser)]
pub struct InitPackage {
/// Name of the new move package
#[clap(long)]
name: String,
/// Path to create the new move package
// Defaults to the current directory when omitted (see dir_default_to_current)
#[clap(long, parse(from_os_str))]
package_dir: Option<PathBuf>,
/// Named addresses for the move binary
///
/// Example: alice=0x1234, bob=0x5678
///
/// Note: This will fail if there are duplicates in the Move.toml file remove those first.
#[clap(long, parse(try_from_str = crate::common::utils::parse_map), default_value = "")]
named_addresses: BTreeMap<String, AccountAddressWrapper>,
// Presumably controls prompting before overwriting an existing Move.toml
// (passed to check_if_file_exists) — NOTE(review): confirm.
#[clap(flatten)]
prompt_options: PromptOptions,
}
#[async_trait]
impl CliCommand<()> for InitPackage {
fn command_name(&self) -> &'static str {
"InitPackage"
}
/// Scaffolds the package: creates the sources directory and writes a
/// Move.toml containing the package name, an AptosFramework git dependency
/// and the user-supplied named addresses.
async fn execute(self) -> CliTypedResult<()> {
let package_dir = dir_default_to_current(self.package_dir.clone())?;
let move_toml = package_dir.join(SourcePackageLayout::Manifest.path());
// Refuse to clobber an existing manifest (subject to prompt_options).
check_if_file_exists(move_toml.as_path(), self.prompt_options)?;
create_dir_if_not_exist(
package_dir
.join(SourcePackageLayout::Sources.path())
.as_path(),
)?;
let mut w = std::fs::File::create(move_toml.as_path()).map_err(|err| {
CliError::UnexpectedError(format!(
"Failed to create {}: {}",
package_dir.join(Path::new("Move.toml")).display(),
err
))
})?;
// Render named addresses as hex literals for the [addresses] TOML section.
let addresses: BTreeMap<String, String> = self
.named_addresses
.clone()
.into_iter()
.map(|(key, value)| (key, value.account_address.to_hex_literal()))
.collect();
// TODO: Support Git as default when Github credentials are properly handled from GH CLI
writeln!(
&mut w,
"[package]
name = \"{}\"
version = \"0.0.0\"
[dependencies]
AptosFramework = {{ git = \"https://github.com/aptos-labs/aptos-core.git\", subdir = \"aptos-move/framework/aptos-framework/\", rev = \"main\" }}
[addresses]
{}
",
self.name,
toml::to_string(&addresses).unwrap()
)
.map_err(|err| {
CliError::UnexpectedError(format!(
"Failed to write {:?}: {}",
package_dir.join(Path::new("Move.toml")),
err
))
})
}
}
/// Compiles a package and returns the [`ModuleId`]s
#[derive(Parser)]
pub struct CompilePackage {
// Shared package-directory / named-address / output-dir options
#[clap(flatten)]
move_options: MovePackageDir,
}
#[async_trait]
impl CliCommand<Vec<String>> for CompilePackage {
fn command_name(&self) -> &'static str {
"CompilePackage"
}
/// Compiles the package (with ABIs and docs enabled) and returns the string
/// form of every root module's id. Each module is first checked with
/// `verify_module_init_function`; a verification failure is surfaced as a
/// compilation error.
async fn execute(self) -> CliTypedResult<Vec<String>> {
let build_config = BuildConfig {
additional_named_addresses: self.move_options.named_addresses(),
generate_abis: true,
generate_docs: true,
install_dir: self.move_options.output_dir.clone(),
..Default::default()
};
let compiled_package =
compile_move(build_config, self.move_options.get_package_dir()?.as_path())?;
let mut ids = Vec::new();
for &module in compiled_package.root_modules_map().iter_modules().iter() {
verify_module_init_function(module)
.map_err(|e| CliError::MoveCompilationError(e.to_string()))?;
ids.push(module.self_id().to_string());
}
Ok(ids)
}
}
/// Run Move unit tests against a package path
#[derive(Parser)]
pub struct TestPackage {
// Shared package-directory / named-address / output-dir options
#[clap(flatten)]
move_options: MovePackageDir,
/// A filter string to determine which unit tests to run
#[clap(long)]
pub filter: Option<String>,
}
#[async_trait]
impl CliCommand<&'static str> for TestPackage {
fn command_name(&self) -> &'static str {
"TestPackage"
}
/// Builds the package in test mode and runs its Move unit tests with the
/// Aptos debug natives; returns "Success" or `CliError::MoveTestError`.
async fn execute(self) -> CliTypedResult<&'static str> {
let config = BuildConfig {
additional_named_addresses: self.move_options.named_addresses(),
test_mode: true,
install_dir: self.move_options.output_dir.clone(),
..Default::default()
};
let result = move_cli::base::test::run_move_unit_tests(
self.move_options.get_package_dir()?.as_path(),
config,
UnitTestingConfig {
// Only tests matching the filter run; None runs everything.
filter: self.filter,
// 100_000 — presumably the per-test execution bound; TODO confirm.
..UnitTestingConfig::default_with_bound(Some(100_000))
},
aptos_debug_natives::aptos_debug_natives(),
false,
&mut std::io::stdout(),
)
.map_err(|err| CliError::UnexpectedError(err.to_string()))?;
// TODO: commit back up to the move repo
match result {
UnitTestResult::Success => Ok("Success"),
UnitTestResult::Failure => Err(CliError::MoveTestError),
}
}
}
/// Compiles a Move package dir, and returns the compiled modules.
///
/// Compiler errors are flattened into `CliError::MoveCompilationError`.
fn compile_move(build_config: BuildConfig, package_dir: &Path) -> CliTypedResult<CompiledPackage> {
    // TODO: Add caching
    let mut sink = Vec::new(); // compiler output channel; contents are discarded
    match build_config.compile_package(package_dir, &mut sink) {
        Ok(package) => Ok(package),
        Err(err) => Err(CliError::MoveCompilationError(err.to_string())),
    }
}
/// Publishes the modules in a Move package
#[derive(Parser)]
pub struct PublishPackage {
// Shared package-directory / named-address / output-dir options
#[clap(flatten)]
move_options: MovePackageDir,
// Signing / submission options for the publishing transaction
#[clap(flatten)]
txn_options: TransactionOptions,
}
#[async_trait]
impl CliCommand<TransactionSummary> for PublishPackage {
fn command_name(&self) -> &'static str {
"PublishPackage"
}
/// Compiles the package and submits all of its root modules on chain as a
/// single ModuleBundle transaction.
async fn execute(self) -> CliTypedResult<TransactionSummary> {
let build_config = BuildConfig {
additional_named_addresses: self.move_options.named_addresses(),
// ABIs are not needed to publish; docs are still generated
generate_abis: false,
generate_docs: true,
install_dir: self.move_options.output_dir.clone(),
..Default::default()
};
let package = compile_move(build_config, self.move_options.get_package_dir()?.as_path())?;
// Serialize each compiled unit to bytecode (version taken from the env).
let compiled_units: Vec<Vec<u8>> = package
.root_compiled_units
.iter()
.map(|unit_with_source| {
unit_with_source
.unit
.serialize(get_bytecode_version_from_env())
})
.collect();
// Send the compiled modules as one bundle
self.txn_options
.submit_transaction(TransactionPayload::ModuleBundle(ModuleBundle::new(
compiled_units,
)))
.await
.map(TransactionSummary::from)
}
}
/// Run a Move function
#[derive(Parser)]
pub struct RunFunction {
// Signing / submission options for the transaction
#[clap(flatten)]
txn_options: TransactionOptions,
/// Function name as `<ADDRESS>::<MODULE_ID>::<FUNCTION_NAME>`
///
/// Example: `0x842ed41fad9640a2ad08fdd7d3e4f7f505319aac7d67e1c0dd6a7cce8732c7e3::message::set_message`
#[clap(long, parse(try_from_str = parse_function_name))]
function_id: FunctionId,
/// Hex encoded arguments separated by spaces.
///
/// Example: `0x01 0x02 0x03`
// NOTE(review): the help text above looks stale — ArgWithType::from_str
// expects `<type>:<arg>` pairs (e.g. `u64:42`), not bare hex. Confirm and
// update the `///` text (it is emitted as CLI help).
#[clap(long, multiple_values = true)]
args: Vec<ArgWithType>,
/// TypeTag arguments separated by spaces.
///
/// Example: `u8 u64 u128 bool address vector true false signer`
#[clap(long, multiple_values = true)]
type_args: Vec<MoveType>,
}
#[async_trait]
impl CliCommand<TransactionSummary> for RunFunction {
fn command_name(&self) -> &'static str {
    "RunFunction"
}
/// Submits a ScriptFunction transaction built from the parsed function id,
/// the BCS-encoded arguments and the generic type arguments.
async fn execute(self) -> CliTypedResult<TransactionSummary> {
    // Arguments were already BCS-encoded while parsing the CLI input.
    let args: Vec<Vec<u8>> = self.args.iter().map(|a| a.arg.clone()).collect();
    // Convert each REST-layer MoveType into a core TypeTag, failing fast on
    // the first unparseable one.
    let type_args = self
        .type_args
        .iter()
        .cloned()
        .map(|type_arg| {
            TypeTag::try_from(type_arg)
                .map_err(|err| CliError::UnableToParse("--type-args", err.to_string()))
        })
        .collect::<CliTypedResult<Vec<TypeTag>>>()?;
    let payload = TransactionPayload::ScriptFunction(ScriptFunction::new(
        self.function_id.module_id.clone(),
        self.function_id.function_id.clone(),
        type_args,
        args,
    ));
    self.txn_options
        .submit_transaction(payload)
        .await
        .map(TransactionSummary::from)
}
}
#[derive(Clone, Debug)]
/// The set of argument types the CLI can BCS-encode for a script function call.
enum FunctionArgType {
Address,
Bool,
// raw bytes supplied as a hex string
Hex,
String,
U8,
U64,
U128,
}
impl FunctionArgType {
/// Parses `arg` according to this argument type and returns its BCS encoding.
///
/// # Errors
///
/// `CliError::UnableToParse` when `arg` is not a valid value for the type,
/// or `CliError::BCS` if serialization itself fails.
fn parse_arg(&self, arg: &str) -> CliTypedResult<Vec<u8>> {
match self {
FunctionArgType::Address => bcs::to_bytes(
&load_account_arg(arg)
.map_err(|err| CliError::UnableToParse("address", err.to_string()))?,
),
FunctionArgType::Bool => bcs::to_bytes(
&bool::from_str(arg)
.map_err(|err| CliError::UnableToParse("bool", err.to_string()))?,
),
FunctionArgType::Hex => bcs::to_bytes(
&hex::decode(arg).map_err(|err| CliError::UnableToParse("hex", err.to_string()))?,
),
// Strings are BCS-encoded directly from the raw CLI text.
FunctionArgType::String => bcs::to_bytes(arg),
FunctionArgType::U8 => bcs::to_bytes(
&u8::from_str(arg).map_err(|err| CliError::UnableToParse("u8", err.to_string()))?,
),
FunctionArgType::U64 => bcs::to_bytes(
&u64::from_str(arg)
.map_err(|err| CliError::UnableToParse("u64", err.to_string()))?,
),
FunctionArgType::U128 => bcs::to_bytes(
&u128::from_str(arg)
.map_err(|err| CliError::UnableToParse("u128", err.to_string()))?,
),
}
.map_err(|err| CliError::BCS("arg", err))
}
}
impl FromStr for FunctionArgType {
type Err = CliError;
/// Parses a case-insensitive type name into a `FunctionArgType`.
fn from_str(s: &str) -> Result<Self, Self::Err> {
    let lowered = s.to_lowercase();
    match lowered.as_str() {
        "address" => Ok(Self::Address),
        "bool" => Ok(Self::Bool),
        "hex" => Ok(Self::Hex),
        "string" => Ok(Self::String),
        "u8" => Ok(Self::U8),
        "u64" => Ok(Self::U64),
        "u128" => Ok(Self::U128),
        other => Err(CliError::CommandArgumentError(format!("Invalid arg type '{}'. Must be one of: ['address','bool','hex','string','u8','u64','u128']", other))),
    }
}
}
/// A parseable arg with a type separated by a colon
pub struct ArgWithType {
// the declared type; retained but unused (hence the leading underscore)
_ty: FunctionArgType,
// the BCS-encoded argument bytes
arg: Vec<u8>,
}
impl FromStr for ArgWithType {
type Err = CliError;
/// Parses `<type>:<arg>` (e.g. `bool:true`) into a BCS-encoded argument.
fn from_str(s: &str) -> Result<Self, Self::Err> {
    let mut pieces = s.split(':');
    // Exactly two `:`-separated parts are required; a third piece (or a
    // missing second one) is a malformed argument.
    match (pieces.next(), pieces.next(), pieces.next()) {
        (Some(ty_str), Some(arg_str), None) => {
            let ty = FunctionArgType::from_str(ty_str)?;
            let arg = ty.parse_arg(arg_str)?;
            Ok(ArgWithType { _ty: ty, arg })
        }
        _ => Err(CliError::CommandArgumentError(
            "Arguments must be pairs of <type>:<arg> e.g. bool:true".to_string(),
        )),
    }
}
}
/// A fully-qualified Move function: the module that declares it plus its name.
pub struct FunctionId {
pub module_id: ModuleId,
pub function_id: Identifier,
}
/// Parses `<address>::<module>::<function>` into a [`FunctionId`].
///
/// # Errors
///
/// `CliError::CommandArgumentError` if the input does not have exactly three
/// `::`-separated segments; `CliError::UnableToParse` if the module or
/// function segment is not a valid identifier.
fn parse_function_name(function_id: &str) -> CliTypedResult<FunctionId> {
    let ids: Vec<&str> = function_id.split_terminator("::").collect();
    // Slice pattern replaces the len()==3 check plus repeated
    // `get(i).unwrap()` calls (avoids the unwrap panic path entirely).
    match ids.as_slice() {
        [address, module, function] => {
            let address = load_account_arg(address)?;
            let module = Identifier::from_str(module)
                .map_err(|err| CliError::UnableToParse("Module Name", err.to_string()))?;
            let function_id = Identifier::from_str(function)
                .map_err(|err| CliError::UnableToParse("Function Name", err.to_string()))?;
            Ok(FunctionId {
                module_id: ModuleId::new(address, module),
                function_id,
            })
        }
        _ => Err(CliError::CommandArgumentError(
            "FunctionId is not well formed. Must be of the form <address>::<module>::<function>"
                .to_string(),
        )),
    }
}
[aptos-cli] Use devnet instead of main to initialize move packages
This should prevent users from creating packages against an incompatible
version of the Aptos framework.
// Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
mod aptos_debug_natives;
use crate::common::utils::{create_dir_if_not_exist, dir_default_to_current};
use crate::{
common::{
types::{
load_account_arg, AccountAddressWrapper, CliError, CliTypedResult, MovePackageDir,
PromptOptions, TransactionOptions, TransactionSummary,
},
utils::check_if_file_exists,
},
CliCommand, CliResult,
};
use aptos_module_verifier::module_init::verify_module_init_function;
use aptos_rest_client::aptos_api_types::MoveType;
use aptos_types::transaction::{ModuleBundle, ScriptFunction, TransactionPayload};
use async_trait::async_trait;
use clap::{Parser, Subcommand};
use move_deps::move_cli::base::test::UnitTestResult;
use move_deps::{
move_cli,
move_command_line_common::env::get_bytecode_version_from_env,
move_core_types::{
identifier::Identifier,
language_storage::{ModuleId, TypeTag},
},
move_package::{
compilation::compiled_package::CompiledPackage,
source_package::layout::SourcePackageLayout, BuildConfig,
},
move_unit_test::UnitTestingConfig,
};
use std::{
collections::BTreeMap,
convert::TryFrom,
io::Write,
path::{Path, PathBuf},
str::FromStr,
};
/// CLI tool for performing Move tasks
///
#[derive(Subcommand)]
pub enum MoveTool {
// Compile a package and list its module ids (plain `//` comments are used
// here on purpose: `///` would leak into clap's generated help text)
Compile(CompilePackage),
// Scaffold a new package (Move.toml + sources directory)
Init(InitPackage),
// Publish compiled modules as a ModuleBundle transaction
Publish(PublishPackage),
// Execute a script function on chain
Run(RunFunction),
// Run the package's Move unit tests
Test(TestPackage),
}
impl MoveTool {
/// Dispatches the selected subcommand and serializes its result for CLI output.
pub async fn execute(self) -> CliResult {
match self {
MoveTool::Compile(tool) => tool.execute_serialized().await,
// Init reports only success/failure, hence the `_success` variant
MoveTool::Init(tool) => tool.execute_serialized_success().await,
MoveTool::Publish(tool) => tool.execute_serialized().await,
MoveTool::Run(tool) => tool.execute_serialized().await,
MoveTool::Test(tool) => tool.execute_serialized().await,
}
}
}
/// Creates a new Move package at the given location
#[derive(Parser)]
pub struct InitPackage {
/// Name of the new move package
#[clap(long)]
name: String,
/// Path to create the new move package
// Defaults to the current directory when omitted (see dir_default_to_current)
#[clap(long, parse(from_os_str))]
package_dir: Option<PathBuf>,
/// Named addresses for the move binary
///
/// Example: alice=0x1234, bob=0x5678
///
/// Note: This will fail if there are duplicates in the Move.toml file remove those first.
#[clap(long, parse(try_from_str = crate::common::utils::parse_map), default_value = "")]
named_addresses: BTreeMap<String, AccountAddressWrapper>,
// Presumably controls prompting before overwriting an existing Move.toml
// (passed to check_if_file_exists) — NOTE(review): confirm.
#[clap(flatten)]
prompt_options: PromptOptions,
}
#[async_trait]
impl CliCommand<()> for InitPackage {
fn command_name(&self) -> &'static str {
"InitPackage"
}
/// Scaffolds the package: creates the sources directory and writes a
/// Move.toml containing the package name, an AptosFramework git dependency
/// (pinned to the `devnet` rev) and the user-supplied named addresses.
async fn execute(self) -> CliTypedResult<()> {
let package_dir = dir_default_to_current(self.package_dir.clone())?;
let move_toml = package_dir.join(SourcePackageLayout::Manifest.path())
check_if_file_exists(move_toml.as_path(), self.prompt_options)?;
create_dir_if_not_exist(
package_dir
.join(SourcePackageLayout::Sources.path())
.as_path(),
)?;
let mut w = std::fs::File::create(move_toml.as_path()).map_err(|err| {
CliError::UnexpectedError(format!(
"Failed to create {}: {}",
package_dir.join(Path::new("Move.toml")).display(),
err
))
})?;
// Render named addresses as hex literals for the [addresses] TOML section.
let addresses: BTreeMap<String, String> = self
.named_addresses
.clone()
.into_iter()
.map(|(key, value)| (key, value.account_address.to_hex_literal()))
.collect();
// TODO: Support Git as default when Github credentials are properly handled from GH CLI
writeln!(
&mut w,
"[package]
name = \"{}\"
version = \"0.0.0\"
[dependencies]
AptosFramework = {{ git = \"https://github.com/aptos-labs/aptos-core.git\", subdir = \"aptos-move/framework/aptos-framework/\", rev = \"devnet\" }}
[addresses]
{}
",
self.name,
toml::to_string(&addresses).unwrap()
)
.map_err(|err| {
CliError::UnexpectedError(format!(
"Failed to write {:?}: {}",
package_dir.join(Path::new("Move.toml")),
err
))
})
}
}
/// Compiles a package and returns the [`ModuleId`]s
#[derive(Parser)]
pub struct CompilePackage {
// Shared package-directory / named-address / output-dir options
#[clap(flatten)]
move_options: MovePackageDir,
}
#[async_trait]
impl CliCommand<Vec<String>> for CompilePackage {
fn command_name(&self) -> &'static str {
"CompilePackage"
}
/// Compiles the package (with ABIs and docs enabled) and returns the string
/// form of every root module's id. Each module is first checked with
/// `verify_module_init_function`; a verification failure is surfaced as a
/// compilation error.
async fn execute(self) -> CliTypedResult<Vec<String>> {
let build_config = BuildConfig {
additional_named_addresses: self.move_options.named_addresses(),
generate_abis: true,
generate_docs: true,
install_dir: self.move_options.output_dir.clone(),
..Default::default()
};
let compiled_package =
compile_move(build_config, self.move_options.get_package_dir()?.as_path())?;
let mut ids = Vec::new();
for &module in compiled_package.root_modules_map().iter_modules().iter() {
verify_module_init_function(module)
.map_err(|e| CliError::MoveCompilationError(e.to_string()))?;
ids.push(module.self_id().to_string());
}
Ok(ids)
}
}
/// Run Move unit tests against a package path
#[derive(Parser)]
pub struct TestPackage {
// Shared package-directory / named-address / output-dir options
#[clap(flatten)]
move_options: MovePackageDir,
/// A filter string to determine which unit tests to run
#[clap(long)]
pub filter: Option<String>,
}
#[async_trait]
impl CliCommand<&'static str> for TestPackage {
fn command_name(&self) -> &'static str {
"TestPackage"
}
/// Builds the package in test mode and runs its Move unit tests with the
/// Aptos debug natives; returns "Success" or `CliError::MoveTestError`.
async fn execute(self) -> CliTypedResult<&'static str> {
let config = BuildConfig {
additional_named_addresses: self.move_options.named_addresses(),
test_mode: true,
install_dir: self.move_options.output_dir.clone(),
..Default::default()
};
let result = move_cli::base::test::run_move_unit_tests(
self.move_options.get_package_dir()?.as_path(),
config,
UnitTestingConfig {
// Only tests matching the filter run; None runs everything.
filter: self.filter,
// 100_000 — presumably the per-test execution bound; TODO confirm.
..UnitTestingConfig::default_with_bound(Some(100_000))
},
aptos_debug_natives::aptos_debug_natives(),
false,
&mut std::io::stdout(),
)
.map_err(|err| CliError::UnexpectedError(err.to_string()))?;
// TODO: commit back up to the move repo
match result {
UnitTestResult::Success => Ok("Success"),
UnitTestResult::Failure => Err(CliError::MoveTestError),
}
}
}
/// Compiles a Move package dir, and returns the compiled modules.
///
/// Compiler errors are flattened into `CliError::MoveCompilationError`.
fn compile_move(build_config: BuildConfig, package_dir: &Path) -> CliTypedResult<CompiledPackage> {
    // TODO: Add caching
    let mut sink = Vec::new(); // compiler output channel; contents are discarded
    match build_config.compile_package(package_dir, &mut sink) {
        Ok(package) => Ok(package),
        Err(err) => Err(CliError::MoveCompilationError(err.to_string())),
    }
}
/// Publishes the modules in a Move package
#[derive(Parser)]
pub struct PublishPackage {
// Shared package-directory / named-address / output-dir options
#[clap(flatten)]
move_options: MovePackageDir,
// Signing / submission options for the publishing transaction
#[clap(flatten)]
txn_options: TransactionOptions,
}
#[async_trait]
impl CliCommand<TransactionSummary> for PublishPackage {
fn command_name(&self) -> &'static str {
"PublishPackage"
}
/// Compiles the package and submits all of its root modules on chain as a
/// single ModuleBundle transaction.
async fn execute(self) -> CliTypedResult<TransactionSummary> {
let build_config = BuildConfig {
additional_named_addresses: self.move_options.named_addresses(),
// ABIs are not needed to publish; docs are still generated
generate_abis: false,
generate_docs: true,
install_dir: self.move_options.output_dir.clone(),
..Default::default()
};
let package = compile_move(build_config, self.move_options.get_package_dir()?.as_path())?;
// Serialize each compiled unit to bytecode (version taken from the env).
let compiled_units: Vec<Vec<u8>> = package
.root_compiled_units
.iter()
.map(|unit_with_source| {
unit_with_source
.unit
.serialize(get_bytecode_version_from_env())
})
.collect();
// Send the compiled modules as one bundle
self.txn_options
.submit_transaction(TransactionPayload::ModuleBundle(ModuleBundle::new(
compiled_units,
)))
.await
.map(TransactionSummary::from)
}
}
/// Run a Move function
#[derive(Parser)]
pub struct RunFunction {
// Signing / submission options for the transaction
#[clap(flatten)]
txn_options: TransactionOptions,
/// Function name as `<ADDRESS>::<MODULE_ID>::<FUNCTION_NAME>`
///
/// Example: `0x842ed41fad9640a2ad08fdd7d3e4f7f505319aac7d67e1c0dd6a7cce8732c7e3::message::set_message`
#[clap(long, parse(try_from_str = parse_function_name))]
function_id: FunctionId,
/// Hex encoded arguments separated by spaces.
///
/// Example: `0x01 0x02 0x03`
// NOTE(review): the help text above looks stale — ArgWithType::from_str
// expects `<type>:<arg>` pairs (e.g. `u64:42`), not bare hex. Confirm and
// update the `///` text (it is emitted as CLI help).
#[clap(long, multiple_values = true)]
args: Vec<ArgWithType>,
/// TypeTag arguments separated by spaces.
///
/// Example: `u8 u64 u128 bool address vector true false signer`
#[clap(long, multiple_values = true)]
type_args: Vec<MoveType>,
}
#[async_trait]
impl CliCommand<TransactionSummary> for RunFunction {
fn command_name(&self) -> &'static str {
    "RunFunction"
}
/// Submits a ScriptFunction transaction built from the parsed function id,
/// the BCS-encoded arguments and the generic type arguments.
async fn execute(self) -> CliTypedResult<TransactionSummary> {
    // Arguments were already BCS-encoded while parsing the CLI input.
    let args: Vec<Vec<u8>> = self.args.iter().map(|a| a.arg.clone()).collect();
    // Convert each REST-layer MoveType into a core TypeTag, failing fast on
    // the first unparseable one.
    let type_args = self
        .type_args
        .iter()
        .cloned()
        .map(|type_arg| {
            TypeTag::try_from(type_arg)
                .map_err(|err| CliError::UnableToParse("--type-args", err.to_string()))
        })
        .collect::<CliTypedResult<Vec<TypeTag>>>()?;
    let payload = TransactionPayload::ScriptFunction(ScriptFunction::new(
        self.function_id.module_id.clone(),
        self.function_id.function_id.clone(),
        type_args,
        args,
    ));
    self.txn_options
        .submit_transaction(payload)
        .await
        .map(TransactionSummary::from)
}
}
#[derive(Clone, Debug)]
/// The set of argument types the CLI can BCS-encode for a script function call.
enum FunctionArgType {
Address,
Bool,
// raw bytes supplied as a hex string
Hex,
String,
U8,
U64,
U128,
}
impl FunctionArgType {
/// Parses `arg` according to this argument type and returns its BCS encoding.
///
/// # Errors
///
/// `CliError::UnableToParse` when `arg` is not a valid value for the type,
/// or `CliError::BCS` if serialization itself fails.
fn parse_arg(&self, arg: &str) -> CliTypedResult<Vec<u8>> {
match self {
FunctionArgType::Address => bcs::to_bytes(
&load_account_arg(arg)
.map_err(|err| CliError::UnableToParse("address", err.to_string()))?,
),
FunctionArgType::Bool => bcs::to_bytes(
&bool::from_str(arg)
.map_err(|err| CliError::UnableToParse("bool", err.to_string()))?,
),
FunctionArgType::Hex => bcs::to_bytes(
&hex::decode(arg).map_err(|err| CliError::UnableToParse("hex", err.to_string()))?,
),
// Strings are BCS-encoded directly from the raw CLI text.
FunctionArgType::String => bcs::to_bytes(arg),
FunctionArgType::U8 => bcs::to_bytes(
&u8::from_str(arg).map_err(|err| CliError::UnableToParse("u8", err.to_string()))?,
),
FunctionArgType::U64 => bcs::to_bytes(
&u64::from_str(arg)
.map_err(|err| CliError::UnableToParse("u64", err.to_string()))?,
),
FunctionArgType::U128 => bcs::to_bytes(
&u128::from_str(arg)
.map_err(|err| CliError::UnableToParse("u128", err.to_string()))?,
),
}
.map_err(|err| CliError::BCS("arg", err))
}
}
impl FromStr for FunctionArgType {
type Err = CliError;
/// Parses a case-insensitive type name into a `FunctionArgType`.
fn from_str(s: &str) -> Result<Self, Self::Err> {
    let lowered = s.to_lowercase();
    match lowered.as_str() {
        "address" => Ok(Self::Address),
        "bool" => Ok(Self::Bool),
        "hex" => Ok(Self::Hex),
        "string" => Ok(Self::String),
        "u8" => Ok(Self::U8),
        "u64" => Ok(Self::U64),
        "u128" => Ok(Self::U128),
        other => Err(CliError::CommandArgumentError(format!("Invalid arg type '{}'. Must be one of: ['address','bool','hex','string','u8','u64','u128']", other))),
    }
}
}
/// A parseable arg with a type separated by a colon
pub struct ArgWithType {
// the declared type; retained but unused (hence the leading underscore)
_ty: FunctionArgType,
// the BCS-encoded argument bytes
arg: Vec<u8>,
}
impl FromStr for ArgWithType {
type Err = CliError;
/// Parses `<type>:<arg>` (e.g. `bool:true`) into a BCS-encoded argument.
fn from_str(s: &str) -> Result<Self, Self::Err> {
    let mut pieces = s.split(':');
    // Exactly two `:`-separated parts are required; a third piece (or a
    // missing second one) is a malformed argument.
    match (pieces.next(), pieces.next(), pieces.next()) {
        (Some(ty_str), Some(arg_str), None) => {
            let ty = FunctionArgType::from_str(ty_str)?;
            let arg = ty.parse_arg(arg_str)?;
            Ok(ArgWithType { _ty: ty, arg })
        }
        _ => Err(CliError::CommandArgumentError(
            "Arguments must be pairs of <type>:<arg> e.g. bool:true".to_string(),
        )),
    }
}
}
/// A fully-qualified Move function: the module that declares it plus its name.
pub struct FunctionId {
pub module_id: ModuleId,
pub function_id: Identifier,
}
/// Parses `<address>::<module>::<function>` into a [`FunctionId`].
///
/// # Errors
///
/// `CliError::CommandArgumentError` if the input does not have exactly three
/// `::`-separated segments; `CliError::UnableToParse` if the module or
/// function segment is not a valid identifier.
fn parse_function_name(function_id: &str) -> CliTypedResult<FunctionId> {
    let ids: Vec<&str> = function_id.split_terminator("::").collect();
    // Slice pattern replaces the len()==3 check plus repeated
    // `get(i).unwrap()` calls (avoids the unwrap panic path entirely).
    match ids.as_slice() {
        [address, module, function] => {
            let address = load_account_arg(address)?;
            let module = Identifier::from_str(module)
                .map_err(|err| CliError::UnableToParse("Module Name", err.to_string()))?;
            let function_id = Identifier::from_str(function)
                .map_err(|err| CliError::UnableToParse("Function Name", err.to_string()))?;
            Ok(FunctionId {
                module_id: ModuleId::new(address, module),
                function_id,
            })
        }
        _ => Err(CliError::CommandArgumentError(
            "FunctionId is not well formed. Must be of the form <address>::<module>::<function>"
                .to_string(),
        )),
    }
}
|
//! Name resolution façade.
use std::sync::Arc;
use hir_expand::{
name::{self, Name},
MacroDefId,
};
use ra_db::CrateId;
use rustc_hash::FxHashSet;
use crate::{
body::scope::{ExprScopes, ScopeId},
builtin_type::BuiltinType,
db::DefDatabase2,
expr::{ExprId, PatId},
generics::GenericParams,
nameres::{per_ns::PerNs, CrateDefMap},
path::{Path, PathKind},
AdtId, AstItemDef, ConstId, ContainerId, CrateModuleId, DefWithBodyId, EnumId, EnumVariantId,
FunctionId, GenericDefId, ImplId, Lookup, ModuleDefId, ModuleId, StaticId, StructId, TraitId,
TypeAliasId, UnionId,
};
#[derive(Debug, Clone, Default)]
/// A stack of name-resolution scopes; later entries shadow earlier ones
/// (lookups iterate `scopes` in reverse).
pub struct Resolver {
scopes: Vec<Scope>,
}
// FIXME how to store these best
#[derive(Debug, Clone)]
/// Items visible in one module: the crate's def map plus the module's id in it.
pub(crate) struct ModuleItemMap {
crate_def_map: Arc<CrateDefMap>,
module_id: CrateModuleId,
}
#[derive(Debug, Clone)]
/// A single scope of local bindings inside `owner`'s body.
pub(crate) struct ExprScope {
owner: DefWithBodyId,
expr_scopes: Arc<ExprScopes>,
scope_id: ScopeId,
}
#[derive(Debug, Clone)]
/// One layer of the resolver's scope stack.
pub(crate) enum Scope {
/// All the items and imported names of a module
ModuleScope(ModuleItemMap),
/// Brings the generic parameters of an item into scope
GenericParams { def: GenericDefId, params: Arc<GenericParams> },
/// Brings `Self` in `impl` block into scope
ImplBlockScope(ImplId),
/// Brings `Self` in enum, struct and union definitions into scope
AdtScope(AdtId),
/// Local bindings
ExprScope(ExprScope),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// What a path can resolve to in the type namespace.
pub enum TypeNs {
SelfType(ImplId),
GenericParam(u32),
AdtId(AdtId),
AdtSelfType(AdtId),
EnumVariantId(EnumVariantId),
TypeAliasId(TypeAliasId),
BuiltinType(BuiltinType),
TraitId(TraitId),
// Module belong to type ns, but the resolver is used when all module paths
// are fully resolved.
// ModuleId(ModuleId)
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// Outcome of resolving a path in the value namespace: fully resolved to a
/// value, or resolved to a type with remaining segments starting at the given
/// index (`Partial`).
pub enum ResolveValueResult {
ValueNs(ValueNs),
Partial(TypeNs, usize),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// What a path can resolve to in the value namespace.
pub enum ValueNs {
LocalBinding(PatId),
FunctionId(FunctionId),
ConstId(ConstId),
StaticId(StaticId),
StructId(StructId),
EnumVariantId(EnumVariantId),
}
impl Resolver {
    /// Resolve known trait from std, like `std::futures::Future`
    pub fn resolve_known_trait(&self, db: &impl DefDatabase2, path: &Path) -> Option<TraitId> {
        let res = self.resolve_module_path(db, path).take_types()?;
        match res {
            ModuleDefId::TraitId(it) => Some(it),
            _ => None,
        }
    }
    /// Resolve known struct from std, like `std::boxed::Box`
    pub fn resolve_known_struct(&self, db: &impl DefDatabase2, path: &Path) -> Option<StructId> {
        let res = self.resolve_module_path(db, path).take_types()?;
        match res {
            ModuleDefId::AdtId(AdtId::StructId(it)) => Some(it),
            _ => None,
        }
    }
    /// Resolve known enum from std, like `std::result::Result`
    pub fn resolve_known_enum(&self, db: &impl DefDatabase2, path: &Path) -> Option<EnumId> {
        let res = self.resolve_module_path(db, path).take_types()?;
        match res {
            ModuleDefId::AdtId(AdtId::EnumId(it)) => Some(it),
            _ => None,
        }
    }
    /// pub only for source-binder
    ///
    /// Resolves `path` through the innermost module scope only (no locals,
    /// generics, or `Self`); returns `PerNs::none()` unless every segment
    /// resolved.
    pub fn resolve_module_path(&self, db: &impl DefDatabase2, path: &Path) -> PerNs {
        let (item_map, module) = match self.module() {
            Some(it) => it,
            None => return PerNs::none(),
        };
        let (module_res, segment_index) = item_map.resolve_path(db, module, path);
        // A leftover segment index means the path only partially resolved.
        if segment_index.is_some() {
            return PerNs::none();
        }
        module_res
    }
    /// Resolves `path` in the type namespace, walking scopes innermost-first.
    /// On success, the `Option<usize>` is the index of the first *unresolved*
    /// segment (`None` when the whole path was consumed).
    pub fn resolve_path_in_type_ns(
        &self,
        db: &impl DefDatabase2,
        path: &Path,
    ) -> Option<(TypeNs, Option<usize>)> {
        if path.is_type_relative() {
            return None;
        }
        let first_name = &path.segments.first()?.name;
        // Non-plain paths (`::foo`, `self::foo`, ...) can only start at a module.
        let skip_to_mod = path.kind != PathKind::Plain;
        for scope in self.scopes.iter().rev() {
            match scope {
                // Locals never live in the type namespace.
                Scope::ExprScope(_) => continue,
                Scope::GenericParams { .. } | Scope::ImplBlockScope(_) if skip_to_mod => continue,
                Scope::GenericParams { params, .. } => {
                    if let Some(param) = params.find_by_name(first_name) {
                        let idx = if path.segments.len() == 1 { None } else { Some(1) };
                        return Some((TypeNs::GenericParam(param.idx), idx));
                    }
                }
                Scope::ImplBlockScope(impl_) => {
                    if first_name == &name::SELF_TYPE {
                        let idx = if path.segments.len() == 1 { None } else { Some(1) };
                        return Some((TypeNs::SelfType(*impl_), idx));
                    }
                }
                Scope::AdtScope(adt) => {
                    if first_name == &name::SELF_TYPE {
                        let idx = if path.segments.len() == 1 { None } else { Some(1) };
                        return Some((TypeNs::AdtSelfType(*adt), idx));
                    }
                }
                Scope::ModuleScope(m) => {
                    let (module_def, idx) = m.crate_def_map.resolve_path(db, m.module_id, path);
                    let res = match module_def.take_types()? {
                        ModuleDefId::AdtId(it) => TypeNs::AdtId(it),
                        ModuleDefId::EnumVariantId(it) => TypeNs::EnumVariantId(it),
                        ModuleDefId::TypeAliasId(it) => TypeNs::TypeAliasId(it),
                        ModuleDefId::BuiltinType(it) => TypeNs::BuiltinType(it),
                        ModuleDefId::TraitId(it) => TypeNs::TraitId(it),
                        ModuleDefId::FunctionId(_)
                        | ModuleDefId::ConstId(_)
                        | ModuleDefId::StaticId(_)
                        | ModuleDefId::ModuleId(_) => return None,
                    };
                    return Some((res, idx));
                }
            }
        }
        None
    }
    /// Like [`Resolver::resolve_path_in_type_ns`] but succeeds only when the
    /// whole path resolved.
    pub fn resolve_path_in_type_ns_fully(
        &self,
        db: &impl DefDatabase2,
        path: &Path,
    ) -> Option<TypeNs> {
        let (res, unresolved) = self.resolve_path_in_type_ns(db, path)?;
        if unresolved.is_some() {
            return None;
        }
        Some(res)
    }
    /// Resolves `path` in the value namespace, walking scopes innermost-first.
    /// Multi-segment paths may resolve their head to a *type* (e.g.
    /// `Foo::assoc`), reported as `ResolveValueResult::Partial`.
    pub fn resolve_path_in_value_ns<'p>(
        &self,
        db: &impl DefDatabase2,
        path: &'p Path,
    ) -> Option<ResolveValueResult> {
        if path.is_type_relative() {
            return None;
        }
        let n_segments = path.segments.len();
        // `self` paths have no real first segment; substitute the `self` name.
        let tmp = name::SELF_PARAM;
        let first_name = if path.is_self() { &tmp } else { &path.segments.first()?.name };
        let skip_to_mod = path.kind != PathKind::Plain && !path.is_self();
        for scope in self.scopes.iter().rev() {
            match scope {
                Scope::AdtScope(_)
                | Scope::ExprScope(_)
                | Scope::GenericParams { .. }
                | Scope::ImplBlockScope(_)
                    if skip_to_mod =>
                {
                    continue
                }
                // A bare name can be a local binding.
                Scope::ExprScope(scope) if n_segments <= 1 => {
                    let entry = scope
                        .expr_scopes
                        .entries(scope.scope_id)
                        .iter()
                        .find(|entry| entry.name() == first_name);
                    if let Some(e) = entry {
                        return Some(ResolveValueResult::ValueNs(ValueNs::LocalBinding(e.pat())));
                    }
                }
                Scope::ExprScope(_) => continue,
                // Generic params / `Self` are types, so they can only start a
                // longer path (`T::assoc`), never be a value themselves.
                Scope::GenericParams { params, .. } if n_segments > 1 => {
                    if let Some(param) = params.find_by_name(first_name) {
                        let ty = TypeNs::GenericParam(param.idx);
                        return Some(ResolveValueResult::Partial(ty, 1));
                    }
                }
                Scope::GenericParams { .. } => continue,
                Scope::ImplBlockScope(impl_) if n_segments > 1 => {
                    if first_name == &name::SELF_TYPE {
                        let ty = TypeNs::SelfType(*impl_);
                        return Some(ResolveValueResult::Partial(ty, 1));
                    }
                }
                Scope::AdtScope(adt) if n_segments > 1 => {
                    if first_name == &name::SELF_TYPE {
                        let ty = TypeNs::AdtSelfType(*adt);
                        return Some(ResolveValueResult::Partial(ty, 1));
                    }
                }
                Scope::ImplBlockScope(_) | Scope::AdtScope(_) => continue,
                Scope::ModuleScope(m) => {
                    let (module_def, idx) = m.crate_def_map.resolve_path(db, m.module_id, path);
                    return match idx {
                        None => {
                            let value = match module_def.take_values()? {
                                ModuleDefId::FunctionId(it) => ValueNs::FunctionId(it),
                                ModuleDefId::AdtId(AdtId::StructId(it)) => ValueNs::StructId(it),
                                ModuleDefId::EnumVariantId(it) => ValueNs::EnumVariantId(it),
                                ModuleDefId::ConstId(it) => ValueNs::ConstId(it),
                                ModuleDefId::StaticId(it) => ValueNs::StaticId(it),
                                ModuleDefId::AdtId(AdtId::EnumId(_))
                                | ModuleDefId::AdtId(AdtId::UnionId(_))
                                | ModuleDefId::TraitId(_)
                                | ModuleDefId::TypeAliasId(_)
                                | ModuleDefId::BuiltinType(_)
                                | ModuleDefId::ModuleId(_) => return None,
                            };
                            Some(ResolveValueResult::ValueNs(value))
                        }
                        Some(idx) => {
                            let ty = match module_def.take_types()? {
                                ModuleDefId::AdtId(it) => TypeNs::AdtId(it),
                                ModuleDefId::TraitId(it) => TypeNs::TraitId(it),
                                ModuleDefId::TypeAliasId(it) => TypeNs::TypeAliasId(it),
                                ModuleDefId::BuiltinType(it) => TypeNs::BuiltinType(it),
                                ModuleDefId::ModuleId(_)
                                | ModuleDefId::FunctionId(_)
                                | ModuleDefId::EnumVariantId(_)
                                | ModuleDefId::ConstId(_)
                                | ModuleDefId::StaticId(_) => return None,
                            };
                            Some(ResolveValueResult::Partial(ty, idx))
                        }
                    };
                }
            }
        }
        None
    }
    /// Like [`Resolver::resolve_path_in_value_ns`] but succeeds only when the
    /// whole path resolved to a value.
    pub fn resolve_path_in_value_ns_fully(
        &self,
        db: &impl DefDatabase2,
        path: &Path,
    ) -> Option<ValueNs> {
        match self.resolve_path_in_value_ns(db, path)? {
            ResolveValueResult::ValueNs(it) => Some(it),
            ResolveValueResult::Partial(..) => None,
        }
    }
    /// Resolves `path` to a macro definition via the innermost module scope.
    pub fn resolve_path_as_macro(&self, db: &impl DefDatabase2, path: &Path) -> Option<MacroDefId> {
        let (item_map, module) = self.module()?;
        item_map.resolve_path(db, module, path).0.get_macros()
    }
    /// Calls `f` for every name visible from this resolver, innermost scope
    /// first. Shadowed names are still reported (callers see them later).
    pub fn process_all_names(&self, db: &impl DefDatabase2, f: &mut dyn FnMut(Name, ScopeDef)) {
        for scope in self.scopes.iter().rev() {
            scope.process_names(db, f);
        }
    }
    /// Collects all traits in scope (module scopes plus the crate prelude),
    /// e.g. for method-resolution candidate lookup.
    pub fn traits_in_scope(&self, db: &impl DefDatabase2) -> FxHashSet<TraitId> {
        let mut traits = FxHashSet::default();
        for scope in &self.scopes {
            if let Scope::ModuleScope(m) = scope {
                if let Some(prelude) = m.crate_def_map.prelude() {
                    let prelude_def_map = db.crate_def_map(prelude.krate);
                    traits.extend(prelude_def_map[prelude.module_id].scope.traits());
                }
                traits.extend(m.crate_def_map[m.module_id].scope.traits());
            }
        }
        traits
    }
    /// The innermost module scope, if any.
    fn module(&self) -> Option<(&CrateDefMap, CrateModuleId)> {
        self.scopes.iter().rev().find_map(|scope| match scope {
            Scope::ModuleScope(m) => Some((&*m.crate_def_map, m.module_id)),
            _ => None,
        })
    }
    /// The crate this resolver's innermost module belongs to, if any.
    pub fn krate(&self) -> Option<CrateId> {
        self.module().map(|t| t.0.krate())
    }
    /// All `where` predicates from every generic-params scope on the stack.
    pub fn where_predicates_in_scope<'a>(
        &'a self,
    ) -> impl Iterator<Item = &'a crate::generics::WherePredicate> + 'a {
        self.scopes
            .iter()
            .filter_map(|scope| match scope {
                Scope::GenericParams { params, .. } => Some(params),
                _ => None,
            })
            .flat_map(|params| params.where_predicates.iter())
    }
    /// The outermost generic-def owner on the stack, if any.
    pub fn generic_def(&self) -> Option<GenericDefId> {
        self.scopes.iter().find_map(|scope| match scope {
            Scope::GenericParams { def, .. } => Some(*def),
            _ => None,
        })
    }
    /// The body whose expression scopes are on the stack, if any.
    pub fn body_owner(&self) -> Option<DefWithBodyId> {
        self.scopes.iter().find_map(|scope| match scope {
            Scope::ExprScope(it) => Some(it.owner),
            _ => None,
        })
    }
}
/// Builder-style constructors: each consumes the resolver and returns it with
/// one more (innermost) scope pushed.
impl Resolver {
    /// Appends `scope` as the new innermost scope.
    pub(crate) fn push_scope(self, scope: Scope) -> Resolver {
        let mut this = self;
        this.scopes.push(scope);
        this
    }
    /// Pushes `def`'s generic parameters, or is a no-op when `def` has none.
    pub(crate) fn push_generic_params_scope(
        self,
        db: &impl DefDatabase2,
        def: GenericDefId,
    ) -> Resolver {
        let params = db.generic_params(def);
        if !params.params.is_empty() {
            return self.push_scope(Scope::GenericParams { def, params });
        }
        self
    }
    /// Pushes a `Self`-type scope for an impl block.
    pub(crate) fn push_impl_block_scope(self, impl_block: ImplId) -> Resolver {
        let scope = Scope::ImplBlockScope(impl_block);
        self.push_scope(scope)
    }
    /// Pushes a module scope for `module_id` within `crate_def_map`.
    pub(crate) fn push_module_scope(
        self,
        crate_def_map: Arc<CrateDefMap>,
        module_id: CrateModuleId,
    ) -> Resolver {
        let scope = ModuleItemMap { crate_def_map, module_id };
        self.push_scope(Scope::ModuleScope(scope))
    }
    /// Pushes one expression scope of `owner`'s body.
    pub(crate) fn push_expr_scope(
        self,
        owner: DefWithBodyId,
        expr_scopes: Arc<ExprScopes>,
        scope_id: ScopeId,
    ) -> Resolver {
        let scope = ExprScope { owner, expr_scopes, scope_id };
        self.push_scope(Scope::ExprScope(scope))
    }
}
/// A single name visible in some scope, as reported by
/// `Resolver::process_all_names`.
pub enum ScopeDef {
    PerNs(PerNs),
    ImplSelfType(ImplId),
    AdtSelfType(AdtId),
    GenericParam(u32),
    Local(PatId),
}
impl Scope {
    /// Calls `f` once per name this single scope contributes.
    fn process_names(&self, db: &impl DefDatabase2, f: &mut dyn FnMut(Name, ScopeDef)) {
        match self {
            Scope::ModuleScope(m) => {
                // FIXME: should we provide `self` here?
                // f(
                //     Name::self_param(),
                //     PerNs::types(Resolution::Def {
                //         def: m.module.into(),
                //     }),
                // );
                // Items and imports of the module itself.
                m.crate_def_map[m.module_id].scope.entries().for_each(|(name, res)| {
                    f(name.clone(), ScopeDef::PerNs(res.def));
                });
                // `macro_rules!` macros visible via textual (legacy) scoping.
                m.crate_def_map[m.module_id].scope.legacy_macros().for_each(|(name, macro_)| {
                    f(name.clone(), ScopeDef::PerNs(PerNs::macros(macro_)));
                });
                // Extern crate names (`std`, dependencies, ...).
                m.crate_def_map.extern_prelude().iter().for_each(|(name, &def)| {
                    f(name.clone(), ScopeDef::PerNs(PerNs::types(def.into())));
                });
                // The crate prelude, when one exists.
                if let Some(prelude) = m.crate_def_map.prelude() {
                    let prelude_def_map = db.crate_def_map(prelude.krate);
                    prelude_def_map[prelude.module_id].scope.entries().for_each(|(name, res)| {
                        f(name.clone(), ScopeDef::PerNs(res.def));
                    });
                }
            }
            Scope::GenericParams { params, .. } => {
                for param in params.params.iter() {
                    f(param.name.clone(), ScopeDef::GenericParam(param.idx))
                }
            }
            Scope::ImplBlockScope(i) => {
                f(name::SELF_TYPE, ScopeDef::ImplSelfType((*i).into()));
            }
            Scope::AdtScope(i) => {
                f(name::SELF_TYPE, ScopeDef::AdtSelfType((*i).into()));
            }
            Scope::ExprScope(scope) => {
                scope.expr_scopes.entries(scope.scope_id).iter().for_each(|e| {
                    f(e.name().clone(), ScopeDef::Local(e.pat()));
                });
            }
        }
    }
}
// needs arbitrary_self_types to be a method... or maybe move to the def?
/// Builds the resolver for the scope that contains `expr_id` in `owner`'s body.
pub fn resolver_for_expr(
    db: &impl DefDatabase2,
    owner: DefWithBodyId,
    expr_id: ExprId,
) -> Resolver {
    let scopes = db.expr_scopes(owner);
    let scope_id = scopes.scope_for(expr_id);
    resolver_for_scope(db, owner, scope_id)
}
/// Builds the resolver for a specific scope of `owner`'s body: the owner's own
/// resolver plus every enclosing expression scope.
pub fn resolver_for_scope(
    db: &impl DefDatabase2,
    owner: DefWithBodyId,
    scope_id: Option<ScopeId>,
) -> Resolver {
    let scopes = db.expr_scopes(owner);
    // `scope_chain` yields innermost-first; reverse so outer scopes are
    // pushed before inner ones.
    let mut chain: Vec<_> = scopes.scope_chain(scope_id).collect();
    chain.reverse();
    let mut resolver = owner.resolver(db);
    for scope in chain {
        resolver = resolver.push_expr_scope(owner, Arc::clone(&scopes), scope);
    }
    resolver
}
/// Anything that has an associated name-resolution context.
pub trait HasResolver {
    /// Builds a resolver for type references inside this def.
    fn resolver(self, db: &impl DefDatabase2) -> Resolver;
}
impl HasResolver for ModuleId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // A module's resolver is just its crate's def map scoped to the module.
        let def_map = db.crate_def_map(self.krate);
        Resolver::default().push_module_scope(def_map, self.module_id)
    }
}
impl HasResolver for TraitId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // Containing module, then the trait's own generic params.
        self.module(db).resolver(db).push_generic_params_scope(db, self.into())
    }
}
impl HasResolver for AdtId {
    /// Containing module, then generic params, then the ADT's `Self` scope.
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        let containing_module = match self {
            AdtId::EnumId(it) => it.module(db),
            AdtId::StructId(it) => it.0.module(db),
            AdtId::UnionId(it) => it.0.module(db),
        };
        containing_module
            .resolver(db)
            .push_generic_params_scope(db, self.into())
            .push_scope(Scope::AdtScope(self.into()))
    }
}
impl HasResolver for StructId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
AdtId::from(self).resolver(db)
}
}
impl HasResolver for UnionId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
AdtId::from(self).resolver(db)
}
}
impl HasResolver for EnumId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
AdtId::from(self).resolver(db)
}
}
impl HasResolver for FunctionId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // Container (module / trait / impl), then the fn's generic params.
        self.lookup(db).container.resolver(db).push_generic_params_scope(db, self.into())
    }
}
impl HasResolver for DefWithBodyId {
    /// Dispatches to the concrete body owner's resolver.
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        match self {
            DefWithBodyId::ConstId(c) => c.resolver(db),
            DefWithBodyId::FunctionId(f) => f.resolver(db),
            DefWithBodyId::StaticId(s) => s.resolver(db),
        }
    }
}
impl HasResolver for ConstId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // Consts have no generics of their own; the container's scope suffices.
        self.lookup(db).container.resolver(db)
    }
}
impl HasResolver for StaticId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // Statics are module-level items; the module scope suffices.
        self.module(db).resolver(db)
    }
}
impl HasResolver for TypeAliasId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // Container scope, then the alias's own generic params.
        self.lookup(db).container.resolver(db).push_generic_params_scope(db, self.into())
    }
}
impl HasResolver for ContainerId {
    /// Dispatches to whichever kind of container this id denotes.
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        match self {
            ContainerId::TraitId(it) => it.resolver(db),
            ContainerId::ImplId(it) => it.resolver(db),
            ContainerId::ModuleId(it) => it.resolver(db),
        }
    }
}
impl HasResolver for GenericDefId {
    /// Dispatches to the concrete generic-def owner's resolver.
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        match self {
            GenericDefId::FunctionId(inner) => inner.resolver(db),
            GenericDefId::AdtId(adt) => adt.resolver(db),
            GenericDefId::TraitId(inner) => inner.resolver(db),
            GenericDefId::TypeAliasId(inner) => inner.resolver(db),
            GenericDefId::ImplId(inner) => inner.resolver(db),
            // A variant shares its parent enum's generic scope.
            GenericDefId::EnumVariantId(inner) => inner.parent.resolver(db),
            GenericDefId::ConstId(inner) => inner.resolver(db),
        }
    }
}
impl HasResolver for ImplId {
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        // Containing module, then the impl's generics, then its `Self` scope.
        self.module(db)
            .resolver(db)
            .push_generic_params_scope(db, self.into())
            .push_impl_block_scope(self)
    }
}
Resimplify
//! Name resolution façade.
use std::sync::Arc;
use hir_expand::{
name::{self, Name},
MacroDefId,
};
use ra_db::CrateId;
use rustc_hash::FxHashSet;
use crate::{
body::scope::{ExprScopes, ScopeId},
builtin_type::BuiltinType,
db::DefDatabase2,
expr::{ExprId, PatId},
generics::GenericParams,
nameres::{per_ns::PerNs, CrateDefMap},
path::{Path, PathKind},
AdtId, AstItemDef, ConstId, ContainerId, CrateModuleId, DefWithBodyId, EnumId, EnumVariantId,
FunctionId, GenericDefId, ImplId, Lookup, ModuleDefId, ModuleId, StaticId, StructId, TraitId,
TypeAliasId,
};
#[derive(Debug, Clone, Default)]
pub struct Resolver {
scopes: Vec<Scope>,
}
// FIXME how to store these best
#[derive(Debug, Clone)]
pub(crate) struct ModuleItemMap {
crate_def_map: Arc<CrateDefMap>,
module_id: CrateModuleId,
}
#[derive(Debug, Clone)]
pub(crate) struct ExprScope {
owner: DefWithBodyId,
expr_scopes: Arc<ExprScopes>,
scope_id: ScopeId,
}
#[derive(Debug, Clone)]
pub(crate) enum Scope {
/// All the items and imported names of a module
ModuleScope(ModuleItemMap),
/// Brings the generic parameters of an item into scope
GenericParams { def: GenericDefId, params: Arc<GenericParams> },
/// Brings `Self` in `impl` block into scope
ImplBlockScope(ImplId),
/// Brings `Self` in enum, struct and union definitions into scope
AdtScope(AdtId),
/// Local bindings
ExprScope(ExprScope),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TypeNs {
SelfType(ImplId),
GenericParam(u32),
AdtId(AdtId),
AdtSelfType(AdtId),
EnumVariantId(EnumVariantId),
TypeAliasId(TypeAliasId),
BuiltinType(BuiltinType),
TraitId(TraitId),
// Module belong to type ns, but the resolver is used when all module paths
// are fully resolved.
// ModuleId(ModuleId)
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ResolveValueResult {
ValueNs(ValueNs),
Partial(TypeNs, usize),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ValueNs {
LocalBinding(PatId),
FunctionId(FunctionId),
ConstId(ConstId),
StaticId(StaticId),
StructId(StructId),
EnumVariantId(EnumVariantId),
}
impl Resolver {
/// Resolve known trait from std, like `std::futures::Future`
pub fn resolve_known_trait(&self, db: &impl DefDatabase2, path: &Path) -> Option<TraitId> {
let res = self.resolve_module_path(db, path).take_types()?;
match res {
ModuleDefId::TraitId(it) => Some(it),
_ => None,
}
}
/// Resolve known struct from std, like `std::boxed::Box`
pub fn resolve_known_struct(&self, db: &impl DefDatabase2, path: &Path) -> Option<StructId> {
let res = self.resolve_module_path(db, path).take_types()?;
match res {
ModuleDefId::AdtId(AdtId::StructId(it)) => Some(it),
_ => None,
}
}
/// Resolve known enum from std, like `std::result::Result`
pub fn resolve_known_enum(&self, db: &impl DefDatabase2, path: &Path) -> Option<EnumId> {
let res = self.resolve_module_path(db, path).take_types()?;
match res {
ModuleDefId::AdtId(AdtId::EnumId(it)) => Some(it),
_ => None,
}
}
/// pub only for source-binder
pub fn resolve_module_path(&self, db: &impl DefDatabase2, path: &Path) -> PerNs {
let (item_map, module) = match self.module() {
Some(it) => it,
None => return PerNs::none(),
};
let (module_res, segment_index) = item_map.resolve_path(db, module, path);
if segment_index.is_some() {
return PerNs::none();
}
module_res
}
pub fn resolve_path_in_type_ns(
&self,
db: &impl DefDatabase2,
path: &Path,
) -> Option<(TypeNs, Option<usize>)> {
if path.is_type_relative() {
return None;
}
let first_name = &path.segments.first()?.name;
let skip_to_mod = path.kind != PathKind::Plain;
for scope in self.scopes.iter().rev() {
match scope {
Scope::ExprScope(_) => continue,
Scope::GenericParams { .. } | Scope::ImplBlockScope(_) if skip_to_mod => continue,
Scope::GenericParams { params, .. } => {
if let Some(param) = params.find_by_name(first_name) {
let idx = if path.segments.len() == 1 { None } else { Some(1) };
return Some((TypeNs::GenericParam(param.idx), idx));
}
}
Scope::ImplBlockScope(impl_) => {
if first_name == &name::SELF_TYPE {
let idx = if path.segments.len() == 1 { None } else { Some(1) };
return Some((TypeNs::SelfType(*impl_), idx));
}
}
Scope::AdtScope(adt) => {
if first_name == &name::SELF_TYPE {
let idx = if path.segments.len() == 1 { None } else { Some(1) };
return Some((TypeNs::AdtSelfType(*adt), idx));
}
}
Scope::ModuleScope(m) => {
let (module_def, idx) = m.crate_def_map.resolve_path(db, m.module_id, path);
let res = match module_def.take_types()? {
ModuleDefId::AdtId(it) => TypeNs::AdtId(it),
ModuleDefId::EnumVariantId(it) => TypeNs::EnumVariantId(it),
ModuleDefId::TypeAliasId(it) => TypeNs::TypeAliasId(it),
ModuleDefId::BuiltinType(it) => TypeNs::BuiltinType(it),
ModuleDefId::TraitId(it) => TypeNs::TraitId(it),
ModuleDefId::FunctionId(_)
| ModuleDefId::ConstId(_)
| ModuleDefId::StaticId(_)
| ModuleDefId::ModuleId(_) => return None,
};
return Some((res, idx));
}
}
}
None
}
pub fn resolve_path_in_type_ns_fully(
&self,
db: &impl DefDatabase2,
path: &Path,
) -> Option<TypeNs> {
let (res, unresolved) = self.resolve_path_in_type_ns(db, path)?;
if unresolved.is_some() {
return None;
}
Some(res)
}
pub fn resolve_path_in_value_ns<'p>(
&self,
db: &impl DefDatabase2,
path: &'p Path,
) -> Option<ResolveValueResult> {
if path.is_type_relative() {
return None;
}
let n_segments = path.segments.len();
let tmp = name::SELF_PARAM;
let first_name = if path.is_self() { &tmp } else { &path.segments.first()?.name };
let skip_to_mod = path.kind != PathKind::Plain && !path.is_self();
for scope in self.scopes.iter().rev() {
match scope {
Scope::AdtScope(_)
| Scope::ExprScope(_)
| Scope::GenericParams { .. }
| Scope::ImplBlockScope(_)
if skip_to_mod =>
{
continue
}
Scope::ExprScope(scope) if n_segments <= 1 => {
let entry = scope
.expr_scopes
.entries(scope.scope_id)
.iter()
.find(|entry| entry.name() == first_name);
if let Some(e) = entry {
return Some(ResolveValueResult::ValueNs(ValueNs::LocalBinding(e.pat())));
}
}
Scope::ExprScope(_) => continue,
Scope::GenericParams { params, .. } if n_segments > 1 => {
if let Some(param) = params.find_by_name(first_name) {
let ty = TypeNs::GenericParam(param.idx);
return Some(ResolveValueResult::Partial(ty, 1));
}
}
Scope::GenericParams { .. } => continue,
Scope::ImplBlockScope(impl_) if n_segments > 1 => {
if first_name == &name::SELF_TYPE {
let ty = TypeNs::SelfType(*impl_);
return Some(ResolveValueResult::Partial(ty, 1));
}
}
Scope::AdtScope(adt) if n_segments > 1 => {
if first_name == &name::SELF_TYPE {
let ty = TypeNs::AdtSelfType(*adt);
return Some(ResolveValueResult::Partial(ty, 1));
}
}
Scope::ImplBlockScope(_) | Scope::AdtScope(_) => continue,
Scope::ModuleScope(m) => {
let (module_def, idx) = m.crate_def_map.resolve_path(db, m.module_id, path);
return match idx {
None => {
let value = match module_def.take_values()? {
ModuleDefId::FunctionId(it) => ValueNs::FunctionId(it),
ModuleDefId::AdtId(AdtId::StructId(it)) => ValueNs::StructId(it),
ModuleDefId::EnumVariantId(it) => ValueNs::EnumVariantId(it),
ModuleDefId::ConstId(it) => ValueNs::ConstId(it),
ModuleDefId::StaticId(it) => ValueNs::StaticId(it),
ModuleDefId::AdtId(AdtId::EnumId(_))
| ModuleDefId::AdtId(AdtId::UnionId(_))
| ModuleDefId::TraitId(_)
| ModuleDefId::TypeAliasId(_)
| ModuleDefId::BuiltinType(_)
| ModuleDefId::ModuleId(_) => return None,
};
Some(ResolveValueResult::ValueNs(value))
}
Some(idx) => {
let ty = match module_def.take_types()? {
ModuleDefId::AdtId(it) => TypeNs::AdtId(it),
ModuleDefId::TraitId(it) => TypeNs::TraitId(it),
ModuleDefId::TypeAliasId(it) => TypeNs::TypeAliasId(it),
ModuleDefId::BuiltinType(it) => TypeNs::BuiltinType(it),
ModuleDefId::ModuleId(_)
| ModuleDefId::FunctionId(_)
| ModuleDefId::EnumVariantId(_)
| ModuleDefId::ConstId(_)
| ModuleDefId::StaticId(_) => return None,
};
Some(ResolveValueResult::Partial(ty, idx))
}
};
}
}
}
None
}
pub fn resolve_path_in_value_ns_fully(
&self,
db: &impl DefDatabase2,
path: &Path,
) -> Option<ValueNs> {
match self.resolve_path_in_value_ns(db, path)? {
ResolveValueResult::ValueNs(it) => Some(it),
ResolveValueResult::Partial(..) => None,
}
}
pub fn resolve_path_as_macro(&self, db: &impl DefDatabase2, path: &Path) -> Option<MacroDefId> {
let (item_map, module) = self.module()?;
item_map.resolve_path(db, module, path).0.get_macros()
}
pub fn process_all_names(&self, db: &impl DefDatabase2, f: &mut dyn FnMut(Name, ScopeDef)) {
for scope in self.scopes.iter().rev() {
scope.process_names(db, f);
}
}
pub fn traits_in_scope(&self, db: &impl DefDatabase2) -> FxHashSet<TraitId> {
let mut traits = FxHashSet::default();
for scope in &self.scopes {
if let Scope::ModuleScope(m) = scope {
if let Some(prelude) = m.crate_def_map.prelude() {
let prelude_def_map = db.crate_def_map(prelude.krate);
traits.extend(prelude_def_map[prelude.module_id].scope.traits());
}
traits.extend(m.crate_def_map[m.module_id].scope.traits());
}
}
traits
}
fn module(&self) -> Option<(&CrateDefMap, CrateModuleId)> {
self.scopes.iter().rev().find_map(|scope| match scope {
Scope::ModuleScope(m) => Some((&*m.crate_def_map, m.module_id)),
_ => None,
})
}
pub fn krate(&self) -> Option<CrateId> {
self.module().map(|t| t.0.krate())
}
pub fn where_predicates_in_scope<'a>(
&'a self,
) -> impl Iterator<Item = &'a crate::generics::WherePredicate> + 'a {
self.scopes
.iter()
.filter_map(|scope| match scope {
Scope::GenericParams { params, .. } => Some(params),
_ => None,
})
.flat_map(|params| params.where_predicates.iter())
}
pub fn generic_def(&self) -> Option<GenericDefId> {
self.scopes.iter().find_map(|scope| match scope {
Scope::GenericParams { def, .. } => Some(*def),
_ => None,
})
}
pub fn body_owner(&self) -> Option<DefWithBodyId> {
self.scopes.iter().find_map(|scope| match scope {
Scope::ExprScope(it) => Some(it.owner),
_ => None,
})
}
}
impl Resolver {
pub(crate) fn push_scope(mut self, scope: Scope) -> Resolver {
self.scopes.push(scope);
self
}
pub(crate) fn push_generic_params_scope(
self,
db: &impl DefDatabase2,
def: GenericDefId,
) -> Resolver {
let params = db.generic_params(def);
if params.params.is_empty() {
self
} else {
self.push_scope(Scope::GenericParams { def, params })
}
}
pub(crate) fn push_impl_block_scope(self, impl_block: ImplId) -> Resolver {
self.push_scope(Scope::ImplBlockScope(impl_block))
}
pub(crate) fn push_module_scope(
self,
crate_def_map: Arc<CrateDefMap>,
module_id: CrateModuleId,
) -> Resolver {
self.push_scope(Scope::ModuleScope(ModuleItemMap { crate_def_map, module_id }))
}
pub(crate) fn push_expr_scope(
self,
owner: DefWithBodyId,
expr_scopes: Arc<ExprScopes>,
scope_id: ScopeId,
) -> Resolver {
self.push_scope(Scope::ExprScope(ExprScope { owner, expr_scopes, scope_id }))
}
}
pub enum ScopeDef {
PerNs(PerNs),
ImplSelfType(ImplId),
AdtSelfType(AdtId),
GenericParam(u32),
Local(PatId),
}
impl Scope {
fn process_names(&self, db: &impl DefDatabase2, f: &mut dyn FnMut(Name, ScopeDef)) {
match self {
Scope::ModuleScope(m) => {
// FIXME: should we provide `self` here?
// f(
// Name::self_param(),
// PerNs::types(Resolution::Def {
// def: m.module.into(),
// }),
// );
m.crate_def_map[m.module_id].scope.entries().for_each(|(name, res)| {
f(name.clone(), ScopeDef::PerNs(res.def));
});
m.crate_def_map[m.module_id].scope.legacy_macros().for_each(|(name, macro_)| {
f(name.clone(), ScopeDef::PerNs(PerNs::macros(macro_)));
});
m.crate_def_map.extern_prelude().iter().for_each(|(name, &def)| {
f(name.clone(), ScopeDef::PerNs(PerNs::types(def.into())));
});
if let Some(prelude) = m.crate_def_map.prelude() {
let prelude_def_map = db.crate_def_map(prelude.krate);
prelude_def_map[prelude.module_id].scope.entries().for_each(|(name, res)| {
f(name.clone(), ScopeDef::PerNs(res.def));
});
}
}
Scope::GenericParams { params, .. } => {
for param in params.params.iter() {
f(param.name.clone(), ScopeDef::GenericParam(param.idx))
}
}
Scope::ImplBlockScope(i) => {
f(name::SELF_TYPE, ScopeDef::ImplSelfType((*i).into()));
}
Scope::AdtScope(i) => {
f(name::SELF_TYPE, ScopeDef::AdtSelfType((*i).into()));
}
Scope::ExprScope(scope) => {
scope.expr_scopes.entries(scope.scope_id).iter().for_each(|e| {
f(e.name().clone(), ScopeDef::Local(e.pat()));
});
}
}
}
}
// needs arbitrary_self_types to be a method... or maybe move to the def?
pub fn resolver_for_expr(
db: &impl DefDatabase2,
owner: DefWithBodyId,
expr_id: ExprId,
) -> Resolver {
let scopes = db.expr_scopes(owner);
resolver_for_scope(db, owner, scopes.scope_for(expr_id))
}
pub fn resolver_for_scope(
db: &impl DefDatabase2,
owner: DefWithBodyId,
scope_id: Option<ScopeId>,
) -> Resolver {
let mut r = owner.resolver(db);
let scopes = db.expr_scopes(owner);
let scope_chain = scopes.scope_chain(scope_id).collect::<Vec<_>>();
for scope in scope_chain.into_iter().rev() {
r = r.push_expr_scope(owner, Arc::clone(&scopes), scope);
}
r
}
pub trait HasResolver {
/// Builds a resolver for type references inside this def.
fn resolver(self, db: &impl DefDatabase2) -> Resolver;
}
impl HasResolver for ModuleId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
let def_map = db.crate_def_map(self.krate);
Resolver::default().push_module_scope(def_map, self.module_id)
}
}
impl HasResolver for TraitId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
self.module(db).resolver(db).push_generic_params_scope(db, self.into())
}
}
// NOTE(review): a blanket impl over `T: Into<AdtId>` may overlap with the
// other concrete `HasResolver` impls in this file under coherence rules —
// confirm this compiles against the rest of the crate.
impl<T: Into<AdtId>> HasResolver for T {
    /// Containing module, then generic params, then the ADT's `Self` scope.
    fn resolver(self, db: &impl DefDatabase2) -> Resolver {
        let def = self.into();
        let module = match def {
            AdtId::StructId(it) => it.0.module(db),
            AdtId::UnionId(it) => it.0.module(db),
            AdtId::EnumId(it) => it.module(db),
        };
        module
            .resolver(db)
            // `def.into()` converts AdtId -> GenericDefId for the params scope.
            .push_generic_params_scope(db, def.into())
            .push_scope(Scope::AdtScope(def))
    }
}
impl HasResolver for FunctionId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
self.lookup(db).container.resolver(db).push_generic_params_scope(db, self.into())
}
}
impl HasResolver for DefWithBodyId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
match self {
DefWithBodyId::ConstId(c) => c.resolver(db),
DefWithBodyId::FunctionId(f) => f.resolver(db),
DefWithBodyId::StaticId(s) => s.resolver(db),
}
}
}
impl HasResolver for ConstId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
self.lookup(db).container.resolver(db)
}
}
impl HasResolver for StaticId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
self.module(db).resolver(db)
}
}
impl HasResolver for TypeAliasId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
self.lookup(db).container.resolver(db).push_generic_params_scope(db, self.into())
}
}
impl HasResolver for ContainerId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
match self {
ContainerId::TraitId(it) => it.resolver(db),
ContainerId::ImplId(it) => it.resolver(db),
ContainerId::ModuleId(it) => it.resolver(db),
}
}
}
impl HasResolver for GenericDefId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
match self {
GenericDefId::FunctionId(inner) => inner.resolver(db),
GenericDefId::AdtId(adt) => adt.resolver(db),
GenericDefId::TraitId(inner) => inner.resolver(db),
GenericDefId::TypeAliasId(inner) => inner.resolver(db),
GenericDefId::ImplId(inner) => inner.resolver(db),
GenericDefId::EnumVariantId(inner) => inner.parent.resolver(db),
GenericDefId::ConstId(inner) => inner.resolver(db),
}
}
}
impl HasResolver for ImplId {
fn resolver(self, db: &impl DefDatabase2) -> Resolver {
self.module(db)
.resolver(db)
.push_generic_params_scope(db, self.into())
.push_impl_block_scope(self)
}
}
|
use futures::{try_ready, Async, Future, Poll};
use futures_cpupool::{CpuFuture, CpuPool};
use lazy_static::lazy_static;
use state_machine_future::{transition, RentToOwn, StateMachineFuture};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, ToSocketAddrs};
#[cfg(unix)]
use std::path::Path;
use std::vec;
use tokio_tcp::TcpStream;
#[cfg(unix)]
use tokio_uds::UnixStream;
use crate::proto::{Client, Connection, HandshakeFuture};
use crate::{Error, Socket, TlsMode};
// Small dedicated pool for blocking DNS lookups, so resolution never stalls
// the async reactor threads.
lazy_static! {
    static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new()
        .name_prefix("postgres-dns-")
        .pool_size(2)
        .create();
}
/// State machine that performs a single connection attempt:
/// Start -> (ConnectingUnix | ResolvingDns -> ConnectingTcp) -> Handshaking.
#[derive(StateMachineFuture)]
pub enum ConnectOnce<T>
where
    T: TlsMode<Socket>,
{
    #[state_machine_future(start)]
    #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))]
    #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))]
    Start {
        host: String,
        port: u16,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    // Unix-domain-socket path, only compiled on unix targets.
    #[cfg(unix)]
    #[state_machine_future(transitions(Handshaking))]
    ConnectingUnix {
        future: tokio_uds::ConnectFuture,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    #[state_machine_future(transitions(ConnectingTcp))]
    ResolvingDns {
        future: CpuFuture<vec::IntoIter<SocketAddr>, io::Error>,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    // Holds the remaining resolved addresses so connect failures can retry.
    #[state_machine_future(transitions(Handshaking))]
    ConnectingTcp {
        future: tokio_tcp::ConnectFuture,
        addrs: vec::IntoIter<SocketAddr>,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    #[state_machine_future(transitions(Finished))]
    Handshaking { future: HandshakeFuture<Socket, T> },
    #[state_machine_future(ready)]
    Finished((Client, Connection<T::Stream>)),
    #[state_machine_future(error)]
    Failed(Error),
}
impl<T> PollConnectOnce<T> for ConnectOnce<T>
where
    T: TlsMode<Socket>,
{
    /// Entry state: picks the Unix-socket path or the DNS/TCP path.
    fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start<T>>) -> Poll<AfterStart<T>, Error> {
        let state = state.take();
        #[cfg(unix)]
        {
            // A host starting with '/' is a socket directory, per libpq
            // convention (`<dir>/.s.PGSQL.<port>`).
            if state.host.starts_with('/') {
                let path = Path::new(&state.host).join(format!(".s.PGSQL.{}", state.port));
                transition!(ConnectingUnix {
                    future: UnixStream::connect(path),
                    tls_mode: state.tls_mode,
                    params: state.params,
                })
            }
        }
        let host = state.host;
        let port = state.port;
        // `to_socket_addrs` blocks, so run it on the dedicated DNS pool.
        transition!(ResolvingDns {
            future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()),
            tls_mode: state.tls_mode,
            params: state.params,
        })
    }
    /// Waits for the Unix-socket connection, then starts the handshake.
    #[cfg(unix)]
    fn poll_connecting_unix<'a>(
        state: &'a mut RentToOwn<'a, ConnectingUnix<T>>,
    ) -> Poll<AfterConnectingUnix<T>, Error> {
        let stream = try_ready!(state.future.poll().map_err(Error::connect));
        let stream = Socket::new_unix(stream);
        let state = state.take();
        transition!(Handshaking {
            future: HandshakeFuture::new(stream, state.tls_mode, state.params)
        })
    }
    /// Waits for DNS resolution and dials the first resolved address.
    fn poll_resolving_dns<'a>(
        state: &'a mut RentToOwn<'a, ResolvingDns<T>>,
    ) -> Poll<AfterResolvingDns<T>, Error> {
        let mut addrs = try_ready!(state.future.poll().map_err(Error::connect));
        let state = state.take();
        let addr = match addrs.next() {
            Some(addr) => addr,
            None => {
                return Err(Error::connect(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "resolved 0 addresses",
                )))
            }
        };
        transition!(ConnectingTcp {
            future: TcpStream::connect(&addr),
            // Keep the remaining addresses for fallback on connect failure.
            addrs,
            tls_mode: state.tls_mode,
            params: state.params,
        })
    }
    /// Waits for the TCP connection, falling back through the remaining
    /// resolved addresses on failure, then starts the handshake.
    fn poll_connecting_tcp<'a>(
        state: &'a mut RentToOwn<'a, ConnectingTcp<T>>,
    ) -> Poll<AfterConnectingTcp<T>, Error> {
        let stream = loop {
            match state.future.poll() {
                Ok(Async::Ready(stream)) => {
                    // Disable Nagle's algorithm: the Postgres protocol is
                    // request/response with many small writes, and delayed
                    // coalescing adds avoidable latency to every round trip.
                    stream.set_nodelay(true).map_err(Error::connect)?;
                    break Socket::new_tcp(stream);
                }
                Ok(Async::NotReady) => return Ok(Async::NotReady),
                Err(e) => {
                    // Try the next resolved address; give up only when the
                    // list is exhausted.
                    let addr = match state.addrs.next() {
                        Some(addr) => addr,
                        None => return Err(Error::connect(e)),
                    };
                    state.future = TcpStream::connect(&addr);
                }
            }
        };
        let state = state.take();
        transition!(Handshaking {
            future: HandshakeFuture::new(stream, state.tls_mode, state.params),
        })
    }
    /// Waits for the protocol handshake and yields the client/connection pair.
    fn poll_handshaking<'a>(
        state: &'a mut RentToOwn<'a, Handshaking<T>>,
    ) -> Poll<AfterHandshaking<T>, Error> {
        let r = try_ready!(state.future.poll());
        transition!(Finished(r))
    }
}
impl<T> ConnectOnceFuture<T>
where
    T: TlsMode<Socket>,
{
    /// Builds a future that performs a single connection attempt to
    /// `host:port` — or to a Unix-domain socket when `host` is a filesystem
    /// path — using `tls_mode` for TLS negotiation and `params` as the
    /// startup parameters passed to the handshake.
    ///
    /// On success the future resolves to a `(Client, Connection)` pair.
    pub fn new(
        host: String,
        port: u16,
        tls_mode: T,
        params: HashMap<String, String>,
    ) -> ConnectOnceFuture<T> {
        ConnectOnce::start(host, port, tls_mode, params)
    }
}
Turn on TCP nodelay in socket
use futures::{try_ready, Async, Future, Poll};
use futures_cpupool::{CpuFuture, CpuPool};
use lazy_static::lazy_static;
use state_machine_future::{transition, RentToOwn, StateMachineFuture};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, ToSocketAddrs};
#[cfg(unix)]
use std::path::Path;
use std::vec;
use tokio_tcp::TcpStream;
#[cfg(unix)]
use tokio_uds::UnixStream;
use crate::proto::{Client, Connection, HandshakeFuture};
use crate::{Error, Socket, TlsMode};
// Shared worker pool for DNS resolution: `to_socket_addrs` is a blocking,
// syscall-backed lookup, so it is run off the event loop on a small
// dedicated thread pool (2 threads, named "postgres-dns-*").
lazy_static! {
    static ref DNS_POOL: CpuPool = futures_cpupool::Builder::new()
        .name_prefix("postgres-dns-")
        .pool_size(2)
        .create();
}
// State machine for a single connection attempt: resolve the target (or use
// a Unix socket path), establish the stream, then run the Postgres
// handshake. Transitions are declared via `state_machine_future` attributes;
// plain `//` comments are used here so the custom derive never sees them.
#[derive(StateMachineFuture)]
pub enum ConnectOnce<T>
where
    T: TlsMode<Socket>,
{
    // Initial state; dispatches to a Unix or TCP connect based on whether
    // the host string starts with '/'.
    #[state_machine_future(start)]
    #[cfg_attr(unix, state_machine_future(transitions(ConnectingUnix, ResolvingDns)))]
    #[cfg_attr(not(unix), state_machine_future(transitions(ConnectingTcp)))]
    Start {
        host: String,
        port: u16,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    // Waiting on a Unix-domain socket connect (Unix targets only).
    #[cfg(unix)]
    #[state_machine_future(transitions(Handshaking))]
    ConnectingUnix {
        future: tokio_uds::ConnectFuture,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    // Waiting on blocking DNS resolution running on the CPU pool.
    #[state_machine_future(transitions(ConnectingTcp))]
    ResolvingDns {
        future: CpuFuture<vec::IntoIter<SocketAddr>, io::Error>,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    // Waiting on a TCP connect; `addrs` holds the remaining resolved
    // addresses to fall back to on failure.
    #[state_machine_future(transitions(Handshaking))]
    ConnectingTcp {
        future: tokio_tcp::ConnectFuture,
        addrs: vec::IntoIter<SocketAddr>,
        tls_mode: T,
        params: HashMap<String, String>,
    },
    // Running the Postgres startup/TLS handshake over the established socket.
    #[state_machine_future(transitions(Finished))]
    Handshaking { future: HandshakeFuture<Socket, T> },
    // Terminal success state: yields the client handle plus the connection
    // driver that must be spawned to service it.
    #[state_machine_future(ready)]
    Finished((Client, Connection<T::Stream>)),
    // Terminal error state.
    #[state_machine_future(error)]
    Failed(Error),
}
impl<T> PollConnectOnce<T> for ConnectOnce<T>
where
    T: TlsMode<Socket>,
{
    /// Entry point: route to a Unix-socket connect when `host` looks like a
    /// filesystem path (Unix only), otherwise kick off DNS resolution.
    fn poll_start<'a>(state: &'a mut RentToOwn<'a, Start<T>>) -> Poll<AfterStart<T>, Error> {
        let state = state.take();
        #[cfg(unix)]
        {
            if state.host.starts_with('/') {
                // Postgres Unix-domain sockets live at `<dir>/.s.PGSQL.<port>`.
                let path = Path::new(&state.host).join(format!(".s.PGSQL.{}", state.port));
                transition!(ConnectingUnix {
                    future: UnixStream::connect(path),
                    tls_mode: state.tls_mode,
                    params: state.params,
                })
            }
        }
        let host = state.host;
        let port = state.port;
        // `to_socket_addrs` is a blocking lookup, so run it on the dedicated
        // DNS thread pool instead of the event loop.
        transition!(ResolvingDns {
            future: DNS_POOL.spawn_fn(move || (&*host, port).to_socket_addrs()),
            tls_mode: state.tls_mode,
            params: state.params,
        })
    }
    #[cfg(unix)]
    fn poll_connecting_unix<'a>(
        state: &'a mut RentToOwn<'a, ConnectingUnix<T>>,
    ) -> Poll<AfterConnectingUnix<T>, Error> {
        let stream = try_ready!(state.future.poll().map_err(Error::connect));
        let stream = Socket::new_unix(stream);
        let state = state.take();
        transition!(Handshaking {
            future: HandshakeFuture::new(stream, state.tls_mode, state.params)
        })
    }
    fn poll_resolving_dns<'a>(
        state: &'a mut RentToOwn<'a, ResolvingDns<T>>,
    ) -> Poll<AfterResolvingDns<T>, Error> {
        let mut addrs = try_ready!(state.future.poll().map_err(Error::connect));
        let state = state.take();
        // Start with the first resolved address; the rest are kept as
        // fallbacks in `ConnectingTcp`.
        let addr = match addrs.next() {
            Some(addr) => addr,
            None => {
                return Err(Error::connect(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "resolved 0 addresses",
                )));
            }
        };
        transition!(ConnectingTcp {
            future: TcpStream::connect(&addr),
            addrs,
            tls_mode: state.tls_mode,
            params: state.params,
        })
    }
    fn poll_connecting_tcp<'a>(
        state: &'a mut RentToOwn<'a, ConnectingTcp<T>>,
    ) -> Poll<AfterConnectingTcp<T>, Error> {
        let stream = loop {
            match state.future.poll() {
                Ok(Async::Ready(stream)) => break stream,
                Ok(Async::NotReady) => return Ok(Async::NotReady),
                Err(e) => {
                    // Connection failed: fall back to the next resolved
                    // address, surfacing the last error if we run out.
                    let addr = match state.addrs.next() {
                        Some(addr) => addr,
                        None => return Err(Error::connect(e)),
                    };
                    state.future = TcpStream::connect(&addr);
                }
            }
        };
        let state = state.take();
        // Disable Nagle's algorithm: the Postgres protocol is
        // request/response oriented, so buffering small packets behind
        // unacknowledged data only adds latency.
        stream.set_nodelay(true).map_err(Error::connect)?;
        let stream = Socket::new_tcp(stream);
        transition!(Handshaking {
            future: HandshakeFuture::new(stream, state.tls_mode, state.params),
        })
    }
    fn poll_handshaking<'a>(
        state: &'a mut RentToOwn<'a, Handshaking<T>>,
    ) -> Poll<AfterHandshaking<T>, Error> {
        let r = try_ready!(state.future.poll());
        transition!(Finished(r))
    }
}
impl<T> ConnectOnceFuture<T>
where
    T: TlsMode<Socket>,
{
    /// Builds a future that performs a single connection attempt to
    /// `host:port` — or to a Unix-domain socket when `host` is a filesystem
    /// path — using `tls_mode` for TLS negotiation and `params` as the
    /// startup parameters passed to the handshake.
    ///
    /// On success the future resolves to a `(Client, Connection)` pair.
    pub fn new(
        host: String,
        port: u16,
        tls_mode: T,
        params: HashMap<String, String>,
    ) -> ConnectOnceFuture<T> {
        ConnectOnce::start(host, port, tls_mode, params)
    }
}
|
//! Frame a stream of bytes based on a length prefix
//!
//! Many protocols delimit their frames by prefacing frame data with a
//! frame head that specifies the length of the frame. The
//! `length_delimited` module provides utilities for handling the length
//! based framing. This allows the consumer to work with entire frames
//! without having to worry about buffering or other framing logic.
//!
//! # Getting started
//!
//! If implementing a protocol from scratch, using length delimited framing
//! is an easy way to get started. [`LengthDelimitedCodec::new()`] will
//! return a length delimited codec using default configuration values.
//! This can then be used to construct a framer to adapt a full-duplex
//! byte stream into a stream of frames.
//!
//! ```
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio_util::codec::{Framed, LengthDelimitedCodec};
//!
//! fn bind_transport<T: AsyncRead + AsyncWrite>(io: T)
//! -> Framed<T, LengthDelimitedCodec>
//! {
//! Framed::new(io, LengthDelimitedCodec::new())
//! }
//! # pub fn main() {}
//! ```
//!
//! The returned transport implements `Sink + Stream` for `BytesMut`. It
//! encodes the frame with a big-endian `u32` header denoting the frame
//! payload length:
//!
//! ```text
//! +----------+--------------------------------+
//! | len: u32 | frame payload |
//! +----------+--------------------------------+
//! ```
//!
//! Specifically, given the following:
//!
//! ```
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio_util::codec::{Framed, LengthDelimitedCodec};
//!
//! use futures::SinkExt;
//! use bytes::Bytes;
//!
//! async fn write_frame<T>(io: T) -> Result<(), Box<dyn std::error::Error>>
//! where
//! T: AsyncRead + AsyncWrite + Unpin,
//! {
//! let mut transport = Framed::new(io, LengthDelimitedCodec::new());
//! let frame = Bytes::from("hello world");
//!
//! transport.send(frame).await?;
//! Ok(())
//! }
//! ```
//!
//! The encoded frame will look like this:
//!
//! ```text
//! +---- len: u32 ----+---- data ----+
//! | \x00\x00\x00\x0b | hello world |
//! +------------------+--------------+
//! ```
//!
//! # Decoding
//!
//! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`],
//! such that each yielded [`BytesMut`] value contains the contents of an
//! entire frame. There are many configuration parameters enabling
//! [`FramedRead`] to handle a wide range of protocols. Here are some
//! examples that will cover the various options at a high level.
//!
//! ## Example 1
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(0) // default value
//! .num_skip(0) // Do not strip frame header
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0B | Hello world | --> | \x00\x0B | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! The value of the length field is 11 (`\x0B`) which represents the length
//! of the payload, `hello world`. By default, [`FramedRead`] assumes that
//! the length field represents the number of bytes that **follows** the
//! length field. Thus, the entire frame has a length of 13: 2 bytes for the
//! frame head + 11 bytes for the payload.
//!
//! ## Example 2
//!
//! The following will parse a `u16` length field at offset 0, omitting the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(0) // default value
//! // `num_skip` is not needed, the default is to skip
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +--- Payload ---+
//! | \x00\x0B | Hello world | --> | Hello world |
//! +----------+---------------+ +---------------+
//! ```
//!
//! This is similar to the first example, the only difference is that the
//! frame head is **not** included in the yielded `BytesMut` value.
//!
//! ## Example 3
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`. In this case, the length field
//! **includes** the frame head length.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(2)
//! .length_adjustment(-2) // size of head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0D | Hello world | --> | \x00\x0D | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! In most cases, the length field represents the length of the payload
//! only, as shown in the previous examples. However, in some protocols the
//! length field represents the length of the whole frame, including the
//! head. In such cases, we specify a negative `length_adjustment` to adjust
//! the value provided in the frame head to represent the payload length.
//!
//! ## Example 4
//!
//! The following will parse a 3 byte length field at offset 0 in a 5 byte
//! frame head, including the frame head in the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(3)
//! .length_adjustment(2) // remaining head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//!
//! DECODED
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//! ```
//!
//! A more advanced example that shows a case where there is extra frame
//! head data between the length field and the payload. In such cases, it is
//! usually desirable to include the frame head as part of the yielded
//! `BytesMut`. This lets consumers of the length delimited framer
//! process the frame head as needed.
//!
//! The positive `length_adjustment` value lets `FramedRead` factor in the
//! additional head into the frame length calculation.
//!
//! ## Example 5
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(1) // length of hdr1
//! .length_field_length(2)
//! .length_adjustment(1) // length of hdr2
//! .num_skip(3) // length of hdr1 + LEN
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0B | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! The length field is situated in the middle of the frame head. In this
//! case, the first byte in the frame head could be a version or some other
//! identifier that is not needed for processing. On the other hand, the
//! second half of the head is needed.
//!
//! `length_field_offset` indicates how many bytes to skip before starting
//! to read the length field. `length_adjustment` is the number of bytes to
//! skip starting at the end of the length field. In this case, it is the
//! second half of the head.
//!
//! ## Example 6
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included. In this case, the length field **includes** the frame head
//! length.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(1) // length of hdr1
//! .length_field_length(2)
//! .length_adjustment(-3) // length of hdr1 + LEN, negative
//! .num_skip(3)
//! .new_read(io);
//! # }
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0F | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! Similar to the example above, the difference is that the length field
//! represents the length of the entire frame instead of just the payload.
//! The length of `hdr1` and `len` must be counted in `length_adjustment`.
//! Note that the length of `hdr2` does **not** need to be explicitly set
//! anywhere because it already is factored into the total frame length that
//! is read from the byte stream.
//!
//! ## Example 7
//!
//! The following will parse a 3 byte length field at offset 0 in a 4 byte
//! frame head, excluding the 4th byte from the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(3)
//! .length_adjustment(0) // default value
//! .num_skip(4) // skip the first 4 bytes
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +------- len ------+--- Payload ---+ +--- Payload ---+
//! | \x00\x00\x0B\xFF | Hello world | => | Hello world |
//! +------------------+---------------+ +---------------+
//! ```
//!
//! A simple example where there are unused bytes between the length field
//! and the payload.
//!
//! # Encoding
//!
//! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`],
//! such that each submitted [`BytesMut`] is prefaced by a length field.
//! There are fewer configuration options than [`FramedRead`]. Given
//! protocols that have more complex frame heads, an encoder should probably
//! be written by hand using [`Encoder`].
//!
//! Here is a simple example, given a `FramedWrite` with the following
//! configuration:
//!
//! ```
//! # use tokio::io::AsyncWrite;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn write_frame<T: AsyncWrite>(io: T) {
//! # let _ =
//! LengthDelimitedCodec::builder()
//! .length_field_length(2)
//! .new_write(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! A payload of `hello world` will be encoded as:
//!
//! ```text
//! +- len: u16 -+---- data ----+
//! | \x00\x0b | hello world |
//! +------------+--------------+
//! ```
//!
//! [`LengthDelimitedCodec::new()`]: method@LengthDelimitedCodec::new
//! [`FramedRead`]: struct@FramedRead
//! [`FramedWrite`]: struct@FramedWrite
//! [`AsyncRead`]: trait@tokio::io::AsyncRead
//! [`AsyncWrite`]: trait@tokio::io::AsyncWrite
//! [`Encoder`]: trait@Encoder
//! [`BytesMut`]: bytes::BytesMut
use crate::codec::{Decoder, Encoder, Framed, FramedRead, FramedWrite};
use tokio::io::{AsyncRead, AsyncWrite};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::error::Error as StdError;
use std::io::{self, Cursor};
use std::{cmp, fmt};
/// Configure length delimited `LengthDelimitedCodec`s.
///
/// `Builder` enables constructing configured length delimited codecs. Note
/// that not all configuration settings apply to both encoding and decoding. See
/// the documentation for specific methods for more detail.
#[derive(Debug, Clone, Copy)]
pub struct Builder {
    // Maximum frame length; checked against the raw wire value when decoding
    // and against the payload length when encoding.
    max_frame_len: usize,
    // Number of bytes representing the field length (1..=8).
    length_field_len: usize,
    // Number of bytes in the header before the length field.
    length_field_offset: usize,
    // Adjust the length specified in the header field by this amount.
    length_adjustment: isize,
    // Total number of bytes to skip before reading the payload, if not set,
    // `length_field_len + length_field_offset`
    num_skip: Option<usize>,
    // Length field byte order (little or big endian).
    length_field_is_big_endian: bool,
}
/// An error when the number of bytes read is more than max frame length.
///
/// Returned boxed inside an `io::Error` (kind `InvalidData` when decoding,
/// `InvalidInput` when encoding).
pub struct LengthDelimitedCodecError {
    // Private field so the error cannot be constructed outside this module.
    _priv: (),
}
/// A codec for frames delimited by a frame head specifying their lengths.
///
/// This allows the consumer to work with entire frames without having to worry
/// about buffering or other framing logic.
///
/// See [module level] documentation for more detail.
///
/// [module level]: index.html
#[derive(Debug, Clone)]
pub struct LengthDelimitedCodec {
    // Configuration values
    builder: Builder,
    // Read state: whether we are waiting for a head or a payload.
    state: DecodeState,
}
// Decoder state: waiting for a complete frame head, or for the payload of a
// frame whose head has already been parsed.
#[derive(Debug, Clone, Copy)]
enum DecodeState {
    // Waiting for enough bytes to parse the length field.
    Head,
    // Head parsed; waiting for this many payload bytes.
    Data(usize),
}
// ===== impl LengthDelimitedCodec ======
impl LengthDelimitedCodec {
    /// Creates a new `LengthDelimitedCodec` with the default configuration values.
    pub fn new() -> Self {
        Self {
            builder: Builder::new(),
            state: DecodeState::Head,
        }
    }
    /// Creates a new length delimited codec builder with default configuration
    /// values.
    pub fn builder() -> Builder {
        Builder::new()
    }
    /// Returns the current max frame setting
    ///
    /// This is the largest size this codec will accept from the wire. Larger
    /// frames will be rejected.
    pub fn max_frame_length(&self) -> usize {
        self.builder.max_frame_len
    }
    /// Updates the max frame setting.
    ///
    /// The change takes effect the next time a frame is decoded. In other
    /// words, if a frame is currently in process of being decoded with a frame
    /// size greater than `val` but less than the max frame length in effect
    /// before calling this function, then the frame will be allowed.
    pub fn set_max_frame_length(&mut self, val: usize) {
        self.builder.max_frame_length(val);
    }
    /// Attempts to parse a frame head out of `src`.
    ///
    /// Returns `Ok(None)` when `src` does not yet hold a complete head, or
    /// `Ok(Some(n))` with the adjusted payload length once it does (after
    /// advancing `src` past the skipped head bytes). Errors if the announced
    /// length exceeds `max_frame_len` or over/underflows after adjustment.
    fn decode_head(&mut self, src: &mut BytesMut) -> io::Result<Option<usize>> {
        let head_len = self.builder.num_head_bytes();
        let field_len = self.builder.length_field_len;
        if src.len() < head_len {
            // Not enough data
            return Ok(None);
        }
        let n = {
            // Read through a cursor so `src` itself is not consumed until the
            // whole head is known to be valid.
            let mut src = Cursor::new(&mut *src);
            // Skip the required bytes
            src.advance(self.builder.length_field_offset);
            // match endianness
            let n = if self.builder.length_field_is_big_endian {
                src.get_uint(field_len)
            } else {
                src.get_uint_le(field_len)
            };
            // The limit is checked against the raw wire value, before any
            // adjustment is applied.
            if n > self.builder.max_frame_len as u64 {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    LengthDelimitedCodecError { _priv: () },
                ));
            }
            // The check above ensures there is no overflow
            let n = n as usize;
            // Adjust `n` with bounds checking
            let n = if self.builder.length_adjustment < 0 {
                n.checked_sub(-self.builder.length_adjustment as usize)
            } else {
                n.checked_add(self.builder.length_adjustment as usize)
            };
            // Error handling
            match n {
                Some(n) => n,
                None => {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "provided length would overflow after adjustment",
                    ));
                }
            }
        };
        let num_skip = self.builder.get_num_skip();
        if num_skip > 0 {
            src.advance(num_skip);
        }
        // Ensure that the buffer has enough space to read the incoming
        // payload
        src.reserve(n);
        Ok(Some(n))
    }
    /// Splits one `n`-byte frame off the front of `src`, or returns `None`
    /// if the full payload has not arrived yet.
    fn decode_data(&self, n: usize, src: &mut BytesMut) -> Option<BytesMut> {
        // At this point, the buffer has already had the required capacity
        // reserved. All there is to do is read.
        if src.len() < n {
            return None;
        }
        Some(src.split_to(n))
    }
}
impl Decoder for LengthDelimitedCodec {
    type Item = BytesMut;
    type Error = io::Error;

    /// Pulls the next complete frame out of `src`, parsing the head first if
    /// one is not already pending; returns `Ok(None)` until enough bytes
    /// have arrived.
    fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        // Determine how many payload bytes the current frame needs, parsing
        // a fresh head when we are not mid-frame.
        let required = match self.state {
            DecodeState::Data(len) => len,
            DecodeState::Head => {
                let len = match self.decode_head(src)? {
                    Some(len) => len,
                    None => return Ok(None),
                };
                // Remember the pending payload length across calls.
                self.state = DecodeState::Data(len);
                len
            }
        };
        if let Some(frame) = self.decode_data(required, src) {
            // Frame complete: reset for the next head and pre-reserve room
            // for it in the buffer.
            self.state = DecodeState::Head;
            src.reserve(self.builder.num_head_bytes());
            Ok(Some(frame))
        } else {
            Ok(None)
        }
    }
}
impl Encoder<Bytes> for LengthDelimitedCodec {
    type Error = io::Error;

    /// Writes `data` to `dst` prefaced by a length field, formatted per the
    /// builder configuration (field width, endianness, length adjustment).
    fn encode(&mut self, data: Bytes, dst: &mut BytesMut) -> Result<(), io::Error> {
        let payload_len = data.len();

        // Reject frames larger than the configured limit up front.
        if payload_len > self.builder.max_frame_len {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                LengthDelimitedCodecError { _priv: () },
            ));
        }

        // Apply the configured adjustment to derive the wire value, guarding
        // against under/overflow.
        let adjustment = self.builder.length_adjustment;
        let field_value = if adjustment < 0 {
            payload_len.checked_add(-adjustment as usize)
        } else {
            payload_len.checked_sub(adjustment as usize)
        }
        .ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "provided length would overflow after adjustment",
            )
        })?;

        // One reservation covers both the length field and the frame.
        dst.reserve(self.builder.length_field_len + field_value);

        // Emit the length field in the configured byte order, then the data.
        if self.builder.length_field_is_big_endian {
            dst.put_uint(field_value as u64, self.builder.length_field_len);
        } else {
            dst.put_uint_le(field_value as u64, self.builder.length_field_len);
        }
        dst.extend_from_slice(&data[..]);
        Ok(())
    }
}
impl Default for LengthDelimitedCodec {
    /// Equivalent to [`LengthDelimitedCodec::new`].
    fn default() -> Self {
        Self::new()
    }
}
// ===== impl Builder =====
impl Builder {
    /// Creates a new length delimited codec builder with default configuration
    /// values.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new() -> Builder {
        Builder {
            // Default max frame length of 8MB
            max_frame_len: 8 * 1_024 * 1_024,
            // Default byte length of 4
            length_field_len: 4,
            // Default to the header field being at the start of the header.
            length_field_offset: 0,
            length_adjustment: 0,
            // Total number of bytes to skip before reading the payload, if not set,
            // `length_field_len + length_field_offset`
            num_skip: None,
            // Default to reading the length field in network (big) endian.
            length_field_is_big_endian: true,
        }
    }
    /// Read the length field as a big endian integer
    ///
    /// This is the default setting.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .big_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn big_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = true;
        self
    }
    /// Read the length field as a little endian integer
    ///
    /// The default setting is big endian.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .little_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn little_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = false;
        self
    }
    /// Read the length field as a native endian integer
    ///
    /// The default setting is big endian.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .native_endian()
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn native_endian(&mut self) -> &mut Self {
        // Resolved at compile time via the target's endianness.
        if cfg!(target_endian = "big") {
            self.big_endian()
        } else {
            self.little_endian()
        }
    }
    /// Sets the max frame length in bytes
    ///
    /// This configuration option applies to both encoding and decoding. The
    /// default value is 8MB.
    ///
    /// When decoding, the length field read from the byte stream is checked
    /// against this setting **before** any adjustments are applied. When
    /// encoding, the length of the submitted payload is checked against this
    /// setting.
    ///
    /// When frames exceed the max length, an `io::Error` with the custom value
    /// of the `LengthDelimitedCodecError` type will be returned.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .max_frame_length(8 * 1024 * 1024)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn max_frame_length(&mut self, val: usize) -> &mut Self {
        self.max_frame_len = val;
        self
    }
    /// Sets the number of bytes used to represent the length field
    ///
    /// The default value is `4`. The max value is `8`.
    ///
    /// This configuration option applies to both encoding and decoding.
    ///
    /// # Panics
    ///
    /// Panics if `val` is not in the range `1..=8`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .length_field_length(4)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_field_length(&mut self, val: usize) -> &mut Self {
        assert!(val > 0 && val <= 8, "invalid length field length");
        self.length_field_len = val;
        self
    }
    /// Sets the number of bytes in the header before the length field
    ///
    /// This configuration option only applies to decoding.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .length_field_offset(1)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_field_offset(&mut self, val: usize) -> &mut Self {
        self.length_field_offset = val;
        self
    }
    /// Delta between the payload length specified in the header and the real
    /// payload length
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .length_adjustment(-2)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn length_adjustment(&mut self, val: isize) -> &mut Self {
        self.length_adjustment = val;
        self
    }
    /// Sets the number of bytes to skip before reading the payload
    ///
    /// Default value is `length_field_len + length_field_offset`
    ///
    /// This configuration option only applies to decoding
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .num_skip(4)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn num_skip(&mut self, val: usize) -> &mut Self {
        self.num_skip = Some(val);
        self
    }
    /// Create a configured length delimited `LengthDelimitedCodec`
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio_util::codec::LengthDelimitedCodec;
    /// # pub fn main() {
    /// LengthDelimitedCodec::builder()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_codec();
    /// # }
    /// ```
    pub fn new_codec(&self) -> LengthDelimitedCodec {
        LengthDelimitedCodec {
            // `Builder` is `Copy`, so the codec takes its own snapshot of
            // the configuration.
            builder: *self,
            state: DecodeState::Head,
        }
    }
    /// Create a configured length delimited `FramedRead`
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncRead;
    /// use tokio_util::codec::LengthDelimitedCodec;
    ///
    /// # fn bind_read<T: AsyncRead>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .length_field_offset(0)
    ///     .length_field_length(2)
    ///     .length_adjustment(0)
    ///     .num_skip(0)
    ///     .new_read(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_read<T>(&self, upstream: T) -> FramedRead<T, LengthDelimitedCodec>
    where
        T: AsyncRead,
    {
        FramedRead::new(upstream, self.new_codec())
    }
    /// Create a configured length delimited `FramedWrite`
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::AsyncWrite;
    /// # use tokio_util::codec::LengthDelimitedCodec;
    /// # fn write_frame<T: AsyncWrite>(io: T) {
    /// LengthDelimitedCodec::builder()
    ///     .length_field_length(2)
    ///     .new_write(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_write<T>(&self, inner: T) -> FramedWrite<T, LengthDelimitedCodec>
    where
        T: AsyncWrite,
    {
        FramedWrite::new(inner, self.new_codec())
    }
    /// Create a configured length delimited `Framed`
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::io::{AsyncRead, AsyncWrite};
    /// # use tokio_util::codec::LengthDelimitedCodec;
    /// # fn write_frame<T: AsyncRead + AsyncWrite>(io: T) {
    /// # let _ =
    /// LengthDelimitedCodec::builder()
    ///     .length_field_length(2)
    ///     .new_framed(io);
    /// # }
    /// # pub fn main() {}
    /// ```
    pub fn new_framed<T>(&self, inner: T) -> Framed<T, LengthDelimitedCodec>
    where
        T: AsyncRead + AsyncWrite,
    {
        Framed::new(inner, self.new_codec())
    }
    /// Number of bytes the decoder must buffer before the head can be
    /// parsed: at least `length_field_offset + length_field_len`, but never
    /// less than an explicit `num_skip` (which may extend past the field).
    fn num_head_bytes(&self) -> usize {
        let num = self.length_field_offset + self.length_field_len;
        cmp::max(num, self.num_skip.unwrap_or(0))
    }
    /// Bytes to discard before the payload: the explicit `num_skip` if set,
    /// otherwise the head up to and including the length field.
    fn get_num_skip(&self) -> usize {
        self.num_skip
            .unwrap_or(self.length_field_offset + self.length_field_len)
    }
}
impl Default for Builder {
    /// Equivalent to [`Builder::new`].
    fn default() -> Self {
        Self::new()
    }
}
// ===== impl LengthDelimitedCodecError =====
// Hand-written so the private `_priv` field never shows up in debug output.
impl fmt::Debug for LengthDelimitedCodecError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("LengthDelimitedCodecError").finish()
    }
}
impl fmt::Display for LengthDelimitedCodecError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Message kept byte-for-byte; downstream code may match on it.
        write!(fmt, "frame size too big")
    }
}
impl StdError for LengthDelimitedCodecError {}
// codec: add `length_field_type` to `LengthDelimitedCodec` builder (#4508)
//! Frame a stream of bytes based on a length prefix
//!
//! Many protocols delimit their frames by prefacing frame data with a
//! frame head that specifies the length of the frame. The
//! `length_delimited` module provides utilities for handling the length
//! based framing. This allows the consumer to work with entire frames
//! without having to worry about buffering or other framing logic.
//!
//! # Getting started
//!
//! If implementing a protocol from scratch, using length delimited framing
//! is an easy way to get started. [`LengthDelimitedCodec::new()`] will
//! return a length delimited codec using default configuration values.
//! This can then be used to construct a framer to adapt a full-duplex
//! byte stream into a stream of frames.
//!
//! ```
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio_util::codec::{Framed, LengthDelimitedCodec};
//!
//! fn bind_transport<T: AsyncRead + AsyncWrite>(io: T)
//! -> Framed<T, LengthDelimitedCodec>
//! {
//! Framed::new(io, LengthDelimitedCodec::new())
//! }
//! # pub fn main() {}
//! ```
//!
//! The returned transport implements `Sink + Stream` for `BytesMut`. It
//! encodes the frame with a big-endian `u32` header denoting the frame
//! payload length:
//!
//! ```text
//! +----------+--------------------------------+
//! | len: u32 | frame payload |
//! +----------+--------------------------------+
//! ```
//!
//! Specifically, given the following:
//!
//! ```
//! use tokio::io::{AsyncRead, AsyncWrite};
//! use tokio_util::codec::{Framed, LengthDelimitedCodec};
//!
//! use futures::SinkExt;
//! use bytes::Bytes;
//!
//! async fn write_frame<T>(io: T) -> Result<(), Box<dyn std::error::Error>>
//! where
//! T: AsyncRead + AsyncWrite + Unpin,
//! {
//! let mut transport = Framed::new(io, LengthDelimitedCodec::new());
//! let frame = Bytes::from("hello world");
//!
//! transport.send(frame).await?;
//! Ok(())
//! }
//! ```
//!
//! The encoded frame will look like this:
//!
//! ```text
//! +---- len: u32 ----+---- data ----+
//! | \x00\x00\x00\x0b | hello world |
//! +------------------+--------------+
//! ```
//!
//! # Decoding
//!
//! [`FramedRead`] adapts an [`AsyncRead`] into a `Stream` of [`BytesMut`],
//! such that each yielded [`BytesMut`] value contains the contents of an
//! entire frame. There are many configuration parameters enabling
//! [`FramedRead`] to handle a wide range of protocols. Here are some
//! examples that will cover the various options at a high level.
//!
//! ## Example 1
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_type::<u16>()
//! .length_adjustment(0) // default value
//! .num_skip(0) // Do not strip frame header
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0B | Hello world | --> | \x00\x0B | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! The value of the length field is 11 (`\x0B`) which represents the length
//! of the payload, `hello world`. By default, [`FramedRead`] assumes that
//! the length field represents the number of bytes that **follows** the
//! length field. Thus, the entire frame has a length of 13: 2 bytes for the
//! frame head + 11 bytes for the payload.
//!
//! ## Example 2
//!
//! The following will parse a `u16` length field at offset 0, omitting the
//! frame head in the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_type::<u16>()
//! .length_adjustment(0) // default value
//! // `num_skip` is not needed, the default is to skip
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +--- Payload ---+
//! | \x00\x0B | Hello world | --> | Hello world |
//! +----------+---------------+ +---------------+
//! ```
//!
//! This is similar to the first example, the only difference is that the
//! frame head is **not** included in the yielded `BytesMut` value.
//!
//! ## Example 3
//!
//! The following will parse a `u16` length field at offset 0, including the
//! frame head in the yielded `BytesMut`. In this case, the length field
//! **includes** the frame head length.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_type::<u16>()
//! .length_adjustment(-2) // size of head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +-- len ---+--- Payload ---+ +-- len ---+--- Payload ---+
//! | \x00\x0D | Hello world | --> | \x00\x0D | Hello world |
//! +----------+---------------+ +----------+---------------+
//! ```
//!
//! In most cases, the length field represents the length of the payload
//! only, as shown in the previous examples. However, in some protocols the
//! length field represents the length of the whole frame, including the
//! head. In such cases, we specify a negative `length_adjustment` to adjust
//! the value provided in the frame head to represent the payload length.
//!
//! ## Example 4
//!
//! The following will parse a 3 byte length field at offset 0 in a 5 byte
//! frame head, including the frame head in the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(3)
//! .length_adjustment(2) // remaining head
//! .num_skip(0)
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//!
//! DECODED
//! +---- len -----+- head -+--- Payload ---+
//! | \x00\x00\x0B | \xCAFE | Hello world |
//! +--------------+--------+---------------+
//! ```
//!
//! A more advanced example that shows a case where there is extra frame
//! head data between the length field and the payload. In such cases, it is
//! usually desirable to include the frame head as part of the yielded
//! `BytesMut`. This lets consumers of the length delimited framer to
//! process the frame head as needed.
//!
//! The positive `length_adjustment` value lets `FramedRead` factor in the
//! additional head into the frame length calculation.
//!
//! ## Example 5
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(1) // length of hdr1
//! .length_field_type::<u16>()
//! .length_adjustment(1) // length of hdr2
//! .num_skip(3) // length of hdr1 + LEN
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0B | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! The length field is situated in the middle of the frame head. In this
//! case, the first byte in the frame head could be a version or some other
//! identifier that is not needed for processing. On the other hand, the
//! second half of the head is needed.
//!
//! `length_field_offset` indicates how many bytes to skip before starting
//! to read the length field. `length_adjustment` is the number of bytes to
//! skip starting at the end of the length field. In this case, it is the
//! second half of the head.
//!
//! ## Example 6
//!
//! The following will parse a `u16` length field at offset 1 of a 4 byte
//! frame head. The first byte and the length field will be omitted from the
//! yielded `BytesMut`, but the trailing 2 bytes of the frame head will be
//! included. In this case, the length field **includes** the frame head
//! length.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(1) // length of hdr1
//! .length_field_type::<u16>()
//! .length_adjustment(-3) // length of hdr1 + LEN, negative
//! .num_skip(3)
//! .new_read(io);
//! # }
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT
//! +- hdr1 -+-- len ---+- hdr2 -+--- Payload ---+
//! | \xCA | \x00\x0F | \xFE | Hello world |
//! +--------+----------+--------+---------------+
//!
//! DECODED
//! +- hdr2 -+--- Payload ---+
//! | \xFE | Hello world |
//! +--------+---------------+
//! ```
//!
//! Similar to the example above, the difference is that the length field
//! represents the length of the entire frame instead of just the payload.
//! The length of `hdr1` and `len` must be counted in `length_adjustment`.
//! Note that the length of `hdr2` does **not** need to be explicitly set
//! anywhere because it already is factored into the total frame length that
//! is read from the byte stream.
//!
//! ## Example 7
//!
//! The following will parse a 3 byte length field at offset 0 in a 4 byte
//! frame head, excluding the 4th byte from the yielded `BytesMut`.
//!
//! ```
//! # use tokio::io::AsyncRead;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn bind_read<T: AsyncRead>(io: T) {
//! LengthDelimitedCodec::builder()
//! .length_field_offset(0) // default value
//! .length_field_length(3)
//! .length_adjustment(0) // default value
//! .num_skip(4) // skip the first 4 bytes
//! .new_read(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! The following frame will be decoded as such:
//!
//! ```text
//! INPUT DECODED
//! +------- len ------+--- Payload ---+ +--- Payload ---+
//! | \x00\x00\x0B\xFF | Hello world | => | Hello world |
//! +------------------+---------------+ +---------------+
//! ```
//!
//! A simple example where there are unused bytes between the length field
//! and the payload.
//!
//! # Encoding
//!
//! [`FramedWrite`] adapts an [`AsyncWrite`] into a `Sink` of [`BytesMut`],
//! such that each submitted [`BytesMut`] is prefaced by a length field.
//! There are fewer configuration options than [`FramedRead`]. Given
//! protocols that have more complex frame heads, an encoder should probably
//! be written by hand using [`Encoder`].
//!
//! Here is a simple example, given a `FramedWrite` with the following
//! configuration:
//!
//! ```
//! # use tokio::io::AsyncWrite;
//! # use tokio_util::codec::LengthDelimitedCodec;
//! # fn write_frame<T: AsyncWrite>(io: T) {
//! # let _ =
//! LengthDelimitedCodec::builder()
//! .length_field_type::<u16>()
//! .new_write(io);
//! # }
//! # pub fn main() {}
//! ```
//!
//! A payload of `hello world` will be encoded as:
//!
//! ```text
//! +- len: u16 -+---- data ----+
//! | \x00\x0b | hello world |
//! +------------+--------------+
//! ```
//!
//! [`LengthDelimitedCodec::new()`]: method@LengthDelimitedCodec::new
//! [`FramedRead`]: struct@FramedRead
//! [`FramedWrite`]: struct@FramedWrite
//! [`AsyncRead`]: trait@tokio::io::AsyncRead
//! [`AsyncWrite`]: trait@tokio::io::AsyncWrite
//! [`Encoder`]: trait@Encoder
//! [`BytesMut`]: bytes::BytesMut
use crate::codec::{Decoder, Encoder, Framed, FramedRead, FramedWrite};
use tokio::io::{AsyncRead, AsyncWrite};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::error::Error as StdError;
use std::io::{self, Cursor};
use std::{cmp, fmt, mem};
/// Configure length delimited `LengthDelimitedCodec`s.
///
/// `Builder` enables constructing configured length delimited codecs. Note
/// that not all configuration settings apply to both encoding and decoding. See
/// the documentation for specific methods for more detail.
#[derive(Debug, Clone, Copy)]
pub struct Builder {
    // Maximum frame length (default: 8 MB); applies to both directions
    max_frame_len: usize,
    // Number of bytes representing the field length (default: 4, max: 8)
    length_field_len: usize,
    // Number of bytes in the header before the length field (decode only)
    length_field_offset: usize,
    // Adjust the length specified in the header field by this amount
    length_adjustment: isize,
    // Total number of bytes to skip before reading the payload, if not set,
    // `length_field_len + length_field_offset`
    num_skip: Option<usize>,
    // Length field byte order (little or big endian); big endian by default
    length_field_is_big_endian: bool,
}
/// An error when the number of bytes read is more than max frame length.
pub struct LengthDelimitedCodecError {
    // Private zero-sized field: prevents construction outside this module
    // and leaves room to add data later without a breaking change.
    _priv: (),
}
/// A codec for frames delimited by a frame head specifying their lengths.
///
/// This allows the consumer to work with entire frames without having to worry
/// about buffering or other framing logic.
///
/// See [module level] documentation for more detail.
///
/// [module level]: index.html
#[derive(Debug, Clone)]
pub struct LengthDelimitedCodec {
    // Configuration values
    builder: Builder,
    // Read state: remembers whether we are mid-frame so that partial reads
    // resume correctly on the next `decode` call
    state: DecodeState,
}
// Decoder state machine for `LengthDelimitedCodec::decode`.
#[derive(Debug, Clone, Copy)]
enum DecodeState {
    // Waiting for enough bytes to parse the frame head.
    Head,
    // Head parsed; waiting for this many payload bytes.
    Data(usize),
}
// ===== impl LengthDelimitedCodec ======
impl LengthDelimitedCodec {
    /// Creates a new `LengthDelimitedCodec` with the default configuration values.
    pub fn new() -> Self {
        Self {
            builder: Builder::new(),
            state: DecodeState::Head,
        }
    }
    /// Creates a new length delimited codec builder with default configuration
    /// values.
    pub fn builder() -> Builder {
        Builder::new()
    }
    /// Returns the current max frame setting
    ///
    /// This is the largest size this codec will accept from the wire. Larger
    /// frames will be rejected.
    pub fn max_frame_length(&self) -> usize {
        self.builder.max_frame_len
    }
    /// Updates the max frame setting.
    ///
    /// The change takes effect the next time a frame is decoded. In other
    /// words, if a frame is currently in process of being decoded with a frame
    /// size greater than `val` but less than the max frame length in effect
    /// before calling this function, then the frame will be allowed.
    pub fn set_max_frame_length(&mut self, val: usize) {
        self.builder.max_frame_length(val);
    }
    // Attempt to parse the frame head. Returns Ok(Some(payload_len)) when the
    // head is complete (consuming the skipped head bytes from `src`),
    // Ok(None) when more bytes are needed, or an error when the advertised
    // length exceeds the limit or overflows after adjustment.
    fn decode_head(&mut self, src: &mut BytesMut) -> io::Result<Option<usize>> {
        let head_len = self.builder.num_head_bytes();
        let field_len = self.builder.length_field_len;
        if src.len() < head_len {
            // Not enough data
            return Ok(None);
        }
        let n = {
            // Read through a cursor so `src` itself is not consumed until we
            // know the whole head is present and valid.
            let mut src = Cursor::new(&mut *src);
            // Skip the required bytes
            src.advance(self.builder.length_field_offset);
            // match endianness
            let n = if self.builder.length_field_is_big_endian {
                src.get_uint(field_len)
            } else {
                src.get_uint_le(field_len)
            };
            // Enforce the limit on the raw wire value, before any adjustment.
            if n > self.builder.max_frame_len as u64 {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    LengthDelimitedCodecError { _priv: () },
                ));
            }
            // The check above ensures there is no overflow
            let n = n as usize;
            // Adjust `n` with bounds checking
            let n = if self.builder.length_adjustment < 0 {
                n.checked_sub(-self.builder.length_adjustment as usize)
            } else {
                n.checked_add(self.builder.length_adjustment as usize)
            };
            // Error handling
            match n {
                Some(n) => n,
                None => {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "provided length would overflow after adjustment",
                    ));
                }
            }
        };
        // Head is valid: actually consume the skipped bytes from the buffer.
        let num_skip = self.builder.get_num_skip();
        if num_skip > 0 {
            src.advance(num_skip);
        }
        // Ensure that the buffer has enough space to read the incoming
        // payload
        src.reserve(n);
        Ok(Some(n))
    }
    // Split off one complete `n`-byte payload, or None if it has not fully
    // arrived yet.
    fn decode_data(&self, n: usize, src: &mut BytesMut) -> Option<BytesMut> {
        // At this point, the buffer has already had the required capacity
        // reserved. All there is to do is read.
        if src.len() < n {
            return None;
        }
        Some(src.split_to(n))
    }
}
impl Decoder for LengthDelimitedCodec {
    type Item = BytesMut;
    type Error = io::Error;

    /// Pull one length-delimited frame out of `src`, or return `Ok(None)`
    /// when more bytes are needed. Progress is remembered in `self.state`,
    /// so a partially received frame survives across calls.
    fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<BytesMut>> {
        // Resolve the expected payload length, parsing the head first if we
        // have not yet done so for the current frame.
        let n = match self.state {
            DecodeState::Data(n) => n,
            DecodeState::Head => match self.decode_head(src)? {
                None => return Ok(None),
                Some(n) => {
                    self.state = DecodeState::Data(n);
                    n
                }
            },
        };

        if let Some(data) = self.decode_data(n, src) {
            // Frame complete: reset the state machine and pre-reserve room
            // for the next frame head.
            self.state = DecodeState::Head;
            src.reserve(self.builder.num_head_bytes());
            Ok(Some(data))
        } else {
            Ok(None)
        }
    }
}
impl Encoder<Bytes> for LengthDelimitedCodec {
    type Error = io::Error;
    // Length-prefix `data` and append the whole frame to `dst`.
    fn encode(&mut self, data: Bytes, dst: &mut BytesMut) -> Result<(), io::Error> {
        let n = data.len();
        // The payload itself must not exceed the configured maximum.
        if n > self.builder.max_frame_len {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                LengthDelimitedCodecError { _priv: () },
            ));
        }
        // Adjust `n` with bounds checking. Note this is the inverse of the
        // decode-side adjustment, so the wire value round-trips back to the
        // payload length.
        let n = if self.builder.length_adjustment < 0 {
            n.checked_add(-self.builder.length_adjustment as usize)
        } else {
            n.checked_sub(self.builder.length_adjustment as usize)
        };
        let n = n.ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "provided length would overflow after adjustment",
            )
        })?;
        // Reserve capacity in the destination buffer to fit the frame and
        // length field (plus adjustment).
        dst.reserve(self.builder.length_field_len + n);
        // Write the length field with the configured endianness.
        if self.builder.length_field_is_big_endian {
            dst.put_uint(n as u64, self.builder.length_field_len);
        } else {
            dst.put_uint_le(n as u64, self.builder.length_field_len);
        }
        // Write the frame to the buffer
        dst.extend_from_slice(&data[..]);
        Ok(())
    }
}
impl Default for LengthDelimitedCodec {
fn default() -> Self {
Self::new()
}
}
// ===== impl Builder =====
// Private module: `LengthFieldType` is public within it but the module path
// is not exported, so downstream crates cannot implement the trait for new
// types (a sealed trait).
mod builder {
    /// Types that can be used with `Builder::length_field_type`.
    pub trait LengthFieldType {}
    impl LengthFieldType for u8 {}
    impl LengthFieldType for u16 {}
    impl LengthFieldType for u32 {}
    impl LengthFieldType for u64 {}
    // `usize` is allowed only on targets where it is at most 64 bits wide,
    // matching the 8-byte cap enforced by `Builder::length_field_length`.
    #[cfg(any(
        target_pointer_width = "8",
        target_pointer_width = "16",
        target_pointer_width = "32",
        target_pointer_width = "64",
    ))]
    impl LengthFieldType for usize {}
}
impl Builder {
    /// Creates a builder with the default configuration: 8 MB max frame
    /// length, a 4-byte big-endian length field at offset 0, no length
    /// adjustment, and `num_skip` unset (skip the whole head).
    pub fn new() -> Builder {
        Builder {
            max_frame_len: 8 * 1_024 * 1_024,
            length_field_len: 4,
            length_field_offset: 0,
            length_adjustment: 0,
            num_skip: None,
            length_field_is_big_endian: true,
        }
    }

    /// Read and write the length field in network (big-endian) byte order.
    ///
    /// This is the default; applies to both encoding and decoding.
    pub fn big_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = true;
        self
    }

    /// Read and write the length field in little-endian byte order.
    ///
    /// Applies to both encoding and decoding. The default is big endian.
    pub fn little_endian(&mut self) -> &mut Self {
        self.length_field_is_big_endian = false;
        self
    }

    /// Use the compilation target's native byte order for the length field.
    ///
    /// Applies to both encoding and decoding. The default is big endian.
    pub fn native_endian(&mut self) -> &mut Self {
        if cfg!(target_endian = "big") {
            self.big_endian()
        } else {
            self.little_endian()
        }
    }

    /// Sets the maximum allowed frame size in bytes (default 8 MB).
    ///
    /// Decoding checks the raw value read from the wire against this limit
    /// **before** any adjustment; encoding checks the payload length.
    /// Oversized frames produce an `io::Error` whose source is a
    /// `LengthDelimitedCodecError`.
    pub fn max_frame_length(&mut self, val: usize) -> &mut Self {
        self.max_frame_len = val;
        self
    }

    /// Sets the length-field width from an unsigned integer type, e.g.
    /// `u16` for a two-byte field (default `u32`).
    ///
    /// Unlike [`Builder::length_field_length`], an unsupported type is
    /// rejected at compile time rather than panicking at runtime.
    pub fn length_field_type<T: builder::LengthFieldType>(&mut self) -> &mut Self {
        let width = mem::size_of::<T>();
        self.length_field_length(width)
    }

    /// Sets the length-field width in bytes (default 4, maximum 8).
    ///
    /// Applies to both encoding and decoding.
    ///
    /// # Panics
    ///
    /// Panics when `val` is zero or greater than 8.
    pub fn length_field_length(&mut self, val: usize) -> &mut Self {
        assert!(val > 0 && val <= 8, "invalid length field length");
        self.length_field_len = val;
        self
    }

    /// Sets how many header bytes precede the length field.
    ///
    /// Only applies to decoding.
    pub fn length_field_offset(&mut self, val: usize) -> &mut Self {
        self.length_field_offset = val;
        self
    }

    /// Sets the delta between the value carried in the length field and the
    /// real payload length. May be negative (e.g. when the wire value counts
    /// the head as well).
    pub fn length_adjustment(&mut self, val: isize) -> &mut Self {
        self.length_adjustment = val;
        self
    }

    /// Sets how many bytes to discard before the payload (decoding only).
    ///
    /// When unset, `length_field_offset + length_field_len` bytes — i.e.
    /// the whole head — are skipped.
    pub fn num_skip(&mut self, val: usize) -> &mut Self {
        self.num_skip = Some(val);
        self
    }

    /// Builds a `LengthDelimitedCodec` snapshotting the current
    /// configuration.
    pub fn new_codec(&self) -> LengthDelimitedCodec {
        LengthDelimitedCodec {
            builder: *self,
            state: DecodeState::Head,
        }
    }

    /// Builds a decoding `FramedRead` over `upstream` with this
    /// configuration.
    pub fn new_read<T>(&self, upstream: T) -> FramedRead<T, LengthDelimitedCodec>
    where
        T: AsyncRead,
    {
        let codec = self.new_codec();
        FramedRead::new(upstream, codec)
    }

    /// Builds an encoding `FramedWrite` over `inner` with this
    /// configuration.
    pub fn new_write<T>(&self, inner: T) -> FramedWrite<T, LengthDelimitedCodec>
    where
        T: AsyncWrite,
    {
        let codec = self.new_codec();
        FramedWrite::new(inner, codec)
    }

    /// Builds a full-duplex `Framed` transport over `inner` with this
    /// configuration.
    pub fn new_framed<T>(&self, inner: T) -> Framed<T, LengthDelimitedCodec>
    where
        T: AsyncRead + AsyncWrite,
    {
        let codec = self.new_codec();
        Framed::new(inner, codec)
    }

    // Bytes that must be buffered before the head can be processed: at
    // least the length field plus its offset, and at least `num_skip`.
    fn num_head_bytes(&self) -> usize {
        let field_end = self.length_field_offset + self.length_field_len;
        cmp::max(field_end, self.num_skip.unwrap_or(0))
    }

    // Bytes to discard before the payload, applying the default when
    // `num_skip` was not configured.
    fn get_num_skip(&self) -> usize {
        match self.num_skip {
            Some(n) => n,
            None => self.length_field_offset + self.length_field_len,
        }
    }
}
impl Default for Builder {
fn default() -> Self {
Self::new()
}
}
// ===== impl LengthDelimitedCodecError =====
impl fmt::Debug for LengthDelimitedCodecError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LengthDelimitedCodecError").finish()
}
}
impl fmt::Display for LengthDelimitedCodecError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("frame size too big")
}
}
impl StdError for LengthDelimitedCodecError {}
// ---------------------------------------------------------------------------
/*
Copyright 2014-2015 Zumero, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#![feature(box_syntax)]
#![feature(associated_consts)]
#![feature(vec_push_all)]
use std::collections::HashMap;
use std::collections::HashSet;
extern crate bson;
extern crate misc;
extern crate elmo;
extern crate lsm;
use lsm::ICursor;
pub type Result<T> = elmo::Result<T>;
/*
this doesn't help.
pub struct WrapError {
err: lsm::Error,
}
impl From<WrapError> for elmo::Error {
fn from(err: WrapError) -> elmo::Error {
elmo::Error::Whatever(box err.err)
}
}
impl From<lsm::Error> for WrapError {
fn from(err: lsm::Error) -> WrapError {
WrapError {
err: err
}
}
}
impl Into<WrapError> for lsm::Error {
fn into(self) -> WrapError {
WrapError {
err: self
}
}
}
*/
/*
the compiler won't allow this
the impl does not reference any types defined in this crate;
only traits defined in the current crate can be implemented for arbitrary types
impl From<lsm::Error> for elmo::Error {
fn from(err: lsm::Error) -> elmo::Error {
elmo::Error::Whatever(box err)
}
}
*/
// Everything needed to maintain one index while writing documents.
// TODO Clone is part of temp hack, remove
#[derive(Clone)]
struct MyIndexPrep {
    index_id: u64,
    // raw index options document as stored in the catalog
    options: bson::Document,
    // normalized (key path, index type) pairs for this index
    normspec: Vec<(String,elmo::IndexType)>,
    // per-field weights — presumably only present for text indexes; confirm
    weights: Option<HashMap<String,i32>>,
    // TODO maybe keep the options we need here directly, sparse and unique
}
// Cached per-collection state used while writing documents, so the catalog
// does not have to be consulted for every record.
// TODO Clone is part of temp hack, remove
#[derive(Clone)]
struct MyCollectionWriter {
    // db and coll are only here for caching
    db: String,
    coll: String,
    // prepared info for every index on this collection
    indexes: Vec<MyIndexPrep>,
    collection_id: u64,
}
// Streaming read handle over a collection: just a boxed row iterator.
struct MyCollectionReader {
    seq: Box<Iterator<Item=Result<elmo::Row>>>,
    // TODO need counts here
}
// Adapts an LSM range cursor into an iterator of BSON document rows.
struct RangeCursorBsonValueIterator {
    cursor: lsm::RangeCursor,
}
impl RangeCursorBsonValueIterator {
    // Decode the value under the cursor into an elmo::Row and advance the
    // cursor; returns Ok(None) once the cursor is exhausted.
    fn iter_next(&mut self) -> Result<Option<elmo::Row>> {
        if self.cursor.IsValid() {
            // The inner block scopes the value borrow so the cursor can be
            // advanced (mutably borrowed) afterwards.
            let row = {
                let v = try!(self.cursor.LiveValueRef().map_err(elmo::wrap_err));
                let v = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
                let v = v.into_value();
                let row = elmo::Row {
                    doc: v,
                    // pos/score are not produced by a plain range scan
                    pos: None,
                    score: None,
                };
                row
            };
            try!(self.cursor.Next().map_err(elmo::wrap_err));
            Ok(Some(row))
        } else {
            Ok(None)
        }
    }
}
impl Iterator for RangeCursorBsonValueIterator {
    type Item = Result<elmo::Row>;

    // Flatten Result<Option<_>> from iter_next into the Option<Result<_>>
    // shape the Iterator trait wants.
    fn next(&mut self) -> Option<Self::Item> {
        match self.iter_next() {
            // TODO will this put us in situations where the iterator just
            // returns errors forever?
            Err(e) => Some(Err(e)),
            Ok(Some(row)) => Some(Ok(row)),
            Ok(None) => None,
        }
    }
}
// Adapts an LSM range cursor into an iterator of varint-encoded u64 values.
struct RangeCursorVarintIterator {
    cursor: lsm::RangeCursor,
}
impl RangeCursorVarintIterator {
    // Decode the varint under the cursor and advance; Ok(None) when the
    // cursor is exhausted.
    fn iter_next(&mut self) -> Result<Option<u64>> {
        if !self.cursor.IsValid() {
            return Ok(None);
        }
        // The inner block scopes the value borrow so the cursor can be
        // advanced (mutably borrowed) afterwards.
        let v = {
            let vref = try!(self.cursor.LiveValueRef().map_err(elmo::wrap_err));
            try!(vref.map(lsm_map_to_varint).map_err(elmo::wrap_err))
        };
        try!(self.cursor.Next().map_err(elmo::wrap_err));
        Ok(Some(v))
    }
}
impl Iterator for RangeCursorVarintIterator {
    type Item = Result<u64>;

    // Flatten Result<Option<_>> from iter_next into the Option<Result<_>>
    // shape the Iterator trait wants.
    fn next(&mut self) -> Option<Self::Item> {
        match self.iter_next() {
            // TODO will this put us in situations where the iterator just
            // returns errors forever?
            Err(e) => Some(Err(e)),
            Ok(Some(v)) => Some(Ok(v)),
            Ok(None) => None,
        }
    }
}
/// Read-only handle sharing the underlying connection.
struct MyReader {
    myconn: std::rc::Rc<MyConn>,
}
/// Write transaction state. Writes are staged in `pending` (keyed by the
/// encoded lsm key) and, presumably, flushed under `tx` when the
/// transaction commits — the commit path is not visible here; confirm.
struct MyWriter<'a> {
    myconn: std::rc::Rc<MyConn>,
    // the lsm write lock held for the lifetime of this writer
    tx: std::sync::MutexGuard<'a, lsm::WriteLock>,
    // staged mutations: Blob::Array = insert/update, Blob::Tombstone = delete
    pending: HashMap<Box<[u8]>,lsm::Blob>,
    // high-water-mark caches for id allocation (lazily discovered by
    // seeking; see the use_next_* methods)
    max_collection_id: Option<u64>,
    max_record_id: HashMap<u64, u64>,
    max_index_id: HashMap<u64, u64>,
    // cached collection writer for the most recently targeted (db, coll)
    cw: Option<MyCollectionWriter>,
    // cursor over committed data; NOTE(review): does not see `pending`
    cursor: lsm::LivingCursor,
}
/// Shared wrapper around the underlying lsm database handle.
struct MyConn {
    conn: lsm::db,
}
/// Public-facing connection handle; shares the inner MyConn via Rc.
struct MyPublicConn {
    myconn: std::rc::Rc<MyConn>,
}
// TODO should all these value encodings be switching around to have
// the collid first, before the tag? drop_collection gets trivial.
// and the collection gets good locality of storage.
// TODO should we have record ids? or just have the _id of each record
// be its actual key?
//
// the pk can be big, and it will be duplicated,
// once in the key, and once in the bson doc itself.
//
// the pk or id is also duplicated in the index entries.
// and in their backlinks.
//
// if we don't have a recid, how would we store a document that doesn't
// have any _id at all?
// Key-space layout: every lsm key starts with one of the tag bytes below,
// followed by the fields documented on each tag.
/// key:
///    (tag)
///    db name (len + str)
///    coll name (len + str)
/// value:
///    collid (varint)
pub const NAME_TO_COLLECTION_ID: u8 = 10;
/// key:
///    (tag)
///    collid (varint)
/// value:
///    properties (bson):
///        d: db name (str)
///        c: coll name (str)
///        o: options (document)
pub const COLLECTION_ID_TO_PROPERTIES: u8 = 11;
// exclusive upper bound of the collection-metadata tag range; used as a
// SEEK_LE target when discovering the max collection id
pub const COLLECTION_ID_BOUNDARY: u8 = COLLECTION_ID_TO_PROPERTIES + 1;
/// key:
///    (tag)
///    collid (varint)
///    index name (len + str)
/// value:
///    indexid (varint)
pub const NAME_TO_INDEX_ID: u8 = 20;
/// key:
///    (tag)
///    collid (varint)
///    indexid (varint)
/// value:
///    properties (bson):
///        n: name (str)
///        s: spec (bson)
///        o: options (bson)
pub const INDEX_ID_TO_PROPERTIES: u8 = 21;
/// index id reserved for the mongo _id index of every collection
pub const PRIMARY_INDEX_ID: u64 = 0;
/// key:
///    (tag)
///    collid (varint)
///    recid (varint)
/// value:
///    doc (bson)
pub const RECORD: u8 = 30;
/// key:
///    (tag)
///    collid (varint)
///    indexid (varint)
///    k (len + bytes)
///    recid (varint) (not present when index option unique)
/// value:
///    recid (varint) (present only when index option unique?)
pub const INDEX_ENTRY: u8 = 40;
/// key:
///    (tag)
///    collid (varint)
///    indexid (varint)
///    recid (varint)
///    (complete index key)
/// value:
///    (none)
pub const RECORD_ID_TO_INDEX_ENTRY: u8 = 41;
/// Build the NAME_TO_COLLECTION_ID lookup key for (db, coll):
/// (tag, len+db bytes, len+coll bytes).
fn encode_key_name_to_collection_id(db: &str, coll: &str) -> Box<[u8]> {
    // From the mongo docs:
    // The maximum length of the collection namespace, which includes the database name, the dot
    // (.) separator, and the collection name (i.e. <database>.<collection>), is 120 bytes.
    // That limit is why a one-byte length prefix suffices, but make any
    // violation loud in debug builds instead of silently truncating the
    // length byte and corrupting the key.
    let b_db = db.as_bytes();
    let b_coll = coll.as_bytes();
    debug_assert!(b_db.len() <= 255);
    debug_assert!(b_coll.len() <= 255);
    // exact capacity: tag + (len byte + bytes) for each name
    let mut k = Vec::with_capacity(1 + 1 + b_db.len() + 1 + b_coll.len());
    k.push(NAME_TO_COLLECTION_ID);
    k.push(b_db.len() as u8);
    k.push_all(b_db);
    k.push(b_coll.len() as u8);
    k.push_all(b_coll);
    k.into_boxed_slice()
}
/// Decode a (one-byte len, utf8 bytes) string field starting at `cur`.
/// Returns the string and the position just past it.
fn decode_string_from_key(k: &lsm::KeyRef, cur: usize) -> Result<(String, usize)> {
    // TODO should we treat the len before the string as a varint instead of always a byte?
    let len = try!(k.u8_at(cur).map_err(elmo::wrap_err)) as usize;
    let start = cur + 1;
    let s = try!(k.map_range(start, start + len, lsm_map_to_string).map_err(elmo::wrap_err));
    Ok((s, start + len))
}
/// Decode a varint field starting at `cur`.
/// Returns the value and the position just past it.
fn decode_varint_from_key(k: &lsm::KeyRef, cur: usize) -> Result<(u64, usize)> {
    // the first byte of a varint determines how many bytes it occupies
    let first_byte = try!(k.u8_at(cur).map_err(elmo::wrap_err));
    let len = misc::varint::first_byte_to_len(first_byte);
    let n = try!(k.map_range(cur, cur + len, lsm_map_to_varint).map_err(elmo::wrap_err));
    Ok((n, cur + len))
}
/// Decode (db, coll) from a NAME_TO_COLLECTION_ID key.
fn decode_key_name_to_collection_id(k: &lsm::KeyRef) -> Result<(String, String)> {
    // start at offset 1, skipping the tag byte (k[0] must be NAME_TO_COLLECTION_ID)
    let (db, pos) = try!(decode_string_from_key(k, 1));
    let (coll, _) = try!(decode_string_from_key(k, pos));
    Ok((db, coll))
}
/// Decode (collection id, index name) from a NAME_TO_INDEX_ID key.
fn decode_key_name_to_index_id(k: &lsm::KeyRef) -> Result<(u64, String)> {
    // start at offset 1, skipping the tag byte (k[0] must be NAME_TO_INDEX_ID)
    let (collection_id, pos) = try!(decode_varint_from_key(k, 1));
    let (name, _) = try!(decode_string_from_key(k, pos));
    Ok((collection_id, name))
}
/// Decode (collection id, record id) from a RECORD key.
fn decode_key_record(k: &lsm::KeyRef) -> Result<(u64, u64)> {
    // start at offset 1, skipping the tag byte (k[0] must be RECORD)
    let (collection_id, pos) = try!(decode_varint_from_key(k, 1));
    let (record_id, _) = try!(decode_varint_from_key(k, pos));
    Ok((collection_id, record_id))
}
fn decode_key_collection_id_to_properties(k: &lsm::KeyRef) -> Result<(u64)> {
// k[0] must be COLLECTION_ID_TO_PROPERTIES
let cur = 1;
let (collection_id, _) = try!(decode_varint_from_key(k, cur));
Ok(collection_id)
}
/// Decode (collection id, index id) from an INDEX_ID_TO_PROPERTIES key.
fn decode_key_index_id_to_properties(k: &lsm::KeyRef) -> Result<(u64, u64)> {
    // start at offset 1, skipping the tag byte (k[0] must be INDEX_ID_TO_PROPERTIES)
    let (collection_id, pos) = try!(decode_varint_from_key(k, 1));
    let (index_id, _) = try!(decode_varint_from_key(k, pos));
    Ok((collection_id, index_id))
}
/// Decode a RECORD_ID_TO_INDEX_ENTRY (backlink) key into
/// (collection id, index id, record id, complete index entry bytes).
fn decode_key_backlink(k: &lsm::KeyRef) -> Result<(u64, u64, u64, Box<[u8]>)> {
    // start at offset 1, skipping the tag byte (k[0] must be RECORD_ID_TO_INDEX_ENTRY)
    let (collection_id, pos) = try!(decode_varint_from_key(k, 1));
    let (index_id, pos) = try!(decode_varint_from_key(k, pos));
    let (record_id, pos) = try!(decode_varint_from_key(k, pos));
    // everything remaining in the key is the complete index entry
    let index_entry = try!(k.map_range(pos, k.len(), lsm_map_to_box).map_err(elmo::wrap_err));
    Ok((collection_id, index_id, record_id, index_entry))
}
/// Append the varint encoding of `n` to `v`.
fn push_varint(v: &mut Vec<u8>, n: u64) {
    // a varint occupies at most 9 bytes
    let mut scratch = [0; 9];
    let mut used = 0;
    misc::varint::write(&mut scratch, &mut used, n);
    v.push_all(&scratch[0 .. used]);
}
/// Build a key (or key prefix) of the form (tag, id-as-varint).
fn encode_key_tag_and_varint(tag: u8, id: u64) -> Vec<u8> {
    // one tag byte plus at most 9 varint bytes, so preallocate
    // (resolves the old TODO about capacity)
    let mut k = Vec::with_capacity(10);
    k.push(tag);
    push_varint(&mut k, id);
    k
}
/// Build the COLLECTION_ID_TO_PROPERTIES key: (tag, collid).
fn encode_key_collection_id_to_properties(collection_id: u64) -> Vec<u8> {
    encode_key_tag_and_varint(COLLECTION_ID_TO_PROPERTIES, collection_id)
}
fn encode_key_index_id_to_properties(collection_id: u64, index_id: u64) -> Vec<u8> {
// TODO capacity
let mut k = vec![];
k.push(INDEX_ID_TO_PROPERTIES);
push_varint(&mut k, collection_id);
push_varint(&mut k, index_id);
k
}
fn encode_key_name_to_index_id(collection_id: u64, name: &str) -> Vec<u8> {
// TODO capacity
let mut k = vec![];
k.push(NAME_TO_INDEX_ID);
// From the mongo docs:
// The maximum length of the collection namespace, which includes the database name, the dot
// (.) separator, and the collection name (i.e. <database>.<collection>), is 120 bytes.
let ba = u64_to_boxed_varint(collection_id);
k.push_all(&ba);
let b = name.as_bytes();
k.push(b.len() as u8);
k.push_all(b);
k
}
/// Value-mapping callback: interpret raw bytes as a utf8 string.
/// Fails if the bytes are not valid utf8.
fn lsm_map_to_string(ba: &[u8]) -> lsm::Result<String> {
    let s = try!(std::str::from_utf8(ba));
    Ok(s.to_owned())
}
/// Value-mapping callback: interpret raw bytes as a single varint.
fn lsm_map_to_varint(ba: &[u8]) -> lsm::Result<u64> {
    let mut cur = 0;
    let n = misc::varint::read(ba, &mut cur);
    // TODO assert cur used up all of ba?
    Ok(n)
}
fn lsm_map_to_box(ba: &[u8]) -> lsm::Result<Box<[u8]>> {
// TODO capacity
let mut k = vec![];
k.push_all(ba);
let k = k.into_boxed_slice();
Ok(k)
}
fn u64_to_boxed_varint(n: u64) -> Box<[u8]> {
let mut buf = [0; 9];
let mut cur = 0;
misc::varint::write(&mut buf, &mut cur, n);
let mut v = Vec::with_capacity(cur);
v.push_all(&buf[0 .. cur]);
let v = v.into_boxed_slice();
v
}
/// Value-mapping callback: parse the raw bytes as a bson document,
/// converting the bson error into an lsm error.
fn lsm_map_to_bson(ba: &[u8]) -> lsm::Result<bson::Document> {
    bson::Document::from_bson(ba).map_err(lsm::wrap_err)
}
fn find_record(cursor: &mut lsm::LivingCursor, collection_id: u64, id: &bson::Value) -> Result<Option<u64>> {
let mut k = vec![];
k.push(INDEX_ENTRY);
push_varint(&mut k, collection_id);
push_varint(&mut k, PRIMARY_INDEX_ID);
let ba = bson::Value::encode_one_for_index(id, false);
k.push_all(&ba);
get_value_for_key_as_varint(cursor, &k)
}
/// Exact-match lookup: seek to `k` and, if found, decode the value as a
/// varint. Returns Ok(None) when the key is absent.
fn get_value_for_key_as_varint(cursor: &mut lsm::LivingCursor, k: &[u8]) -> Result<Option<u64>> {
    try!(cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_EQ).map_err(elmo::wrap_err));
    if !cursor.IsValid() {
        return Ok(None);
    }
    let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
    let n = try!(v.map(lsm_map_to_varint).map_err(elmo::wrap_err));
    Ok(Some(n))
}
/// Exact-match lookup: seek to `k` and, if found, parse the value as a
/// bson document. Returns Ok(None) when the key is absent.
fn get_value_for_key_as_bson(cursor: &mut lsm::LivingCursor, k: &[u8]) -> Result<Option<bson::Document>> {
    try!(cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_EQ).map_err(elmo::wrap_err));
    if !cursor.IsValid() {
        return Ok(None);
    }
    let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
    let doc = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
    Ok(Some(doc))
}
impl MyConn {
    /// Build a reader yielding every record in (db, coll), in record-id
    /// order. A nonexistent collection yields an empty reader.
    fn get_reader_collection_scan(&self, db: &str, coll: &str) -> Result<MyCollectionReader> {
        // check to see if the collection exists and get its id
        let k = encode_key_name_to_collection_id(db, coll);
        let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
        match try!(get_value_for_key_as_varint(&mut cursor, &k)) {
            None => {
                let rdr =
                    MyCollectionReader {
                        seq: box std::iter::empty(),
                    };
                Ok(rdr)
            },
            Some(collection_id) => {
                // scan the RECORD keys of this collection: strictly greater
                // than (RECORD, collid) and strictly less than
                // (RECORD, collid + 1)
                // TODO vec capacity
                let mut kmin = vec![];
                kmin.push(RECORD);
                push_varint(&mut kmin, collection_id);
                let kmin = kmin.into_boxed_slice();
                let min = lsm::Min::new(kmin, lsm::OpGt::GT);
                // TODO vec capacity
                let mut kmax = vec![];
                kmax.push(RECORD);
                push_varint(&mut kmax, collection_id + 1);
                let kmax = kmax.into_boxed_slice();
                let max = lsm::Max::new(kmax, lsm::OpLt::LT);
                let mut cursor = lsm::RangeCursor::new(cursor, min, max);
                try!(cursor.First().map_err(elmo::wrap_err));
                let seq =
                    RangeCursorBsonValueIterator {
                        cursor: cursor,
                    };
                let rdr =
                    MyCollectionReader {
                        seq: box seq,
                    };
                Ok(rdr)
            },
        }
    }
    /// Text-index scans are not implemented for this backend yet.
    fn get_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<MyCollectionReader> {
        unimplemented!();
    }
    /// Build a reader for a (non-text) index scan over `bounds`.
    /// Scans INDEX_ENTRY keys to collect record ids, deduplicates them,
    /// then looks up each RECORD by id (the moral equivalent of the join
    /// the sqlite backend did).
    fn get_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<MyCollectionReader> {
        let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
        let collection_id =
            match try!(get_value_for_key_as_varint(&mut cursor, &encode_key_name_to_collection_id(&ndx.db, &ndx.coll))) {
                Some(id) => id,
                None => return Err(elmo::Error::Misc(String::from("collection does not exist"))),
            };
        let index_id =
            match try!(get_value_for_key_as_varint(&mut cursor, &encode_key_name_to_index_id(collection_id, &ndx.name))) {
                Some(id) => id,
                None => return Err(elmo::Error::Misc(String::from("index does not exist"))),
            };
        // treat the byte array as a big-endian integer and add one, to get
        // an exclusive upper bound for a prefix range
        fn add_one(ba: &Vec<u8>) -> Vec<u8> {
            let mut a = ba.clone();
            let mut i = a.len() - 1;
            loop {
                if a[i] == 255 {
                    a[i] = 0;
                    if i == 0 {
                        panic!("TODO handle case where add_one to binary array overflows the first byte");
                    } else {
                        i = i - 1;
                    }
                } else {
                    a[i] = a[i] + 1;
                    break;
                }
            }
            a
        }
        // range cursor from two fully-encoded keys
        fn f_twok(cursor: lsm::LivingCursor, kmin: Box<[u8]>, kmax: Box<[u8]>, min_cmp: lsm::OpGt, max_cmp: lsm::OpLt) -> lsm::RangeCursor {
            let min = lsm::Min::new(kmin, min_cmp);
            let max = lsm::Max::new(kmax, max_cmp);
            let cursor = lsm::RangeCursor::new(cursor, min, max);
            cursor
        }
        // two-sided bounds: eqvals plus a min and a max component
        fn f_two(preface: Vec<u8>, cursor: lsm::LivingCursor, eqvals: elmo::QueryKey, minvals: elmo::QueryKey, maxvals: elmo::QueryKey, min_cmp: lsm::OpGt, max_cmp: lsm::OpLt) -> lsm::RangeCursor {
            let mut kmin = preface.clone();
            bson::Value::push_encode_multi_for_index(&mut kmin, &eqvals, Some(&minvals));
            let mut kmax = preface;
            bson::Value::push_encode_multi_for_index(&mut kmax, &eqvals, Some(&maxvals));
            let kmin = kmin.into_boxed_slice();
            let kmax = kmax.into_boxed_slice();
            f_twok(cursor, kmin, kmax, min_cmp, max_cmp)
        }
        // lower bound only; upper bound is the end of the index's key space
        fn f_gt(preface: Vec<u8>, cursor: lsm::LivingCursor, vals: elmo::QueryKey, min_cmp: lsm::OpGt) -> lsm::RangeCursor {
            let mut kmin = preface.clone();
            bson::Value::push_encode_multi_for_index(&mut kmin, &vals, None);
            let kmin = kmin.into_boxed_slice();
            let min = lsm::Min::new(kmin, min_cmp);
            let kmax = add_one(&preface);
            let kmax = kmax.into_boxed_slice();
            let max = lsm::Max::new(kmax, lsm::OpLt::LT);
            let cursor = lsm::RangeCursor::new(cursor, min, max);
            cursor
        }
        // upper bound only; lower bound is the start of the index's key space
        fn f_lt(preface: Vec<u8>, cursor: lsm::LivingCursor, vals: elmo::QueryKey, max_cmp: lsm::OpLt) -> lsm::RangeCursor {
            let mut kmax = preface.clone();
            bson::Value::push_encode_multi_for_index(&mut kmax, &vals, None);
            let kmax = kmax.into_boxed_slice();
            let max = lsm::Max::new(kmax, max_cmp);
            let kmin = preface.clone();
            let kmin = kmin.into_boxed_slice();
            let min = lsm::Min::new(kmin, lsm::OpGt::GT);
            let cursor = lsm::RangeCursor::new(cursor, min, max);
            cursor
        }
        // common prefix of every entry in this index: (tag, collid, indexid)
        let mut key_preface = vec![];
        key_preface.push(INDEX_ENTRY);
        push_varint(&mut key_preface, collection_id);
        push_varint(&mut key_preface, index_id);
        let mut cursor =
            match bounds {
                elmo::QueryBounds::GT(vals) => f_gt(key_preface, cursor, vals, lsm::OpGt::GT),
                elmo::QueryBounds::GTE(vals) => f_gt(key_preface, cursor, vals, lsm::OpGt::GTE),
                elmo::QueryBounds::LT(vals) => f_lt(key_preface, cursor, vals, lsm::OpLt::LT),
                elmo::QueryBounds::LTE(vals) => f_lt(key_preface, cursor, vals, lsm::OpLt::LTE),
                elmo::QueryBounds::GT_LT(eqvals, minvals, maxvals) => f_two(key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GT, lsm::OpLt::LT),
                elmo::QueryBounds::GTE_LT(eqvals, minvals, maxvals) => f_two(key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GTE, lsm::OpLt::LT),
                elmo::QueryBounds::GT_LTE(eqvals, minvals, maxvals) => f_two(key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GT, lsm::OpLt::LTE),
                elmo::QueryBounds::GTE_LTE(eqvals, minvals, maxvals) => f_two(key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GTE, lsm::OpLt::LTE),
                elmo::QueryBounds::EQ(vals) => {
                    // equality is a prefix range: [encoded vals, add_one of it)
                    let mut kmin = key_preface.clone();
                    bson::Value::push_encode_multi_for_index(&mut kmin, &vals, None);
                    let kmax = add_one(&kmin);
                    let kmin = kmin.into_boxed_slice();
                    let kmax = kmax.into_boxed_slice();
                    f_twok(cursor, kmin, kmax, lsm::OpGt::GTE, lsm::OpLt::LT)
                },
            };
        try!(cursor.First().map_err(elmo::wrap_err));
        let seq =
            RangeCursorVarintIterator {
                cursor: cursor,
            };
        let seq = {
            // DISTINCT. we don't want this producing the same record twice.
            // NOTE(review): this materializes the entire id list eagerly.
            let mut q = try!(seq.collect::<Result<Vec<_>>>());
            let mut seen = HashSet::new();
            let mut a = vec![];
            for x in q {
                if !seen.contains(&x) {
                    a.push(x);
                    seen.insert(x);
                }
            }
            a.into_iter().map(|x| Ok(x))
        };
        // the iterator above yields record ids.
        // now we need something that, for each record id yielded by an
        // index entry, looks up the actual record and yields THAT. in
        // sqlite, this was a join.
        let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
        let seq = seq.map(
            move |record_id: Result<u64>| -> Result<elmo::Row> {
                match record_id {
                    Ok(record_id) => {
                        // exact-match seek on (RECORD, collid, recid)
                        let mut k = vec![];
                        k.push(RECORD);
                        push_varint(&mut k, collection_id);
                        push_varint(&mut k, record_id);
                        try!(cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_EQ).map_err(elmo::wrap_err));
                        if cursor.IsValid() {
                            let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
                            let v = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
                            let v = v.into_value();
                            let row = elmo::Row {
                                doc: v,
                                pos: None,
                                score: None,
                            };
                            Ok(row)
                        } else {
                            // an index entry pointed at a missing record
                            Err(elmo::Error::Misc(String::from("record id not found?!?")))
                        }
                    },
                    Err(e) => {
                        Err(e)
                    },
                }
            });
        let rdr =
            MyCollectionReader {
                seq: box seq,
            };
        Ok(rdr)
    }
    // TODO this could maybe return an iterator instead of a vec
    /// List (collection id, index id, properties) for every index, or only
    /// for one collection when `collection_id` is Some.
    fn base_list_indexes(&self, cursor: &mut lsm::LivingCursor, collection_id: Option<u64>) -> Result<Vec<(u64, u64, bson::Document)>> {
        // prefix is either just the tag, or tag + collid
        let q =
            match collection_id {
                Some(collection_id) => {
                    // TODO capacity
                    let mut k = vec![];
                    k.push(INDEX_ID_TO_PROPERTIES);
                    push_varint(&mut k, collection_id);
                    k.into_boxed_slice()
                },
                None => {
                    // TODO the vec! macro set capacity to match?
                    let k = vec![INDEX_ID_TO_PROPERTIES];
                    k.into_boxed_slice()
                },
            };
        let mut cursor = lsm::PrefixCursor::new(cursor, q);
        let mut a = vec![];
        try!(cursor.First().map_err(elmo::wrap_err));
        while cursor.IsValid() {
            // inner block scopes the KeyRef/value borrows before Next()
            let (collection_id, index_id, props) = {
                let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
                let (collection_id, index_id) = try!(decode_key_index_id_to_properties(&k));
                let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
                let props = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
                (collection_id, index_id, props)
            };
            a.push((collection_id, index_id, props));
            try!(cursor.Next().map_err(elmo::wrap_err));
        }
        Ok(a)
    }
    /// List full IndexInfo for every index in the database, resolving each
    /// index's (db, coll) names via its collection's properties record.
    fn list_all_index_infos(&self) -> Result<Vec<elmo::IndexInfo>> {
        let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
        let indexes = try!(self.base_list_indexes(&mut cursor, None));
        let indexes = indexes.into_iter().map(
            |(collection_id, index_id, mut index_properties)| {
                // TODO the extra lookup here is pretty expensive.
                // maybe we should just store the db/coll names here too?
                let k = encode_key_collection_id_to_properties(collection_id);
                let mut collection_properties = try!(get_value_for_key_as_bson(&mut cursor, &k)).unwrap_or(bson::Document::new());
                let db = try!(collection_properties.must_remove_string("d"));
                let coll = try!(collection_properties.must_remove_string("c"));
                //let options = try!(collection_properties.must_remove_document("o"));
                let name = try!(index_properties.must_remove_string("n"));
                let spec = try!(index_properties.must_remove_document("s"));
                let options = try!(index_properties.must_remove_document("o"));
                let info = elmo::IndexInfo {
                    db: String::from(db),
                    coll: String::from(coll),
                    name: String::from(name),
                    spec: spec,
                    options: options,
                };
                Ok(info)
            }).collect::<Result<Vec<_>>>();
        let indexes = try!(indexes);
        Ok(indexes)
    }
    /// List full IndexInfo for one collection; empty vec if the collection
    /// does not exist.
    fn list_index_infos_for_collection(&self, db: &str, coll: &str) -> Result<Vec<elmo::IndexInfo>> {
        let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
        let k = encode_key_name_to_collection_id(db, coll);
        match try!(get_value_for_key_as_varint(&mut cursor, &k)) {
            None => {
                Ok(vec![])
            },
            Some(collection_id) => {
                let indexes = try!(self.base_list_indexes(&mut cursor, Some(collection_id)));
                let indexes = indexes.into_iter().map(
                    |(collection_id, index_id, mut index_properties)| {
                        let name = try!(index_properties.must_remove_string("n"));
                        let spec = try!(index_properties.must_remove_document("s"));
                        let options = try!(index_properties.must_remove_document("o"));
                        let info = elmo::IndexInfo {
                            db: String::from(db),
                            coll: String::from(coll),
                            name: String::from(name),
                            spec: spec,
                            options: options,
                        };
                        Ok(info)
                    }).collect::<Result<Vec<_>>>();
                let indexes = try!(indexes);
                Ok(indexes)
            }
        }
    }
    // TODO this could maybe return an iterator instead of a vec
    /// List (collection id, db, coll) for every collection, by scanning
    /// all NAME_TO_COLLECTION_ID keys.
    fn base_list_collections(&self, cursor: &mut lsm::LivingCursor) -> Result<Vec<(u64, String, String)>> {
        let mut cursor = lsm::PrefixCursor::new(cursor, box [NAME_TO_COLLECTION_ID]);
        let mut a = vec![];
        // TODO might need to sort by the coll name? the sqlite version does.
        try!(cursor.First().map_err(elmo::wrap_err));
        while cursor.IsValid() {
            {
                // inner block scopes the borrows before Next()
                let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
                let (db, coll) = try!(decode_key_name_to_collection_id(&k));
                let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
                let collection_id = try!(v.map(lsm_map_to_varint).map_err(elmo::wrap_err));
                a.push((collection_id, db, coll));
            }
            try!(cursor.Next().map_err(elmo::wrap_err));
        }
        Ok(a)
    }
    /// List CollectionInfo (names plus stored options) for every collection.
    fn base_list_collection_infos(&self) -> Result<Vec<elmo::CollectionInfo>> {
        let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
        let collections = try!(self.base_list_collections(&mut cursor));
        let collections = collections.into_iter().map(
            |(collection_id, db, coll)| {
                let k = encode_key_collection_id_to_properties(collection_id);
                let mut collection_properties = try!(get_value_for_key_as_bson(&mut cursor, &k)).unwrap_or(bson::Document::new());
                //let db = try!(collection_properties.must_remove_string("d"));
                //let coll = try!(collection_properties.must_remove_string("c"));
                let options = try!(collection_properties.must_remove_document("o"));
                let info = elmo::CollectionInfo {
                    db: db,
                    coll: coll,
                    options: options,
                };
                Ok(info)
            }).collect::<Result<Vec<_>>>();
        let collections = try!(collections);
        Ok(collections)
    }
}
impl<'a> MyWriter<'a> {
    /// Allocate the next index id for `collection_id`, caching the
    /// high-water mark in self.max_index_id so later calls in this
    /// transaction just increment the cache.
    fn use_next_index_id(&mut self, collection_id: u64) -> Result<u64> {
        match self.max_index_id.entry(collection_id) {
            std::collections::hash_map::Entry::Occupied(mut e) => {
                // cached: bump and return
                let n = e.get_mut();
                *n = *n + 1;
                Ok(*n)
            },
            std::collections::hash_map::Entry::Vacant(e) => {
                let n = {
                    // TODO capacity
                    // SEEK_LE on (INDEX_ID_TO_PROPERTIES, collid + 1) lands
                    // on the largest existing key below that bound, i.e.
                    // this collection's highest index id if it has any
                    let mut k = vec![];
                    k.push(INDEX_ID_TO_PROPERTIES);
                    push_varint(&mut k, collection_id + 1);
                    try!(self.cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_LE).map_err(elmo::wrap_err));
                    if self.cursor.IsValid() {
                        let k = try!(self.cursor.KeyRef().map_err(elmo::wrap_err));
                        if try!(k.u8_at(0).map_err(elmo::wrap_err)) == INDEX_ID_TO_PROPERTIES {
                            let (k_collection_id, index_id) = try!(decode_key_index_id_to_properties(&k));
                            if collection_id == k_collection_id {
                                1 + index_id
                            } else {
                                // landed in another collection's range:
                                // this collection has no stored indexes yet
                                1 + PRIMARY_INDEX_ID
                            }
                        } else {
                            1 + PRIMARY_INDEX_ID
                        }
                    } else {
                        1 + PRIMARY_INDEX_ID
                    }
                };
                e.insert(n);
                Ok(n)
            },
        }
    }
    /// Allocate the next record id for `collection_id`, caching the
    /// high-water mark in self.max_record_id. Record ids start at 1.
    fn use_next_record_id(&mut self, collection_id: u64) -> Result<u64> {
        match self.max_record_id.entry(collection_id) {
            std::collections::hash_map::Entry::Occupied(mut e) => {
                // cached: bump and return
                let n = e.get_mut();
                *n = *n + 1;
                Ok(*n)
            },
            std::collections::hash_map::Entry::Vacant(e) => {
                let n = {
                    // TODO capacity
                    // SEEK_LE on (RECORD, collid + 1) lands on this
                    // collection's highest record key, if any
                    let mut k = vec![];
                    k.push(RECORD);
                    push_varint(&mut k, collection_id + 1);
                    try!(self.cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_LE).map_err(elmo::wrap_err));
                    if self.cursor.IsValid() {
                        let k = try!(self.cursor.KeyRef().map_err(elmo::wrap_err));
                        if try!(k.u8_at(0).map_err(elmo::wrap_err)) == RECORD {
                            let (k_collection_id, record_id) = try!(decode_key_record(&k));
                            if collection_id == k_collection_id {
                                1 + record_id
                            } else {
                                // no records for this collection yet
                                1
                            }
                        } else {
                            1
                        }
                    } else {
                        1
                    }
                };
                e.insert(n);
                Ok(n)
            },
        }
    }
    /// Allocate the next collection id, caching the high-water mark in
    /// self.max_collection_id. Collection ids start at 1.
    fn use_next_collection_id(&mut self) -> Result<u64> {
        match self.max_collection_id {
            Some(n) => {
                // cached: bump and return
                let n = n + 1;
                self.max_collection_id = Some(n);
                Ok(n)
            },
            None => {
                let n = {
                    // SEEK_LE on the tag boundary lands on the highest
                    // COLLECTION_ID_TO_PROPERTIES key, if any exist
                    try!(self.cursor.SeekRef(&lsm::KeyRef::from_boxed_slice(box [COLLECTION_ID_BOUNDARY]), lsm::SeekOp::SEEK_LE).map_err(elmo::wrap_err));
                    if self.cursor.IsValid() {
                        let k = try!(self.cursor.KeyRef().map_err(elmo::wrap_err));
                        if try!(k.u8_at(0).map_err(elmo::wrap_err)) == COLLECTION_ID_TO_PROPERTIES {
                            let collection_id = try!(decode_key_collection_id_to_properties(&k));
                            1 + collection_id
                        } else {
                            1
                        }
                    } else {
                        1
                    }
                };
                self.max_collection_id = Some(n);
                Ok(n)
            },
        }
    }
fn list_indexes_for_collection_writer(&mut self, collection_id: u64) -> Result<Vec<MyIndexPrep>> {
let indexes = try!(self.myconn.base_list_indexes(&mut self.cursor, Some(collection_id)));
let indexes = indexes.into_iter().map(
|(_, index_id, mut index_properties)| {
//let name = try!(index_properties.must_remove_string("n"));
let spec = try!(index_properties.must_remove_document("s"));
let options = try!(index_properties.must_remove_document("o"));
// TODO we might want to grab unique and sparse from options now, like:
let unique =
match options.get("unique") {
Some(&bson::Value::BBoolean(b)) => b,
_ => false,
};
let sparse =
match options.get("sparse") {
Some(&bson::Value::BBoolean(b)) => b,
_ => false,
};
let (normspec, weights) = try!(elmo::get_normalized_spec(&spec, &options));
let prep = MyIndexPrep {
index_id: index_id,
options: options,
normspec: normspec,
weights: weights,
};
Ok(prep)
}).collect::<Result<Vec<_>>>();
let indexes = try!(indexes);
Ok(indexes)
}
    /// Build a MyCollectionWriter for (db, coll), creating the collection
    /// if it does not exist yet.
    fn make_collection_writer(&mut self, db: &str, coll: &str) -> Result<MyCollectionWriter> {
        let (just_created, collection_id) = try!(self.base_create_collection(db, coll, bson::Document::new()));
        let indexes = {
            // TODO
            // if the collection was just created, there will be no indexes found
            // think of the following as a temporary hack.
            // we currently have no way of including anything written during the
            // current transaction in queries.
            if just_created {
                // fabricate the prep for the implicit unique _id index,
                // since it only exists in self.pending so far
                let spec = bson::Document {pairs: vec![(String::from("_id"), bson::Value::BInt32(1))]};
                let options = bson::Document {pairs: vec![(String::from("unique"), bson::Value::BBoolean(true))]};
                let (normspec, weights) = try!(elmo::get_normalized_spec(&spec, &options));
                let prep = MyIndexPrep {
                    index_id: PRIMARY_INDEX_ID,
                    options: options,
                    normspec: normspec,
                    weights: weights,
                };
                vec![prep]
            } else {
                try!(self.list_indexes_for_collection_writer(collection_id))
            }
        };
        let c = MyCollectionWriter {
            db: String::from(db),
            coll: String::from(coll),
            indexes: indexes,
            collection_id: collection_id,
        };
        Ok(c)
    }
fn get_collection_writer(&mut self, db: &str, coll: &str) -> Result<MyCollectionWriter> {
let need_cw =
if self.cw.is_none() {
//println!("cw is none");
true
} else {
let cw = self.cw.as_ref().unwrap();
if cw.db != db || cw.coll != coll {
true
} else {
//println!("cw doesn't match");
false
}
};
if need_cw {
let cw = try!(self.make_collection_writer(db, coll));
self.cw = Some(cw);
}
// TODO this is an awful approach here. clone. temporary workaround.
match self.cw {
Some(ref cw) => {
Ok(cw.clone())
},
None => {
unreachable!();
},
}
}
    /// Stage a tombstone in self.pending for every committed key that
    /// starts with `prefix`.
    fn delete_by_prefix(&mut self, prefix: Box<[u8]>) -> Result<()> {
        // TODO it would be nice if lsm had a "graveyard" delete, a way to do a
        // blind delete by prefix.
        // note: this walks self.cursor (committed data) while inserting into
        // self.pending -- disjoint field borrows
        let mut cursor = lsm::PrefixCursor::new(&mut self.cursor, prefix);
        try!(cursor.First().map_err(elmo::wrap_err));
        while cursor.IsValid() {
            {
                // inner block scopes the KeyRef borrow before Next()
                let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
                self.pending.insert(k.into_boxed_slice(), lsm::Blob::Tombstone);
            }
            try!(cursor.Next().map_err(elmo::wrap_err));
        }
        Ok(())
    }
fn delete_by_collection_id_prefix(&mut self, tag: u8, collection_id: u64) -> Result<()> {
let mut k = vec![];
k.push(tag);
push_varint(&mut k, collection_id);
self.delete_by_prefix(k.into_boxed_slice())
}
fn delete_by_index_id_prefix(&mut self, tag: u8, collection_id: u64, index_id: u64) -> Result<()> {
let mut k = vec![];
k.push(tag);
push_varint(&mut k, collection_id);
push_varint(&mut k, index_id);
self.delete_by_prefix(k.into_boxed_slice())
}
    /// Remove all records and index entries of (db, coll) but keep the
    /// collection and its index definitions. Creates the collection if it
    /// doesn't exist. Returns true iff the collection was created here.
    fn base_clear_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
        let k = encode_key_name_to_collection_id(db, coll);
        match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
            None => {
                // TODO base_create_collection checks AGAIN to see if the collection exists
                let (created, _) = try!(self.base_create_collection(db, coll, bson::Document::new()));
                Ok(created)
            },
            Some(collection_id) => {
                // all of the following tags are followed immediately by the
                // collection_id, so we can delete by prefix:
                try!(self.delete_by_collection_id_prefix(RECORD, collection_id));
                try!(self.delete_by_collection_id_prefix(INDEX_ENTRY, collection_id));
                try!(self.delete_by_collection_id_prefix(RECORD_ID_TO_INDEX_ENTRY, collection_id));
                Ok(false)
            },
        }
    }
    /// Create an index, building entries for all existing records.
    /// Returns Ok(false) if an index of that name already exists with the
    /// same spec, Err if it exists with a different spec, Ok(true) if it
    /// was created.
    fn create_index(&mut self, info: elmo::IndexInfo) -> Result<bool> {
        //println!("create_index: {:?}", info);
        let (_created, collection_id) = try!(self.base_create_collection(&info.db, &info.coll, bson::Document::new()));
        let k = encode_key_name_to_index_id(collection_id, &info.name);
        match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
            Some(index_id) => {
                let k = encode_key_index_id_to_properties(collection_id, index_id);
                let mut index_properties = try!(get_value_for_key_as_bson(&mut self.cursor, &k)).unwrap_or(bson::Document::new());
                //let name = try!(index_properties.must_remove_string("n"));
                let spec = try!(index_properties.must_remove_document("s"));
                //let options = try!(index_properties.must_remove_document("o"));
                if spec != info.spec {
                    // note that we do not compare the options.
                    // I think mongo does it this way too.
                    Err(elmo::Error::Misc(String::from("index already exists with different keys")))
                } else {
                    Ok(false)
                }
            },
            None => {
                let index_id = try!(self.use_next_index_id(collection_id));
                // stage the name -> index id mapping
                self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(u64_to_boxed_varint(index_id)));
                // now create entries for all the existing records
                let unique =
                    match info.options.get("unique") {
                        Some(&bson::Value::BBoolean(b)) => b,
                        _ => false,
                    };
                let (normspec, weights) = try!(elmo::get_normalized_spec(&info.spec, &info.options));
                // scan every committed record of this collection
                let mut k = vec![];
                k.push(RECORD);
                push_varint(&mut k, collection_id);
                let mut cursor = lsm::PrefixCursor::new(&mut self.cursor, k.into_boxed_slice());
                try!(cursor.First().map_err(elmo::wrap_err));
                while cursor.IsValid() {
                    {
                        let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
                        let (_, record_id) = try!(decode_key_record(&k));
                        let ba_record_id = u64_to_boxed_varint(record_id);
                        let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
                        let v = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
                        let entries = try!(elmo::get_index_entries(&v, &normspec, &weights, &info.options));
                        let ba_collection_id = u64_to_boxed_varint(collection_id);
                        let ba_index_id = u64_to_boxed_varint(index_id);
                        for vals in entries {
                            // stage both the forward index entry and the
                            // backlink used to clean up on record delete
                            let (index_entry, backlink) = try!(Self::make_index_entry_pair(&ba_collection_id, &ba_index_id, &ba_record_id, vals, unique));
                            self.pending.insert(index_entry, lsm::Blob::Array(ba_record_id.clone()));
                            self.pending.insert(backlink, lsm::Blob::Array(box []));
                        }
                    }
                    try!(cursor.Next().map_err(elmo::wrap_err));
                }
                // now store the index id to properties
                let k = encode_key_index_id_to_properties(collection_id, index_id);
                let mut properties = bson::Document::new();
                properties.set_string("n", info.name);
                properties.set_document("s", info.spec);
                properties.set_document("o", info.options);
                self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(properties.to_bson_array().into_boxed_slice()));
                Ok(true)
            }
        }
    }
fn base_create_indexes(&mut self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
let mut v = Vec::new();
for info in what {
let b = try!(self.create_index(info));
v.push(b);
}
Ok(v)
}
    /// Drop a collection: its name mapping, properties, index definitions,
    /// records, index entries, and backlinks.
    /// Returns false if the collection does not exist.
    fn base_drop_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
        let k = encode_key_name_to_collection_id(db, coll);
        match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
            None => Ok(false),
            Some(collection_id) => {
                // stage deletion of the name -> collection id mapping
                self.pending.insert(k, lsm::Blob::Tombstone);
                // all of the following tags are followed immediately by the
                // collection_id, so we can delete by prefix:
                try!(self.delete_by_collection_id_prefix(COLLECTION_ID_TO_PROPERTIES, collection_id));
                try!(self.delete_by_collection_id_prefix(NAME_TO_INDEX_ID, collection_id));
                try!(self.delete_by_collection_id_prefix(INDEX_ID_TO_PROPERTIES, collection_id));
                try!(self.delete_by_collection_id_prefix(RECORD, collection_id));
                try!(self.delete_by_collection_id_prefix(INDEX_ENTRY, collection_id));
                try!(self.delete_by_collection_id_prefix(RECORD_ID_TO_INDEX_ENTRY, collection_id));
                Ok(true)
            },
        }
    }
    /// Rename a collection, optionally dropping an existing target first.
    /// Returns (created, collection id) -- created is true only in the
    /// odd case where the source didn't exist and the target was created
    /// empty instead. NOTE(review): is create-on-missing-source the mongo
    /// behavior we want here? confirm against jstests.
    fn base_rename_collection(&mut self, old_name: &str, new_name: &str, drop_target: bool) -> Result<(bool, u64)> {
        let (old_db, old_coll) = try!(bson::split_name(old_name));
        let (new_db, new_coll) = try!(bson::split_name(new_name));
        // jstests/core/rename8.js seems to think that renaming to/from a system collection is illegal unless
        // that collection is system.users, which is "whitelisted". for now, we emulate this behavior, even
        // though system.users isn't supported.
        if old_coll != "system.users" && old_coll.starts_with("system.") {
            return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed.")))
        }
        if new_coll != "system.users" && new_coll.starts_with("system.") {
            return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed.")))
        }
        if drop_target {
            let _deleted = try!(self.base_drop_collection(new_db, new_coll));
        }
        let k = encode_key_name_to_collection_id(old_db, old_coll);
        match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
            None => {
                let created = try!(self.base_create_collection(new_db, new_coll, bson::Document::new()));
                Ok(created)
            },
            Some(collection_id) => {
                // the rename itself: retire the old name key, point the new
                // name key at the same collection id
                self.pending.insert(k, lsm::Blob::Tombstone);
                let k = encode_key_name_to_collection_id(new_db, new_coll);
                self.pending.insert(k, lsm::Blob::Array(u64_to_boxed_varint(collection_id)));
                // update the names stored in the properties record too
                let k = encode_key_collection_id_to_properties(collection_id);
                match try!(get_value_for_key_as_bson(&mut self.cursor, &k)) {
                    Some(mut collection_properties) => {
                        collection_properties.set_str("d", new_db);
                        collection_properties.set_str("c", new_coll);
                        // TODO assert that "o" (options) is already there
                        // collection_properties.set_document("o", options);
                        self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(collection_properties.to_bson_array().into_boxed_slice()));
                    },
                    None => {
                        // TODO this should not be possible
                    },
                }
                Ok((false, collection_id))
            }
        }
    }
// Drops every collection that belongs to db_to_delete.
// Returns true when at least one collection was dropped.
fn base_drop_database(&mut self, db_to_delete: &str) -> Result<bool> {
    let mut dropped_any = false;
    for (_, db, coll) in try!(self.myconn.base_list_collections(&mut self.cursor)) {
        if db != db_to_delete {
            continue;
        }
        try!(self.base_drop_collection(&db, &coll));
        dropped_any = true;
    }
    Ok(dropped_any)
}
fn base_drop_index(&mut self, db: &str, coll: &str, name: &str) -> Result<bool> {
match try!(get_value_for_key_as_varint(&mut self.cursor, &encode_key_name_to_collection_id(&db, &coll))) {
None => Ok(false),
Some(collection_id) => {
let k = encode_key_name_to_index_id(collection_id, name);
match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
None => Ok(false),
Some(index_id) => {
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Tombstone);
try!(self.delete_by_index_id_prefix(INDEX_ID_TO_PROPERTIES, collection_id, index_id));
try!(self.delete_by_index_id_prefix(INDEX_ENTRY, collection_id, index_id));
try!(self.delete_by_index_id_prefix(RECORD_ID_TO_INDEX_ENTRY, collection_id, index_id));
Ok(true)
},
}
},
}
}
// Creates a collection if it does not already exist.
// Returns (created, collection_id); created is false when it already existed,
// in which case `options` is ignored.
fn base_create_collection(&mut self, db: &str, coll: &str, options: bson::Document) -> Result<(bool, u64)> {
    let k = encode_key_name_to_collection_id(db, coll);
    match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
        Some(id) => Ok((false, id)),
        None => {
            let collection_id = try!(self.use_next_collection_id());
            self.pending.insert(k, lsm::Blob::Array(u64_to_boxed_varint(collection_id)));
            // create mongo index for _id
            match options.get("autoIndexId") {
                Some(&bson::Value::BBoolean(false)) => {
                    // caller explicitly opted out of the automatic _id index
                },
                _ => {
                    let index_id = PRIMARY_INDEX_ID;
                    let k = encode_key_name_to_index_id(collection_id, "_id_");
                    self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(u64_to_boxed_varint(index_id)));
                    // store the index properties: name, spec {_id:1}, options {unique:true}
                    let k = encode_key_index_id_to_properties(collection_id, index_id);
                    let mut properties = bson::Document::new();
                    properties.set_str("n", "_id_");
                    let spec = bson::Document {pairs: vec![(String::from("_id"), bson::Value::BInt32(1))]};
                    let options = bson::Document {pairs: vec![(String::from("unique"), bson::Value::BBoolean(true))]};
                    properties.set_document("s", spec);
                    properties.set_document("o", options);
                    self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(properties.to_bson_array().into_boxed_slice()));
                },
            }
            // store the collection properties: db name, coll name, options
            let k = encode_key_collection_id_to_properties(collection_id);
            let mut properties = bson::Document::new();
            properties.set_str("d", db);
            properties.set_str("c", coll);
            properties.set_document("o", options);
            self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(properties.to_bson_array().into_boxed_slice()));
            Ok((true, collection_id))
        },
    }
}
// Removes every index entry (and its backlink) that refers to ba_record_id,
// for each index in `indexes`. Backlink keys embed the full index entry key,
// so each backlink found yields both keys to tombstone.
fn update_indexes_delete(&mut self, indexes: &Vec<MyIndexPrep>, ba_collection_id: &Box<[u8]>, ba_record_id: &Box<[u8]>) -> Result<()> {
    for ndx in indexes {
        // delete all index entries (and their back links) which involve this record_id.
        // this *could* be done by simply iterating over all the index entries,
        // unpacking each one, seeing if the record id matches, and remove it if so, etc.
        // back links make it faster, especially when the index is large.
        let mut backlink_prefix = vec![];
        backlink_prefix.push(RECORD_ID_TO_INDEX_ENTRY);
        backlink_prefix.push_all(ba_collection_id);
        push_varint(&mut backlink_prefix, ndx.index_id);
        backlink_prefix.push_all(ba_record_id);
        // TODO maybe store all the backlinks for a given record in a single
        // value? we could do a SeekRef EQ search instead? and just one for
        // all indexes for this record? but delete of an index would get much
        // harder. and add an index would require a lot of extra work to
        // rewrite all the backlinks, rather than just adding one for the new
        // index.
        // TODO
        // maybe we shouldn't have backlinks? maybe we should just take the record we
        // are deleting, generate all the index entries from it, and then delete each
        // one?
        let mut cursor = lsm::PrefixCursor::new(&mut self.cursor, backlink_prefix.into_boxed_slice());
        try!(cursor.First().map_err(elmo::wrap_err));
        while cursor.IsValid() {
            {
                // tombstone both the backlink key and the entry key it embeds
                let k_backlink = try!(cursor.KeyRef().map_err(elmo::wrap_err));
                let (k_collection_id, k_index_id, k_record_id, k_index_entry) = try!(decode_key_backlink(&k_backlink));
                self.pending.insert(k_backlink.into_boxed_slice(), lsm::Blob::Tombstone);
                self.pending.insert(k_index_entry, lsm::Blob::Tombstone);
            };
            try!(cursor.Next().map_err(elmo::wrap_err));
        }
    }
    Ok(())
}
// Builds the (index entry key, backlink key) pair for one set of index values.
// A non-unique index appends the record id to the entry key so that distinct
// records with equal index values still produce distinct keys.
fn make_index_entry_pair(ba_collection_id: &Box<[u8]>, ba_index_id: &Box<[u8]>, ba_record_id: &Box<[u8]>, vals: Vec<(bson::Value, bool)>, unique: bool) -> Result<(Box<[u8]>, Box<[u8]>)> {
    let refs = vals.iter().map(|&(ref v, neg)| (v, neg)).collect::<Vec<_>>();
    let encoded = bson::Value::encode_multi_for_index(&refs, None);
    // TODO capacity
    let mut entry = vec![];
    entry.push(INDEX_ENTRY);
    entry.push_all(ba_collection_id);
    entry.push_all(ba_index_id);
    entry.push_all(&encoded);
    if !unique {
        entry.push_all(&ba_record_id);
    }
    // build the backlink while the entry bytes are still borrowable as a Vec
    let mut back = vec![];
    back.push(RECORD_ID_TO_INDEX_ENTRY);
    back.push_all(ba_collection_id);
    back.push_all(&ba_index_id);
    back.push_all(ba_record_id);
    back.push_all(&entry);
    Ok((entry.into_boxed_slice(), back.into_boxed_slice()))
}
// Adds index entries (and their backlinks) for record v in every index.
fn update_indexes_insert(&mut self, indexes: &Vec<MyIndexPrep>, ba_collection_id: &Box<[u8]>, ba_record_id: &Box<[u8]>, v: &bson::Document) -> Result<()> {
    for ndx in indexes {
        let entries = try!(elmo::get_index_entries(&v, &ndx.normspec, &ndx.weights, &ndx.options));
        // TODO don't look this up here. store it in the cached info.
        let unique =
            match ndx.options.get("unique") {
                Some(&bson::Value::BBoolean(b)) => b,
                _ => false,
            };
        // TODO store this in the cache?
        let ba_index_id = u64_to_boxed_varint(ndx.index_id);
        for vals in entries {
            let (index_entry, backlink) = try!(Self::make_index_entry_pair(ba_collection_id, &ba_index_id, ba_record_id, vals, unique));
            // entry value is the record id; backlink carries an empty value
            self.pending.insert(index_entry, lsm::Blob::Array(ba_record_id.clone()));
            self.pending.insert(backlink, lsm::Blob::Array(box []));
        }
    }
    Ok(())
}
// Runs one lsm merge pass over the given level range and commits the
// resulting segment, if any. Returns true when a merge actually happened.
fn merge(&self, min_level: u32, max_level: u32, min_segs: usize, max_segs: usize) -> Result<bool> {
    let merged = self.myconn.conn.merge(min_level, max_level, min_segs, max_segs).map_err(elmo::wrap_err);
    if merged.is_err() {
        println!("from merge: {:?}", merged);
    }
    match try!(merged) {
        Some(seg) => {
            //println!("{}: merged segment: {}", level, seg);
            try!(self.tx.commitMerge(seg).map_err(elmo::wrap_err));
            Ok(true)
        },
        None => {
            //println!("{}: no merge needed", level);
            Ok(false)
        },
    }
}
// Opportunistically merges lsm levels after a commit. Each level is merged
// repeatedly until a pass finds nothing to do; once a level needed no merging
// at all, the remaining levels are skipped.
//
// Fix: the old `count_merges` counter was incremented but never read; removed.
fn automerge(&self) -> Result<()> {
    for i in 0 .. 16 {
        let mut merged_this_level = false;
        loop {
            if !try!(self.merge(i, i, 4, 8)) {
                break;
            }
            merged_this_level = true;
        }
        if !merged_this_level {
            break;
        }
    }
    // TODO consider something like:
    //try!(self.merge(0, 40, 8, 16));
    Ok(())
}
}
// The transactional write half of the storage interface. Mutations are
// buffered in self.pending (key -> Blob) and only reach the lsm file in
// commit(); dropping the writer without committing discards them.
impl<'a> elmo::StorageWriter for MyWriter<'a> {
    // Replaces an existing record (located via its _id through the primary
    // index) with v and refreshes all of its index entries.
    fn update(&mut self, db: &str, coll: &str, v: &bson::Document) -> Result<()> {
        match v.get("_id") {
            None => Err(elmo::Error::Misc(String::from("cannot update without _id"))),
            Some(id) => {
                let cw = try!(self.get_collection_writer(db, coll));
                match try!(find_record(&mut self.cursor, cw.collection_id, &id)) {
                    None => {
                        Err(elmo::Error::Misc(String::from("update but does not exist")))
                    },
                    Some(record_id) => {
                        // overwrite the record under its existing record id
                        // TODO capacity
                        let mut k = vec![];
                        k.push(RECORD);
                        let ba_collection_id = u64_to_boxed_varint(cw.collection_id);
                        k.push_all(&ba_collection_id);
                        let ba_record_id = u64_to_boxed_varint(record_id);
                        k.push_all(&ba_record_id);
                        self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(v.to_bson_array().into_boxed_slice()));
                        // drop the old index entries, then add entries for the new doc
                        try!(self.update_indexes_delete(&cw.indexes, &ba_collection_id, &ba_record_id));
                        try!(self.update_indexes_insert(&cw.indexes, &ba_collection_id, &ba_record_id, v));
                        Ok(())
                    },
                }
            },
        }
    }
    // Deletes the record whose primary key is id.
    // Returns false when no such record exists.
    fn delete(&mut self, db: &str, coll: &str, id: &bson::Value) -> Result<bool> {
        let cw = try!(self.get_collection_writer(db, coll));
        match try!(find_record(&mut self.cursor, cw.collection_id, &id)) {
            Some(record_id) => {
                // TODO capacity
                let mut k = vec![];
                k.push(RECORD);
                let ba_collection_id = u64_to_boxed_varint(cw.collection_id);
                k.push_all(&ba_collection_id);
                let ba_record_id = u64_to_boxed_varint(record_id);
                k.push_all(&ba_record_id);
                // tombstone the record itself, then remove its index entries
                self.pending.insert(k.into_boxed_slice(), lsm::Blob::Tombstone);
                try!(self.update_indexes_delete(&cw.indexes, &ba_collection_id, &ba_record_id));
                Ok(true)
            },
            None => {
                Ok(false)
            },
        }
    }
    // Inserts v as a new record under a freshly allocated record id and
    // writes its index entries.
    fn insert(&mut self, db: &str, coll: &str, v: &bson::Document) -> Result<()> {
        let cw = try!(self.get_collection_writer(db, coll));
        // TODO capacity
        let mut k = vec![];
        k.push(RECORD);
        let ba_collection_id = u64_to_boxed_varint(cw.collection_id);
        k.push_all(&ba_collection_id);
        let record_id = try!(self.use_next_record_id(cw.collection_id));
        let ba_record_id = u64_to_boxed_varint(record_id);
        k.push_all(&ba_record_id);
        self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(v.to_bson_array().into_boxed_slice()));
        try!(self.update_indexes_insert(&cw.indexes, &ba_collection_id, &ba_record_id, v));
        Ok(())
    }
    // Writes all buffered changes as one lsm segment, commits it under the
    // held write lock, then opportunistically merges levels. A writer with
    // no pending changes commits as a no-op.
    fn commit(mut self: Box<Self>) -> Result<()> {
        if !self.pending.is_empty() {
            // take the map out of self so ownership can pass to WriteSegment2
            let pending = std::mem::replace(&mut self.pending, HashMap::new());
            let g = try!(self.myconn.conn.WriteSegment2(pending).map_err(elmo::wrap_err));
            try!(self.tx.commitSegments(vec![g]).map_err(elmo::wrap_err));
            try!(self.automerge());
        }
        Ok(())
    }
    // Nothing to undo: buffered changes live only in self.pending and are
    // discarded along with self.
    fn rollback(mut self: Box<Self>) -> Result<()> {
        // since we haven't been writing segments, do nothing here
        Ok(())
    }
    // The remaining methods adapt the base_* helpers to the trait shape.
    fn create_collection(&mut self, db: &str, coll: &str, options: bson::Document) -> Result<bool> {
        let (created, _collection_id) = try!(self.base_create_collection(db, coll, options));
        Ok(created)
    }
    fn drop_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
        self.base_drop_collection(db, coll)
    }
    fn create_indexes(&mut self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
        self.base_create_indexes(what)
    }
    fn rename_collection(&mut self, old_name: &str, new_name: &str, drop_target: bool) -> Result<bool> {
        let (created, _collection_id) = try!(self.base_rename_collection(old_name, new_name, drop_target));
        Ok(created)
    }
    fn drop_index(&mut self, db: &str, coll: &str, name: &str) -> Result<bool> {
        self.base_drop_index(db, coll, name)
    }
    fn drop_database(&mut self, db: &str) -> Result<bool> {
        self.base_drop_database(db)
    }
    fn clear_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
        self.base_clear_collection(db, coll)
    }
}
// TODO do we need to declare that StorageWriter must implement Drop ?
impl<'a> Drop for MyWriter<'a> {
    fn drop(&mut self) {
        // TODO rollback
        // nothing needed yet: pending changes are only written by commit(),
        // and the MutexGuard in self.tx releases the write lock on drop.
    }
}
// TODO do we need to declare that StorageReader must implement Drop ?
impl Drop for MyReader {
    fn drop(&mut self) {
        // no explicit cleanup; the shared Rc<MyConn> is released automatically
    }
}
impl Drop for MyCollectionReader {
    fn drop(&mut self) {
        // no explicit cleanup; the boxed iterator is dropped automatically
    }
}
// Iteration just delegates to the boxed inner sequence.
impl Iterator for MyCollectionReader {
    type Item = Result<elmo::Row>;
    fn next(&mut self) -> Option<Self::Item> {
        self.seq.next()
    }
}
// Read-side interface: every scan delegates to the shared MyConn and boxes
// the resulting iterator.
impl elmo::StorageBase for MyReader {
    fn get_reader_collection_scan(&self, db: &str, coll: &str) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let scan = try!(self.myconn.get_reader_collection_scan(db, coll));
        Ok(Box::new(scan))
    }
    fn get_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let scan = try!(self.myconn.get_reader_text_index_scan(ndx, eq, terms));
        Ok(Box::new(scan))
    }
    fn get_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let scan = try!(self.myconn.get_reader_regular_index_scan(ndx, bounds));
        Ok(Box::new(scan))
    }
    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collection_infos()
    }
    fn list_indexes(&self, ns: Option<(&str, &str)>) -> Result<Vec<elmo::IndexInfo>> {
        // with a namespace, list just that collection's indexes; otherwise all
        if let Some((db, coll)) = ns {
            self.myconn.list_index_infos_for_collection(db, coll)
        } else {
            self.myconn.list_all_index_infos()
        }
    }
}
// Consuming-reader variants; each delegates to MyConn and boxes the iterator.
impl elmo::StorageReader for MyReader {
    fn into_reader_collection_scan(self: Box<Self>, db: &str, coll: &str) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let scan = try!(self.myconn.get_reader_collection_scan(db, coll));
        Ok(Box::new(scan))
    }
    fn into_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let scan = try!(self.myconn.get_reader_text_index_scan(ndx, eq, terms));
        Ok(Box::new(scan))
    }
    fn into_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let scan = try!(self.myconn.get_reader_regular_index_scan(ndx, bounds));
        Ok(Box::new(scan))
    }
}
// Read-side interface for an open write transaction. NOTE(review): these
// open fresh cursors via MyConn and do not consult self.pending, so they
// appear to see only committed data — confirm that this is intended.
impl<'a> elmo::StorageBase for MyWriter<'a> {
    fn get_reader_collection_scan(&self, db: &str, coll: &str) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let it = try!(self.myconn.get_reader_collection_scan(db, coll));
        Ok(Box::new(it))
    }
    fn get_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let it = try!(self.myconn.get_reader_text_index_scan(ndx, eq, terms));
        Ok(Box::new(it))
    }
    fn get_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let it = try!(self.myconn.get_reader_regular_index_scan(ndx, bounds));
        Ok(Box::new(it))
    }
    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collection_infos()
    }
    fn list_indexes(&self, ns: Option<(&str, &str)>) -> Result<Vec<elmo::IndexInfo>> {
        if let Some((db, coll)) = ns {
            self.myconn.list_index_infos_for_collection(db, coll)
        } else {
            self.myconn.list_all_index_infos()
        }
    }
}
impl elmo::StorageConnection for MyPublicConn {
    // Starts a write transaction: takes the single write lock first, then
    // opens a cursor the writer uses for its own reads.
    fn begin_write<'a>(&'a self) -> Result<Box<elmo::StorageWriter + 'a>> {
        let lock = try!(self.myconn.conn.GetWriteLock().map_err(elmo::wrap_err));
        let cursor = try!(self.myconn.conn.OpenCursor().map_err(elmo::wrap_err));
        let writer = MyWriter {
            myconn: self.myconn.clone(),
            tx: lock,
            pending: HashMap::new(),
            max_collection_id: None,
            max_record_id: HashMap::new(),
            max_index_id: HashMap::new(),
            cw: None,
            cursor: cursor,
        };
        Ok(Box::new(writer))
    }
    // Readers only need a shared handle to the connection.
    fn begin_read(&self) -> Result<Box<elmo::StorageReader + 'static>> {
        let reader = MyReader {
            myconn: self.myconn.clone(),
        };
        Ok(Box::new(reader))
    }
}
// Opens (or creates) the lsm file that backs this storage layer.
fn base_connect(name: &str) -> lsm::Result<lsm::db> {
    let filename = String::from(name);
    lsm::db::new(filename, lsm::DEFAULT_SETTINGS)
}
// Opens the named lsm file and wraps it in the public connection type.
pub fn connect(name: &str) -> Result<Box<elmo::StorageConnection>> {
    let conn = try!(base_connect(name).map_err(elmo::wrap_err));
    let shared = std::rc::Rc::new(MyConn {
        conn: conn,
    });
    let public = MyPublicConn {
        myconn: shared,
    };
    Ok(Box::new(public))
}
#[derive(Clone)]
// Factory that opens storage connections for a fixed lsm filename.
pub struct MyFactory {
    filename: String,
}
impl MyFactory {
    // Constructs a factory for the given lsm filename.
    pub fn new(filename: String) -> MyFactory {
        MyFactory {
            filename: filename,
        }
    }
}
impl elmo::ConnectionFactory for MyFactory {
    // Opens the backing file and wraps it in an elmo Connection.
    fn open(&self) -> elmo::Result<elmo::Connection> {
        let storage = try!(connect(&self.filename));
        Ok(elmo::Connection::new(storage))
    }
    // The factory is cheap to clone (just the filename string).
    fn clone_for_new_thread(&self) -> Box<elmo::ConnectionFactory + Send> {
        Box::new(self.clone())
    }
}
TODO: properly handle index bounds for non-unique indexes; these append the record id to the index key, which complicates the bounds comparisons
/*
Copyright 2014-2015 Zumero, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#![feature(box_syntax)]
#![feature(associated_consts)]
#![feature(vec_push_all)]
use std::collections::HashMap;
use std::collections::HashSet;
extern crate bson;
extern crate misc;
extern crate elmo;
extern crate lsm;
use lsm::ICursor;
// All storage-layer functions return elmo's Result type; lsm errors are
// converted at call sites via elmo::wrap_err.
pub type Result<T> = elmo::Result<T>;
/*
this doesn't help.
pub struct WrapError {
err: lsm::Error,
}
impl From<WrapError> for elmo::Error {
fn from(err: WrapError) -> elmo::Error {
elmo::Error::Whatever(box err.err)
}
}
impl From<lsm::Error> for WrapError {
fn from(err: lsm::Error) -> WrapError {
WrapError {
err: err
}
}
}
impl Into<WrapError> for lsm::Error {
fn into(self) -> WrapError {
WrapError {
err: self
}
}
}
*/
/*
the compiler won't allow this
the impl does not reference any types defined in this crate;
only traits defined in the current crate can be implemented for arbitrary types
impl From<lsm::Error> for elmo::Error {
fn from(err: lsm::Error) -> elmo::Error {
elmo::Error::Whatever(box err)
}
}
*/
#[derive(Clone)]
// TODO Clone is part of temp hack, remove
// Cached, pre-parsed info about one index, used when writing index entries.
struct MyIndexPrep {
    index_id: u64,
    // the index options document (e.g. "unique")
    options: bson::Document,
    // (name, index type) pairs — presumably the normalized index spec; fed
    // to elmo::get_index_entries
    normspec: Vec<(String,elmo::IndexType)>,
    // per-field weights, present for text indexes — TODO confirm
    weights: Option<HashMap<String,i32>>,
    // TODO maybe keep the options we need here directly, sparse and unique
}
#[derive(Clone)]
// TODO Clone is part of temp hack, remove
// Cached per-collection state for the writer: the collection id plus the
// prepared info for each of its indexes.
struct MyCollectionWriter {
    // db and coll are only here for caching
    db: String,
    coll: String,
    indexes: Vec<MyIndexPrep>,
    collection_id: u64,
}
// Iterator over the rows produced by a collection or index scan.
struct MyCollectionReader {
    seq: Box<Iterator<Item=Result<elmo::Row>>>,
    // TODO need counts here
}
// Adapts a RangeCursor whose values are bson documents into an iterator of Rows.
struct RangeCursorBsonValueIterator {
    cursor: lsm::RangeCursor,
}
impl RangeCursorBsonValueIterator {
    // Parses the document under the cursor (if any), wraps it in a Row,
    // and advances the cursor. Returns Ok(None) when the range is exhausted.
    fn iter_next(&mut self) -> Result<Option<elmo::Row>> {
        if self.cursor.IsValid() {
            // inner block scopes the value borrow so Next() can take &mut self
            let row = {
                let v = try!(self.cursor.LiveValueRef().map_err(elmo::wrap_err));
                let v = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
                let v = v.into_value();
                let row = elmo::Row {
                    doc: v,
                    pos: None,
                    score: None,
                };
                row
            };
            try!(self.cursor.Next().map_err(elmo::wrap_err));
            Ok(Some(row))
        } else {
            Ok(None)
        }
    }
}
impl Iterator for RangeCursorBsonValueIterator {
    type Item = Result<elmo::Row>;
    // Flattens iter_next's Result<Option<Row>> into the iterator protocol's
    // Option<Result<Row>>.
    fn next(&mut self) -> Option<Self::Item> {
        match self.iter_next() {
            // TODO will this put us in situations where the iterator just
            // returns errors forever?
            Err(e) => Some(Err(e)),
            Ok(Some(row)) => Some(Ok(row)),
            Ok(None) => None,
        }
    }
}
// Adapts a RangeCursor whose values are varints (record ids) into an iterator of u64.
struct RangeCursorVarintIterator {
    cursor: lsm::RangeCursor,
}
impl RangeCursorVarintIterator {
    // Parses the varint value under the cursor (if any) and advances.
    // Returns Ok(None) when the range is exhausted.
    fn iter_next(&mut self) -> Result<Option<u64>> {
        if self.cursor.IsValid() {
            // inner block scopes the value borrow so Next() can take &mut self
            let v = {
                let v = try!(self.cursor.LiveValueRef().map_err(elmo::wrap_err));
                //println!("got {:?}", v);
                let v = try!(v.map(lsm_map_to_varint).map_err(elmo::wrap_err));
                v
            };
            try!(self.cursor.Next().map_err(elmo::wrap_err));
            Ok(Some(v))
        } else {
            Ok(None)
        }
    }
}
impl Iterator for RangeCursorVarintIterator {
    type Item = Result<u64>;
    // Flattens iter_next's Result<Option<u64>> into Option<Result<u64>>.
    fn next(&mut self) -> Option<Self::Item> {
        match self.iter_next() {
            // TODO will this put us in situations where the iterator just
            // returns errors forever?
            Err(e) => Some(Err(e)),
            Ok(Some(v)) => Some(Ok(v)),
            Ok(None) => None,
        }
    }
}
// Read-only handle; holds a shared reference to the connection state.
struct MyReader {
    myconn: std::rc::Rc<MyConn>,
}
// A write transaction. Holds the lsm write lock for its lifetime and buffers
// every change in `pending` until commit().
struct MyWriter<'a> {
    myconn: std::rc::Rc<MyConn>,
    // db-wide write lock; released when the guard (and writer) is dropped
    tx: std::sync::MutexGuard<'a, lsm::WriteLock>,
    // buffered mutations: key -> new value (Array) or deletion (Tombstone)
    pending: HashMap<Box<[u8]>,lsm::Blob>,
    // lazily populated caches, presumably for allocating fresh ids
    // (see use_next_collection_id / use_next_record_id) — TODO confirm
    max_collection_id: Option<u64>,
    max_record_id: HashMap<u64, u64>,
    max_index_id: HashMap<u64, u64>,
    // cached state for the most recently used collection
    cw: Option<MyCollectionWriter>,
    // cursor used for the writer's own lookups during the transaction
    cursor: lsm::LivingCursor,
}
// Shared connection state: just the underlying lsm db handle.
struct MyConn {
    conn: lsm::db,
}
// Public connection wrapper handed to elmo; shares MyConn with readers/writers.
struct MyPublicConn {
    myconn: std::rc::Rc<MyConn>,
}
// TODO should all these value encodings be switching around to have
// the collid first, before the tag? drop_collection gets trivial.
// and the collection gets good locality of storage.
// TODO should we have record ids? or just have the _id of each record
// be its actual key?
//
// the pk can be big, and it will be duplicated,
// once in the key, and once in the bson doc itself.
//
// the pk or id is also duplicated in the index entries.
// and in their backlinks.
//
// if we don't have a recid, how would we store a document that doesn't
// have any _id at all?
/// key:
/// (tag)
/// db name (len + str)
/// coll name (len + str)
/// value:
/// collid (varint)
pub const NAME_TO_COLLECTION_ID: u8 = 10;
/// key:
/// (tag)
/// collid (varint)
/// value:
/// properties (bson):
/// d: db name (str)
/// c: coll name (str)
/// o: options (document)
pub const COLLECTION_ID_TO_PROPERTIES: u8 = 11;
pub const COLLECTION_ID_BOUNDARY: u8 = COLLECTION_ID_TO_PROPERTIES + 1;
/// key:
/// (tag)
/// collid (varint)
/// index name (len + str)
/// value:
/// indexid (varint)
pub const NAME_TO_INDEX_ID: u8 = 20;
/// key:
/// (tag)
/// collid (varint)
/// indexid (varint)
/// value:
/// properties (bson):
/// n: name (str)
/// s: spec (bson)
/// o: options (bson)
pub const INDEX_ID_TO_PROPERTIES: u8 = 21;
pub const PRIMARY_INDEX_ID: u64 = 0;
/// key:
/// (tag)
/// collid (varint)
/// recid (varint)
/// value:
/// doc (bson)
pub const RECORD: u8 = 30;
/// key:
/// (tag)
/// collid (varint)
/// indexid (varint)
/// k (len + bytes)
/// recid (varint) (not present when index option unique)
/// value:
/// recid (varint) (present only when index option unique?)
pub const INDEX_ENTRY: u8 = 40;
/// key:
/// (tag)
/// collid (varint)
/// indexid (varint)
/// recid (varint)
/// (complete index key)
/// value:
/// (none)
pub const RECORD_ID_TO_INDEX_ENTRY: u8 = 41;
// Encodes the key mapping (db, coll) to a collection id:
// tag byte, then each name as a one-byte length followed by its bytes.
fn encode_key_name_to_collection_id(db: &str, coll: &str) -> Box<[u8]> {
    // TODO capacity
    let mut key = vec![];
    key.push(NAME_TO_COLLECTION_ID);
    // From the mongo docs:
    // The maximum length of the collection namespace, which includes the database name, the dot
    // (.) separator, and the collection name (i.e. <database>.<collection>), is 120 bytes.
    // so a single length byte per name is sufficient.
    for name in [db, coll].iter() {
        let bytes = name.as_bytes();
        key.push(bytes.len() as u8);
        key.push_all(bytes);
    }
    key.into_boxed_slice()
}
// Reads a one-byte-length-prefixed string from key k at offset cur.
// Returns the string and the offset just past it.
fn decode_string_from_key(k: &lsm::KeyRef, cur: usize) -> Result<(String, usize)> {
    // TODO should we treat the len before the string as a varint instead of always a byte?
    let len = try!(k.u8_at(cur).map_err(elmo::wrap_err)) as usize;
    let cur = cur + 1;
    let s = try!(k.map_range(cur, cur + len, lsm_map_to_string).map_err(elmo::wrap_err));
    let cur = cur + len;
    Ok((s, cur))
}
// Reads a varint from key k at offset cur; the first byte determines the
// encoded length. Returns the value and the offset just past it.
fn decode_varint_from_key(k: &lsm::KeyRef, cur: usize) -> Result<(u64, usize)> {
    let first_byte = try!(k.u8_at(cur).map_err(elmo::wrap_err));
    let len = misc::varint::first_byte_to_len(first_byte);
    let v = try!(k.map_range(cur, cur + len, lsm_map_to_varint).map_err(elmo::wrap_err));
    let cur = cur + len;
    Ok((v, cur))
}
// Decodes a NAME_TO_COLLECTION_ID key back into its (db, coll) names.
fn decode_key_name_to_collection_id(k: &lsm::KeyRef) -> Result<(String, String)> {
    // k[0] must be NAME_TO_COLLECTION_ID
    let pos = 1;
    let (db, pos) = try!(decode_string_from_key(k, pos));
    let (coll, _) = try!(decode_string_from_key(k, pos));
    Ok((db, coll))
}
// Decodes a NAME_TO_INDEX_ID key into (collection id, index name).
fn decode_key_name_to_index_id(k: &lsm::KeyRef) -> Result<(u64, String)> {
    // k[0] must be NAME_TO_INDEX_ID
    let pos = 1;
    let (collection_id, pos) = try!(decode_varint_from_key(k, pos));
    let (name, _) = try!(decode_string_from_key(k, pos));
    Ok((collection_id, name))
}
// Decodes a RECORD key into (collection id, record id).
fn decode_key_record(k: &lsm::KeyRef) -> Result<(u64, u64)> {
    // k[0] must be RECORD
    let pos = 1;
    let (collection_id, pos) = try!(decode_varint_from_key(k, pos));
    let (record_id, _) = try!(decode_varint_from_key(k, pos));
    Ok((collection_id, record_id))
}
fn decode_key_collection_id_to_properties(k: &lsm::KeyRef) -> Result<(u64)> {
// k[0] must be COLLECTION_ID_TO_PROPERTIES
let cur = 1;
let (collection_id, _) = try!(decode_varint_from_key(k, cur));
Ok(collection_id)
}
// Decodes an INDEX_ID_TO_PROPERTIES key into (collection id, index id).
fn decode_key_index_id_to_properties(k: &lsm::KeyRef) -> Result<(u64, u64)> {
    // k[0] must be INDEX_ID_TO_PROPERTIES
    let pos = 1;
    let (collection_id, pos) = try!(decode_varint_from_key(k, pos));
    let (index_id, _) = try!(decode_varint_from_key(k, pos));
    Ok((collection_id, index_id))
}
// Decodes a RECORD_ID_TO_INDEX_ENTRY key into (collid, indexid, recid) plus
// the full index entry key stored in the remainder of the backlink key.
fn decode_key_backlink(k: &lsm::KeyRef) -> Result<(u64, u64, u64, Box<[u8]>)> {
    // k[0] must be RECORD_ID_TO_INDEX_ENTRY
    let cur = 1;
    let (collection_id, cur) = try!(decode_varint_from_key(k, cur));
    let (index_id, cur) = try!(decode_varint_from_key(k, cur));
    let (record_id, cur) = try!(decode_varint_from_key(k, cur));
    let index_entry = try!(k.map_range(cur, k.len(), lsm_map_to_box).map_err(elmo::wrap_err));
    Ok((collection_id, index_id, record_id, index_entry))
}
// Appends n to v in varint encoding (at most 9 bytes).
fn push_varint(v: &mut Vec<u8>, n: u64) {
    let mut buf = [0; 9];
    let mut cur = 0;
    misc::varint::write(&mut buf, &mut cur, n);
    v.push_all(&buf[0 .. cur]);
}
// Builds a key consisting of a one-byte tag followed by a varint id.
fn encode_key_tag_and_varint(tag: u8, id: u64) -> Vec<u8> {
    // TODO capacity
    let mut key = vec![tag];
    push_varint(&mut key, id);
    key
}
// Key for looking up a collection's properties document by its id.
fn encode_key_collection_id_to_properties(collection_id: u64) -> Vec<u8> {
    encode_key_tag_and_varint(COLLECTION_ID_TO_PROPERTIES, collection_id)
}
fn encode_key_index_id_to_properties(collection_id: u64, index_id: u64) -> Vec<u8> {
// TODO capacity
let mut k = vec![];
k.push(INDEX_ID_TO_PROPERTIES);
push_varint(&mut k, collection_id);
push_varint(&mut k, index_id);
k
}
fn encode_key_name_to_index_id(collection_id: u64, name: &str) -> Vec<u8> {
// TODO capacity
let mut k = vec![];
k.push(NAME_TO_INDEX_ID);
// From the mongo docs:
// The maximum length of the collection namespace, which includes the database name, the dot
// (.) separator, and the collection name (i.e. <database>.<collection>), is 120 bytes.
let ba = u64_to_boxed_varint(collection_id);
k.push_all(&ba);
let b = name.as_bytes();
k.push(b.len() as u8);
k.push_all(b);
k
}
// Copies a key/value byte range into an owned String; fails if not UTF-8.
// Fix: `from_utf8(&ba)` took a needless extra reference (ba is already
// &[u8]); pass the slice directly.
fn lsm_map_to_string(ba: &[u8]) -> lsm::Result<String> {
    let s = try!(std::str::from_utf8(ba));
    Ok(String::from(s))
}
// Parses a value byte range as a single varint.
fn lsm_map_to_varint(ba: &[u8]) -> lsm::Result<u64> {
    let mut cur = 0;
    let n = misc::varint::read(ba, &mut cur);
    // TODO assert cur used up all of ba?
    Ok(n)
}
fn lsm_map_to_box(ba: &[u8]) -> lsm::Result<Box<[u8]>> {
// TODO capacity
let mut k = vec![];
k.push_all(ba);
let k = k.into_boxed_slice();
Ok(k)
}
// Encodes n as a varint in a freshly allocated boxed slice (at most 9 bytes).
fn u64_to_boxed_varint(n: u64) -> Box<[u8]> {
    let mut buf = [0; 9];
    let mut cur = 0;
    misc::varint::write(&mut buf, &mut cur, n);
    let mut v = Vec::with_capacity(cur);
    v.push_all(&buf[0 .. cur]);
    let v = v.into_boxed_slice();
    v
}
// Parses a value byte range as a bson document, wrapping parse errors
// into the lsm error type.
fn lsm_map_to_bson(ba: &[u8]) -> lsm::Result<bson::Document> {
    bson::Document::from_bson(ba).map_err(lsm::wrap_err)
}
// Looks up a record id by its _id via the primary index. The primary index
// is unique, so its entry key omits the record id and a SEEK_EQ on the
// encoded _id finds it; the entry's value is the record id.
fn find_record(cursor: &mut lsm::LivingCursor, collection_id: u64, id: &bson::Value) -> Result<Option<u64>> {
    let mut k = vec![];
    k.push(INDEX_ENTRY);
    push_varint(&mut k, collection_id);
    push_varint(&mut k, PRIMARY_INDEX_ID);
    let ba = bson::Value::encode_one_for_index(id, false);
    k.push_all(&ba);
    get_value_for_key_as_varint(cursor, &k)
}
// Seeks to exactly k and parses its value as a varint.
// Returns None when the key is not present.
fn get_value_for_key_as_varint(cursor: &mut lsm::LivingCursor, k: &[u8]) -> Result<Option<u64>> {
    try!(cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_EQ).map_err(elmo::wrap_err));
    if cursor.IsValid() {
        let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
        let id = try!(v.map(lsm_map_to_varint).map_err(elmo::wrap_err));
        Ok(Some(id))
    } else {
        Ok(None)
    }
}
// Seeks to exactly k and parses its value as a bson document.
// Returns None when the key is not present.
fn get_value_for_key_as_bson(cursor: &mut lsm::LivingCursor, k: &[u8]) -> Result<Option<bson::Document>> {
    try!(cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_EQ).map_err(elmo::wrap_err));
    if !cursor.IsValid() {
        return Ok(None);
    }
    let vref = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
    let doc = try!(vref.map(lsm_map_to_bson).map_err(elmo::wrap_err));
    Ok(Some(doc))
}
impl MyConn {
// Builds an iterator over every record in a collection.
// A collection that does not exist yields an empty iterator, not an error.
fn get_reader_collection_scan(&self, db: &str, coll: &str) -> Result<MyCollectionReader> {
    // check to see if the collection exists and get its id
    let k = encode_key_name_to_collection_id(db, coll);
    let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
    match try!(get_value_for_key_as_varint(&mut cursor, &k)) {
        None => {
            let rdr =
                MyCollectionReader {
                    seq: box std::iter::empty(),
                };
            Ok(rdr)
        },
        Some(collection_id) => {
            // range-scan all RECORD keys for this collection: everything
            // strictly between (RECORD, collid) and (RECORD, collid + 1)
            // TODO vec capacity
            let mut kmin = vec![];
            kmin.push(RECORD);
            push_varint(&mut kmin, collection_id);
            let kmin = kmin.into_boxed_slice();
            let min = lsm::Min::new(kmin, lsm::OpGt::GT);
            // TODO vec capacity
            let mut kmax = vec![];
            kmax.push(RECORD);
            push_varint(&mut kmax, collection_id + 1);
            let kmax = kmax.into_boxed_slice();
            let max = lsm::Max::new(kmax, lsm::OpLt::LT);
            let mut cursor = lsm::RangeCursor::new(cursor, min, max);
            try!(cursor.First().map_err(elmo::wrap_err));
            let seq =
                RangeCursorBsonValueIterator {
                    cursor: cursor,
                };
            let rdr =
                MyCollectionReader {
                    seq: box seq,
                };
            Ok(rdr)
        },
    }
}
// Text index scans are not implemented in this backend yet.
fn get_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<MyCollectionReader> {
    unimplemented!();
}
fn get_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<MyCollectionReader> {
let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
let collection_id =
match try!(get_value_for_key_as_varint(&mut cursor, &encode_key_name_to_collection_id(&ndx.db, &ndx.coll))) {
Some(id) => id,
None => return Err(elmo::Error::Misc(String::from("collection does not exist"))),
};
let index_id =
match try!(get_value_for_key_as_varint(&mut cursor, &encode_key_name_to_index_id(collection_id, &ndx.name))) {
Some(id) => id,
None => return Err(elmo::Error::Misc(String::from("index does not exist"))),
};
let has_recid = {
let unique =
match ndx.options.get("unique") {
Some(&bson::Value::BBoolean(b)) => b,
_ => false,
};
!unique
};
fn add_one(a: &mut Vec<u8>) {
let mut i = a.len() - 1;
loop {
if a[i] == 255 {
a[i] = 0;
if i == 0 {
panic!("TODO handle case where add_one to binary array overflows the first byte?");
} else {
i = i - 1;
}
} else {
a[i] = a[i] + 1;
break;
}
}
}
fn f_twok(cursor: lsm::LivingCursor, kmin: Vec<u8>, kmax: Vec<u8>, min_cmp: lsm::OpGt, max_cmp: lsm::OpLt) -> lsm::RangeCursor {
let kmin = kmin.into_boxed_slice();
let kmax = kmax.into_boxed_slice();
let min = lsm::Min::new(kmin, min_cmp);
let max = lsm::Max::new(kmax, max_cmp);
let cursor = lsm::RangeCursor::new(cursor, min, max);
cursor
}
fn f_two(has_recid: bool, preface: Vec<u8>, cursor: lsm::LivingCursor, eqvals: elmo::QueryKey, minvals: elmo::QueryKey, maxvals: elmo::QueryKey, min_cmp: lsm::OpGt, max_cmp: lsm::OpLt) -> lsm::RangeCursor {
let mut kmin = preface.clone();
bson::Value::push_encode_multi_for_index(&mut kmin, &eqvals, Some(&minvals));
if has_recid && min_cmp == lsm::OpGt::GT {
add_one(&mut kmin);
}
let mut kmax = preface;
bson::Value::push_encode_multi_for_index(&mut kmax, &eqvals, Some(&maxvals));
if has_recid && max_cmp == lsm::OpLt::LTE {
add_one(&mut kmax);
}
f_twok(cursor, kmin, kmax, min_cmp, max_cmp)
}
fn f_gt(has_recid: bool, preface: Vec<u8>, cursor: lsm::LivingCursor, vals: elmo::QueryKey, min_cmp: lsm::OpGt) -> lsm::RangeCursor {
let mut kmin = preface.clone();
bson::Value::push_encode_multi_for_index(&mut kmin, &vals, None);
if has_recid && min_cmp == lsm::OpGt::GT {
add_one(&mut kmin);
}
let mut kmax = preface.clone();
add_one(&mut kmax);
let max_cmp = lsm::OpLt::LT;
f_twok(cursor, kmin, kmax, min_cmp, max_cmp)
}
fn f_lt(has_recid: bool, preface: Vec<u8>, cursor: lsm::LivingCursor, vals: elmo::QueryKey, max_cmp: lsm::OpLt) -> lsm::RangeCursor {
let mut kmax = preface.clone();
bson::Value::push_encode_multi_for_index(&mut kmax, &vals, None);
if has_recid && max_cmp == lsm::OpLt::LTE {
add_one(&mut kmax);
}
let kmin = preface.clone();
let min_cmp = lsm::OpGt::GT;
f_twok(cursor, kmin, kmax, min_cmp, max_cmp)
}
// Key preface shared by every index entry: [INDEX_ENTRY, collection_id, index_id].
let mut key_preface = vec![];
key_preface.push(INDEX_ENTRY);
push_varint(&mut key_preface, collection_id);
push_varint(&mut key_preface, index_id);
// Build a bounded range cursor matching the query bounds.
let mut cursor =
match bounds {
elmo::QueryBounds::GT(vals) => f_gt(has_recid, key_preface, cursor, vals, lsm::OpGt::GT),
elmo::QueryBounds::GTE(vals) => f_gt(has_recid, key_preface, cursor, vals, lsm::OpGt::GTE),
elmo::QueryBounds::LT(vals) => f_lt(has_recid, key_preface, cursor, vals, lsm::OpLt::LT),
elmo::QueryBounds::LTE(vals) => f_lt(has_recid, key_preface, cursor, vals, lsm::OpLt::LTE),
elmo::QueryBounds::GT_LT(eqvals, minvals, maxvals) => f_two(has_recid, key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GT, lsm::OpLt::LT),
elmo::QueryBounds::GTE_LT(eqvals, minvals, maxvals) => f_two(has_recid, key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GTE, lsm::OpLt::LT),
elmo::QueryBounds::GT_LTE(eqvals, minvals, maxvals) => f_two(has_recid, key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GT, lsm::OpLt::LTE),
elmo::QueryBounds::GTE_LTE(eqvals, minvals, maxvals) => f_two(has_recid, key_preface, cursor, eqvals, minvals, maxvals, lsm::OpGt::GTE, lsm::OpLt::LTE),
elmo::QueryBounds::EQ(vals) => {
// equality: scan the half-open range [key, key+1) so that keys with a
// trailing record id still match
let mut kmin = key_preface.clone();
bson::Value::push_encode_multi_for_index(&mut kmin, &vals, None);
let mut kmax = kmin.clone();
add_one(&mut kmax);
f_twok(cursor, kmin, kmax, lsm::OpGt::GTE, lsm::OpLt::LT)
},
};
try!(cursor.First().map_err(elmo::wrap_err));
// Each matching index entry's value is a varint record id.
let seq =
RangeCursorVarintIterator {
cursor: cursor,
};
let seq = {
// DISTINCT. we don't want this producing the same record twice.
// NOTE(review): this materializes all matching record ids eagerly
// before dedup; fine for small result sets, costly for large ones.
let mut q = try!(seq.collect::<Result<Vec<_>>>());
let mut seen = HashSet::new();
let mut a = vec![];
for x in q {
if !seen.contains(&x) {
a.push(x);
seen.insert(x);
}
}
a.into_iter().map(|x| Ok(x))
};
// the iterator above yields record ids.
// now we need something that, for each record id yielded by an
// index entry, looks up the actual record and yields THAT. in
// sqlite, this was a join.
let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
let seq = seq.map(
move |record_id: Result<u64>| -> Result<elmo::Row> {
match record_id {
Ok(record_id) => {
// key of the record itself: [RECORD, collection_id, record_id]
let mut k = vec![];
k.push(RECORD);
push_varint(&mut k, collection_id);
push_varint(&mut k, record_id);
try!(cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_EQ).map_err(elmo::wrap_err));
if cursor.IsValid() {
let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
let v = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
let v = v.into_value();
let row = elmo::Row {
doc: v,
pos: None,
score: None,
};
Ok(row)
} else {
// an index entry pointed at a record that isn't there; this
// indicates index/record inconsistency
Err(elmo::Error::Misc(String::from("record id not found?!?")))
}
},
Err(e) => {
Err(e)
},
}
});
let rdr =
MyCollectionReader {
seq: box seq,
};
Ok(rdr)
}
// TODO this could maybe return an iterator instead of a vec
// List (collection_id, index_id, properties) for every index in the store,
// or only for one collection when collection_id is Some.
fn base_list_indexes(&self, cursor: &mut lsm::LivingCursor, collection_id: Option<u64>) -> Result<Vec<(u64, u64, bson::Document)>> {
// prefix to scan: INDEX_ID_TO_PROPERTIES, optionally narrowed to one collection
let q =
match collection_id {
Some(collection_id) => {
// TODO capacity
let mut k = vec![];
k.push(INDEX_ID_TO_PROPERTIES);
push_varint(&mut k, collection_id);
k.into_boxed_slice()
},
None => {
// TODO the vec! macro set capacity to match?
let k = vec![INDEX_ID_TO_PROPERTIES];
k.into_boxed_slice()
},
};
let mut cursor = lsm::PrefixCursor::new(cursor, q);
let mut a = vec![];
try!(cursor.First().map_err(elmo::wrap_err));
while cursor.IsValid() {
// inner scope so the key/value borrows end before cursor.Next()
let (collection_id, index_id, props) = {
let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
let (collection_id, index_id) = try!(decode_key_index_id_to_properties(&k));
let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
let props = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
(collection_id, index_id, props)
};
a.push((collection_id, index_id, props));
try!(cursor.Next().map_err(elmo::wrap_err));
}
Ok(a)
}
// Build an IndexInfo for every index in the store, resolving the db/coll
// names from each collection's properties record.
fn list_all_index_infos(&self) -> Result<Vec<elmo::IndexInfo>> {
    let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
    let indexes = try!(self.base_list_indexes(&mut cursor, None));
    let indexes = indexes.into_iter().map(
        // the index id is not needed here (it was unused in the original
        // closure, provoking a warning); it is now explicitly ignored
        |(collection_id, _index_id, mut index_properties)| {
            // TODO the extra lookup here is pretty expensive.
            // maybe we should just store the db/coll names here too?
            let k = encode_key_collection_id_to_properties(collection_id);
            let mut collection_properties = try!(get_value_for_key_as_bson(&mut cursor, &k)).unwrap_or(bson::Document::new());
            let db = try!(collection_properties.must_remove_string("d"));
            let coll = try!(collection_properties.must_remove_string("c"));
            //let options = try!(collection_properties.must_remove_document("o"));
            let name = try!(index_properties.must_remove_string("n"));
            let spec = try!(index_properties.must_remove_document("s"));
            let options = try!(index_properties.must_remove_document("o"));
            let info = elmo::IndexInfo {
                db: String::from(db),
                coll: String::from(coll),
                name: String::from(name),
                spec: spec,
                options: options,
            };
            Ok(info)
        }).collect::<Result<Vec<_>>>();
    let indexes = try!(indexes);
    Ok(indexes)
}
// Return an IndexInfo for every index on (db, coll), or an empty vec when
// the collection does not exist.
fn list_index_infos_for_collection(&self, db: &str, coll: &str) -> Result<Vec<elmo::IndexInfo>> {
    let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
    let k = encode_key_name_to_collection_id(db, coll);
    match try!(get_value_for_key_as_varint(&mut cursor, &k)) {
        None => {
            // no such collection, so it has no indexes
            Ok(vec![])
        },
        Some(collection_id) => {
            let indexes = try!(self.base_list_indexes(&mut cursor, Some(collection_id)));
            let indexes = indexes.into_iter().map(
                // neither id is needed here (both were unused in the
                // original closure, provoking warnings); they are now
                // explicitly ignored
                |(_, _, mut index_properties)| {
                    let name = try!(index_properties.must_remove_string("n"));
                    let spec = try!(index_properties.must_remove_document("s"));
                    let options = try!(index_properties.must_remove_document("o"));
                    let info = elmo::IndexInfo {
                        db: String::from(db),
                        coll: String::from(coll),
                        name: String::from(name),
                        spec: spec,
                        options: options,
                    };
                    Ok(info)
                }).collect::<Result<Vec<_>>>();
            let indexes = try!(indexes);
            Ok(indexes)
        }
    }
}
// TODO this could maybe return an iterator instead of a vec
// List (collection_id, db, coll) for every collection by scanning the
// NAME_TO_COLLECTION_ID prefix.
fn base_list_collections(&self, cursor: &mut lsm::LivingCursor) -> Result<Vec<(u64, String, String)>> {
let mut cursor = lsm::PrefixCursor::new(cursor, box [NAME_TO_COLLECTION_ID]);
let mut a = vec![];
// TODO might need to sort by the coll name? the sqlite version does.
try!(cursor.First().map_err(elmo::wrap_err));
while cursor.IsValid() {
// scope the key/value borrows so cursor.Next() can re-borrow
{
let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
let (db, coll) = try!(decode_key_name_to_collection_id(&k));
let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
let collection_id = try!(v.map(lsm_map_to_varint).map_err(elmo::wrap_err));
a.push((collection_id, db, coll));
}
try!(cursor.Next().map_err(elmo::wrap_err));
}
Ok(a)
}
// Build a CollectionInfo (db, coll, options) for every collection,
// fetching the options from each collection's properties record.
fn base_list_collection_infos(&self) -> Result<Vec<elmo::CollectionInfo>> {
let mut cursor = try!(self.conn.OpenCursor().map_err(elmo::wrap_err));
let collections = try!(self.base_list_collections(&mut cursor));
let collections = collections.into_iter().map(
|(collection_id, db, coll)| {
let k = encode_key_collection_id_to_properties(collection_id);
// missing properties record degrades to an empty document
let mut collection_properties = try!(get_value_for_key_as_bson(&mut cursor, &k)).unwrap_or(bson::Document::new());
//let db = try!(collection_properties.must_remove_string("d"));
//let coll = try!(collection_properties.must_remove_string("c"));
let options = try!(collection_properties.must_remove_document("o"));
let info = elmo::CollectionInfo {
db: db,
coll: coll,
options: options,
};
Ok(info)
}).collect::<Result<Vec<_>>>();
let collections = try!(collections);
Ok(collections)
}
}
impl<'a> MyWriter<'a> {
// Allocate the next index id for a collection.  The per-collection max is
// cached in self.max_index_id; on a cold cache it is discovered by seeking
// (SEEK_LE) just before the NEXT collection's INDEX_ID_TO_PROPERTIES keys,
// which lands on this collection's highest index key, if any.
fn use_next_index_id(&mut self, collection_id: u64) -> Result<u64> {
match self.max_index_id.entry(collection_id) {
std::collections::hash_map::Entry::Occupied(mut e) => {
// warm cache: just bump it
let n = e.get_mut();
*n = *n + 1;
Ok(*n)
},
std::collections::hash_map::Entry::Vacant(e) => {
let n = {
// TODO capacity
let mut k = vec![];
k.push(INDEX_ID_TO_PROPERTIES);
push_varint(&mut k, collection_id + 1);
try!(self.cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_LE).map_err(elmo::wrap_err));
if self.cursor.IsValid() {
let k = try!(self.cursor.KeyRef().map_err(elmo::wrap_err));
if try!(k.u8_at(0).map_err(elmo::wrap_err)) == INDEX_ID_TO_PROPERTIES {
let (k_collection_id, index_id) = try!(decode_key_index_id_to_properties(&k));
if collection_id == k_collection_id {
1 + index_id
} else {
// landed on another collection's index key
1 + PRIMARY_INDEX_ID
}
} else {
// landed outside the INDEX_ID_TO_PROPERTIES key space
1 + PRIMARY_INDEX_ID
}
} else {
// store is empty in this key range
1 + PRIMARY_INDEX_ID
}
};
e.insert(n);
Ok(n)
},
}
}
// Allocate the next record id for a collection.  Same strategy as
// use_next_index_id: cache per collection, and on a cold cache seek
// (SEEK_LE) just before the next collection's RECORD keys, landing on
// this collection's highest record key, if any.
fn use_next_record_id(&mut self, collection_id: u64) -> Result<u64> {
match self.max_record_id.entry(collection_id) {
std::collections::hash_map::Entry::Occupied(mut e) => {
// warm cache: just bump it
let n = e.get_mut();
*n = *n + 1;
Ok(*n)
},
std::collections::hash_map::Entry::Vacant(e) => {
let n = {
// TODO capacity
let mut k = vec![];
k.push(RECORD);
push_varint(&mut k, collection_id + 1);
try!(self.cursor.SeekRef(&lsm::KeyRef::for_slice(&k), lsm::SeekOp::SEEK_LE).map_err(elmo::wrap_err));
if self.cursor.IsValid() {
let k = try!(self.cursor.KeyRef().map_err(elmo::wrap_err));
if try!(k.u8_at(0).map_err(elmo::wrap_err)) == RECORD {
let (k_collection_id, record_id) = try!(decode_key_record(&k));
if collection_id == k_collection_id {
1 + record_id
} else {
// another collection's record key; this collection is empty
1
}
} else {
1
}
} else {
1
}
};
e.insert(n);
Ok(n)
},
}
}
// Allocate the next collection id.  On a cold cache, seek (SEEK_LE)
// backward from COLLECTION_ID_BOUNDARY, landing on the highest existing
// COLLECTION_ID_TO_PROPERTIES key, if any.
fn use_next_collection_id(&mut self) -> Result<u64> {
match self.max_collection_id {
Some(n) => {
// warm cache: bump and store
let n = n + 1;
self.max_collection_id = Some(n);
Ok(n)
},
None => {
let n = {
try!(self.cursor.SeekRef(&lsm::KeyRef::from_boxed_slice(box [COLLECTION_ID_BOUNDARY]), lsm::SeekOp::SEEK_LE).map_err(elmo::wrap_err));
if self.cursor.IsValid() {
let k = try!(self.cursor.KeyRef().map_err(elmo::wrap_err));
if try!(k.u8_at(0).map_err(elmo::wrap_err)) == COLLECTION_ID_TO_PROPERTIES {
let collection_id = try!(decode_key_collection_id_to_properties(&k));
1 + collection_id
} else {
// no collections exist yet
1
}
} else {
1
}
};
self.max_collection_id = Some(n);
Ok(n)
},
}
}
// Build the cached per-index prep info (id, options, normalized spec,
// text weights) for every index on the collection.
fn list_indexes_for_collection_writer(&mut self, collection_id: u64) -> Result<Vec<MyIndexPrep>> {
    let indexes = try!(self.myconn.base_list_indexes(&mut self.cursor, Some(collection_id)));
    let indexes = indexes.into_iter().map(
        |(_, index_id, mut index_properties)| {
            //let name = try!(index_properties.must_remove_string("n"));
            let spec = try!(index_properties.must_remove_document("s"));
            let options = try!(index_properties.must_remove_document("o"));
            // TODO we might want to precompute "unique" and "sparse" from
            // options and store them in MyIndexPrep, instead of re-reading
            // options at every use site.  (the original computed both here
            // and then discarded them; that dead code is removed.)
            let (normspec, weights) = try!(elmo::get_normalized_spec(&spec, &options));
            let prep = MyIndexPrep {
                index_id: index_id,
                options: options,
                normspec: normspec,
                weights: weights,
            };
            Ok(prep)
        }).collect::<Result<Vec<_>>>();
    let indexes = try!(indexes);
    Ok(indexes)
}
// Build a MyCollectionWriter for (db, coll), creating the collection if
// needed and loading (or synthesizing) its index prep info.
fn make_collection_writer(&mut self, db: &str, coll: &str) -> Result<MyCollectionWriter> {
let (just_created, collection_id) = try!(self.base_create_collection(db, coll, bson::Document::new()));
let indexes = {
// TODO
// if the collection was just created, there will be no indexes found
// think of the following as a temporary hack.
// we currently have no way of including anything written during the
// current transaction in queries.
if just_created {
// synthesize the prep for the implicit unique _id index
let spec = bson::Document {pairs: vec![(String::from("_id"), bson::Value::BInt32(1))]};
let options = bson::Document {pairs: vec![(String::from("unique"), bson::Value::BBoolean(true))]};
let (normspec, weights) = try!(elmo::get_normalized_spec(&spec, &options));
let prep = MyIndexPrep {
index_id: PRIMARY_INDEX_ID,
options: options,
normspec: normspec,
weights: weights,
};
vec![prep]
} else {
try!(self.list_indexes_for_collection_writer(collection_id))
}
};
let c = MyCollectionWriter {
db: String::from(db),
coll: String::from(coll),
indexes: indexes,
collection_id: collection_id,
};
Ok(c)
}
fn get_collection_writer(&mut self, db: &str, coll: &str) -> Result<MyCollectionWriter> {
let need_cw =
if self.cw.is_none() {
//println!("cw is none");
true
} else {
let cw = self.cw.as_ref().unwrap();
if cw.db != db || cw.coll != coll {
true
} else {
//println!("cw doesn't match");
false
}
};
if need_cw {
let cw = try!(self.make_collection_writer(db, coll));
self.cw = Some(cw);
}
// TODO this is an awful approach here. clone. temporary workaround.
match self.cw {
Some(ref cw) => {
Ok(cw.clone())
},
None => {
unreachable!();
},
}
}
// Queue a tombstone in self.pending for every live key under the prefix.
fn delete_by_prefix(&mut self, prefix: Box<[u8]>) -> Result<()> {
// TODO it would be nice if lsm had a "graveyard" delete, a way to do a
// blind delete by prefix.
let mut cursor = lsm::PrefixCursor::new(&mut self.cursor, prefix);
try!(cursor.First().map_err(elmo::wrap_err));
while cursor.IsValid() {
// scope the key borrow so cursor.Next() can re-borrow
{
let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Tombstone);
}
try!(cursor.Next().map_err(elmo::wrap_err));
}
Ok(())
}
// Tombstone every key of the form [tag, collection_id, ...].
fn delete_by_collection_id_prefix(&mut self, tag: u8, collection_id: u64) -> Result<()> {
    let mut prefix = Vec::new();
    prefix.push(tag);
    push_varint(&mut prefix, collection_id);
    self.delete_by_prefix(prefix.into_boxed_slice())
}
// Tombstone every key of the form [tag, collection_id, index_id, ...].
fn delete_by_index_id_prefix(&mut self, tag: u8, collection_id: u64, index_id: u64) -> Result<()> {
    let mut prefix = Vec::new();
    prefix.push(tag);
    push_varint(&mut prefix, collection_id);
    push_varint(&mut prefix, index_id);
    self.delete_by_prefix(prefix.into_boxed_slice())
}
// Remove all records and index entries from a collection, creating the
// collection (empty) if it does not exist.  Returns whether it was just
// created; clearing an existing collection returns false.
fn base_clear_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
let k = encode_key_name_to_collection_id(db, coll);
match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
None => {
// TODO base_created_collection checks AGAIN to see if the collection exists
let (created, _) = try!(self.base_create_collection(db, coll, bson::Document::new()));
Ok(created)
},
Some(collection_id) => {
// all of the following tags are followed immediately by the
// collection_id, so we can delete by prefix:
try!(self.delete_by_collection_id_prefix(RECORD, collection_id));
try!(self.delete_by_collection_id_prefix(INDEX_ENTRY, collection_id));
try!(self.delete_by_collection_id_prefix(RECORD_ID_TO_INDEX_ENTRY, collection_id));
Ok(false)
},
}
}
// Create an index.  Returns Ok(false) if an index with this name and the
// same spec already exists, an error if the name exists with a different
// spec, and Ok(true) after creating it.  Creation also builds index
// entries (and backlinks) for every existing record in the collection.
fn create_index(&mut self, info: elmo::IndexInfo) -> Result<bool> {
//println!("create_index: {:?}", info);
let (_created, collection_id) = try!(self.base_create_collection(&info.db, &info.coll, bson::Document::new()));
let k = encode_key_name_to_index_id(collection_id, &info.name);
match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
Some(index_id) => {
// an index with this name already exists; check the spec matches
let k = encode_key_index_id_to_properties(collection_id, index_id);
let mut index_properties = try!(get_value_for_key_as_bson(&mut self.cursor, &k)).unwrap_or(bson::Document::new());
//let name = try!(index_properties.must_remove_string("n"));
let spec = try!(index_properties.must_remove_document("s"));
//let options = try!(index_properties.must_remove_document("o"));
if spec != info.spec {
// note that we do not compare the options.
// I think mongo does it this way too.
Err(elmo::Error::Misc(String::from("index already exists with different keys")))
} else {
Ok(false)
}
},
None => {
let index_id = try!(self.use_next_index_id(collection_id));
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(u64_to_boxed_varint(index_id)));
// now create entries for all the existing records
let unique =
match info.options.get("unique") {
Some(&bson::Value::BBoolean(b)) => b,
_ => false,
};
let (normspec, weights) = try!(elmo::get_normalized_spec(&info.spec, &info.options));
// scan every record in the collection: [RECORD, collection_id, ...]
let mut k = vec![];
k.push(RECORD);
push_varint(&mut k, collection_id);
let mut cursor = lsm::PrefixCursor::new(&mut self.cursor, k.into_boxed_slice());
try!(cursor.First().map_err(elmo::wrap_err));
while cursor.IsValid() {
{
let k = try!(cursor.KeyRef().map_err(elmo::wrap_err));
let (_, record_id) = try!(decode_key_record(&k));
let ba_record_id = u64_to_boxed_varint(record_id);
let v = try!(cursor.LiveValueRef().map_err(elmo::wrap_err));
let v = try!(v.map(lsm_map_to_bson).map_err(elmo::wrap_err));
let entries = try!(elmo::get_index_entries(&v, &normspec, &weights, &info.options));
let ba_collection_id = u64_to_boxed_varint(collection_id);
let ba_index_id = u64_to_boxed_varint(index_id);
for vals in entries {
// each entry gets a forward key plus a backlink used by deletes
let (index_entry, backlink) = try!(Self::make_index_entry_pair(&ba_collection_id, &ba_index_id, &ba_record_id, vals, unique));
self.pending.insert(index_entry, lsm::Blob::Array(ba_record_id.clone()));
self.pending.insert(backlink, lsm::Blob::Array(box []));
}
}
try!(cursor.Next().map_err(elmo::wrap_err));
}
// now store the index id to properties
let k = encode_key_index_id_to_properties(collection_id, index_id);
let mut properties = bson::Document::new();
properties.set_string("n", info.name);
properties.set_document("s", info.spec);
properties.set_document("o", info.options);
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(properties.to_bson_array().into_boxed_slice()));
Ok(true)
}
}
}
// Create each requested index, returning for each whether it was new.
fn base_create_indexes(&mut self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
    let mut created = Vec::with_capacity(what.len());
    for info in what {
        created.push(try!(self.create_index(info)));
    }
    Ok(created)
}
// Drop a collection: its name mapping, properties, index metadata,
// records, index entries, and backlinks.  Returns whether it existed.
fn base_drop_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
let k = encode_key_name_to_collection_id(db, coll);
match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
None => Ok(false),
Some(collection_id) => {
// remove the name -> id mapping itself
self.pending.insert(k, lsm::Blob::Tombstone);
// all of the following tags are followed immediately by the
// collection_id, so we can delete by prefix:
try!(self.delete_by_collection_id_prefix(COLLECTION_ID_TO_PROPERTIES, collection_id));
try!(self.delete_by_collection_id_prefix(NAME_TO_INDEX_ID, collection_id));
try!(self.delete_by_collection_id_prefix(INDEX_ID_TO_PROPERTIES, collection_id));
try!(self.delete_by_collection_id_prefix(RECORD, collection_id));
try!(self.delete_by_collection_id_prefix(INDEX_ENTRY, collection_id));
try!(self.delete_by_collection_id_prefix(RECORD_ID_TO_INDEX_ENTRY, collection_id));
Ok(true)
},
}
}
// Rename a collection by remapping its name key and rewriting the d/c
// fields in its properties record.  Optionally drops the target first.
// Renaming a nonexistent source creates the target instead.
fn base_rename_collection(&mut self, old_name: &str, new_name: &str, drop_target: bool) -> Result<(bool, u64)> {
let (old_db, old_coll) = try!(bson::split_name(old_name));
let (new_db, new_coll) = try!(bson::split_name(new_name));
// jstests/core/rename8.js seems to think that renaming to/from a system collection is illegal unless
// that collection is system.users, which is "whitelisted". for now, we emulate this behavior, even
// though system.users isn't supported.
if old_coll != "system.users" && old_coll.starts_with("system.") {
return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed.")))
}
if new_coll != "system.users" && new_coll.starts_with("system.") {
return Err(elmo::Error::Misc(String::from("renameCollection with a system collection not allowed.")))
}
if drop_target {
let _deleted = try!(self.base_drop_collection(new_db, new_coll));
}
let k = encode_key_name_to_collection_id(old_db, old_coll);
match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
None => {
// source doesn't exist: just create the target
let created = try!(self.base_create_collection(new_db, new_coll, bson::Document::new()));
Ok(created)
},
Some(collection_id) => {
// remap the name key: tombstone the old, point the new at the same id
self.pending.insert(k, lsm::Blob::Tombstone);
let k = encode_key_name_to_collection_id(new_db, new_coll);
self.pending.insert(k, lsm::Blob::Array(u64_to_boxed_varint(collection_id)));
let k = encode_key_collection_id_to_properties(collection_id);
match try!(get_value_for_key_as_bson(&mut self.cursor, &k)) {
Some(mut collection_properties) => {
collection_properties.set_str("d", new_db);
collection_properties.set_str("c", new_coll);
// TODO assert that "o" (options) is already there
// collection_properties.set_document("o", options);
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(collection_properties.to_bson_array().into_boxed_slice()));
},
None => {
// TODO this should not be possible
},
}
Ok((false, collection_id))
}
}
}
// Drop every collection belonging to db_to_delete; true if any existed.
fn base_drop_database(&mut self, db_to_delete: &str) -> Result<bool> {
    let collections = try!(self.myconn.base_list_collections(&mut self.cursor));
    let mut dropped_any = false;
    for (_, db, coll) in collections {
        if db == db_to_delete {
            try!(self.base_drop_collection(&db, &coll));
            dropped_any = true;
        }
    }
    Ok(dropped_any)
}
// Drop one index by name.  Returns false when either the collection or
// the index does not exist.
fn base_drop_index(&mut self, db: &str, coll: &str, name: &str) -> Result<bool> {
    // resolve the collection; bail early when it's missing
    let collection_id =
        match try!(get_value_for_key_as_varint(&mut self.cursor, &encode_key_name_to_collection_id(&db, &coll))) {
            None => return Ok(false),
            Some(id) => id,
        };
    // resolve the index id; bail early when it's missing
    let k = encode_key_name_to_index_id(collection_id, name);
    let index_id =
        match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
            None => return Ok(false),
            Some(id) => id,
        };
    // remove the name->id mapping and everything keyed by this index id
    self.pending.insert(k.into_boxed_slice(), lsm::Blob::Tombstone);
    try!(self.delete_by_index_id_prefix(INDEX_ID_TO_PROPERTIES, collection_id, index_id));
    try!(self.delete_by_index_id_prefix(INDEX_ENTRY, collection_id, index_id));
    try!(self.delete_by_index_id_prefix(RECORD_ID_TO_INDEX_ENTRY, collection_id, index_id));
    Ok(true)
}
// Create a collection if it does not exist.  Returns (created, id).
// Unless options says autoIndexId:false, also creates the unique _id
// index metadata.
fn base_create_collection(&mut self, db: &str, coll: &str, options: bson::Document) -> Result<(bool, u64)> {
let k = encode_key_name_to_collection_id(db, coll);
match try!(get_value_for_key_as_varint(&mut self.cursor, &k)) {
Some(id) => Ok((false, id)),
None => {
let collection_id = try!(self.use_next_collection_id());
self.pending.insert(k, lsm::Blob::Array(u64_to_boxed_varint(collection_id)));
// create mongo index for _id
match options.get("autoIndexId") {
Some(&bson::Value::BBoolean(false)) => {
// caller explicitly opted out of the _id index
},
_ => {
let index_id = PRIMARY_INDEX_ID;
let k = encode_key_name_to_index_id(collection_id, "_id_");
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(u64_to_boxed_varint(index_id)));
let k = encode_key_index_id_to_properties(collection_id, index_id);
let mut properties = bson::Document::new();
properties.set_str("n", "_id_");
let spec = bson::Document {pairs: vec![(String::from("_id"), bson::Value::BInt32(1))]};
let options = bson::Document {pairs: vec![(String::from("unique"), bson::Value::BBoolean(true))]};
properties.set_document("s", spec);
properties.set_document("o", options);
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(properties.to_bson_array().into_boxed_slice()));
},
}
// store the collection's own properties: db name, coll name, options
let k = encode_key_collection_id_to_properties(collection_id);
let mut properties = bson::Document::new();
properties.set_str("d", db);
properties.set_str("c", coll);
properties.set_document("o", options);
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(properties.to_bson_array().into_boxed_slice()));
Ok((true, collection_id))
},
}
}
// Tombstone every index entry (and its backlink) that involves this
// record, for each of the given indexes.
fn update_indexes_delete(&mut self, indexes: &Vec<MyIndexPrep>, ba_collection_id: &Box<[u8]>, ba_record_id: &Box<[u8]>) -> Result<()> {
    for ndx in indexes {
        // delete all index entries (and their back links) which involve this record_id.
        // this *could* be done by simply iterating over all the index entries,
        // unpacking each one, seeing if the record id matches, and remove it if so, etc.
        // back links make it faster, especially when the index is large.
        let mut backlink_prefix = vec![];
        backlink_prefix.push(RECORD_ID_TO_INDEX_ENTRY);
        backlink_prefix.push_all(ba_collection_id);
        push_varint(&mut backlink_prefix, ndx.index_id);
        backlink_prefix.push_all(ba_record_id);
        // TODO maybe store all the backlinks for a given record in a single
        // value? we could do a SeekRef EQ search instead? and just one for
        // all indexes for this record? but delete of an index would get much
        // harder. and add an index would require a lot of extra work to
        // rewrite all the backlinks, rather than just adding one for the new
        // index.
        // TODO
        // maybe we shouldn't have backlinks? maybe we should just take the record we
        // are deleting, generate all the index entries from it, and then delete each
        // one?
        let mut cursor = lsm::PrefixCursor::new(&mut self.cursor, backlink_prefix.into_boxed_slice());
        try!(cursor.First().map_err(elmo::wrap_err));
        while cursor.IsValid() {
            {
                let k_backlink = try!(cursor.KeyRef().map_err(elmo::wrap_err));
                // only the embedded index-entry key is needed; the three id
                // fields were unused in the original (warnings) and are now
                // explicitly ignored
                let (_, _, _, k_index_entry) = try!(decode_key_backlink(&k_backlink));
                self.pending.insert(k_backlink.into_boxed_slice(), lsm::Blob::Tombstone);
                self.pending.insert(k_index_entry, lsm::Blob::Tombstone);
            }
            try!(cursor.Next().map_err(elmo::wrap_err));
        }
    }
    Ok(())
}
// Build the pair of keys for one index entry:
// - forward entry: [INDEX_ENTRY, collection_id, index_id, encoded vals,
//   (+ record_id unless unique)], whose value is the record id
// - backlink: [RECORD_ID_TO_INDEX_ENTRY, collection_id, index_id,
//   record_id, forward_entry], embedding a full copy of the forward key so
//   deletes can find every entry for a record
fn make_index_entry_pair(ba_collection_id: &Box<[u8]>, ba_index_id: &Box<[u8]>, ba_record_id: &Box<[u8]>, vals: Vec<(bson::Value, bool)>, unique: bool) -> Result<(Box<[u8]>, Box<[u8]>)> {
let vref = vals.iter().map(|&(ref v,neg)| (v,neg)).collect::<Vec<_>>();
let k = bson::Value::encode_multi_for_index(&vref, None);
// TODO capacity
let mut index_entry = vec![];
index_entry.push(INDEX_ENTRY);
index_entry.push_all(ba_collection_id);
index_entry.push_all(ba_index_id);
index_entry.push_all(&k);
if !unique {
// non-unique indexes append the record id so equal values still get
// distinct keys
index_entry.push_all(&ba_record_id);
}
// the backlink copies the bytes of the forward entry; nothing is moved
// until the final into_boxed_slice calls below.  (an earlier comment
// claimed the backward entry had to be built first; it does not.)
let mut backlink = vec![];
backlink.push(RECORD_ID_TO_INDEX_ENTRY);
backlink.push_all(ba_collection_id);
backlink.push_all(&ba_index_id);
backlink.push_all(ba_record_id);
backlink.push_all(&index_entry);
Ok((index_entry.into_boxed_slice(), backlink.into_boxed_slice()))
}
// Insert index entries (and backlinks) for document v into each index.
fn update_indexes_insert(&mut self, indexes: &Vec<MyIndexPrep>, ba_collection_id: &Box<[u8]>, ba_record_id: &Box<[u8]>, v: &bson::Document) -> Result<()> {
for ndx in indexes {
let entries = try!(elmo::get_index_entries(&v, &ndx.normspec, &ndx.weights, &ndx.options));
// TODO don't look this up here. store it in the cached info.
let unique =
match ndx.options.get("unique") {
Some(&bson::Value::BBoolean(b)) => b,
_ => false,
};
// TODO store this in the cache?
let ba_index_id = u64_to_boxed_varint(ndx.index_id);
for vals in entries {
// forward entry maps to the record id; backlink carries no value
let (index_entry, backlink) = try!(Self::make_index_entry_pair(ba_collection_id, &ba_index_id, ba_record_id, vals, unique));
self.pending.insert(index_entry, lsm::Blob::Array(ba_record_id.clone()));
self.pending.insert(backlink, lsm::Blob::Array(box []));
}
}
Ok(())
}
// Run one lsm merge over the given level/segment bounds and commit the
// merged segment if one was produced.  Returns whether a merge happened.
fn merge(&self, min_level: u32, max_level: u32, min_segs: usize, max_segs: usize) -> Result<bool> {
let r = self.myconn.conn.merge(min_level, max_level, min_segs, max_segs).map_err(elmo::wrap_err);
if r.is_err() {
// log before propagating so the failure is visible even if a caller
// swallows the error
println!("from merge: {:?}", r);
}
let r = try!(r);
match r {
Some(seg) => {
//println!("{}: merged segment: {}", level, seg);
try!(self.tx.commitMerge(seg).map_err(elmo::wrap_err));
Ok(true)
},
None => {
//println!("{}: no merge needed", level);
Ok(false)
},
}
}
// Repeatedly merge segments level by level (0..16), draining each level
// before moving up, and stopping at the first level that had nothing to
// merge.  (The original kept a merge counter that was never read; that
// dead code is removed.)
fn automerge(&self) -> Result<()> {
    for i in 0 .. 16 {
        let mut at_least_once_in_this_level = false;
        loop {
            let merged = try!(self.merge(i, i, 4, 8));
            if !merged {
                break;
            }
            at_least_once_in_this_level = true;
        }
        // matches original control flow: stop entirely once a level
        // produced no merges at all
        if !at_least_once_in_this_level {
            break;
        }
    }
    // TODO consider something like:
    //try!(self.merge(0, 40, 8, 16));
    Ok(())
}
}
impl<'a> elmo::StorageWriter for MyWriter<'a> {
// Overwrite an existing record (located via its "_id") and rebuild its
// index entries.  Errors if v has no _id or no matching record exists.
fn update(&mut self, db: &str, coll: &str, v: &bson::Document) -> Result<()> {
match v.get("_id") {
None => Err(elmo::Error::Misc(String::from("cannot update without _id"))),
Some(id) => {
let cw = try!(self.get_collection_writer(db, coll));
match try!(find_record(&mut self.cursor, cw.collection_id, &id)) {
None => {
Err(elmo::Error::Misc(String::from("update but does not exist")))
},
Some(record_id) => {
// TODO capacity
let mut k = vec![];
k.push(RECORD);
let ba_collection_id = u64_to_boxed_varint(cw.collection_id);
k.push_all(&ba_collection_id);
let ba_record_id = u64_to_boxed_varint(record_id);
k.push_all(&ba_record_id);
// overwrite the record in place, then refresh its index entries
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(v.to_bson_array().into_boxed_slice()));
try!(self.update_indexes_delete(&cw.indexes, &ba_collection_id, &ba_record_id));
try!(self.update_indexes_insert(&cw.indexes, &ba_collection_id, &ba_record_id, v));
Ok(())
},
}
},
}
}
// Delete the record with the given _id.  Returns whether it existed.
fn delete(&mut self, db: &str, coll: &str, id: &bson::Value) -> Result<bool> {
let cw = try!(self.get_collection_writer(db, coll));
match try!(find_record(&mut self.cursor, cw.collection_id, &id)) {
Some(record_id) => {
// TODO capacity
let mut k = vec![];
k.push(RECORD);
let ba_collection_id = u64_to_boxed_varint(cw.collection_id);
k.push_all(&ba_collection_id);
let ba_record_id = u64_to_boxed_varint(record_id);
k.push_all(&ba_record_id);
// tombstone the record and clean up its index entries/backlinks
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Tombstone);
try!(self.update_indexes_delete(&cw.indexes, &ba_collection_id, &ba_record_id));
Ok(true)
},
None => {
Ok(false)
},
}
}
// Insert a new record under a freshly allocated record id and create its
// index entries.
fn insert(&mut self, db: &str, coll: &str, v: &bson::Document) -> Result<()> {
let cw = try!(self.get_collection_writer(db, coll));
// TODO capacity
let mut k = vec![];
k.push(RECORD);
let ba_collection_id = u64_to_boxed_varint(cw.collection_id);
k.push_all(&ba_collection_id);
let record_id = try!(self.use_next_record_id(cw.collection_id));
let ba_record_id = u64_to_boxed_varint(record_id);
k.push_all(&ba_record_id);
self.pending.insert(k.into_boxed_slice(), lsm::Blob::Array(v.to_bson_array().into_boxed_slice()));
try!(self.update_indexes_insert(&cw.indexes, &ba_collection_id, &ba_record_id, v));
Ok(())
}
// Flush pending writes as a single lsm segment, commit it, then attempt
// merges.  A writer with nothing pending commits as a no-op.
fn commit(mut self: Box<Self>) -> Result<()> {
if !self.pending.is_empty() {
// take the pending map, leaving an empty one behind
let pending = std::mem::replace(&mut self.pending, HashMap::new());
let g = try!(self.myconn.conn.WriteSegment2(pending).map_err(elmo::wrap_err));
try!(self.tx.commitSegments(vec![g]).map_err(elmo::wrap_err));
try!(self.automerge());
}
Ok(())
}
// Nothing to undo: writes are buffered in self.pending and no segment has
// been written yet, so dropping the writer discards them.  (The original
// took `mut self` without mutating it; the needless `mut` is removed.)
fn rollback(self: Box<Self>) -> Result<()> {
    Ok(())
}
// Create a collection; true if it did not already exist.
fn create_collection(&mut self, db: &str, coll: &str, options: bson::Document) -> Result<bool> {
    self.base_create_collection(db, coll, options).map(|(created, _)| created)
}
// Drop a collection; true if it existed.
fn drop_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
self.base_drop_collection(db, coll)
}
// Create indexes; one created-flag per requested index.
fn create_indexes(&mut self, what: Vec<elmo::IndexInfo>) -> Result<Vec<bool>> {
self.base_create_indexes(what)
}
// Rename a collection; true if the target had to be created fresh.
fn rename_collection(&mut self, old_name: &str, new_name: &str, drop_target: bool) -> Result<bool> {
    self.base_rename_collection(old_name, new_name, drop_target).map(|(created, _)| created)
}
// Drop one index by name; true if it existed.
fn drop_index(&mut self, db: &str, coll: &str, name: &str) -> Result<bool> {
self.base_drop_index(db, coll, name)
}
// Drop every collection in the db; true if any existed.
fn drop_database(&mut self, db: &str) -> Result<bool> {
self.base_drop_database(db)
}
// Remove all records from a collection; true if it was created by the call.
fn clear_collection(&mut self, db: &str, coll: &str) -> Result<bool> {
self.base_clear_collection(db, coll)
}
}
// TODO do we need to declare that StorageWriter must implement Drop ?
impl<'a> Drop for MyWriter<'a> {
fn drop(&mut self) {
// TODO rollback
// nothing happens here today; pending (uncommitted) writes are
// simply discarded along with the writer
}
}
// TODO do we need to declare that StorageReader must implement Drop ?
// Intentionally empty: MyReader needs no explicit cleanup here.
impl Drop for MyReader {
fn drop(&mut self) {
}
}
// Intentionally empty: MyCollectionReader needs no explicit cleanup here.
impl Drop for MyCollectionReader {
fn drop(&mut self) {
}
}
// MyCollectionReader is just a thin Iterator wrapper over its boxed row
// sequence.
impl Iterator for MyCollectionReader {
type Item = Result<elmo::Row>;
fn next(&mut self) -> Option<Self::Item> {
self.seq.next()
}
}
// Read-only storage operations for MyReader; every method delegates to
// the shared connection.
impl elmo::StorageBase for MyReader {
fn get_reader_collection_scan(&self, db: &str, coll: &str) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
let rdr = try!(self.myconn.get_reader_collection_scan(db, coll));
Ok(box rdr)
}
fn get_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
let rdr = try!(self.myconn.get_reader_text_index_scan(ndx, eq, terms));
Ok(box rdr)
}
fn get_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
let rdr = try!(self.myconn.get_reader_regular_index_scan(ndx, bounds));
Ok(box rdr)
}
fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
self.myconn.base_list_collection_infos()
}
fn list_indexes(&self, ns: Option<(&str, &str)>) -> Result<Vec<elmo::IndexInfo>> {
// ns narrows the listing to one collection when present
match ns {
Some((db, coll)) => {
self.myconn.list_index_infos_for_collection(db, coll)
},
None => {
self.myconn.list_all_index_infos()
},
}
}
}
// Consuming scan constructors for MyReader; each delegates to the shared
// connection.  (The original bound `mut self` in the first method without
// ever mutating it; the needless `mut` is removed.)
impl elmo::StorageReader for MyReader {
    fn into_reader_collection_scan(self: Box<Self>, db: &str, coll: &str) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_reader_collection_scan(db, coll));
        Ok(box rdr)
    }
    fn into_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_reader_text_index_scan(ndx, eq, terms));
        Ok(box rdr)
    }
    fn into_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_reader_regular_index_scan(ndx, bounds));
        Ok(box rdr)
    }
}
// A writer can also read: identical delegation to `myconn` as the MyReader
// impl of StorageBase above.
impl<'a> elmo::StorageBase for MyWriter<'a> {
    fn get_reader_collection_scan(&self, db: &str, coll: &str) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_reader_collection_scan(db, coll));
        Ok(box rdr)
    }
    fn get_reader_text_index_scan(&self, ndx: &elmo::IndexInfo, eq: elmo::QueryKey, terms: Vec<elmo::TextQueryTerm>) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_reader_text_index_scan(ndx, eq, terms));
        Ok(box rdr)
    }
    fn get_reader_regular_index_scan(&self, ndx: &elmo::IndexInfo, bounds: elmo::QueryBounds) -> Result<Box<Iterator<Item=Result<elmo::Row>> + 'static>> {
        let rdr = try!(self.myconn.get_reader_regular_index_scan(ndx, bounds));
        Ok(box rdr)
    }
    fn list_collections(&self) -> Result<Vec<elmo::CollectionInfo>> {
        self.myconn.base_list_collection_infos()
    }
    // Same namespace semantics as the reader: Some((db, coll)) narrows to one
    // collection, None lists everything.
    fn list_indexes(&self, ns: Option<(&str, &str)>) -> Result<Vec<elmo::IndexInfo>> {
        match ns {
            Some((db, coll)) => {
                self.myconn.list_index_infos_for_collection(db, coll)
            },
            None => {
                self.myconn.list_all_index_infos()
            },
        }
    }
}
impl elmo::StorageConnection for MyPublicConn {
    // Begin a write transaction: acquire the storage-level write lock and a
    // cursor, and start with empty per-transaction caches (pending changes,
    // cached max ids) that are filled lazily during the write.
    fn begin_write<'a>(&'a self) -> Result<Box<elmo::StorageWriter + 'a>> {
        let tx = try!(self.myconn.conn.GetWriteLock().map_err(elmo::wrap_err));
        let cursor = try!(self.myconn.conn.OpenCursor().map_err(elmo::wrap_err));
        let w = MyWriter {
            myconn: self.myconn.clone(),
            tx: tx,
            pending: HashMap::new(),
            max_collection_id: None,
            max_record_id: HashMap::new(),
            max_index_id: HashMap::new(),
            cw: None,
            cursor: cursor,
        };
        Ok(box w)
    }
    // Begin a read: readers only need a cloned shared handle to the
    // connection, so no lock is taken here.
    fn begin_read(&self) -> Result<Box<elmo::StorageReader + 'static>> {
        let r = MyReader {
            myconn: self.myconn.clone(),
        };
        Ok(box r)
    }
}
// Open the underlying lsm database file with default settings.
fn base_connect(name: &str) -> lsm::Result<lsm::db> {
    lsm::db::new(String::from(name), lsm::DEFAULT_SETTINGS)
}
// Public entry point: open the lsm file at `name` and wrap it in the
// reference-counted connection type shared by readers and writers.
pub fn connect(name: &str) -> Result<Box<elmo::StorageConnection>> {
    let conn = try!(base_connect(name).map_err(elmo::wrap_err));
    let c = MyConn {
        conn: conn,
    };
    let c = MyPublicConn {
        myconn: std::rc::Rc::new(c)
    };
    Ok(box c)
}
// Factory that opens connections to one database file; Clone so each thread
// can carry its own copy (see `clone_for_new_thread`).
#[derive(Clone)]
pub struct MyFactory {
    // Path of the database file handed to `connect`.
    filename: String,
}
impl MyFactory {
    // Build a factory that will open connections against `filename`.
    pub fn new(filename: String) -> MyFactory {
        let factory = MyFactory {
            filename: filename,
        };
        factory
    }
}
impl elmo::ConnectionFactory for MyFactory {
    // Open a new elmo connection backed by this factory's file.
    fn open(&self) -> elmo::Result<elmo::Connection> {
        let conn = try!(connect(&self.filename));
        let conn = elmo::Connection::new(conn);
        Ok(conn)
    }
    // Hand another thread its own boxed clone of this factory.
    fn clone_for_new_thread(&self) -> Box<elmo::ConnectionFactory + Send> {
        box self.clone()
    }
}
|
use anyhow::Result;
use clap::Parser;
use regex::Regex;
use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufRead};
use std::path::PathBuf;
use std::time::Instant;
use polars::frame::DataFrame;
use polars::prelude::*;
// Command-line arguments. The `///` doc comments double as the clap help
// text shown to users, so they are deliberately left untouched.
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// List of input files. Ideally with headers
    #[clap(short, long, multiple_values(true))]
    input: Vec<PathBuf>,
    /// An output file
    #[clap(short, long)]
    output: String,
    /// The column to select. If files have no headers, use "column_N"
    #[clap(short, long, multiple_values(true))]
    select: Vec<String>,
    /// The size of chunks to process at once
    #[clap(short, long, default_value_t = 100)]
    chunksize: usize,
}
// Read one CSV/TSV file into a DataFrame, treating "NA" as null in every
// column, then hstack a constant "experiment" column derived from the
// first three '-'-separated fields of the file name.
fn load_data(path: &PathBuf, delim: u8) -> Result<DataFrame> {
    let mut df: DataFrame = CsvReader::from_path(path)?
        .has_header(true)
        .with_delimiter(delim)
        .with_null_values(Some(NullValues::AllColumns("NA".to_string())))
        .infer_schema(None)
        .finish()
        .unwrap();
    // Derive the experiment name from the file name, e.g. "E-MTAB-1234-...".
    let name_parts: Vec<&str> =
        path.file_name().unwrap().to_str().unwrap().split('-').collect();
    let experiment = name_parts[0..=2].join("-");
    // One copy of the experiment name per row of the frame.
    let exp_col: Series = Series::new("experiment", vec![experiment; df.height()]);
    df.hstack_mut(&[exp_col]).unwrap();
    Ok(df)
}
// Convert a utf8 column whose cells are comma-separated number lists into a
// boolean column: a cell becomes `true` when the median of its numbers is
// strictly greater than zero.
fn baseline_get_median_gt_zero(str_val: &Series) -> Series {
    let medians: Vec<bool> = str_val
        .utf8()
        .unwrap()
        .into_iter()
        .map(|cell| {
            let values: Vec<f64> = cell
                .unwrap()
                .split(',')
                .map(|v| v.parse::<f64>().unwrap())
                .collect();
            Series::from_iter(values).median().unwrap() > 0.0
        })
        .collect();
    Series::from_iter(medians)
}
// Dispatch to the filter matching the experiment type detected by the caller.
fn filter_input(input: &mut DataFrame, baseline: bool) -> DataFrame {
    match baseline {
        true => filter_baseline(input),
        false => filter_differential(input),
    }
}
// This is a baseline experiment. Columns whose names start with a lower-case
// 'g' hold measurement lists; each is converted in place to a boolean
// "median > 0" column, and rows are kept when at least one such column is
// true.
fn filter_baseline(input: &mut DataFrame) -> DataFrame {
    let mut meas = Vec::<String>::new();
    for name in input.get_column_names_owned() {
        if !name.starts_with('g') {
            continue;
        }
        input.apply(&name, baseline_get_median_gt_zero).unwrap();
        meas.push(name);
    }
    println!("{:?}", meas);
    // `meas` now holds every gN column name; keep rows where any is true.
    let predicates: Vec<Expr> = meas.into_iter().map(|m| col(&m)).collect();
    input.clone().lazy().filter(any_exprs(predicates)).collect().unwrap()
}
// Locate the p-value and log2-fold-change columns by pattern (falling back to
// the literal names "p-value" / "log2fold" if nothing matches), then keep the
// rows with a defined p-value and a non-zero fold change.
fn filter_differential(input: &DataFrame) -> DataFrame {
    let pv_regex = Regex::new(r".*p-value.*").unwrap();
    let log_fold_regex = Regex::new(r".*log2.*").unwrap();
    let mut pv: String = "p-value".to_string();
    let mut lf: String = "log2fold".to_string();
    for name in input.get_column_names_owned() {
        if pv_regex.is_match(&name) {
            pv = name;
        } else if log_fold_regex.is_match(&name) {
            lf = name
        }
    }
    let keep = [col(&pv).is_not_null(), col(&lf).neq(lit(0))];
    input.clone().lazy().filter(all_exprs(keep)).collect().unwrap()
}
// Reduce one chunk of input files into a single DataFrame holding only the
// selected (gene) columns, deduplicated across the chunk's files.
fn load_chunk(
    paths: &mut Vec<PathBuf>,
    select: &mut Vec<String>,
) -> Result<DataFrame, anyhow::Error> {
    /*
    Work with the chunks of input to reduce them into a single dataframe of the genes we want from
    that particular set of experiments. Should return a single dataframe
    */
    let mut output: Option<DataFrame> = None;
    while let Some(infile) = paths.pop() {
        // Load the input Csv: tab-delimited for .tsv, comma otherwise.
        let mut input: DataFrame = if infile.extension().unwrap() == "tsv" {
            load_data(&infile, b'\t')
        } else {
            load_data(&infile, b',')
        }
        .unwrap_or_else(|error| match error.downcast_ref::<PolarsError>() {
            Some(PolarsError::Io(_string)) => panic!("Input file does not exist! {:?}", infile),
            _ => panic!("An error occurred! {:?}", error),
        });
        // Rename columns to remove . in the names. Now also remove spaces
        let mut new_cols = Vec::new();
        for nm in input.get_column_names().iter() {
            new_cols.push(nm.replace('.', "").replace(' ', ""));
        }
        // Only rewrite the header when something actually changed.
        if new_cols != input.get_column_names() {
            input.set_column_names(&new_cols).unwrap_or_else(|error| {
                panic!("Failed to set column names for some reason {:?}", error);
            });
        }
        // Need to detect which type of experiment it is - differential or baseline.
        // "tpms" in the file name marks a baseline (TPM) experiment.
        let type_re = Regex::new(r"tpms").unwrap();
        let filtered = filter_input(&mut input, type_re.is_match(infile.to_str().unwrap()));
        // Drop everything from the input except the genes
        let genes: DataFrame = filtered.select(select.iter()).unwrap_or_else(|error| match error {
            PolarsError::NotFound(_string) => {
                panic!(
                    "{:?} was not found in the header, does the file have a header?\n{:?}",
                    select,
                    input.get_column_names()
                );
            },
            _ => panic!("Error selecting column from input: {:?}", error),
        });
        // First file seeds the output; later files are appended then deduped.
        output = match output {
            None => {
                let output_df = genes.clone();
                Some(output_df)
            },
            Some(mut output_df) => {
                // genes =/= output, so we are handling a new file
                output_df.extend(&genes).unwrap_or_else(|error| {
                    panic!("Failed to extend the output dataframe. {:?}", error)
                });
                output_df =
                    output_df.unique(None, UniqueKeepStrategy::First).unwrap_or_else(|error| {
                        panic!("Failed to parse selected column for uniqueness. {:?}", error)
                    });
                Some(output_df)
            },
        }
    }
    // NOTE(review): panics when `paths` is empty — confirm callers never pass
    // an empty chunk.
    Ok(output.unwrap())
}
// Scan each experiment-design file for the first "Taxon_<id>" occurrence and
// map the experiment name (the first three '-'-separated fields of the file
// name, with any ".condensed" marker removed) to that taxon id. Files with no
// match add nothing; unreadable lines are skipped.
fn get_taxid(paths: &mut Vec<PathBuf>, lookup_table: &mut HashMap<String, String>) {
    let tax_regex = Regex::new(r".*Taxon_([0-9]{4,})").unwrap();
    while let Some(path) = paths.pop() {
        let name_parts: Vec<&str> =
            path.file_name().unwrap().to_str().unwrap().split('-').collect();
        let exp_name = name_parts[0..=2].join("-").replace(".condensed", "");
        let file = File::open(path).unwrap();
        for line in io::BufReader::new(file).lines().flatten() {
            if let Some(caps) = tax_regex.captures(line.as_str()) {
                let taxid = caps.get(1).map_or("", |m| m.as_str());
                lookup_table.insert(exp_name, taxid.to_string());
                // Only the first match per file counts.
                break;
            }
        }
    }
}
// Entry point: split inputs into expression vs design files, reduce the
// expression files chunk-by-chunk, attach taxon ids, and write one CSV.
fn main() {
    let timer = Instant::now();
    let mut args = Args::parse();
    // Always select the experiment column alongside the user's selection.
    args.select.push("experiment".to_string());
    // separate expression data from taxa data
    let mut taxa_files: Vec<PathBuf> = Vec::new();
    let mut expr_files: Vec<PathBuf> = Vec::new();
    let re = Regex::new(r"sdrf.tsv").unwrap();
    for infile in args.input {
        if re.is_match(infile.to_str().unwrap()) {
            // File is experiment setup data, parse for taxa
            taxa_files.push(infile);
        } else {
            // Should only be expression data, parse for genes
            expr_files.push(infile);
        }
    }
    println!("{} taxa files to parse", taxa_files.len());
    println!("{} expression files to parse", expr_files.len());
    let n_files = expr_files.len();
    let mut dataframes: Vec<DataFrame> = Vec::new();
    let chunked_input: Vec<Vec<PathBuf>> =
        expr_files.chunks(args.chunksize).map(|x| x.to_vec()).collect();
    // Read everything into a big vector, one reduced DataFrame per chunk
    let mut n_chunks = 0;
    for mut files_chunk in chunked_input {
        dataframes.push(
            load_chunk(&mut files_chunk, &mut args.select)
                .unwrap_or_else(|_error| panic!("Something wrong in one of the reads, aborting")),
        );
        n_chunks += 1;
        println!("Done {} files", n_chunks * args.chunksize);
    }
    // Merge the per-chunk frames. Seed from the first chunk and extend with
    // the REST: the previous code extended with every item including the
    // first, duplicating chunk 0 in the output.
    let mut output: DataFrame = dataframes[0].clone();
    for item in dataframes.iter().skip(1) {
        output.extend(item).unwrap_or_else(|error| {
            panic!("Unable to extend the DataFrame! Out of memory? {:?}", error)
        });
    }
    // `unique` returns a new DataFrame; the previous code discarded the
    // result, so deduplication silently never happened. Assign it back.
    output = output.unique(None, UniqueKeepStrategy::First).unwrap();
    // Now parse the experiment files to get taxa
    let chunked_taxa: Vec<Vec<PathBuf>> =
        taxa_files.chunks(args.chunksize).map(|x| x.to_vec()).collect();
    let mut lookup_table: HashMap<String, String> = HashMap::new();
    for mut tax_chunk in chunked_taxa {
        get_taxid(&mut tax_chunk, &mut lookup_table);
    }
    // Now we have the experiment - taxid lookup_table, we just need to add the column to the output df.
    // Unknown experiments get an empty taxid and a single warning per column.
    let expt_col = output.select(["experiment"]).unwrap();
    let mut tax_ids: Vec<String> = Vec::with_capacity(expt_col.height());
    for ex in expt_col.iter() {
        let mut warn_flag: bool = true;
        let uhy = ex.utf8().unwrap();
        for (idx, x) in uhy.into_iter().enumerate() {
            if lookup_table.contains_key(x.unwrap()) {
                tax_ids.insert(idx, lookup_table.get(x.unwrap()).unwrap().to_string());
            } else {
                tax_ids.insert(idx, String::new());
                if warn_flag {
                    println!("Experiment {} does not name a taxon", x.unwrap());
                    warn_flag = false;
                }
            }
        }
    }
    output.hstack_mut(&[Series::new("taxid", tax_ids.into_iter())]).unwrap();
    let out_stream: File = File::create(args.output).unwrap();
    CsvWriter::new(out_stream)
        .has_header(true)
        .finish(&mut output)
        .unwrap_or_else(|error| panic!("Something wrong writing file: {:?}", error));
    println!("Processed {} in {} seconds", n_files, timer.elapsed().as_secs());
}
Don't remove spaces in column headings
Stripping spaces was collapsing distinct column names — I think I was
ending up with two GeneID columns. This may break some other things,
requiring a more elaborate solution...
use anyhow::Result;
use clap::Parser;
use regex::Regex;
use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufRead};
use std::path::PathBuf;
use std::time::Instant;
use polars::frame::DataFrame;
use polars::prelude::*;
// Command-line arguments. The `///` doc comments double as the clap help
// text shown to users, so they are deliberately left untouched.
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// List of input files. Ideally with headers
    #[clap(short, long, multiple_values(true))]
    input: Vec<PathBuf>,
    /// An output file
    #[clap(short, long)]
    output: String,
    /// The column to select. If files have no headers, use "column_N"
    #[clap(short, long, multiple_values(true))]
    select: Vec<String>,
    /// The size of chunks to process at once
    #[clap(short, long, default_value_t = 100)]
    chunksize: usize,
}
// Read one CSV/TSV file into a DataFrame ("NA" treated as null everywhere)
// and hstack a constant "experiment" column derived from the file name.
fn load_data(path: &PathBuf, delim: u8) -> Result<DataFrame> {
    let mut df: DataFrame = CsvReader::from_path(path)?
        .has_header(true)
        .with_delimiter(delim)
        .with_null_values(Some(NullValues::AllColumns("NA".to_string())))
        .infer_schema(None)
        .finish()
        .unwrap();
    // hstack the experiment name (derived from the filename) into the DataFrame:
    // the first three '-'-separated fields, repeated once per row.
    let iter_exp =
        vec![path.file_name().unwrap().to_str().unwrap().split('-').collect::<Vec<&str>>()[0..=2]
            .join("-")]
        .into_iter();
    let exp_col: Series = Series::new(
        "experiment",
        iter_exp.flat_map(|n| std::iter::repeat(n).take(df.height())).collect::<Vec<String>>(),
    );
    // exp_col.rename("experiment");
    df.hstack_mut(&[exp_col]).unwrap();
    Ok(df)
}
// Map a utf8 column of comma-separated number lists to booleans: a cell
// becomes `true` when the median of its numbers is strictly positive.
fn baseline_get_median_gt_zero(str_val: &Series) -> Series {
    let lists = str_val
        .utf8()
        .unwrap()
        .into_iter()
        .map(|x| {
            x.unwrap()
                .split(',')
                .into_iter()
                .map(|y| y.parse::<f64>().unwrap())
                .collect::<Vec<f64>>()
        });
    let medians: Vec<bool> =
        lists.into_iter().map(|x| Series::from_iter(x).median().unwrap() > 0.0).collect();
    Series::from_iter(medians)
}
// Dispatch to the filter matching the experiment type detected by the caller.
fn filter_input(input: &mut DataFrame, baseline: bool) -> DataFrame {
    if baseline {
        filter_baseline(input)
    } else {
        filter_differential(input)
    }
}
fn filter_baseline(input: &mut DataFrame) -> DataFrame {
    // This is a baseline experiment.
    // Find columns starting with lower case g, then apply the function to convert to
    // medain and select greater than zero
    let mut meas = Vec::<String>::new();
    for col in input.get_column_names_owned() {
        if col.starts_with('g') {
            input.apply(&col, baseline_get_median_gt_zero).unwrap();
            meas.push(col);
        }
    }
    println!("{:?}", meas);
    // Selection should now have all the gN column names in it;
    // keep rows where at least one gN column is true.
    input
        .clone()
        .lazy()
        .filter(any_exprs(meas.into_iter().map(|x| col(&x)).collect::<Vec<Expr>>()))
        .collect()
        .unwrap()
}
fn filter_differential(input: &DataFrame) -> DataFrame {
    // find the p value and log fold columns by pattern, falling back to the
    // literal names "p-value" / "log2fold" when nothing matches
    let pv_regex = Regex::new(r".*p-value.*").unwrap();
    let log_fold_regex = Regex::new(r".*log2.*").unwrap();
    let mut pv: String = "p-value".to_string();
    let mut lf: String = "log2fold".to_string();
    for col in input.get_column_names_owned() {
        if pv_regex.is_match(&col) {
            pv = col;
        } else if log_fold_regex.is_match(&col) {
            lf = col
        }
    }
    // Keep rows with a defined p-value and a non-zero fold change.
    input
        .clone()
        .lazy()
        .filter(all_exprs([col(&pv).is_not_null(), col(&lf).neq(lit(0))]))
        .collect()
        .unwrap()
}
fn load_chunk(
paths: &mut Vec<PathBuf>,
select: &mut Vec<String>,
) -> Result<DataFrame, anyhow::Error> {
/*
Work with the chunks of input to reduce them into a single dataframe of the genes we want from
that particular set of experiments. Should return a single dataframe
*/
let mut output: Option<DataFrame> = None;
while let Some(infile) = paths.pop() {
// Load the input Csv
let mut input: DataFrame = if infile.extension().unwrap() == "tsv" {
load_data(&infile, b'\t')
} else {
load_data(&infile, b',')
}
.unwrap_or_else(|error| match error.downcast_ref::<PolarsError>() {
Some(PolarsError::Io(_string)) => panic!("Input file does not exist! {:?}", infile),
_ => panic!("An error occurred! {:?}", error),
});
// Rename columns to remove . in the names. Now also remove spaces
let mut new_cols = Vec::new();
for nm in input.get_column_names().iter() {
new_cols.push(nm.replace('.', "");//.replace(' ', ""));
}
if new_cols != input.get_column_names() {
input.set_column_names(&new_cols).unwrap_or_else(|error| {
panic!("Failed to set column names for some reason {:?}", error);
});
}
// Need to detect which type of experiment it is - differential or baseline
let type_re = Regex::new(r"tpms").unwrap();
let filtered = filter_input(&mut input, type_re.is_match(infile.to_str().unwrap()));
// Drop everything from the input except the genes
let genes: DataFrame = filtered.select(select.iter()).unwrap_or_else(|error| match error {
PolarsError::NotFound(_string) => {
panic!(
"{:?} was not found in the header, does the file have a header?\n{:?}",
select,
input.get_column_names()
);
},
_ => panic!("Error selecting column from input: {:?}", error),
});
output = match output {
None => {
let output_df = genes.clone();
Some(output_df)
},
Some(mut output_df) => {
// genes =/= output, so we are handling a new file
output_df.extend(&genes).unwrap_or_else(|error| {
panic!("Failed to extend the output dataframe. {:?}", error)
});
output_df =
output_df.unique(None, UniqueKeepStrategy::First).unwrap_or_else(|error| {
panic!("Failed to parse selected column for uniqueness. {:?}", error)
});
Some(output_df)
},
}
}
Ok(output.unwrap())
}
// Scan each experiment-design file for the first "Taxon_<id>" occurrence and
// map the experiment name (first three '-'-separated file-name fields, minus
// any ".condensed" marker) to that taxon id.
fn get_taxid(paths: &mut Vec<PathBuf>, lookup_table: &mut HashMap<String, String>) {
    let tax_regex = Regex::new(r".*Taxon_([0-9]{4,})").unwrap();
    while let Some(path) = paths.pop() {
        let exp_name =
            path.file_name().unwrap().to_str().unwrap().split('-').collect::<Vec<&str>>()[0..=2]
                .join("-")
                .replace(".condensed", "");
        let file = File::open(path).unwrap();
        let lines = io::BufReader::new(file).lines();
        for line in lines {
            match line {
                Ok(line) => {
                    let captures = tax_regex.captures(line.as_str());
                    match captures {
                        Some(caps) => {
                            let taxid = caps.get(1).map_or("", |m| m.as_str());
                            lookup_table.insert(exp_name, taxid.to_string());
                            // Only the first match per file counts.
                            break;
                        },
                        None => {},
                    };
                },
                // Unreadable lines are skipped silently.
                Err(_idc) => {},
            }
        }
    }
}
// Entry point: split inputs into expression vs design files, reduce the
// expression files chunk-by-chunk, attach taxon ids, and write one CSV.
fn main() {
    let timer = Instant::now();
    let mut args = Args::parse();
    // Always select the experiment column alongside the user's selection.
    args.select.push("experiment".to_string());
    // separate expression data from taxa data
    let mut taxa_files: Vec<PathBuf> = Vec::new();
    let mut expr_files: Vec<PathBuf> = Vec::new();
    let re = Regex::new(r"sdrf.tsv").unwrap();
    for infile in args.input {
        if re.is_match(infile.to_str().unwrap()) {
            // File is experiment setup data, parse for taxa
            taxa_files.push(infile);
        } else {
            // Should only be expression data, parse for genes
            expr_files.push(infile);
        }
    }
    println!("{} taxa files to parse", taxa_files.len());
    println!("{} expression files to parse", expr_files.len());
    let n_files = expr_files.len();
    let mut dataframes: Vec<DataFrame> = Vec::new();
    let chunked_input: Vec<Vec<PathBuf>> =
        expr_files.chunks(args.chunksize).map(|x| x.to_vec()).collect();
    // Read everything into a big vector, one reduced DataFrame per chunk
    let mut n_chunks = 0;
    for mut files_chunk in chunked_input {
        dataframes.push(
            load_chunk(&mut files_chunk, &mut args.select)
                .unwrap_or_else(|_error| panic!("Something wrong in one of the reads, aborting")),
        );
        n_chunks += 1;
        println!("Done {} files", n_chunks * args.chunksize);
    }
    // Merge the per-chunk frames. Seed from the first chunk and extend with
    // the REST: the previous code extended with every item including the
    // first, duplicating chunk 0 in the output.
    let mut output: DataFrame = dataframes[0].clone();
    for item in dataframes.iter().skip(1) {
        output.extend(item).unwrap_or_else(|error| {
            panic!("Unable to extend the DataFrame! Out of memory? {:?}", error)
        });
    }
    // `unique` returns a new DataFrame; the previous code discarded the
    // result, so deduplication silently never happened. Assign it back.
    output = output.unique(None, UniqueKeepStrategy::First).unwrap();
    // Now parse the experiment files to get taxa
    let chunked_taxa: Vec<Vec<PathBuf>> =
        taxa_files.chunks(args.chunksize).map(|x| x.to_vec()).collect();
    let mut lookup_table: HashMap<String, String> = HashMap::new();
    for mut tax_chunk in chunked_taxa {
        get_taxid(&mut tax_chunk, &mut lookup_table);
    }
    // Now we have the experiment - taxid lookup_table, we just need to add the column to the output df.
    // Unknown experiments get an empty taxid and a single warning per column.
    let expt_col = output.select(["experiment"]).unwrap();
    let mut tax_ids: Vec<String> = Vec::with_capacity(expt_col.height());
    for ex in expt_col.iter() {
        let mut warn_flag: bool = true;
        let uhy = ex.utf8().unwrap();
        for (idx, x) in uhy.into_iter().enumerate() {
            if lookup_table.contains_key(x.unwrap()) {
                tax_ids.insert(idx, lookup_table.get(x.unwrap()).unwrap().to_string());
            } else {
                tax_ids.insert(idx, String::new());
                if warn_flag {
                    println!("Experiment {} does not name a taxon", x.unwrap());
                    warn_flag = false;
                }
            }
        }
    }
    output.hstack_mut(&[Series::new("taxid", tax_ids.into_iter())]).unwrap();
    let out_stream: File = File::create(args.output).unwrap();
    CsvWriter::new(out_stream)
        .has_header(true)
        .finish(&mut output)
        .unwrap_or_else(|error| panic!("Something wrong writing file: {:?}", error));
    println!("Processed {} in {} seconds", n_files, timer.elapsed().as_secs());
}
|
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::Weak;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::time::Duration;
use buffer::BufferAccess;
use command_buffer::submit::SubmitAnyBuilder;
use command_buffer::submit::SubmitPresentBuilder;
use command_buffer::submit::SubmitSemaphoresWaitBuilder;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use format::Format;
use format::FormatDesc;
use image::ImageAccess;
use image::ImageDimensions;
use image::ImageLayout;
use image::ImageUsage;
use image::sys::UnsafeImage;
use image::swapchain::SwapchainImage;
use swapchain::ColorSpace;
use swapchain::CompositeAlpha;
use swapchain::PresentMode;
use swapchain::Surface;
use swapchain::SurfaceTransform;
use swapchain::SurfaceSwapchainLock;
use sync::AccessCheckError;
use sync::AccessError;
use sync::AccessFlagBits;
use sync::FlushError;
use sync::GpuFuture;
use sync::PipelineStages;
use sync::Semaphore;
use sync::SharingMode;
use check_errors;
use Error;
use OomError;
use Success;
use VulkanObject;
use VulkanPointers;
use vk;
/// Contains the swapping system and the images that can be shown on a surface.
// TODO: #[derive(Debug)] (waiting on https://github.com/aturon/crossbeam/issues/62)
pub struct Swapchain {
    device: Arc<Device>,
    surface: Arc<Surface>,
    // Raw Vulkan handle owned by this object.
    swapchain: vk::SwapchainKHR,
    // If true, that means we have used this swapchain to recreate a new swapchain. The current
    // swapchain can no longer be used for anything except presenting already-acquired images.
    //
    // We use a `Mutex` instead of an `AtomicBool` because we want to keep that locked while
    // we acquire the image.
    stale: Mutex<bool>,
    // Parameters passed to the constructor.
    num_images: u32,
    format: Format,
    color_space: ColorSpace,
    dimensions: [u32; 2],
    layers: u32,
    usage: ImageUsage,
    sharing: SharingMode,
    transform: SurfaceTransform,
    alpha: CompositeAlpha,
    mode: PresentMode,
    clipped: bool,
    // TODO: meh for Mutex
    // Weak references to the swapchain images, each paired with a flag that is
    // true while the image's layout is still undefined (i.e. it has not yet
    // been acquired for the first time; see `acquire_next_image`).
    images: Mutex<Vec<(Weak<SwapchainImage>, bool)>>,
}
impl Swapchain {
/// Builds a new swapchain. Allocates images who content can be made visible on a surface.
///
/// See also the `Surface::get_capabilities` function which returns the values that are
/// supported by the implementation. All the parameters that you pass to `Swapchain::new`
/// must be supported.
///
/// The `clipped` parameter indicates whether the implementation is allowed to discard
/// rendering operations that affect regions of the surface which aren't visible. This is
/// important to take into account if your fragment shader has side-effects or if you want to
/// read back the content of the image afterwards.
///
/// This function returns the swapchain plus a list of the images that belong to the
/// swapchain. The order in which the images are returned is important for the
/// `acquire_next_image` and `present` functions.
///
/// # Panic
///
/// - Panics if the device and the surface don't belong to the same instance.
/// - Panics if `color_attachment` is false in `usage`.
///
// TODO: remove `old_swapchain` parameter and add another function `with_old_swapchain`.
// TODO: add `ColorSpace` parameter
#[inline]
pub fn new<F, S>(device: &Arc<Device>, surface: &Arc<Surface>, num_images: u32, format: F,
dimensions: [u32; 2], layers: u32, usage: ImageUsage, sharing: S,
transform: SurfaceTransform, alpha: CompositeAlpha, mode: PresentMode,
clipped: bool, old_swapchain: Option<&Arc<Swapchain>>)
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), OomError>
where F: FormatDesc, S: Into<SharingMode>
{
Swapchain::new_inner(device, surface, num_images, format.format(),
ColorSpace::SrgbNonLinear, dimensions, layers, usage, sharing.into(),
transform, alpha, mode, clipped, old_swapchain.map(|s| &**s))
}
/// Recreates the swapchain with new dimensions.
pub fn recreate_with_dimension(&self, dimensions: [u32; 2])
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), OomError>
{
Swapchain::new_inner(&self.device, &self.surface, self.num_images, self.format,
self.color_space, dimensions, self.layers, self.usage,
self.sharing.clone(), self.transform, self.alpha, self.mode,
self.clipped, Some(self))
}
// TODO: images layouts should always be set to "PRESENT", since we have no way to switch the
// layout at present time
fn new_inner(device: &Arc<Device>, surface: &Arc<Surface>, num_images: u32, format: Format,
color_space: ColorSpace, dimensions: [u32; 2], layers: u32, usage: ImageUsage,
sharing: SharingMode, transform: SurfaceTransform, alpha: CompositeAlpha,
mode: PresentMode, clipped: bool, old_swapchain: Option<&Swapchain>)
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), OomError>
{
// Checking that the requested parameters match the capabilities.
let capabilities = try!(surface.get_capabilities(&device.physical_device()));
// TODO: return errors instead
assert!(num_images >= capabilities.min_image_count);
if let Some(c) = capabilities.max_image_count { assert!(num_images <= c) };
assert!(capabilities.supported_formats.iter().any(|&(f, c)| f == format && c == color_space));
assert!(dimensions[0] >= capabilities.min_image_extent[0]);
assert!(dimensions[1] >= capabilities.min_image_extent[1]);
assert!(dimensions[0] <= capabilities.max_image_extent[0]);
assert!(dimensions[1] <= capabilities.max_image_extent[1]);
assert!(layers >= 1 && layers <= capabilities.max_image_array_layers);
assert!((usage.to_usage_bits() & capabilities.supported_usage_flags.to_usage_bits()) == usage.to_usage_bits());
assert!(capabilities.supported_transforms.supports(transform));
assert!(capabilities.supported_composite_alpha.supports(alpha));
assert!(capabilities.present_modes.supports(mode));
// If we recreate a swapchain, make sure that the surface is the same.
if let Some(sc) = old_swapchain {
// TODO: return proper error instead of panicing?
assert_eq!(surface.internal_object(), sc.surface.internal_object());
}
// Checking that the surface doesn't already have a swapchain.
if old_swapchain.is_none() {
// TODO: return proper error instead of panicing?
let has_already = surface.flag().swap(true, Ordering::AcqRel);
if has_already { panic!("The surface already has a swapchain alive"); }
}
// FIXME: check that the device and the surface belong to the same instance
let vk = device.pointers();
assert!(device.loaded_extensions().khr_swapchain); // TODO: return error instead
assert!(usage.color_attachment);
if let Some(ref old_swapchain) = old_swapchain {
*old_swapchain.stale.lock().unwrap() = false;
}
let swapchain = unsafe {
let (sh_mode, sh_count, sh_indices) = match sharing {
SharingMode::Exclusive(_) => (vk::SHARING_MODE_EXCLUSIVE, 0, ptr::null()),
SharingMode::Concurrent(ref ids) => (vk::SHARING_MODE_CONCURRENT, ids.len() as u32,
ids.as_ptr()),
};
let infos = vk::SwapchainCreateInfoKHR {
sType: vk::STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
pNext: ptr::null(),
flags: 0, // reserved
surface: surface.internal_object(),
minImageCount: num_images,
imageFormat: format as u32,
imageColorSpace: color_space as u32,
imageExtent: vk::Extent2D { width: dimensions[0], height: dimensions[1] },
imageArrayLayers: layers,
imageUsage: usage.to_usage_bits(),
imageSharingMode: sh_mode,
queueFamilyIndexCount: sh_count,
pQueueFamilyIndices: sh_indices,
preTransform: transform as u32,
compositeAlpha: alpha as u32,
presentMode: mode as u32,
clipped: if clipped { vk::TRUE } else { vk::FALSE },
oldSwapchain: if let Some(ref old_swapchain) = old_swapchain {
old_swapchain.swapchain
} else {
0
},
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateSwapchainKHR(device.internal_object(), &infos,
ptr::null(), &mut output)));
output
};
let swapchain = Arc::new(Swapchain {
device: device.clone(),
surface: surface.clone(),
swapchain: swapchain,
stale: Mutex::new(false),
num_images: num_images,
format: format,
color_space: color_space,
dimensions: dimensions,
layers: layers,
usage: usage.clone(),
sharing: sharing,
transform: transform,
alpha: alpha,
mode: mode,
clipped: clipped,
images: Mutex::new(Vec::new()), // Filled below.
});
let images = unsafe {
let mut num = 0;
try!(check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
swapchain.swapchain, &mut num,
ptr::null_mut())));
let mut images = Vec::with_capacity(num as usize);
try!(check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
swapchain.swapchain, &mut num,
images.as_mut_ptr())));
images.set_len(num as usize);
images
};
let images = images.into_iter().enumerate().map(|(id, image)| unsafe {
let unsafe_image = UnsafeImage::from_raw(device, image, usage.to_usage_bits(), format,
ImageDimensions::Dim2d { width: dimensions[0], height: dimensions[1], array_layers: 1, cubemap_compatible: false }, 1, 1);
SwapchainImage::from_raw(unsafe_image, format, &swapchain, id as u32).unwrap() // TODO: propagate error
}).collect::<Vec<_>>();
*swapchain.images.lock().unwrap() = images.iter().map(|i| (Arc::downgrade(i), true)).collect();
Ok((swapchain, images))
}
/// Tries to take ownership of an image in order to draw on it.
///
/// The function returns the index of the image in the array of images that was returned
/// when creating the swapchain, plus a future that represents the moment when the image will
/// become available from the GPU (which may not be *immediately*).
///
/// If you try to draw on an image without acquiring it first, the execution will block. (TODO
/// behavior may change).
// TODO: has to make sure vkQueuePresent is called, because calling acquire_next_image many
// times in a row is an error
// TODO: swapchain must not have been replaced by being passed as the VkSwapchainCreateInfoKHR::oldSwapchain value to vkCreateSwapchainKHR
// TODO: change timeout to `Option<Duration>`.
pub fn acquire_next_image(&self, timeout: Duration) -> Result<(usize, SwapchainAcquireFuture), AcquireError> {
unsafe {
let stale = self.stale.lock().unwrap();
if *stale {
return Err(AcquireError::OutOfDate);
}
let vk = self.device.pointers();
let semaphore = try!(Semaphore::new(self.device.clone()));
let timeout_ns = timeout.as_secs().saturating_mul(1_000_000_000)
.saturating_add(timeout.subsec_nanos() as u64);
let mut out = mem::uninitialized();
let r = try!(check_errors(vk.AcquireNextImageKHR(self.device.internal_object(),
self.swapchain, timeout_ns,
semaphore.internal_object(), 0,
&mut out)));
let id = match r {
Success::Success => out as usize,
Success::Suboptimal => out as usize, // TODO: give that info to the user
Success::NotReady => return Err(AcquireError::Timeout),
Success::Timeout => return Err(AcquireError::Timeout),
s => panic!("unexpected success value: {:?}", s)
};
let mut images = self.images.lock().unwrap();
let undefined_layout = mem::replace(&mut images.get_mut(id).unwrap().1, false);
Ok((id, SwapchainAcquireFuture {
semaphore: semaphore,
id: id,
image: images.get(id).unwrap().0.clone(),
finished: AtomicBool::new(false),
undefined_layout: undefined_layout,
}))
}
}
/// Presents an image on the screen.
///
/// `index` is the same value that `acquire_next_image` returned earlier; the image must
/// therefore already have been acquired.
///
/// How (and when) the image actually reaches the screen depends on the present mode that
/// was chosen when the swapchain was created.
// TODO: use another API, since taking by Arc is meh
pub fn present<F>(me: Arc<Self>, before: F, queue: Arc<Queue>, index: usize)
-> PresentFuture<F>
where F: GpuFuture
{
assert!(index < me.num_images as usize);
// Grab a strong reference to the image while holding the images lock.
let swapchain_image = {
let images = me.images.lock().unwrap();
images.get(index).unwrap().0.upgrade().unwrap() // TODO: return error instead
};
// `check_image_access` on a `SwapchainImage` never reports the access itself (by
// design), so there is no need to fall back to the image's `gpu_access` here.
let access = before.check_image_access(&swapchain_image, ImageLayout::PresentSrc, true, &queue);
assert!(access.is_ok()); // TODO: return error instead
PresentFuture {
previous: before,
swapchain: me,
queue: queue,
image_id: index as u32,
finished: AtomicBool::new(false),
}
}
// Trivial read-only accessors mirroring the parameters passed to `Swapchain::new`.
/// Returns the number of images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn num_images(&self) -> u32 {
self.num_images
}
/// Returns the format of the images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn format(&self) -> Format {
self.format
}
/// Returns the dimensions of the images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn dimensions(&self) -> [u32; 2] {
self.dimensions
}
/// Returns the number of layers of the images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn layers(&self) -> u32 {
self.layers
}
/// Returns the transform that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn transform(&self) -> SurfaceTransform {
self.transform
}
/// Returns the alpha mode that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn composite_alpha(&self) -> CompositeAlpha {
self.alpha
}
/// Returns the present mode that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn present_mode(&self) -> PresentMode {
self.mode
}
/// Returns the value of `clipped` that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn clipped(&self) -> bool {
self.clipped
}
} // end of impl Swapchain
unsafe impl VulkanObject for Swapchain {
type Object = vk::SwapchainKHR;
/// Returns the raw `VkSwapchainKHR` handle.
#[inline]
fn internal_object(&self) -> vk::SwapchainKHR {
self.swapchain
}
}
impl fmt::Debug for Swapchain {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan swapchain {:?}>", self.swapchain)
}
}
impl Drop for Swapchain {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroySwapchainKHR(self.device.internal_object(), self.swapchain, ptr::null());
// Clear the surface's "has a live swapchain" flag so a new one can be created.
self.surface.flag().store(false, Ordering::Release);
}
}
}
/// Represents the moment when the GPU will have access to a swapchain image.
#[must_use]
pub struct SwapchainAcquireFuture {
// Signaled by the driver when the acquire completes.
semaphore: Semaphore,
// Index of the acquired image within the swapchain.
id: usize,
// Weak: the swapchain (and its images) may be destroyed before this future.
image: Weak<SwapchainImage>,
finished: AtomicBool,
// If true, then the acquired image is still in the undefined layout and must be transitionned.
undefined_layout: bool,
}
impl SwapchainAcquireFuture {
/// Returns the index of the image in the list of images returned when creating the swapchain.
#[inline]
pub fn image_id(&self) -> usize {
self.id
}
/// Returns the acquired image, or `None` if the swapchain has since been destroyed.
#[inline]
pub fn image(&self) -> Option<Arc<SwapchainImage>> {
self.image.upgrade()
}
}
unsafe impl GpuFuture for SwapchainAcquireFuture {
#[inline]
fn cleanup_finished(&mut self) {
// Nothing to clean up: this future holds no resources from earlier submissions.
}
#[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
// The acquire is represented as a semaphore wait for the next submission.
let mut sem = SubmitSemaphoresWaitBuilder::new();
sem.add_wait_semaphore(&self.semaphore);
Ok(SubmitAnyBuilder::SemaphoresWait(sem))
}
#[inline]
fn flush(&self) -> Result<(), FlushError> {
// The acquire itself was already submitted by `acquire_next_image`.
Ok(())
}
#[inline]
unsafe fn signal_finished(&self) {
self.finished.store(true, Ordering::SeqCst);
}
#[inline]
fn queue_change_allowed(&self) -> bool {
// The semaphore wait can happen on any queue.
true
}
#[inline]
fn queue(&self) -> Option<Arc<Queue>> {
None
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
// An acquire future never covers buffer accesses.
Err(AccessCheckError::Unknown)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
if let Some(sc_img) = self.image.upgrade() {
// Only the image that was acquired is covered by this future.
if sc_img.inner().internal_object() != image.inner().internal_object() {
return Err(AccessCheckError::Unknown);
}
// A never-acquired image is still in the undefined layout and must be
// transitioned before being used with any other layout.
if self.undefined_layout && layout != ImageLayout::Undefined {
return Err(AccessCheckError::Denied(AccessError::ImageNotInitialized {
requested: layout
}));
}
if layout != ImageLayout::Undefined && layout != ImageLayout::PresentSrc {
return Err(AccessCheckError::Denied(AccessError::UnexpectedImageLayout {
allowed: ImageLayout::PresentSrc,
requested: layout,
}));
}
Ok(None)
} else {
// The swapchain image no longer exists, therefore the `image` parameter received by
// this function cannot possibly be the swapchain image.
Err(AccessCheckError::Unknown)
}
}
}
unsafe impl DeviceOwned for SwapchainAcquireFuture {
#[inline]
fn device(&self) -> &Arc<Device> {
// The semaphore was created from the swapchain's device.
self.semaphore.device()
}
}
impl Drop for SwapchainAcquireFuture {
fn drop(&mut self) {
// Dropping an unsubmitted acquire future would leave the semaphore signal pending
// with nobody ever waiting on it; there is currently no recovery path.
if !*self.finished.get_mut() {
panic!() // FIXME: what to do?
/*// TODO: handle errors?
let fence = Fence::new(self.device().clone()).unwrap();
let mut builder = SubmitCommandBufferBuilder::new();
builder.add_wait_semaphore(&self.semaphore);
builder.set_signal_fence(&fence);
builder.submit(... which queue ? ...).unwrap();
fence.wait(Duration::from_secs(600)).unwrap();*/
}
}
}
/// Error that can happen when calling `acquire_next_image`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum AcquireError {
/// Not enough memory.
OomError(OomError),
/// The connection to the device has been lost.
DeviceLost,
/// The timeout of the function has been reached before an image was available.
Timeout,
/// The surface is no longer accessible and must be recreated.
SurfaceLost,
/// The surface has changed in a way that makes the swapchain unusable. You must query the
/// surface's new properties and recreate a new swapchain if you want to continue drawing.
OutOfDate,
}
impl error::Error for AcquireError {
#[inline]
fn description(&self) -> &str {
match *self {
AcquireError::OomError(_) => "not enough memory",
AcquireError::DeviceLost => "the connection to the device has been lost",
AcquireError::Timeout => "no image is available for acquiring yet",
AcquireError::SurfaceLost => "the surface of this swapchain is no longer valid",
AcquireError::OutOfDate => "the swapchain needs to be recreated",
}
}
#[inline]
fn cause(&self) -> Option<&error::Error> {
// Only the out-of-memory variant wraps an underlying error.
match *self {
AcquireError::OomError(ref err) => Some(err),
_ => None
}
}
}
impl fmt::Display for AcquireError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
impl From<OomError> for AcquireError {
#[inline]
fn from(err: OomError) -> AcquireError {
AcquireError::OomError(err)
}
}
impl From<Error> for AcquireError {
#[inline]
fn from(err: Error) -> AcquireError {
// Maps raw Vulkan result codes onto the user-facing error; codes that the acquire
// entry point cannot produce are treated as unreachable.
match err {
err @ Error::OutOfHostMemory => AcquireError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => AcquireError::OomError(OomError::from(err)),
Error::DeviceLost => AcquireError::DeviceLost,
Error::SurfaceLost => AcquireError::SurfaceLost,
Error::OutOfDate => AcquireError::OutOfDate,
_ => panic!("unexpected error: {:?}", err)
}
}
}
/// Represents a swapchain image being presented on the screen.
#[must_use = "Dropping this object will immediately block the thread until the GPU has finished processing the submission"]
pub struct PresentFuture<P> where P: GpuFuture {
// Future that must be built/flushed before the present happens.
previous: P,
// Queue on which the present is submitted.
queue: Arc<Queue>,
swapchain: Arc<Swapchain>,
image_id: u32,
finished: AtomicBool,
}
unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
#[inline]
fn cleanup_finished(&mut self) {
self.previous.cleanup_finished();
}
#[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
let queue = self.previous.queue().map(|q| q.clone());
// TODO: if the swapchain image layout is not PRESENT, should add a transition command
// buffer
Ok(match try!(self.previous.build_submission()) {
SubmitAnyBuilder::Empty => {
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id);
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::SemaphoresWait(sem) => {
// Fold the previous future's semaphore waits into the present submission.
let mut builder: SubmitPresentBuilder = sem.into();
builder.add_swapchain(&self.swapchain, self.image_id);
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::CommandBuffer(cb) => {
try!(cb.submit(&queue.unwrap())); // FIXME: wrong because build_submission can be called multiple times
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id);
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::QueuePresent(present) => {
unimplemented!() // TODO:
/*present.submit();
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(self.command_buffer.inner(), self.image_id);
SubmitAnyBuilder::CommandBuffer(builder)*/
},
})
}
#[inline]
fn flush(&self) -> Result<(), FlushError> {
unimplemented!()
}
#[inline]
unsafe fn signal_finished(&self) {
self.finished.store(true, Ordering::SeqCst);
self.previous.signal_finished();
}
#[inline]
fn queue_change_allowed(&self) -> bool {
false
}
#[inline]
fn queue(&self) -> Option<Arc<Queue>> {
debug_assert!(match self.previous.queue() {
None => true,
Some(q) => q.is_same(&self.queue)
});
Some(self.queue.clone())
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
// Presenting doesn't involve buffers; delegate the check to the previous future
// instead of panicking with `unimplemented!()`.
self.previous.check_buffer_access(buffer, exclusive, queue)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
// Delegate to the previous future instead of panicking with `unimplemented!()`.
// FIXME: must return `Err(SwapchainImageAcquireOnly)` for the swapchain image that is
// being presented
self.previous.check_image_access(image, layout, exclusive, queue)
}
}
unsafe impl<P> DeviceOwned for PresentFuture<P> where P: GpuFuture {
#[inline]
fn device(&self) -> &Arc<Device> {
// The present is tied to the queue's device.
self.queue.device()
}
}
impl<P> Drop for PresentFuture<P> where P: GpuFuture {
fn drop(&mut self) {
unsafe {
// If the present was never flushed, flush it now and block so the GPU is not
// left referencing resources we are about to destroy.
if !*self.finished.get_mut() {
// TODO: handle errors?
self.flush().unwrap();
// Block until the queue finished.
self.queue().unwrap().wait().unwrap();
self.previous.signal_finished();
}
}
}
}
// Implement PresentFuture::check_*_access
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::Weak;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::time::Duration;
use buffer::BufferAccess;
use command_buffer::submit::SubmitAnyBuilder;
use command_buffer::submit::SubmitPresentBuilder;
use command_buffer::submit::SubmitSemaphoresWaitBuilder;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use format::Format;
use format::FormatDesc;
use image::ImageAccess;
use image::ImageDimensions;
use image::ImageLayout;
use image::ImageUsage;
use image::sys::UnsafeImage;
use image::swapchain::SwapchainImage;
use swapchain::ColorSpace;
use swapchain::CompositeAlpha;
use swapchain::PresentMode;
use swapchain::Surface;
use swapchain::SurfaceTransform;
use swapchain::SurfaceSwapchainLock;
use sync::AccessCheckError;
use sync::AccessError;
use sync::AccessFlagBits;
use sync::FlushError;
use sync::GpuFuture;
use sync::PipelineStages;
use sync::Semaphore;
use sync::SharingMode;
use check_errors;
use Error;
use OomError;
use Success;
use VulkanObject;
use VulkanPointers;
use vk;
/// Contains the swapping system and the images that can be shown on a surface.
// TODO: #[derive(Debug)] (waiting on https://github.com/aturon/crossbeam/issues/62)
pub struct Swapchain {
device: Arc<Device>,
surface: Arc<Surface>,
// Raw Vulkan handle; destroyed in `Drop`.
swapchain: vk::SwapchainKHR,
// If true, that means we have used this swapchain to recreate a new swapchain. The current
// swapchain can no longer be used for anything except presenting already-acquired images.
//
// We use a `Mutex` instead of an `AtomicBool` because we want to keep that locked while
// we acquire the image.
stale: Mutex<bool>,
// Parameters passed to the constructor.
num_images: u32,
format: Format,
color_space: ColorSpace,
dimensions: [u32; 2],
layers: u32,
usage: ImageUsage,
sharing: SharingMode,
transform: SurfaceTransform,
alpha: CompositeAlpha,
mode: PresentMode,
clipped: bool,
// TODO: meh for Mutex
// For each image: a weak reference plus a flag that is true while the image is still in
// the undefined layout (i.e. has never been acquired).
images: Mutex<Vec<(Weak<SwapchainImage>, bool)>>,
}
impl Swapchain {
/// Builds a new swapchain. Allocates images who content can be made visible on a surface.
///
/// See also the `Surface::get_capabilities` function which returns the values that are
/// supported by the implementation. All the parameters that you pass to `Swapchain::new`
/// must be supported.
///
/// The `clipped` parameter indicates whether the implementation is allowed to discard
/// rendering operations that affect regions of the surface which aren't visible. This is
/// important to take into account if your fragment shader has side-effects or if you want to
/// read back the content of the image afterwards.
///
/// This function returns the swapchain plus a list of the images that belong to the
/// swapchain. The order in which the images are returned is important for the
/// `acquire_next_image` and `present` functions.
///
/// # Panic
///
/// - Panics if the device and the surface don't belong to the same instance.
/// - Panics if `color_attachment` is false in `usage`.
///
// TODO: remove `old_swapchain` parameter and add another function `with_old_swapchain`.
// TODO: add `ColorSpace` parameter
#[inline]
pub fn new<F, S>(device: &Arc<Device>, surface: &Arc<Surface>, num_images: u32, format: F,
dimensions: [u32; 2], layers: u32, usage: ImageUsage, sharing: S,
transform: SurfaceTransform, alpha: CompositeAlpha, mode: PresentMode,
clipped: bool, old_swapchain: Option<&Arc<Swapchain>>)
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), OomError>
where F: FormatDesc, S: Into<SharingMode>
{
// Thin wrapper around `new_inner`; the color space is currently fixed to sRGB non-linear.
Swapchain::new_inner(device, surface, num_images, format.format(),
ColorSpace::SrgbNonLinear, dimensions, layers, usage, sharing.into(),
transform, alpha, mode, clipped, old_swapchain.map(|s| &**s))
}
/// Recreates the swapchain with new dimensions.
///
/// All other creation parameters are reused from this swapchain.
pub fn recreate_with_dimension(&self, dimensions: [u32; 2])
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), OomError>
{
Swapchain::new_inner(&self.device, &self.surface, self.num_images, self.format,
self.color_space, dimensions, self.layers, self.usage,
self.sharing.clone(), self.transform, self.alpha, self.mode,
self.clipped, Some(self))
}
// TODO: images layouts should always be set to "PRESENT", since we have no way to switch the
// layout at present time
fn new_inner(device: &Arc<Device>, surface: &Arc<Surface>, num_images: u32, format: Format,
color_space: ColorSpace, dimensions: [u32; 2], layers: u32, usage: ImageUsage,
sharing: SharingMode, transform: SurfaceTransform, alpha: CompositeAlpha,
mode: PresentMode, clipped: bool, old_swapchain: Option<&Swapchain>)
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), OomError>
{
// Checking that the requested parameters match the capabilities.
let capabilities = try!(surface.get_capabilities(&device.physical_device()));
// TODO: return errors instead
assert!(num_images >= capabilities.min_image_count);
if let Some(c) = capabilities.max_image_count { assert!(num_images <= c) };
assert!(capabilities.supported_formats.iter().any(|&(f, c)| f == format && c == color_space));
assert!(dimensions[0] >= capabilities.min_image_extent[0]);
assert!(dimensions[1] >= capabilities.min_image_extent[1]);
assert!(dimensions[0] <= capabilities.max_image_extent[0]);
assert!(dimensions[1] <= capabilities.max_image_extent[1]);
assert!(layers >= 1 && layers <= capabilities.max_image_array_layers);
assert!((usage.to_usage_bits() & capabilities.supported_usage_flags.to_usage_bits()) == usage.to_usage_bits());
assert!(capabilities.supported_transforms.supports(transform));
assert!(capabilities.supported_composite_alpha.supports(alpha));
assert!(capabilities.present_modes.supports(mode));
// If we recreate a swapchain, make sure that the surface is the same.
if let Some(sc) = old_swapchain {
// TODO: return proper error instead of panicing?
assert_eq!(surface.internal_object(), sc.surface.internal_object());
}
// Checking that the surface doesn't already have a swapchain.
if old_swapchain.is_none() {
// TODO: return proper error instead of panicing?
let has_already = surface.flag().swap(true, Ordering::AcqRel);
if has_already { panic!("The surface already has a swapchain alive"); }
}
// FIXME: check that the device and the surface belong to the same instance
let vk = device.pointers();
assert!(device.loaded_extensions().khr_swapchain); // TODO: return error instead
assert!(usage.color_attachment);
if let Some(ref old_swapchain) = old_swapchain {
*old_swapchain.stale.lock().unwrap() = false;
}
let swapchain = unsafe {
let (sh_mode, sh_count, sh_indices) = match sharing {
SharingMode::Exclusive(_) => (vk::SHARING_MODE_EXCLUSIVE, 0, ptr::null()),
SharingMode::Concurrent(ref ids) => (vk::SHARING_MODE_CONCURRENT, ids.len() as u32,
ids.as_ptr()),
};
let infos = vk::SwapchainCreateInfoKHR {
sType: vk::STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
pNext: ptr::null(),
flags: 0, // reserved
surface: surface.internal_object(),
minImageCount: num_images,
imageFormat: format as u32,
imageColorSpace: color_space as u32,
imageExtent: vk::Extent2D { width: dimensions[0], height: dimensions[1] },
imageArrayLayers: layers,
imageUsage: usage.to_usage_bits(),
imageSharingMode: sh_mode,
queueFamilyIndexCount: sh_count,
pQueueFamilyIndices: sh_indices,
preTransform: transform as u32,
compositeAlpha: alpha as u32,
presentMode: mode as u32,
clipped: if clipped { vk::TRUE } else { vk::FALSE },
oldSwapchain: if let Some(ref old_swapchain) = old_swapchain {
old_swapchain.swapchain
} else {
0
},
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateSwapchainKHR(device.internal_object(), &infos,
ptr::null(), &mut output)));
output
};
let swapchain = Arc::new(Swapchain {
device: device.clone(),
surface: surface.clone(),
swapchain: swapchain,
stale: Mutex::new(false),
num_images: num_images,
format: format,
color_space: color_space,
dimensions: dimensions,
layers: layers,
usage: usage.clone(),
sharing: sharing,
transform: transform,
alpha: alpha,
mode: mode,
clipped: clipped,
images: Mutex::new(Vec::new()), // Filled below.
});
let images = unsafe {
let mut num = 0;
try!(check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
swapchain.swapchain, &mut num,
ptr::null_mut())));
let mut images = Vec::with_capacity(num as usize);
try!(check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
swapchain.swapchain, &mut num,
images.as_mut_ptr())));
images.set_len(num as usize);
images
};
let images = images.into_iter().enumerate().map(|(id, image)| unsafe {
let unsafe_image = UnsafeImage::from_raw(device, image, usage.to_usage_bits(), format,
ImageDimensions::Dim2d { width: dimensions[0], height: dimensions[1], array_layers: 1, cubemap_compatible: false }, 1, 1);
SwapchainImage::from_raw(unsafe_image, format, &swapchain, id as u32).unwrap() // TODO: propagate error
}).collect::<Vec<_>>();
*swapchain.images.lock().unwrap() = images.iter().map(|i| (Arc::downgrade(i), true)).collect();
Ok((swapchain, images))
}
/// Tries to take ownership of an image in order to draw on it.
///
/// The function returns the index of the image in the array of images that was returned
/// when creating the swapchain, plus a future that represents the moment when the image will
/// become available from the GPU (which may not be *immediately*).
///
/// If you try to draw on an image without acquiring it first, the execution will block. (TODO
/// behavior may change).
// TODO: has to make sure vkQueuePresent is called, because calling acquire_next_image many
// times in a row is an error
// TODO: swapchain must not have been replaced by being passed as the VkSwapchainCreateInfoKHR::oldSwapchain value to vkCreateSwapchainKHR
// TODO: change timeout to `Option<Duration>`.
pub fn acquire_next_image(&self, timeout: Duration) -> Result<(usize, SwapchainAcquireFuture), AcquireError> {
unsafe {
// Keep `stale` locked throughout so a concurrent recreation cannot invalidate
// this swapchain mid-acquire.
let stale = self.stale.lock().unwrap();
if *stale {
return Err(AcquireError::OutOfDate);
}
let vk = self.device.pointers();
let semaphore = try!(Semaphore::new(self.device.clone()));
// Saturating conversion of the timeout to nanoseconds.
let timeout_ns = timeout.as_secs().saturating_mul(1_000_000_000)
.saturating_add(timeout.subsec_nanos() as u64);
let mut out = mem::uninitialized();
// Fifth argument (fence) is 0 / VK_NULL_HANDLE: only the semaphore is signaled.
let r = try!(check_errors(vk.AcquireNextImageKHR(self.device.internal_object(),
self.swapchain, timeout_ns,
semaphore.internal_object(), 0,
&mut out)));
let id = match r {
Success::Success => out as usize,
Success::Suboptimal => out as usize, // TODO: give that info to the user
Success::NotReady => return Err(AcquireError::Timeout),
Success::Timeout => return Err(AcquireError::Timeout),
s => panic!("unexpected success value: {:?}", s)
};
let mut images = self.images.lock().unwrap();
// First acquire of this image leaves it flagged as no-longer-undefined; the
// returned future uses the old flag to require a layout transition.
let undefined_layout = mem::replace(&mut images.get_mut(id).unwrap().1, false);
Ok((id, SwapchainAcquireFuture {
semaphore: semaphore,
id: id,
image: images.get(id).unwrap().0.clone(),
finished: AtomicBool::new(false),
undefined_layout: undefined_layout,
}))
}
}
/// Presents an image on the screen.
///
/// The parameter is the same index as what `acquire_next_image` returned. The image must
/// have been acquired first.
///
/// The actual behavior depends on the present mode that you passed when creating the
/// swapchain.
// TODO: use another API, since taking by Arc is meh
pub fn present<F>(me: Arc<Self>, before: F, queue: Arc<Queue>, index: usize)
-> PresentFuture<F>
where F: GpuFuture
{
assert!(index < me.num_images as usize);
let swapchain_image = me.images.lock().unwrap().get(index).unwrap().0.upgrade().unwrap(); // TODO: return error instead
// Normally if `check_image_access` returns false we're supposed to call the `gpu_access`
// function on the image instead. But since we know that this method on `SwapchainImage`
// always returns false anyway (by design), we don't need to do it.
assert!(before.check_image_access(&swapchain_image, ImageLayout::PresentSrc, true, &queue).is_ok()); // TODO: return error instead
PresentFuture {
previous: before,
queue: queue,
swapchain: me,
image_id: index as u32,
finished: AtomicBool::new(false),
}
}
// Trivial read-only accessors mirroring the parameters passed to `Swapchain::new`.
/// Returns the number of images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn num_images(&self) -> u32 {
self.num_images
}
/// Returns the format of the images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn format(&self) -> Format {
self.format
}
/// Returns the dimensions of the images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn dimensions(&self) -> [u32; 2] {
self.dimensions
}
/// Returns the number of layers of the images of the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn layers(&self) -> u32 {
self.layers
}
/// Returns the transform that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn transform(&self) -> SurfaceTransform {
self.transform
}
/// Returns the alpha mode that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn composite_alpha(&self) -> CompositeAlpha {
self.alpha
}
/// Returns the present mode that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn present_mode(&self) -> PresentMode {
self.mode
}
/// Returns the value of `clipped` that was passed when creating the swapchain.
///
/// See the documentation of `Swapchain::new`.
#[inline]
pub fn clipped(&self) -> bool {
self.clipped
}
} // end of impl Swapchain
unsafe impl VulkanObject for Swapchain {
type Object = vk::SwapchainKHR;
/// Returns the raw `VkSwapchainKHR` handle.
#[inline]
fn internal_object(&self) -> vk::SwapchainKHR {
self.swapchain
}
}
impl fmt::Debug for Swapchain {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "<Vulkan swapchain {:?}>", self.swapchain)
}
}
impl Drop for Swapchain {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroySwapchainKHR(self.device.internal_object(), self.swapchain, ptr::null());
// Clear the surface's "has a live swapchain" flag so a new one can be created.
self.surface.flag().store(false, Ordering::Release);
}
}
}
/// Represents the moment when the GPU will have access to a swapchain image.
#[must_use]
pub struct SwapchainAcquireFuture {
// Signaled by the driver when the acquire completes.
semaphore: Semaphore,
// Index of the acquired image within the swapchain.
id: usize,
// Weak: the swapchain (and its images) may be destroyed before this future.
image: Weak<SwapchainImage>,
finished: AtomicBool,
// If true, then the acquired image is still in the undefined layout and must be transitionned.
undefined_layout: bool,
}
impl SwapchainAcquireFuture {
/// Returns the index of the image in the list of images returned when creating the swapchain.
#[inline]
pub fn image_id(&self) -> usize {
self.id
}
/// Returns the acquired image, or `None` if the swapchain has since been destroyed.
#[inline]
pub fn image(&self) -> Option<Arc<SwapchainImage>> {
self.image.upgrade()
}
}
unsafe impl GpuFuture for SwapchainAcquireFuture {
#[inline]
fn cleanup_finished(&mut self) {
// Nothing to clean up: this future holds no resources from earlier submissions.
}
#[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
// The acquire is represented as a semaphore wait for the next submission.
let mut sem = SubmitSemaphoresWaitBuilder::new();
sem.add_wait_semaphore(&self.semaphore);
Ok(SubmitAnyBuilder::SemaphoresWait(sem))
}
#[inline]
fn flush(&self) -> Result<(), FlushError> {
// The acquire itself was already submitted by `acquire_next_image`.
Ok(())
}
#[inline]
unsafe fn signal_finished(&self) {
self.finished.store(true, Ordering::SeqCst);
}
#[inline]
fn queue_change_allowed(&self) -> bool {
// The semaphore wait can happen on any queue.
true
}
#[inline]
fn queue(&self) -> Option<Arc<Queue>> {
None
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
// An acquire future never covers buffer accesses.
Err(AccessCheckError::Unknown)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
if let Some(sc_img) = self.image.upgrade() {
// Only the image that was acquired is covered by this future.
if sc_img.inner().internal_object() != image.inner().internal_object() {
return Err(AccessCheckError::Unknown);
}
// A never-acquired image is still in the undefined layout and must be
// transitioned before being used with any other layout.
if self.undefined_layout && layout != ImageLayout::Undefined {
return Err(AccessCheckError::Denied(AccessError::ImageNotInitialized {
requested: layout
}));
}
if layout != ImageLayout::Undefined && layout != ImageLayout::PresentSrc {
return Err(AccessCheckError::Denied(AccessError::UnexpectedImageLayout {
allowed: ImageLayout::PresentSrc,
requested: layout,
}));
}
Ok(None)
} else {
// The swapchain image no longer exists, therefore the `image` parameter received by
// this function cannot possibly be the swapchain image.
Err(AccessCheckError::Unknown)
}
}
}
unsafe impl DeviceOwned for SwapchainAcquireFuture {
#[inline]
fn device(&self) -> &Arc<Device> {
// The semaphore was created from the swapchain's device.
self.semaphore.device()
}
}
impl Drop for SwapchainAcquireFuture {
fn drop(&mut self) {
// Dropping an unsubmitted acquire future would leave the semaphore signal pending
// with nobody ever waiting on it; there is currently no recovery path.
if !*self.finished.get_mut() {
panic!() // FIXME: what to do?
/*// TODO: handle errors?
let fence = Fence::new(self.device().clone()).unwrap();
let mut builder = SubmitCommandBufferBuilder::new();
builder.add_wait_semaphore(&self.semaphore);
builder.set_signal_fence(&fence);
builder.submit(... which queue ? ...).unwrap();
fence.wait(Duration::from_secs(600)).unwrap();*/
}
}
}
/// Error that can happen when calling `acquire_next_image`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum AcquireError {
/// Not enough memory.
OomError(OomError),
/// The connection to the device has been lost.
DeviceLost,
/// The timeout of the function has been reached before an image was available.
Timeout,
/// The surface is no longer accessible and must be recreated.
SurfaceLost,
/// The surface has changed in a way that makes the swapchain unusable. You must query the
/// surface's new properties and recreate a new swapchain if you want to continue drawing.
OutOfDate,
}
impl error::Error for AcquireError {
#[inline]
fn description(&self) -> &str {
match *self {
AcquireError::OomError(_) => "not enough memory",
AcquireError::DeviceLost => "the connection to the device has been lost",
AcquireError::Timeout => "no image is available for acquiring yet",
AcquireError::SurfaceLost => "the surface of this swapchain is no longer valid",
AcquireError::OutOfDate => "the swapchain needs to be recreated",
}
}
#[inline]
fn cause(&self) -> Option<&error::Error> {
// Only the out-of-memory variant wraps an underlying error.
match *self {
AcquireError::OomError(ref err) => Some(err),
_ => None
}
}
}
impl fmt::Display for AcquireError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
impl From<OomError> for AcquireError {
#[inline]
fn from(err: OomError) -> AcquireError {
AcquireError::OomError(err)
}
}
impl From<Error> for AcquireError {
#[inline]
fn from(err: Error) -> AcquireError {
// Maps raw Vulkan result codes onto the user-facing error; codes that the acquire
// entry point cannot produce are treated as unreachable.
match err {
err @ Error::OutOfHostMemory => AcquireError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => AcquireError::OomError(OomError::from(err)),
Error::DeviceLost => AcquireError::DeviceLost,
Error::SurfaceLost => AcquireError::SurfaceLost,
Error::OutOfDate => AcquireError::OutOfDate,
_ => panic!("unexpected error: {:?}", err)
}
}
}
/// Represents a swapchain image being presented on the screen.
#[must_use = "Dropping this object will immediately block the thread until the GPU has finished processing the submission"]
pub struct PresentFuture<P> where P: GpuFuture {
// Future that must be built/flushed before the present happens.
previous: P,
// Queue on which the present is submitted.
queue: Arc<Queue>,
swapchain: Arc<Swapchain>,
image_id: u32,
finished: AtomicBool,
}
// `GpuFuture` for a swapchain present operation: the submission is built on
// top of whatever the wrapped `previous` future produced.
unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
    #[inline]
    fn cleanup_finished(&mut self) {
        // Nothing of our own to clean up; delegate to the wrapped future.
        self.previous.cleanup_finished();
    }
    #[inline]
    unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
        let queue = self.previous.queue().map(|q| q.clone());
        // TODO: if the swapchain image layout is not PRESENT, should add a transition command
        // buffer
        Ok(match try!(self.previous.build_submission()) {
            SubmitAnyBuilder::Empty => {
                // Nothing pending: a bare present submission is enough.
                let mut builder = SubmitPresentBuilder::new();
                builder.add_swapchain(&self.swapchain, self.image_id);
                SubmitAnyBuilder::QueuePresent(builder)
            },
            SubmitAnyBuilder::SemaphoresWait(sem) => {
                // Fold the semaphores to wait on into the present submission.
                let mut builder: SubmitPresentBuilder = sem.into();
                builder.add_swapchain(&self.swapchain, self.image_id);
                SubmitAnyBuilder::QueuePresent(builder)
            },
            SubmitAnyBuilder::CommandBuffer(cb) => {
                // Submit the pending command buffer first, then present.
                try!(cb.submit(&queue.unwrap())); // FIXME: wrong because build_submission can be called multiple times
                let mut builder = SubmitPresentBuilder::new();
                builder.add_swapchain(&self.swapchain, self.image_id);
                SubmitAnyBuilder::QueuePresent(builder)
            },
            SubmitAnyBuilder::QueuePresent(present) => {
                // Stacking a present on top of another present is not supported yet.
                unimplemented!() // TODO:
                /*present.submit();
                let mut builder = SubmitPresentBuilder::new();
                builder.add_swapchain(self.command_buffer.inner(), self.image_id);
                SubmitAnyBuilder::CommandBuffer(builder)*/
            },
        })
    }
    #[inline]
    fn flush(&self) -> Result<(), FlushError> {
        // NOTE(review): still unimplemented; `Drop` below calls this and will
        // therefore panic for unfinished futures — confirm intended.
        unimplemented!()
    }
    #[inline]
    unsafe fn signal_finished(&self) {
        // Mark ourselves finished, then propagate down the chain.
        self.finished.store(true, Ordering::SeqCst);
        self.previous.signal_finished();
    }
    #[inline]
    fn queue_change_allowed(&self) -> bool {
        // Presentation is tied to the queue chosen at construction time.
        false
    }
    #[inline]
    fn queue(&self) -> Option<Arc<Queue>> {
        // Sanity check: if the previous future has a queue it must agree with ours.
        debug_assert!(match self.previous.queue() {
            None => true,
            Some(q) => q.is_same(&self.queue)
        });
        Some(self.queue.clone())
    }
    #[inline]
    fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
        -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        // Presenting does not touch buffers; delegate the check.
        self.previous.check_buffer_access(buffer, exclusive, queue)
    }
    #[inline]
    fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
        -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        // FIXME: must return `Err(SwapchainImageAcquireOnly)` for the swapchain image that is
        // being presented
        self.previous.check_image_access(image, layout, exclusive, queue)
    }
}
// The future is owned by the device of the queue it presents on.
unsafe impl<P> DeviceOwned for PresentFuture<P> where P: GpuFuture {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.queue.device()
    }
}
impl<P> Drop for PresentFuture<P> where P: GpuFuture {
    fn drop(&mut self) {
        unsafe {
            if !*self.finished.get_mut() {
                // The future was never flushed/signalled: flush now and block
                // until the queue is idle so resources are not released while
                // the GPU may still be using them.
                // TODO: handle errors?
                // NOTE(review): `flush` is `unimplemented!()` above, so dropping
                // an unfinished future currently panics — confirm intended.
                self.flush().unwrap();
                // Block until the queue finished.
                self.queue().unwrap().wait().unwrap();
                self.previous.signal_finished();
            }
        }
    }
}
|
use std::cmp::Eq;
use std::fmt::Display;
use std::iter::FromIterator;
trait Parser<T: Display + Eq, E>: Sized {
fn preview(&self) -> Option<&T>;
fn consume(&mut self) -> Option<T>;
fn current_pos(&self) -> (i32, i32);
fn error<S: Into<String>>(&self, message: S) -> E;
fn export(&self) -> Self;
fn import(&mut self, backup: Self);
fn next(&mut self) -> Result<T, E> {
self.consume().ok_or(self.error("unexpected eof"))
}
fn predicate<F>(&mut self, pred: F) -> Result<T, E>
where F: Fn(&T) -> bool
{
let x = try!(self.next());
if pred(&x) {
Ok(x)
} else {
Err(self.error(format!("unexpected token {}", x)))
}
}
fn atom(&mut self, expected: T) -> Result<T, E> {
let x = try!(self.next());
if x == expected {
Ok(x)
} else {
Err(self.error(format!("unexpected token {}, expected {}", x, expected)))
}
}
fn string<S, O>(&mut self, s: S) -> Result<O, E>
where S: IntoIterator<Item = T>,
O: FromIterator<T>
{
let mut res: Vec<T> = Vec::new();
for c in s {
match self.atom(c) {
Ok(x) => res.push(x),
Err(err) => return Err(err),
}
}
Ok(O::from_iter(res))
}
fn try<O, F>(&mut self, parser: F) -> Result<O, E>
where F: Fn(&mut Self) -> Result<O, E>
{
let backup = self.export();
parser(self).map_err(|x| {
self.import(backup);
x
})
}
fn choose<O>(&mut self, parsers: &[&Fn(&mut Self) -> Result<O, E>]) -> Result<O, E> {
for parser in parsers {
match self.try(|p| parser(p)) {
Ok(x) => return Ok(x),
Err(_) => continue,
}
}
Err(self.error(match self.preview() {
Some(x) => format!("unexpected token {}", x),
None => String::from("unexpected eof"),
}))
}
fn many<X, F, O>(&mut self, parser: F) -> Result<O, E>
where F: Fn(&mut Self) -> Result<X, E>,
O: FromIterator<X>
{
unimplemented!()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::vec_deque::VecDeque;
struct TP {
input: VecDeque<i32>,
}
impl TP {
fn new(input: &[i32]) -> TP {
TP { input: VecDeque::from(input.to_vec()) }
}
}
impl Parser<i32, String> for TP {
fn consume(&mut self) -> Option<i32> {
self.input.pop_front()
}
fn preview(&self) -> Option<&i32> {
self.input.front()
}
fn current_pos(&self) -> (i32, i32) {
(0, 0)
}
fn error<S: Into<String>>(&self, message: S) -> String {
message.into()
}
fn export(&self) -> Self {
TP { input: self.input.clone() }
}
fn import(&mut self, backup: Self) {
self.input = backup.input;
}
}
type TPR = Result<Vec<i32>, String>;
fn err<T>(m: &str) -> Result<T, String> {
Err(String::from(m))
}
#[test]
fn next_success() {
let mut p = TP::new(&[1, 2, 3]);
assert_eq!(p.next(), Ok(1));
assert_eq!(p.next(), Ok(2));
assert_eq!(p.next(), Ok(3));
}
#[test]
fn next_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.next(), err("unexpected eof"));
}
#[test]
fn predicate_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.predicate(|x| x % 2 == 0), Ok(2));
assert_eq!(p.predicate(|x| x % 2 == 0), Ok(4));
assert_eq!(p.predicate(|x| x % 2 == 0), Ok(6));
}
#[test]
fn predicate_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.predicate(|x| x % 2 == 0), err("unexpected eof"));
}
#[test]
fn predicate_fail_not_satisfy() {
let mut p = TP::new(&[3, 5, 7]);
assert_eq!(p.predicate(|x| x % 2 == 0), err("unexpected token 3"));
}
#[test]
fn atom_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.atom(2), Ok(2));
assert_eq!(p.atom(4), Ok(4));
assert_eq!(p.atom(6), Ok(6));
}
#[test]
fn atom_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.atom(2), err("unexpected eof"));
}
#[test]
fn atom_fail_not_expected() {
let mut p = TP::new(&[3, 5, 7]);
assert_eq!(p.atom(3), Ok(3));
assert_eq!(p.atom(4), err("unexpected token 5, expected 4"));
}
#[test]
fn string_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.string(vec![2, 4, 6]), Ok(vec![2, 4, 6]));
}
#[test]
fn string_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.string(vec![2, 4, 6]) as TPR, err("unexpected eof"));
}
#[test]
fn string_fail_not_expected() {
let mut p = TP::new(&[2, 5, 6]);
assert_eq!(p.string(vec![2, 4, 6]) as TPR,
err("unexpected token 5, expected 4"));
}
#[test]
fn try_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.try(|p| p.string(vec![2, 4, 6])), Ok(vec![2, 4, 6]));
}
#[test]
fn try_fail_recover() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.try(|p| p.string(vec![2, 4, 7])) as TPR,
err("unexpected token 6, expected 7"));
assert_eq!(p.try(|p| p.string(vec![2, 4, 6])), Ok(vec![2, 4, 6]));
}
#[test]
fn choose_success() {
let mut p = TP::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 7]),
&|p| p.string(vec![4, 5, 6])]),
Ok(vec![1, 2, 3]));
}
#[test]
fn choose_success_with_recover() {
let mut p = TP::new(&[4, 5, 6, 7, 8, 9, 10]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 8]),
&|p| p.string(vec![4, 5, 6])]),
Ok(vec![4, 5, 6]));
}
#[test]
fn choose_fail_no_match() {
let mut p = TP::new(&[5, 6, 7, 8, 9, 10]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 8]),
&|p| p.string(vec![4, 5, 6])]) as TPR,
err("unexpected token 5"));
}
#[test]
fn choose_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 8]),
&|p| p.string(vec![4, 5, 6])]) as TPR,
err("unexpected eof"));
}
#[test]
fn many_success() {
let lt5 = |p: &mut TP| -> Result<i32, String> { p.predicate(|x| *x < 5) };
assert_eq!(TP::new(&[1, 2, 3, 4]).many(<5), Ok(vec![1, 2, 3, 4]));
assert_eq!(TP::new(&[1, 2, 3, 4, 5, 6, 7, 8]).many(<5),
Ok(vec![1, 2, 3, 4]));
assert_eq!(TP::new(&[4, 5, 6, 7, 8]).many(<5), Ok(vec![4]));
assert_eq!(TP::new(&[5, 6, 7, 8]).many(<5), Ok(vec![]));
}
}
Remove redundant closure call
use std::cmp::Eq;
use std::fmt::Display;
use std::iter::FromIterator;
trait Parser<T: Display + Eq, E>: Sized {
fn preview(&self) -> Option<&T>;
fn consume(&mut self) -> Option<T>;
fn current_pos(&self) -> (i32, i32);
fn error<S: Into<String>>(&self, message: S) -> E;
fn export(&self) -> Self;
fn import(&mut self, backup: Self);
fn next(&mut self) -> Result<T, E> {
self.consume().ok_or(self.error("unexpected eof"))
}
fn predicate<F>(&mut self, pred: F) -> Result<T, E>
where F: Fn(&T) -> bool
{
let x = try!(self.next());
if pred(&x) {
Ok(x)
} else {
Err(self.error(format!("unexpected token {}", x)))
}
}
fn atom(&mut self, expected: T) -> Result<T, E> {
let x = try!(self.next());
if x == expected {
Ok(x)
} else {
Err(self.error(format!("unexpected token {}, expected {}", x, expected)))
}
}
fn string<S, O>(&mut self, s: S) -> Result<O, E>
where S: IntoIterator<Item = T>,
O: FromIterator<T>
{
let mut res: Vec<T> = Vec::new();
for c in s {
match self.atom(c) {
Ok(x) => res.push(x),
Err(err) => return Err(err),
}
}
Ok(O::from_iter(res))
}
fn try<O, F>(&mut self, parser: F) -> Result<O, E>
where F: Fn(&mut Self) -> Result<O, E>
{
let backup = self.export();
parser(self).map_err(|x| {
self.import(backup);
x
})
}
fn choose<O>(&mut self, parsers: &[&Fn(&mut Self) -> Result<O, E>]) -> Result<O, E> {
for parser in parsers {
match self.try(parser) {
Ok(x) => return Ok(x),
Err(_) => continue,
}
}
Err(self.error(match self.preview() {
Some(x) => format!("unexpected token {}", x),
None => String::from("unexpected eof"),
}))
}
fn many<X, F, O>(&mut self, parser: F) -> Result<O, E>
where F: Fn(&mut Self) -> Result<X, E>,
O: FromIterator<X>
{
unimplemented!()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::vec_deque::VecDeque;
struct TP {
input: VecDeque<i32>,
}
impl TP {
fn new(input: &[i32]) -> TP {
TP { input: VecDeque::from(input.to_vec()) }
}
}
impl Parser<i32, String> for TP {
fn consume(&mut self) -> Option<i32> {
self.input.pop_front()
}
fn preview(&self) -> Option<&i32> {
self.input.front()
}
fn current_pos(&self) -> (i32, i32) {
(0, 0)
}
fn error<S: Into<String>>(&self, message: S) -> String {
message.into()
}
fn export(&self) -> Self {
TP { input: self.input.clone() }
}
fn import(&mut self, backup: Self) {
self.input = backup.input;
}
}
type TPR = Result<Vec<i32>, String>;
fn err<T>(m: &str) -> Result<T, String> {
Err(String::from(m))
}
#[test]
fn next_success() {
let mut p = TP::new(&[1, 2, 3]);
assert_eq!(p.next(), Ok(1));
assert_eq!(p.next(), Ok(2));
assert_eq!(p.next(), Ok(3));
}
#[test]
fn next_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.next(), err("unexpected eof"));
}
#[test]
fn predicate_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.predicate(|x| x % 2 == 0), Ok(2));
assert_eq!(p.predicate(|x| x % 2 == 0), Ok(4));
assert_eq!(p.predicate(|x| x % 2 == 0), Ok(6));
}
#[test]
fn predicate_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.predicate(|x| x % 2 == 0), err("unexpected eof"));
}
#[test]
fn predicate_fail_not_satisfy() {
let mut p = TP::new(&[3, 5, 7]);
assert_eq!(p.predicate(|x| x % 2 == 0), err("unexpected token 3"));
}
#[test]
fn atom_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.atom(2), Ok(2));
assert_eq!(p.atom(4), Ok(4));
assert_eq!(p.atom(6), Ok(6));
}
#[test]
fn atom_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.atom(2), err("unexpected eof"));
}
#[test]
fn atom_fail_not_expected() {
let mut p = TP::new(&[3, 5, 7]);
assert_eq!(p.atom(3), Ok(3));
assert_eq!(p.atom(4), err("unexpected token 5, expected 4"));
}
#[test]
fn string_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.string(vec![2, 4, 6]), Ok(vec![2, 4, 6]));
}
#[test]
fn string_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.string(vec![2, 4, 6]) as TPR, err("unexpected eof"));
}
#[test]
fn string_fail_not_expected() {
let mut p = TP::new(&[2, 5, 6]);
assert_eq!(p.string(vec![2, 4, 6]) as TPR,
err("unexpected token 5, expected 4"));
}
#[test]
fn try_success() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.try(|p| p.string(vec![2, 4, 6])), Ok(vec![2, 4, 6]));
}
#[test]
fn try_fail_recover() {
let mut p = TP::new(&[2, 4, 6]);
assert_eq!(p.try(|p| p.string(vec![2, 4, 7])) as TPR,
err("unexpected token 6, expected 7"));
assert_eq!(p.try(|p| p.string(vec![2, 4, 6])), Ok(vec![2, 4, 6]));
}
#[test]
fn choose_success() {
let mut p = TP::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 7]),
&|p| p.string(vec![4, 5, 6])]),
Ok(vec![1, 2, 3]));
}
#[test]
fn choose_success_with_recover() {
let mut p = TP::new(&[4, 5, 6, 7, 8, 9, 10]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 8]),
&|p| p.string(vec![4, 5, 6])]),
Ok(vec![4, 5, 6]));
}
#[test]
fn choose_fail_no_match() {
let mut p = TP::new(&[5, 6, 7, 8, 9, 10]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 8]),
&|p| p.string(vec![4, 5, 6])]) as TPR,
err("unexpected token 5"));
}
#[test]
fn choose_fail_empty() {
let mut p = TP::new(&[]);
assert_eq!(p.choose(&[&|p| p.string(vec![1, 2, 3]),
&|p| p.string(vec![4, 5, 6, 8]),
&|p| p.string(vec![4, 5, 6])]) as TPR,
err("unexpected eof"));
}
#[test]
fn many_success() {
let lt5 = |p: &mut TP| -> Result<i32, String> { p.predicate(|x| *x < 5) };
assert_eq!(TP::new(&[1, 2, 3, 4]).many(<5), Ok(vec![1, 2, 3, 4]));
assert_eq!(TP::new(&[1, 2, 3, 4, 5, 6, 7, 8]).many(<5),
Ok(vec![1, 2, 3, 4]));
assert_eq!(TP::new(&[4, 5, 6, 7, 8]).many(<5), Ok(vec![4]));
assert_eq!(TP::new(&[5, 6, 7, 8]).many(<5), Ok(vec![]));
}
}
|
use definitions::{ExternalInterrupt, InternalInterrupt};
use definitions::Config;
use definitions::Instruction;
use definitions::Program;
use definitions::Target;
use definitions::Value;
use definitions::typedef::*;
use error::*;
use std::collections::{BTreeMap, LinkedList};
use std::sync::mpsc::{Receiver, SyncSender, TrySendError};
use std::thread::{self, JoinHandle};
/// Spawns the VM on its own thread, executing `program`.
///
/// Rendered frames are pushed through `sender`; external interrupts (halt,
/// key events) are read from `receiver`. Errors are printed to stdout because
/// the spawned thread has no caller to propagate them to.
pub fn start(program: Program, sender: SyncSender<Frame>, receiver: Receiver<ExternalInterrupt>)
    -> JoinHandle<()> {
    thread::spawn(
        move || {
            if let Err(ref e) = VM::default().exec(program, sender, receiver) {
                println!("error: {}", e);
                // Print the whole error chain, not just the top-level error.
                for e in e.iter().skip(1) {
                    println!("caused by: {}", e);
                }
                // The backtrace is not always generated. Try to run this example
                // with `RUST_BACKTRACE=1`.
                if let Some(backtrace) = e.backtrace() {
                    println!("backtrace: {:?}", backtrace);
                }
            }
        },
    )
}
/// Since rust's `std::cmp::Ordering` doesn't implement serialization, we have
/// to do this
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum Ordering {
    /// The left-hand value compared less than the right-hand value.
    Less,
    /// The left-hand value compared greater than the right-hand value.
    Greater,
    /// Both values compared equal.
    Equal,
}
/// The whole state of the VM
#[derive(Serialize, Deserialize, Default, Debug)]
struct VM {
    /// The instructions that are currently executed
    image_data: Vec<Instruction>,
    /// The current program counter
    pc: Address,
    /// The value stack addressed via `Target::Stack`
    stack: LinkedList<Value>,
    /// Random-access value storage addressed via `Target::ValueIndex`
    val_index: BTreeMap<Address, Value>,
    /// Last key value received via `ExternalInterrupt::KeyDown`; reset to 0 on `KeyUp`
    input_register: Integer,
    /// The current frame, sent to the display receiver when invalidated
    framebuffer: Frame,
    /// Whether the framebuffer still needs to be flushed to the display receiver
    framebuffer_invalid: bool,
    /// A register for holding information about a recent comparison
    cmp_register: Option<Ordering>,
    /// A stack to hold the return addresses of function calls
    call_stack: LinkedList<Address>,
    /// A boolean used for locking the program counter
    pc_locked: bool,
    /// The configuration of the VM
    config: Config,
    /// Set once execution has been aborted via a halt instruction/interrupt
    halted: bool,
}
impl VM {
// # Maintainance functions
/// Executes the given program
pub fn exec(
&mut self, program: Program, sender: SyncSender<Frame>,
receiver: Receiver<ExternalInterrupt>
) -> VMResult<()> {
self.reset();
self.load_program(program)?;
self.build_framebuffer();
while (self.pc < self.image_data.len()) && !self.halted {
self.external_interrupt(&receiver)?;
self.do_cycle()?;
self.flush_framebuffer(&sender)?;
thread::yield_now();
}
Ok(())
}
/// Run one instruction cycle
fn do_cycle(&mut self) -> VMResult<()> {
let current_instruction = self.image_data[self.pc].clone();
self.handle_instruction(current_instruction)?;
self.advance_pc();
Ok(())
}
/// Handles a single instruction
fn handle_instruction(&mut self, instruction: Instruction) -> VMResult<()> {
match instruction {
Instruction::Halt => self.halt(),
Instruction::Int(interrupt) => self.int(&interrupt),
Instruction::Add(dest, src) => self.add(&dest, &src)?,
Instruction::Sub(dest, src) => self.sub(&dest, &src)?,
Instruction::Div(dest, src) => self.div(&dest, &src)?,
Instruction::Mul(dest, src) => self.mul(&dest, &src)?,
Instruction::Rem(dest, src) => self.rem(&dest, &src)?,
Instruction::Cmp(target_a, target_b) => self.cmp(&target_a, &target_b)?,
Instruction::Jmp(addr) => self.jmp(&addr),
Instruction::JmpLt(addr) => self.jmp_lt(&addr),
Instruction::JmpGt(addr) => self.jmp_gt(&addr),
Instruction::JmpEq(addr) => self.jmp_eq(&addr),
Instruction::JmpLtEq(addr) => self.jmp_lt_eq(&addr),
Instruction::JmpGtEq(addr) => self.jmp_gt_eq(&addr),
Instruction::Push(dest, value) => self.push(&dest, value)?,
Instruction::Mov(dest, src) => self.mov(&dest, &src)?,
Instruction::Swp(target_a, target_b) => self.swp(&target_a, &target_b)?,
Instruction::Call(addr) => self.call(&addr),
Instruction::Ret => self.ret()?,
}
Ok(())
}
/// Loads the instructions of the given program to the VM's state
fn load_program(&mut self, program: Program) -> VMResult<()> {
let orig_program = Program::default();
if program.preamble != orig_program.preamble {
bail!("invalid preamble");
} else if program.version != orig_program.version {
bail!("invalid version");
} else {
self.image_data = program.instructions;
self.config = program.config;
Ok(())
}
}
/// Aborts the execution of the current image
fn halt(&mut self) {
self.halted = true;
}
/// Handles an internal interrupt
fn int(&mut self, interrupt: &InternalInterrupt) {
match interrupt {
&InternalInterrupt::FlushFramebuffer => self.invalidate_framebuffer(),
}
}
/// Handles incoming interrupts or moves along
fn external_interrupt(&mut self, receiver: &Receiver<ExternalInterrupt>) -> VMResult<()> {
match receiver.try_recv() {
Ok(interrupt) => {
match interrupt {
ExternalInterrupt::Halt => self.halt(),
ExternalInterrupt::KeyDown(value) => {
self.input_register = value;
}
ExternalInterrupt::KeyUp => self.input_register = 0,
_ => {}
}
Ok(())
}
_ => Ok(()),
}
}
/// Flushes the internal framebuffer using the given sender
fn flush_framebuffer(&mut self, sender: &SyncSender<Frame>) -> VMResult<()> {
if self.framebuffer_invalid {
if let Err(TrySendError::Disconnected(..)) = sender.try_send(self.framebuffer.clone()) {
bail!("output channel disconnected");
}
self.framebuffer_invalid = false;
}
Ok(())
}
/// Allocates all the needed space in the framebuffer
fn build_framebuffer(&mut self) {
let ref resolution = self.config.display.resolution;
let allocation_space = resolution.width * resolution.height;
self.framebuffer = vec![Color::default(); allocation_space];
}
/// Resets the VM to a clean state
fn reset(&mut self) {
*self = VM::default();
}
/// Locks the program counter in place
fn lock_pc(&mut self) {
self.pc_locked = true;
}
/// Advances the program counter
fn advance_pc(&mut self) {
if self.pc_locked {
self.pc_locked = false;
} else {
self.pc += 1;
}
}
/// Invalidates the frambuffer causing it to be resent to the display
/// receiver
fn invalidate_framebuffer(&mut self) {
self.framebuffer_invalid = true;
}
/// Return the value at the specified target. The value of the target will
/// be consumed
fn pop(&mut self, target: &Target) -> VMResult<Value> {
match target {
&Target::ValueIndex(index) => {
if let Some(value) = self.val_index.remove(&index) {
Ok(value)
} else {
bail!("no value found at index {}", index);
}
}
&Target::Stack => {
if let Some(value) = self.stack.pop_front() {
Ok(value)
} else {
bail!("unable to pop value off an empty stack");
}
}
&Target::Framebuffer(index) => {
if let Some(&(r, g, b)) = self.framebuffer.get(index) {
Ok(Value::Color(r, g, b))
} else {
bail!("no value found in framebuffer at index {}", index);
}
}
&Target::InputRegister => Ok(Value::Integer(self.input_register)),
}
}
// # Instruction functions
/// Adds the value of the src target to the value of the dest target
fn add(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value + src_value)?;
Ok(())
}
/// Subtracts the value of the src target from the value of the dest target
fn sub(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value - src_value)?;
Ok(())
}
/// Divides the value of the dest target through the value of the src target
fn div(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value / src_value)?;
Ok(())
}
/// Multiplies the value of the dest target with the value of the src target
fn mul(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value * src_value)?;
Ok(())
}
/// Applies the modulo operator on the value of the dest target using the
/// value of the src target
fn rem(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value * src_value)?;
Ok(())
}
/// Compares the top values of the two targets and saves the result to
/// `self.cmp_register`
fn cmp(&mut self, target_a: &Target, target_b: &Target) -> VMResult<()> {
let target_a_value = self.pop(target_a)?;
let target_b_value = self.pop(target_b)?;
if target_a_value < target_b_value {
self.cmp_register = Some(Ordering::Less);
} else if target_a_value > target_b_value {
self.cmp_register = Some(Ordering::Greater);
} else if target_a_value == target_b_value {
self.cmp_register = Some(Ordering::Equal);
}
Ok(())
}
/// Jumps unconditionally to the specified address
fn jmp(&mut self, addr: &Address) {
self.pc = *addr;
self.lock_pc();
}
/// Jumps if the last compare got the result `Some(Ordering::Less)`
fn jmp_lt(&mut self, addr: &Address) {
if self.cmp_register == Some(Ordering::Less) {
self.jmp(addr);
}
}
/// Jumps if the last compare got the result `Some(Ordering::Greater)`
fn jmp_gt(&mut self, addr: &Address) {
if self.cmp_register == Some(Ordering::Greater) {
self.jmp(addr);
}
}
/// Jumps if the last compare got the result `Some(Ordering::Equal)`
fn jmp_eq(&mut self, addr: &Address) {
if self.cmp_register == Some(Ordering::Equal) {
self.jmp(addr);
}
}
/// Jumps if the last compare got the result `Some(Ordering::Less)` or
/// `Some(Ordering::Equal)`
fn jmp_lt_eq(&mut self, addr: &Address) {
if (self.cmp_register == Some(Ordering::Less)) ||
(self.cmp_register == Some(Ordering::Equal)) {
self.jmp(addr);
}
}
/// Jumps if the last compare got the result `Some(Ordering::Greater)` or
/// `Some(Ordering::Equal)`
fn jmp_gt_eq(&mut self, addr: &Address) {
if (self.cmp_register == Some(Ordering::Greater)) ||
(self.cmp_register == Some(Ordering::Equal)) {
self.jmp(addr);
}
}
/// Pushes the given value to the given target
fn push(&mut self, dest: &Target, value: Value) -> VMResult<()> {
match dest {
&Target::ValueIndex(index) => {
self.val_index.entry(index).or_insert(value);
Ok(())
}
&Target::Stack => {
self.stack.push_front(value);
Ok(())
}
&Target::Framebuffer(index) => {
if let Value::Color(r, g, b) = value {
self.framebuffer[index] = (r, g, b);
Ok(())
} else {
bail!("unable push a non-color value to the framebuffer");
}
}
&Target::InputRegister => {
bail!("unable push to read-only input register");
}
}
}
/// Moves the top value of the src target to the dest target
fn mov(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let src_value = self.pop(src)?;
self.push(dest, src_value)?;
Ok(())
}
/// Swaps the top values of the targets
fn swp(&mut self, target_a: &Target, target_b: &Target) -> VMResult<()> {
let a_value = self.pop(target_a)?;
let b_value = self.pop(target_b)?;
self.push(target_a, b_value)?;
self.push(target_b, a_value)?;
Ok(())
}
/// Calls the function at the specified address saving the return address
/// to the call stack
fn call(&mut self, addr: &Address) {
self.call_stack.push_front(self.pc + 1);
self.jmp(addr);
}
/// Returns from an ongoing function call
fn ret(&mut self) -> VMResult<()> {
if let Some(retur_addr) = self.call_stack.pop_front() {
self.jmp(&retur_addr);
} else {
bail!("unable to return from an empty call stack");
}
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use definitions::ImageBuilder;
    use rand;
    #[test]
    fn halt() {
        let mut vm = VM::default();
        vm.handle_instruction(Instruction::Halt).expect("failed to handle instruction");
        println!("{:#?}", vm);
        assert!(vm.halted);
    }
    #[test]
    fn add_stack() {
        for _ in 0..3000 {
            // Halve the operands so the addition below cannot overflow.
            let val_a = rand::random::<Integer>() / 2;
            let val_b = rand::random::<Integer>() / 2;
            let mut vm = VM::default();
            let mut builder = ImageBuilder::new();
            builder.push(Target::Stack, Value::Integer(val_a));
            builder.push(Target::Stack, Value::Integer(val_b));
            builder.add(Target::Stack, Target::Stack);
            let program = builder.gen_program();
            vm.load_program(program).expect("failed to generate program");
            // Unwrap the cycle results so a failing instruction fails the test
            // instead of being silently ignored.
            vm.do_cycle().expect("failed to run cycle");
            vm.do_cycle().expect("failed to run cycle");
            vm.do_cycle().expect("failed to run cycle");
            let stack_value = vm.pop(&Target::Stack).expect("failed to pop value off the stack");
            assert_eq!(stack_value, Value::Integer(val_a + val_b));
        }
    }
}
Unwrap all results in tests
use definitions::{ExternalInterrupt, InternalInterrupt};
use definitions::Config;
use definitions::Instruction;
use definitions::Program;
use definitions::Target;
use definitions::Value;
use definitions::typedef::*;
use error::*;
use std::collections::{BTreeMap, LinkedList};
use std::sync::mpsc::{Receiver, SyncSender, TrySendError};
use std::thread::{self, JoinHandle};
/// Spawns the VM on its own thread, executing `program`.
///
/// Rendered frames are pushed through `sender`; external interrupts (halt,
/// key events) are read from `receiver`. Errors are printed to stdout because
/// the spawned thread has no caller to propagate them to.
pub fn start(program: Program, sender: SyncSender<Frame>, receiver: Receiver<ExternalInterrupt>)
    -> JoinHandle<()> {
    thread::spawn(
        move || {
            if let Err(ref e) = VM::default().exec(program, sender, receiver) {
                println!("error: {}", e);
                // Print the whole error chain, not just the top-level error.
                for e in e.iter().skip(1) {
                    println!("caused by: {}", e);
                }
                // The backtrace is not always generated. Try to run this example
                // with `RUST_BACKTRACE=1`.
                if let Some(backtrace) = e.backtrace() {
                    println!("backtrace: {:?}", backtrace);
                }
            }
        },
    )
}
/// Since rust's `std::cmp::Ordering` doesn't implement serialization, we have
/// to do this
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum Ordering {
    /// The left-hand value compared less than the right-hand value.
    Less,
    /// The left-hand value compared greater than the right-hand value.
    Greater,
    /// Both values compared equal.
    Equal,
}
/// The whole state of the VM
#[derive(Serialize, Deserialize, Default, Debug)]
struct VM {
    /// The instructions that are currently executed
    image_data: Vec<Instruction>,
    /// The current program counter
    pc: Address,
    /// The value stack addressed via `Target::Stack`
    stack: LinkedList<Value>,
    /// Random-access value storage addressed via `Target::ValueIndex`
    val_index: BTreeMap<Address, Value>,
    /// Last key value received via `ExternalInterrupt::KeyDown`; reset to 0 on `KeyUp`
    input_register: Integer,
    /// The current frame, sent to the display receiver when invalidated
    framebuffer: Frame,
    /// Whether the framebuffer still needs to be flushed to the display receiver
    framebuffer_invalid: bool,
    /// A register for holding information about a recent comparison
    cmp_register: Option<Ordering>,
    /// A stack to hold the return addresses of function calls
    call_stack: LinkedList<Address>,
    /// A boolean used for locking the program counter
    pc_locked: bool,
    /// The configuration of the VM
    config: Config,
    /// Set once execution has been aborted via a halt instruction/interrupt
    halted: bool,
}
impl VM {
// # Maintainance functions
/// Executes the given program
pub fn exec(
&mut self, program: Program, sender: SyncSender<Frame>,
receiver: Receiver<ExternalInterrupt>
) -> VMResult<()> {
self.reset();
self.load_program(program)?;
self.build_framebuffer();
while (self.pc < self.image_data.len()) && !self.halted {
self.external_interrupt(&receiver)?;
self.do_cycle()?;
self.flush_framebuffer(&sender)?;
thread::yield_now();
}
Ok(())
}
/// Run one instruction cycle
fn do_cycle(&mut self) -> VMResult<()> {
let current_instruction = self.image_data[self.pc].clone();
self.handle_instruction(current_instruction)?;
self.advance_pc();
Ok(())
}
/// Handles a single instruction
fn handle_instruction(&mut self, instruction: Instruction) -> VMResult<()> {
match instruction {
Instruction::Halt => self.halt(),
Instruction::Int(interrupt) => self.int(&interrupt),
Instruction::Add(dest, src) => self.add(&dest, &src)?,
Instruction::Sub(dest, src) => self.sub(&dest, &src)?,
Instruction::Div(dest, src) => self.div(&dest, &src)?,
Instruction::Mul(dest, src) => self.mul(&dest, &src)?,
Instruction::Rem(dest, src) => self.rem(&dest, &src)?,
Instruction::Cmp(target_a, target_b) => self.cmp(&target_a, &target_b)?,
Instruction::Jmp(addr) => self.jmp(&addr),
Instruction::JmpLt(addr) => self.jmp_lt(&addr),
Instruction::JmpGt(addr) => self.jmp_gt(&addr),
Instruction::JmpEq(addr) => self.jmp_eq(&addr),
Instruction::JmpLtEq(addr) => self.jmp_lt_eq(&addr),
Instruction::JmpGtEq(addr) => self.jmp_gt_eq(&addr),
Instruction::Push(dest, value) => self.push(&dest, value)?,
Instruction::Mov(dest, src) => self.mov(&dest, &src)?,
Instruction::Swp(target_a, target_b) => self.swp(&target_a, &target_b)?,
Instruction::Call(addr) => self.call(&addr),
Instruction::Ret => self.ret()?,
}
Ok(())
}
/// Loads the instructions of the given program to the VM's state
fn load_program(&mut self, program: Program) -> VMResult<()> {
let orig_program = Program::default();
if program.preamble != orig_program.preamble {
bail!("invalid preamble");
} else if program.version != orig_program.version {
bail!("invalid version");
} else {
self.image_data = program.instructions;
self.config = program.config;
Ok(())
}
}
/// Aborts the execution of the current image
fn halt(&mut self) {
self.halted = true;
}
/// Handles an internal interrupt
fn int(&mut self, interrupt: &InternalInterrupt) {
match interrupt {
&InternalInterrupt::FlushFramebuffer => self.invalidate_framebuffer(),
}
}
/// Handles incoming interrupts or moves along
fn external_interrupt(&mut self, receiver: &Receiver<ExternalInterrupt>) -> VMResult<()> {
match receiver.try_recv() {
Ok(interrupt) => {
match interrupt {
ExternalInterrupt::Halt => self.halt(),
ExternalInterrupt::KeyDown(value) => {
self.input_register = value;
}
ExternalInterrupt::KeyUp => self.input_register = 0,
_ => {}
}
Ok(())
}
_ => Ok(()),
}
}
/// Flushes the internal framebuffer using the given sender
fn flush_framebuffer(&mut self, sender: &SyncSender<Frame>) -> VMResult<()> {
if self.framebuffer_invalid {
if let Err(TrySendError::Disconnected(..)) = sender.try_send(self.framebuffer.clone()) {
bail!("output channel disconnected");
}
self.framebuffer_invalid = false;
}
Ok(())
}
/// Allocates all the needed space in the framebuffer
fn build_framebuffer(&mut self) {
let ref resolution = self.config.display.resolution;
let allocation_space = resolution.width * resolution.height;
self.framebuffer = vec![Color::default(); allocation_space];
}
/// Resets the VM to a clean state
fn reset(&mut self) {
*self = VM::default();
}
/// Locks the program counter in place
fn lock_pc(&mut self) {
self.pc_locked = true;
}
/// Advances the program counter
fn advance_pc(&mut self) {
if self.pc_locked {
self.pc_locked = false;
} else {
self.pc += 1;
}
}
/// Invalidates the frambuffer causing it to be resent to the display
/// receiver
fn invalidate_framebuffer(&mut self) {
self.framebuffer_invalid = true;
}
/// Return the value at the specified target. The value of the target will
/// be consumed
fn pop(&mut self, target: &Target) -> VMResult<Value> {
match target {
&Target::ValueIndex(index) => {
if let Some(value) = self.val_index.remove(&index) {
Ok(value)
} else {
bail!("no value found at index {}", index);
}
}
&Target::Stack => {
if let Some(value) = self.stack.pop_front() {
Ok(value)
} else {
bail!("unable to pop value off an empty stack");
}
}
&Target::Framebuffer(index) => {
if let Some(&(r, g, b)) = self.framebuffer.get(index) {
Ok(Value::Color(r, g, b))
} else {
bail!("no value found in framebuffer at index {}", index);
}
}
&Target::InputRegister => Ok(Value::Integer(self.input_register)),
}
}
// # Instruction functions
/// Adds the value of the src target to the value of the dest target
fn add(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value + src_value)?;
Ok(())
}
/// Subtracts the value of the src target from the value of the dest target
fn sub(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value - src_value)?;
Ok(())
}
/// Divides the value of the dest target through the value of the src target
fn div(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value / src_value)?;
Ok(())
}
/// Multiplies the value of the dest target with the value of the src target
fn mul(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value * src_value)?;
Ok(())
}
/// Applies the modulo operator on the value of the dest target using the
/// value of the src target
fn rem(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
let dest_value = self.pop(dest)?;
let src_value = self.pop(src)?;
self.push(dest, dest_value * src_value)?;
Ok(())
}
/// Compares the top values of the two targets and saves the result to
/// `self.cmp_register`
fn cmp(&mut self, target_a: &Target, target_b: &Target) -> VMResult<()> {
let target_a_value = self.pop(target_a)?;
let target_b_value = self.pop(target_b)?;
if target_a_value < target_b_value {
self.cmp_register = Some(Ordering::Less);
} else if target_a_value > target_b_value {
self.cmp_register = Some(Ordering::Greater);
} else if target_a_value == target_b_value {
self.cmp_register = Some(Ordering::Equal);
}
Ok(())
}
/// Jumps unconditionally to the specified address
fn jmp(&mut self, addr: &Address) {
self.pc = *addr;
self.lock_pc();
}
/// Jumps if the last compare got the result `Some(Ordering::Less)`
fn jmp_lt(&mut self, addr: &Address) {
    if let Some(Ordering::Less) = self.cmp_register {
        self.jmp(addr);
    }
}
/// Jumps if the last compare got the result `Some(Ordering::Greater)`
fn jmp_gt(&mut self, addr: &Address) {
    if let Some(Ordering::Greater) = self.cmp_register {
        self.jmp(addr);
    }
}
/// Jumps if the last compare got the result `Some(Ordering::Equal)`
fn jmp_eq(&mut self, addr: &Address) {
    if let Some(Ordering::Equal) = self.cmp_register {
        self.jmp(addr);
    }
}
/// Jumps if the last compare got the result `Some(Ordering::Less)` or
/// `Some(Ordering::Equal)`
fn jmp_lt_eq(&mut self, addr: &Address) {
    match self.cmp_register {
        Some(Ordering::Less) | Some(Ordering::Equal) => self.jmp(addr),
        _ => {}
    }
}
/// Jumps if the last compare got the result `Some(Ordering::Greater)` or
/// `Some(Ordering::Equal)`
fn jmp_gt_eq(&mut self, addr: &Address) {
    match self.cmp_register {
        Some(Ordering::Greater) | Some(Ordering::Equal) => self.jmp(addr),
        _ => {}
    }
}
/// Pushes the given value to the given target
///
/// Dispatch depends on the destination:
/// - `ValueIndex(i)`: stores the value at index `i` in the value index map
///   (only if the slot is vacant — see note below).
/// - `Stack`: pushes onto the front of the stack.
/// - `Framebuffer(i)`: writes the color channels to pixel `i`; errors for
///   non-color values.
/// - `InputRegister`: always an error — the register is read-only.
fn push(&mut self, dest: &Target, value: Value) -> VMResult<()> {
    match dest {
        &Target::ValueIndex(index) => {
            // NOTE(review): `entry(..).or_insert(value)` only writes when the
            // index is vacant, so a push to an occupied index silently keeps
            // the old value. If overwrite semantics were intended this should
            // be `insert` — TODO confirm.
            self.val_index.entry(index).or_insert(value);
            Ok(())
        }
        &Target::Stack => {
            self.stack.push_front(value);
            Ok(())
        }
        &Target::Framebuffer(index) => {
            // Only color values can be written to the framebuffer.
            if let Value::Color(r, g, b) = value {
                self.framebuffer[index] = (r, g, b);
                Ok(())
            } else {
                bail!("unable push a non-color value to the framebuffer");
            }
        }
        &Target::InputRegister => {
            bail!("unable push to read-only input register");
        }
    }
}
/// Moves the top value of the src target to the dest target
fn mov(&mut self, dest: &Target, src: &Target) -> VMResult<()> {
    let value = self.pop(src)?;
    self.push(dest, value)
}
/// Swaps the top values of the targets
///
/// Pops both tops first, then pushes each one back onto the other target.
fn swp(&mut self, target_a: &Target, target_b: &Target) -> VMResult<()> {
    let first = self.pop(target_a)?;
    let second = self.pop(target_b)?;
    self.push(target_a, second)?;
    self.push(target_b, first)
}
/// Calls the function at the specified address saving the return address
/// to the call stack
fn call(&mut self, addr: &Address) {
    // The instruction after this `call` is where `ret` resumes.
    let return_addr = self.pc + 1;
    self.call_stack.push_front(return_addr);
    self.jmp(addr);
}
/// Returns from an ongoing function call
///
/// Pops the saved return address and jumps to it; errors when no call
/// is in progress.
fn ret(&mut self) -> VMResult<()> {
    match self.call_stack.pop_front() {
        Some(return_addr) => {
            self.jmp(&return_addr);
            Ok(())
        }
        None => bail!("unable to return from an empty call stack"),
    }
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use definitions::ImageBuilder;
    use rand;

    /// `Halt` must stop the machine.
    #[test]
    fn halt() {
        let mut machine = VM::default();
        machine.handle_instruction(Instruction::Halt).expect("failed to handle instruction");
        println!("{:#?}", machine);
        assert!(machine.halted);
    }

    /// Adding two random stack values must leave their sum on top of the stack.
    #[test]
    fn add_stack() {
        for _ in 0..3000 {
            // Halve the random operands so the sum cannot overflow.
            let lhs = rand::random::<Integer>() / 2;
            let rhs = rand::random::<Integer>() / 2;

            let mut builder = ImageBuilder::new();
            builder.push(Target::Stack, Value::Integer(lhs));
            builder.push(Target::Stack, Value::Integer(rhs));
            builder.add(Target::Stack, Target::Stack);

            let mut machine = VM::default();
            machine.load_program(builder.gen_program()).unwrap();
            // One cycle per instruction: push, push, add.
            for _ in 0..3 {
                machine.do_cycle().unwrap();
            }

            assert_eq!(machine.pop(&Target::Stack).unwrap(), Value::Integer(lhs + rhs));
        }
    }
}
|
#![doc(html_favicon_url = "https://www.ruma.io/favicon.ico")]
#![doc(html_logo_url = "https://www.ruma.io/images/logo.png")]
//! (De)serializable types for the events in the [Matrix](https://matrix.org) specification.
//! These types are used by other ruma crates.
//!
//! All data exchanged over Matrix is expressed as an event.
//! Different event types represent different actions, such as joining a room or sending a message.
//! Events are stored and transmitted as simple JSON structures.
//! While anyone can create a new event type for their own purposes, the Matrix specification
//! defines a number of event types which are considered core to the protocol, and Matrix clients
//! and servers must understand their semantics.
//! ruma-events contains Rust types for each of the event types defined by the specification and
//! facilities for extending the event system for custom event types.
//!
//! # Event types
//!
//! ruma-events includes a Rust enum called `EventType`, which provides a simple enumeration of
//! all the event types defined by the Matrix specification. Matrix event types are serialized to
//! JSON strings in [reverse domain name
//! notation](https://en.wikipedia.org/wiki/Reverse_domain_name_notation), although the core event
//! types all use the special "m" TLD, e.g. *m.room.message*.
//!
//! # Core event types
//!
//! ruma-events includes Rust types for every one of the event types in the Matrix specification.
//! To better organize the crate, these types live in separate modules with a hierarchy that
//! matches the reverse domain name notation of the event type.
//! For example, the *m.room.message* event lives at `ruma_events::room::message::MessageEvent`.
//! Each type's module also contains a Rust type for that event type's `content` field, and any
//! other supporting types required by the event's other fields.
//!
//! # Extending Ruma with custom events
//!
//! For our example we will create a reaction message event. This can be used with ruma-events
//! structs, for this event we will use a `SyncMessageEvent` struct but any `MessageEvent` struct
//! would work.
//!
//! ```rust
//! use ruma_events::{macros::EventContent, SyncMessageEvent};
//! use ruma_identifiers::EventId;
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Clone, Debug, Deserialize, Serialize)]
//! #[serde(tag = "rel_type")]
//! pub enum RelatesTo {
//! #[serde(rename = "m.annotation")]
//! Annotation {
//! /// The event this reaction relates to.
//! event_id: EventId,
//! /// The displayable content of the reaction.
//! key: String,
//! },
//!
//! /// Since this event is not fully specified in the Matrix spec
//! /// it may change or types may be added, we are ready!
//! #[serde(rename = "m.whatever")]
//! Whatever,
//! }
//!
//! /// The payload for our reaction event.
//! #[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
//! #[ruma_event(type = "m.reaction", kind = Message)]
//! pub struct ReactionEventContent {
//! #[serde(rename = "m.relates_to")]
//! pub relates_to: RelatesTo,
//! }
//!
//! let json = serde_json::json!({
//! "content": {
//! "m.relates_to": {
//! "event_id": "$xxxx-xxxx",
//! "key": "👍",
//! "rel_type": "m.annotation"
//! }
//! },
//! "event_id": "$xxxx-xxxx",
//! "origin_server_ts": 1,
//! "sender": "@someone:example.org",
//! "type": "m.reaction",
//! "unsigned": {
//! "age": 85
//! }
//! });
//!
//! // The downside of this event is we cannot use it with event enums,
//! // but could be deserialized from a `Raw<_>` that has failed to deserialize.
//! matches::assert_matches!(
//! serde_json::from_value::<SyncMessageEvent<ReactionEventContent>>(json),
//! Ok(SyncMessageEvent {
//! content: ReactionEventContent {
//! relates_to: RelatesTo::Annotation { key, .. },
//! },
//! ..
//! }) if key == "👍"
//! );
//! ```
//!
//! # Serialization and deserialization
//!
//! All concrete event types in ruma-events can be serialized via the `Serialize` trait from
//! [serde](https://serde.rs/) and can be deserialized from as `Raw<EventType>`. In order to
//! handle incoming data that may not conform to `ruma-events`' strict definitions of event
//! structures, deserialization will return `Raw::Err` on error. This error covers both
//! structurally invalid JSON data as well as structurally valid JSON that doesn't fulfill
//! additional constraints the matrix specification defines for some event types. The error exposes
//! the deserialized `serde_json::Value` so that developers can still work with the received
//! event data. This makes it possible to deserialize a collection of events without the entire
//! collection failing to deserialize due to a single invalid event. The "content" type for each
//! event also implements `Serialize` and either `TryFromRaw` (enabling usage as
//! `Raw<ContentType>` for dedicated content types) or `Deserialize` (when the content is a
//! type alias), allowing content to be converted to and from JSON independently of the surrounding
//! event structure, if needed.
#![recursion_limit = "1024"]
#![warn(missing_docs)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use std::fmt::Debug;
use js_int::Int;
use ruma_identifiers::{EventEncryptionAlgorithm, RoomVersionId};
use ruma_serde::Raw;
use serde::{
de::{self, IgnoredAny},
Deserialize, Serialize,
};
use serde_json::value::RawValue as RawJsonValue;
use self::room::redaction::SyncRedactionEvent;
mod enums;
mod error;
mod event_kinds;
// Hack to allow both ruma-events itself and external crates (or tests) to use procedural macros
// that expect `ruma_events` to exist in the prelude.
extern crate self as ruma_events;
/// Re-exports to allow users to declare their own event types using the
/// macros used internally.
///
/// It is not considered part of ruma-events' public API.
#[doc(hidden)]
pub mod exports {
pub use ruma_common;
pub use ruma_identifiers;
pub use ruma_serde;
pub use serde;
pub use serde_json;
}
/// Re-export of all the derives needed to create your own event types.
pub mod macros {
pub use ruma_events_macros::{Event, EventContent};
}
pub mod call;
pub mod custom;
pub mod direct;
pub mod dummy;
pub mod forwarded_room_key;
pub mod fully_read;
pub mod ignored_user_list;
pub mod key;
pub mod pdu;
pub mod policy;
pub mod presence;
pub mod push_rules;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub mod reaction;
pub mod receipt;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub mod relation;
pub mod room;
pub mod room_key;
pub mod room_key_request;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub mod space;
pub mod sticker;
pub mod tag;
pub mod typing;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub use self::relation::Relations;
pub use self::{
enums::*,
error::{FromStrError, InvalidInput},
event_kinds::*,
};
/// Extra information about an event that is not incorporated into the event's
/// hash.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct Unsigned {
/// The time in milliseconds that has elapsed since the event was sent. This
/// field is generated by the local homeserver, and may be incorrect if the
/// local time on at least one of the two servers is out of sync, which can
/// cause the age to either be negative or greater than it actually is.
#[serde(skip_serializing_if = "Option::is_none")]
pub age: Option<Int>,
/// The client-supplied transaction ID, if the client being given the event
/// is the same one which sent it.
#[serde(skip_serializing_if = "Option::is_none")]
pub transaction_id: Option<String>,
/// Server-compiled information from other events relating to this event.
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
#[serde(rename = "m.relations", skip_serializing_if = "Option::is_none")]
pub relations: Option<Relations>,
}
impl Unsigned {
    /// Create a new `Unsigned` with fields set to `None`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Whether this unsigned data is empty (all fields are `None`).
    ///
    /// This method is used to determine whether to skip serializing the
    /// `unsigned` field in room events. Do not use it to determine whether
    /// an incoming `unsigned` field was present - it could still have been
    /// present but contained none of the known fields.
    pub fn is_empty(&self) -> bool {
        // Bug fix: the `relations` field (behind the `unstable-pre-spec`
        // feature) was not considered, so an `Unsigned` holding only
        // `m.relations` data was reported as empty and silently dropped
        // during serialization.
        #[cfg(not(feature = "unstable-pre-spec"))]
        {
            self.age.is_none() && self.transaction_id.is_none()
        }

        #[cfg(feature = "unstable-pre-spec")]
        {
            self.age.is_none() && self.transaction_id.is_none() && self.relations.is_none()
        }
    }
}
/// Extra information about a redacted event that is not incorporated into the event's
/// hash.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct RedactedUnsigned {
/// The event that redacted this event, if any.
#[serde(skip_serializing_if = "Option::is_none")]
pub redacted_because: Option<Box<SyncRedactionEvent>>,
}
impl RedactedUnsigned {
/// Create a new `RedactedUnsigned` with field set to `None`.
pub fn new() -> Self {
Self::default()
}
/// Create a new `RedactedUnsigned` with the given redacted because.
pub fn new_because(redacted_because: Box<SyncRedactionEvent>) -> Self {
Self { redacted_because: Some(redacted_because) }
}
/// Whether this unsigned data is empty (`redacted_because` is `None`).
///
/// This method is used to determine whether to skip serializing the
/// `unsigned` field in redacted room events. Do not use it to determine whether
/// an incoming `unsigned` field was present - it could still have been
/// present but contained none of the known fields.
pub fn is_empty(&self) -> bool {
self.redacted_because.is_none()
}
}
/// The base trait that all event content types implement.
///
/// Implementing this trait allows content types to be serialized as well as deserialized.
pub trait EventContent: Sized + Serialize {
/// A matrix event identifier, like `m.room.message`.
fn event_type(&self) -> &str;
/// Constructs the given event content.
fn from_parts(event_type: &str, content: &RawJsonValue) -> serde_json::Result<Self>;
}
/// Trait to define the behavior of redacting an event.
pub trait Redact {
/// The redacted form of the event.
type Redacted;
/// Transforms `self` into a redacted form (removing most fields) according to the spec.
///
/// A small number of events have room-version specific redaction behavior, so a version has to
/// be specified.
fn redact(self, redaction: SyncRedactionEvent, version: &RoomVersionId) -> Self::Redacted;
}
/// Trait to define the behavior of redacting an event's content object.
pub trait RedactContent {
/// The redacted form of the event's content.
type Redacted;
/// Transform `self` into a redacted form (removing most or all fields) according to the spec.
///
/// A small number of events have room-version specific redaction behavior, so a version has to
/// be specified.
///
/// Where applicable, it is preferred to use [`Redact::redact`] on the outer event.
fn redact(self, version: &RoomVersionId) -> Self::Redacted;
}
/// Extension trait for [`Raw<_>`][ruma_serde::Raw].
pub trait RawExt<T: EventContent> {
/// Try to deserialize the JSON as an event's content.
fn deserialize_content(&self, event_type: &str) -> serde_json::Result<T>;
}
impl<T: EventContent> RawExt<T> for Raw<T> {
fn deserialize_content(&self, event_type: &str) -> serde_json::Result<T> {
T::from_parts(event_type, self.json())
}
}
/// Marker trait for the content of an ephemeral room event.
pub trait EphemeralRoomEventContent: EventContent {}
/// Marker trait for the content of a global account data event.
pub trait GlobalAccountDataEventContent: EventContent {}
/// Marker trait for the content of a room account data event.
pub trait RoomAccountDataEventContent: EventContent {}
/// Marker trait for the content of a to device event.
pub trait ToDeviceEventContent: EventContent {}
/// Marker trait for the content of a message event.
pub trait MessageEventContent: EventContent {}
/// Marker trait for the content of a state event.
pub trait StateEventContent: EventContent {}
/// The base trait that all redacted event content types implement.
///
/// This trait's associated functions and methods should not be used to build
/// redacted events, prefer the `redact` method on `AnyStateEvent` and
/// `AnyMessageEvent` and their "sync" and "stripped" counterparts. The
/// `RedactedEventContent` trait is an implementation detail, ruma makes no
/// API guarantees.
pub trait RedactedEventContent: EventContent {
/// Constructs the redacted event content.
///
/// If called for anything but "empty" redacted content this will error.
#[doc(hidden)]
fn empty(_event_type: &str) -> serde_json::Result<Self> {
Err(serde::de::Error::custom("this event is not redacted"))
}
/// Determines if the redacted event content needs to serialize fields.
#[doc(hidden)]
fn has_serialize_fields(&self) -> bool;
/// Determines if the redacted event content needs to deserialize fields.
#[doc(hidden)]
fn has_deserialize_fields() -> HasDeserializeFields;
}
/// Marker trait for the content of a redacted message event.
pub trait RedactedMessageEventContent: RedactedEventContent {}
/// Marker trait for the content of a redacted state event.
pub trait RedactedStateEventContent: RedactedEventContent {}
/// `HasDeserializeFields` is used in the code generated by the `Event` derive
/// to aid in deserializing redacted events.
#[doc(hidden)]
#[derive(Debug)]
pub enum HasDeserializeFields {
/// Deserialize the event's content, failing if invalid.
True,
/// Return the redacted version of this event's content.
False,
/// `Optional` is used for `RedactedAliasesEventContent` since it has
/// an empty version and one with content left after redaction that
/// must be supported together.
Optional,
}
/// Helper struct to determine if the event has been redacted.
#[doc(hidden)]
#[derive(Debug, Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct UnsignedDeHelper {
/// This is the field that signals an event has been redacted.
pub redacted_because: Option<IgnoredAny>,
}
/// Helper struct to determine the event kind from a `serde_json::value::RawValue`.
#[doc(hidden)]
#[derive(Debug, Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct EventDeHelper {
/// the Matrix event type string "m.room.whatever".
#[serde(rename = "type")]
pub ev_type: String,
/// If `state_key` is present the event will be deserialized as a state event.
pub state_key: Option<IgnoredAny>,
/// If no `state_key` is found but an `event_id` is present the event
/// will be deserialized as a message event.
pub event_id: Option<IgnoredAny>,
/// If no `event_id` or `state_key` are found but a `room_id` is present
/// the event will be deserialized as an ephemeral event.
pub room_id: Option<IgnoredAny>,
/// If this `UnsignedData` contains a `redacted_because` key the event is
/// immediately deserialized as a redacted event.
pub unsigned: Option<UnsignedDeHelper>,
}
/// Helper function for `serde_json::value::RawValue` deserialization.
///
/// Parses the raw (not-yet-deserialized) JSON string into `T`, converting
/// any `serde_json` error into the caller's deserializer error type `E`.
#[doc(hidden)]
pub fn from_raw_json_value<T, E>(val: &RawJsonValue) -> Result<T, E>
where
    T: de::DeserializeOwned,
    E: de::Error,
{
    serde_json::from_str(val.get()).map_err(E::custom)
}
events: Add StaticEventContent trait
#![doc(html_favicon_url = "https://www.ruma.io/favicon.ico")]
#![doc(html_logo_url = "https://www.ruma.io/images/logo.png")]
//! (De)serializable types for the events in the [Matrix](https://matrix.org) specification.
//! These types are used by other ruma crates.
//!
//! All data exchanged over Matrix is expressed as an event.
//! Different event types represent different actions, such as joining a room or sending a message.
//! Events are stored and transmitted as simple JSON structures.
//! While anyone can create a new event type for their own purposes, the Matrix specification
//! defines a number of event types which are considered core to the protocol, and Matrix clients
//! and servers must understand their semantics.
//! ruma-events contains Rust types for each of the event types defined by the specification and
//! facilities for extending the event system for custom event types.
//!
//! # Event types
//!
//! ruma-events includes a Rust enum called `EventType`, which provides a simple enumeration of
//! all the event types defined by the Matrix specification. Matrix event types are serialized to
//! JSON strings in [reverse domain name
//! notation](https://en.wikipedia.org/wiki/Reverse_domain_name_notation), although the core event
//! types all use the special "m" TLD, e.g. *m.room.message*.
//!
//! # Core event types
//!
//! ruma-events includes Rust types for every one of the event types in the Matrix specification.
//! To better organize the crate, these types live in separate modules with a hierarchy that
//! matches the reverse domain name notation of the event type.
//! For example, the *m.room.message* event lives at `ruma_events::room::message::MessageEvent`.
//! Each type's module also contains a Rust type for that event type's `content` field, and any
//! other supporting types required by the event's other fields.
//!
//! # Extending Ruma with custom events
//!
//! For our example we will create a reaction message event. This can be used with ruma-events
//! structs, for this event we will use a `SyncMessageEvent` struct but any `MessageEvent` struct
//! would work.
//!
//! ```rust
//! use ruma_events::{macros::EventContent, SyncMessageEvent};
//! use ruma_identifiers::EventId;
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Clone, Debug, Deserialize, Serialize)]
//! #[serde(tag = "rel_type")]
//! pub enum RelatesTo {
//! #[serde(rename = "m.annotation")]
//! Annotation {
//! /// The event this reaction relates to.
//! event_id: EventId,
//! /// The displayable content of the reaction.
//! key: String,
//! },
//!
//! /// Since this event is not fully specified in the Matrix spec
//! /// it may change or types may be added, we are ready!
//! #[serde(rename = "m.whatever")]
//! Whatever,
//! }
//!
//! /// The payload for our reaction event.
//! #[derive(Clone, Debug, Deserialize, Serialize, EventContent)]
//! #[ruma_event(type = "m.reaction", kind = Message)]
//! pub struct ReactionEventContent {
//! #[serde(rename = "m.relates_to")]
//! pub relates_to: RelatesTo,
//! }
//!
//! let json = serde_json::json!({
//! "content": {
//! "m.relates_to": {
//! "event_id": "$xxxx-xxxx",
//! "key": "👍",
//! "rel_type": "m.annotation"
//! }
//! },
//! "event_id": "$xxxx-xxxx",
//! "origin_server_ts": 1,
//! "sender": "@someone:example.org",
//! "type": "m.reaction",
//! "unsigned": {
//! "age": 85
//! }
//! });
//!
//! // The downside of this event is we cannot use it with event enums,
//! // but could be deserialized from a `Raw<_>` that has failed to deserialize.
//! matches::assert_matches!(
//! serde_json::from_value::<SyncMessageEvent<ReactionEventContent>>(json),
//! Ok(SyncMessageEvent {
//! content: ReactionEventContent {
//! relates_to: RelatesTo::Annotation { key, .. },
//! },
//! ..
//! }) if key == "👍"
//! );
//! ```
//!
//! # Serialization and deserialization
//!
//! All concrete event types in ruma-events can be serialized via the `Serialize` trait from
//! [serde](https://serde.rs/) and can be deserialized from as `Raw<EventType>`. In order to
//! handle incoming data that may not conform to `ruma-events`' strict definitions of event
//! structures, deserialization will return `Raw::Err` on error. This error covers both
//! structurally invalid JSON data as well as structurally valid JSON that doesn't fulfill
//! additional constraints the matrix specification defines for some event types. The error exposes
//! the deserialized `serde_json::Value` so that developers can still work with the received
//! event data. This makes it possible to deserialize a collection of events without the entire
//! collection failing to deserialize due to a single invalid event. The "content" type for each
//! event also implements `Serialize` and either `TryFromRaw` (enabling usage as
//! `Raw<ContentType>` for dedicated content types) or `Deserialize` (when the content is a
//! type alias), allowing content to be converted to and from JSON independently of the surrounding
//! event structure, if needed.
#![recursion_limit = "1024"]
#![warn(missing_docs)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use std::fmt::Debug;
use js_int::Int;
use ruma_identifiers::{EventEncryptionAlgorithm, RoomVersionId};
use ruma_serde::Raw;
use serde::{
de::{self, IgnoredAny},
Deserialize, Serialize,
};
use serde_json::value::RawValue as RawJsonValue;
use self::room::redaction::SyncRedactionEvent;
mod enums;
mod error;
mod event_kinds;
// Hack to allow both ruma-events itself and external crates (or tests) to use procedural macros
// that expect `ruma_events` to exist in the prelude.
extern crate self as ruma_events;
/// Re-exports to allow users to declare their own event types using the
/// macros used internally.
///
/// It is not considered part of ruma-events' public API.
#[doc(hidden)]
pub mod exports {
pub use ruma_common;
pub use ruma_identifiers;
pub use ruma_serde;
pub use serde;
pub use serde_json;
}
/// Re-export of all the derives needed to create your own event types.
pub mod macros {
pub use ruma_events_macros::{Event, EventContent};
}
pub mod call;
pub mod custom;
pub mod direct;
pub mod dummy;
pub mod forwarded_room_key;
pub mod fully_read;
pub mod ignored_user_list;
pub mod key;
pub mod pdu;
pub mod policy;
pub mod presence;
pub mod push_rules;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub mod reaction;
pub mod receipt;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub mod relation;
pub mod room;
pub mod room_key;
pub mod room_key_request;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub mod space;
pub mod sticker;
pub mod tag;
pub mod typing;
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
pub use self::relation::Relations;
pub use self::{
enums::*,
error::{FromStrError, InvalidInput},
event_kinds::*,
};
/// Extra information about an event that is not incorporated into the event's
/// hash.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct Unsigned {
/// The time in milliseconds that has elapsed since the event was sent. This
/// field is generated by the local homeserver, and may be incorrect if the
/// local time on at least one of the two servers is out of sync, which can
/// cause the age to either be negative or greater than it actually is.
#[serde(skip_serializing_if = "Option::is_none")]
pub age: Option<Int>,
/// The client-supplied transaction ID, if the client being given the event
/// is the same one which sent it.
#[serde(skip_serializing_if = "Option::is_none")]
pub transaction_id: Option<String>,
/// Server-compiled information from other events relating to this event.
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
#[serde(rename = "m.relations", skip_serializing_if = "Option::is_none")]
pub relations: Option<Relations>,
}
impl Unsigned {
    /// Create a new `Unsigned` with fields set to `None`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Whether this unsigned data is empty (all fields are `None`).
    ///
    /// This method is used to determine whether to skip serializing the
    /// `unsigned` field in room events. Do not use it to determine whether
    /// an incoming `unsigned` field was present - it could still have been
    /// present but contained none of the known fields.
    pub fn is_empty(&self) -> bool {
        // Bug fix: the `relations` field (behind the `unstable-pre-spec`
        // feature) was not considered, so an `Unsigned` holding only
        // `m.relations` data was reported as empty and silently dropped
        // during serialization.
        #[cfg(not(feature = "unstable-pre-spec"))]
        {
            self.age.is_none() && self.transaction_id.is_none()
        }

        #[cfg(feature = "unstable-pre-spec")]
        {
            self.age.is_none() && self.transaction_id.is_none() && self.relations.is_none()
        }
    }
}
/// Extra information about a redacted event that is not incorporated into the event's
/// hash.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct RedactedUnsigned {
/// The event that redacted this event, if any.
#[serde(skip_serializing_if = "Option::is_none")]
pub redacted_because: Option<Box<SyncRedactionEvent>>,
}
impl RedactedUnsigned {
/// Create a new `RedactedUnsigned` with field set to `None`.
pub fn new() -> Self {
Self::default()
}
/// Create a new `RedactedUnsigned` with the given redacted because.
pub fn new_because(redacted_because: Box<SyncRedactionEvent>) -> Self {
Self { redacted_because: Some(redacted_because) }
}
/// Whether this unsigned data is empty (`redacted_because` is `None`).
///
/// This method is used to determine whether to skip serializing the
/// `unsigned` field in redacted room events. Do not use it to determine whether
/// an incoming `unsigned` field was present - it could still have been
/// present but contained none of the known fields.
pub fn is_empty(&self) -> bool {
self.redacted_because.is_none()
}
}
/// The base trait that all event content types implement.
///
/// Implementing this trait allows content types to be serialized as well as deserialized.
pub trait EventContent: Sized + Serialize {
/// A matrix event identifier, like `m.room.message`.
fn event_type(&self) -> &str;
/// Constructs the given event content.
fn from_parts(event_type: &str, content: &RawJsonValue) -> serde_json::Result<Self>;
}
/// Trait to define the behavior of redacting an event.
pub trait Redact {
/// The redacted form of the event.
type Redacted;
/// Transforms `self` into a redacted form (removing most fields) according to the spec.
///
/// A small number of events have room-version specific redaction behavior, so a version has to
/// be specified.
fn redact(self, redaction: SyncRedactionEvent, version: &RoomVersionId) -> Self::Redacted;
}
/// Trait to define the behavior of redacting an event's content object.
pub trait RedactContent {
/// The redacted form of the event's content.
type Redacted;
/// Transform `self` into a redacted form (removing most or all fields) according to the spec.
///
/// A small number of events have room-version specific redaction behavior, so a version has to
/// be specified.
///
/// Where applicable, it is preferred to use [`Redact::redact`] on the outer event.
fn redact(self, version: &RoomVersionId) -> Self::Redacted;
}
/// Extension trait for [`Raw<_>`][ruma_serde::Raw].
pub trait RawExt<T: EventContent> {
/// Try to deserialize the JSON as an event's content.
fn deserialize_content(&self, event_type: &str) -> serde_json::Result<T>;
}
impl<T: EventContent> RawExt<T> for Raw<T> {
fn deserialize_content(&self, event_type: &str) -> serde_json::Result<T> {
T::from_parts(event_type, self.json())
}
}
/// Marker trait for the content of an ephemeral room event.
pub trait EphemeralRoomEventContent: EventContent {}
/// Marker trait for the content of a global account data event.
pub trait GlobalAccountDataEventContent: EventContent {}
/// Marker trait for the content of a room account data event.
pub trait RoomAccountDataEventContent: EventContent {}
/// Marker trait for the content of a to device event.
pub trait ToDeviceEventContent: EventContent {}
/// Marker trait for the content of a message event.
pub trait MessageEventContent: EventContent {}
/// Marker trait for the content of a state event.
pub trait StateEventContent: EventContent {}
/// The base trait that all redacted event content types implement.
///
/// This trait's associated functions and methods should not be used to build
/// redacted events, prefer the `redact` method on `AnyStateEvent` and
/// `AnyMessageEvent` and their "sync" and "stripped" counterparts. The
/// `RedactedEventContent` trait is an implementation detail, ruma makes no
/// API guarantees.
pub trait RedactedEventContent: EventContent {
/// Constructs the redacted event content.
///
/// If called for anything but "empty" redacted content this will error.
#[doc(hidden)]
fn empty(_event_type: &str) -> serde_json::Result<Self> {
Err(serde::de::Error::custom("this event is not redacted"))
}
/// Determines if the redacted event content needs to serialize fields.
#[doc(hidden)]
fn has_serialize_fields(&self) -> bool;
/// Determines if the redacted event content needs to deserialize fields.
#[doc(hidden)]
fn has_deserialize_fields() -> HasDeserializeFields;
}
/// Marker trait for the content of a redacted message event.
pub trait RedactedMessageEventContent: RedactedEventContent {}
/// Marker trait for the content of a redacted state event.
pub trait RedactedStateEventContent: RedactedEventContent {}
/// Trait for abstracting over event content structs.
///
/// … but *not* enums which don't always have an event type and kind (e.g. message vs state) that's
/// fixed / known at compile time.
pub trait StaticEventContent: EventContent {
    /// The event's "kind".
    ///
    /// See the type's documentation.
    const KIND: EventKind;

    /// The event type, e.g. `m.room.message`.
    const TYPE: &'static str;
}
/// The "kind" of an event.
///
/// This corresponds directly to the event content marker traits.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[non_exhaustive]
pub enum EventKind {
    /// Global account data event kind.
    GlobalAccountData,

    /// Room account data event kind.
    RoomAccountData,

    /// Ephemeral room event kind.
    EphemeralRoomData,

    /// Message event kind.
    ///
    /// Since redacted / non-redacted message events are used in the same places but have different
    /// sets of fields, these two variations are treated as two closely-related event kinds.
    Message {
        /// Redacted variation?
        redacted: bool,
    },

    /// State event kind.
    ///
    /// Since redacted / non-redacted state events are used in the same places but have different
    /// sets of fields, these two variations are treated as two closely-related event kinds.
    State {
        /// Redacted variation?
        redacted: bool,
    },

    /// To-device event kind.
    ToDevice,
}
/// `HasDeserializeFields` is used in the code generated by the `Event` derive
/// to aid in deserializing redacted events.
#[doc(hidden)]
#[derive(Debug)]
pub enum HasDeserializeFields {
    /// Deserialize the event's content, failing if invalid.
    True,

    /// Return the redacted version of this event's content.
    False,

    /// `Optional` is used for `RedactedAliasesEventContent` since it has
    /// an empty version and one with content left after redaction that
    /// must be supported together.
    Optional,
}
/// Helper struct to determine if the event has been redacted.
#[doc(hidden)]
#[derive(Debug, Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct UnsignedDeHelper {
    /// This is the field that signals an event has been redacted; its value
    /// is ignored, only its presence matters.
    pub redacted_because: Option<IgnoredAny>,
}
/// Helper struct to determine the event kind from a `serde_json::value::RawValue`.
///
/// Only the presence/absence of the identifying fields matters, so all of
/// them deserialize into `IgnoredAny`.
#[doc(hidden)]
#[derive(Debug, Deserialize)]
#[allow(clippy::exhaustive_structs)]
pub struct EventDeHelper {
    /// The Matrix event type string, e.g. "m.room.whatever".
    #[serde(rename = "type")]
    pub ev_type: String,

    /// If `state_key` is present the event will be deserialized as a state event.
    pub state_key: Option<IgnoredAny>,

    /// If no `state_key` is found but an `event_id` is present the event
    /// will be deserialized as a message event.
    pub event_id: Option<IgnoredAny>,

    /// If no `event_id` or `state_key` are found but a `room_id` is present
    /// the event will be deserialized as an ephemeral event.
    pub room_id: Option<IgnoredAny>,

    /// If this `UnsignedData` contains a `redacted_because` key the event is
    /// immediately deserialized as a redacted event.
    pub unsigned: Option<UnsignedDeHelper>,
}
/// Helper function for `serde_json::value::RawValue` deserialization.
///
/// Parses the raw JSON text into `T`, converting any parse failure into the
/// caller's deserialization error type via `E::custom`.
#[doc(hidden)]
pub fn from_raw_json_value<T, E>(val: &RawJsonValue) -> Result<T, E>
where
    T: de::DeserializeOwned,
    E: de::Error,
{
    match serde_json::from_str(val.get()) {
        Ok(deserialized) => Ok(deserialized),
        Err(parse_err) => Err(E::custom(parse_err)),
    }
}
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Andres Vahter (andres.vahter@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use ffipredict;
use std::ffi::{CString};
use libc::{c_char, c_double};
use std::default::Default;
use std::slice::bytes::copy_memory;
use std::mem::transmute;
/// A two-line element set (TLE) describing a satellite orbit.
pub struct Tle {
    /// Satellite name line.
    pub name: String,
    /// First TLE data line.
    pub line1: String,
    /// Second TLE data line.
    pub line2: String,
}

/// Ground-station (observer) location.
pub struct Location {
    /// Latitude in degrees.
    pub lat_deg: f64,
    /// Longitude in degrees.
    pub lon_deg: f64,
    /// Altitude in metres.
    pub alt_m: i32,
}
/// Satellite state as last computed by `Predict::update`.
#[derive(Default)]
pub struct Sat {
    /// next AOS
    pub aos: f64,
    /// next LOS
    pub los: f64,
    /// azimuth [deg]
    pub az_deg: f64,
    /// elevation [deg]
    pub el_deg: f64,
    /// range [km]
    pub range_km: f64,
    /// range rate [km/sec]
    pub range_rate_km_sec: f64,
    /// SSP latitude [deg]
    pub lat_deg: f64,
    /// SSP longitude [deg]
    pub lon_deg: f64,
    /// altitude [km]
    pub alt_km: f64,
    /// velocity [km/s]
    pub vel_km_s: f64,
    /// orbit number
    pub orbit_nr: u64,
}

/// Pass predictor pairing a parsed satellite (TLE) with an observer
/// location; owns the raw FFI state fed to `predict_calc`.
pub struct Predict {
    /// Latest computed satellite state; refreshed by `update`.
    pub sat: Sat,
    // Raw FFI satellite state (includes the parsed TLE).
    p_sat: ffipredict::sat_t,
    // Raw FFI observer (QTH) state.
    p_qth: ffipredict::qth_t,
}
/// Parses a `Tle` into the FFI `tle_t` structure.
///
/// The name and both element lines are NUL-terminated and copied into three
/// fixed 80-byte rows, which is the layout `Get_Next_Tle_Set` reads.
fn create_tle_t(tle: Tle) -> Result<ffipredict::tle_t, &'static str> {
    // Zero-initialised output structure; filled in by the C parser.
    let mut tle_t = ffipredict::tle_t {
        epoch: 0.0,
        epoch_year: 0,
        epoch_day: 0,
        epoch_fod: 0.0,
        xndt2o: 0.0,
        xndd6o: 0.0,
        bstar: 0.0,
        xincl: 0.0,
        xnodeo: 0.0,
        eo: 0.0,
        omegao: 0.0,
        xmo: 0.0,
        xno: 0.0,
        catnr: 0,
        elset: 0,
        revnum: 0,
        sat_name: [0; 25],
        idesg: [0; 9],
        status: ffipredict::op_stat_t::OP_STAT_UNKNOWN,
        xincl1: 0.0,
        xnodeo1: 0.0,
        omegao1: 0.0,
        //..Default::default()
    };
    // CString::new fails only on interior NUL bytes.
    let name = CString::new(tle.name).unwrap();
    let line1 = CString::new(tle.line1).unwrap();
    let line2 = CString::new(tle.line2).unwrap();
    let mut buf = [[0u8; 80]; 3];
    // NOTE(review): uses the unstable std::slice::bytes::copy_memory
    // (dst, src argument order) — presumably panics if a line exceeds an
    // 80-byte row; confirm input lengths upstream.
    copy_memory(&mut buf[0], name.as_bytes_with_nul());
    copy_memory(&mut buf[1], line1.as_bytes_with_nul());
    copy_memory(&mut buf[2], line2.as_bytes_with_nul());
    // Reinterprets the address of the first byte as a C string pointer.
    let tle_set_result = unsafe { ffipredict::Get_Next_Tle_Set(transmute::<&u8, *const c_char>(&buf[0][0]), &mut tle_t)};
    if tle_set_result == 1 {
        Ok(tle_t)
    }
    else {
        Err("error in TLE parsing")
    }
}
impl Predict {
    /// Builds a predictor from a TLE and an observer location.
    ///
    /// Panics if the TLE cannot be parsed (see `create_tle_t`).
    pub fn new(tle: Tle, location: Location) -> Predict {
        let tle_t = create_tle_t(tle).unwrap();
        let sgps: ffipredict::sgpsdp_static_t = Default::default();
        let dps: ffipredict::deep_static_t = Default::default();
        let deep_arg: ffipredict::deep_arg_t = Default::default();
        let pos: ffipredict::vector_t = Default::default();
        let vel: ffipredict::vector_t = Default::default();
        // NOTE(review): the string fields point at static placeholder data;
        // confirm the C side never frees or writes through these pointers.
        let mut sat_t = ffipredict::sat_t{
            name: b"placeholder\0".as_ptr() as *const i8,
            nickname: b"placeholder\0".as_ptr() as *const i8,
            website: b"placeholder\0".as_ptr() as *const i8,
            tle: tle_t,
            flags: 0,
            sgps: sgps,
            dps: dps,
            deep_arg: deep_arg,
            pos: pos,
            vel: vel,
            jul_epoch: 0.0,
            jul_utc: 0.0,
            tsince: 0.0,
            aos: 0.0,
            los: 0.0,
            az: 0.0,
            el: 0.0,
            range: 0.0,
            range_rate: 0.0,
            ra: 0.0,
            dec: 0.0,
            ssplat: 0.0,
            ssplon: 0.0,
            alt: 0.0,
            velo: 0.0,
            ma: 0.0,
            footprint: 0.0,
            phase: 0.0,
            meanmo: 0.0,
            orbit: 0,
            otype: ffipredict::orbit_type_t::ORBIT_TYPE_UNKNOWN,
        };
        let sat: Sat = Default::default();
        // Observer (QTH) record built from the caller-supplied location.
        let mut qth = ffipredict::qth_t {
            name: b"placeholder\0".as_ptr() as *const i8,
            loc: b"placeholder\0".as_ptr() as *const i8,
            desc: b"placeholder\0".as_ptr() as *const i8,
            lat: location.lat_deg,
            lon: location.lon_deg,
            alt: location.alt_m,
            qra: b"placeholder\0".as_ptr() as *const i8,
            wx: b"placeholder\0".as_ptr() as *const i8,
        };
        // Let the C library pick the propagation model and initialise state.
        unsafe {ffipredict::select_ephemeris(&mut sat_t)};
        unsafe {ffipredict::gtk_sat_data_init_sat(&mut sat_t, &mut qth)};
        Predict{sat: sat, p_sat: sat_t, p_qth: qth}
    }

    /// Recomputes the satellite state for the given Julian day number, or
    /// for "now" when `timeoption` is `None`, and copies the results into
    /// the public `sat` field.
    pub fn update(&mut self, timeoption: Option<c_double>) {
        let juliantime = match timeoption {
            Some(t) => t,
            None => unsafe {ffipredict::get_current_daynum()}
        };
        unsafe {ffipredict::predict_calc(&mut self.p_sat, &mut self.p_qth, juliantime)};
        // Mirror the FFI results into the safe, public struct.
        self.sat.aos = self.p_sat.aos;
        self.sat.los = self.p_sat.los;
        self.sat.az_deg = self.p_sat.az;
        self.sat.el_deg = self.p_sat.el;
        self.sat.range_km = self.p_sat.range;
        self.sat.range_rate_km_sec = self.p_sat.range_rate;
        self.sat.lat_deg = self.p_sat.ssplat;
        self.sat.lon_deg = self.p_sat.ssplon;
        self.sat.alt_km = self.p_sat.alt;
        self.sat.vel_km_s = self.p_sat.velo;
        self.sat.orbit_nr = self.p_sat.orbit;
    }
}
Replace unstable std::slice::bytes::copy_memory with our own copy_memory implementation.
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Andres Vahter (andres.vahter@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
use ffipredict;
use std::ffi::{CString};
use libc::{c_char, c_double};
use std::default::Default;
use std::{cmp, ptr};
use std::mem::transmute;
/// A two-line element set (TLE) describing a satellite orbit.
pub struct Tle {
    /// Satellite name line.
    pub name: String,
    /// First TLE data line.
    pub line1: String,
    /// Second TLE data line.
    pub line2: String,
}

/// Ground-station (observer) location.
pub struct Location {
    /// Latitude in degrees.
    pub lat_deg: f64,
    /// Longitude in degrees.
    pub lon_deg: f64,
    /// Altitude in metres.
    pub alt_m: i32,
}
/// Satellite state as last computed by `Predict::update`.
#[derive(Default)]
pub struct Sat {
    /// next AOS
    pub aos: f64,
    /// next LOS
    pub los: f64,
    /// azimuth [deg]
    pub az_deg: f64,
    /// elevation [deg]
    pub el_deg: f64,
    /// range [km]
    pub range_km: f64,
    /// range rate [km/sec]
    pub range_rate_km_sec: f64,
    /// SSP latitude [deg]
    pub lat_deg: f64,
    /// SSP longitude [deg]
    pub lon_deg: f64,
    /// altitude [km]
    pub alt_km: f64,
    /// velocity [km/s]
    pub vel_km_s: f64,
    /// orbit number
    pub orbit_nr: u64,
}

/// Pass predictor pairing a parsed satellite (TLE) with an observer
/// location; owns the raw FFI state fed to `predict_calc`.
pub struct Predict {
    /// Latest computed satellite state; refreshed by `update`.
    pub sat: Sat,
    // Raw FFI satellite state (includes the parsed TLE).
    p_sat: ffipredict::sat_t,
    // Raw FFI observer (QTH) state.
    p_qth: ffipredict::qth_t,
}
/// Copies as many bytes as fit from `src` into `dst` and returns the number
/// of bytes copied (the length of the shorter slice).
///
/// Replacement for the unstable `std::slice::bytes::copy_memory`. Written as
/// safe code: the previous pointer-based version indexed `src[0]`/`dst[0]`
/// and therefore panicked when either slice was empty.
fn copy_memory(src: &[u8], dst: &mut [u8]) -> usize {
    let len = cmp::min(src.len(), dst.len());
    // Equal-length subslices, so `copy_from_slice` cannot panic.
    dst[..len].copy_from_slice(&src[..len]);
    len
}
/// Parses a `Tle` into the FFI `tle_t` structure.
///
/// The name and both element lines are NUL-terminated and copied into three
/// fixed 80-byte rows, which is the layout `Get_Next_Tle_Set` reads.
///
/// # Errors
/// Returns an error when the C library rejects the element set.
fn create_tle_t(tle: Tle) -> Result<ffipredict::tle_t, &'static str> {
    // Zero-initialised output structure; filled in by the C parser.
    let mut tle_t = ffipredict::tle_t {
        epoch: 0.0,
        epoch_year: 0,
        epoch_day: 0,
        epoch_fod: 0.0,
        xndt2o: 0.0,
        xndd6o: 0.0,
        bstar: 0.0,
        xincl: 0.0,
        xnodeo: 0.0,
        eo: 0.0,
        omegao: 0.0,
        xmo: 0.0,
        xno: 0.0,
        catnr: 0,
        elset: 0,
        revnum: 0,
        sat_name: [0; 25],
        idesg: [0; 9],
        status: ffipredict::op_stat_t::OP_STAT_UNKNOWN,
        xincl1: 0.0,
        xnodeo1: 0.0,
        omegao1: 0.0,
        //..Default::default()
    };
    // CString::new fails only on interior NUL bytes.
    let name = CString::new(tle.name).unwrap();
    let line1 = CString::new(tle.line1).unwrap();
    let line2 = CString::new(tle.line2).unwrap();
    let mut buf = [[0u8; 80]; 3];
    copy_memory(name.as_bytes_with_nul(), &mut buf[0]);
    copy_memory(line1.as_bytes_with_nul(), &mut buf[1]);
    copy_memory(line2.as_bytes_with_nul(), &mut buf[2]);
    // A plain pointer cast is all that's needed here; the previous
    // `transmute::<&u8, *const c_char>` was needlessly unsafe for an
    // equivalent reinterpretation of the same address.
    let tle_set_result =
        unsafe { ffipredict::Get_Next_Tle_Set(buf[0].as_ptr() as *const c_char, &mut tle_t) };
    if tle_set_result == 1 {
        Ok(tle_t)
    } else {
        Err("error in TLE parsing")
    }
}
impl Predict {
    /// Builds a predictor from a TLE and an observer location.
    ///
    /// Panics if the TLE cannot be parsed (see `create_tle_t`).
    pub fn new(tle: Tle, location: Location) -> Predict {
        let tle_t = create_tle_t(tle).unwrap();
        let sgps: ffipredict::sgpsdp_static_t = Default::default();
        let dps: ffipredict::deep_static_t = Default::default();
        let deep_arg: ffipredict::deep_arg_t = Default::default();
        let pos: ffipredict::vector_t = Default::default();
        let vel: ffipredict::vector_t = Default::default();
        // NOTE(review): the string fields point at static placeholder data;
        // confirm the C side never frees or writes through these pointers.
        let mut sat_t = ffipredict::sat_t{
            name: b"placeholder\0".as_ptr() as *const i8,
            nickname: b"placeholder\0".as_ptr() as *const i8,
            website: b"placeholder\0".as_ptr() as *const i8,
            tle: tle_t,
            flags: 0,
            sgps: sgps,
            dps: dps,
            deep_arg: deep_arg,
            pos: pos,
            vel: vel,
            jul_epoch: 0.0,
            jul_utc: 0.0,
            tsince: 0.0,
            aos: 0.0,
            los: 0.0,
            az: 0.0,
            el: 0.0,
            range: 0.0,
            range_rate: 0.0,
            ra: 0.0,
            dec: 0.0,
            ssplat: 0.0,
            ssplon: 0.0,
            alt: 0.0,
            velo: 0.0,
            ma: 0.0,
            footprint: 0.0,
            phase: 0.0,
            meanmo: 0.0,
            orbit: 0,
            otype: ffipredict::orbit_type_t::ORBIT_TYPE_UNKNOWN,
        };
        let sat: Sat = Default::default();
        // Observer (QTH) record built from the caller-supplied location.
        let mut qth = ffipredict::qth_t {
            name: b"placeholder\0".as_ptr() as *const i8,
            loc: b"placeholder\0".as_ptr() as *const i8,
            desc: b"placeholder\0".as_ptr() as *const i8,
            lat: location.lat_deg,
            lon: location.lon_deg,
            alt: location.alt_m,
            qra: b"placeholder\0".as_ptr() as *const i8,
            wx: b"placeholder\0".as_ptr() as *const i8,
        };
        // Let the C library pick the propagation model and initialise state.
        unsafe {ffipredict::select_ephemeris(&mut sat_t)};
        unsafe {ffipredict::gtk_sat_data_init_sat(&mut sat_t, &mut qth)};
        Predict{sat: sat, p_sat: sat_t, p_qth: qth}
    }

    /// Recomputes the satellite state for the given Julian day number, or
    /// for "now" when `timeoption` is `None`, and copies the results into
    /// the public `sat` field.
    pub fn update(&mut self, timeoption: Option<c_double>) {
        let juliantime = match timeoption {
            Some(t) => t,
            None => unsafe {ffipredict::get_current_daynum()}
        };
        unsafe {ffipredict::predict_calc(&mut self.p_sat, &mut self.p_qth, juliantime)};
        // Mirror the FFI results into the safe, public struct.
        self.sat.aos = self.p_sat.aos;
        self.sat.los = self.p_sat.los;
        self.sat.az_deg = self.p_sat.az;
        self.sat.el_deg = self.p_sat.el;
        self.sat.range_km = self.p_sat.range;
        self.sat.range_rate_km_sec = self.p_sat.range_rate;
        self.sat.lat_deg = self.p_sat.ssplat;
        self.sat.lon_deg = self.p_sat.ssplon;
        self.sat.alt_km = self.p_sat.alt;
        self.sat.vel_km_s = self.p_sat.velo;
        self.sat.orbit_nr = self.p_sat.orbit;
    }
}
|
#![allow(dead_code, unused_macros)]
use utils::constants::WALLET_CREDENTIALS;
pub mod callback;
#[macro_use]
#[path = "../../src/utils/memzeroize.rs"]
pub mod zeroize;
#[path = "../../src/utils/environment.rs"]
pub mod environment;
pub mod pool;
pub mod crypto;
pub mod did;
pub mod wallet;
pub mod ledger;
pub mod anoncreds;
pub mod types;
pub mod pairwise;
pub mod constants;
pub mod blob_storage;
pub mod non_secrets;
pub mod results;
pub mod payments;
pub mod rand_utils;
pub mod logger;
#[macro_use]
#[allow(unused_macros)]
#[path = "../../src/utils/test.rs"]
pub mod test;
pub mod timeout;
#[path = "../../src/utils/sequence.rs"]
pub mod sequence;
#[macro_use]
#[allow(unused_macros)]
#[path = "../../src/utils/ctypes.rs"]
pub mod ctypes;
#[path = "../../src/utils/inmem_wallet.rs"]
pub mod inmem_wallet;
#[path = "../../src/domain/mod.rs"]
pub mod domain;
/// Wipes any storage left over from a previous run of test `name` and
/// installs the default logger.
pub fn setup(name: &str) {
    test::cleanup_storage(name);
    logger::set_default_logger();
}

/// Removes the storage created by test `name`.
pub fn tear_down(name: &str) {
    test::cleanup_storage(name);
}

/// Runs `setup` and opens the default wallet.
///
/// Returns the wallet handle and its config string.
pub fn setup_with_wallet(name: &str) -> (i32, String) {
    setup(name);
    wallet::create_and_open_default_wallet().unwrap()
}

/// Runs `setup` and opens a plugged wallet.
pub fn setup_with_plugged_wallet(name: &str) -> (i32, String) {
    setup(name);
    wallet::create_and_open_plugged_wallet().unwrap()
}

/// Closes and deletes the wallet, then tears down test storage.
pub fn tear_down_with_wallet(wallet_handle: i32, name: &str, wallet_config: &str) {
    wallet::close_wallet(wallet_handle).unwrap();
    wallet::delete_wallet(wallet_config, WALLET_CREDENTIALS).unwrap();
    tear_down(name);
}
/// Runs `setup` and opens the shared pool ledger, returning its handle.
pub fn setup_with_pool(name: &str) -> i32 {
    setup(name);
    pool::create_and_open_pool_ledger(constants::POOL).unwrap()
}

/// Closes the pool and tears down test storage.
pub fn tear_down_with_pool(pool_handle: i32, name: &str) {
    pool::close(pool_handle).unwrap();
    tear_down(name);
}

/// Opens both a wallet and the pool ledger.
///
/// Returns (wallet handle, pool handle, wallet config).
pub fn setup_with_wallet_and_pool(name: &str) -> (i32, i32, String) {
    let (wallet_handle, config) = setup_with_wallet(name);
    let pool_handle = pool::create_and_open_pool_ledger(constants::POOL).unwrap();
    (wallet_handle, pool_handle, config)
}

/// Closes the pool, then the wallet, then tears down test storage.
pub fn tear_down_with_wallet_and_pool(wallet_handle: i32, pool_handle: i32, name: &str, wallet_config: &str) {
    pool::close(pool_handle).unwrap();
    tear_down_with_wallet(wallet_handle, name, wallet_config);
}
/// Sets up wallet + pool and creates a DID from the well-known trustee seed.
///
/// Returns (wallet handle, pool handle, trustee DID, wallet config).
pub fn setup_trustee(name: &str) -> (i32, i32, String, String) {
    let (wallet_handle, pool_handle, config) = setup_with_wallet_and_pool(name);
    let (did, _) = did::create_and_store_my_did(wallet_handle, Some(constants::TRUSTEE_SEED)).unwrap();
    (wallet_handle, pool_handle, did, config)
}

/// Sets up wallet + pool and creates a DID from the well-known steward seed.
pub fn setup_steward(name: &str) -> (i32, i32, String, String) {
    let (wallet_handle, pool_handle, config) = setup_with_wallet_and_pool(name);
    let (did, _) = did::create_and_store_my_did(wallet_handle, Some(constants::STEWARD_SEED)).unwrap();
    (wallet_handle, pool_handle, did, config)
}

/// Sets up a wallet and creates a fresh (random-seed) DID in it.
pub fn setup_did(name: &str) -> (i32, String, String) {
    let (wallet_handle, config) = setup_with_wallet(name);
    let (did, _) = did::create_and_store_my_did(wallet_handle, None).unwrap();
    (wallet_handle, did, config)
}

/// Creates a fresh identity and registers it on the ledger (NYM with the
/// TRUSTEE role) using the trustee DID as the submitter.
pub fn setup_new_identity(name: &str) -> (i32, i32, String, String, String) {
    let (wallet_handle, pool_handle, trustee_did, config) = setup_trustee(name);
    let (my_did, my_vk) = did::create_and_store_my_did(wallet_handle, None).unwrap();
    let nym = ledger::build_nym_request(&trustee_did, &my_did, Some(&my_vk), None, Some("TRUSTEE")).unwrap();
    let response = ledger::sign_and_submit_request(pool_handle, wallet_handle, &trustee_did, &nym).unwrap();
    pool::check_response_type(&response, types::ResponseType::REPLY);
    (wallet_handle, pool_handle, my_did, my_vk, config)
}
Use the test name as the pool name.
Signed-off-by: Axel Nennker <f4eafe9675d8076d9cbda6a271b098f042c841ad@telekom.de>
#![allow(dead_code, unused_macros)]
use utils::constants::WALLET_CREDENTIALS;
pub mod callback;
#[macro_use]
#[path = "../../src/utils/memzeroize.rs"]
pub mod zeroize;
#[path = "../../src/utils/environment.rs"]
pub mod environment;
pub mod pool;
pub mod crypto;
pub mod did;
pub mod wallet;
pub mod ledger;
pub mod anoncreds;
pub mod types;
pub mod pairwise;
pub mod constants;
pub mod blob_storage;
pub mod non_secrets;
pub mod results;
pub mod payments;
pub mod rand_utils;
pub mod logger;
#[macro_use]
#[allow(unused_macros)]
#[path = "../../src/utils/test.rs"]
pub mod test;
pub mod timeout;
#[path = "../../src/utils/sequence.rs"]
pub mod sequence;
#[macro_use]
#[allow(unused_macros)]
#[path = "../../src/utils/ctypes.rs"]
pub mod ctypes;
#[path = "../../src/utils/inmem_wallet.rs"]
pub mod inmem_wallet;
#[path = "../../src/domain/mod.rs"]
pub mod domain;
/// Wipes any storage left over from a previous run of test `name` and
/// installs the default logger.
pub fn setup(name: &str) {
    test::cleanup_storage(name);
    logger::set_default_logger();
}

/// Removes the storage created by test `name`.
pub fn tear_down(name: &str) {
    test::cleanup_storage(name);
}

/// Runs `setup` and opens the default wallet.
///
/// Returns the wallet handle and its config string.
pub fn setup_with_wallet(name: &str) -> (i32, String) {
    setup(name);
    wallet::create_and_open_default_wallet().unwrap()
}

/// Runs `setup` and opens a plugged wallet.
pub fn setup_with_plugged_wallet(name: &str) -> (i32, String) {
    setup(name);
    wallet::create_and_open_plugged_wallet().unwrap()
}

/// Closes and deletes the wallet, then tears down test storage.
pub fn tear_down_with_wallet(wallet_handle: i32, name: &str, wallet_config: &str) {
    wallet::close_wallet(wallet_handle).unwrap();
    wallet::delete_wallet(wallet_config, WALLET_CREDENTIALS).unwrap();
    tear_down(name);
}
/// Runs `setup` and opens a pool ledger, returning its handle.
///
/// NOTE(review): this still uses the shared `constants::POOL` name while
/// `setup_with_wallet_and_pool` below now uses the per-test `name` — confirm
/// whether this one was meant to be updated as well.
pub fn setup_with_pool(name: &str) -> i32 {
    setup(name);
    pool::create_and_open_pool_ledger(constants::POOL).unwrap()
}

/// Closes the pool and tears down test storage.
pub fn tear_down_with_pool(pool_handle: i32, name: &str) {
    pool::close(pool_handle).unwrap();
    tear_down(name);
}

/// Opens both a wallet and a pool ledger named after the test.
///
/// Returns (wallet handle, pool handle, wallet config).
pub fn setup_with_wallet_and_pool(name: &str) -> (i32, i32, String) {
    let (wallet_handle, config) = setup_with_wallet(name);
    let pool_handle = pool::create_and_open_pool_ledger(name).unwrap();
    (wallet_handle, pool_handle, config)
}

/// Closes the pool, then the wallet, then tears down test storage.
pub fn tear_down_with_wallet_and_pool(wallet_handle: i32, pool_handle: i32, name: &str, wallet_config: &str) {
    pool::close(pool_handle).unwrap();
    tear_down_with_wallet(wallet_handle, name, wallet_config);
}
/// Sets up wallet + pool and creates a DID from the well-known trustee seed.
///
/// Returns (wallet handle, pool handle, trustee DID, wallet config).
pub fn setup_trustee(name: &str) -> (i32, i32, String, String) {
    let (wallet_handle, pool_handle, config) = setup_with_wallet_and_pool(name);
    let (did, _) = did::create_and_store_my_did(wallet_handle, Some(constants::TRUSTEE_SEED)).unwrap();
    (wallet_handle, pool_handle, did, config)
}

/// Sets up wallet + pool and creates a DID from the well-known steward seed.
pub fn setup_steward(name: &str) -> (i32, i32, String, String) {
    let (wallet_handle, pool_handle, config) = setup_with_wallet_and_pool(name);
    let (did, _) = did::create_and_store_my_did(wallet_handle, Some(constants::STEWARD_SEED)).unwrap();
    (wallet_handle, pool_handle, did, config)
}

/// Sets up a wallet and creates a fresh (random-seed) DID in it.
pub fn setup_did(name: &str) -> (i32, String, String) {
    let (wallet_handle, config) = setup_with_wallet(name);
    let (did, _) = did::create_and_store_my_did(wallet_handle, None).unwrap();
    (wallet_handle, did, config)
}

/// Creates a fresh identity and registers it on the ledger (NYM with the
/// TRUSTEE role) using the trustee DID as the submitter.
pub fn setup_new_identity(name: &str) -> (i32, i32, String, String, String) {
    let (wallet_handle, pool_handle, trustee_did, config) = setup_trustee(name);
    let (my_did, my_vk) = did::create_and_store_my_did(wallet_handle, None).unwrap();
    let nym = ledger::build_nym_request(&trustee_did, &my_did, Some(&my_vk), None, Some("TRUSTEE")).unwrap();
    let response = ledger::sign_and_submit_request(pool_handle, wallet_handle, &trustee_did, &nym).unwrap();
    pool::check_response_type(&response, types::ResponseType::REPLY);
    (wallet_handle, pool_handle, my_did, my_vk, config)
} |
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::{HashMap, HashSet};
use std::time::Duration;
use failure::Error;
use ggez::conf::NumSamples;
use ggez::graphics;
use ggez::timer;
use ggez::{Context, GameResult};
use ggez_goodies::scene;
use specs::world::{Builder, Index};
use specs::{Component, Dispatcher, DispatcherBuilder, Entity, EntityBuilder, Join, World};
use warmy::{LogicalKey, Store};
use super::transition::FadeStyle;
use super::Fade;
use crate::animation::{Image, ImageType, Sprite, SpriteData};
use crate::combat::components::{
AiState, AnimationState, Body, Collided, Controller, DaggersInventory, Draw, Facing, Health,
Intent, MustLive, Palette, Position, State, UnitType, Velocity, WalkingState, Weapon,
};
use crate::combat::damage::DamageTables;
use crate::combat::systems::boundary::TopBoundary;
use crate::combat::systems::health::CombatDone;
use crate::combat::systems::{
ActionSystem, Animation, BlackKnightAi, CheckCollisions, CheckEndOfCombat, Commander,
ConfirmVelocity, EntityDeath, EntityEntityCollision, Movement, OutOfBounds, ResolveCollisions,
RestrictMovementToBoundary, StateUpdater, UpdateBoundingBoxes, UpdateImage, VelocitySystem,
};
use crate::files::collide::CollisionBoxes;
use crate::files::terrain::scenery_rects;
use crate::files::TerrainFile;
use crate::game::{Game, SceneState};
use crate::input;
use crate::manager::GameYaml;
use crate::objects::TextureAtlas;
use crate::palette::PaletteSwaps;
use crate::piv::{palette_swap, Colour, PivImage};
use crate::rect::Rect;
use crate::scenes::FSceneSwitch;
/// Texture atlases loaded for the current encounter, keyed by atlas name.
#[derive(Debug, Default, Clone)]
pub struct EncounterTextures {
    pub data: HashMap<String, TextureAtlas>,
}

// Ticks to keep running after combat ends before starting the fade-out.
const TICKS_TO_WAIT: u32 = 35;
/// Scene that runs a single combat encounter between two combatants.
pub struct EncounterScene<'a> {
    /// ECS world holding all combat entities and resources.
    pub specs_world: World,
    /// System execution graph built by `build_dispatcher`.
    pub dispatcher: Dispatcher<'a, 'a>,
    /// Pre-rendered background canvas (base image plus terrain scenery).
    pub background: graphics::Canvas,
    pub palette: Vec<Colour>,
    // Entity ids of the two combatants, used to route input.
    player_1: Index,
    player_2: Index,
    // the number of ticks since the encounter is done
    ticks_after: u32,
    // we do the fade out first, pop back to this scene then pop the encounter.
    fade_out_done: bool,
}
impl<'a> EncounterScene<'a> {
/// Creates the ECS world and registers every component type used by the
/// combat systems.
fn build_world() -> World {
    let mut world = World::new();
    world.register::<AiState>();
    world.register::<AnimationState>();
    world.register::<Body>();
    world.register::<Collided>();
    world.register::<Controller>();
    world.register::<DaggersInventory>();
    world.register::<Draw>();
    world.register::<Health>();
    world.register::<Intent>();
    world.register::<MustLive>();
    world.register::<Palette>();
    world.register::<Position>();
    world.register::<State>();
    world.register::<UnitType>();
    world.register::<Velocity>();
    world.register::<WalkingState>();
    world.register::<Weapon>();
    world
}
/// Wires up the system execution graph.
///
/// The string names declare explicit run-order dependencies: input/AI run
/// first, then actions and velocity resolution, then movement, animation,
/// image/bounding-box updates, collision handling and state updates.
fn build_dispatcher() -> Dispatcher<'a, 'a> {
    DispatcherBuilder::new()
        .with(Commander, "commander", &[])
        .with(BlackKnightAi, "black_knight_ai", &[])
        .with(ActionSystem, "action", &["commander", "black_knight_ai"])
        .with(EntityDeath, "entity_death", &["action"])
        .with(CheckEndOfCombat, "check_end_of_combat", &["entity_death"])
        .with(VelocitySystem, "velocity", &["commander"])
        .with(EntityEntityCollision, "entity_collision", &["velocity"])
        .with(
            RestrictMovementToBoundary,
            "restrict_movement_to_boundary",
            &["velocity"],
        )
        .with(
            ConfirmVelocity,
            "confirm_velocity",
            &["restrict_movement_to_boundary", "entity_collision"],
        )
        .with(Movement, "movement", &["confirm_velocity"])
        .with(Animation, "animation", &["movement"])
        //.with(StateUpdater, "state_updater", &["animation"])
        .with(UpdateImage, "update_image", &["animation"])
        .with(
            UpdateBoundingBoxes,
            "update_bounding_boxes",
            &["update_image"],
        )
        .with(
            CheckCollisions,
            "check_collisions",
            &["update_bounding_boxes"],
        )
        .with(
            ResolveCollisions,
            "resolve_collisions",
            &["check_collisions"],
        )
        .with(StateUpdater, "state_updater", &["resolve_collisions"])
        .with(OutOfBounds, "out_of_bounds", &[])
        // .with_thread_local(Renderer {
        //     store: Store::new(StoreOpt::default()).expect("store creation"),
        // })
        .build()
}
/// Loads shared combat resources (the damage tables) into the world and
/// resets the combat-done flag.
fn load_resources(
    ctx: &mut Context,
    store: &mut Store<Context>,
    world: &mut World,
) -> Result<(), Error> {
    let damage_tables = store
        .get::<_, DamageTables>(&LogicalKey::new("/damage.yaml"), ctx)
        .expect("error loading damage.yaml");
    world.add_resource(damage_tables.borrow().clone());
    world.add_resource(CombatDone(false));
    Ok(())
}
/// Loads sprite definitions for the given entities and the texture atlases
/// they reference, storing both as world resources.
fn load_sprite_data(
    ctx: &mut Context,
    store: &mut Store<Context>,
    world: &mut World,
    entity_names: &[&str],
) -> Result<(), Error> {
    let entities_yaml =
        store.get::<_, GameYaml>(&warmy::LogicalKey::new("/entities.yaml"), ctx)?;
    let mut sprites: HashMap<String, Sprite> = HashMap::new();
    let mut atlas_names: HashSet<String> = HashSet::new();
    for name in entity_names {
        let yaml_borrow = &entities_yaml.borrow();
        let yaml_file = yaml_borrow.yaml[name].as_str().unwrap();
        let entity_yaml = store.get::<_, Sprite>(&warmy::LogicalKey::new(yaml_file), ctx)?;
        sprites.insert(name.to_string(), (*entity_yaml.borrow()).clone());
        // Collect every atlas sheet referenced by any image of any frame of
        // any animation of this sprite.
        for i in entity_yaml
            .borrow()
            .animations
            .values()
            .map(|a| &a.frames)
            .flatten()
            .map(|f| &f.images)
            .flatten()
            .map(|i| &i.sheet)
        {
            atlas_names.insert(i.clone());
        }
    }
    world.add_resource(SpriteData { sprites });
    // NOTE(review): `image_sizes` is populated but never read or stored —
    // looks like dead code; confirm before removing.
    let mut image_sizes: HashMap<String, Vec<Rect>> = HashMap::new();
    let mut texture_atlases: HashMap<String, TextureAtlas> = HashMap::new();
    for atlas_name in atlas_names {
        let atlas = store
            .get::<_, TextureAtlas>(&LogicalKey::new(atlas_name.clone()), ctx)
            .unwrap();
        image_sizes.insert(atlas_name.clone(), atlas.borrow().rects.clone());
        texture_atlases.insert(atlas_name.clone(), atlas.borrow().clone());
    }
    world.add_resource(EncounterTextures {
        data: texture_atlases,
    });
    Ok(())
}
// fn create_player_entity(entity_builder: EntityBuilder) -> Entity {
// entity_builder
// .with(Controller {
// ..Default::default()
// })
// .build()
// }
/// Creates a partially-built combat entity with all common components
/// (sprite, palette, position, health, intent, physics, weapon, …).
///
/// Returns the `EntityBuilder` so the caller can attach a `Controller`
/// (player) or `AiState` component before calling `.build()`.
///
/// NOTE(review): `raw_palette: &Vec<u16>` would conventionally be `&[u16]`
/// (clippy `ptr_arg`); left as-is because `palette_swap`'s exact signature
/// is not visible here.
fn build_entity(
    ctx: &mut Context,
    store: &mut Store<Context>,
    world: &'a mut World,
    resource: &str,
    raw_palette: &Vec<u16>,
    palette_name: &str,
    x: i32,
    y: i32,
    direction: Facing,
) -> EntityBuilder<'a> {
    let sprite_res = store
        .get::<_, Sprite>(&LogicalKey::new(format!("/{}.yaml", resource)), ctx)
        .unwrap();
    let sprite = sprite_res.borrow();
    let swaps_res = store
        .get::<_, PaletteSwaps>(&LogicalKey::new("/palettes.yaml"), ctx)
        .expect("error loading palette.yaml");
    let swaps = swaps_res.borrow();
    world
        .create_entity()
        .with(UnitType {
            name: resource.to_string(),
        })
        // Swap the base palette into this entity's colour scheme.
        .with(Palette {
            name: palette_name.to_string(),
            palette: palette_swap(
                &raw_palette,
                &swaps.0.get(&palette_name.to_string()).expect("no palette"),
            ),
        })
        .with(MustLive {})
        .with(Position { x: x, y: y })
        .with(Health {
            ..Default::default()
        })
        // Every entity starts in its "entrance" animation.
        .with(Draw {
            frame: sprite.animations["entrance"].frames[0].clone(),
            animation: "entrance".to_string(),
            resource_name: resource.to_string(),
            direction: direction,
        })
        .with(Intent {
            ..Default::default()
        })
        .with(WalkingState {
            ..Default::default()
        })
        .with(Velocity {
            ..Default::default()
        })
        .with(AnimationState {
            ..Default::default()
        })
        .with(State {
            direction: direction,
            ..Default::default()
        })
        .with(Body {
            ..Default::default()
        })
        .with(Weapon {
            ..Default::default()
        })
        .with(DaggersInventory {
            ..Default::default()
        })
}
/// Builds a complete encounter: ECS world, pre-rendered background, terrain
/// and two knight entities (player-controlled blue vs AI-driven green).
pub fn new(
    ctx: &mut Context,
    game: &mut Game,
    entity_names: &[&str],
    background_name: &str,
    terrain_name: &str,
) -> Result<Self, Error> {
    let mut world = EncounterScene::build_world();
    EncounterScene::load_sprite_data(ctx, &mut game.store, &mut world, entity_names)?;
    EncounterScene::load_resources(ctx, &mut game.store, &mut world)?;
    // Render the 320x200 background image scaled 3x onto an offscreen canvas.
    let piv = game
        .store
        .get::<_, PivImage>(&LogicalKey::new(background_name), ctx)
        .unwrap();
    let background_image =
        graphics::Image::from_rgba8(ctx, 320, 200, &*piv.borrow().to_rgba8()).unwrap();
    let background = graphics::Canvas::new(ctx, 320, 200, NumSamples::One)?;
    graphics::set_canvas(ctx, Some(&background));
    let screen_origin = graphics::Point2::new(0.0, 0.0);
    graphics::draw_ex(
        ctx,
        &background_image,
        graphics::DrawParam {
            dest: screen_origin,
            scale: graphics::Point2::new(3.0, 3.0),
            ..Default::default()
        },
    )?;
    let y_max = EncounterScene::draw_terrain(ctx, game, &mut world, terrain_name, &background)?;
    let collide_hit = game
        .store
        .get::<_, CollisionBoxes>(&LogicalKey::new("collide"), ctx)
        .unwrap();
    world.add_resource(collide_hit.borrow().clone());
    // Player 1: input-controlled blue knight entering from the right.
    let y = EncounterScene::next_starting_position(game, y_max as i32);
    let player_1 = EncounterScene::build_entity(
        ctx,
        &mut game.store,
        &mut world,
        "knight",
        &piv.borrow().raw_palette,
        "blue_knight",
        250,
        y,
        Facing::Left,
    )
    .with(Controller {
        ..Default::default()
    })
    .build();
    // Player 2: AI-controlled green knight entering from the left,
    // targeting player 1.
    let y = EncounterScene::next_starting_position(game, y_max as i32);
    let player_2 = EncounterScene::build_entity(
        ctx,
        &mut game.store,
        &mut world,
        "knight",
        &piv.borrow().raw_palette,
        "green_knight",
        30,
        y,
        Facing::default(),
    )
    .with(AiState {
        class: "black_knight".to_string(),
        target: Some(player_1),
        y_range: 4,
        close_range: 80,
        long_range: 100,
    })
    .build();
    let palette: Vec<Colour> = piv.borrow().palette.to_vec();
    Ok(Self {
        palette,
        specs_world: world,
        dispatcher: EncounterScene::build_dispatcher(),
        background,
        player_1: player_1.id(),
        player_2: player_2.id(),
        ticks_after: 0,
        fade_out_done: false,
    })
}
/// Draws the terrain scenery onto the (already bound) background canvas and
/// registers the play-area top boundary.
///
/// Returns the maximum scenery `y`, which callers use to place combatants.
fn draw_terrain(
    ctx: &mut Context,
    game: &mut Game,
    world: &mut World,
    terrain_name: &str,
    background_image: &graphics::Canvas,
) -> Result<u32, Error> {
    let terrain = game
        .store
        .get::<_, TerrainFile>(&LogicalKey::new(terrain_name.to_string()), ctx)
        .unwrap();
    for p in &terrain.borrow().positions {
        let cmp = game
            .store
            .get::<_, PivImage>(&LogicalKey::new(&p.atlas), ctx)?;
        // Lazily convert each atlas to a ggez image, caching it on the game.
        let ggez_image = match game.images.entry(p.atlas.clone()) {
            Occupied(i) => i.into_mut(),
            Vacant(i) => i.insert(graphics::Image::from_rgba8(
                ctx,
                512u16,
                512u16,
                &cmp.borrow().to_rgba8_512(),
            )?),
        };
        let rect = scenery_rects[p.image_number];
        // println!("{:#?}",rect);
        // Source rect is expressed as a fraction of the 512x512 atlas.
        let draw_params = graphics::DrawParam {
            src: graphics::Rect {
                x: rect.x as f32 / 512.0,
                y: rect.y as f32 / 512.0,
                w: rect.w as f32 / 512.0,
                h: rect.h as f32 / 512.0,
            },
            dest: graphics::Point2::new(p.x as f32 * 3.0, p.y as f32 * 3.0),
            scale: graphics::Point2::new(3.0, 3.0),
            ..Default::default()
        };
        graphics::draw_ex(ctx, ggez_image, draw_params)?;
    }
    // Done rendering to the offscreen canvas; restore the default target.
    graphics::set_canvas(ctx, None);
    let y_max: u32 = terrain
        .borrow()
        .headers
        .iter()
        .map(|h| h.y)
        .max()
        .expect("error getting ymax");
    world.add_resource(TopBoundary {
        y: y_max as i32 - 30,
    });
    Ok(y_max)
}
/// Copies the current input state into the `Controller` components of the
/// two player entities; other entities' controllers are left untouched.
fn update_controllers(&mut self, input: &input::InputState) {
    let entities = self.specs_world.entities();
    let mut controllers = self.specs_world.write_storage::<Controller>();
    for (entity, controller) in (&*entities, &mut controllers).join() {
        let id = entity.id();
        // Pick the axis/button bindings for whichever player this is.
        let bindings = if id == self.player_1 {
            Some((input::Axis::Horz1, input::Axis::Vert1, input::Button::Fire1))
        } else if id == self.player_2 {
            Some((input::Axis::Horz2, input::Axis::Vert2, input::Button::Fire2))
        } else {
            None
        };
        if let Some((horz, vert, fire)) = bindings {
            controller.x = input.get_axis_raw(horz) as i32;
            controller.y = input.get_axis_raw(vert) as i32;
            controller.fire = input.get_button_down(fire);
        }
    }
}
/// Cycles through three vertical spawn slots (1 -> 2 -> 3 -> 1 ...) and
/// returns the y coordinate for the next combatant.
///
/// `t` is the smallest terrain y; the playable band is the remaining
/// screen height below it.
fn next_starting_position(game: &mut Game, t: i32) -> i32 {
    game.encounter_starting_position = game.encounter_starting_position % 3 + 1;
    let band = 200 - t; // screen height - smallest t
    let offset = match game.encounter_starting_position {
        1 => band / 2 + band / 4,
        2 => band / 4,
        3 => band / 2,
        _ => panic!("set starting position failed"),
    };
    offset + t - 47
}
}
impl<'a> scene::Scene<Game, input::InputEvent> for EncounterScene<'a> {
/// Advances the simulation one tick. Once combat is flagged done, waits
/// `TICKS_TO_WAIT` ticks and then pushes a fade-out transition; the menu
/// scene is queued via `game.next_scene`.
fn update(&mut self, game: &mut Game, _ctx: &mut Context) -> FSceneSwitch {
    self.specs_world.maintain();
    self.update_controllers(&game.input);
    self.dispatcher.dispatch_par(&self.specs_world.res);
    if self.specs_world.read_resource::<CombatDone>().0 {
        self.ticks_after += 1;
        if self.ticks_after > TICKS_TO_WAIT {
            if self.fade_out_done {
                // Shouldn't happen: the scene is popped after the fade.
                return scene::SceneSwitch::Pop;
            }
            game.next_scene = SceneState::Menu;
            // Cycle through the eight practice encounters.
            game.practice_encounter = game.practice_encounter % 8 + 1;
            return scene::SceneSwitch::push(Fade::new(274, 1, FadeStyle::Out));
        }
    }
    scene::SceneSwitch::None
}
/// Renders one frame: the cached background canvas, every entity sprite in
/// y-sorted order, then debug outlines for body and weapon collision shapes.
fn draw(&mut self, game: &mut Game, ctx: &mut Context) -> GameResult<()> {
    //fn draw(&mut self, ctx: &mut Context) -> GameResult<()> {
    //self.dispatcher.dispatch_thread_local(&self.game.world.res);
    // graphics::set_background_color(ctx, graphics::Color::from((0, 0, 0, 255)));
    // graphics::clear(ctx);
    let screen_origin = graphics::Point2::new(0.0, 0.0);
    // draw background
    let lair = &self.background;
    graphics::draw_ex(
        ctx,
        lair,
        graphics::DrawParam {
            dest: screen_origin,
            // TODO: this shouldn't be needed — investigate why it is.
            scale: graphics::Point2::new(3.0, 3.0),
            ..Default::default()
        },
    )?;
    let position_storage = self.specs_world.read_storage::<Position>();
    let draw_storage = self.specs_world.read_storage::<Draw>();
    let entities = self.specs_world.entities();
    let palette_storage = self.specs_world.read_storage::<Palette>();
    // Sort by y so entities lower on screen are drawn last (and thus on top).
    let mut storage = (&position_storage, &draw_storage, &entities)
        .join()
        .collect::<Vec<_>>();
    storage.sort_by(|&a, &b| a.0.y.cmp(&b.0.y));
    for (position, draw, entity) in storage {
        // With gore disabled, drop the blood images from the current frame.
        let images: Vec<&Image> = match game.gore_on {
            true => draw.frame.images.iter().collect(),
            false => draw.frame.images.iter().filter(|i| !i.is_blood()).collect(),
        };
        for image in images {
            let atlas = game
                .store
                .get::<_, TextureAtlas>(&LogicalKey::new(image.sheet.as_str()), ctx)
                .unwrap();
            let atlas_dimension = atlas.borrow().image.width as u32;
            // TODO: change with palettes
            let palette: Option<&Palette> = palette_storage.get(entity);
            // Lazily build and cache (in game.images) a GPU texture per atlas:
            // keyed by sheet name, or "sheet-palette" when the entity carries
            // its own palette swap.
            let ggez_image = match palette {
                None => {
                    let ggez_image = match game.images.entry(image.sheet.clone()) {
                        Occupied(i) => i.into_mut(),
                        Vacant(i) => i.insert(
                            graphics::Image::from_rgba8(
                                ctx,
                                atlas_dimension as u16,
                                atlas_dimension as u16,
                                &atlas.borrow().image.to_rgba8(&self.palette),
                            )
                            .unwrap(),
                        ),
                    };
                    ggez_image
                }
                Some(palette) => {
                    let image_name = [image.sheet.clone(), palette.name.clone()].join("-");
                    let ggez_image = match game.images.entry(image_name) {
                        Occupied(i) => i.into_mut(),
                        Vacant(i) => i.insert(
                            graphics::Image::from_rgba8(
                                ctx,
                                atlas_dimension as u16,
                                atlas_dimension as u16,
                                &atlas.borrow().image.to_rgba8(&palette.palette),
                            )
                            .unwrap(),
                        ),
                    };
                    ggez_image
                }
            };
            // Debug collision rects
            let rect = atlas.borrow().rects[image.image];
            let texture_size = atlas.borrow().image.width as f32;
            let draw_params = graphics::DrawParam {
                // src is in normalised texture coordinates, hence the division.
                src: graphics::Rect {
                    x: rect.x as f32 / texture_size,
                    y: rect.y as f32 / texture_size,
                    w: rect.w as f32 / texture_size,
                    h: rect.h as f32 / texture_size,
                },
                dest: graphics::Point2::new(
                    (position.x as i32 + (draw.direction as i32 * image.x)) as f32 * 3.0,
                    (position.y as i32 + image.y) as f32 * 3.0,
                ),
                // scale.x carries the sign of draw.direction, mirroring the sprite.
                scale: graphics::Point2::new((draw.direction as i32 * 3) as f32, 3.0),
                ..Default::default()
            };
            graphics::draw_ex(ctx, ggez_image, draw_params)?;
            // Blood stains are additionally drawn into the background canvas,
            // so they persist after the entity's animation moves on.
            match image.image_type {
                ImageType::BloodStain => {
                    graphics::set_canvas(ctx, Some(&self.background));
                    graphics::draw_ex(ctx, ggez_image, draw_params)?;
                    graphics::set_canvas(ctx, None);
                }
                _ => (),
            }
        }
    }
    // Debug overlay: body collision boxes (green outlines).
    let body_storage = self.specs_world.read_storage::<Body>();
    graphics::set_color(ctx, graphics::Color::new(0.4, 1.0, 0.0, 1.0))?;
    for body in (&body_storage).join() {
        if let Some(boxes) = &body.collision_boxes {
            for collision_box in boxes {
                graphics::rectangle(
                    ctx,
                    graphics::DrawMode::Line(1.0),
                    graphics::Rect {
                        x: (collision_box.rect.x) as f32 * 3.0,
                        y: (collision_box.rect.y) as f32 * 3.0,
                        w: collision_box.rect.w as f32 * 3.0,
                        h: collision_box.rect.h as f32 * 3.0,
                    },
                )?;
            }
        }
    }
    // Debug overlay: weapon collision bounds and points (magenta outlines).
    let weapon_storage = self.specs_world.read_storage::<Weapon>();
    graphics::set_color(ctx, graphics::Color::new(1.0, 0.0, 1.0, 1.0))?;
    for weapon in (&weapon_storage).join() {
        if let Some(collision_rects) = &weapon.collision_points {
            for rect in collision_rects {
                graphics::rectangle(
                    ctx,
                    graphics::DrawMode::Line(1.0),
                    graphics::Rect {
                        x: (rect.bounding.x * 3) as f32,
                        y: (rect.bounding.y * 3) as f32,
                        w: rect.bounding.w as f32 * 3.0,
                        h: rect.bounding.h as f32 * 3.0,
                    },
                )?;
                for point in &rect.points {
                    graphics::rectangle(
                        ctx,
                        graphics::DrawMode::Line(1.0),
                        graphics::Rect {
                            x: (point.x as i32 * 3) as f32,
                            y: (point.y as i32 * 3) as f32,
                            w: 3.0,
                            h: 3.0,
                        },
                    )?;
                }
            }
        }
    }
    // Restore the default draw colour so later scenes are unaffected.
    graphics::set_color(ctx, graphics::Color::new(1.0, 1.0, 1.0, 1.0))?;
    //let banner = &self.rects[73];
    //self.batch.add(graphics::DrawParam {
    //    src: graphics::Rect {
    //        x: banner.x as f32 / 512.0,
    //        y: banner.y as f32 / 512.0,
    //        w: banner.w as f32 / 512.0,
    //        h: banner.h as f32 / 512.0,
    //    },
    //    dest: graphics::Point2::new(5.0, 20.0),
    //    //scale: graphics::Point2::new(3.0, 3.0),
    //    ..Default::default()
    //});
    //let copyright = &self.rects[74];
    //self.batch.add(graphics::DrawParam {
    //    src: graphics::Rect {
    //        x: copyright.x as f32 / 512.0,
    //        y: copyright.y as f32 / 512.0,
    //        w: copyright.w as f32 / 512.0,
    //        h: copyright.h as f32 / 512.0,
    //    },
    //    dest: graphics::Point2::new(22.0, 181.0),
    //    //scale: graphics::Point2::new(3.0, 3.0),
    //    ..Default::default()
    //});
    //let rights = &self.rects[75];
    //self.batch.add(graphics::DrawParam {
    //    src: graphics::Rect {
    //        x: rights.x as f32 / 512.0,
    //        y: rights.y as f32 / 512.0,
    //        w: rights.w as f32 / 512.0,
    //        h: rights.h as f32 / 512.0,
    //    },
    //    dest: graphics::Point2::new(110.0, 190.0),
    //    //scale: graphics::Point2::new(3.0, 3.0),
    //    ..Default::default()
    //});
    //graphics::draw_ex(
    //    ctx,
    //    &self.batch,
    //    graphics::DrawParam {
    //        dest: dest_point,
    //        scale: graphics::Point2::new(3.0, 3.0),
    //        ..Default::default()
    //    },
    //)?;
    //self.batch.clear();
    // graphics::present(ctx);
    // println!("Delta frame time: {:?} ", timer::get_delta(ctx));
    // println!("Average FPS: {}", timer::get_fps(ctx));
    // timer::sleep(Duration::from_millis(50));
    //timer::sleep(Duration::from_millis(100));
    //timer::sleep(Duration::from_millis(109));
    Ok(())
}
/// Scene identifier used by the scene stack.
fn name(&self) -> &str {
    "Encounter"
}
/// Event-based input is intentionally ignored; `update` polls
/// `game.input` directly via `update_controllers`.
fn input(&mut self, _gameworld: &mut Game, _event: input::InputEvent, _started: bool) {
    // gameworld.input.update_effect(event, started);
}
}
Allow selecting from 1–4 players in practice mode.
If only one player is selected, an AI-controlled black knight is added as the opponent.
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::{HashMap, HashSet};
use std::time::Duration;
use failure::Error;
use ggez::conf::NumSamples;
use ggez::graphics;
use ggez::timer;
use ggez::{Context, GameResult};
use ggez_goodies::scene;
use specs::world::{Builder, Index};
use specs::{Component, Dispatcher, DispatcherBuilder, Entity, EntityBuilder, Join, World};
use warmy::{LogicalKey, Store};
use super::transition::FadeStyle;
use super::Fade;
use crate::animation::{Image, ImageType, Sprite, SpriteData};
use crate::combat::components::{
AiState, AnimationState, Body, Collided, Controller, DaggersInventory, Draw, Facing, Health,
Intent, MustLive, Palette, Position, State, UnitType, Velocity, WalkingState, Weapon,
};
use crate::combat::damage::DamageTables;
use crate::combat::systems::boundary::TopBoundary;
use crate::combat::systems::health::CombatDone;
use crate::combat::systems::{
ActionSystem, Animation, BlackKnightAi, CheckCollisions, CheckEndOfCombat, Commander,
ConfirmVelocity, EntityDeath, EntityEntityCollision, Movement, OutOfBounds, ResolveCollisions,
RestrictMovementToBoundary, StateUpdater, UpdateBoundingBoxes, UpdateImage, VelocitySystem,
};
use crate::files::collide::CollisionBoxes;
use crate::files::terrain::scenery_rects;
use crate::files::TerrainFile;
use crate::game::{Game, SceneState};
use crate::input;
use crate::manager::GameYaml;
use crate::objects::TextureAtlas;
use crate::palette::PaletteSwaps;
use crate::piv::{palette_swap, Colour, PivImage};
use crate::rect::Rect;
use crate::scenes::FSceneSwitch;
/// Texture atlases loaded for the current encounter, keyed by sheet name.
/// Registered as a specs resource (see `load_sprite_data`).
#[derive(Debug, Default, Clone)]
pub struct EncounterTextures {
    pub data: HashMap<String, TextureAtlas>,
}
// Number of update ticks to keep running after combat ends, before the
// fade-out transition is pushed.
const TICKS_TO_WAIT: u32 = 35;
/// Scene that runs a single combat encounter.
pub struct EncounterScene<'a> {
    pub specs_world: World,
    pub dispatcher: Dispatcher<'a, 'a>,
    // Off-screen canvas holding the background and terrain (blood stains are
    // also drawn into it so they persist between frames).
    pub background: graphics::Canvas,
    // Base colour palette of the background PIV image.
    pub palette: Vec<Colour>,
    // Entity ids of the players; 2-4 are None when fewer players joined.
    player_1: Index,
    player_2: Option<Index>,
    player_3: Option<Index>,
    player_4: Option<Index>,
    // the number of ticks since the encounter is done
    ticks_after: u32,
    // we do the fade out first, pop back to this scene then pop the encounter.
    fade_out_done: bool,
}
impl<'a> EncounterScene<'a> {
/// Creates the specs `World` and registers every component type used
/// during an encounter.
fn build_world() -> World {
    let mut world = World::new();
    world.register::<AiState>();
    world.register::<AnimationState>();
    world.register::<Body>();
    world.register::<Collided>();
    world.register::<Controller>();
    world.register::<DaggersInventory>();
    world.register::<Draw>();
    world.register::<Health>();
    world.register::<Intent>();
    world.register::<MustLive>();
    world.register::<Palette>();
    world.register::<Position>();
    world.register::<State>();
    world.register::<UnitType>();
    world.register::<Velocity>();
    world.register::<WalkingState>();
    world.register::<Weapon>();
    world
}
/// Wires the per-tick system graph. The string names plus dependency lists
/// encode execution order: command/AI systems first, then velocity and
/// collision checks, then movement, animation, image and bounding-box
/// updates, with collision resolution and state updates last.
fn build_dispatcher() -> Dispatcher<'a, 'a> {
    DispatcherBuilder::new()
        .with(Commander, "commander", &[])
        .with(BlackKnightAi, "black_knight_ai", &[])
        .with(ActionSystem, "action", &["commander", "black_knight_ai"])
        .with(EntityDeath, "entity_death", &["action"])
        .with(CheckEndOfCombat, "check_end_of_combat", &["entity_death"])
        .with(VelocitySystem, "velocity", &["commander"])
        .with(EntityEntityCollision, "entity_collision", &["velocity"])
        .with(
            RestrictMovementToBoundary,
            "restrict_movement_to_boundary",
            &["velocity"],
        )
        .with(
            ConfirmVelocity,
            "confirm_velocity",
            &["restrict_movement_to_boundary", "entity_collision"],
        )
        .with(Movement, "movement", &["confirm_velocity"])
        .with(Animation, "animation", &["movement"])
        //.with(StateUpdater, "state_updater", &["animation"])
        .with(UpdateImage, "update_image", &["animation"])
        .with(
            UpdateBoundingBoxes,
            "update_bounding_boxes",
            &["update_image"],
        )
        .with(
            CheckCollisions,
            "check_collisions",
            &["update_bounding_boxes"],
        )
        .with(
            ResolveCollisions,
            "resolve_collisions",
            &["check_collisions"],
        )
        .with(StateUpdater, "state_updater", &["resolve_collisions"])
        .with(OutOfBounds, "out_of_bounds", &[])
        // .with_thread_local(Renderer {
        //     store: Store::new(StoreOpt::default()).expect("store creation"),
        // })
        .build()
}
/// Loads shared combat resources into the world: the damage tables from
/// `/damage.yaml` and the initial (not-yet-finished) `CombatDone` flag.
fn load_resources(
    ctx: &mut Context,
    store: &mut Store<Context>,
    world: &mut World,
) -> Result<(), Error> {
    world.add_resource(
        store
            .get::<_, DamageTables>(&LogicalKey::new("/damage.yaml"), ctx)
            .expect("error loading damage.yaml")
            .borrow()
            .clone(),
    );
    world.add_resource(CombatDone(false));
    Ok(())
}
/// Loads the sprite definitions for `entity_names` (via `/entities.yaml`)
/// and every texture atlas they reference, registering the results as the
/// `SpriteData` and `EncounterTextures` world resources.
fn load_sprite_data(
    ctx: &mut Context,
    store: &mut Store<Context>,
    world: &mut World,
    entity_names: &[&str],
) -> Result<(), Error> {
    let entities_yaml =
        store.get::<_, GameYaml>(&warmy::LogicalKey::new("/entities.yaml"), ctx)?;
    let mut sprites: HashMap<String, Sprite> = HashMap::new();
    let mut atlas_names: HashSet<String> = HashSet::new();
    for name in entity_names {
        // entities.yaml maps each entity name to its own yaml file.
        let yaml_borrow = &entities_yaml.borrow();
        let yaml_file = yaml_borrow.yaml[name].as_str().unwrap();
        let entity_yaml = store.get::<_, Sprite>(&warmy::LogicalKey::new(yaml_file), ctx)?;
        sprites.insert(name.to_string(), (*entity_yaml.borrow()).clone());
        // Collect every sheet referenced by any frame of any animation.
        atlas_names.extend(
            entity_yaml
                .borrow()
                .animations
                .values()
                .flat_map(|a| &a.frames)
                .flat_map(|f| &f.images)
                .map(|i| i.sheet.clone()),
        );
    }
    world.add_resource(SpriteData { sprites });
    let mut texture_atlases: HashMap<String, TextureAtlas> = HashMap::new();
    for atlas_name in atlas_names {
        let atlas = store
            .get::<_, TextureAtlas>(&LogicalKey::new(atlas_name.clone()), ctx)
            .unwrap();
        texture_atlases.insert(atlas_name.clone(), atlas.borrow().clone());
    }
    world.add_resource(EncounterTextures {
        data: texture_atlases,
    });
    Ok(())
}
// fn create_player_entity(entity_builder: EntityBuilder) -> Entity {
// entity_builder
// .with(Controller {
// ..Default::default()
// })
// .build()
// }
/// Creates a combat entity for `resource` (e.g. "knight") at (`x`, `y`),
/// applying the named palette swap from `/palettes.yaml`. Returns the
/// `EntityBuilder` so the caller can attach a `Controller` or `AiState`
/// before calling `.build()`.
fn build_entity(
    ctx: &mut Context,
    store: &mut Store<Context>,
    world: &'a mut World,
    resource: &str,
    raw_palette: &Vec<u16>,
    palette_name: &str,
    x: i32,
    y: i32,
    direction: Facing,
) -> EntityBuilder<'a> {
    let sprite_res = store
        .get::<_, Sprite>(&LogicalKey::new(format!("/{}.yaml", resource)), ctx)
        .unwrap();
    let sprite = sprite_res.borrow();
    let swaps_res = store
        .get::<_, PaletteSwaps>(&LogicalKey::new("/palettes.yaml"), ctx)
        .expect("error loading palette.yaml");
    let swaps = swaps_res.borrow();
    world
        .create_entity()
        .with(UnitType {
            name: resource.to_string(),
        })
        .with(Palette {
            name: palette_name.to_string(),
            palette: palette_swap(
                &raw_palette,
                &swaps.0.get(&palette_name.to_string()).expect("no palette"),
            ),
        })
        .with(MustLive {})
        .with(Position { x, y })
        .with(Health {
            ..Default::default()
        })
        .with(Draw {
            // Every unit starts in its "entrance" animation.
            frame: sprite.animations["entrance"].frames[0].clone(),
            animation: "entrance".to_string(),
            resource_name: resource.to_string(),
            direction,
        })
        .with(Intent {
            ..Default::default()
        })
        .with(WalkingState {
            ..Default::default()
        })
        .with(Velocity {
            ..Default::default()
        })
        .with(AnimationState {
            ..Default::default()
        })
        .with(State {
            direction,
            ..Default::default()
        })
        .with(Body {
            ..Default::default()
        })
        .with(Weapon {
            ..Default::default()
        })
        .with(DaggersInventory {
            ..Default::default()
        })
}
/// Builds a complete encounter scene: loads sprite and combat resources,
/// bakes the background image and terrain into an off-screen canvas,
/// spawns the player (and, for solo play, AI) entities, and assembles the
/// system dispatcher.
pub fn new(
    ctx: &mut Context,
    game: &mut Game,
    entity_names: &[&str],
    background_name: &str,
    terrain_name: &str,
) -> Result<Self, Error> {
    let mut world = EncounterScene::build_world();
    EncounterScene::load_sprite_data(ctx, &mut game.store, &mut world, entity_names)?;
    EncounterScene::load_resources(ctx, &mut game.store, &mut world)?;
    // Decode the background PIV image and draw it into a 320x200 canvas.
    let piv = game
        .store
        .get::<_, PivImage>(&LogicalKey::new(background_name), ctx)
        .unwrap();
    let background_image =
        graphics::Image::from_rgba8(ctx, 320, 200, &*piv.borrow().to_rgba8()).unwrap();
    let background = graphics::Canvas::new(ctx, 320, 200, NumSamples::One)?;
    graphics::set_canvas(ctx, Some(&background));
    let screen_origin = graphics::Point2::new(0.0, 0.0);
    graphics::draw_ex(
        ctx,
        &background_image,
        graphics::DrawParam {
            dest: screen_origin,
            scale: graphics::Point2::new(3.0, 3.0),
            ..Default::default()
        },
    )?;
    // Scenery is drawn into the same canvas; this also registers the
    // TopBoundary resource and yields the highest walkable y.
    let y_max = EncounterScene::draw_terrain(ctx, game, &mut world, terrain_name, &background)?;
    let collide_hit = game
        .store
        .get::<_, CollisionBoxes>(&LogicalKey::new("collide"), ctx)
        .unwrap();
    world.add_resource(collide_hit.borrow().clone());
    let (player_1, player_2, player_3, player_4) = EncounterScene::create_entities(
        ctx,
        game,
        &mut world,
        &piv.borrow().raw_palette,
        y_max,
    );
    let palette: Vec<Colour> = piv.borrow().palette.to_vec();
    Ok(Self {
        palette,
        specs_world: world,
        dispatcher: EncounterScene::build_dispatcher(),
        background,
        player_1,
        player_2,
        player_3,
        player_4,
        ticks_after: 0,
        fade_out_done: false,
    })
}
/// Spawns player 1 plus up to three more human-controlled knights
/// (`game.num_players`); in single-player mode an AI black knight is
/// spawned as the opponent instead. Returns the entity ids, with slots
/// 2-4 `None` when those players are absent.
fn create_entities(
    ctx: &mut Context,
    game: &mut Game,
    world: &mut World,
    raw_palette: &Vec<u16>,
    y_max: u32,
) -> (Index, Option<Index>, Option<Index>, Option<Index>) {
    let starting_x = [250, 30, 240, 40];
    let colours = ["blue_knight", "green_knight", "red_knight", "orange_knight"];
    // Knights starting on the left half of the 320px play field face right.
    let facing_for = |x: i32| if x < 160 { Facing::Right } else { Facing::Left };
    let x = starting_x[0];
    let y = EncounterScene::next_starting_position(game, y_max as i32);
    let player_1 = EncounterScene::build_entity(
        ctx,
        &mut game.store,
        world,
        "knight",
        raw_palette,
        colours[0],
        x,
        y,
        facing_for(x),
    )
    .with(Controller {
        ..Default::default()
    })
    .build();
    let mut players: Vec<Entity> = Vec::new();
    for n in 1..game.num_players {
        let x = starting_x[n as usize];
        let y = EncounterScene::next_starting_position(game, y_max as i32);
        let player = EncounterScene::build_entity(
            ctx,
            &mut game.store,
            world,
            "knight",
            raw_palette,
            colours[n as usize],
            x,
            y,
            facing_for(x),
        )
        .with(Controller {
            ..Default::default()
        })
        .build();
        players.push(player);
    }
    if game.num_players == 1 {
        // Single-player practice: add an AI black knight targeting player 1.
        // NOTE(review): the AI entity's id is not returned (the original bound
        // it to an unused local) — confirm nothing else needs to address it.
        let y = EncounterScene::next_starting_position(game, y_max as i32);
        EncounterScene::build_entity(
            ctx,
            &mut game.store,
            world,
            "knight",
            raw_palette,
            "black_knight",
            30,
            y,
            Facing::default(),
        )
        .with(AiState {
            class: "black_knight".to_string(),
            target: Some(player_1),
            y_range: 4,
            close_range: 80,
            long_range: 100,
        })
        .build();
    }
    (
        player_1.id(),
        players.get(0).map(|p| p.id()),
        players.get(1).map(|p| p.id()),
        players.get(2).map(|p| p.id()),
    )
}
/// Draws the scenery for `terrain_name` into the currently bound canvas,
/// unbinds the canvas, and registers the `TopBoundary` resource at
/// `y_max - 30`. Returns `y_max`, the largest terrain-header y value.
///
/// NOTE(review): the canvas parameter is unused — the caller is expected to
/// have already bound the target canvas via `graphics::set_canvas`; confirm
/// the parameter can be dropped entirely.
fn draw_terrain(
    ctx: &mut Context,
    game: &mut Game,
    world: &mut World,
    terrain_name: &str,
    _background_image: &graphics::Canvas,
) -> Result<u32, Error> {
    let terrain = game
        .store
        .get::<_, TerrainFile>(&LogicalKey::new(terrain_name.to_string()), ctx)
        .unwrap();
    for p in &terrain.borrow().positions {
        let cmp = game
            .store
            .get::<_, PivImage>(&LogicalKey::new(&p.atlas), ctx)?;
        // Lazily build and cache the 512x512 atlas texture, keyed by name.
        let ggez_image = match game.images.entry(p.atlas.clone()) {
            Occupied(i) => i.into_mut(),
            Vacant(i) => i.insert(graphics::Image::from_rgba8(
                ctx,
                512u16,
                512u16,
                &cmp.borrow().to_rgba8_512(),
            )?),
        };
        let rect = scenery_rects[p.image_number];
        let draw_params = graphics::DrawParam {
            // Normalised texture coordinates within the 512x512 atlas.
            src: graphics::Rect {
                x: rect.x as f32 / 512.0,
                y: rect.y as f32 / 512.0,
                w: rect.w as f32 / 512.0,
                h: rect.h as f32 / 512.0,
            },
            dest: graphics::Point2::new(p.x as f32 * 3.0, p.y as f32 * 3.0),
            scale: graphics::Point2::new(3.0, 3.0),
            ..Default::default()
        };
        graphics::draw_ex(ctx, ggez_image, draw_params)?;
    }
    graphics::set_canvas(ctx, None);
    let y_max: u32 = terrain
        .borrow()
        .headers
        .iter()
        .map(|h| h.y)
        .max()
        .expect("error getting ymax");
    world.add_resource(TopBoundary {
        y: y_max as i32 - 30,
    });
    Ok(y_max)
}
/// Copies the raw input state for players 1 and 2 onto their Controller
/// components.
///
/// NOTE(review): players 3 and 4 never receive input here, although up to
/// four players can be spawned — confirm whether Horz3/Vert3/Fire3 (etc.)
/// inputs exist and should be mapped.
fn update_controllers(&mut self, input: &input::InputState) {
    let entities = self.specs_world.entities();
    let mut controllers = self.specs_world.write_storage::<Controller>();
    for (e, controller) in (&*entities, &mut controllers).join() {
        if e.id() == self.player_1 {
            controller.x = input.get_axis_raw(input::Axis::Horz1) as i32;
            controller.y = input.get_axis_raw(input::Axis::Vert1) as i32;
            controller.fire = input.get_button_down(input::Button::Fire1);
        } else if self.player_2 == Some(e.id()) {
            controller.x = input.get_axis_raw(input::Axis::Horz2) as i32;
            controller.y = input.get_axis_raw(input::Axis::Vert2) as i32;
            controller.fire = input.get_button_down(input::Button::Fire2);
        }
    }
}
/// Returns the next spawn y coordinate, cycling `encounter_starting_position`
/// through the slots 1 -> 2 -> 3 -> 1. `t` is the smallest allowed y value.
fn next_starting_position(game: &mut Game, t: i32) -> i32 {
    game.encounter_starting_position = game.encounter_starting_position % 3 + 1;
    let s = 200 - t; // screen height minus the smallest t
    let base = t - 47;
    match game.encounter_starting_position {
        1 => base + s / 2 + s / 4,
        2 => base + s / 4,
        3 => base + s / 2,
        _ => panic!("set starting position failed"),
    }
}
}
impl<'a> scene::Scene<Game, input::InputEvent> for EncounterScene<'a> {
/// Advances the simulation one tick. Once combat is flagged done, waits
/// `TICKS_TO_WAIT` ticks and then pushes a fade-out transition; the menu
/// scene is queued via `game.next_scene`.
fn update(&mut self, game: &mut Game, _ctx: &mut Context) -> FSceneSwitch {
    self.specs_world.maintain();
    self.update_controllers(&game.input);
    self.dispatcher.dispatch_par(&self.specs_world.res);
    if self.specs_world.read_resource::<CombatDone>().0 {
        self.ticks_after += 1;
        if self.ticks_after > TICKS_TO_WAIT {
            if self.fade_out_done {
                // Shouldn't happen: the scene is popped after the fade.
                return scene::SceneSwitch::Pop;
            }
            game.next_scene = SceneState::Menu;
            // Cycle through the eight practice encounters.
            game.practice_encounter = game.practice_encounter % 8 + 1;
            return scene::SceneSwitch::push(Fade::new(274, 1, FadeStyle::Out));
        }
    }
    scene::SceneSwitch::None
}
/// Renders one frame: the cached background canvas, every entity sprite in
/// y-sorted order, then debug outlines for body and weapon collision shapes.
fn draw(&mut self, game: &mut Game, ctx: &mut Context) -> GameResult<()> {
    //fn draw(&mut self, ctx: &mut Context) -> GameResult<()> {
    //self.dispatcher.dispatch_thread_local(&self.game.world.res);
    // graphics::set_background_color(ctx, graphics::Color::from((0, 0, 0, 255)));
    // graphics::clear(ctx);
    let screen_origin = graphics::Point2::new(0.0, 0.0);
    // draw background
    let lair = &self.background;
    graphics::draw_ex(
        ctx,
        lair,
        graphics::DrawParam {
            dest: screen_origin,
            // TODO: this shouldn't be needed — investigate why it is.
            scale: graphics::Point2::new(3.0, 3.0),
            ..Default::default()
        },
    )?;
    let position_storage = self.specs_world.read_storage::<Position>();
    let draw_storage = self.specs_world.read_storage::<Draw>();
    let entities = self.specs_world.entities();
    let palette_storage = self.specs_world.read_storage::<Palette>();
    // Sort by y so entities lower on screen are drawn last (and thus on top).
    let mut storage = (&position_storage, &draw_storage, &entities)
        .join()
        .collect::<Vec<_>>();
    storage.sort_by(|&a, &b| a.0.y.cmp(&b.0.y));
    for (position, draw, entity) in storage {
        // With gore disabled, drop the blood images from the current frame.
        let images: Vec<&Image> = match game.gore_on {
            true => draw.frame.images.iter().collect(),
            false => draw.frame.images.iter().filter(|i| !i.is_blood()).collect(),
        };
        for image in images {
            let atlas = game
                .store
                .get::<_, TextureAtlas>(&LogicalKey::new(image.sheet.as_str()), ctx)
                .unwrap();
            let atlas_dimension = atlas.borrow().image.width as u32;
            // TODO: change with palettes
            let palette: Option<&Palette> = palette_storage.get(entity);
            // Lazily build and cache (in game.images) a GPU texture per atlas:
            // keyed by sheet name, or "sheet-palette" when the entity carries
            // its own palette swap.
            let ggez_image = match palette {
                None => {
                    let ggez_image = match game.images.entry(image.sheet.clone()) {
                        Occupied(i) => i.into_mut(),
                        Vacant(i) => i.insert(
                            graphics::Image::from_rgba8(
                                ctx,
                                atlas_dimension as u16,
                                atlas_dimension as u16,
                                &atlas.borrow().image.to_rgba8(&self.palette),
                            )
                            .unwrap(),
                        ),
                    };
                    ggez_image
                }
                Some(palette) => {
                    let image_name = [image.sheet.clone(), palette.name.clone()].join("-");
                    let ggez_image = match game.images.entry(image_name) {
                        Occupied(i) => i.into_mut(),
                        Vacant(i) => i.insert(
                            graphics::Image::from_rgba8(
                                ctx,
                                atlas_dimension as u16,
                                atlas_dimension as u16,
                                &atlas.borrow().image.to_rgba8(&palette.palette),
                            )
                            .unwrap(),
                        ),
                    };
                    ggez_image
                }
            };
            // Debug collision rects
            let rect = atlas.borrow().rects[image.image];
            let texture_size = atlas.borrow().image.width as f32;
            let draw_params = graphics::DrawParam {
                // src is in normalised texture coordinates, hence the division.
                src: graphics::Rect {
                    x: rect.x as f32 / texture_size,
                    y: rect.y as f32 / texture_size,
                    w: rect.w as f32 / texture_size,
                    h: rect.h as f32 / texture_size,
                },
                dest: graphics::Point2::new(
                    (position.x as i32 + (draw.direction as i32 * image.x)) as f32 * 3.0,
                    (position.y as i32 + image.y) as f32 * 3.0,
                ),
                // scale.x carries the sign of draw.direction, mirroring the sprite.
                scale: graphics::Point2::new((draw.direction as i32 * 3) as f32, 3.0),
                ..Default::default()
            };
            graphics::draw_ex(ctx, ggez_image, draw_params)?;
            // Blood stains are additionally drawn into the background canvas,
            // so they persist after the entity's animation moves on.
            match image.image_type {
                ImageType::BloodStain => {
                    graphics::set_canvas(ctx, Some(&self.background));
                    graphics::draw_ex(ctx, ggez_image, draw_params)?;
                    graphics::set_canvas(ctx, None);
                }
                _ => (),
            }
        }
    }
    // Debug overlay: body collision boxes (green outlines).
    let body_storage = self.specs_world.read_storage::<Body>();
    graphics::set_color(ctx, graphics::Color::new(0.4, 1.0, 0.0, 1.0))?;
    for body in (&body_storage).join() {
        if let Some(boxes) = &body.collision_boxes {
            for collision_box in boxes {
                graphics::rectangle(
                    ctx,
                    graphics::DrawMode::Line(1.0),
                    graphics::Rect {
                        x: (collision_box.rect.x) as f32 * 3.0,
                        y: (collision_box.rect.y) as f32 * 3.0,
                        w: collision_box.rect.w as f32 * 3.0,
                        h: collision_box.rect.h as f32 * 3.0,
                    },
                )?;
            }
        }
    }
    // Debug overlay: weapon collision bounds and points (magenta outlines).
    let weapon_storage = self.specs_world.read_storage::<Weapon>();
    graphics::set_color(ctx, graphics::Color::new(1.0, 0.0, 1.0, 1.0))?;
    for weapon in (&weapon_storage).join() {
        if let Some(collision_rects) = &weapon.collision_points {
            for rect in collision_rects {
                graphics::rectangle(
                    ctx,
                    graphics::DrawMode::Line(1.0),
                    graphics::Rect {
                        x: (rect.bounding.x * 3) as f32,
                        y: (rect.bounding.y * 3) as f32,
                        w: rect.bounding.w as f32 * 3.0,
                        h: rect.bounding.h as f32 * 3.0,
                    },
                )?;
                for point in &rect.points {
                    graphics::rectangle(
                        ctx,
                        graphics::DrawMode::Line(1.0),
                        graphics::Rect {
                            x: (point.x as i32 * 3) as f32,
                            y: (point.y as i32 * 3) as f32,
                            w: 3.0,
                            h: 3.0,
                        },
                    )?;
                }
            }
        }
    }
    // Restore the default draw colour so later scenes are unaffected.
    graphics::set_color(ctx, graphics::Color::new(1.0, 1.0, 1.0, 1.0))?;
    //let banner = &self.rects[73];
    //self.batch.add(graphics::DrawParam {
    //    src: graphics::Rect {
    //        x: banner.x as f32 / 512.0,
    //        y: banner.y as f32 / 512.0,
    //        w: banner.w as f32 / 512.0,
    //        h: banner.h as f32 / 512.0,
    //    },
    //    dest: graphics::Point2::new(5.0, 20.0),
    //    //scale: graphics::Point2::new(3.0, 3.0),
    //    ..Default::default()
    //});
    //let copyright = &self.rects[74];
    //self.batch.add(graphics::DrawParam {
    //    src: graphics::Rect {
    //        x: copyright.x as f32 / 512.0,
    //        y: copyright.y as f32 / 512.0,
    //        w: copyright.w as f32 / 512.0,
    //        h: copyright.h as f32 / 512.0,
    //    },
    //    dest: graphics::Point2::new(22.0, 181.0),
    //    //scale: graphics::Point2::new(3.0, 3.0),
    //    ..Default::default()
    //});
    //let rights = &self.rects[75];
    //self.batch.add(graphics::DrawParam {
    //    src: graphics::Rect {
    //        x: rights.x as f32 / 512.0,
    //        y: rights.y as f32 / 512.0,
    //        w: rights.w as f32 / 512.0,
    //        h: rights.h as f32 / 512.0,
    //    },
    //    dest: graphics::Point2::new(110.0, 190.0),
    //    //scale: graphics::Point2::new(3.0, 3.0),
    //    ..Default::default()
    //});
    //graphics::draw_ex(
    //    ctx,
    //    &self.batch,
    //    graphics::DrawParam {
    //        dest: dest_point,
    //        scale: graphics::Point2::new(3.0, 3.0),
    //        ..Default::default()
    //    },
    //)?;
    //self.batch.clear();
    // graphics::present(ctx);
    // println!("Delta frame time: {:?} ", timer::get_delta(ctx));
    // println!("Average FPS: {}", timer::get_fps(ctx));
    // timer::sleep(Duration::from_millis(50));
    //timer::sleep(Duration::from_millis(100));
    //timer::sleep(Duration::from_millis(109));
    Ok(())
}
/// Scene identifier used by the scene stack.
fn name(&self) -> &str {
    "Encounter"
}
/// Event-based input is intentionally ignored; `update` polls
/// `game.input` directly via `update_controllers`.
fn input(&mut self, _gameworld: &mut Game, _event: input::InputEvent, _started: bool) {
    // gameworld.input.update_effect(event, started);
}
}
|
#![cfg_attr(not(feature = "dev"), allow(dead_code))]
#![deny(warnings)]
#![feature(const_fn)]
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate lazy_static;
extern crate parking_lot;
extern crate rustc_version;
extern crate tempdir;
extern crate dirs;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::{env, fs};
use parking_lot::{Mutex, MutexGuard};
use tempdir::TempDir;
use errors::*;
// `error_chain!` generates the `Error`, `ErrorKind`, `Result` and
// `ResultExt` types used throughout these tests.
mod errors {
    #![allow(unused_doc_comments)]
    error_chain!();
}
/// Invokes the test's local `run()` helper, panicking with the error's
/// display text if it fails.
macro_rules! run {
    () => {
        match run() {
            Ok(()) => {}
            Err(e) => panic!("{}", e),
        }
    };
}
/// Returns Xargo's "home" directory: `$XARGO_HOME` if set, otherwise
/// `~/.xargo`.
fn home() -> Result<PathBuf> {
    match env::var_os("XARGO_HOME") {
        Some(h) => Ok(PathBuf::from(h)),
        None => {
            let home = dirs::home_dir()
                .ok_or_else(|| "couldn't find your home directory. Is $HOME set?")?;
            Ok(home.join(".xargo"))
        }
    }
}
/// Removes the cached sysroot for `target`, if one exists.
fn cleanup(target: &str) -> Result<()> {
    let sysroot = home()?.join("lib/rustlib").join(target);
    if !sysroot.exists() {
        return Ok(());
    }
    fs::remove_dir_all(&sysroot).chain_err(|| format!("couldn't clean sysroot for {}", target))
}
/// Returns true when the sysroot for `target` contains an artifact whose
/// file name mentions `krate`.
fn exists(krate: &str, target: &str) -> Result<bool> {
    let lib_dir = home()?.join("lib/rustlib").join(target).join("lib");
    let entries = fs::read_dir(&lib_dir)
        .chain_err(|| format!("couldn't read the directory {}", lib_dir.display()))?;
    for entry in entries {
        let entry = entry.chain_err(|| {
            format!(
                "error reading the contents of the directory {}",
                lib_dir.display()
            )
        })?;
        if entry.file_name().to_string_lossy().contains(krate) {
            return Ok(true);
        }
    }
    Ok(false)
}
/// Returns the host target triple, as reported by the `rustc_version` crate.
fn host() -> String {
    rustc_version::version_meta().host
}
/// Creates `path` as a directory, attaching the path to any error.
fn mkdir(path: &Path) -> Result<()> {
    let shown = path.display();
    fs::create_dir(path).chain_err(|| format!("couldn't create the directory {}", shown))
}
/// Returns true when the verbose build log (`stderr`) shows that a sysroot
/// was compiled for `target`: an echoed command line (prefixed "+") that
/// mentions cargo, build, --target, the target itself, -p and core.
fn sysroot_was_built(stderr: &str, target: &str) -> bool {
    stderr
        .lines()
        .filter(|line| line.starts_with("+"))
        .any(|line| {
            ["cargo", "build", "--target", target, "-p", "core"]
                .iter()
                .all(|needle| line.contains(needle))
        })
}
/// Writes `contents` to `path`, either appending or creating/truncating
/// depending on `append`.
fn write(path: &Path, append: bool, contents: &str) -> Result<()> {
    let shown = path.display();
    let mut opts = OpenOptions::new();
    if append {
        opts.append(true);
    } else {
        opts.create(true).truncate(true);
    }
    let mut file = opts
        .write(true)
        .open(path)
        .chain_err(|| format!("couldn't open {}", shown))?;
    file.write_all(contents.as_bytes())
        .chain_err(|| format!("couldn't write to {}", shown))
}
/// Builds a `Command` for the `xargo` binary, located two directories above
/// the current test executable (assumes the standard cargo target layout —
/// confirm if the layout changes).
fn xargo() -> Result<Command> {
    let mut path = env::current_exe().chain_err(|| "couldn't get path to current executable")?;
    path.pop();
    path.pop();
    path.push("xargo");
    Ok(Command::new(path))
}
/// Convenience helpers for running `Command`s inside the tests.
trait CommandExt {
    /// Runs the command; errors unless it exits successfully.
    fn run(&mut self) -> Result<()>;
    /// Runs the command and returns its stderr; errors on non-zero exit.
    fn run_and_get_stderr(&mut self) -> Result<String>;
}
impl CommandExt for Command {
    /// Runs the command, turning a non-zero exit status into an error.
    fn run(&mut self) -> Result<()> {
        let status = self.status()
            .chain_err(|| format!("couldn't execute `{:?}`", self))?;
        if status.success() {
            Ok(())
        } else {
            Err(format!(
                "`{:?}` failed with exit code: {:?}",
                self,
                status.code()
            ))?
        }
    }
    /// Runs the command and captures its stderr as UTF-8. On failure, the
    /// captured output (or the UTF-8 error) is printed before the exit-code
    /// error is returned, so test logs still show what happened.
    fn run_and_get_stderr(&mut self) -> Result<String> {
        let out = self.output()
            .chain_err(|| format!("couldn't execute `{:?}`", self))?;
        let stderr = String::from_utf8(out.stderr)
            .chain_err(|| format!("`{:?}` output was not UTF-8", self));
        if out.status.success() {
            stderr
        } else {
            match stderr {
                Ok(e) => print!("{}", e),
                Err(e) => print!("{}", e),
            }
            Err(format!(
                "`{:?}` failed with exit code: {:?}",
                self,
                out.status.code()
            ))?
        }
    }
}
/// A throw-away Cargo project with a custom JSON target spec, used to
/// exercise `xargo` against a non-host target.
struct Project {
    // Also used as the target name: `new` writes the spec to `<name>.json`,
    // and `Drop` cleans up the sysroot built for it.
    name: &'static str,
    td: TempDir,
}
impl Project {
    /// Scaffolds a fresh `#![no_std]` library crate in a temp dir and writes
    /// a minimal ARM bare-metal target spec to `<name>.json`.
    fn new(name: &'static str) -> Result<Self> {
        const JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"linker-flavor": "gcc",
"llvm-target": "thumbv6m-none-eabi",
"max-atomic-width": 0,
"os": "none",
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
        let td = TempDir::new("xargo").chain_err(|| "couldn't create a temporary directory")?;
        xargo()?
            .args(&["init", "--lib", "--vcs", "none", "--name", name])
            .current_dir(td.path())
            .run()?;
        write(&td.path().join("src/lib.rs"), false, "#![no_std]")?;
        write(&td.path().join(format!("{}.json", name)), false, JSON)?;
        Ok(Project { name: name, td: td })
    }
    /// Calls `xargo build`
    fn build(&self, target: &str) -> Result<()> {
        xargo()?
            .args(&["build", "--target", target])
            .current_dir(self.td.path())
            .run()
    }
    /// Calls `xargo build -v` (optionally with `--target`) and collects STDERR
    fn build_and_get_stderr(&self, target: Option<&str>) -> Result<String> {
        let mut cmd = xargo()?;
        cmd.arg("build");
        if let Some(target) = target {
            cmd.args(&["--target", target]);
        }
        cmd.arg("-v")
            .current_dir(self.td.path())
            .run_and_get_stderr()
    }
    /// Appends a string to the project `Cargo.toml`
    fn cargo_toml(&self, contents: &str) -> Result<()> {
        write(&self.td.path().join("Cargo.toml"), true, contents)
    }
    /// Adds a `.cargo/config` to the project
    fn config(&self, contents: &str) -> Result<()> {
        mkdir(&self.td.path().join(".cargo"))?;
        write(&self.td.path().join(".cargo/config"), false, contents)
    }
    /// Calls `xargo doc`
    fn doc(&self, target: &str) -> Result<()> {
        xargo()?
            .args(&["doc", "--target", target])
            .current_dir(self.td.path())
            .run()
    }
    /// Adds a `Xargo.toml` to the project
    fn xargo_toml(&self, toml: &str) -> Result<()> {
        write(&self.td.path().join("Xargo.toml"), false, toml)
    }
}
impl Drop for Project {
    fn drop(&mut self) {
        // Remove the per-target sysroot so subsequent tests start clean.
        cleanup(self.name).unwrap()
    }
}
/// Removes the cached HOST sysroot for `triple`, if one exists.
fn hcleanup(triple: &str) -> Result<()> {
    let sysroot = home()?.join("HOST/lib/rustlib").join(triple);
    if !sysroot.exists() {
        return Ok(());
    }
    fs::remove_dir_all(&sysroot).chain_err(|| format!("couldn't clean sysroot for {}", triple))
}
struct HProject {
_guard: MutexGuard<'static, ()>,
host: String,
td: TempDir,
}
impl HProject {
fn new(test: bool) -> Result<Self> {
// There can only be one instance of this type at any point in time
lazy_static! {
static ref ONCE: Mutex<()> = Mutex::new(());
}
let guard = ONCE.lock();
let td = TempDir::new("xargo").chain_err(|| "couldn't create a temporary directory")?;
xargo()?
.args(&["init", "--lib", "--vcs", "none", "--name", "host"])
.current_dir(td.path())
.run()?;
if test {
write(
&td.path().join("src/lib.rs"),
false,
"fn _f(_: Vec<std::fs::File>) {}",
)?;
} else {
write(&td.path().join("src/lib.rs"), false, "#![no_std]")?;
}
Ok(HProject {
_guard: guard,
host: host(),
td: td,
})
}
/// Calls `xargo build` and collects STDERR
fn build_and_get_stderr(&self) -> Result<String> {
let mut cmd = xargo()?;
cmd.arg("build");
cmd.arg("-v")
.current_dir(self.td.path())
.run_and_get_stderr()
}
/// Adds a `Xargo.toml` to the project
fn xargo_toml(&self, toml: &str) -> Result<()> {
write(&self.td.path().join("Xargo.toml"), false, toml)
}
}
impl Drop for HProject {
fn drop(&mut self) {
hcleanup(&self.host).unwrap()
}
}
// NOTE: each test uses a unique target name so the per-target sysroots
// (cleaned up in `Project`'s `Drop`) don't collide between tests.
/// Test vanilla `xargo build`
#[cfg(feature = "dev")]
#[test]
fn simple() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-simple-eabi";
let project = Project::new(TARGET)?;
project.build(TARGET)?;
assert!(exists("core", TARGET)?);
Ok(())
}
run!()
}
/// Test building a dependency specified as `target.{}.dependencies` in
/// Xargo.toml
#[cfg(feature = "dev")]
#[test]
fn target_dependencies() {
fn run() -> Result<()> {
// need this exact target name to get the right gcc flags
const TARGET: &'static str = "thumbv7m-none-eabi";
let project = Project::new(TARGET)?;
project.xargo_toml(
r#"
[target.thumbv7m-none-eabi.dependencies.alloc]
"#,
)?;
project.build(TARGET)?;
assert!(exists("core", TARGET)?);
assert!(exists("alloc", TARGET)?);
Ok(())
}
run!()
}
/// Test building a dependency specified as `dependencies` in Xargo.toml
#[cfg(feature = "dev")]
#[test]
fn dependencies() {
fn run() -> Result<()> {
// need this exact target name to get the right gcc flags
const TARGET: &'static str = "thumbv6m-none-eabi";
let project = Project::new(TARGET)?;
project.xargo_toml(
r#"
[dependencies.alloc]
"#,
)?;
project.build(TARGET)?;
assert!(exists("core", TARGET)?);
assert!(exists("alloc", TARGET)?);
Ok(())
}
run!()
}
/// Test `xargo doc`
#[cfg(feature = "dev")]
#[test]
fn doc() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-doc-eabi";
let project = Project::new(TARGET)?;
project.doc(TARGET)?;
assert!(exists("core", TARGET)?);
Ok(())
}
run!()
}
/// Check that calling `xargo build` a second time doesn't rebuild the sysroot
#[cfg(feature = "dev")]
#[test]
fn twice() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-twice-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that if `build.target` is set in `.cargo/config`, that target will be
/// used to build the sysroot
#[cfg(feature = "dev")]
#[test]
fn build_target() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-build_target-eabi";
let project = Project::new(TARGET)?;
project.config(
r#"
[build]
target = "thumbv6m-build_target-eabi"
"#,
)?;
// no `--target` on the command line: the config must supply it
let stderr = project.build_and_get_stderr(None)?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that `--target` overrides `build.target`
#[cfg(feature = "dev")]
#[test]
fn override_build_target() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-override_build_target-eabi";
let project = Project::new(TARGET)?;
project.config(
r#"
[build]
target = "BAD"
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// We shouldn't rebuild the sysroot if `profile.release.lto` changed
#[cfg(feature = "dev")]
#[test]
fn lto_changed() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-lto_changed-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
project.cargo_toml(
r#"
[profile.release]
lto = true
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Modifying RUSTFLAGS should trigger a rebuild of the sysroot
#[cfg(feature = "dev")]
#[test]
fn rustflags_changed() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-rustflags_changed-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
project.config(
r#"
[build]
rustflags = ["--cfg", "xargo"]
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that RUSTFLAGS are passed to all `rustc`s
#[cfg(feature = "dev")]
#[test]
fn rustflags() {
    fn run() -> Result<()> {
        const TARGET: &'static str = "thumbv6m-rustflags-eabi";
        let project = Project::new(TARGET)?;
        project.config(
            r#"
[build]
rustflags = ["--cfg", "xargo"]
"#,
        )?;
        let stderr = project.build_and_get_stderr(Some(TARGET))?;
        // Skip the `+ ...` command echoes and the `rustc-std-workspace-*`
        // shim crates — cargo compiles those without the user's RUSTFLAGS,
        // so including them made this assertion fail spuriously. Every
        // remaining `rustc` invocation must carry `--cfg xargo`.
        assert!(
            stderr
                .lines()
                .filter(|l| !l.starts_with("+") && l.contains("rustc")
                    && !l.contains("rustc-std-workspace"))
                .all(|l| l.contains("--cfg") && l.contains("xargo")),
            "unexpected stderr:\n{}", stderr
        );
        Ok(())
    }
    run!()
}
/// Check that `-C panic=abort` is passed to `rustc` when `panic = "abort"` is
/// set in `profile.release`
// NOTE(review): gated on NOT(dev), unlike the other tests here — presumably
// intentional, but worth confirming against CI configuration.
#[cfg(not(feature = "dev"))]
#[test]
fn panic_abort() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-panic_abort-eabi";
let project = Project::new(TARGET)?;
project.cargo_toml(
r#"
[profile.release]
panic = "abort"
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(
stderr
.lines()
.filter(|l| !l.starts_with("+") && l.contains("--release"))
.all(|l| l.contains("-C") && l.contains("panic=abort"))
);
Ok(())
}
run!()
}
/// Check that adding linker arguments doesn't trigger a sysroot rebuild
#[cfg(feature = "dev")]
#[test]
fn link_arg() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-link_arg-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
project.config(
r#"
[target.__link_arg]
rustflags = ["-C", "link-arg=-lfoo"]
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// The sysroot should be rebuilt if the target specification changed
#[cfg(feature = "dev")]
#[test]
fn specification_changed() {
fn run() -> Result<()> {
// Same spec as `Project::new` writes, plus a `panic-strategy` field —
// a real semantic change, so a rebuild is expected
const JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"linker-flavor": "gcc",
"llvm-target": "thumbv6m-none-eabi",
"max-atomic-width": 0,
"os": "none",
"panic-strategy": "abort",
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
const TARGET: &'static str = "thumbv6m-specification_changed-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
write(
&project.td.path().join("thumbv6m-specification_changed-eabi.json"),
false,
JSON,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// The sysroot should NOT be rebuilt if the target specification didn't really
/// changed, e.g. some fields were moved around
#[cfg(feature = "dev")]
#[test]
fn unchanged_specification() {
fn run() -> Result<()> {
// Same fields as the spec `Project::new` writes, only reordered
// (`os` before `max-atomic-width`) — no rebuild expected
const JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"linker-flavor": "gcc",
"llvm-target": "thumbv6m-none-eabi",
"os": "none",
"max-atomic-width": 0,
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
const TARGET: &'static str = "thumbv6m-unchanged_specification-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
write(
&project.td.path().join("thumbv6m-unchanged_specification-eabi.json"),
false,
JSON,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that a sysroot is built for the host
#[cfg(feature = "dev")]
#[test]
fn host_once() {
fn run() -> Result<()> {
let target = host();
let project = HProject::new(false)?;
let stderr = project.build_and_get_stderr()?;
assert!(sysroot_was_built(&stderr, &target));
Ok(())
}
run!()
}
/// Check that the sysroot is not rebuilt when `xargo build` is called a second
/// time
#[cfg(feature = "dev")]
#[test]
fn host_twice() {
fn run() -> Result<()> {
let target = host();
let project = HProject::new(false)?;
let stderr = project.build_and_get_stderr()?;
assert!(sysroot_was_built(&stderr, &target));
let stderr = project.build_and_get_stderr()?;
assert!(!sysroot_was_built(&stderr, &target));
Ok(())
}
run!()
}
/// Check multi stage sysroot builds with `xargo test`
// We avoid Windows here just because it would be tricky to modify the rust-src
// component (cf. #36501) from within the appveyor environment
#[cfg(feature = "dev")]
#[cfg(not(windows))]
#[test]
fn test() {
fn run() -> Result<()> {
let project = HProject::new(true)?;
project.xargo_toml(
"
[dependencies.std]
features = [\"panic_unwind\"]
[dependencies.test]
stage = 1
",
)?;
xargo()?.arg("test").current_dir(project.td.path()).run()?;
Ok(())
}
run!()
}
/// Check multi stage sysroot builds with `xargo test`
#[cfg(feature = "dev")]
#[test]
fn alloc() {
fn run() -> Result<()> {
let project = HProject::new(false)?;
project.xargo_toml(
"
[dependencies.core]
stage = 0
[dependencies.alloc]
stage = 1
",
)?;
xargo()?.arg("build").current_dir(project.td.path()).run()?;
Ok(())
}
run!()
}
/// Test having a `[patch]` section
#[cfg(feature = "dev")]
#[test]
fn host_patch() {
fn run() -> Result<()> {
let project = HProject::new(false)?;
project.xargo_toml(
r#"
[dependencies.std]
features = ["panic_unwind"]
[patch.crates-io.cc]
git = "https://github.com/alexcrichton/cc-rs"
"#,
)?;
let stderr = project.build_and_get_stderr()?;
// The patched `cc` must be compiled from the git source
assert!(stderr
.lines()
.any(|line| line.contains("Compiling cc ")
&& line.contains("https://github.com/alexcrichton/cc-rs")));
Ok(())
}
run!()
}
Try to fix cargo output parsing: ignore the `rustc-std-workspace-*` shim crates when checking that RUSTFLAGS reach every `rustc` invocation.
#![cfg_attr(not(feature = "dev"), allow(dead_code))]
#![deny(warnings)]
#![feature(const_fn)]
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate lazy_static;
extern crate parking_lot;
extern crate rustc_version;
extern crate tempdir;
extern crate dirs;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::{env, fs};
use parking_lot::{Mutex, MutexGuard};
use tempdir::TempDir;
use errors::*;
// Crate-local error handling via `error-chain`
mod errors {
#![allow(unused_doc_comments)]
error_chain!();
}
// Runs the test's inner `run()` function and turns an `Err` into a panic so
// the failure (with its error message) shows up in the test output
macro_rules! run {
() => {
if let Err(e) = run() {
panic!("{}", e)
}
}
}
// Returns Xargo's "home"
//
// `XARGO_HOME` overrides the default of `$HOME/.xargo`.
fn home() -> Result<PathBuf> {
    match env::var_os("XARGO_HOME") {
        Some(h) => Ok(PathBuf::from(h)),
        None => {
            let home = dirs::home_dir()
                .ok_or_else(|| "couldn't find your home directory. Is $HOME set?")?;
            Ok(home.join(".xargo"))
        }
    }
}
// Removes the sysroot that was built for `target`, if any
fn cleanup(target: &str) -> Result<()> {
let p = home()?.join("lib/rustlib").join(target);
if p.exists() {
fs::remove_dir_all(&p).chain_err(|| format!("couldn't clean sysroot for {}", target))
} else {
Ok(())
}
}
// Returns `true` if an artifact whose file name contains `krate` exists in the
// sysroot that was built for `target`
fn exists(krate: &str, target: &str) -> Result<bool> {
let p = home()?.join("lib/rustlib").join(target).join("lib");
for e in fs::read_dir(&p).chain_err(|| format!("couldn't read the directory {}", p.display()))?
{
let e = e.chain_err(|| {
format!(
"error reading the contents of the directory {}",
p.display()
)
})?;
if e.file_name().to_string_lossy().contains(krate) {
return Ok(true);
}
}
Ok(false)
}
// The host triple, e.g. `x86_64-unknown-linux-gnu`
fn host() -> String {
rustc_version::version_meta().host
}
// `mkdir $path` with a descriptive error message
fn mkdir(path: &Path) -> Result<()> {
fs::create_dir(path).chain_err(|| {
format!("couldn't create the directory {}", path.display())
})
}
/// Returns `true` if the captured STDERR shows that xargo invoked
/// `cargo build --target <target> ... -p core`, i.e. that the sysroot was
/// (re)built. Only the `+ ...` command-echo lines are inspected.
fn sysroot_was_built(stderr: &str, target: &str) -> bool {
    let needles = ["cargo", "build", "--target", target, "-p", "core"];
    stderr
        .lines()
        .filter(|line| line.starts_with("+"))
        .any(|line| needles.iter().all(|needle| line.contains(needle)))
}
// Writes (or, when `append == true`, appends) `contents` to the file at `path`
fn write(path: &Path, append: bool, contents: &str) -> Result<()> {
let p = path.display();
let mut opts = OpenOptions::new();
if append {
opts.append(true);
} else {
// create the file if needed and overwrite any previous contents
opts.create(true);
opts.truncate(true);
}
opts.write(true)
.open(path)
.chain_err(|| format!("couldn't open {}", p))?
.write_all(contents.as_bytes())
.chain_err(|| format!("couldn't write to {}", p))
}
// Returns a `Command` for the freshly built `xargo` binary, which sits one
// directory above the test executable
fn xargo() -> Result<Command> {
let mut p = env::current_exe().chain_err(|| "couldn't get path to current executable")?;
p.pop();
p.pop();
p.push("xargo");
Ok(Command::new(p))
}
// Convenience methods for running a `Command` with error-chained diagnostics
trait CommandExt {
// Runs the command; fails if its exit status is non-zero
fn run(&mut self) -> Result<()>;
// Runs the command and returns its STDERR; fails on non-zero exit status
fn run_and_get_stderr(&mut self) -> Result<String>;
}
impl CommandExt for Command {
fn run(&mut self) -> Result<()> {
let status = self.status()
.chain_err(|| format!("couldn't execute `{:?}`", self))?;
if status.success() {
Ok(())
} else {
Err(format!(
"`{:?}` failed with exit code: {:?}",
self,
status.code()
))?
}
}
fn run_and_get_stderr(&mut self) -> Result<String> {
let out = self.output()
.chain_err(|| format!("couldn't execute `{:?}`", self))?;
let stderr = String::from_utf8(out.stderr)
.chain_err(|| format!("`{:?}` output was not UTF-8", self));
if out.status.success() {
stderr
} else {
// On failure, dump whatever STDERR we captured (or the UTF-8
// error) before reporting the exit code
match stderr {
Ok(e) => print!("{}", e),
Err(e) => print!("{}", e),
}
Err(format!(
"`{:?}` failed with exit code: {:?}",
self,
out.status.code()
))?
}
}
}
// A Cargo project that gets built for a custom (JSON-specified) target
struct Project {
// Target name; also the file stem of the JSON target specification
name: &'static str,
// Temporary directory holding the Cargo project
td: TempDir,
}
impl Project {
fn new(name: &'static str) -> Result<Self> {
// Minimal target specification, written as `<name>.json` so that
// `--target <name>` resolves to it
const JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"linker-flavor": "gcc",
"llvm-target": "thumbv6m-none-eabi",
"max-atomic-width": 0,
"os": "none",
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
let td = TempDir::new("xargo").chain_err(|| "couldn't create a temporary directory")?;
xargo()?
.args(&["init", "--lib", "--vcs", "none", "--name", name])
.current_dir(td.path())
.run()?;
write(&td.path().join("src/lib.rs"), false, "#![no_std]")?;
write(&td.path().join(format!("{}.json", name)), false, JSON)?;
Ok(Project { name: name, td: td })
}
/// Calls `xargo build`
fn build(&self, target: &str) -> Result<()> {
xargo()?
.args(&["build", "--target", target])
.current_dir(self.td.path())
.run()
}
/// Calls `xargo build` and collects STDERR
fn build_and_get_stderr(&self, target: Option<&str>) -> Result<String> {
let mut cmd = xargo()?;
cmd.arg("build");
if let Some(target) = target {
cmd.args(&["--target", target]);
}
// `-v` makes xargo echo the commands it runs as `+ ...` lines
cmd.arg("-v")
.current_dir(self.td.path())
.run_and_get_stderr()
}
/// Appends a string to the project `Cargo.toml`
fn cargo_toml(&self, contents: &str) -> Result<()> {
write(&self.td.path().join("Cargo.toml"), true, contents)
}
/// Adds a `.cargo/config` to the project
fn config(&self, contents: &str) -> Result<()> {
mkdir(&self.td.path().join(".cargo"))?;
write(&self.td.path().join(".cargo/config"), false, contents)
}
/// Calls `xargo doc`
fn doc(&self, target: &str) -> Result<()> {
xargo()?
.args(&["doc", "--target", target])
.current_dir(self.td.path())
.run()
}
/// Adds a `Xargo.toml` to the project
fn xargo_toml(&self, toml: &str) -> Result<()> {
write(&self.td.path().join("Xargo.toml"), false, toml)
}
}
impl Drop for Project {
fn drop(&mut self) {
// Remove this target's sysroot so later test runs start from a clean slate
cleanup(self.name).unwrap()
}
}
// Like `cleanup`, but for host sysroots, which live under `HOST/`
fn hcleanup(triple: &str) -> Result<()> {
let p = home()?.join("HOST/lib/rustlib").join(triple);
if p.exists() {
fs::remove_dir_all(&p).chain_err(|| format!("couldn't clean sysroot for {}", triple))
} else {
Ok(())
}
}
// A test project that targets the host (no `--target` is passed)
struct HProject {
// Serializes host-sysroot tests: they all share one sysroot directory
_guard: MutexGuard<'static, ()>,
// Host triple, e.g. `x86_64-unknown-linux-gnu`
host: String,
td: TempDir,
}
impl HProject {
fn new(test: bool) -> Result<Self> {
// There can only be one instance of this type at any point in time
lazy_static! {
static ref ONCE: Mutex<()> = Mutex::new(());
}
let guard = ONCE.lock();
let td = TempDir::new("xargo").chain_err(|| "couldn't create a temporary directory")?;
xargo()?
.args(&["init", "--lib", "--vcs", "none", "--name", "host"])
.current_dir(td.path())
.run()?;
// `test == true` makes the crate use `std` (exercises multi-stage
// sysroot builds); otherwise it is a plain `#![no_std]` library
if test {
write(
&td.path().join("src/lib.rs"),
false,
"fn _f(_: Vec<std::fs::File>) {}",
)?;
} else {
write(&td.path().join("src/lib.rs"), false, "#![no_std]")?;
}
Ok(HProject {
_guard: guard,
host: host(),
td: td,
})
}
/// Calls `xargo build` and collects STDERR
fn build_and_get_stderr(&self) -> Result<String> {
let mut cmd = xargo()?;
cmd.arg("build");
cmd.arg("-v")
.current_dir(self.td.path())
.run_and_get_stderr()
}
/// Adds a `Xargo.toml` to the project
fn xargo_toml(&self, toml: &str) -> Result<()> {
write(&self.td.path().join("Xargo.toml"), false, toml)
}
}
impl Drop for HProject {
fn drop(&mut self) {
hcleanup(&self.host).unwrap()
}
}
// NOTE: each test uses a unique target name so the per-target sysroots
// (cleaned up in `Project`'s `Drop`) don't collide between tests.
/// Test vanilla `xargo build`
#[cfg(feature = "dev")]
#[test]
fn simple() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-simple-eabi";
let project = Project::new(TARGET)?;
project.build(TARGET)?;
assert!(exists("core", TARGET)?);
Ok(())
}
run!()
}
/// Test building a dependency specified as `target.{}.dependencies` in
/// Xargo.toml
#[cfg(feature = "dev")]
#[test]
fn target_dependencies() {
fn run() -> Result<()> {
// need this exact target name to get the right gcc flags
const TARGET: &'static str = "thumbv7m-none-eabi";
let project = Project::new(TARGET)?;
project.xargo_toml(
r#"
[target.thumbv7m-none-eabi.dependencies.alloc]
"#,
)?;
project.build(TARGET)?;
assert!(exists("core", TARGET)?);
assert!(exists("alloc", TARGET)?);
Ok(())
}
run!()
}
/// Test building a dependency specified as `dependencies` in Xargo.toml
#[cfg(feature = "dev")]
#[test]
fn dependencies() {
fn run() -> Result<()> {
// need this exact target name to get the right gcc flags
const TARGET: &'static str = "thumbv6m-none-eabi";
let project = Project::new(TARGET)?;
project.xargo_toml(
r#"
[dependencies.alloc]
"#,
)?;
project.build(TARGET)?;
assert!(exists("core", TARGET)?);
assert!(exists("alloc", TARGET)?);
Ok(())
}
run!()
}
/// Test `xargo doc`
#[cfg(feature = "dev")]
#[test]
fn doc() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-doc-eabi";
let project = Project::new(TARGET)?;
project.doc(TARGET)?;
assert!(exists("core", TARGET)?);
Ok(())
}
run!()
}
/// Check that calling `xargo build` a second time doesn't rebuild the sysroot
#[cfg(feature = "dev")]
#[test]
fn twice() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-twice-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that if `build.target` is set in `.cargo/config`, that target will be
/// used to build the sysroot
#[cfg(feature = "dev")]
#[test]
fn build_target() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-build_target-eabi";
let project = Project::new(TARGET)?;
project.config(
r#"
[build]
target = "thumbv6m-build_target-eabi"
"#,
)?;
// no `--target` on the command line: the config must supply it
let stderr = project.build_and_get_stderr(None)?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that `--target` overrides `build.target`
#[cfg(feature = "dev")]
#[test]
fn override_build_target() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-override_build_target-eabi";
let project = Project::new(TARGET)?;
project.config(
r#"
[build]
target = "BAD"
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// We shouldn't rebuild the sysroot if `profile.release.lto` changed
#[cfg(feature = "dev")]
#[test]
fn lto_changed() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-lto_changed-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
project.cargo_toml(
r#"
[profile.release]
lto = true
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Modifying RUSTFLAGS should trigger a rebuild of the sysroot
#[cfg(feature = "dev")]
#[test]
fn rustflags_changed() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-rustflags_changed-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
project.config(
r#"
[build]
rustflags = ["--cfg", "xargo"]
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that RUSTFLAGS are passed to all `rustc`s
#[cfg(feature = "dev")]
#[test]
fn rustflags() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-rustflags-eabi";
let project = Project::new(TARGET)?;
project.config(
r#"
[build]
rustflags = ["--cfg", "xargo"]
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
// The `rustc-std-workspace-*` shim crates are excluded: cargo compiles
// them without the user's RUSTFLAGS
assert!(
stderr
.lines()
.filter(|l| !l.starts_with("+") && l.contains("rustc") && !l.contains("rustc-std-workspace"))
.all(|l| l.contains("--cfg") && l.contains("xargo")),
"unexpected stderr:\n{}", stderr
);
Ok(())
}
run!()
}
/// Check that `-C panic=abort` is passed to `rustc` when `panic = "abort"` is
/// set in `profile.release`
// NOTE(review): gated on NOT(dev), unlike the other tests here — presumably
// intentional, but worth confirming against CI configuration.
#[cfg(not(feature = "dev"))]
#[test]
fn panic_abort() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-panic_abort-eabi";
let project = Project::new(TARGET)?;
project.cargo_toml(
r#"
[profile.release]
panic = "abort"
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(
stderr
.lines()
.filter(|l| !l.starts_with("+") && l.contains("--release"))
.all(|l| l.contains("-C") && l.contains("panic=abort"))
);
Ok(())
}
run!()
}
/// Check that adding linker arguments doesn't trigger a sysroot rebuild
#[cfg(feature = "dev")]
#[test]
fn link_arg() {
fn run() -> Result<()> {
const TARGET: &'static str = "thumbv6m-link_arg-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
project.config(
r#"
[target.__link_arg]
rustflags = ["-C", "link-arg=-lfoo"]
"#,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// The sysroot should be rebuilt if the target specification changed
#[cfg(feature = "dev")]
#[test]
fn specification_changed() {
fn run() -> Result<()> {
// Same spec as `Project::new` writes, plus a `panic-strategy` field —
// a real semantic change, so a rebuild is expected
const JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"linker-flavor": "gcc",
"llvm-target": "thumbv6m-none-eabi",
"max-atomic-width": 0,
"os": "none",
"panic-strategy": "abort",
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
const TARGET: &'static str = "thumbv6m-specification_changed-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
write(
&project.td.path().join("thumbv6m-specification_changed-eabi.json"),
false,
JSON,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// The sysroot should NOT be rebuilt if the target specification didn't really
/// changed, e.g. some fields were moved around
#[cfg(feature = "dev")]
#[test]
fn unchanged_specification() {
fn run() -> Result<()> {
// Same fields as the spec `Project::new` writes, only reordered
// (`os` before `max-atomic-width`) — no rebuild expected
const JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"linker-flavor": "gcc",
"llvm-target": "thumbv6m-none-eabi",
"os": "none",
"max-atomic-width": 0,
"target-c-int-width": "32",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
const TARGET: &'static str = "thumbv6m-unchanged_specification-eabi";
let project = Project::new(TARGET)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(sysroot_was_built(&stderr, TARGET));
write(
&project.td.path().join("thumbv6m-unchanged_specification-eabi.json"),
false,
JSON,
)?;
let stderr = project.build_and_get_stderr(Some(TARGET))?;
assert!(!sysroot_was_built(&stderr, TARGET));
Ok(())
}
run!()
}
/// Check that a sysroot is built for the host
#[cfg(feature = "dev")]
#[test]
fn host_once() {
fn run() -> Result<()> {
let target = host();
let project = HProject::new(false)?;
let stderr = project.build_and_get_stderr()?;
assert!(sysroot_was_built(&stderr, &target));
Ok(())
}
run!()
}
/// Check that the sysroot is not rebuilt when `xargo build` is called a second
/// time
#[cfg(feature = "dev")]
#[test]
fn host_twice() {
fn run() -> Result<()> {
let target = host();
let project = HProject::new(false)?;
let stderr = project.build_and_get_stderr()?;
assert!(sysroot_was_built(&stderr, &target));
let stderr = project.build_and_get_stderr()?;
assert!(!sysroot_was_built(&stderr, &target));
Ok(())
}
run!()
}
/// Check multi stage sysroot builds with `xargo test`
// We avoid Windows here just because it would be tricky to modify the rust-src
// component (cf. #36501) from within the appveyor environment
#[cfg(feature = "dev")]
#[cfg(not(windows))]
#[test]
fn test() {
fn run() -> Result<()> {
let project = HProject::new(true)?;
project.xargo_toml(
"
[dependencies.std]
features = [\"panic_unwind\"]
[dependencies.test]
stage = 1
",
)?;
xargo()?.arg("test").current_dir(project.td.path()).run()?;
Ok(())
}
run!()
}
/// Check multi stage sysroot builds with `xargo test`
#[cfg(feature = "dev")]
#[test]
fn alloc() {
fn run() -> Result<()> {
let project = HProject::new(false)?;
project.xargo_toml(
"
[dependencies.core]
stage = 0
[dependencies.alloc]
stage = 1
",
)?;
xargo()?.arg("build").current_dir(project.td.path()).run()?;
Ok(())
}
run!()
}
/// Test having a `[patch]` section
#[cfg(feature = "dev")]
#[test]
fn host_patch() {
fn run() -> Result<()> {
let project = HProject::new(false)?;
project.xargo_toml(
r#"
[dependencies.std]
features = ["panic_unwind"]
[patch.crates-io.cc]
git = "https://github.com/alexcrichton/cc-rs"
"#,
)?;
let stderr = project.build_and_get_stderr()?;
// The patched `cc` must be compiled from the git source
assert!(stderr
.lines()
.any(|line| line.contains("Compiling cc ")
&& line.contains("https://github.com/alexcrichton/cc-rs")));
Ok(())
}
run!()
}
|
extern crate tempdir;
use std::env;
use std::fs::{self, File, OpenOptions};
use std::process::Command;
use std::io::Write;
use tempdir::TempDir;
// Unwraps a `Result`, panicking with the failing expression and its error.
// NOTE(review): this shadows the standard library's `try!` macro; on Rust
// 2018+ `try` is a reserved keyword and this would need renaming.
macro_rules! try {
($e:expr) => {
$e.unwrap_or_else(|e| panic!("{} with {}", stringify!($e), e))
}
}
// Sysroot crates that xargo is expected to build
const CRATES: &'static [&'static str] = &["alloc", "collections", "core", "rand", "rustc_unicode"];
// Contents written to the test project's `src/lib.rs`
const LIB_RS: &'static [u8] = b"#![no_std]";
// A minimal custom target specification.
// NOTE(review): lacks a `data-layout` field — a later revision adds it.
const CUSTOM_JSON: &'static str = r#"
{
"arch": "arm",
"llvm-target": "thumbv7m-none-eabi",
"os": "none",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
// Returns a `Command` for the xargo binary next to the test executable
fn xargo() -> Command {
let mut path = try!(env::current_exe());
path.pop();
path.push("xargo");
Command::new(path)
}
// Runs `cmd`, dumping its output and panicking if it exits unsuccessfully
fn run(cmd: &mut Command) {
println!("running: {:?}", cmd);
let output = try!(cmd.output());
if !output.status.success() {
println!("--- stdout:\n{}", String::from_utf8_lossy(&output.stdout));
println!("--- stderr:\n{}", String::from_utf8_lossy(&output.stderr));
panic!("expected success, got: {}", output.status);
}
}
// Returns `true` if a `lib<krate>*.rlib` exists in the sysroot built for `target`
fn exists_rlib(krate: &str, target: &str) -> bool {
let home = env::home_dir().unwrap();
for entry in try!(fs::read_dir(home.join(format!(".xargo/lib/rustlib/{}/lib", target)))) {
let path = &try!(entry).path();
if path.is_file() && path.extension().and_then(|e| e.to_str()) == Some("rlib") &&
path.file_stem()
.and_then(|f| f.to_str())
.map(|s| s.starts_with(&format!("lib{}", krate))) == Some(true) {
return true;
}
}
false
}
// Removes the sysroot that was built for `target`
fn cleanup(target: &str) {
try!(fs::remove_dir_all(env::home_dir()
.unwrap()
.join(format!(".xargo/lib/rustlib/{}", target))));
}
// Vanilla `xargo build --target <custom json target>` builds all sysroot crates
#[test]
fn simple() {
const TARGET: &'static str = "__simple";
let td = try!(TempDir::new("xargo"));
let td = &td.path();
try!(try!(File::create(td.join(format!("{}.json", TARGET)))).write_all(CUSTOM_JSON.as_bytes()));
run(xargo().args(&["init", "--vcs", "none", "--name", TARGET]).current_dir(td));
try!(try!(OpenOptions::new().truncate(true).write(true).open(td.join("src/lib.rs")))
.write_all(LIB_RS));
run(xargo().args(&["build", "--target", TARGET]).current_dir(td));
for krate in CRATES {
assert!(exists_rlib(krate, TARGET));
}
cleanup(TARGET);
}
// Check that `xargo build` builds a sysroot for the default target in .cargo/config
#[test]
fn cargo_config() {
const CONFIG: &'static str = "[build]\ntarget = '{}'";
const TARGET: &'static str = "__cargo_config";
let td = try!(TempDir::new("xargo"));
let td = &td.path();
try!(try!(File::create(td.join(format!("{}.json", TARGET)))).write_all(CUSTOM_JSON.as_bytes()));
run(xargo().args(&["init", "--vcs", "none", "--name", TARGET]).current_dir(td));
try!(try!(OpenOptions::new().truncate(true).write(true).open(td.join("src/lib.rs")))
.write_all(LIB_RS));
try!(fs::create_dir(td.join(".cargo")));
try!(try!(File::create(td.join(".cargo/config")))
.write_all(CONFIG.replace("{}", TARGET).as_bytes()));
run(xargo().arg("build").current_dir(td));
for krate in CRATES {
assert!(exists_rlib(krate, TARGET));
}
cleanup(TARGET);
}
// Check that `--target foo` overrides the default target in .cargo/config
#[test]
fn override_cargo_config() {
const CONFIG: &'static [u8] = b"[build]\ntarget = 'dummy'";
const TARGET: &'static str = "__override_cargo_config";
let td = try!(TempDir::new("xargo"));
let td = &td.path();
try!(try!(File::create(td.join(format!("{}.json", TARGET)))).write_all(CUSTOM_JSON.as_bytes()));
run(xargo().args(&["init", "--vcs", "none", "--name", TARGET]).current_dir(td));
try!(try!(OpenOptions::new().truncate(true).write(true).open(td.join("src/lib.rs")))
.write_all(LIB_RS));
try!(fs::create_dir(td.join(".cargo")));
try!(try!(File::create(td.join(".cargo/config"))).write_all(CONFIG));
run(xargo().args(&["build", "--target", TARGET]).current_dir(td));
for krate in CRATES {
assert!(exists_rlib(krate, TARGET));
}
cleanup(TARGET);
}
Add the `data-layout` field to the custom target specification file.
extern crate tempdir;
use std::env;
use std::fs::{self, File, OpenOptions};
use std::process::Command;
use std::io::Write;
use tempdir::TempDir;
macro_rules! try {
($e:expr) => {
$e.unwrap_or_else(|e| panic!("{} with {}", stringify!($e), e))
}
}
const CRATES: &'static [&'static str] = &["alloc", "collections", "core", "rand", "rustc_unicode"];
const LIB_RS: &'static [u8] = b"#![no_std]";
const CUSTOM_JSON: &'static str = r#"
{
"arch": "arm",
"data-layout": "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64",
"llvm-target": "thumbv7m-none-eabi",
"os": "none",
"target-endian": "little",
"target-pointer-width": "32"
}
"#;
fn xargo() -> Command {
let mut path = try!(env::current_exe());
path.pop();
path.push("xargo");
Command::new(path)
}
fn run(cmd: &mut Command) {
println!("running: {:?}", cmd);
let output = try!(cmd.output());
if !output.status.success() {
println!("--- stdout:\n{}", String::from_utf8_lossy(&output.stdout));
println!("--- stderr:\n{}", String::from_utf8_lossy(&output.stderr));
panic!("expected success, got: {}", output.status);
}
}
fn exists_rlib(krate: &str, target: &str) -> bool {
let home = env::home_dir().unwrap();
for entry in try!(fs::read_dir(home.join(format!(".xargo/lib/rustlib/{}/lib", target)))) {
let path = &try!(entry).path();
if path.is_file() && path.extension().and_then(|e| e.to_str()) == Some("rlib") &&
path.file_stem()
.and_then(|f| f.to_str())
.map(|s| s.starts_with(&format!("lib{}", krate))) == Some(true) {
return true;
}
}
false
}
/// Removes the sysroot built for `target` so subsequent test runs start
/// from a clean state.
fn cleanup(target: &str) {
    try!(fs::remove_dir_all(env::home_dir()
        .unwrap()
        .join(format!(".xargo/lib/rustlib/{}", target))));
}
// Smoke test: `xargo build --target <custom>` for a custom JSON target spec
// produces a sysroot containing an rlib for every crate in `CRATES`.
#[test]
fn simple() {
    const TARGET: &'static str = "__simple";
    // Fresh temporary directory holding the target spec and a newly
    // `xargo init`-ed crate whose lib.rs is replaced with a no_std stub.
    let td = try!(TempDir::new("xargo"));
    let td = &td.path();
    try!(try!(File::create(td.join(format!("{}.json", TARGET)))).write_all(CUSTOM_JSON.as_bytes()));
    run(xargo().args(&["init", "--vcs", "none", "--name", TARGET]).current_dir(td));
    try!(try!(OpenOptions::new().truncate(true).write(true).open(td.join("src/lib.rs")))
        .write_all(LIB_RS));
    run(xargo().args(&["build", "--target", TARGET]).current_dir(td));
    for krate in CRATES {
        assert!(exists_rlib(krate, TARGET));
    }
    // Remove the generated sysroot so other tests start from scratch.
    cleanup(TARGET);
}
// Check that `xargo build` builds a sysroot for the default target in .cargo/config
#[test]
fn cargo_config() {
    // The `{}` placeholder is substituted with the target name below.
    const CONFIG: &'static str = "[build]\ntarget = '{}'";
    const TARGET: &'static str = "__cargo_config";
    let td = try!(TempDir::new("xargo"));
    let td = &td.path();
    try!(try!(File::create(td.join(format!("{}.json", TARGET)))).write_all(CUSTOM_JSON.as_bytes()));
    run(xargo().args(&["init", "--vcs", "none", "--name", TARGET]).current_dir(td));
    try!(try!(OpenOptions::new().truncate(true).write(true).open(td.join("src/lib.rs")))
        .write_all(LIB_RS));
    try!(fs::create_dir(td.join(".cargo")));
    try!(try!(File::create(td.join(".cargo/config")))
        .write_all(CONFIG.replace("{}", TARGET).as_bytes()));
    // No `--target` flag: xargo must pick up the default from .cargo/config.
    run(xargo().arg("build").current_dir(td));
    for krate in CRATES {
        assert!(exists_rlib(krate, TARGET));
    }
    cleanup(TARGET);
}
// Check that `--target foo` overrides the default target in .cargo/config
#[test]
fn override_cargo_config() {
    // .cargo/config names a bogus 'dummy' target; the explicit `--target`
    // flag passed on the command line must take precedence over it.
    const CONFIG: &'static [u8] = b"[build]\ntarget = 'dummy'";
    const TARGET: &'static str = "__override_cargo_config";
    let td = try!(TempDir::new("xargo"));
    let td = &td.path();
    try!(try!(File::create(td.join(format!("{}.json", TARGET)))).write_all(CUSTOM_JSON.as_bytes()));
    run(xargo().args(&["init", "--vcs", "none", "--name", TARGET]).current_dir(td));
    try!(try!(OpenOptions::new().truncate(true).write(true).open(td.join("src/lib.rs")))
        .write_all(LIB_RS));
    try!(fs::create_dir(td.join(".cargo")));
    try!(try!(File::create(td.join(".cargo/config"))).write_all(CONFIG));
    run(xargo().args(&["build", "--target", TARGET]).current_dir(td));
    for krate in CRATES {
        assert!(exists_rlib(krate, TARGET));
    }
    cleanup(TARGET);
}
|
extern crate futures;
#[macro_use]
extern crate tokio_core;
extern crate tokio_process;
#[macro_use]
extern crate log;
extern crate env_logger;
use std::io;
use std::process::{Stdio, ExitStatus, Command};
use futures::{Future, BoxFuture};
use futures::stream::{self, Stream};
use tokio_core::io::{read_until, write_all, read_to_end};
use tokio_core::reactor::Core;
use tokio_process::{CommandExt, Child};
mod support;
/// Builds a `cat`-like command with both stdin and stdout piped.
fn cat() -> Command {
    let mut command = support::cmd("cat");
    command.stdin(Stdio::piped());
    command.stdout(Stdio::piped());
    command
}
/// Feeds `n` lines to the child's stdin while concurrently reading them back
/// from its stdout, then resolves to the child's exit status.
///
/// The returned future errors if the echoed output diverges from the input,
/// or if EOF on stdout arrives before — or data arrives after — the `n`th line.
fn feed_cat(mut cat: Child, n: usize) -> BoxFuture<ExitStatus, io::Error> {
    let stdin = cat.stdin().take().unwrap();
    let stdout = cat.stdout().take().unwrap();
    debug!("starting to feed");
    // Produce n lines on the child's stdout.
    let numbers = stream::iter((0..n).into_iter().map(Ok));
    let write = numbers.fold(stdin, |stdin, i| {
        debug!("sending line {} to child", i);
        write_all(stdin, format!("line {}\n", i).into_bytes()).map(|p| p.0)
    }).map(|_| ());
    // Try to read `n + 1` lines, ensuring the last one is empty
    // (i.e. EOF is reached after `n` lines).
    let reader = io::BufReader::new(stdout);
    let expected_numbers = stream::iter((0..n + 1).map(Ok));
    let read = expected_numbers.fold((reader, 0), move |(reader, i), _| {
        // `done` marks the extra (n+1)th iteration, which must observe EOF.
        let done = i >= n;
        debug!("starting read from child");
        read_until(reader, b'\n', Vec::new()).and_then(move |(reader, vec)| {
            debug!("read line {} from child ({} bytes, done: {})",
                i, vec.len(), done);
            match (done, vec.len()) {
                // EOF before all `n` lines were echoed back.
                (false, 0) => {
                    Err(io::Error::new(io::ErrorKind::BrokenPipe, "broken pipe"))
                },
                // Data after the expected `n` lines.
                (true, n) if n != 0 => {
                    Err(io::Error::new(io::ErrorKind::Other, "extraneous data"))
                },
                _ => {
                    let s = std::str::from_utf8(&vec).unwrap();
                    let expected = format!("line {}\n", i);
                    if done || s == expected {
                        Ok((reader, i + 1))
                    } else {
                        Err(io::Error::new(io::ErrorKind::Other, "unexpected data"))
                    }
                }
            }
        })
    });
    // Compose reading and writing concurrently.
    write.join(read).and_then(|_| cat).boxed()
}
#[test]
/// Check for the following properties when feeding stdin and
/// consuming stdout of a cat-like process:
///
/// - A number of lines that amounts to a number of bytes exceeding a
///   typical OS buffer size can be fed to the child without
///   deadlock. This tests that we also consume the stdout
///   concurrently; otherwise this would deadlock.
///
/// - We read the same lines from the child that we fed it.
///
/// - The child does produce EOF on stdout after the last line.
fn feed_a_lot() {
    support::init();
    let mut lp = Core::new().unwrap();
    let child = cat().spawn_async(&lp.handle()).unwrap();
    // 10000 "line {i}\n" lines: enough data to overrun a pipe buffer and
    // force genuinely concurrent reading and writing.
    let status = lp.run(feed_cat(child, 10000)).unwrap();
    assert_eq!(status.code(), Some(0));
}
#[test]
/// Dropping the `Child` handle is expected to kill the process; the proof is
/// that its stdout immediately yields EOF with no data.
fn drop_kills() {
    support::init();
    let mut lp = Core::new().unwrap();
    let mut child = cat().spawn_async(&lp.handle()).unwrap();
    let stdin = child.stdin().take().unwrap();
    let stdout = child.stdout().take().unwrap();
    drop(child);
    // The write's outcome is not a reliable signal: depending on timing and
    // platform it may fail with an arbitrary error kind, or even succeed if
    // the pipe buffer accepts the bytes before the kill lands. Asserting a
    // specific `BrokenPipe` here made the test flaky, so ignore the result
    // and rely on the EOF check below.
    drop(lp.run(write_all(stdin, b"1234")));
    let (_, output) = lp.run(read_to_end(stdout, Vec::new())).unwrap();
    assert_eq!(output.len(), 0);
}
#[test]
fn wait_with_output_captures() {
    support::init();
    let mut core = Core::new().unwrap();
    let mut child = cat().spawn_async(&core.handle()).unwrap();
    let stdin = child.stdin().take().unwrap();
    let out = child.wait_with_output();
    // `write_all` consumes `stdin`; keeping only the written bytes (`p.1`)
    // drops the handle once the write finishes, so the child should see EOF,
    // echo the input, and exit — letting `wait_with_output` resolve.
    let ret = core.run(write_all(stdin, b"1234").map(|p| p.1).join(out)).unwrap();
    let (written, output) = ret;
    assert!(output.status.success());
    assert_eq!(output.stdout, written);
    assert_eq!(output.stderr.len(), 0);
}
process: Tweak drop_kills test
extern crate futures;
#[macro_use]
extern crate tokio_core;
extern crate tokio_process;
#[macro_use]
extern crate log;
extern crate env_logger;
use std::io;
use std::process::{Stdio, ExitStatus, Command};
use futures::{Future, BoxFuture};
use futures::stream::{self, Stream};
use tokio_core::io::{read_until, write_all, read_to_end};
use tokio_core::reactor::Core;
use tokio_process::{CommandExt, Child};
mod support;
/// Builds a `cat`-like command with both stdin and stdout piped.
fn cat() -> Command {
    let mut command = support::cmd("cat");
    command.stdin(Stdio::piped());
    command.stdout(Stdio::piped());
    command
}
/// Feeds `n` lines to the child's stdin while concurrently reading them back
/// from its stdout, then resolves to the child's exit status.
///
/// The returned future errors if the echoed output diverges from the input,
/// or if EOF on stdout arrives before — or data arrives after — the `n`th line.
fn feed_cat(mut cat: Child, n: usize) -> BoxFuture<ExitStatus, io::Error> {
    let stdin = cat.stdin().take().unwrap();
    let stdout = cat.stdout().take().unwrap();
    debug!("starting to feed");
    // Produce n lines on the child's stdout.
    let numbers = stream::iter((0..n).into_iter().map(Ok));
    let write = numbers.fold(stdin, |stdin, i| {
        debug!("sending line {} to child", i);
        write_all(stdin, format!("line {}\n", i).into_bytes()).map(|p| p.0)
    }).map(|_| ());
    // Try to read `n + 1` lines, ensuring the last one is empty
    // (i.e. EOF is reached after `n` lines).
    let reader = io::BufReader::new(stdout);
    let expected_numbers = stream::iter((0..n + 1).map(Ok));
    let read = expected_numbers.fold((reader, 0), move |(reader, i), _| {
        // `done` marks the extra (n+1)th iteration, which must observe EOF.
        let done = i >= n;
        debug!("starting read from child");
        read_until(reader, b'\n', Vec::new()).and_then(move |(reader, vec)| {
            debug!("read line {} from child ({} bytes, done: {})",
                i, vec.len(), done);
            match (done, vec.len()) {
                // EOF before all `n` lines were echoed back.
                (false, 0) => {
                    Err(io::Error::new(io::ErrorKind::BrokenPipe, "broken pipe"))
                },
                // Data after the expected `n` lines.
                (true, n) if n != 0 => {
                    Err(io::Error::new(io::ErrorKind::Other, "extraneous data"))
                },
                _ => {
                    let s = std::str::from_utf8(&vec).unwrap();
                    let expected = format!("line {}\n", i);
                    if done || s == expected {
                        Ok((reader, i + 1))
                    } else {
                        Err(io::Error::new(io::ErrorKind::Other, "unexpected data"))
                    }
                }
            }
        })
    });
    // Compose reading and writing concurrently.
    write.join(read).and_then(|_| cat).boxed()
}
#[test]
/// Check for the following properties when feeding stdin and
/// consuming stdout of a cat-like process:
///
/// - A number of lines that amounts to a number of bytes exceeding a
///   typical OS buffer size can be fed to the child without
///   deadlock. This tests that we also consume the stdout
///   concurrently; otherwise this would deadlock.
///
/// - We read the same lines from the child that we fed it.
///
/// - The child does produce EOF on stdout after the last line.
fn feed_a_lot() {
    support::init();
    let mut lp = Core::new().unwrap();
    let child = cat().spawn_async(&lp.handle()).unwrap();
    // 10000 "line {i}\n" lines: enough data to overrun a pipe buffer and
    // force genuinely concurrent reading and writing.
    let status = lp.run(feed_cat(child, 10000)).unwrap();
    assert_eq!(status.code(), Some(0));
}
#[test]
fn drop_kills() {
    support::init();
    let mut lp = Core::new().unwrap();
    let mut child = cat().spawn_async(&lp.handle()).unwrap();
    let stdin = child.stdin().take().unwrap();
    let stdout = child.stdout().take().unwrap();
    // Dropping the handle is expected to kill the child process.
    drop(child);
    // The write may fail (or even succeed into the pipe buffer) depending on
    // timing and platform, so its result is deliberately ignored; the EOF on
    // stdout below is what actually proves the child is gone.
    drop(lp.run(write_all(stdin, b"1234")));
    let (_, output) = lp.run(read_to_end(stdout, Vec::new())).unwrap();
    assert_eq!(output.len(), 0);
}
#[test]
fn wait_with_output_captures() {
    support::init();
    let mut core = Core::new().unwrap();
    let mut child = cat().spawn_async(&core.handle()).unwrap();
    let stdin = child.stdin().take().unwrap();
    let out = child.wait_with_output();
    // `write_all` consumes `stdin`; keeping only the written bytes (`p.1`)
    // drops the handle once the write finishes, so the child should see EOF,
    // echo the input, and exit — letting `wait_with_output` resolve.
    let ret = core.run(write_all(stdin, b"1234").map(|p| p.1).join(out)).unwrap();
    let (written, output) = ret;
    assert!(output.status.success());
    assert_eq!(output.stdout, written);
    assert_eq!(output.stderr.len(), 0);
}
|
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
/// A trait for borrowing data.
///
/// In Rust, it is common to provide different representations of a type for
/// different use cases. For instance, storage location and management for a
/// value can be specifically chosen as appropriate for a particular use via
/// pointer types such as [`Box<T>`] or [`Rc<T>`]. Beyond these generic
/// wrappers that can be used with any type, some types provide optional
/// facets providing potentially costly functionality. An example for such a
/// type is [`String`] which adds the ability to extend a string to the basic
/// [`str`]. This requires keeping additional information unnecessary for a
/// simple, immutable string.
///
/// These types provide access to the underlying data through references
/// to the type of that data. They are said to be ‘borrowed as’ that type.
/// For instance, a [`Box<T>`] can be borrowed as `T` while a [`String`]
/// can be borrowed as `str`.
///
/// Types express that they can be borrowed as some type `T` by implementing
/// `Borrow<T>`, providing a reference to a `T` in the trait’s
/// [`borrow`] method. A type is free to borrow as several different types.
/// If it wishes to mutably borrow as the type – allowing the underlying data
/// to be modified, it can additionally implement [`BorrowMut<T>`].
///
/// Further, when providing implementations for additional traits, it needs
/// to be considered whether they should behave identically to those of the
/// underlying type as a consequence of acting as a representation of that
/// underlying type. Generic code typically uses `Borrow<T>` when it relies
/// on the identical behavior of these additional trait implementations.
/// These traits will likely appear as additional trait bounds.
///
/// In particular `Eq`, `Ord` and `Hash` must be equivalent for
/// borrowed and owned values: `x.borrow() == y.borrow()` should give the
/// same result as `x == y`.
///
/// If generic code merely needs to work for all types that can
/// provide a reference to related type `T`, it is often better to use
/// [`AsRef<T>`] as more types can safely implement it.
///
/// [`AsRef<T>`]: crate::convert::AsRef
/// [`BorrowMut<T>`]: BorrowMut
/// [`Box<T>`]: ../../std/boxed/struct.Box.html
/// [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [`String`]: ../../std/string/struct.String.html
/// [`borrow`]: Borrow::borrow
///
/// # Examples
///
/// As a data collection, [`HashMap<K, V>`] owns both keys and values. If
/// the key’s actual data is wrapped in a managing type of some kind, it
/// should, however, still be possible to search for a value using a
/// reference to the key’s data. For instance, if the key is a string, then
/// it is likely stored with the hash map as a [`String`], while it should
/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to
/// operate on a `String` while `get` needs to be able to use a `&str`.
///
/// Slightly simplified, the relevant parts of `HashMap<K, V>` look like
/// this:
///
/// ```
/// use std::borrow::Borrow;
/// use std::hash::Hash;
///
/// pub struct HashMap<K, V> {
/// # marker: ::std::marker::PhantomData<(K, V)>,
/// // fields omitted
/// }
///
/// impl<K, V> HashMap<K, V> {
/// pub fn insert(&self, key: K, value: V) -> Option<V>
/// where K: Hash + Eq
/// {
/// # unimplemented!()
/// // ...
/// }
///
/// pub fn get<Q>(&self, k: &Q) -> Option<&V>
/// where
/// K: Borrow<Q>,
/// Q: Hash + Eq + ?Sized
/// {
/// # unimplemented!()
/// // ...
/// }
/// }
/// ```
///
/// The entire hash map is generic over a key type `K`. Because these keys
/// are stored with the hash map, this type has to own the key’s data.
/// When inserting a key-value pair, the map is given such a `K` and needs
/// to find the correct hash bucket and check if the key is already present
/// based on that `K`. It therefore requires `K: Hash + Eq`.
///
/// When searching for a value in the map, however, having to provide a
/// reference to a `K` as the key to search for would require to always
/// create such an owned value. For string keys, this would mean a `String`
/// value needs to be created just for the search for cases where only a
/// `str` is available.
///
/// Instead, the `get` method is generic over the type of the underlying key
/// data, called `Q` in the method signature above. It states that `K`
/// borrows as a `Q` by requiring that `K: Borrow<Q>`. By additionally
/// requiring `Q: Hash + Eq`, it signals the requirement that `K` and `Q`
/// have implementations of the `Hash` and `Eq` traits that produce identical
/// results.
///
/// The implementation of `get` relies in particular on identical
/// implementations of `Hash` by determining the key’s hash bucket by calling
/// `Hash::hash` on the `Q` value even though it inserted the key based on
/// the hash value calculated from the `K` value.
///
/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value
/// produces a different hash than `Q`. For instance, imagine you have a
/// type that wraps a string but compares ASCII letters ignoring their case:
///
/// ```
/// pub struct CaseInsensitiveString(String);
///
/// impl PartialEq for CaseInsensitiveString {
/// fn eq(&self, other: &Self) -> bool {
/// self.0.eq_ignore_ascii_case(&other.0)
/// }
/// }
///
/// impl Eq for CaseInsensitiveString { }
/// ```
///
/// Because two equal values need to produce the same hash value, the
/// implementation of `Hash` needs to ignore ASCII case, too:
///
/// ```
/// # use std::hash::{Hash, Hasher};
/// # pub struct CaseInsensitiveString(String);
/// impl Hash for CaseInsensitiveString {
/// fn hash<H: Hasher>(&self, state: &mut H) {
/// for c in self.0.as_bytes() {
/// c.to_ascii_lowercase().hash(state)
/// }
/// }
/// }
/// ```
///
/// Can `CaseInsensitiveString` implement `Borrow<str>`? It certainly can
/// provide a reference to a string slice via its contained owned string.
/// But because its `Hash` implementation differs, it behaves differently
/// from `str` and therefore must not, in fact, implement `Borrow<str>`.
/// If it wants to allow others access to the underlying `str`, it can do
/// that via `AsRef<str>` which doesn’t carry any extra requirements.
///
/// [`Hash`]: crate::hash::Hash
/// [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
/// [`String`]: ../../std/string/struct.String.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Borrow<Borrowed: ?Sized> {
    // `Borrowed: ?Sized` permits unsized borrow targets such as `str` and
    // `[T]`, which is the common case (`String: Borrow<str>`).
    /// Immutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::Borrow;
    ///
    /// fn check<T: Borrow<str>>(s: T) {
    ///     assert_eq!("Hello", s.borrow());
    /// }
    ///
    /// let s = "Hello".to_string();
    ///
    /// check(s);
    ///
    /// let s = "Hello";
    ///
    /// check(s);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow(&self) -> &Borrowed;
}
/// A trait for mutably borrowing data.
///
/// As a companion to [`Borrow<T>`] this trait allows a type to borrow as
/// an underlying type by providing a mutable reference. See [`Borrow<T>`]
/// for more information on borrowing as another type.
///
/// [`Borrow<T>`]: Borrow
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BorrowMut<Borrowed: ?Sized>: Borrow<Borrowed> {
    // The `Borrow<Borrowed>` supertrait guarantees that an immutable borrow
    // is available wherever a mutable one is.
    /// Mutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::BorrowMut;
    ///
    /// fn check<T: BorrowMut<[i32]>>(mut v: T) {
    ///     assert_eq!(&mut [1, 2, 3], v.borrow_mut());
    /// }
    ///
    /// let v = vec![1, 2, 3];
    ///
    /// check(v);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow_mut(&mut self) -> &mut Borrowed;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for T {
    // Reflexive impl: every value can be borrowed as itself.
    fn borrow(&self) -> &T {
        self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for T {
    fn borrow_mut(&mut self) -> &mut T {
        self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for &T {
    // A shared reference borrows as its referent.
    fn borrow(&self) -> &T {
        &**self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for &mut T {
    // A mutable reference also borrows (immutably) as its referent.
    fn borrow(&self) -> &T {
        &**self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for &mut T {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}
Remove AsRef link as it is in the prelude
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
/// A trait for borrowing data.
///
/// In Rust, it is common to provide different representations of a type for
/// different use cases. For instance, storage location and management for a
/// value can be specifically chosen as appropriate for a particular use via
/// pointer types such as [`Box<T>`] or [`Rc<T>`]. Beyond these generic
/// wrappers that can be used with any type, some types provide optional
/// facets providing potentially costly functionality. An example for such a
/// type is [`String`] which adds the ability to extend a string to the basic
/// [`str`]. This requires keeping additional information unnecessary for a
/// simple, immutable string.
///
/// These types provide access to the underlying data through references
/// to the type of that data. They are said to be ‘borrowed as’ that type.
/// For instance, a [`Box<T>`] can be borrowed as `T` while a [`String`]
/// can be borrowed as `str`.
///
/// Types express that they can be borrowed as some type `T` by implementing
/// `Borrow<T>`, providing a reference to a `T` in the trait’s
/// [`borrow`] method. A type is free to borrow as several different types.
/// If it wishes to mutably borrow as the type – allowing the underlying data
/// to be modified, it can additionally implement [`BorrowMut<T>`].
///
/// Further, when providing implementations for additional traits, it needs
/// to be considered whether they should behave identically to those of the
/// underlying type as a consequence of acting as a representation of that
/// underlying type. Generic code typically uses `Borrow<T>` when it relies
/// on the identical behavior of these additional trait implementations.
/// These traits will likely appear as additional trait bounds.
///
/// In particular `Eq`, `Ord` and `Hash` must be equivalent for
/// borrowed and owned values: `x.borrow() == y.borrow()` should give the
/// same result as `x == y`.
///
/// If generic code merely needs to work for all types that can
/// provide a reference to related type `T`, it is often better to use
/// [`AsRef<T>`] as more types can safely implement it.
///
/// [`BorrowMut<T>`]: BorrowMut
/// [`Box<T>`]: ../../std/boxed/struct.Box.html
/// [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [`String`]: ../../std/string/struct.String.html
/// [`borrow`]: Borrow::borrow
///
/// # Examples
///
/// As a data collection, [`HashMap<K, V>`] owns both keys and values. If
/// the key’s actual data is wrapped in a managing type of some kind, it
/// should, however, still be possible to search for a value using a
/// reference to the key’s data. For instance, if the key is a string, then
/// it is likely stored with the hash map as a [`String`], while it should
/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to
/// operate on a `String` while `get` needs to be able to use a `&str`.
///
/// Slightly simplified, the relevant parts of `HashMap<K, V>` look like
/// this:
///
/// ```
/// use std::borrow::Borrow;
/// use std::hash::Hash;
///
/// pub struct HashMap<K, V> {
/// # marker: ::std::marker::PhantomData<(K, V)>,
/// // fields omitted
/// }
///
/// impl<K, V> HashMap<K, V> {
/// pub fn insert(&self, key: K, value: V) -> Option<V>
/// where K: Hash + Eq
/// {
/// # unimplemented!()
/// // ...
/// }
///
/// pub fn get<Q>(&self, k: &Q) -> Option<&V>
/// where
/// K: Borrow<Q>,
/// Q: Hash + Eq + ?Sized
/// {
/// # unimplemented!()
/// // ...
/// }
/// }
/// ```
///
/// The entire hash map is generic over a key type `K`. Because these keys
/// are stored with the hash map, this type has to own the key’s data.
/// When inserting a key-value pair, the map is given such a `K` and needs
/// to find the correct hash bucket and check if the key is already present
/// based on that `K`. It therefore requires `K: Hash + Eq`.
///
/// When searching for a value in the map, however, having to provide a
/// reference to a `K` as the key to search for would require to always
/// create such an owned value. For string keys, this would mean a `String`
/// value needs to be created just for the search for cases where only a
/// `str` is available.
///
/// Instead, the `get` method is generic over the type of the underlying key
/// data, called `Q` in the method signature above. It states that `K`
/// borrows as a `Q` by requiring that `K: Borrow<Q>`. By additionally
/// requiring `Q: Hash + Eq`, it signals the requirement that `K` and `Q`
/// have implementations of the `Hash` and `Eq` traits that produce identical
/// results.
///
/// The implementation of `get` relies in particular on identical
/// implementations of `Hash` by determining the key’s hash bucket by calling
/// `Hash::hash` on the `Q` value even though it inserted the key based on
/// the hash value calculated from the `K` value.
///
/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value
/// produces a different hash than `Q`. For instance, imagine you have a
/// type that wraps a string but compares ASCII letters ignoring their case:
///
/// ```
/// pub struct CaseInsensitiveString(String);
///
/// impl PartialEq for CaseInsensitiveString {
/// fn eq(&self, other: &Self) -> bool {
/// self.0.eq_ignore_ascii_case(&other.0)
/// }
/// }
///
/// impl Eq for CaseInsensitiveString { }
/// ```
///
/// Because two equal values need to produce the same hash value, the
/// implementation of `Hash` needs to ignore ASCII case, too:
///
/// ```
/// # use std::hash::{Hash, Hasher};
/// # pub struct CaseInsensitiveString(String);
/// impl Hash for CaseInsensitiveString {
/// fn hash<H: Hasher>(&self, state: &mut H) {
/// for c in self.0.as_bytes() {
/// c.to_ascii_lowercase().hash(state)
/// }
/// }
/// }
/// ```
///
/// Can `CaseInsensitiveString` implement `Borrow<str>`? It certainly can
/// provide a reference to a string slice via its contained owned string.
/// But because its `Hash` implementation differs, it behaves differently
/// from `str` and therefore must not, in fact, implement `Borrow<str>`.
/// If it wants to allow others access to the underlying `str`, it can do
/// that via `AsRef<str>` which doesn’t carry any extra requirements.
///
/// [`Hash`]: crate::hash::Hash
/// [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
/// [`String`]: ../../std/string/struct.String.html
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Borrow<Borrowed: ?Sized> {
    // `Borrowed: ?Sized` permits unsized borrow targets such as `str` and
    // `[T]`, which is the common case (`String: Borrow<str>`).
    /// Immutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::Borrow;
    ///
    /// fn check<T: Borrow<str>>(s: T) {
    ///     assert_eq!("Hello", s.borrow());
    /// }
    ///
    /// let s = "Hello".to_string();
    ///
    /// check(s);
    ///
    /// let s = "Hello";
    ///
    /// check(s);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow(&self) -> &Borrowed;
}
/// A trait for mutably borrowing data.
///
/// As a companion to [`Borrow<T>`] this trait allows a type to borrow as
/// an underlying type by providing a mutable reference. See [`Borrow<T>`]
/// for more information on borrowing as another type.
///
/// [`Borrow<T>`]: Borrow
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BorrowMut<Borrowed: ?Sized>: Borrow<Borrowed> {
    // The `Borrow<Borrowed>` supertrait guarantees that an immutable borrow
    // is available wherever a mutable one is.
    /// Mutably borrows from an owned value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::BorrowMut;
    ///
    /// fn check<T: BorrowMut<[i32]>>(mut v: T) {
    ///     assert_eq!(&mut [1, 2, 3], v.borrow_mut());
    /// }
    ///
    /// let v = vec![1, 2, 3];
    ///
    /// check(v);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn borrow_mut(&mut self) -> &mut Borrowed;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for T {
    // Reflexive impl: every value can be borrowed as itself.
    fn borrow(&self) -> &T {
        self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for T {
    fn borrow_mut(&mut self) -> &mut T {
        self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for &T {
    // A shared reference borrows as its referent.
    fn borrow(&self) -> &T {
        &**self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Borrow<T> for &mut T {
    // A mutable reference also borrows (immutably) as its referent.
    fn borrow(&self) -> &T {
        &**self
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> BorrowMut<T> for &mut T {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}
|
use runtime::{FuncEnvironment, GlobalValue, ModuleEnvironment};
use translation_utils::{Global, Memory, Table, GlobalIndex, TableIndex, SignatureIndex,
FunctionIndex, MemoryIndex};
use func_translator::FuncTranslator;
use cretonne::ir::{self, InstBuilder};
use cretonne::ir::types::*;
use cretonne::cursor::FuncCursor;
use cretonne::settings;
use wasmparser;
use std::error::Error;
/// Compute a `ir::FunctionName` for a given wasm function index.
fn get_func_name(func_index: FunctionIndex) -> ir::FunctionName {
    // Symbols are named after the function's index, rendered in hex.
    let symbol = format!("wasm_0x{:x}", func_index);
    ir::FunctionName::new(symbol)
}
/// A collection of names under which a given entity is exported.
pub struct Exportable<T> {
    /// A wasm entity.
    pub entity: T,
    /// Names under which the entity is exported.
    pub export_names: Vec<String>,
}
impl<T> Exportable<T> {
    /// Wraps `entity` with an initially empty list of export names.
    pub fn new(entity: T) -> Self {
        Self {
            entity,
            export_names: Vec::new(),
        }
    }
}
/// The main state belonging to a `DummyEnvironment`. This is split out from
/// `DummyEnvironment` to allow it to be borrowed separately from the
/// `FuncTranslator` field.
pub struct DummyModuleInfo {
    /// Compilation setting flags.
    pub flags: settings::Flags,
    /// Signatures as provided by `declare_signature`.
    pub signatures: Vec<ir::Signature>,
    /// Module and field names of imported functions as provided by `declare_func_import`.
    pub imported_funcs: Vec<(String, String)>,
    /// Functions, imported and local. Imported functions come first
    /// (see `declare_func_import`).
    pub functions: Vec<Exportable<SignatureIndex>>,
    /// Function bodies.
    pub function_bodies: Vec<ir::Function>,
    /// Tables as provided by `declare_table`.
    pub tables: Vec<Exportable<Table>>,
    /// Memories as provided by `declare_memory`.
    pub memories: Vec<Exportable<Memory>>,
    /// Globals as provided by `declare_global`.
    pub globals: Vec<Exportable<Global>>,
    /// The start function.
    pub start_func: Option<FunctionIndex>,
}
impl DummyModuleInfo {
    /// Allocates the runtime data structures with the given flags.
    pub fn with_flags(flags: settings::Flags) -> Self {
        Self {
            flags,
            signatures: Vec::new(),
            imported_funcs: Vec::new(),
            functions: Vec::new(),
            function_bodies: Vec::new(),
            tables: Vec::new(),
            memories: Vec::new(),
            globals: Vec::new(),
            start_func: None,
        }
    }
}
/// This runtime implementation is a "naïve" one, doing essentially nothing and emitting
/// placeholders when forced to. Don't try to execute code translated with this runtime, it is
/// essentially here for translation debug purposes.
pub struct DummyEnvironment {
    /// Module information.
    pub info: DummyModuleInfo,
    /// Function translation.
    trans: FuncTranslator,
}
impl DummyEnvironment {
    /// Allocates the runtime data structures with default flags.
    ///
    /// Note: this is an inherent method, not `std::default::Default`.
    pub fn default() -> Self {
        Self::with_flags(settings::Flags::new(&settings::builder()))
    }
    /// Allocates the runtime data structures with the given flags.
    pub fn with_flags(flags: settings::Flags) -> Self {
        Self {
            info: DummyModuleInfo::with_flags(flags),
            trans: FuncTranslator::new(),
        }
    }
    /// Return a `DummyFuncEnvironment` for translating functions within this
    /// `DummyEnvironment`.
    pub fn func_env(&self) -> DummyFuncEnvironment {
        DummyFuncEnvironment::new(&self.info)
    }
}
/// The FuncEnvironment implementation for use by the DummyEnvironment.
pub struct DummyFuncEnvironment<'dummy_environment> {
    /// Module information borrowed from the enclosing `DummyEnvironment`.
    pub mod_info: &'dummy_environment DummyModuleInfo,
}
impl<'dummy_environment> DummyFuncEnvironment<'dummy_environment> {
    /// Creates a function environment borrowing the given module info.
    pub fn new(mod_info: &'dummy_environment DummyModuleInfo) -> Self {
        Self { mod_info }
    }
}
impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environment> {
    fn flags(&self) -> &settings::Flags {
        &self.mod_info.flags
    }
    fn make_global(&mut self, func: &mut ir::Function, index: GlobalIndex) -> GlobalValue {
        // Just create a dummy `vmctx` global.
        // Globals are laid out at 8-byte offsets from vmctx, starting at 8.
        let offset = ((index * 8) as i32 + 8).into();
        let gv = func.create_global_var(ir::GlobalVarData::VmCtx { offset });
        GlobalValue::Memory {
            gv,
            ty: self.mod_info.globals[index].entity.ty,
        }
    }
    fn make_heap(&mut self, func: &mut ir::Function, _index: MemoryIndex) -> ir::Heap {
        // Static heap based off a reserved register: bound 0x1_0000_0000
        // (4 GiB) with a 0x8000_0000 (2 GiB) guard region and no minimum size.
        func.create_heap(ir::HeapData {
            base: ir::HeapBase::ReservedReg,
            min_size: 0.into(),
            guard_size: 0x8000_0000.into(),
            style: ir::HeapStyle::Static { bound: 0x1_0000_0000.into() },
        })
    }
    fn make_indirect_sig(&mut self, func: &mut ir::Function, index: SignatureIndex) -> ir::SigRef {
        // A real implementation would probably change the calling convention and add `vmctx` and
        // signature index arguments.
        func.import_signature(self.mod_info.signatures[index].clone())
    }
    fn make_direct_func(&mut self, func: &mut ir::Function, index: FunctionIndex) -> ir::FuncRef {
        let sigidx = self.mod_info.functions[index].entity;
        // A real implementation would probably add a `vmctx` argument.
        // And maybe attempt some signature de-duplication.
        let signature = func.import_signature(self.mod_info.signatures[sigidx].clone());
        let name = get_func_name(index);
        func.import_function(ir::ExtFuncData { name, signature })
    }
    fn translate_call_indirect(
        &mut self,
        mut pos: FuncCursor,
        _table_index: TableIndex,
        _sig_index: SignatureIndex,
        sig_ref: ir::SigRef,
        callee: ir::Value,
        call_args: &[ir::Value],
    ) -> ir::Inst {
        // No bounds/signature checks: emit the call directly.
        pos.ins().call_indirect(sig_ref, callee, call_args)
    }
    fn translate_grow_memory(
        &mut self,
        mut pos: FuncCursor,
        _index: MemoryIndex,
        _heap: ir::Heap,
        _val: ir::Value,
    ) -> ir::Value {
        // Placeholder: always produce the constant -1.
        pos.ins().iconst(I32, -1)
    }
    fn translate_current_memory(
        &mut self,
        mut pos: FuncCursor,
        _index: MemoryIndex,
        _heap: ir::Heap,
    ) -> ir::Value {
        // Placeholder: always produce the constant -1.
        pos.ins().iconst(I32, -1)
    }
}
impl ModuleEnvironment for DummyEnvironment {
/// Delegates to the free `get_func_name` to compute the symbol name.
fn get_func_name(&self, func_index: FunctionIndex) -> ir::FunctionName {
    get_func_name(func_index)
}
/// Records a signature exactly as declared.
fn declare_signature(&mut self, sig: &ir::Signature) {
    self.info.signatures.push(sig.clone());
}
/// Returns a previously declared signature.
fn get_signature(&self, sig_index: SignatureIndex) -> &ir::Signature {
    &self.info.signatures[sig_index]
}
/// Records an imported function. All imports must be declared before any
/// local function so that `functions` and `imported_funcs` stay index-aligned.
fn declare_func_import<'data>(
    &mut self,
    sig_index: SignatureIndex,
    module: &'data str,
    field: &'data str,
) {
    assert_eq!(
        self.info.functions.len(),
        self.info.imported_funcs.len(),
        "Imported functions must be declared first"
    );
    self.info.functions.push(Exportable::new(sig_index));
    self.info.imported_funcs.push((
        String::from(module),
        String::from(field),
    ));
}
fn get_num_func_imports(&self) -> usize {
self.info.imported_funcs.len()
}
fn declare_func_type(&mut self, sig_index: SignatureIndex) {
self.info.functions.push(Exportable::new(sig_index));
}
fn get_func_type(&self, func_index: FunctionIndex) -> SignatureIndex {
self.info.functions[func_index].entity
}
fn declare_global(&mut self, global: Global) {
self.info.globals.push(Exportable::new(global));
}
fn get_global(&self, global_index: GlobalIndex) -> &Global {
&self.info.globals[global_index].entity
}
fn declare_table(&mut self, table: Table) {
self.info.tables.push(Exportable::new(table));
}
fn declare_table_elements(
&mut self,
_table_index: TableIndex,
_base: Option<GlobalIndex>,
_offset: usize,
_elements: &[FunctionIndex],
) {
// We do nothing
}
fn declare_memory(&mut self, memory: Memory) {
self.info.memories.push(Exportable::new(memory));
}
fn declare_data_initialization<'data>(
&mut self,
_memory_index: MemoryIndex,
_base: Option<GlobalIndex>,
_offset: usize,
_data: &'data [u8],
) {
// We do nothing
}
fn declare_func_export<'data>(&mut self, func_index: FunctionIndex, name: &'data str) {
self.info.functions[func_index].export_names.push(
String::from(
name,
),
);
}
fn declare_table_export<'data>(&mut self, table_index: TableIndex, name: &'data str) {
self.info.tables[table_index].export_names.push(
String::from(name),
);
}
fn declare_memory_export<'data>(&mut self, memory_index: MemoryIndex, name: &'data str) {
self.info.memories[memory_index].export_names.push(
String::from(
name,
),
);
}
fn declare_global_export<'data>(&mut self, global_index: GlobalIndex, name: &'data str) {
self.info.globals[global_index].export_names.push(
String::from(
name,
),
);
}
fn declare_start_func(&mut self, func_index: FunctionIndex) {
debug_assert!(self.info.start_func.is_none());
self.info.start_func = Some(func_index);
}
/// Provides the contents of a function body.
fn define_function_body<'data>(&mut self, body_bytes: &'data [u8]) -> Result<(), String> {
let function_index = self.get_num_func_imports() + self.info.function_bodies.len();
let name = get_func_name(function_index);
let sig = self.get_signature(self.get_func_type(function_index))
.clone();
let mut func = ir::Function::with_name_signature(name, sig);
{
let mut func_environ = DummyFuncEnvironment::new(&self.info);
let reader = wasmparser::BinaryReader::new(body_bytes);
self.trans
.translate_from_reader(reader, &mut func, &mut func_environ)
.map_err(|e| String::from(e.description()))?;
}
self.info.function_bodies.push(func);
Ok(())
}
}
Minor comment cleanup.
use runtime::{FuncEnvironment, GlobalValue, ModuleEnvironment};
use translation_utils::{Global, Memory, Table, GlobalIndex, TableIndex, SignatureIndex,
FunctionIndex, MemoryIndex};
use func_translator::FuncTranslator;
use cretonne::ir::{self, InstBuilder};
use cretonne::ir::types::*;
use cretonne::cursor::FuncCursor;
use cretonne::settings;
use wasmparser;
use std::error::Error;
/// Compute a `ir::FunctionName` for a given wasm function index.
///
/// The name is the index rendered in lowercase hex with a `wasm_0x` prefix,
/// e.g. index 255 becomes `wasm_0xff`.
fn get_func_name(func_index: FunctionIndex) -> ir::FunctionName {
ir::FunctionName::new(format!("wasm_0x{:x}", func_index))
}
/// Pairs a wasm entity with every name under which it is exported.
pub struct Exportable<T> {
    /// The wrapped wasm entity.
    pub entity: T,
    /// All export names bound to this entity; empty if it is not exported.
    pub export_names: Vec<String>,
}
impl<T> Exportable<T> {
    /// Wraps `entity` with an initially empty set of export names.
    pub fn new(entity: T) -> Self {
        Exportable { entity, export_names: vec![] }
    }
}
/// The main state belonging to a `DummyEnvironment`. This is split out from
/// `DummyEnvironment` to allow it to be borrowed separately from the
/// `FuncTranslator` field.
pub struct DummyModuleInfo {
/// Compilation setting flags.
pub flags: settings::Flags,
/// Signatures as provided by `declare_signature`.
pub signatures: Vec<ir::Signature>,
/// Module and field names of imported functions as provided by `declare_func_import`.
pub imported_funcs: Vec<(String, String)>,
/// Functions, imported and local.
pub functions: Vec<Exportable<SignatureIndex>>,
/// Function bodies.
pub function_bodies: Vec<ir::Function>,
/// Tables as provided by `declare_table`.
pub tables: Vec<Exportable<Table>>,
/// Memories as provided by `declare_memory`.
pub memories: Vec<Exportable<Memory>>,
/// Globals as provided by `declare_global`.
pub globals: Vec<Exportable<Global>>,
/// The start function.
pub start_func: Option<FunctionIndex>,
}
impl DummyModuleInfo {
/// Allocates the runtime data structures with the given flags.
///
/// Every collection starts empty; entities are appended by the
/// `ModuleEnvironment` `declare_*` callbacks as the module is parsed.
pub fn with_flags(flags: settings::Flags) -> Self {
Self {
flags,
signatures: Vec::new(),
imported_funcs: Vec::new(),
functions: Vec::new(),
function_bodies: Vec::new(),
tables: Vec::new(),
memories: Vec::new(),
globals: Vec::new(),
start_func: None,
}
}
}
/// This runtime implementation is a "naïve" one, doing essentially nothing and emitting
/// placeholders when forced to. Don't try to execute code translated with this runtime, it is
/// essentially here for translation debug purposes.
pub struct DummyEnvironment {
/// Module information.
pub info: DummyModuleInfo,
/// Function translation.
trans: FuncTranslator,
}
impl DummyEnvironment {
/// Allocates the runtime data structures with default flags.
// NOTE(review): this is an inherent constructor, not an impl of
// `std::default::Default`; implementing the trait instead would be idiomatic.
pub fn default() -> Self {
Self::with_flags(settings::Flags::new(&settings::builder()))
}
/// Allocates the runtime data structures with the given flags.
pub fn with_flags(flags: settings::Flags) -> Self {
Self {
info: DummyModuleInfo::with_flags(flags),
trans: FuncTranslator::new(),
}
}
/// Return a `DummyFuncEnvironment` for translating functions within this
/// `DummyEnvironment`.
pub fn func_env(&self) -> DummyFuncEnvironment {
DummyFuncEnvironment::new(&self.info)
}
}
/// The FuncEnvironment implementation for use by the `DummyEnvironment`.
pub struct DummyFuncEnvironment<'dummy_environment> {
/// Read-only view of the module being translated.
pub mod_info: &'dummy_environment DummyModuleInfo,
}
impl<'dummy_environment> DummyFuncEnvironment<'dummy_environment> {
/// Creates a function environment borrowing the given module info.
pub fn new(mod_info: &'dummy_environment DummyModuleInfo) -> Self {
Self { mod_info }
}
}
impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environment> {
/// Returns the compilation flags shared with the module environment.
fn flags(&self) -> &settings::Flags {
&self.mod_info.flags
}
/// Sets up a wasm global as memory relative to the `vmctx` pointer.
fn make_global(&mut self, func: &mut ir::Function, index: GlobalIndex) -> GlobalValue {
// Just create a dummy `vmctx` global.
// Each global is given an 8-byte slot, starting at offset 8.
let offset = ((index * 8) as i32 + 8).into();
let gv = func.create_global_var(ir::GlobalVarData::VmCtx { offset });
GlobalValue::Memory {
gv,
ty: self.mod_info.globals[index].entity.ty,
}
}
/// Sets up a dummy static heap based at a reserved register.
fn make_heap(&mut self, func: &mut ir::Function, _index: MemoryIndex) -> ir::Heap {
func.create_heap(ir::HeapData {
base: ir::HeapBase::ReservedReg,
min_size: 0.into(),
// 2 GiB guard region followed by a 4 GiB static bound.
guard_size: 0x8000_0000.into(),
style: ir::HeapStyle::Static { bound: 0x1_0000_0000.into() },
})
}
/// Creates an `ir::SigRef` usable for indirect calls with the given wasm
/// signature index; this dummy version imports the signature unchanged.
fn make_indirect_sig(&mut self, func: &mut ir::Function, index: SignatureIndex) -> ir::SigRef {
// A real implementation would probably change the calling convention and add `vmctx` and
// signature index arguments.
func.import_signature(self.mod_info.signatures[index].clone())
}
/// Creates an `ir::FuncRef` for a direct call to the indexed function.
fn make_direct_func(&mut self, func: &mut ir::Function, index: FunctionIndex) -> ir::FuncRef {
let sigidx = self.mod_info.functions[index].entity;
// A real implementation would probably add a `vmctx` argument.
// And maybe attempt some signature de-duplication.
let signature = func.import_signature(self.mod_info.signatures[sigidx].clone());
let name = get_func_name(index);
func.import_function(ir::ExtFuncData { name, signature })
}
/// Emits a plain indirect call; table and signature indices are ignored here.
fn translate_call_indirect(
&mut self,
mut pos: FuncCursor,
_table_index: TableIndex,
_sig_index: SignatureIndex,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> ir::Inst {
pos.ins().call_indirect(sig_ref, callee, call_args)
}
/// Placeholder for wasm `grow_memory`: always emits the constant -1.
fn translate_grow_memory(
&mut self,
mut pos: FuncCursor,
_index: MemoryIndex,
_heap: ir::Heap,
_val: ir::Value,
) -> ir::Value {
pos.ins().iconst(I32, -1)
}
/// Placeholder for wasm `current_memory`: always emits the constant -1.
fn translate_current_memory(
&mut self,
mut pos: FuncCursor,
_index: MemoryIndex,
_heap: ir::Heap,
) -> ir::Value {
pos.ins().iconst(I32, -1)
}
}
impl ModuleEnvironment for DummyEnvironment {
/// Returns the synthesized name for the function with the given index.
fn get_func_name(&self, func_index: FunctionIndex) -> ir::FunctionName {
get_func_name(func_index)
}
/// Records a signature declared by the module.
fn declare_signature(&mut self, sig: &ir::Signature) {
self.info.signatures.push(sig.clone());
}
/// Returns a previously declared signature.
fn get_signature(&self, sig_index: SignatureIndex) -> &ir::Signature {
&self.info.signatures[sig_index]
}
/// Records an imported function together with its module/field names.
fn declare_func_import<'data>(
&mut self,
sig_index: SignatureIndex,
module: &'data str,
field: &'data str,
) {
// Imports precede local functions in the index space, so the two vectors
// must still be in lock-step when an import arrives.
assert_eq!(
self.info.functions.len(),
self.info.imported_funcs.len(),
"Imported functions must be declared first"
);
self.info.functions.push(Exportable::new(sig_index));
self.info.imported_funcs.push((
String::from(module),
String::from(field),
));
}
/// Returns the number of imported (as opposed to locally defined) functions.
fn get_num_func_imports(&self) -> usize {
self.info.imported_funcs.len()
}
/// Records the signature index of a locally defined function.
fn declare_func_type(&mut self, sig_index: SignatureIndex) {
self.info.functions.push(Exportable::new(sig_index));
}
/// Returns the signature index of the given function (imported or local).
fn get_func_type(&self, func_index: FunctionIndex) -> SignatureIndex {
self.info.functions[func_index].entity
}
/// Records a global variable declared by the module.
fn declare_global(&mut self, global: Global) {
self.info.globals.push(Exportable::new(global));
}
/// Returns a previously declared global.
fn get_global(&self, global_index: GlobalIndex) -> &Global {
&self.info.globals[global_index].entity
}
/// Records a table declared by the module.
fn declare_table(&mut self, table: Table) {
self.info.tables.push(Exportable::new(table));
}
/// Table element segments are ignored by this dummy environment.
fn declare_table_elements(
&mut self,
_table_index: TableIndex,
_base: Option<GlobalIndex>,
_offset: usize,
_elements: &[FunctionIndex],
) {
// We do nothing
}
/// Records a linear memory declared by the module.
fn declare_memory(&mut self, memory: Memory) {
self.info.memories.push(Exportable::new(memory));
}
/// Data segments are ignored by this dummy environment.
fn declare_data_initialization<'data>(
&mut self,
_memory_index: MemoryIndex,
_base: Option<GlobalIndex>,
_offset: usize,
_data: &'data [u8],
) {
// We do nothing
}
/// Records an export name for a function.
fn declare_func_export<'data>(&mut self, func_index: FunctionIndex, name: &'data str) {
self.info.functions[func_index].export_names.push(
String::from(
name,
),
);
}
/// Records an export name for a table.
fn declare_table_export<'data>(&mut self, table_index: TableIndex, name: &'data str) {
self.info.tables[table_index].export_names.push(
String::from(name),
);
}
/// Records an export name for a memory.
fn declare_memory_export<'data>(&mut self, memory_index: MemoryIndex, name: &'data str) {
self.info.memories[memory_index].export_names.push(
String::from(
name,
),
);
}
/// Records an export name for a global.
fn declare_global_export<'data>(&mut self, global_index: GlobalIndex, name: &'data str) {
self.info.globals[global_index].export_names.push(
String::from(
name,
),
);
}
/// Records the module's start function; must be declared at most once.
fn declare_start_func(&mut self, func_index: FunctionIndex) {
debug_assert!(self.info.start_func.is_none());
self.info.start_func = Some(func_index);
}
/// Provides the contents of a function body.
///
/// Translates the wasm body bytes into a Cretonne `ir::Function` and appends
/// it to `function_bodies`. Returns a descriptive error string on failure.
fn define_function_body<'data>(&mut self, body_bytes: &'data [u8]) -> Result<(), String> {
// Local function indices follow the imports in the shared index space.
let function_index = self.get_num_func_imports() + self.info.function_bodies.len();
let name = get_func_name(function_index);
let sig = self.get_signature(self.get_func_type(function_index))
.clone();
let mut func = ir::Function::with_name_signature(name, sig);
{
// Inner scope bounds the borrow of `self.info` taken by the environment.
let mut func_environ = DummyFuncEnvironment::new(&self.info);
let reader = wasmparser::BinaryReader::new(body_bytes);
self.trans
.translate_from_reader(reader, &mut func, &mut func_environ)
.map_err(|e| String::from(e.description()))?;
}
self.info.function_bodies.push(func);
Ok(())
}
}
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use toml_query::read::TomlValueReadExt;
use toml_query::set::TomlValueSetExt;
use error::Result;
use error::NoteErrorKind as NEK;
use error::NoteError as NE;
use error::ResultExt;
/// Accessors for the note data stored in a store `Entry`: the name lives in the
/// entry header under `note.name`, the text is the entry content.
pub trait Note {
/// Stores `n` as the note name in the entry header.
fn set_name(&mut self, n: String) -> Result<()>;
/// Reads the note name from the entry header.
fn get_name(&self) -> Result<String>;
/// Replaces the note text (the entry content).
fn set_text(&mut self, n: String);
/// Returns the note text (the entry content).
fn get_text(&self) -> &String;
}
impl Note for Entry {
    /// Stores `n` in the entry header at `note.name`.
    fn set_name(&mut self, n: String) -> Result<()> {
        self.get_header_mut()
            .set("note.name", Value::String(n))
            .chain_err(|| NEK::StoreWriteError)
            .map(|_| ())
    }
    /// Reads the note name from the entry header.
    ///
    /// Returns `HeaderTypeError` when the value is missing or not a string,
    /// and `StoreReadError` when the header read itself fails.
    fn get_name(&self) -> Result<String> {
        // Combinator chain instead of a verbose `match`; `?` propagates the
        // read error, the remaining steps narrow `Option<&Value>` to `String`.
        self.get_header()
            .read("note.name")
            .chain_err(|| NEK::StoreReadError)?
            .and_then(Value::as_str)
            .map(String::from)
            // `ok_or_else` defers building the error to the failure path only.
            .ok_or_else(|| NE::from_kind(NEK::HeaderTypeError))
    }
    /// Replaces the note text (the entry content).
    fn set_text(&mut self, n: String) {
        *self.get_content_mut() = n
    }
    /// Returns the note text (the entry content).
    fn get_text(&self) -> &String {
        self.get_content()
    }
}
Refactoring: Use function chaining rather than matching
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use toml_query::read::TomlValueReadExt;
use toml_query::set::TomlValueSetExt;
use error::Result;
use error::NoteErrorKind as NEK;
use error::NoteError as NE;
use error::ResultExt;
/// Accessors for the note data stored in a store `Entry`: the name lives in the
/// entry header under `note.name`, the text is the entry content.
pub trait Note {
/// Stores `n` as the note name in the entry header.
fn set_name(&mut self, n: String) -> Result<()>;
/// Reads the note name from the entry header.
fn get_name(&self) -> Result<String>;
/// Replaces the note text (the entry content).
fn set_text(&mut self, n: String);
/// Returns the note text (the entry content).
fn get_text(&self) -> &String;
}
impl Note for Entry {
    /// Stores `n` in the entry header at `note.name`.
    fn set_name(&mut self, n: String) -> Result<()> {
        self.get_header_mut()
            .set("note.name", Value::String(n))
            .chain_err(|| NEK::StoreWriteError)
            .map(|_| ())
    }
    /// Reads the note name from the entry header.
    ///
    /// Returns `HeaderTypeError` when the value is missing or not a string,
    /// and `StoreReadError` when the header read itself fails.
    fn get_name(&self) -> Result<String> {
        self.get_header()
            .read("note.name")
            .chain_err(|| NEK::StoreReadError)?
            .and_then(Value::as_str)
            .map(String::from)
            // `ok_or_else` instead of `ok_or`: don't construct the error
            // value eagerly on the success path (clippy `or_fun_call`).
            .ok_or_else(|| NE::from_kind(NEK::HeaderTypeError))
    }
    /// Replaces the note text (the entry content).
    fn set_text(&mut self, n: String) {
        *self.get_content_mut() = n
    }
    /// Returns the note text (the entry content).
    fn get_text(&self) -> &String {
        self.get_content()
    }
}
|
// ignore-tidy-filelength
// ignore-tidy-undocumented-unsafe
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
// How this module is organized.
//
// The library infrastructure for slices is fairly messy. There's
// a lot of stuff defined here. Let's keep it clean.
//
// The layout of this file is thus:
//
// * Inherent methods. This is where most of the slice API resides.
// * Implementations of a few common traits with important slice ops.
// * Definitions of a bunch of iterators.
// * Free functions.
// * The `raw` and `bytes` submodules.
// * Boilerplate trait implementations.
use crate::cmp;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::fmt;
use crate::intrinsics::{assume, exact_div, is_aligned_and_not_null, unchecked_sub};
use crate::iter::*;
use crate::marker::{self, Copy, Send, Sized, Sync};
use crate::mem;
use crate::ops::{self, FnMut, Range};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr::{self, NonNull};
use crate::result::Result;
use crate::result::Result::{Err, Ok};
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod rotate;
mod sort;
//
// Extension traits
//
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.32.0")]
#[inline]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
#[allow(unused_attributes)]
#[allow_internal_unstable(const_fn_union)]
pub const fn len(&self) -> usize {
// The union read extracts the length word of the fat pointer without a
// pointer cast, keeping the body usable in a `const fn`.
unsafe { crate::ptr::Repr { rust: self }.raw.len }
}
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.32.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
// Delegates to the const `len`; empty means exactly zero elements.
self.len() == 0
}
/// Returns a reference to the first element, or `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first(&self) -> Option<&T> {
    // Slice patterns distinguish "empty" from "has at least one element".
    match self {
        [first, ..] => Some(first),
        [] => None,
    }
}
/// Returns a mutable reference to the first element of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
///     *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first_mut(&mut self) -> Option<&mut T> {
    // Slice patterns distinguish "empty" from "has at least one element".
    match self {
        [first, ..] => Some(first),
        [] => None,
    }
}
/// Splits the slice into its first element and the remaining tail, or returns
/// `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
///     assert_eq!(first, &0);
///     assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first(&self) -> Option<(&T, &[T])> {
    match self {
        [first, tail @ ..] => Some((first, tail)),
        [] => None,
    }
}
/// Splits the slice into a mutable reference to its first element and the
/// remaining mutable tail, or returns `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
///     *first = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    match self {
        [first, tail @ ..] => Some((first, tail)),
        [] => None,
    }
}
/// Splits the slice into its last element and the preceding elements, or
/// returns `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
///     assert_eq!(last, &2);
///     assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last(&self) -> Option<(&T, &[T])> {
    match self {
        [init @ .., last] => Some((last, init)),
        [] => None,
    }
}
/// Splits the slice into a mutable reference to its last element and the
/// preceding mutable elements, or returns `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
///     *last = 3;
///     elements[0] = 4;
///     elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
    match self {
        [init @ .., last] => Some((last, init)),
        [] => None,
    }
}
/// Returns a reference to the last element, or `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last(&self) -> Option<&T> {
    match self {
        [.., last] => Some(last),
        [] => None,
    }
}
/// Returns a mutable reference to the last item in the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
///     *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last_mut(&mut self) -> Option<&mut T> {
    match self {
        [.., last] => Some(last),
        [] => None,
    }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
///   position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
///   or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
// All the checked-indexing logic lives in the `SliceIndex` impls.
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: #method.get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
///     *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
// All the checked-indexing logic lives in the `SliceIndex` impls.
index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// This is generally not recommended, use with caution!
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
/// For a safe alternative see [`get`].
///
/// # Safety
///
/// `index` must be in bounds for `self`; otherwise the call is
/// *[undefined behavior]*.
///
/// [`get`]: #method.get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
///     assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: SliceIndex<Self>,
{
// SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
// the slice is dereferencable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*index.get_unchecked(self) }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// This is generally not recommended, use with caution!
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
/// For a safe alternative see [`get_mut`].
///
/// # Safety
///
/// `index` must be in bounds for `self`; otherwise the call is
/// *[undefined behavior]*.
///
/// [`get_mut`]: #method.get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
///     let elem = x.get_unchecked_mut(1);
///     *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: SliceIndex<Self>,
{
// SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
// the slice is dereferencable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &mut *index.get_unchecked_mut(self) }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
///     }
/// }
/// ```
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
// Casting the fat `*const [T]` down to a thin `*const T` keeps the data
// address and discards the length metadata.
self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
///     for i in 0..x.len() {
///         *x_ptr.add(i) += 2;
///     }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
// Casting the fat `*mut [T]` down to a thin `*mut T` keeps the data
// address and discards the length metadata.
self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// #![feature(slice_ptr_range)]
///
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: #method.as_ptr
#[unstable(feature = "slice_ptr_range", issue = "65807")]
#[inline]
pub fn as_ptr_range(&self) -> Range<*const T> {
// The `add` here is safe, because:
//
//   - Both pointers are part of the same object, as pointing directly
//     past the object also counts.
//
//   - The size of the slice is never larger than isize::MAX bytes, as
//     noted here:
//       - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
//       - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
//       - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
//     (This doesn't seem normative yet, but the very same assumption is
//     made in many places, including the Index implementation of slices.)
//
//   - There is no wrapping around involved, as slices do not wrap past
//     the end of the address space.
//
// See the documentation of pointer::add.
// The argument above is the SAFETY justification for this unsafe block.
let start = self.as_ptr();
let end = unsafe { start.add(self.len()) };
start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[unstable(feature = "slice_ptr_range", issue = "65807")]
#[inline]
pub fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
// See as_ptr_range() above for why `add` here is safe.
let start = self.as_mut_ptr();
let end = unsafe { start.add(self.len()) };
start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
unsafe {
// Can't take two mutable loans from one vector, so instead just cast
// them to their raw pointers to do the swap
// The `self[a]` / `self[b]` indexing below still performs the bounds
// checks (see # Panics) that keep the raw-pointer swap in bounds.
let pa: *mut T = &mut self[a];
let pb: *mut T = &mut self[b];
ptr::swap(pa, pb);
}
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
let mut i: usize = 0;
let ln = self.len();
// For very small types, all the individual reads in the normal
// path perform poorly. We can do better, given efficient unaligned
// load/store, by loading a larger chunk and reversing a register.
// Ideally LLVM would do this for us, as it knows better than we do
// whether unaligned reads are efficient (since that changes between
// different ARM versions, for example) and what the best chunk size
// would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
// the loop, so we need to do this ourselves. (Hypothesis: reverse
// is troublesome because the sides can be aligned differently --
// will be, when the length is odd -- so there's no way of emitting
// pre- and postludes to use fully-aligned SIMD in the middle.)
let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
// Fast path for 1-byte elements: swap word-sized groups from both ends,
// byte-reversing each group with `swap_bytes`.
if fast_unaligned && mem::size_of::<T>() == 1 {
// Use the llvm.bswap intrinsic to reverse u8s in a usize
let chunk = mem::size_of::<usize>();
// The loop condition keeps both unaligned word accesses within the
// front and back halves respectively, so they never overlap.
while i + chunk - 1 < ln / 2 {
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
let va = ptr::read_unaligned(pa as *mut usize);
let vb = ptr::read_unaligned(pb as *mut usize);
ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
}
i += chunk;
}
}
// Fast path for 2-byte elements: swap u32-sized pairs, reversing the two
// u16 halves of each with a 16-bit rotation.
if fast_unaligned && mem::size_of::<T>() == 2 {
// Use rotate-by-16 to reverse u16s in a u32
let chunk = mem::size_of::<u32>() / 2;
while i + chunk - 1 < ln / 2 {
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
let va = ptr::read_unaligned(pa as *mut u32);
let vb = ptr::read_unaligned(pb as *mut u32);
ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
}
i += chunk;
}
}
// Scalar tail (or the whole slice for other element sizes): swap mirrored
// pairs one at a time until the two indices meet in the middle.
while i < ln / 2 {
// Unsafe swap to avoid the bounds check in safe swap.
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
ptr::swap(pa, pb);
}
i += 1;
}
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
unsafe {
let ptr = self.as_ptr();
// Slice data pointers are never null; telling LLVM lets it optimize
// the iterator's null-pointer niches away.
assume(!ptr.is_null());
// For zero-sized T the element pointer cannot be advanced, so the
// remaining length is encoded into `end` by byte-wise wrapping
// arithmetic instead of element-wise `add`.
let end = if mem::size_of::<T>() == 0 {
(ptr as *const u8).wrapping_add(self.len()) as *const T
} else {
ptr.add(self.len())
};
Iter { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: marker::PhantomData }
}
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
///     *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
    unsafe {
        let ptr = self.as_mut_ptr();
        // Slice data pointers are never null; the assumption lets the
        // optimizer drop null checks in the iterator's hot path.
        assume(!ptr.is_null());
        // Mirror of `iter`: for zero-sized T, `end` tracks the remaining
        // element count as a byte offset rather than a real address, since
        // advancing by a zero-sized stride would never terminate.
        let end = if mem::size_of::<T>() == 0 {
            (ptr as *mut u8).wrapping_add(self.len()) as *mut T
        } else {
            // One-past-the-end pointer of this same allocation.
            ptr.add(self.len())
        };
        IterMut { ptr: NonNull::new_unchecked(ptr), end, _marker: marker::PhantomData }
    }
}
/// Returns an iterator over every contiguous window of `size` elements.
///
/// The windows overlap; a slice shorter than `size` yields nothing.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
    // A zero-length window is meaningless and would never advance.
    assert_ne!(size, 0);
    Windows { size, v: self }
}
/// Returns an iterator over non-overlapping chunks of `chunk_size` elements,
/// starting at the beginning of the slice.
///
/// When `chunk_size` does not divide the slice's length, the final chunk is
/// shorter than `chunk_size`.
///
/// See [`chunks_exact`] for a variant whose chunks always have exactly
/// `chunk_size` elements, and [`rchunks`] for the same iteration starting
/// from the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
/// [`rchunks`]: #method.rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
    // Zero-length chunks would make the iterator loop forever.
    assert_ne!(chunk_size, 0);
    Chunks { chunk_size, v: self }
}
/// Returns an iterator over mutable, non-overlapping chunks of `chunk_size`
/// elements, starting at the beginning of the slice.
///
/// When `chunk_size` does not divide the slice's length, the final chunk is
/// shorter than `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant whose chunks always have exactly
/// `chunk_size` elements, and [`rchunks_mut`] for the same iteration
/// starting from the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
/// [`rchunks_mut`]: #method.rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
    // Zero-length chunks would make the iterator loop forever.
    assert_ne!(chunk_size, 0);
    ChunksMut { chunk_size, v: self }
}
/// Returns an iterator over non-overlapping chunks of exactly `chunk_size`
/// elements, starting at the beginning of the slice.
///
/// When `chunk_size` does not divide the slice's length, the trailing up to
/// `chunk_size-1` elements are not yielded; they are available through the
/// iterator's `remainder` function.
///
/// Because every chunk is guaranteed to have exactly `chunk_size` elements,
/// the compiler can often produce better code than for [`chunks`].
///
/// See [`chunks`] for a variant that also yields the remainder as a shorter
/// final chunk, and [`rchunks_exact`] for the same iteration starting from
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks_exact`]: #method.rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    assert_ne!(chunk_size, 0);
    // Split off the tail that cannot fill a whole chunk; it becomes the
    // iterator's remainder.
    let full_len = self.len() - self.len() % chunk_size;
    let (full, remainder) = self.split_at(full_len);
    ChunksExact { v: full, rem: remainder, chunk_size }
}
/// Returns an iterator over mutable, non-overlapping chunks of exactly
/// `chunk_size` elements, starting at the beginning of the slice.
///
/// When `chunk_size` does not divide the slice's length, the trailing up to
/// `chunk_size-1` elements are not yielded; they are available through the
/// iterator's `into_remainder` function.
///
/// Because every chunk is guaranteed to have exactly `chunk_size` elements,
/// the compiler can often produce better code than for [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant that also yields the remainder as a
/// shorter final chunk, and [`rchunks_exact_mut`] for the same iteration
/// starting from the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    assert_ne!(chunk_size, 0);
    // Split off the tail that cannot fill a whole chunk; it becomes the
    // iterator's remainder.
    let full_len = self.len() - self.len() % chunk_size;
    let (full, remainder) = self.split_at_mut(full_len);
    ChunksExactMut { v: full, rem: remainder, chunk_size }
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `N` does not divide the length of the
/// slice, then the last up to `N-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // Runtime check until a compile-time `N != 0` bound is expressible.
    assert_ne!(N, 0);
    // Number of complete N-element chunks; the `len % N` trailing elements
    // become the iterator's remainder.
    let len = self.len() / N;
    let (fst, snd) = self.split_at(len * N);
    // SAFETY: We cast a slice of `len * N` elements into
    // a slice of `len` many `N` elements chunks.
    // `[T; N]` has the same layout as N consecutive `T`s, and `fst` was
    // split to exactly `len * N` elements above.
    let array_slice: &[[T; N]] = unsafe { from_raw_parts(fst.as_ptr().cast(), len) };
    ArrayChunks { iter: array_slice.iter(), rem: snd }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: #method.rchunks_exact
/// [`chunks`]: #method.chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
    // Zero-length chunks would make the iterator loop forever. Use
    // `assert_ne!` to match the forward `chunks` family and to report
    // the offending value on panic.
    assert_ne!(chunk_size, 0);
    RChunks { v: self, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
/// [`chunks_mut`]: #method.chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // Zero-length chunks would make the iterator loop forever. Use
    // `assert_ne!` to match the forward `chunks_mut` and to report the
    // offending value on panic.
    assert_ne!(chunk_size, 0);
    RChunksMut { v: self, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks`]: #method.rchunks
/// [`chunks_exact`]: #method.chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // `assert_ne!` for consistency with `chunks_exact`, and to report the
    // offending value on panic.
    assert_ne!(chunk_size, 0);
    // Working back from the end, the first `len % chunk_size` elements
    // cannot fill a chunk, so they form the remainder.
    let rem = self.len() % chunk_size;
    let (fst, snd) = self.split_at(rem);
    RChunksExact { v: snd, rem: fst, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
///     for elem in chunk.iter_mut() {
///         *elem += count;
///     }
///     count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_mut`]: #method.rchunks_mut
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // `assert_ne!` for consistency with `chunks_exact_mut`, and to report
    // the offending value on panic.
    assert_ne!(chunk_size, 0);
    // Working back from the end, the first `len % chunk_size` elements
    // cannot fill a chunk, so they form the remainder.
    let rem = self.len() % chunk_size;
    let (fst, snd) = self.split_at_mut(rem);
    RChunksExactMut { v: snd, rem: fst, chunk_size }
}
/// Divides one slice into two at an index.
///
/// The first half covers the indices `[0, mid)` and the second half covers
/// `[mid, len)`; `mid` itself begins the second half.
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
///     let (left, right) = v.split_at(0);
///     assert!(left == []);
///     assert!(right == [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(2);
///     assert!(left == [1, 2]);
///     assert!(right == [3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = v.split_at(6);
///     assert!(left == [1, 2, 3, 4, 5, 6]);
///     assert!(right == []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    // Range indexing performs the bounds check and panics when `mid > len`.
    let head = &self[..mid];
    let tail = &self[mid..];
    (head, tail)
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// {
///     let (left, right) = v.split_at_mut(2);
///     assert!(left == [1, 0]);
///     assert!(right == [3, 0, 5, 6]);
///     left[1] = 2;
///     right[1] = 4;
/// }
/// assert!(v == [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    let ptr = self.as_mut_ptr();
    // Check the precondition outside the unsafe block so the unsafe span
    // covers only the raw-pointer work that actually needs it.
    assert!(mid <= len);
    // SAFETY: `mid <= len` was just asserted, so `[0, mid)` and
    // `[mid, len)` both lie within the slice and do not overlap, and
    // `ptr.add(mid)` stays in bounds of the same allocation. The two
    // mutable reborrows are therefore disjoint.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Returns an iterator over the subslices that lie between elements matching
/// `pred`. The matching elements themselves are not included in any subslice.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// A match at the very start yields a leading empty subslice, and a match at
/// the very end yields a trailing empty subslice:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// Two adjacent matches produce an empty subslice between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    Split { finished: false, v: self, pred }
}
/// Returns an iterator over the mutable subslices that lie between elements
/// matching `pred`. The matching elements themselves are not included in any
/// subslice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    SplitMut { finished: false, v: self, pred }
}
/// Returns an iterator over subslices separated by elements matching `pred`,
/// where each matching element is kept as the final element (terminator) of
/// the subslice preceding it.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// A match on the final element terminates the preceding subslice, which is
/// then the iterator's last item — no trailing empty subslice is produced.
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    SplitInclusive { finished: false, v: self, pred }
}
/// Returns an iterator over mutable subslices separated by elements matching
/// `pred`, where each matching element is kept as the final element
/// (terminator) of the subslice preceding it.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
///     let terminator_idx = group.len()-1;
///     group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    SplitInclusiveMut { finished: false, v: self, pred }
}
/// Returns an iterator over the subslices between elements matching `pred`,
/// walking from the end of the slice toward the front. The matching elements
/// are not included in any subslice.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, a match at either end of the slice produces an empty
/// subslice as the first (or last) iterator item.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reuse the forward splitter; `RSplit` just drives it from the back.
    let inner = self.split(pred);
    RSplit { inner }
}
/// Returns an iterator over the mutable subslices between elements matching
/// `pred`, walking from the end of the slice toward the front. The matching
/// elements are not included in any subslice.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
///     count += 1;
///     group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reuse the forward splitter; `RSplitMut` just drives it from the back.
    let inner = self.split_mut(pred);
    RSplitMut { inner }
}
/// Returns an iterator over the subslices between elements matching `pred`,
/// yielding at most `n` items. Matching elements are not included in the
/// subslices.
///
/// If it is produced, the final item holds everything left of the slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
///     println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the unbounded splitter with a countdown of `n` items.
    let inner = GenericSplitN { iter: self.split(pred), count: n };
    SplitN { inner }
}
/// Returns an iterator over the mutable subslices between elements matching
/// `pred`, yielding at most `n` items. Matching elements are not included in
/// the subslices.
///
/// If it is produced, the final item holds everything left of the slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the unbounded splitter with a countdown of `n` items.
    let inner = GenericSplitN { iter: self.split_mut(pred), count: n };
    SplitNMut { inner }
}
/// Returns an iterator over the subslices between elements matching `pred`,
/// yielding at most `n` items and walking from the end of the slice toward
/// the front. Matching elements are not included in the subslices.
///
/// If it is produced, the final item holds everything left of the slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
///     println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the reverse splitter with a countdown of `n` items.
    let inner = GenericSplitN { iter: self.rsplit(pred), count: n };
    RSplitN { inner }
}
/// Returns an iterator over the mutable subslices between elements matching
/// `pred`, yielding at most `n` items and walking from the end of the slice
/// toward the front. Matching elements are not included in the subslices.
///
/// If it is produced, the final item holds everything left of the slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
///     group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Wrap the reverse splitter with a countdown of `n` items.
    let inner = GenericSplitN { iter: self.rsplit_mut(pred), count: n };
    RSplitNMut { inner }
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have an `&T`, but just an `&U` such that `T: Borrow<U>`
/// (e.g. `String: Borrow<str>`), you can use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Delegates to the `slice_contains` helper defined elsewhere in this
    // module rather than a plain `iter().any(..)` — presumably so some
    // element types can provide a faster search; NOTE(review): confirm at
    // the `slice_contains` definition.
    x.slice_contains(self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// An empty `needle` is a prefix of every slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `get(..n)` is `None` exactly when the slice is shorter than the
    // needle, which can never be a match.
    match self.get(..needle.len()) {
        Some(head) => head == needle,
        None => false,
    }
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// An empty `needle` is a suffix of every slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `checked_sub` is `None` exactly when the slice is shorter than the
    // needle, which can never be a match.
    match self.len().checked_sub(needle.len()) {
        Some(start) => &self[start..] == needle,
        None => false,
    }
}
/// Returns a subslice with the prefix removed.
///
/// Yields [`None`] when the slice does not start with `prefix`. An empty
/// `prefix` always matches and returns the whole slice.
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_prefix(&self, prefix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    let n = prefix.len();
    // A prefix longer than the slice can never match.
    if n > self.len() {
        return None;
    }
    let (head, tail) = self.split_at(n);
    if head == prefix { Some(tail) } else { None }
}
/// Returns a subslice with the suffix removed.
///
/// Yields [`None`] when the slice does not end with `suffix`. An empty
/// `suffix` always matches and returns the whole slice.
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_suffix(&self, suffix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    // `checked_sub` fails exactly when the suffix is longer than the
    // slice, which can never match.
    let start = self.len().checked_sub(suffix.len())?;
    let (head, tail) = self.split_at(start);
    if tail == suffix { Some(head) } else { None }
}
/// Binary searches this sorted slice for a given element.
///
/// On success, [`Result::Ok`] holds the index of a matching element; when
/// several elements match, any one of their indices may be returned. On
/// failure, [`Result::Err`] holds the index at which the element could be
/// inserted to keep the slice sorted.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13),  Ok(9));
/// assert_eq!(s.binary_search(&4),   Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Thin wrapper: compare each probe against `x` with its `Ord` impl.
    self.binary_search_by(|probe| Ord::cmp(probe, x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    let s = self;
    let mut size = s.len();
    if size == 0 {
        return Err(0);
    }
    let mut base = 0usize;
    // Branchless bisection: each round halves `size` and conditionally
    // advances `base`, narrowing the candidate window `[base, base + size)`
    // without any data-dependent branch on the comparison result.
    while size > 1 {
        let half = size / 2;
        let mid = base + half;
        // mid is always in [0, size), that means mid is >= 0 and < size.
        // mid >= 0: by definition
        // mid < size: mid = size / 2 + size / 4 + size / 8 ...
        // SAFETY: `mid < base + size <= s.len()`, so the index is in bounds.
        let cmp = f(unsafe { s.get_unchecked(mid) });
        // Keep the lower half when the probe is greater than the target;
        // otherwise the answer is at or above `mid`.
        base = if cmp == Greater { base } else { mid };
        size -= half;
    }
    // base is always in [0, size) because base <= mid.
    // SAFETY: `base` is in bounds by the invariant above.
    let cmp = f(unsafe { s.get_unchecked(base) });
    // On a miss, the insertion point is just after `base` when the element
    // at `base` compares less than the target.
    if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
}
/// Binary searches this sorted slice with a key extraction function.
///
/// The slice is assumed to be sorted by the key — for instance via
/// [`sort_by_key`] with the same extraction function.
///
/// On success, [`Result::Ok`] holds the index of a matching element; when
/// several elements match, any one of their indices may be returned. On
/// failure, [`Result::Err`] holds the index at which a matching element
/// could be inserted to keep the slice sorted.
///
/// [`sort_by_key`]: #method.sort_by_key
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
///          (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
///          (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b),  Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b),   Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a,b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Extract each probe's key and compare it against the target key.
    self.binary_search_by(|element| f(element).cmp(b))
}
/// Sorts the slice, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // `<` on `&T` forwards to `T`'s ordering, giving the strict
    // "is-less" predicate the quicksort driver expects.
    sort::quicksort(self, |x, y| x < y);
}
/// Sorts the slice with a comparator function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all a, b and c):
///
/// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and
/// * transitive, a < b and b < c implies a < c. The same must hold for both == and >.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
F: FnMut(&T, &T) -> Ordering,
{
sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Order elements by comparing freshly extracted keys. Note the key
    // function is re-invoked for every comparison (see doc note above).
    sort::quicksort(self, |x, y| f(x) < f(y));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.partition_at_index(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
///         v == [-5, -3, 1, 2, 4] ||
///         v == [-3, -5, 1, 4, 2] ||
///         v == [-5, -3, 1, 4, 2]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Quickselect with `Ord`'s natural "is-less" predicate.
    let mut f = |a: &T, b: &T| a.lt(b);
    sort::partition_at_index(self, index, &mut f)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.partition_at_index_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
///         v == [2, 4, 1, -3, -5] ||
///         v == [4, 2, 1, -5, -3] ||
///         v == [4, 2, 1, -3, -5]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Quickselect driven by the caller's three-way comparator, reduced
    // to the strict "is-less" predicate the partitioner expects.
    let mut is_less = |x: &T, y: &T| compare(x, y) == Less;
    sort::partition_at_index(self, index, &mut is_less)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.partition_at_index_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
///         v == [1, 2, -3, -5, 4] ||
///         v == [2, 1, -3, 4, -5] ||
///         v == [2, 1, -3, -5, 4]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Quickselect ordered by freshly extracted keys (the key function
    // runs on both operands of every comparison).
    let mut is_less = |x: &T, y: &T| f(x) < f(y);
    sort::partition_at_index(self, index, &mut is_less)
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // "Same bucket" simply means `PartialEq` equality here.
    self.partition_dedup_by(|x, y| x == y)
}
/// Moves all but the first of consecutive elements to the end of the slice satisfying
/// a given equality relation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// The `same_bucket` function is passed references to two elements from the slice and
/// must determine if the elements compare equal. The elements are passed in opposite order
/// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
/// at the end of the slice.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
///
/// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
///
/// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
/// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
where
F: FnMut(&mut T, &mut T) -> bool,
{
// Although we have a mutable reference to `self`, we cannot make
// *arbitrary* changes. The `same_bucket` calls could panic, so we
// must ensure that the slice is in a valid state at all times.
//
// The way that we handle this is by using swaps; we iterate
// over all the elements, swapping as we go so that at the end
// the elements we wish to keep are in the front, and those we
// wish to reject are at the back. We can then split the slice.
// This operation is still `O(n)`.
//
// Example: We start in this state, where `r` represents "next
// read" and `w` represents "next_write`.
//
// r
// +---+---+---+---+---+---+
// | 0 | 1 | 1 | 2 | 3 | 3 |
// +---+---+---+---+---+---+
// w
//
// Comparing self[r] against self[w-1], this is not a duplicate, so
// we swap self[r] and self[w] (no effect as r==w) and then increment both
// r and w, leaving us with:
//
// r
// +---+---+---+---+---+---+
// | 0 | 1 | 1 | 2 | 3 | 3 |
// +---+---+---+---+---+---+
// w
//
// Comparing self[r] against self[w-1], this value is a duplicate,
// so we increment `r` but leave everything else unchanged:
//
// r
// +---+---+---+---+---+---+
// | 0 | 1 | 1 | 2 | 3 | 3 |
// +---+---+---+---+---+---+
// w
//
// Comparing self[r] against self[w-1], this is not a duplicate,
// so swap self[r] and self[w] and advance r and w:
//
// r
// +---+---+---+---+---+---+
// | 0 | 1 | 2 | 1 | 3 | 3 |
// +---+---+---+---+---+---+
// w
//
// Not a duplicate, repeat:
//
// r
// +---+---+---+---+---+---+
// | 0 | 1 | 2 | 3 | 1 | 3 |
// +---+---+---+---+---+---+
// w
//
// Duplicate, advance r. End of slice. Split at w.
let len = self.len();
// Slices of length 0 or 1 cannot contain consecutive duplicates.
if len <= 1 {
return (self, &mut []);
}
let ptr = self.as_mut_ptr();
let mut next_read: usize = 1;
let mut next_write: usize = 1;
// SAFETY: `next_read` stays in `1..len` inside the loop and
// `next_write <= next_read` (it only advances when `next_read` does),
// so every pointer below stays within the slice's allocation. The
// derived references are disjoint: `ptr_read` is at `next_read`,
// `prev_ptr_write` at `next_write - 1`, `ptr_write` at `next_write`,
// and the swap branch only runs when `next_read != next_write`.
unsafe {
// Avoid bounds checks by using raw pointers.
while next_read < len {
let ptr_read = ptr.add(next_read);
let prev_ptr_write = ptr.add(next_write - 1);
if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
if next_read != next_write {
let ptr_write = prev_ptr_write.offset(1);
mem::swap(&mut *ptr_read, &mut *ptr_write);
}
next_write += 1;
}
next_read += 1;
}
}
self.split_at_mut(next_write)
}
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T) -> K,
    K: PartialEq,
{
    // Two elements share a bucket exactly when their extracted keys match.
    self.partition_dedup_by(|x, y| key(x) == key(y))
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
// `k` elements follow the rotation pivot at `mid`.
let k = self.len() - mid;
// SAFETY: the assert above guarantees `mid <= len`, so `p.add(mid)`
// is in-bounds and the pivot has `mid` valid elements before it and
// `k` valid elements after it, as `ptr_rotate` requires.
unsafe {
let p = self.as_mut_ptr();
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
// A right-rotation by `k` is a left-rotation about pivot `len - k`.
let mid = self.len() - k;
// SAFETY: the assert above guarantees `k <= len`, so `mid <= len`,
// `p.add(mid)` is in-bounds, and the pivot has `mid` valid elements
// before it and `k` valid elements after it, as `ptr_rotate` requires.
unsafe {
let p = self.as_mut_ptr();
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// #![feature(slice_fill)]
///
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[unstable(feature = "slice_fill", issue = "70758")]
pub fn fill(&mut self, value: T)
where
    T: Clone,
{
    match self.split_last_mut() {
        Some((last, rest)) => {
            // Clone into every slot except the last...
            for slot in rest {
                slot.clone_from(&value);
            }
            // ...then move `value` into the final slot, saving one clone.
            *last = value;
        }
        // Empty slice: nothing to do.
        None => {}
    }
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` implements `Copy`, it can be more performant to use
/// [`copy_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
///     let (left, right) = slice.split_at_mut(2);
///     left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: #method.copy_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T])
where
T: Clone,
{
assert!(self.len() == src.len(), "destination and source slices have different lengths");
// NOTE: We need to explicitly slice them to the same length
// for bounds checking to be elided, and the optimizer will
// generate memcpy for simple cases (for example T = u8).
let len = self.len();
let src = &src[..len];
// `clone_from` (rather than `clone` + assign) lets `T` reuse any
// resources already held by the destination element.
for i in 0..len {
self[i].clone_from(&src[i]);
}
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
///     let (left, right) = slice.split_at_mut(2);
///     left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: #method.clone_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T])
where
T: Copy,
{
assert_eq!(self.len(), src.len(), "destination and source slices have different lengths");
// SAFETY: both pointers are valid for `self.len()` elements (lengths
// asserted equal above), and `&mut self` / `&src` borrows guarantee
// the two slices do not overlap, as `copy_nonoverlapping` requires.
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
}
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: ops::RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
T: Copy,
{
// Resolve the generic range bounds to a concrete half-open
// `[src_start, src_end)` interval; an exclusive start or inclusive
// end at `usize::MAX` would overflow, which is reported as an
// out-of-range index.
let src_start = match src.start_bound() {
ops::Bound::Included(&n) => n,
ops::Bound::Excluded(&n) => {
n.checked_add(1).unwrap_or_else(|| slice_index_overflow_fail())
}
ops::Bound::Unbounded => 0,
};
let src_end = match src.end_bound() {
ops::Bound::Included(&n) => {
n.checked_add(1).unwrap_or_else(|| slice_index_overflow_fail())
}
ops::Bound::Excluded(&n) => n,
ops::Bound::Unbounded => self.len(),
};
assert!(src_start <= src_end, "src end is before src start");
assert!(src_end <= self.len(), "src is out of bounds");
let count = src_end - src_start;
assert!(dest <= self.len() - count, "dest is out of bounds");
// SAFETY: the asserts above ensure both `src_start + count` and
// `dest + count` are within the slice, and `ptr::copy` (memmove)
// permits the two regions to overlap.
unsafe {
ptr::copy(self.as_ptr().add(src_start), self.as_mut_ptr().add(dest), count);
}
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
///     let (left, right) = slice.split_at_mut(2);
///     left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "swap_with_slice", since = "1.27.0")]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
assert!(self.len() == other.len(), "destination and source slices have different lengths");
// SAFETY: both pointers are valid for `self.len()` elements (lengths
// asserted equal above), and the two `&mut` borrows guarantee the
// slices do not overlap, as `swap_nonoverlapping` requires.
unsafe {
ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
}
}
/// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
///
/// Returns `(us_len, ts_len)`: the number of whole `U`s that fit in the
/// middle slice and the number of `T`s left over for the suffix slice.
fn align_to_offsets<U>(&self) -> (usize, usize) {
// What we gonna do about `rest` is figure out what multiple of `U`s we can put in a
// lowest number of `T`s. And how many `T`s we need for each such "multiple".
//
// Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
// for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
// place of every 3 Ts in the `rest` slice. A bit more complicated.
//
// Formula to calculate this is:
//
// Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
// Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
//
// Expanded and simplified:
//
// Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
// Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
//
// Luckily since all this is constant-evaluated... performance here matters not!
#[inline]
fn gcd(a: usize, b: usize) -> usize {
use crate::intrinsics;
// iterative stein’s algorithm
// We should still make this `const fn` (and revert to recursive algorithm if we do)
// because relying on llvm to consteval all this is… well, it makes me uncomfortable.
//
// SAFETY: `cttz_nonzero` requires a non-zero argument; both `a` and
// `b` are checked for zero (with early returns) before the calls.
let (ctz_a, mut ctz_b) = unsafe {
if a == 0 {
return b;
}
if b == 0 {
return a;
}
(intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
};
// `k` is the shared power of two of the gcd; it is reapplied at the end.
let k = ctz_a.min(ctz_b);
let mut a = a >> ctz_a;
let mut b = b;
loop {
// remove all factors of 2 from b
b >>= ctz_b;
if a > b {
mem::swap(&mut a, &mut b);
}
b = b - a;
// SAFETY: `cttz_nonzero(b)` is only reached after the `b == 0`
// check above breaks out of the loop, so `b` is non-zero here.
unsafe {
if b == 0 {
break;
}
ctz_b = intrinsics::cttz_nonzero(b);
}
}
a << k
}
let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
let ts: usize = mem::size_of::<U>() / gcd;
let us: usize = mem::size_of::<T>() / gcd;
// Armed with this knowledge, we can find how many `U`s we can fit!
let us_len = self.len() / ts * us;
// And how many `T`s will be in the trailing slice!
let ts_len = self.len() % ts;
(us_len, ts_len)
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated,
if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
// handle ZSTs specially, which is – don't handle them at all.
return (self, &[], &[]);
}
// First, find at what point do we split between the first and 2nd slice. Easy with
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: `mem::align_of::<U>()` is a valid (non-zero, power-of-two)
// alignment, as `align_offset` requires.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
// If alignment cannot be reached within the slice, everything is prefix.
if offset > self.len() {
(self, &[], &[])
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
unsafe {
(
left,
from_raw_parts(rest.as_ptr() as *const U, us_len),
from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
)
}
}
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
///     let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
///     let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
///     // less_efficient_algorithm_for_bytes(prefix);
///     // more_efficient_algorithm_for_aligned_shorts(shorts);
///     // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated,
if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
// handle ZSTs specially, which is – don't handle them at all.
return (self, &mut [], &mut []);
}
// First, find at what point do we split between the first and 2nd slice. Easy with
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: `mem::align_of::<U>()` is a valid (non-zero, power-of-two)
// alignment, as `align_offset` requires.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
// If alignment cannot be reached within the slice, everything is prefix.
if offset > self.len() {
(self, &mut [], &mut [])
} else {
let (left, rest) = self.split_at_mut(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let rest_len = rest.len();
let mut_ptr = rest.as_mut_ptr();
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
unsafe {
(
left,
from_raw_parts_mut(mut_ptr as *mut U, us_len),
from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
)
}
}
}
/// Checks whether this slice is sorted in non-decreasing order.
///
/// Holds exactly when every adjacent pair `(a, b)` satisfies `a <= b`. Slices
/// with zero or one element are trivially sorted.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above
/// definition implies that this function returns `false` if any two
/// consecutive items are not comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
    T: PartialOrd,
{
    // Delegate to the comparator-taking variant, using `partial_cmp` itself
    // as the comparator.
    self.is_sorted_by(|lhs, rhs| lhs.partial_cmp(rhs))
}
/// Checks if the elements of this slice are sorted using the given comparator function.
///
/// Rather than `PartialOrd::partial_cmp`, the caller-supplied `compare`
/// closure decides the ordering of each adjacent pair. Apart from that, this
/// is equivalent to [`is_sorted`]; see its documentation for more information.
///
/// [`is_sorted`]: #method.is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
    F: FnMut(&T, &T) -> Option<Ordering>,
{
    // The iterator yields `&T`, so the adapter closure receives `&&T`; deref
    // once before handing the pair to the caller's comparator.
    self.iter().is_sorted_by(|lhs, rhs| compare(*lhs, *rhs))
}
/// Checks if the elements of this slice are sorted using the given key extraction function.
///
/// Instead of comparing the elements directly, this compares the keys
/// produced by `f`, exactly as if the slice were first mapped through `f`.
/// Apart from that, it's equivalent to [`is_sorted`]; see its documentation
/// for more information.
///
/// [`is_sorted`]: #method.is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
    F: FnMut(&T) -> K,
    K: PartialOrd,
{
    // Checking sortedness of the mapped key stream is exactly what the
    // iterator's `is_sorted_by_key` does.
    self.iter().map(f).is_sorted()
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, [7, 15, 3, 5, 4, 12, 6] is partitioned under the predicate x % 2 != 0
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// # Examples
///
/// ```
/// #![feature(partition_point)]
///
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[unstable(feature = "partition_point", reason = "new API", issue = "73831")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    // Classic binary search over the true/false boundary.
    let mut left = 0;
    let mut right = self.len();
    while left != right {
        // `left + (right - left) / 2` avoids the overflow of `(left + right) / 2`.
        let mid = left + (right - left) / 2;
        // SAFETY:
        // When left < right, left <= mid < right.
        // Therefore left always increases and right always decreases,
        // and either of them is selected.
        // In both cases left <= right is satisfied.
        // Therefore if left < right in a step,
        // left <= right is satisfied in the next step.
        // Therefore as long as left != right, 0 <= left < right <= len is satisfied
        // and in this case 0 <= mid < len is satisfied too.
        let value = unsafe { self.get_unchecked(mid) };
        if pred(value) {
            left = mid + 1;
        } else {
            right = mid;
        }
    }
    left
}
}
#[lang = "slice_u8"]
#[cfg(not(test))]
impl [u8] {
    /// Returns `true` when every byte of the slice is within the ASCII range
    /// (i.e. `< 128`).
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn is_ascii(&self) -> bool {
        is_ascii(self)
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Behaves like `to_ascii_lowercase(a) == to_ascii_lowercase(b)` without
    /// allocating or copying temporaries.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        // Different lengths can never match; bail out before comparing bytes.
        if self.len() != other.len() {
            return false;
        }
        self.iter().zip(other.iter()).all(|(a, b)| a.eq_ignore_ascii_case(b))
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z'; all other bytes,
    /// including non-ASCII ones, are left untouched.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn make_ascii_uppercase(&mut self) {
        self.iter_mut().for_each(|byte| byte.make_ascii_uppercase());
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z'; all other bytes,
    /// including non-ASCII ones, are left untouched.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn make_ascii_lowercase(&mut self) {
        self.iter_mut().for_each(|byte| byte.make_ascii_lowercase());
    }
}
/// Reports whether any byte lane of the word `v` carries a non-ASCII byte
/// (one with its high bit set, i.e. >= 128). Snarfed from `../str/mod.rs`,
/// which does something similar for utf8 validation.
#[inline]
fn contains_nonascii(v: usize) -> bool {
    // Top bit of every byte lane; the `as usize` cast truncates the 64-bit
    // pattern down to the platform word size on 32-bit targets.
    const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    v & NONASCII_MASK != 0
}
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - If there's a tail, read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (above) returns true, then we know the answer is false.
#[inline]
fn is_ascii(s: &[u8]) -> bool {
    const USIZE_SIZE: usize = mem::size_of::<usize>();
    let len = s.len();
    let align_offset = s.as_ptr().align_offset(USIZE_SIZE);
    // If we wouldn't gain anything from the word-at-a-time implementation, fall
    // back to a scalar loop.
    //
    // We also do this for architectures where `size_of::<usize>()` isn't
    // sufficient alignment for `usize`, because it's a weird edge case.
    if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < mem::align_of::<usize>() {
        return s.iter().all(|b| b.is_ascii());
    }
    // We always read the first word unaligned, which means `align_offset` is
    // 0, we'd read the same value again for the aligned read.
    let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
    let start = s.as_ptr();
    // SAFETY: we returned early above unless `len >= USIZE_SIZE`, so reading a
    // whole word from the start of the slice is in bounds.
    let first_word = unsafe { (start as *const usize).read_unaligned() };
    if contains_nonascii(first_word) {
        return false;
    }
    // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
    // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
    // checked above.
    debug_assert!(offset_to_aligned <= len);
    // word_ptr is the (properly aligned) usize ptr we use to read the middle chunk of the slice.
    let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };
    // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
    let mut byte_pos = offset_to_aligned;
    // Paranoia check about alignment, since we're about to do a bunch of
    // unaligned loads. In practice this should be impossible barring a bug in
    // `align_offset` though.
    debug_assert_eq!((word_ptr as usize) % mem::align_of::<usize>(), 0);
    // Read aligned words while a full word remains before the end.
    while byte_pos <= len - USIZE_SIZE {
        debug_assert!(
            // Sanity check that the read is in bounds
            (word_ptr as usize + USIZE_SIZE) <= (start.wrapping_add(len) as usize) &&
            // And that our assumptions about `byte_pos` hold.
            (word_ptr as usize) - (start as usize) == byte_pos
        );
        // SAFETY: We know `word_ptr` is properly aligned (because of
        // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
        let word = unsafe { word_ptr.read() };
        if contains_nonascii(word) {
            return false;
        }
        byte_pos += USIZE_SIZE;
        // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
        // after this `add`, `word_ptr` will be at most one-past-the-end.
        word_ptr = unsafe { word_ptr.add(1) };
    }
    // If we have anything left over, it should be at-most 1 usize worth of bytes,
    // which we check with a read_unaligned.
    if byte_pos == len {
        return true;
    }
    // Sanity check to ensure there really is only one `usize` left. This should
    // be guaranteed by our loop condition.
    debug_assert!(byte_pos < len && len - byte_pos < USIZE_SIZE);
    // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
    let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };
    !contains_nonascii(last_word)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, I> ops::Index<I> for [T]
where
    I: SliceIndex<[T]>,
{
    type Output = I::Output;

    // Delegates to `SliceIndex::index`, which panics on an invalid index.
    #[inline]
    fn index(&self, index: I) -> &I::Output {
        index.index(self)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, I> ops::IndexMut<I> for [T]
where
    I: SliceIndex<[T]>,
{
    // Delegates to `SliceIndex::index_mut`, which panics on an invalid index.
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut I::Output {
        index.index_mut(self)
    }
}
/// Panics for a range start index past the end of the slice. Kept out-of-line
/// and `#[cold]` so the happy path of slice indexing stays small.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
    panic!("range start index {} out of range for slice of length {}", index, len);
}
/// Panics for a range end index past the end of the slice. Kept out-of-line
/// and `#[cold]` so the happy path of slice indexing stays small.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
    panic!("range end index {} out of range for slice of length {}", index, len);
}
/// Panics for a range whose start is greater than its end. Kept out-of-line
/// and `#[cold]` so the happy path of slice indexing stays small.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_index_order_fail(index: usize, end: usize) -> ! {
    panic!("slice index starts at {} but ends at {}", index, end);
}
/// Panics for an inclusive range ending at `usize::MAX`, whose exclusive end
/// would overflow. Kept out-of-line and `#[cold]` like the other helpers.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_index_overflow_fail() -> ! {
    panic!("attempted to index slice up to maximum usize");
}
// Sealed-trait pattern: `SliceIndex` requires this private trait, so only the
// impls written here (usize and the six std range types) can ever exist.
mod private_slice_index {
    use super::ops;

    /// Sealing trait for `SliceIndex`: downstream crates can name `SliceIndex`
    /// but cannot implement it, because they cannot name this trait.
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    pub trait Sealed {}

    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for usize {}
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for ops::Range<usize> {}
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for ops::RangeTo<usize> {}
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for ops::RangeFrom<usize> {}
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for ops::RangeFull {}
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for ops::RangeInclusive<usize> {}
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    impl Sealed for ops::RangeToInclusive<usize> {}
}
/// A helper trait used for indexing operations.
///
/// Implementations of this trait have to promise that if the argument
/// to `get_(mut_)unchecked` is a safe reference, then so is the result.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
#[rustc_on_unimplemented(
    on(T = "str", label = "string indices are ranges of `usize`",),
    on(
        all(any(T = "str", T = "&str", T = "std::string::String"), _Self = "{integer}"),
        note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
    ),
    message = "the type `{T}` cannot be indexed by `{Self}`",
    label = "slice indices are of type `usize` or ranges of `usize`"
)]
pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
    /// The output type returned by methods.
    #[stable(feature = "slice_get_slice", since = "1.28.0")]
    type Output: ?Sized;

    /// Returns a shared reference to the output at this location, if in
    /// bounds.
    #[unstable(feature = "slice_index_methods", issue = "none")]
    fn get(self, slice: &T) -> Option<&Self::Output>;

    /// Returns a mutable reference to the output at this location, if in
    /// bounds.
    #[unstable(feature = "slice_index_methods", issue = "none")]
    fn get_mut(self, slice: &mut T) -> Option<&mut Self::Output>;

    /// Returns a shared reference to the output at this location, without
    /// performing any bounds checking.
    ///
    /// Calling this method with an out-of-bounds index or a dangling `slice` pointer
    /// is *[undefined behavior]* even if the resulting reference is not used.
    ///
    /// [undefined behavior]: ../../reference/behavior-considered-undefined.html
    #[unstable(feature = "slice_index_methods", issue = "none")]
    unsafe fn get_unchecked(self, slice: *const T) -> *const Self::Output;

    /// Returns a mutable reference to the output at this location, without
    /// performing any bounds checking.
    ///
    /// Calling this method with an out-of-bounds index or a dangling `slice` pointer
    /// is *[undefined behavior]* even if the resulting reference is not used.
    ///
    /// [undefined behavior]: ../../reference/behavior-considered-undefined.html
    #[unstable(feature = "slice_index_methods", issue = "none")]
    unsafe fn get_unchecked_mut(self, slice: *mut T) -> *mut Self::Output;

    /// Returns a shared reference to the output at this location, panicking
    /// if out of bounds.
    #[unstable(feature = "slice_index_methods", issue = "none")]
    #[track_caller]
    fn index(self, slice: &T) -> &Self::Output;

    /// Returns a mutable reference to the output at this location, panicking
    /// if out of bounds.
    #[unstable(feature = "slice_index_methods", issue = "none")]
    #[track_caller]
    fn index_mut(self, slice: &mut T) -> &mut Self::Output;
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for usize {
    type Output = T;

    #[inline]
    fn get(self, slice: &[T]) -> Option<&T> {
        if self >= slice.len() {
            return None;
        }
        // SAFETY: `self` was just checked to be in bounds.
        unsafe { Some(&*self.get_unchecked(slice)) }
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
        if self >= slice.len() {
            return None;
        }
        // SAFETY: `self` was just checked to be in bounds.
        unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
        // SAFETY: the caller guarantees that `slice` is not dangling, so it
        // cannot be longer than `isize::MAX`, and that `self` is in bounds of
        // `slice`, so `self` cannot overflow an `isize` and the `add` is safe.
        unsafe { slice.as_ptr().add(self) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
        // SAFETY: identical reasoning to `get_unchecked` above.
        unsafe { slice.as_mut_ptr().add(self) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &T {
        // N.B., use intrinsic indexing
        &(*slice)[self]
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut T {
        // N.B., use intrinsic indexing
        &mut (*slice)[self]
    }
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        // A valid range is non-reversed and ends within the slice.
        if self.start > self.end || self.end > slice.len() {
            return None;
        }
        // SAFETY: the range was just validated against `slice`.
        unsafe { Some(&*self.get_unchecked(slice)) }
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        if self.start > self.end || self.end > slice.len() {
            return None;
        }
        // SAFETY: the range was just validated against `slice`.
        unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        let new_len = self.end - self.start;
        // SAFETY: the caller guarantees that `slice` is not dangling, so it
        // cannot be longer than `isize::MAX`, and that this range is in bounds
        // of `slice`, so `self.start` cannot overflow an `isize` in `add`.
        unsafe { ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), new_len) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        let new_len = self.end - self.start;
        // SAFETY: identical reasoning to `get_unchecked` above.
        unsafe { ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), new_len) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        // Check order first, then the upper bound; both helpers diverge.
        if self.start > self.end {
            slice_index_order_fail(self.start, self.end);
        }
        if self.end > slice.len() {
            slice_end_index_len_fail(self.end, slice.len());
        }
        // SAFETY: both bounds were validated above.
        unsafe { &*self.get_unchecked(slice) }
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        if self.start > self.end {
            slice_index_order_fail(self.start, self.end);
        }
        if self.end > slice.len() {
            slice_end_index_len_fail(self.end, slice.len());
        }
        // SAFETY: both bounds were validated above.
        unsafe { &mut *self.get_unchecked_mut(slice) }
    }
}
// `..end` is indexing-equivalent to `0..end`; every method delegates to the
// `Range<usize>` impl above.
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeTo<usize> {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        (0..self.end).get(slice)
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        (0..self.end).get_mut(slice)
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
        unsafe { (0..self.end).get_unchecked(slice) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
        unsafe { (0..self.end).get_unchecked_mut(slice) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        (0..self.end).index(slice)
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        (0..self.end).index_mut(slice)
    }
}
// `start..` is indexing-equivalent to `start..slice.len()`; most methods
// delegate to the `Range<usize>` impl.
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeFrom<usize> {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        (self.start..slice.len()).get(slice)
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        (self.start..slice.len()).get_mut(slice)
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
        unsafe { (self.start..slice.len()).get_unchecked(slice) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
        unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        let len = slice.len();
        if self.start > len {
            slice_start_index_len_fail(self.start, len);
        }
        // SAFETY: `start <= len` was just verified, so `start..len` is valid.
        unsafe { &*self.get_unchecked(slice) }
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        let len = slice.len();
        if self.start > len {
            slice_start_index_len_fail(self.start, len);
        }
        // SAFETY: `start <= len` was just verified, so `start..len` is valid.
        unsafe { &mut *self.get_unchecked_mut(slice) }
    }
}
// `..` selects the whole slice, so every method is the identity and can
// never fail.
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeFull {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        Some(slice)
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        Some(slice)
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        slice
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        slice
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        slice
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        slice
    }
}
// `start..=end` maps to the half-open `start..end + 1`, except that
// `end == usize::MAX` would overflow and must be rejected up front.
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeInclusive<usize> {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        if *self.end() == usize::MAX {
            return None;
        }
        (*self.start()..self.end() + 1).get(slice)
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        if *self.end() == usize::MAX {
            return None;
        }
        (*self.start()..self.end() + 1).get_mut(slice)
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
        unsafe { (*self.start()..self.end() + 1).get_unchecked(slice) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
        unsafe { (*self.start()..self.end() + 1).get_unchecked_mut(slice) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        if *self.end() == usize::MAX {
            slice_index_overflow_fail();
        }
        (*self.start()..self.end() + 1).index(slice)
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        if *self.end() == usize::MAX {
            slice_index_overflow_fail();
        }
        (*self.start()..self.end() + 1).index_mut(slice)
    }
}
// `..=end` is indexing-equivalent to `0..=end`; every method delegates to
// the `RangeInclusive<usize>` impl above.
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        (0..=self.end).get(slice)
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        (0..=self.end).get_mut(slice)
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
        unsafe { (0..=self.end).get_unchecked(slice) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
        unsafe { (0..=self.end).get_unchecked_mut(slice) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        (0..=self.end).index(slice)
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        (0..=self.end).index_mut(slice)
    }
}
////////////////////////////////////////////////////////////////////////////////
// Common traits
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for &[T] {
    /// Creates an empty slice.
    fn default() -> Self {
        // An empty slice literal coerces to `&[T]` for any `T` and any lifetime.
        &[]
    }
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
    /// Creates a mutable empty slice.
    fn default() -> Self {
        // An empty slice has nothing to alias, so a fresh `&mut []` is always valid.
        &mut []
    }
}
//
// Iterators
//
// Lets `for x in &slice` iterate by shared reference.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a [T] {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
// Lets `for x in &mut slice` iterate by mutable reference.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut [T] {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;

    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}
// Macro helper functions
// Recovers `size_of::<T>()` from a pointer value, letting the macros below
// obtain the element size without being able to name `T` directly.
#[inline(always)]
fn size_from_ptr<T>(_: *const T) -> usize {
    mem::size_of::<T>()
}
// Inlining is_empty and len makes a huge performance difference
macro_rules! is_empty {
    // The way we encode the length of a ZST iterator, this works both for ZST
    // and non-ZST: for non-ZSTs `end` is one-past-the-last element, and for
    // ZSTs `end` is `ptr + remaining_len`, so `ptr == end` means empty either way.
    ($self: ident) => {
        $self.ptr.as_ptr() as *const T == $self.end
    };
}
// To get rid of some bounds checks (see `position`), we compute the length in a somewhat
// unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
macro_rules! len {
    ($self: ident) => {{
        #![allow(unused_unsafe)] // we're sometimes used within an unsafe block
        let start = $self.ptr;
        let size = size_from_ptr(start.as_ptr());
        if size == 0 {
            // For ZSTs, `end` encodes `ptr + len` directly.
            // This _cannot_ use `unchecked_sub` because we depend on wrapping
            // to represent the length of long ZST slice iterators.
            ($self.end as usize).wrapping_sub(start.as_ptr() as usize)
        } else {
            // We know that `start <= end`, so can do better than `offset_from`,
            // which needs to deal in signed. By setting appropriate flags here
            // we can tell LLVM this, which helps it remove bounds checks.
            // SAFETY: By the type invariant, `start <= end`
            let diff = unsafe { unchecked_sub($self.end as usize, start.as_ptr() as usize) };
            // By also telling LLVM that the pointers are apart by an exact
            // multiple of the type size, it can optimize `len() == 0` down to
            // `start == end` instead of `(end - start) < size`.
            // SAFETY: By the type invariant, the pointers are aligned so the
            // distance between them must be a multiple of pointee size
            unsafe { exact_div(diff, size) }
        }
    }};
}
// The shared definition of the `Iter` and `IterMut` iterators
// Parameters: the struct name, its raw pointer type, the yielded element type,
// the raw-pointer mutability token (`const`/`mut`), an optional `mut` token
// repeated into the reference sigil, and extra items spliced into the
// `Iterator` impl.
macro_rules! iterator {
    (
        struct $name:ident -> $ptr:ty,
        $elem:ty,
        $raw_mut:tt,
        {$( $mut_:tt )*},
        {$($extra:tt)*}
    ) => {
        // Returns the first element and moves the start of the iterator forwards by 1.
        // Greatly improves performance compared to an inlined function. The iterator
        // must not be empty.
        macro_rules! next_unchecked {
            ($self: ident) => {& $( $mut_ )* *$self.post_inc_start(1)}
        }
        // Returns the last element and moves the end of the iterator backwards by 1.
        // Greatly improves performance compared to an inlined function. The iterator
        // must not be empty.
        macro_rules! next_back_unchecked {
            ($self: ident) => {& $( $mut_ )* *$self.pre_dec_end(1)}
        }
        // Shrinks the iterator when T is a ZST, by moving the end of the iterator
        // backwards by `n`. `n` must not exceed `self.len()`.
        // (For ZSTs `end` encodes the remaining length, so moving it back by `n`
        // bytes is how `n` elements are consumed.)
        macro_rules! zst_shrink {
            ($self: ident, $n: ident) => {
                $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
            }
        }
        impl<'a, T> $name<'a, T> {
            // Helper function for creating a slice from the iterator.
            #[inline(always)]
            fn make_slice(&self) -> &'a [T] {
                // SAFETY relies on `ptr` and `len!` describing the iterator's
                // remaining, still-valid sub-slice (the type invariant).
                unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
            }
            // Helper function for moving the start of the iterator forwards by `offset` elements,
            // returning the old start.
            // Unsafe because the offset must not exceed `self.len()`.
            #[inline(always)]
            unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
                if mem::size_of::<T>() == 0 {
                    zst_shrink!(self, offset);
                    self.ptr.as_ptr()
                } else {
                    let old = self.ptr.as_ptr();
                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
                    // so this new pointer is inside `self` and thus guaranteed to be non-null.
                    self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
                    old
                }
            }
            // Helper function for moving the end of the iterator backwards by `offset` elements,
            // returning the new end.
            // Unsafe because the offset must not exceed `self.len()`.
            #[inline(always)]
            unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
                if mem::size_of::<T>() == 0 {
                    zst_shrink!(self, offset);
                    self.ptr.as_ptr()
                } else {
                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
                    // which is guaranteed to not overflow an `isize`. Also, the resulting pointer
                    // is in bounds of `slice`, which fulfills the other requirements for `offset`.
                    self.end = unsafe { self.end.offset(-offset) };
                    self.end
                }
            }
        }
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T> ExactSizeIterator for $name<'_, T> {
            #[inline(always)]
            fn len(&self) -> usize {
                len!(self)
            }
            #[inline(always)]
            fn is_empty(&self) -> bool {
                is_empty!(self)
            }
        }
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, T> Iterator for $name<'a, T> {
            type Item = $elem;
            #[inline]
            fn next(&mut self) -> Option<$elem> {
                // could be implemented with slices, but this avoids bounds checks
                unsafe {
                    assume(!self.ptr.as_ptr().is_null());
                    if mem::size_of::<T>() != 0 {
                        assume(!self.end.is_null());
                    }
                    if is_empty!(self) {
                        None
                    } else {
                        Some(next_unchecked!(self))
                    }
                }
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                let exact = len!(self);
                (exact, Some(exact))
            }
            #[inline]
            fn count(self) -> usize {
                len!(self)
            }
            #[inline]
            fn nth(&mut self, n: usize) -> Option<$elem> {
                if n >= len!(self) {
                    // This iterator is now empty.
                    if mem::size_of::<T>() == 0 {
                        // We have to do it this way as `ptr` may never be 0, but `end`
                        // could be (due to wrapping).
                        self.end = self.ptr.as_ptr();
                    } else {
                        unsafe {
                            // End can't be 0 if T isn't ZST because ptr isn't 0 and end >= ptr
                            self.ptr = NonNull::new_unchecked(self.end as *mut T);
                        }
                    }
                    return None;
                }
                // We are in bounds. `post_inc_start` does the right thing even for ZSTs.
                unsafe {
                    self.post_inc_start(n as isize);
                    Some(next_unchecked!(self))
                }
            }
            #[inline]
            fn last(mut self) -> Option<$elem> {
                self.next_back()
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn for_each<F>(mut self, mut f: F)
            where
                Self: Sized,
                F: FnMut(Self::Item),
            {
                while let Some(x) = self.next() {
                    f(x);
                }
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn all<F>(&mut self, mut f: F) -> bool
            where
                Self: Sized,
                F: FnMut(Self::Item) -> bool,
            {
                while let Some(x) = self.next() {
                    if !f(x) {
                        return false;
                    }
                }
                true
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn any<F>(&mut self, mut f: F) -> bool
            where
                Self: Sized,
                F: FnMut(Self::Item) -> bool,
            {
                while let Some(x) = self.next() {
                    if f(x) {
                        return true;
                    }
                }
                false
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
            where
                Self: Sized,
                P: FnMut(&Self::Item) -> bool,
            {
                while let Some(x) = self.next() {
                    if predicate(&x) {
                        return Some(x);
                    }
                }
                None
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn find_map<B, F>(&mut self, mut f: F) -> Option<B>
            where
                Self: Sized,
                F: FnMut(Self::Item) -> Option<B>,
            {
                while let Some(x) = self.next() {
                    if let Some(y) = f(x) {
                        return Some(y);
                    }
                }
                None
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile. Also, the `assume` avoids a bounds check.
            #[inline]
            #[rustc_inherit_overflow_checks]
            fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
                Self: Sized,
                P: FnMut(Self::Item) -> bool,
            {
                let n = len!(self);
                let mut i = 0;
                while let Some(x) = self.next() {
                    if predicate(x) {
                        unsafe { assume(i < n) };
                        return Some(i);
                    }
                    i += 1;
                }
                None
            }
            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile. Also, the `assume` avoids a bounds check.
            #[inline]
            fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
                P: FnMut(Self::Item) -> bool,
                Self: Sized + ExactSizeIterator + DoubleEndedIterator
            {
                let n = len!(self);
                let mut i = n;
                while let Some(x) = self.next_back() {
                    i -= 1;
                    if predicate(x) {
                        unsafe { assume(i < n) };
                        return Some(i);
                    }
                }
                None
            }
            $($extra)*
        }
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, T> DoubleEndedIterator for $name<'a, T> {
            #[inline]
            fn next_back(&mut self) -> Option<$elem> {
                // could be implemented with slices, but this avoids bounds checks
                unsafe {
                    assume(!self.ptr.as_ptr().is_null());
                    if mem::size_of::<T>() != 0 {
                        assume(!self.end.is_null());
                    }
                    if is_empty!(self) {
                        None
                    } else {
                        Some(next_back_unchecked!(self))
                    }
                }
            }
            #[inline]
            fn nth_back(&mut self, n: usize) -> Option<$elem> {
                if n >= len!(self) {
                    // This iterator is now empty.
                    // (Setting `end = ptr` empties both ZST and non-ZST iterators,
                    // per the `is_empty!` encoding.)
                    self.end = self.ptr.as_ptr();
                    return None;
                }
                // We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
                unsafe {
                    self.pre_dec_end(n as isize);
                    Some(next_back_unchecked!(self))
                }
            }
        }
        #[stable(feature = "fused", since = "1.26.0")]
        impl<T> FusedIterator for $name<'_, T> {}
        #[unstable(feature = "trusted_len", issue = "37572")]
        unsafe impl<T> TrustedLen for $name<'_, T> {}
    }
}
/// Immutable slice iterator
///
/// This struct is created by the [`iter`] method on [slices].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has `iter` method to get the `Iter` struct (&[usize here]):
/// let slice = &[1, 2, 3];
///
/// // Then, we iterate over it:
/// for element in slice.iter() {
///     println!("{}", element);
/// }
/// ```
///
/// [`iter`]: ../../std/primitive.slice.html#method.iter
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // Pointer to the next element to yield; always non-null.
    ptr: NonNull<T>,
    end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
    // ptr == end is a quick test for the Iterator being empty, that works
    // for both ZST and non-ZST.
    // Ties the iterator's lifetime to the borrowed slice.
    _marker: marker::PhantomData<&'a T>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Iter").field(&self.as_slice()).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for Iter<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Send for Iter<'_, T> {}
impl<'a, T> Iter<'a, T> {
    /// Views the underlying data as a subslice of the original data.
    ///
    /// This has the same lifetime as the original slice, and so the
    /// iterator can continue to be used while this exists.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // First, we declare a type which has the `iter` method to get the `Iter`
    /// // struct (&[usize here]):
    /// let slice = &[1, 2, 3];
    ///
    /// // Then, we get the iterator:
    /// let mut iter = slice.iter();
    /// // So if we print what `as_slice` method returns here, we have "[1, 2, 3]":
    /// println!("{:?}", iter.as_slice());
    ///
    /// // Next, we move to the second element of the slice:
    /// iter.next();
    /// // Now `as_slice` returns "[2, 3]":
    /// println!("{:?}", iter.as_slice());
    /// ```
    #[stable(feature = "iter_to_slice", since = "1.4.0")]
    pub fn as_slice(&self) -> &'a [T] {
        // Reassemble a slice from `ptr..end`; lifetime `'a` is sound because
        // a shared iterator can coexist with shared views of the data.
        self.make_slice()
    }
}
// Instantiate the shared iterator machinery for the immutable case.
// The trailing block supplies an `is_sorted_by` override that checks
// adjacent pairs of the remaining elements via `windows(2)`.
iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
    fn is_sorted_by<F>(self, mut compare: F) -> bool
    where
        Self: Sized,
        F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
    {
        // Sorted iff no adjacent pair compares `Greater`; an
        // incomparable pair (`None`) makes the answer `false`.
        self.as_slice().windows(2).all(|w| {
            compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false)
        })
    }
}}
// Manual impl: `Iter` is `Clone` for *any* `T`, because cloning only
// copies the two raw pointers — never the elements.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
    fn clone(&self) -> Self {
        Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
    }
}
#[stable(feature = "slice_iter_as_ref", since = "1.13.0")]
impl<T> AsRef<[T]> for Iter<'_, T> {
    // Borrow the not-yet-yielded remainder as a slice.
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}
/// Mutable slice iterator.
///
/// This struct is created by the [`iter_mut`] method on [slices].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has the `iter_mut` method to get the `IterMut`
/// // struct (&[usize here]):
/// let mut slice = &mut [1, 2, 3];
///
/// // Then, we iterate over it and increment each element value:
/// for element in slice.iter_mut() {
///     *element += 1;
/// }
///
/// // We now have "[2, 3, 4]":
/// println!("{:?}", slice);
/// ```
///
/// [`iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
    ptr: NonNull<T>, // current position; advanced by `next`
    end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
                 // ptr == end is a quick test for the Iterator being empty, that works
                 // for both ZST and non-ZST.
    _marker: marker::PhantomData<&'a mut T>, // exclusive borrow of the slice
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
    // Debug-print the remaining elements without consuming the iterator.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("IterMut").field(&self.make_slice()).finish()
    }
}
// Unlike `Iter`, sending an `IterMut` needs `T: Send` (it yields `&mut T`),
// while sharing it still only needs `T: Sync`.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for IterMut<'_, T> {}
impl<'a, T> IterMut<'a, T> {
    /// Views the underlying data as a subslice of the original data.
    ///
    /// To avoid creating `&mut` references that alias, this is forced
    /// to consume the iterator.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // First, we declare a type which has the `iter_mut` method to get the `IterMut`
    /// // struct (&[usize here]):
    /// let mut slice = &mut [1, 2, 3];
    ///
    /// {
    ///     // Then, we get the iterator:
    ///     let mut iter = slice.iter_mut();
    ///     // We move to next element:
    ///     iter.next();
    ///     // So if we print what `into_slice` method returns here, we have "[2, 3]":
    ///     println!("{:?}", iter.into_slice());
    /// }
    ///
    /// // Now let's modify a value of the slice:
    /// {
    ///     // First we get back the iterator:
    ///     let mut iter = slice.iter_mut();
    ///     // We change the value of the first element of the slice returned by the `next` method:
    ///     *iter.next().unwrap() += 1;
    /// }
    /// // Now slice is "[2, 2, 3]":
    /// println!("{:?}", slice);
    /// ```
    #[stable(feature = "iter_to_slice", since = "1.4.0")]
    pub fn into_slice(self) -> &'a mut [T] {
        // SAFETY: `ptr..ptr+len` is exactly the unyielded portion of the
        // original exclusive borrow; consuming `self` guarantees no other
        // `&mut` handed out by this iterator can alias it.
        unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
    }
    /// Views the underlying data as a subslice of the original data.
    ///
    /// To avoid creating `&mut [T]` references that alias, the returned slice
    /// borrows its lifetime from the iterator the method is applied on.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # #![feature(slice_iter_mut_as_slice)]
    /// let mut slice: &mut [usize] = &mut [1, 2, 3];
    ///
    /// // First, we get the iterator:
    /// let mut iter = slice.iter_mut();
    /// // So if we check what the `as_slice` method returns here, we have "[1, 2, 3]":
    /// assert_eq!(iter.as_slice(), &[1, 2, 3]);
    ///
    /// // Next, we move to the second element of the slice:
    /// iter.next();
    /// // Now `as_slice` returns "[2, 3]":
    /// assert_eq!(iter.as_slice(), &[2, 3]);
    /// ```
    #[unstable(feature = "slice_iter_mut_as_slice", reason = "recently added", issue = "58957")]
    pub fn as_slice(&self) -> &[T] {
        // Shared view only: lifetime is tied to `&self`, not `'a`.
        self.make_slice()
    }
}
// Instantiate the shared iterator machinery for the mutable case.
iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
/// An internal abstraction over the splitting iterators, so that
/// splitn, splitn_mut etc can be implemented once.
#[doc(hidden)]
trait SplitIter: DoubleEndedIterator {
    /// Marks the underlying iterator as complete, extracting the remaining
    /// portion of the slice.
    ///
    /// Returns `None` if the iterator had already finished.
    fn finish(&mut self) -> Option<Self::Item>;
}
/// An iterator over subslices separated by elements that match a predicate
/// function.
///
/// This struct is created by the [`split`] method on [slices].
///
/// [`split`]: ../../std/primitive.slice.html#method.split
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Split<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    v: &'a [T],     // unprocessed remainder of the slice
    pred: P,        // separator predicate; matching elements are dropped
    finished: bool, // set once the final (possibly empty) subslice is yielded
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for Split<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    // `pred` is an arbitrary closure and cannot be printed, so only the
    // remaining slice and the finished flag are shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Split").field("v", &self.v).field("finished", &self.finished).finish()
    }
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, P> Clone for Split<'_, T, P>
where
    P: Clone + FnMut(&T) -> bool,
{
    fn clone(&self) -> Self {
        Split { v: self.v, pred: self.pred.clone(), finished: self.finished }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for Split<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        match self.v.iter().position(|x| (self.pred)(x)) {
            // No separator left: yield the whole remainder and finish.
            None => self.finish(),
            Some(idx) => {
                // Yield everything before the separator; skip the
                // separator itself (`idx + 1`).
                let ret = Some(&self.v[..idx]);
                self.v = &self.v[idx + 1..];
                ret
            }
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // At least one subslice remains (possibly empty); at most len+1
        // if every remaining element is a separator.
        if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for Split<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        match self.v.iter().rposition(|x| (self.pred)(x)) {
            None => self.finish(),
            Some(idx) => {
                // Mirror of `next`: yield everything after the last
                // separator, keep everything before it.
                let ret = Some(&self.v[idx + 1..]);
                self.v = &self.v[..idx];
                ret
            }
        }
    }
}
impl<'a, T, P> SplitIter for Split<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a [T]> {
        // Yield the remainder exactly once, then report exhaustion.
        if self.finished {
            None
        } else {
            self.finished = true;
            Some(self.v)
        }
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, P> FusedIterator for Split<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over subslices separated by elements that match a predicate
/// function. Unlike `Split`, it contains the matched part as a terminator
/// of the subslice.
///
/// This struct is created by the [`split_inclusive`] method on [slices].
///
/// [`split_inclusive`]: ../../std/primitive.slice.html#method.split_inclusive
/// [slices]: ../../std/primitive.slice.html
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusive<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    v: &'a [T],     // unprocessed remainder of the slice
    pred: P,        // separator predicate; matches stay in the subslice
    finished: bool, // set once the remainder has been yielded
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T: fmt::Debug, P> fmt::Debug for SplitInclusive<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    // `pred` cannot be printed; show only the remaining slice and state.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitInclusive")
            .field("v", &self.v)
            .field("finished", &self.finished)
            .finish()
    }
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> Clone for SplitInclusive<'_, T, P>
where
    P: Clone + FnMut(&T) -> bool,
{
    fn clone(&self) -> Self {
        SplitInclusive { v: self.v, pred: self.pred.clone(), finished: self.finished }
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> Iterator for SplitInclusive<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        // Cut *after* the separator (`idx + 1`), so the match stays at the
        // end of the yielded subslice; with no match, take the rest.
        let idx =
            self.v.iter().position(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(self.v.len());
        if idx == self.v.len() {
            self.finished = true;
        }
        let ret = Some(&self.v[..idx]);
        self.v = &self.v[idx..];
        ret
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // At least one subslice remains; at most len+1.
        if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> DoubleEndedIterator for SplitInclusive<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        // The last index of self.v is already checked and found to match
        // by the last iteration, so we start searching a new match
        // one index to the left.
        let remainder = if self.v.is_empty() { &[] } else { &self.v[..(self.v.len() - 1)] };
        let idx = remainder.iter().rposition(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(0);
        if idx == 0 {
            // No earlier separator: the upcoming subslice is the last one.
            self.finished = true;
        }
        let ret = Some(&self.v[idx..]);
        self.v = &self.v[..idx];
        ret
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> FusedIterator for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the mutable subslices of the vector which are separated
/// by elements that match `pred`.
///
/// This struct is created by the [`split_mut`] method on [slices].
///
/// [`split_mut`]: ../../std/primitive.slice.html#method.split_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    v: &'a mut [T], // unprocessed remainder; moved out of via `mem::replace`
    pred: P,        // separator predicate; matching elements are dropped
    finished: bool, // set once the remainder has been yielded
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    // `pred` cannot be printed; show only the remaining slice and state.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitMut").field("v", &self.v).field("finished", &self.finished).finish()
    }
}
impl<'a, T, P> SplitIter for SplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            None
        } else {
            self.finished = true;
            // Swap in an empty slice so the full remainder can be moved
            // out with lifetime `'a` (can't copy a `&mut`).
            Some(mem::replace(&mut self.v, &mut []))
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for SplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = {
            // work around borrowck limitations
            let pred = &mut self.pred;
            self.v.iter().position(|x| (*pred)(x))
        };
        match idx_opt {
            None => self.finish(),
            Some(idx) => {
                // Move the remainder out, split at the separator, yield
                // the head, and keep the tail minus the separator.
                let tmp = mem::replace(&mut self.v, &mut []);
                let (head, tail) = tmp.split_at_mut(idx);
                self.v = &mut tail[1..];
                Some(head)
            }
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.finished {
            (0, Some(0))
        } else {
            // if the predicate doesn't match anything, we yield one slice
            // if it matches every element, we yield len+1 empty slices.
            (1, Some(self.v.len() + 1))
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = {
            // work around borrowck limitations
            let pred = &mut self.pred;
            self.v.iter().rposition(|x| (*pred)(x))
        };
        match idx_opt {
            None => self.finish(),
            Some(idx) => {
                // Mirror of `next`: keep the head, yield everything after
                // the last separator.
                let tmp = mem::replace(&mut self.v, &mut []);
                let (head, tail) = tmp.split_at_mut(idx);
                self.v = head;
                Some(&mut tail[1..])
            }
        }
    }
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T, P> FusedIterator for SplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the mutable subslices of the vector which are separated
/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched
/// parts in the ends of the subslices.
///
/// This struct is created by the [`split_inclusive_mut`] method on [slices].
///
/// [`split_inclusive_mut`]: ../../std/primitive.slice.html#method.split_inclusive_mut
/// [slices]: ../../std/primitive.slice.html
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusiveMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    v: &'a mut [T], // unprocessed remainder; moved out of via `mem::replace`
    pred: P,        // separator predicate; matches stay in the subslice
    finished: bool, // set once the remainder has been yielded
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T: fmt::Debug, P> fmt::Debug for SplitInclusiveMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    // `pred` cannot be printed; show only the remaining slice and state.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitInclusiveMut")
            .field("v", &self.v)
            .field("finished", &self.finished)
            .finish()
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> Iterator for SplitInclusiveMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = {
            // work around borrowck limitations
            let pred = &mut self.pred;
            self.v.iter().position(|x| (*pred)(x))
        };
        // Cut *after* the separator so the match terminates the subslice;
        // with no match, take the whole remainder.
        let idx = idx_opt.map(|idx| idx + 1).unwrap_or(self.v.len());
        if idx == self.v.len() {
            self.finished = true;
        }
        let tmp = mem::replace(&mut self.v, &mut []);
        let (head, tail) = tmp.split_at_mut(idx);
        self.v = tail;
        Some(head)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.finished {
            (0, Some(0))
        } else {
            // if the predicate doesn't match anything, we yield one slice
            // if it matches every element, we yield len+1 empty slices.
            (1, Some(self.v.len() + 1))
        }
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> DoubleEndedIterator for SplitInclusiveMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = if self.v.is_empty() {
            None
        } else {
            // work around borrowck limitations
            let pred = &mut self.pred;
            // The last index of self.v is already checked and found to match
            // by the last iteration, so we start searching a new match
            // one index to the left.
            let remainder = &self.v[..(self.v.len() - 1)];
            remainder.iter().rposition(|x| (*pred)(x))
        };
        let idx = idx_opt.map(|idx| idx + 1).unwrap_or(0);
        if idx == 0 {
            // No earlier separator: the upcoming subslice is the last one.
            self.finished = true;
        }
        let tmp = mem::replace(&mut self.v, &mut []);
        let (head, tail) = tmp.split_at_mut(idx);
        self.v = head;
        Some(tail)
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> FusedIterator for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over subslices separated by elements that match a predicate
/// function, starting from the end of the slice.
///
/// This struct is created by the [`rsplit`] method on [slices].
///
/// [`rsplit`]: ../../std/primitive.slice.html#method.rsplit
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[derive(Clone)] // Is this correct, or does it incorrectly require `T: Clone`?
pub struct RSplit<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // A reversed `Split`: every method below simply swaps front and back.
    inner: Split<'a, T, P>,
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplit<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RSplit")
            .field("v", &self.inner.v)
            .field("finished", &self.inner.finished)
            .finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> Iterator for RSplit<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        // Forward iteration of `RSplit` is backward iteration of `Split`.
        self.inner.next_back()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Direction doesn't affect the count of subslices.
        self.inner.size_hint()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        self.inner.next()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> SplitIter for RSplit<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a [T]> {
        self.inner.finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T, P> FusedIterator for RSplit<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the subslices of the vector which are separated
/// by elements that match `pred`, starting from the end of the slice.
///
/// This struct is created by the [`rsplit_mut`] method on [slices].
///
/// [`rsplit_mut`]: ../../std/primitive.slice.html#method.rsplit_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub struct RSplitMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // A reversed `SplitMut`: every method below simply swaps front and back.
    inner: SplitMut<'a, T, P>,
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RSplitMut")
            .field("v", &self.inner.v)
            .field("finished", &self.inner.finished)
            .finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> SplitIter for RSplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a mut [T]> {
        self.inner.finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> Iterator for RSplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        // Forward iteration of `RSplitMut` is backward iteration of `SplitMut`.
        self.inner.next_back()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        self.inner.next()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T, P> FusedIterator for RSplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An private iterator over subslices separated by elements that
/// match a predicate function, splitting at most a fixed number of
/// times.
#[derive(Debug)]
struct GenericSplitN<I> {
    iter: I,     // the underlying (possibly reversed) split iterator
    count: usize, // how many more items may still be yielded
}
impl<T, I: SplitIter<Item = T>> Iterator for GenericSplitN<I> {
    type Item = T;
    #[inline]
    fn next(&mut self) -> Option<T> {
        match self.count {
            // Budget exhausted.
            0 => None,
            // Last allowed item: return the *unsplit* remainder.
            1 => {
                self.count -= 1;
                self.iter.finish()
            }
            // Normal case: delegate one split to the inner iterator.
            _ => {
                self.count -= 1;
                self.iter.next()
            }
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The inner hint, capped by the remaining budget.
        let (lower, upper_opt) = self.iter.size_hint();
        (lower, upper_opt.map(|upper| cmp::min(self.count, upper)))
    }
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
///
/// This struct is created by the [`splitn`] method on [slices].
///
/// [`splitn`]: ../../std/primitive.slice.html#method.splitn
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitN<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `Split` wrapped with a split-count budget.
    inner: GenericSplitN<Split<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitN<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitN").field("inner", &self.inner).finish()
    }
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
///
/// This struct is created by the [`rsplitn`] method on [slices].
///
/// [`rsplitn`]: ../../std/primitive.slice.html#method.rsplitn
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitN<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `RSplit` wrapped with a split-count budget.
    inner: GenericSplitN<RSplit<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitN<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RSplitN").field("inner", &self.inner).finish()
    }
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
///
/// This struct is created by the [`splitn_mut`] method on [slices].
///
/// [`splitn_mut`]: ../../std/primitive.slice.html#method.splitn_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitNMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `SplitMut` wrapped with a split-count budget.
    inner: GenericSplitN<SplitMut<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitNMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitNMut").field("inner", &self.inner).finish()
    }
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
///
/// This struct is created by the [`rsplitn_mut`] method on [slices].
///
/// [`rsplitn_mut`]: ../../std/primitive.slice.html#method.rsplitn_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitNMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `RSplitMut` wrapped with a split-count budget.
    inner: GenericSplitN<RSplitMut<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitNMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RSplitNMut").field("inner", &self.inner).finish()
    }
}
// Generates `Iterator` and `FusedIterator` impls for the `*SplitN*`
// wrappers above by delegating every call to the `inner` field.
macro_rules! forward_iterator {
    ($name:ident: $elem:ident, $iter_of:ty) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, $elem, P> Iterator for $name<'a, $elem, P>
        where
            P: FnMut(&T) -> bool,
        {
            type Item = $iter_of;
            #[inline]
            fn next(&mut self) -> Option<$iter_of> {
                self.inner.next()
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.inner.size_hint()
            }
        }
        #[stable(feature = "fused", since = "1.26.0")]
        impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> where P: FnMut(&T) -> bool {}
    };
}
forward_iterator! { SplitN: T, &'a [T] }
forward_iterator! { RSplitN: T, &'a [T] }
forward_iterator! { SplitNMut: T, &'a mut [T] }
forward_iterator! { RSplitNMut: T, &'a mut [T] }
/// An iterator over overlapping subslices of length `size`.
///
/// This struct is created by the [`windows`] method on [slices].
///
/// [`windows`]: ../../std/primitive.slice.html#method.windows
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Windows<'a, T: 'a> {
    v: &'a [T],  // remaining portion; the next window is its first `size` elements
    size: usize, // window length (presumably nonzero, enforced by `windows` — not visible here)
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Windows<'_, T> {
    fn clone(&self) -> Self {
        Windows { v: self.v, size: self.size }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Windows<'a, T> {
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.size > self.v.len() {
            None
        } else {
            // Yield the leading window, then advance by one element so
            // consecutive windows overlap.
            let ret = Some(&self.v[..self.size]);
            self.v = &self.v[1..];
            ret
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.size > self.v.len() {
            (0, Some(0))
        } else {
            // Exactly `len - size + 1` windows remain.
            let size = self.v.len() - self.size + 1;
            (size, Some(size))
        }
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // The n-th window ends at `size + n`; `overflowing_add` guards
        // against wraparound for huge `n`.
        let (end, overflow) = self.size.overflowing_add(n);
        if end > self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            let nth = &self.v[n..end];
            self.v = &self.v[n + 1..];
            Some(nth)
        }
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.size > self.v.len() {
            None
        } else {
            // The final window is the trailing `size` elements.
            let start = self.v.len() - self.size;
            Some(&self.v[start..])
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Windows<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.size > self.v.len() {
            None
        } else {
            // Yield the trailing window, then shrink from the back by one.
            let ret = Some(&self.v[self.v.len() - self.size..]);
            self.v = &self.v[..self.v.len() - 1];
            ret
        }
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        // The n-th-from-back window ends at `len - n`; `overflowing_sub`
        // guards against `n > len`.
        let (end, overflow) = self.v.len().overflowing_sub(n);
        if end < self.size || overflow {
            self.v = &[];
            None
        } else {
            let ret = &self.v[end - self.size..end];
            self.v = &self.v[..end - 1];
            Some(ret)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Windows<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Windows<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Windows<'_, T> {}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
        // SAFETY: since the caller guarantees that `i` is in bounds,
        // which means that `i` cannot overflow an `isize`, and the
        // slice created by `from_raw_parts` is a subslice of `self.v`
        // thus is guaranteed to be valid for the lifetime `'a` of `self.v`.
        unsafe { from_raw_parts(self.v.as_ptr().add(i), self.size) }
    }
    // Reading a window has no observable side effects.
    fn may_have_side_effect() -> bool {
        false
    }
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`chunks`] method on [slices].
///
/// [`chunks`]: ../../std/primitive.slice.html#method.chunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chunks<'a, T: 'a> {
    v: &'a [T],        // remaining, not-yet-chunked portion of the slice
    chunk_size: usize, // length of every chunk except possibly the last
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Chunks<'_, T> {
    fn clone(&self) -> Self {
        Chunks { v: self.v, chunk_size: self.chunk_size }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Chunks<'a, T> {
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.v.is_empty() {
            None
        } else {
            // `min` makes the final chunk the (shorter) remainder.
            let chunksz = cmp::min(self.v.len(), self.chunk_size);
            let (fst, snd) = self.v.split_at(chunksz);
            self.v = snd;
            Some(fst)
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.v.is_empty() {
            (0, Some(0))
        } else {
            // Ceiling division: a non-empty remainder adds one chunk.
            let n = self.v.len() / self.chunk_size;
            let rem = self.v.len() % self.chunk_size;
            let n = if rem > 0 { n + 1 } else { n };
            (n, Some(n))
        }
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // The n-th chunk starts at `n * chunk_size`; guard the multiply
        // against overflow.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            // Clamp the end to the slice length (the chunk may be short).
            let end = match start.checked_add(self.chunk_size) {
                Some(sum) => cmp::min(self.v.len(), sum),
                None => self.v.len(),
            };
            let nth = &self.v[start..end];
            self.v = &self.v[end..];
            Some(nth)
        }
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.v.is_empty() {
            None
        } else {
            // Round the last valid index down to a chunk boundary.
            let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
            Some(&self.v[start..])
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Chunks<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The chunk at the back is the remainder if the length is not
            // a multiple of `chunk_size`, otherwise a full chunk.
            let remainder = self.v.len() % self.chunk_size;
            let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
            let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
            self.v = fst;
            Some(snd)
        }
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &[];
            None
        } else {
            // Index chunks from the front: the n-th-from-back chunk is
            // chunk `len - 1 - n`.
            let start = (len - 1 - n) * self.chunk_size;
            let end = match start.checked_add(self.chunk_size) {
                Some(res) => cmp::min(res, self.v.len()),
                None => self.v.len(),
            };
            let nth_back = &self.v[start..end];
            self.v = &self.v[..start];
            Some(nth_back)
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Chunks<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Chunks<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Chunks<'_, T> {}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
        let start = i * self.chunk_size;
        let end = match start.checked_add(self.chunk_size) {
            None => self.v.len(),
            Some(end) => cmp::min(end, self.v.len()),
        };
        // SAFETY: the caller guarantees that `i` is in bounds,
        // which means that `start` must be in bounds of the
        // underlying `self.v` slice, and we made sure that `end`
        // is also in bounds of `self.v`. Thus, `start` cannot overflow
        // an `isize`, and the slice constructed by `from_raw_parts`
        // is a subslice of `self.v` which is guaranteed to be valid
        // for the lifetime `'a` of `self.v`.
        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
    }
    // Reading a chunk has no observable side effects.
    fn may_have_side_effect() -> bool {
        false
    }
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`chunks_mut`] method on [slices].
///
/// [`chunks_mut`]: ../../std/primitive.slice.html#method.chunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ChunksMut<'a, T: 'a> {
v: &'a mut [T],
chunk_size: usize,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for ChunksMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
// The final chunk may be shorter than `chunk_size`.
let sz = cmp::min(self.v.len(), self.chunk_size);
// Move the slice out of `self.v` (leaving an empty slice behind) so
// the returned chunk can borrow for the full `'a`, not just for the
// duration of this `&mut self` call.
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
// Round up: a non-empty remainder still forms one (short) chunk.
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// `start` is the element index where the n-th chunk begins; an
// overflowing multiply is treated the same as out-of-bounds.
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// Clamp the chunk end to the slice length (last chunk may be short).
let end = match start.checked_add(self.chunk_size) {
Some(sum) => cmp::min(self.v.len(), sum),
None => self.v.len(),
};
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(end);
let (_, nth) = head.split_at_mut(start);
self.v = tail;
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
// Start of the final (possibly short) chunk.
let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
Some(&mut self.v[start..])
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
// Iterating backwards, the short remainder chunk (if any) is
// yielded first.
let remainder = self.v.len() % self.chunk_size;
let sz = if remainder != 0 { remainder } else { self.chunk_size };
// Move the slice out so the returned chunk carries lifetime `'a`.
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// Chunks are laid out front-to-back, so the n-th-from-the-back
// chunk starts at index `(len - 1 - n) * chunk_size`.
let start = (len - 1 - n) * self.chunk_size;
// Clamp the end: the last chunk may be shorter than `chunk_size`.
let end = match start.checked_add(self.chunk_size) {
Some(res) => cmp::min(res, self.v.len()),
None => self.v.len(),
};
let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (head, nth_back) = temp.split_at_mut(start);
self.v = head;
Some(nth_back)
}
}
}
// `size_hint` is exact, the iterator never resumes after returning `None`,
// and the length always fits in `usize`, so these marker traits are sound.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for ChunksMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksMut<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for ChunksMut<'_, T> {}
#[doc(hidden)]
// NOTE(review): sibling `TrustedRandomAccess` impls in this file carry a
// stability attribute in addition to `#[doc(hidden)]`; confirm whether it
// was dropped here intentionally.
unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
let start = i * self.chunk_size;
// Clamp the chunk end: the final chunk may be shorter.
let end = match start.checked_add(self.chunk_size) {
None => self.v.len(),
Some(end) => cmp::min(end, self.v.len()),
};
// SAFETY: see comments for `Chunks::get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`chunks_exact`] method on [slices].
///
/// [`chunks_exact`]: ../../std/primitive.slice.html#method.chunks_exact
/// [`remainder`]: ../../std/slice/struct.ChunksExact.html#method.remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExact<'a, T: 'a> {
// Remaining, not-yet-yielded part of the slice (full chunks only).
v: &'a [T],
// Tail of the original slice that does not fill a whole chunk.
rem: &'a [T],
// Exact length of every yielded chunk.
chunk_size: usize,
}
impl<'a, T> ChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
// Plain accessor: the remainder is stored in `rem` and never recomputed.
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> Clone for ChunksExact<'_, T> {
    /// Duplicates the iterator by copying each field; the shared-slice
    /// references and the `usize` chunk size are all trivially copyable.
    fn clone(&self) -> Self {
        let Self { v, rem, chunk_size } = *self;
        Self { v, rem, chunk_size }
    }
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExact<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
// Only whole chunks are yielded; a short tail lives in `self.rem`.
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.chunk_size);
self.v = snd;
Some(fst)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: integer division counts only the full chunks left.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
// Skip `n` whole chunks, then delegate to `next` for the n-th one.
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &[];
None
} else {
let (_, snd) = self.v.split_at(start);
self.v = snd;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Split off exactly one full chunk from the back.
let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
self.v = fst;
Some(snd)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// All chunks are full-sized, so chunk boundaries are exact multiples
// of `chunk_size` and none of this arithmetic can overflow.
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
let nth_back = &self.v[start..end];
self.v = &self.v[..start];
Some(nth_back)
}
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExact<'_, T> {
fn is_empty(&self) -> bool {
// `v` only ever holds whole chunks plus (transiently) the tail that is
// too short to yield, so an empty `v` means no chunks remain.
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExact<'_, T> {}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExact<'_, T> {}
#[doc(hidden)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
// Every chunk is exactly `chunk_size` long, so no end clamping needed.
let start = i * self.chunk_size;
// SAFETY: mostly identical to `Chunks::get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`chunks_exact_mut`] method on [slices].
///
/// [`chunks_exact_mut`]: ../../std/primitive.slice.html#method.chunks_exact_mut
/// [`into_remainder`]: ../../std/slice/struct.ChunksExactMut.html#method.into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExactMut<'a, T: 'a> {
// Remaining, not-yet-yielded part of the slice (full chunks only).
v: &'a mut [T],
// Tail of the original slice that does not fill a whole chunk.
rem: &'a mut [T],
// Exact length of every yielded chunk.
chunk_size: usize,
}
impl<'a, T> ChunksExactMut<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn into_remainder(self) -> &'a mut [T] {
// Consumes `self` because handing out `&'a mut` while the iterator is
// still usable would alias the mutable borrow.
self.rem
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExactMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
// Only whole chunks are yielded; a short tail lives in `self.rem`.
if self.v.len() < self.chunk_size {
None
} else {
// Move the slice out of `self.v` so the returned chunk can borrow
// for the full `'a` rather than this `&mut self` call.
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(self.chunk_size);
self.v = tail;
Some(head)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: integer division counts only the full chunks left.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// Skip `n` whole chunks, then delegate to `next` for the n-th one.
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let (_, snd) = tmp.split_at_mut(start);
self.v = snd;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Split exactly one full chunk off the back.
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
self.v = head;
Some(tail)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (head, nth_back) = temp.split_at_mut(start);
self.v = head;
Some(nth_back)
}
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExactMut<'_, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExactMut<'_, T> {}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExactMut<'_, T> {}
#[doc(hidden)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
let start = i * self.chunk_size;
// SAFETY: see comments for `ChunksMut::get_unchecked`; the original
// comment referenced this very method, which was circular.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `N-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`array_chunks`] method on [slices].
///
/// [`array_chunks`]: ../../std/primitive.slice.html#method.array_chunks
/// [`remainder`]: ../../std/slice/struct.ArrayChunks.html#method.remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
pub struct ArrayChunks<'a, T: 'a, const N: usize> {
// Reuses the plain slice iterator over `[T; N]` array references; the
// chunk arithmetic is done once when the slice is reinterpreted.
iter: Iter<'a, [T; N]>,
// Tail of the original slice that does not fill a whole `[T; N]`.
rem: &'a [T],
}
impl<'a, T, const N: usize> ArrayChunks<'a, T, N> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `N-1`
/// elements.
#[unstable(feature = "array_chunks", issue = "74985")]
pub fn remainder(&self) -> &'a [T] {
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> Clone for ArrayChunks<'_, T, N> {
    /// Duplicates the iterator: clones the inner array iterator and copies
    /// the (shared) remainder slice reference.
    fn clone(&self) -> Self {
        Self { iter: self.iter.clone(), rem: self.rem }
    }
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> {
type Item = &'a [T; N];
// Every method simply forwards to the underlying `Iter<'a, [T; N]>`.
#[inline]
fn next(&mut self) -> Option<&'a [T; N]> {
self.iter.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
#[inline]
fn count(self) -> usize {
self.iter.count()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth(n)
}
#[inline]
fn last(self) -> Option<Self::Item> {
self.iter.last()
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> {
// Forwards to the underlying `Iter<'a, [T; N]>`.
#[inline]
fn next_back(&mut self) -> Option<&'a [T; N]> {
self.iter.next_back()
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth_back(n)
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> ExactSizeIterator for ArrayChunks<'_, T, N> {
fn is_empty(&self) -> bool {
self.iter.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, const N: usize> TrustedLen for ArrayChunks<'_, T, N> {}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> FusedIterator for ArrayChunks<'_, T, N> {}
#[doc(hidden)]
#[unstable(feature = "array_chunks", issue = "74985")]
unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T; N] {
// SAFETY: the caller upholds the same contract that the inner
// `Iter::get_unchecked` requires (`i` in bounds).
unsafe { self.iter.get_unchecked(i) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks`] method on [slices].
///
/// [`rchunks`]: ../../std/primitive.slice.html#method.rchunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunks<'a, T: 'a> {
// Remaining part of the slice; chunks are taken from its *end*.
v: &'a [T],
// Length of each yielded chunk; the final (front-most) chunk may be shorter.
chunk_size: usize,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> Clone for RChunks<'_, T> {
    /// Duplicates the iterator by copying both fields (a shared slice
    /// reference and a `usize`), which are trivially copyable.
    fn clone(&self) -> Self {
        let Self { v, chunk_size } = *self;
        Self { v, chunk_size }
    }
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunks<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
// Take the next chunk from the *end* of the slice; the very last
// chunk yielded (at the front) may be shorter than `chunk_size`.
let chunksz = cmp::min(self.v.len(), self.chunk_size);
let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
self.v = fst;
Some(snd)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
// Round up: a non-empty remainder still forms one (short) chunk.
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
// `end` is measured from the back of the slice (distance skipped).
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &[];
None
} else {
// Can't underflow because of the check above
let end = self.v.len() - end;
// The front-most chunk may be shorter, hence the saturating start.
let start = match end.checked_sub(self.chunk_size) {
Some(sum) => sum,
None => 0,
};
let nth = &self.v[start..end];
self.v = &self.v[0..start];
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
// The last chunk yielded is the front of the slice: the remainder
// if the length does not divide evenly, else a full chunk.
let rem = self.v.len() % self.chunk_size;
let end = if rem == 0 { self.chunk_size } else { rem };
Some(&self.v[0..end])
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunks<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
// Iterating an rchunks backwards means going front-to-back, so the
// short remainder chunk (if any) is yielded first.
let remainder = self.v.len() % self.chunk_size;
let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// can't underflow because `n < len`
let offset_from_end = (len - 1 - n) * self.chunk_size;
let end = self.v.len() - offset_from_end;
// `saturating_sub` handles the short front-most chunk.
let start = end.saturating_sub(self.chunk_size);
let nth_back = &self.v[start..end];
self.v = &self.v[end..];
Some(nth_back)
}
}
}
// `size_hint` is exact and the iterator never resumes after `None`.
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunks<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunks<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunks<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
// Chunk `i` counts from the back; the front-most chunk may be short,
// hence the checked subtraction for `start`.
let end = self.v.len() - i * self.chunk_size;
let start = match end.checked_sub(self.chunk_size) {
None => 0,
Some(start) => start,
};
// SAFETY: mostly identical to `Chunks::get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks_mut`] method on [slices].
///
/// [`rchunks_mut`]: ../../std/primitive.slice.html#method.rchunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksMut<'a, T: 'a> {
// Remaining part of the slice; chunks are taken from its *end*.
v: &'a mut [T],
// Length of each yielded chunk; the final (front-most) chunk may be shorter.
chunk_size: usize,
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
// Take the next chunk from the *end*; move the slice out of
// `self.v` so the returned chunk borrows for the full `'a`.
let sz = cmp::min(self.v.len(), self.chunk_size);
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
// Round up: a non-empty remainder still forms one (short) chunk.
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// `end` is measured from the back of the slice (distance skipped).
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// Can't underflow because of the check above
let end = self.v.len() - end;
// The front-most chunk may be shorter, hence the checked start.
let start = match end.checked_sub(self.chunk_size) {
Some(sum) => sum,
None => 0,
};
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(start);
let (nth, _) = tail.split_at_mut(end - start);
self.v = head;
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
// The last chunk yielded is the front of the slice.
let rem = self.v.len() % self.chunk_size;
let end = if rem == 0 { self.chunk_size } else { rem };
Some(&mut self.v[0..end])
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
// Going backwards means front-to-back, so the short remainder chunk
// (if any) is yielded first.
let remainder = self.v.len() % self.chunk_size;
let sz = if remainder != 0 { remainder } else { self.chunk_size };
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// can't underflow because `n < len`
let offset_from_end = (len - 1 - n) * self.chunk_size;
let end = self.v.len() - offset_from_end;
// `saturating_sub` handles the short front-most chunk.
let start = end.saturating_sub(self.chunk_size);
let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (_, nth_back) = tmp.split_at_mut(start);
self.v = tail;
Some(nth_back)
}
}
}
// `size_hint` is exact and the iterator never resumes after `None`.
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksMut<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksMut<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
// Chunk `i` counts from the back; the front-most chunk may be short.
let end = self.v.len() - i * self.chunk_size;
let start = match end.checked_sub(self.chunk_size) {
None => 0,
Some(start) => start,
};
// SAFETY: see comments for `RChunks::get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact`] method on [slices].
///
/// [`rchunks_exact`]: ../../std/primitive.slice.html#method.rchunks_exact
/// [`remainder`]: ../../std/slice/struct.RChunksExact.html#method.remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExact<'a, T: 'a> {
// Remaining part of the slice (full chunks only, taken from the end).
v: &'a [T],
// Front of the original slice that does not fill a whole chunk.
rem: &'a [T],
// Exact length of every yielded chunk.
chunk_size: usize,
}
impl<'a, T> RChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
// Plain accessor: the remainder is stored in `rem` and never recomputed.
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Clone for RChunksExact<'a, T> {
    /// Duplicates the iterator by copying each field; the shared-slice
    /// references and the `usize` chunk size are all trivially copyable.
    fn clone(&self) -> RChunksExact<'a, T> {
        let RChunksExact { v, rem, chunk_size } = *self;
        RChunksExact { v, rem, chunk_size }
    }
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExact<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
// Only whole chunks are yielded, taken from the *end*; a short front
// lives in `self.rem`.
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
self.v = fst;
Some(snd)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: integer division counts only the full chunks left.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
// Skip `n` whole chunks from the back, then delegate to `next`.
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &[];
None
} else {
let (fst, _) = self.v.split_at(self.v.len() - end);
self.v = fst;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExact<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Going backwards means front-to-back: split one full chunk off
// the front.
let (fst, snd) = self.v.split_at(self.chunk_size);
self.v = snd;
Some(fst)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let offset = (len - n) * self.chunk_size;
let start = self.v.len() - offset;
let end = start + self.chunk_size;
let nth_back = &self.v[start..end];
self.v = &self.v[end..];
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExact<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExact<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
// Chunk `i` counts from the back; all chunks are exactly `chunk_size`.
let end = self.v.len() - i * self.chunk_size;
let start = end - self.chunk_size;
// SAFETY: mostly identical to `Chunks::get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact_mut`] method on [slices].
///
/// [`rchunks_exact_mut`]: ../../std/primitive.slice.html#method.rchunks_exact_mut
/// [`into_remainder`]: ../../std/slice/struct.RChunksExactMut.html#method.into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExactMut<'a, T: 'a> {
// Remaining part of the slice (full chunks only, taken from the end).
v: &'a mut [T],
// Front of the original slice that does not fill a whole chunk.
rem: &'a mut [T],
// Exact length of every yielded chunk.
chunk_size: usize,
}
impl<'a, T> RChunksExactMut<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn into_remainder(self) -> &'a mut [T] {
// Consumes `self` to avoid aliasing the mutable borrow.
self.rem
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExactMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
// Only whole chunks are yielded, taken from the *end*; a short front
// lives in `self.rem`. `mem::replace` moves the slice out so the
// returned chunk can borrow for the full `'a`.
if self.v.len() < self.chunk_size {
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
self.v = head;
Some(tail)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: integer division counts only the full chunks left.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// Skip `n` whole chunks from the back, then delegate to `next`.
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (fst, _) = tmp.split_at_mut(tmp_len - end);
self.v = fst;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Going backwards means front-to-back: split one full chunk off
// the front.
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(self.chunk_size);
self.v = tail;
Some(head)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let offset = (len - n) * self.chunk_size;
let start = self.v.len() - offset;
let end = start + self.chunk_size;
let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (_, nth_back) = tmp.split_at_mut(start);
self.v = tail;
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksExactMut<'_, T> {
fn is_empty(&self) -> bool {
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExactMut<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExactMut<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {
/// # Safety
/// `i` must be in-bounds for this iterator, i.e. `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
// Chunk `i` counts from the back; all chunks are exactly `chunk_size`.
let end = self.v.len() - i * self.chunk_size;
let start = end - self.chunk_size;
// SAFETY: see comments for `RChunksExact::get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
//
// Free functions
//
/// Forms a slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be [valid] for reads for `len * mem::size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects. See [below](#incorrect-usage)
/// for an example incorrectly not taking this into account.
/// * `data` must be non-null and aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
/// (including slices of any length) being aligned and non-null to distinguish
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The memory referenced by the returned slice must not be mutated for the duration
/// of lifetime `'a`, except inside an `UnsafeCell`.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// # Caveat
///
/// The lifetime for the returned slice is inferred from its usage. To
/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
/// source lifetime is safe in the context, such as by providing a helper
/// function taking the lifetime of a host value for the slice, or by explicit
/// annotation.
///
/// # Examples
///
/// ```
/// use std::slice;
///
/// // manifest a slice for a single element
/// let x = 42;
/// let ptr = &x as *const _;
/// let slice = unsafe { slice::from_raw_parts(ptr, 1) };
/// assert_eq!(slice[0], 42);
/// ```
///
/// ### Incorrect usage
///
/// The following `join_slices` function is **unsound** ⚠️
///
/// ```rust,no_run
/// use std::slice;
///
/// fn join_slices<'a, T>(fst: &'a [T], snd: &'a [T]) -> &'a [T] {
/// let fst_end = fst.as_ptr().wrapping_add(fst.len());
/// let snd_start = snd.as_ptr();
/// assert_eq!(fst_end, snd_start, "Slices must be contiguous!");
/// unsafe {
/// // The assertion above ensures `fst` and `snd` are contiguous, but they might
/// // still be contained within _different allocated objects_, in which case
/// // creating this slice is undefined behavior.
/// slice::from_raw_parts(fst.as_ptr(), fst.len() + snd.len())
/// }
/// }
///
/// fn main() {
/// // `a` and `b` are different allocated objects...
/// let a = 42;
/// let b = 27;
/// // ... which may nevertheless be laid out contiguously in memory: | a | b |
/// let _ = join_slices(slice::from_ref(&a), slice::from_ref(&b)); // UB
/// }
/// ```
///
/// [valid]: ../../std/ptr/index.html#safety
/// [`NonNull::dangling()`]: ../../std/ptr/struct.NonNull.html#method.dangling
/// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// These checks only run when debug assertions are enabled; they catch the
// most common contract violations but do NOT make the function safe.
debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
debug_assert!(
mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
"attempt to create slice covering at least half the address space"
);
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe { &*ptr::slice_from_raw_parts(data, len) }
}
/// Performs the same functionality as [`from_raw_parts`], except that a
/// mutable slice is returned.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * `data` must be non-null and aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
/// (including slices of any length) being aligned and non-null to distinguish
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The memory referenced by the returned slice must not be accessed through any other pointer
/// (not derived from the return value) for the duration of lifetime `'a`.
/// Both read and write accesses are forbidden.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// [valid]: ../../std/ptr/index.html#safety
/// [`NonNull::dangling()`]: ../../std/ptr/struct.NonNull.html#method.dangling
/// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
/// [`from_raw_parts`]: ../../std/slice/fn.from_raw_parts.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// Debug-only sanity checks; they do NOT make the function safe.
debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
debug_assert!(
mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
"attempt to create slice covering at least half the address space"
);
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe { &mut *ptr::slice_from_raw_parts_mut(data, len) }
}
/// Converts a reference to T into a slice of length 1 (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
pub fn from_ref<T>(s: &T) -> &[T] {
// SAFETY: a shared reference is non-null, properly aligned, and valid for
// reads of exactly one `T` for its whole lifetime.
unsafe { from_raw_parts(s, 1) }
}
/// Converts a mutable reference to T into a mutable slice of length 1 (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
pub fn from_mut<T>(s: &mut T) -> &mut [T] {
// SAFETY: a mutable reference is non-null, properly aligned, unique, and
// valid for reads and writes of exactly one `T` for its whole lifetime.
unsafe { from_raw_parts_mut(s, 1) }
}
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
#[doc(hidden)]
/// Sorts `v` in place according to `is_less` by delegating to the internal
/// heapsort implementation in the `sort` submodule.
pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
sort::heapsort(v, &mut is_less);
}
//
// Comparison traits
//
// Foreign binding to the platform C library's `memcmp`, used as the fast
// path by the specialized slice comparison impls below.
extern "C" {
/// Calls implementation provided memcmp.
///
/// Interprets the data as u8.
///
/// Returns 0 for equal, < 0 for less than and > 0 for greater
/// than.
// FIXME(#32610): Return type should be c_int
fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> PartialEq<[B]> for [A]
where
A: PartialEq<B>,
{
// Delegates to the `SlicePartialEq` helper trait so a specialized
// implementation (e.g. memcmp-based) can be selected per element type.
fn eq(&self, other: &[B]) -> bool {
SlicePartialEq::equal(self, other)
}
fn ne(&self, other: &[B]) -> bool {
SlicePartialEq::not_equal(self, other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
// Marker impl: slice equality is a full equivalence relation when `T: Eq`.
impl<T: Eq> Eq for [T] {}
/// Implements comparison of vectors lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for [T] {
// Delegates to `SliceOrd` so `[u8]` can take the memcmp fast path.
fn cmp(&self, other: &[T]) -> Ordering {
SliceOrd::compare(self, other)
}
}
/// Implements comparison of vectors lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for [T] {
// Delegates to `SlicePartialOrd` so specialized total-order impls apply.
fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
SlicePartialOrd::partial_compare(self, other)
}
}
#[doc(hidden)]
// intermediate trait for specialization of slice's PartialEq
trait SlicePartialEq<B> {
fn equal(&self, other: &[B]) -> bool;
// Default `ne` in terms of `eq`; specializations only need to provide `equal`.
fn not_equal(&self, other: &[B]) -> bool {
!self.equal(other)
}
}
// Generic slice equality
impl<A, B> SlicePartialEq<B> for [A]
where
A: PartialEq<B>,
{
// `default` so the `Eq` and bytewise impls below can specialize it.
default fn equal(&self, other: &[B]) -> bool {
// Slices of different lengths can never be equal.
if self.len() != other.len() {
return false;
}
self.iter().zip(other.iter()).all(|(x, y)| x == y)
}
}
// Use an equal-pointer optimization when types are `Eq`
// (sound because `Eq` guarantees reflexivity: a value equals itself).
impl<A> SlicePartialEq<A> for [A]
where
A: PartialEq<A> + Eq,
{
// Still `default` so the bytewise (memcmp) impl below can specialize further.
default fn equal(&self, other: &[A]) -> bool {
if self.len() != other.len() {
return false;
}
// While performance would suffer if `guaranteed_eq` just returned `false`
// for all arguments, correctness and return value of this function are not affected.
if self.as_ptr().guaranteed_eq(other.as_ptr()) {
return true;
}
self.iter().zip(other.iter()).all(|(x, y)| x == y)
}
}
// Use memcmp for bytewise equality when the types allow
impl<A> SlicePartialEq<A> for [A]
where
A: PartialEq<A> + BytewiseEquality,
{
fn equal(&self, other: &[A]) -> bool {
if self.len() != other.len() {
return false;
}
// While performance would suffer if `guaranteed_eq` just returned `false`
// for all arguments, correctness and return value of this function are not affected.
if self.as_ptr().guaranteed_eq(other.as_ptr()) {
return true;
}
// SAFETY: the lengths were checked equal above, so both slices occupy
// `size` bytes and both pointers are valid for reads of that many bytes.
unsafe {
let size = mem::size_of_val(self);
memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
}
}
}
#[doc(hidden)]
// intermediate trait for specialization of slice's PartialOrd
trait SlicePartialOrd: Sized {
fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}
impl<A: PartialOrd> SlicePartialOrd for A {
default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
let l = cmp::min(left.len(), right.len());
// Slice to the loop iteration range to enable bound check
// elimination in the compiler
let lhs = &left[..l];
let rhs = &right[..l];
for i in 0..l {
match lhs[i].partial_cmp(&rhs[i]) {
Some(Ordering::Equal) => (),
non_eq => return non_eq,
}
}
// Common prefix is equal: the shorter slice compares less.
left.len().partial_cmp(&right.len())
}
}
// This is the impl that we would like to have. Unfortunately it's not sound.
// See `partial_ord_slice.rs`.
/*
impl<A> SlicePartialOrd for A
where
A: Ord,
{
default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
Some(SliceOrd::compare(left, right))
}
}
*/
// For types marked `AlwaysApplicableOrd`, `Ord` and `PartialOrd` agree on
// every value, so partial comparison can delegate to total comparison.
impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
Some(SliceOrd::compare(left, right))
}
}
// Marker trait for types whose `PartialOrd` is total (never `None`), making
// the `SliceOrd`-based specialization above sound.
trait AlwaysApplicableOrd: SliceOrd + Ord {}
// Emits an `AlwaysApplicableOrd` impl for each listed type; the bracketed
// `$p` tokens become the impl's generic parameters (empty for concrete types).
macro_rules! always_applicable_ord {
($([$($p:tt)*] $t:ty,)*) => {
$(impl<$($p)*> AlwaysApplicableOrd for $t {})*
}
}
// Primitive integers, bool, char, raw pointers, and references/Options of
// already-marked types; floating-point types are deliberately absent.
always_applicable_ord! {
[] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
[] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
[] bool, [] char,
[T: ?Sized] *const T, [T: ?Sized] *mut T,
[T: AlwaysApplicableOrd] &T,
[T: AlwaysApplicableOrd] &mut T,
[T: AlwaysApplicableOrd] Option<T>,
}
#[doc(hidden)]
// intermediate trait for specialization of slice's Ord
trait SliceOrd: Sized {
fn compare(left: &[Self], right: &[Self]) -> Ordering;
}
impl<A: Ord> SliceOrd for A {
default fn compare(left: &[Self], right: &[Self]) -> Ordering {
let l = cmp::min(left.len(), right.len());
// Slice to the loop iteration range to enable bound check
// elimination in the compiler
let lhs = &left[..l];
let rhs = &right[..l];
for i in 0..l {
match lhs[i].cmp(&rhs[i]) {
Ordering::Equal => (),
non_eq => return non_eq,
}
}
// Common prefix is equal: the shorter slice compares less.
left.len().cmp(&right.len())
}
}
// memcmp compares a sequence of unsigned bytes lexicographically.
// this matches the order we want for [u8], but no others (not even [i8]).
impl SliceOrd for u8 {
#[inline]
fn compare(left: &[Self], right: &[Self]) -> Ordering {
// SAFETY: both pointers are valid for reads of
// `min(left.len(), right.len())` bytes, which is what memcmp is given.
let order =
unsafe { memcmp(left.as_ptr(), right.as_ptr(), cmp::min(left.len(), right.len())) };
if order == 0 {
// Common prefix is equal: the shorter slice compares less.
left.len().cmp(&right.len())
} else if order < 0 {
Less
} else {
Greater
}
}
}
#[doc(hidden)]
/// Trait implemented for types that can be compared for equality using
/// their bytewise representation
trait BytewiseEquality: Eq + Copy {}
// Emits an empty impl of the given marker trait for each listed type.
macro_rules! impl_marker_for {
($traitname:ident, $($ty:ty)*) => {
$(
impl $traitname for $ty { }
)*
}
}
// Integer-like primitives whose equality coincides with comparing their
// byte representation; floating-point types are deliberately excluded.
impl_marker_for!(BytewiseEquality,
u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);
#[doc(hidden)]
// Allows zip/enumerate-style adapters to index directly into the slice.
unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a T {
// SAFETY: the caller must guarantee that `i` is in bounds
// of the underlying slice, so `i` cannot overflow an `isize`,
// and the returned references is guaranteed to refer to an element
// of the slice and thus guaranteed to be valid.
unsafe { &*self.ptr.as_ptr().add(i) }
}
fn may_have_side_effect() -> bool {
false
}
}
#[doc(hidden)]
// Mutable counterpart of the `Iter` impl above.
unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T {
// SAFETY: see comments for `Iter::get_unchecked`.
unsafe { &mut *self.ptr.as_ptr().add(i) }
}
fn may_have_side_effect() -> bool {
false
}
}
// Specialization helper for `contains`: answers "does slice `x` contain `self`?".
trait SliceContains: Sized {
fn slice_contains(&self, x: &[Self]) -> bool;
}
impl<T> SliceContains for T
where
T: PartialEq,
{
// Generic fallback: linear scan with `==`.
default fn slice_contains(&self, x: &[Self]) -> bool {
x.iter().any(|y| *y == *self)
}
}
impl SliceContains for u8 {
// Byte search via the optimized memchr implementation.
fn slice_contains(&self, x: &[Self]) -> bool {
memchr::memchr(*self, x).is_some()
}
}
impl SliceContains for i8 {
fn slice_contains(&self, x: &[Self]) -> bool {
let byte = *self as u8;
// SAFETY: `[i8]` and `[u8]` have identical length, size, and alignment,
// so reinterpreting the pointer is valid for `x.len()` bytes.
let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
memchr::memchr(byte, bytes).is_some()
}
}
Remove branch in optimized is_ascii
Performs slightly better on short or medium byte strings by eliminating
the last branch check on `byte_pos == len` and instead always checking the
last bytes directly, since the tail is always at most one `usize` wide.
Benchmarks: `libcore` is before the change, `libcore_new` is after.
The change improves medium and short by 1 ns but regresses unaligned_tail
by 2 ns; however, the unaligned_tail case only has a small chance (1/8
on a 64-bit machine) of occurring. I don't think we should bet on that —
the probability is worse than a dice roll.
test long::case00_libcore ... bench: 38 ns/iter (+/- 1) = 183947 MB/s
test long::case00_libcore_new ... bench: 38 ns/iter (+/- 1) = 183947 MB/s
test long::case01_iter_all ... bench: 227 ns/iter (+/- 6) = 30792 MB/s
test long::case02_align_to ... bench: 40 ns/iter (+/- 1) = 174750 MB/s
test long::case03_align_to_unrolled ... bench: 19 ns/iter (+/- 1) = 367894 MB/s
test medium::case00_libcore ... bench: 5 ns/iter (+/- 0) = 6400 MB/s
test medium::case00_libcore_new ... bench: 4 ns/iter (+/- 0) = 8000 MB/s
test medium::case01_iter_all ... bench: 20 ns/iter (+/- 1) = 1600 MB/s
test medium::case02_align_to ... bench: 6 ns/iter (+/- 0) = 5333 MB/s
test medium::case03_align_to_unrolled ... bench: 5 ns/iter (+/- 0) = 6400 MB/s
test short::case00_libcore ... bench: 7 ns/iter (+/- 0) = 1000 MB/s
test short::case00_libcore_new ... bench: 6 ns/iter (+/- 0) = 1166 MB/s
test short::case01_iter_all ... bench: 5 ns/iter (+/- 0) = 1400 MB/s
test short::case02_align_to ... bench: 5 ns/iter (+/- 0) = 1400 MB/s
test short::case03_align_to_unrolled ... bench: 5 ns/iter (+/- 1) = 1400 MB/s
test unaligned_both::case00_libcore ... bench: 4 ns/iter (+/- 0) = 7500 MB/s
test unaligned_both::case00_libcore_new ... bench: 4 ns/iter (+/- 0) = 7500 MB/s
test unaligned_both::case01_iter_all ... bench: 26 ns/iter (+/- 0) = 1153 MB/s
test unaligned_both::case02_align_to ... bench: 13 ns/iter (+/- 2) = 2307 MB/s
test unaligned_both::case03_align_to_unrolled ... bench: 11 ns/iter (+/- 0) = 2727 MB/s
test unaligned_head::case00_libcore ... bench: 5 ns/iter (+/- 0) = 6200 MB/s
test unaligned_head::case00_libcore_new ... bench: 5 ns/iter (+/- 0) = 6200 MB/s
test unaligned_head::case01_iter_all ... bench: 19 ns/iter (+/- 1) = 1631 MB/s
test unaligned_head::case02_align_to ... bench: 10 ns/iter (+/- 0) = 3100 MB/s
test unaligned_head::case03_align_to_unrolled ... bench: 14 ns/iter (+/- 0) = 2214 MB/s
test unaligned_tail::case00_libcore ... bench: 3 ns/iter (+/- 0) = 10333 MB/s
test unaligned_tail::case00_libcore_new ... bench: 5 ns/iter (+/- 0) = 6200 MB/s
test unaligned_tail::case01_iter_all ... bench: 19 ns/iter (+/- 0) = 1631 MB/s
test unaligned_tail::case02_align_to ... bench: 10 ns/iter (+/- 0) = 3100 MB/s
test unaligned_tail::case03_align_to_unrolled ... bench: 13 ns/iter (+/- 0) = 2384 MB/s
Rough (unfair) maths on improvements for fun: 1ns * 7/8 - 2ns * 1/8 = 0.625ns
Inspired by fish's and zsh's clever trick of highlighting missing linefeeds (⏎),
and by the branchless implementation of `binary_search` in Rust.
// ignore-tidy-filelength
// ignore-tidy-undocumented-unsafe
//! Slice management and manipulation.
//!
//! For more details see [`std::slice`].
//!
//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
// How this module is organized.
//
// The library infrastructure for slices is fairly messy. There's
// a lot of stuff defined here. Let's keep it clean.
//
// The layout of this file is thus:
//
// * Inherent methods. This is where most of the slice API resides.
// * Implementations of a few common traits with important slice ops.
// * Definitions of a bunch of iterators.
// * Free functions.
// * The `raw` and `bytes` submodules.
// * Boilerplate trait implementations.
use crate::cmp;
use crate::cmp::Ordering::{self, Equal, Greater, Less};
use crate::fmt;
use crate::intrinsics::{assume, exact_div, is_aligned_and_not_null, unchecked_sub};
use crate::iter::*;
use crate::marker::{self, Copy, Send, Sized, Sync};
use crate::mem;
use crate::ops::{self, FnMut, Range};
use crate::option::Option;
use crate::option::Option::{None, Some};
use crate::ptr::{self, NonNull};
use crate::result::Result;
use crate::result::Result::{Err, Ok};
#[unstable(
feature = "slice_internals",
issue = "none",
reason = "exposed from core to be reused in std; use the memchr crate"
)]
/// Pure rust memchr implementation, taken from rust-memchr
pub mod memchr;
mod rotate;
mod sort;
//
// Extension traits
//
#[lang = "slice"]
#[cfg(not(test))]
impl<T> [T] {
/// Returns the number of elements in the slice.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert_eq!(a.len(), 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.32.0")]
#[inline]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
#[allow(unused_attributes)]
#[allow_internal_unstable(const_fn_union)]
pub const fn len(&self) -> usize {
// Reads the length word of the fat pointer via the `Repr` union.
unsafe { crate::ptr::Repr { rust: self }.raw.len }
}
/// Returns `true` if the slice has a length of 0.
///
/// # Examples
///
/// ```
/// let a = [1, 2, 3];
/// assert!(!a.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.32.0")]
#[inline]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns a reference to the first element of the slice, or `None` when
/// the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&10), v.first());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.first());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first(&self) -> Option<&T> {
    // Exhaustive match on the slice pattern: either there is a head element,
    // or the slice is empty.
    match self {
        [head, ..] => Some(head),
        [] => None,
    }
}
/// Returns a mutable reference to the first element of the slice, or `None`
/// when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(first) = x.first_mut() {
///     *first = 5;
/// }
/// assert_eq!(x, &[5, 1, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn first_mut(&mut self) -> Option<&mut T> {
    // Same shape as `first`, but matching through `&mut self` yields a
    // mutable borrow of the head element.
    match self {
        [head, ..] => Some(head),
        [] => None,
    }
}
/// Splits the slice into its first element and the remaining elements,
/// returning `None` when the slice is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first() {
///     assert_eq!(first, &0);
///     assert_eq!(elements, &[1, 2]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first(&self) -> Option<(&T, &[T])> {
    // Bind the head and the rest via an exhaustive slice-pattern match.
    match self {
        [head, rest @ ..] => Some((head, rest)),
        [] => None,
    }
}
/// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((first, elements)) = x.split_first_mut() {
/// *first = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[3, 4, 5]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
// Slice pattern splits the mutable borrow into two disjoint borrows.
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &[0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last() {
/// assert_eq!(last, &2);
/// assert_eq!(elements, &[0, 1]);
/// }
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last(&self) -> Option<(&T, &[T])> {
// `init` binds everything before the final element.
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some((last, elements)) = x.split_last_mut() {
/// *last = 3;
/// elements[0] = 4;
/// elements[1] = 5;
/// }
/// assert_eq!(x, &[4, 5, 3]);
/// ```
#[stable(feature = "slice_splits", since = "1.5.0")]
#[inline]
pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
// Slice pattern splits the mutable borrow into two disjoint borrows.
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
/// Returns a reference to the last element of the slice, or `None` when
/// the slice is empty.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&30), v.last());
///
/// let w: &[i32] = &[];
/// assert_eq!(None, w.last());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last(&self) -> Option<&T> {
    // Exhaustive match: either there is a tail element, or the slice is empty.
    match self {
        [.., tail] => Some(tail),
        [] => None,
    }
}
/// Returns a mutable reference to the last item in the slice.
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(last) = x.last_mut() {
/// *last = 10;
/// }
/// assert_eq!(x, &[0, 1, 10]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn last_mut(&mut self) -> Option<&mut T> {
if let [.., last] = self { Some(last) } else { None }
}
/// Returns a reference to an element or subslice depending on the type of
/// index.
///
/// - If given a position, returns a reference to the element at that
/// position or `None` if out of bounds.
/// - If given a range, returns the subslice corresponding to that range,
/// or `None` if out of bounds.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert_eq!(Some(&40), v.get(1));
/// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
/// assert_eq!(None, v.get(3));
/// assert_eq!(None, v.get(0..4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: SliceIndex<Self>,
{
// The `SliceIndex` impl performs the bounds check and produces `None` on failure.
index.get(self)
}
/// Returns a mutable reference to an element or subslice depending on the
/// type of index (see [`get`]) or `None` if the index is out of bounds.
///
/// [`get`]: #method.get
///
/// # Examples
///
/// ```
/// let x = &mut [0, 1, 2];
///
/// if let Some(elem) = x.get_mut(1) {
/// *elem = 42;
/// }
/// assert_eq!(x, &[0, 42, 2]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: SliceIndex<Self>,
{
// The `SliceIndex` impl performs the bounds check and produces `None` on failure.
index.get_mut(self)
}
/// Returns a reference to an element or subslice, without doing bounds
/// checking.
///
/// This is generally not recommended, use with caution!
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
/// For a safe alternative see [`get`].
///
/// [`get`]: #method.get
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
///
/// unsafe {
/// assert_eq!(x.get_unchecked(1), &2);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: SliceIndex<Self>,
{
// SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*index.get_unchecked(self) }
}
/// Returns a mutable reference to an element or subslice, without doing
/// bounds checking.
///
/// This is generally not recommended, use with caution!
/// Calling this method with an out-of-bounds index is *[undefined behavior]*
/// even if the resulting reference is not used.
/// For a safe alternative see [`get_mut`].
///
/// [`get_mut`]: #method.get_mut
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
///
/// unsafe {
/// let elem = x.get_unchecked_mut(1);
/// *elem = 13;
/// }
/// assert_eq!(x, &[1, 13, 4]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: SliceIndex<Self>,
{
// SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &mut *index.get_unchecked_mut(self) }
}
/// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// The caller must also ensure that the memory the pointer (non-transitively) points to
/// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
/// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let x_ptr = x.as_ptr();
///
/// unsafe {
/// for i in 0..x.len() {
/// assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
/// }
/// }
/// ```
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
pub const fn as_ptr(&self) -> *const T {
// Casting the fat slice pointer to a thin pointer keeps the data address.
self as *const [T] as *const T
}
/// Returns an unsafe mutable pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the container referenced by this slice may cause its buffer
/// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// let x_ptr = x.as_mut_ptr();
///
/// unsafe {
/// for i in 0..x.len() {
/// *x_ptr.add(i) += 2;
/// }
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
// Casting the fat slice pointer to a thin pointer keeps the data address.
self as *mut [T] as *mut T
}
/// Returns the two raw pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_ptr`] for warnings on using these pointers. The end pointer
/// requires extra caution, as it does not point to a valid element in the
/// slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// It can also be useful to check if a pointer to an element refers to an
/// element of this slice:
///
/// ```
/// #![feature(slice_ptr_range)]
///
/// let a = [1, 2, 3];
/// let x = &a[1] as *const _;
/// let y = &5 as *const _;
///
/// assert!(a.as_ptr_range().contains(&x));
/// assert!(!a.as_ptr_range().contains(&y));
/// ```
///
/// [`as_ptr`]: #method.as_ptr
#[unstable(feature = "slice_ptr_range", issue = "65807")]
#[inline]
pub fn as_ptr_range(&self) -> Range<*const T> {
// The `add` here is safe, because:
//
// - Both pointers are part of the same object, as pointing directly
// past the object also counts.
//
// - The size of the slice is never larger than isize::MAX bytes, as
// noted here:
// - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
// - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
// - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
// (This doesn't seem normative yet, but the very same assumption is
// made in many places, including the Index implementation of slices.)
//
// - There is no wrapping around involved, as slices do not wrap past
// the end of the address space.
//
// See the documentation of pointer::add.
let start = self.as_ptr();
// SAFETY: justified by the reasoning above.
let end = unsafe { start.add(self.len()) };
start..end
}
/// Returns the two unsafe mutable pointers spanning the slice.
///
/// The returned range is half-open, which means that the end pointer
/// points *one past* the last element of the slice. This way, an empty
/// slice is represented by two equal pointers, and the difference between
/// the two pointers represents the size of the slice.
///
/// See [`as_mut_ptr`] for warnings on using these pointers. The end
/// pointer requires extra caution, as it does not point to a valid element
/// in the slice.
///
/// This function is useful for interacting with foreign interfaces which
/// use two pointers to refer to a range of elements in memory, as is
/// common in C++.
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[unstable(feature = "slice_ptr_range", issue = "65807")]
#[inline]
pub fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
// See as_ptr_range() above for why `add` here is safe.
let start = self.as_mut_ptr();
// SAFETY: see as_ptr_range() above.
let end = unsafe { start.add(self.len()) };
start..end
}
/// Swaps two elements in the slice.
///
/// # Arguments
///
/// * a - The index of the first element
/// * b - The index of the second element
///
/// # Panics
///
/// Panics if `a` or `b` are out of bounds.
///
/// # Examples
///
/// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn swap(&mut self, a: usize, b: usize) {
// SAFETY: the indexing below bounds-checks `a` and `b` (panicking if out
// of bounds), so both raw pointers refer to valid elements; `ptr::swap`
// handles the `a == b` case correctly.
unsafe {
// Can't take two mutable loans from one vector, so instead just cast
// them to their raw pointers to do the swap
let pa: *mut T = &mut self[a];
let pb: *mut T = &mut self[b];
ptr::swap(pa, pb);
}
}
/// Reverses the order of elements in the slice, in place.
///
/// # Examples
///
/// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn reverse(&mut self) {
// `i` counts elements already reversed from each end.
let mut i: usize = 0;
let ln = self.len();
// For very small types, all the individual reads in the normal
// path perform poorly. We can do better, given efficient unaligned
// load/store, by loading a larger chunk and reversing a register.
// Ideally LLVM would do this for us, as it knows better than we do
// whether unaligned reads are efficient (since that changes between
// different ARM versions, for example) and what the best chunk size
// would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
// the loop, so we need to do this ourselves. (Hypothesis: reverse
// is troublesome because the sides can be aligned differently --
// will be, when the length is odd -- so there's no way of emitting
// pre- and postludes to use fully-aligned SIMD in the middle.)
let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
if fast_unaligned && mem::size_of::<T>() == 1 {
// Use the llvm.bswap intrinsic to reverse u8s in a usize
let chunk = mem::size_of::<usize>();
// Process whole `chunk`-sized groups from both ends at once.
while i + chunk - 1 < ln / 2 {
// SAFETY: the loop condition keeps both chunk reads/writes within
// the two non-overlapping halves of the slice.
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
let va = ptr::read_unaligned(pa as *mut usize);
let vb = ptr::read_unaligned(pb as *mut usize);
ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
}
i += chunk;
}
}
if fast_unaligned && mem::size_of::<T>() == 2 {
// Use rotate-by-16 to reverse u16s in a u32
let chunk = mem::size_of::<u32>() / 2;
while i + chunk - 1 < ln / 2 {
// SAFETY: same in-bounds argument as the byte-chunk loop above.
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
let va = ptr::read_unaligned(pa as *mut u32);
let vb = ptr::read_unaligned(pb as *mut u32);
ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
}
i += chunk;
}
}
// Scalar tail: swap remaining element pairs one at a time.
while i < ln / 2 {
// Unsafe swap to avoid the bounds check in safe swap.
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
ptr::swap(pa, pb);
}
i += 1;
}
}
/// Returns an iterator over the slice.
///
/// # Examples
///
/// ```
/// let x = &[1, 2, 4];
/// let mut iterator = x.iter();
///
/// assert_eq!(iterator.next(), Some(&1));
/// assert_eq!(iterator.next(), Some(&2));
/// assert_eq!(iterator.next(), Some(&4));
/// assert_eq!(iterator.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter(&self) -> Iter<'_, T> {
// SAFETY: a slice's data pointer is always non-null, and for non-ZSTs it
// is valid to offset by `len` elements (one past the end).
unsafe {
let ptr = self.as_ptr();
assume(!ptr.is_null());
// For zero-sized types `end` encodes the remaining length instead of
// an address, so use wrapping byte arithmetic.
let end = if mem::size_of::<T>() == 0 {
(ptr as *const u8).wrapping_add(self.len()) as *const T
} else {
ptr.add(self.len())
};
Iter { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: marker::PhantomData }
}
}
/// Returns an iterator that allows modifying each value.
///
/// # Examples
///
/// ```
/// let x = &mut [1, 2, 4];
/// for elem in x.iter_mut() {
/// *elem += 2;
/// }
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
// SAFETY: same reasoning as `iter` above, with a unique (mutable) pointer.
unsafe {
let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
// For zero-sized types `end` encodes the remaining length instead of
// an address, so use wrapping byte arithmetic.
let end = if mem::size_of::<T>() == 0 {
(ptr as *mut u8).wrapping_add(self.len()) as *mut T
} else {
ptr.add(self.len())
};
IterMut { ptr: NonNull::new_unchecked(ptr), end, _marker: marker::PhantomData }
}
}
/// Returns an iterator over all contiguous windows of length
/// `size`. The windows overlap. If the slice is shorter than
/// `size`, the iterator returns no values.
///
/// # Panics
///
/// Panics if `size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['r', 'u', 's', 't'];
/// let mut iter = slice.windows(2);
/// assert_eq!(iter.next().unwrap(), &['r', 'u']);
/// assert_eq!(iter.next().unwrap(), &['u', 's']);
/// assert_eq!(iter.next().unwrap(), &['s', 't']);
/// assert!(iter.next().is_none());
/// ```
///
/// If the slice is shorter than `size`:
///
/// ```
/// let slice = ['f', 'o', 'o'];
/// let mut iter = slice.windows(4);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn windows(&self, size: usize) -> Windows<'_, T> {
// A zero-sized window is meaningless; reject it eagerly.
assert_ne!(size, 0);
Windows { v: self, size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert_eq!(iter.next().unwrap(), &['m']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
/// [`rchunks`]: #method.rchunks
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
// A zero-sized chunk would loop forever; reject it eagerly.
assert_ne!(chunk_size, 0);
Chunks { v: self, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
/// the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 3]);
/// ```
///
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
/// [`rchunks_mut`]: #method.rchunks_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
// A zero-sized chunk would loop forever; reject it eagerly.
assert_ne!(chunk_size, 0);
ChunksMut { v: self, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.chunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks_exact`]: #method.rchunks_exact
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
    // Documented contract (`# Panics` above): a chunk size of zero panics.
    assert_ne!(chunk_size, 0);
    // Everything up to the largest multiple of `chunk_size` is yielded as
    // full chunks; whatever is left past that point becomes the remainder
    // reachable via the iterator's `remainder` method.
    let mid = self.len() - self.len() % chunk_size;
    let (fst, snd) = self.split_at(mid);
    ChunksExact { v: fst, rem: snd, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
/// the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.chunks_exact_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[1, 1, 2, 2, 0]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
#[stable(feature = "chunks_exact", since = "1.31.0")]
#[inline]
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
    // Documented contract (`# Panics` above): a chunk size of zero panics.
    assert_ne!(chunk_size, 0);
    // Largest prefix length divisible by `chunk_size`; the tail past it is
    // handed back through `into_remainder`.
    let mid = self.len() - self.len() % chunk_size;
    let (fst, snd) = self.split_at_mut(mid);
    ChunksExactMut { v: fst, rem: snd, chunk_size }
}
/// Returns an iterator over `N` elements of the slice at a time, starting at the
/// beginning of the slice.
///
/// The chunks are slices and do not overlap. If `N` does not divide the length of the
/// slice, then the last up to `N-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// This method is the const generic equivalent of [`chunks_exact`].
///
/// # Panics
///
/// Panics if `N` is 0. This check will most probably get changed to a compile time
/// error before this method gets stabilized.
///
/// # Examples
///
/// ```
/// #![feature(array_chunks)]
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.array_chunks();
/// assert_eq!(iter.next().unwrap(), &['l', 'o']);
/// assert_eq!(iter.next().unwrap(), &['r', 'e']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['m']);
/// ```
///
/// [`chunks_exact`]: #method.chunks_exact
#[unstable(feature = "array_chunks", issue = "74985")]
#[inline]
pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
    // Documented contract: `N == 0` panics (the docs above note this may
    // become a compile-time error before stabilization).
    assert_ne!(N, 0);
    // `len` is the number of complete `N`-element chunks; `snd` holds the
    // leftover tail of fewer than `N` elements.
    let len = self.len() / N;
    let (fst, snd) = self.split_at(len * N);
    // SAFETY: We cast a slice of `len * N` elements into
    // a slice of `len` many `N` elements chunks.
    let array_slice: &[[T; N]] = unsafe { from_raw_parts(fst.as_ptr().cast(), len) };
    ArrayChunks { iter: array_slice.iter(), rem: snd }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
/// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert_eq!(iter.next().unwrap(), &['l']);
/// assert!(iter.next().is_none());
/// ```
///
/// [`rchunks_exact`]: #method.rchunks_exact
/// [`chunks`]: #method.chunks
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
assert!(chunk_size != 0);
RChunks { v: self, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last chunk will not have length `chunk_size`.
///
/// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
/// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
/// beginning of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[3, 2, 2, 1, 1]);
/// ```
///
/// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
/// [`chunks_mut`]: #method.chunks_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
    // Documented contract (`# Panics` above): a chunk size of zero panics.
    // `assert_ne!` for consistency with the forward `chunks_mut`; it also
    // reports the offending value in the panic message.
    assert_ne!(chunk_size, 0);
    RChunksMut { v: self, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
/// end of the slice.
///
/// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
/// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
/// from the `remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks`].
///
/// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
/// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
/// slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let mut iter = slice.rchunks_exact(2);
/// assert_eq!(iter.next().unwrap(), &['e', 'm']);
/// assert_eq!(iter.next().unwrap(), &['o', 'r']);
/// assert!(iter.next().is_none());
/// assert_eq!(iter.remainder(), &['l']);
/// ```
///
/// [`chunks`]: #method.chunks
/// [`rchunks`]: #method.rchunks
/// [`chunks_exact`]: #method.chunks_exact
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
    // Documented contract (`# Panics` above): a chunk size of zero panics.
    // `assert_ne!` for consistency with the forward `chunks_exact`.
    assert_ne!(chunk_size, 0);
    // Working from the back of the slice: the first `rem` elements cannot
    // form a full chunk and become the `remainder`.
    let rem = self.len() % chunk_size;
    let (fst, snd) = self.split_at(rem);
    RChunksExact { v: snd, rem: fst, chunk_size }
}
/// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
/// of the slice.
///
/// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
/// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
/// retrieved from the `into_remainder` function of the iterator.
///
/// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
/// resulting code better than in the case of [`chunks_mut`].
///
/// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
/// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
/// of the slice.
///
/// # Panics
///
/// Panics if `chunk_size` is 0.
///
/// # Examples
///
/// ```
/// let v = &mut [0, 0, 0, 0, 0];
/// let mut count = 1;
///
/// for chunk in v.rchunks_exact_mut(2) {
/// for elem in chunk.iter_mut() {
/// *elem += count;
/// }
/// count += 1;
/// }
/// assert_eq!(v, &[0, 2, 2, 1, 1]);
/// ```
///
/// [`chunks_mut`]: #method.chunks_mut
/// [`rchunks_mut`]: #method.rchunks_mut
/// [`chunks_exact_mut`]: #method.chunks_exact_mut
#[stable(feature = "rchunks", since = "1.31.0")]
#[inline]
pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
    // Documented contract (`# Panics` above): a chunk size of zero panics.
    // `assert_ne!` for consistency with the forward `chunks_exact_mut`.
    assert_ne!(chunk_size, 0);
    // Working from the back of the slice: the first `rem` elements cannot
    // form a full chunk and become the remainder (`into_remainder`).
    let rem = self.len() % chunk_size;
    let (fst, snd) = self.split_at_mut(rem);
    RChunksExactMut { v: snd, rem: fst, chunk_size }
}
/// Divides one slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let v = [1, 2, 3, 4, 5, 6];
///
/// {
/// let (left, right) = v.split_at(0);
/// assert!(left == []);
/// assert!(right == [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(2);
/// assert!(left == [1, 2]);
/// assert!(right == [3, 4, 5, 6]);
/// }
///
/// {
/// let (left, right) = v.split_at(6);
/// assert!(left == [1, 2, 3, 4, 5, 6]);
/// assert!(right == []);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
    // The range indexing performs the `mid <= len` bounds check and panics
    // on violation, which satisfies the documented `# Panics` contract.
    (&self[..mid], &self[mid..])
}
/// Divides one mutable slice into two at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Panics
///
/// Panics if `mid > len`.
///
/// # Examples
///
/// ```
/// let mut v = [1, 0, 3, 0, 5, 6];
/// // scoped to restrict the lifetime of the borrows
/// {
/// let (left, right) = v.split_at_mut(2);
/// assert!(left == [1, 0]);
/// assert!(right == [3, 0, 5, 6]);
/// left[1] = 2;
/// right[1] = 4;
/// }
/// assert!(v == [1, 2, 3, 4, 5, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
    let len = self.len();
    let ptr = self.as_mut_ptr();
    // Check the precondition *outside* the unsafe block so the unsafe
    // region contains only the operations that actually require it.
    assert!(mid <= len);
    // SAFETY: `mid <= len` was just asserted, so `[0, mid)` and
    // `[mid, len)` are in-bounds, non-overlapping ranges of the same
    // slice; the two returned mutable slices therefore never alias.
    unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the first element is matched, an empty slice will be the first item
/// returned by the iterator. Similarly, if the last element in the slice
/// is matched, an empty slice will be the last item returned by the
/// iterator:
///
/// ```
/// let slice = [10, 40, 33];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert!(iter.next().is_none());
/// ```
///
/// If two matched elements are directly adjacent, an empty slice will be
/// present between them:
///
/// ```
/// let slice = [10, 6, 33, 20];
/// let mut iter = slice.split(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10]);
/// assert_eq!(iter.next().unwrap(), &[]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Construction only: the predicate is applied lazily by `Split`'s
    // `Iterator` impl (elsewhere in this module). The iterator starts in
    // the not-`finished` state.
    Split { v: self, pred, finished: false }
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_mut(|num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split`; splitting is driven lazily by
    // `SplitMut`'s `Iterator` impl. Starts in the not-`finished` state.
    SplitMut { v: self, pred, finished: false }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`. The matched element is contained in the end of the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [10, 40, 33, 20];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert_eq!(iter.next().unwrap(), &[20]);
/// assert!(iter.next().is_none());
/// ```
///
/// If the last element of the slice is matched,
/// that element will be considered the terminator of the preceding slice.
/// That slice will be the last item returned by the iterator.
///
/// ```
/// #![feature(split_inclusive)]
/// let slice = [3, 10, 40, 33];
/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
///
/// assert_eq!(iter.next().unwrap(), &[3]);
/// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
/// assert!(iter.next().is_none());
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Like `split`, but `SplitInclusive`'s iterator keeps the matched
    // terminator at the end of each subslice (see its impl). Starts in the
    // not-`finished` state.
    SplitInclusive { v: self, pred, finished: false }
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`. The matched element is contained in the previous
/// subslice as a terminator.
///
/// # Examples
///
/// ```
/// #![feature(split_inclusive)]
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
/// let terminator_idx = group.len()-1;
/// group[terminator_idx] = 1;
/// }
/// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
/// ```
#[unstable(feature = "split_inclusive", issue = "72360")]
#[inline]
pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `split_inclusive`; terminators stay in the
    // yielded subslices. Starts in the not-`finished` state.
    SplitInclusiveMut { v: self, pred, finished: false }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, starting at the end of the slice and working backwards.
/// The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let slice = [11, 22, 33, 0, 44, 55];
/// let mut iter = slice.rsplit(|num| *num == 0);
///
/// assert_eq!(iter.next().unwrap(), &[44, 55]);
/// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
/// assert_eq!(iter.next(), None);
/// ```
///
/// As with `split()`, if the first or last element is matched, an empty
/// slice will be the first (or last) item returned by the iterator.
///
/// ```
/// let v = &[0, 1, 1, 2, 3, 5, 8];
/// let mut it = v.rsplit(|n| *n % 2 == 0);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next().unwrap(), &[3, 5]);
/// assert_eq!(it.next().unwrap(), &[1, 1]);
/// assert_eq!(it.next().unwrap(), &[]);
/// assert_eq!(it.next(), None);
/// ```
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // `RSplit` wraps the forward splitter and yields its pieces from the
    // back — presumably via the inner iterator's double-ended support;
    // see `RSplit`'s impl elsewhere in this module.
    RSplit { inner: self.split(pred) }
}
/// Returns an iterator over mutable subslices separated by elements that
/// match `pred`, starting at the end of the slice and working
/// backwards. The matched element is not contained in the subslices.
///
/// # Examples
///
/// ```
/// let mut v = [100, 400, 300, 200, 600, 500];
///
/// let mut count = 0;
/// for group in v.rsplit_mut(|num| *num % 3 == 0) {
/// count += 1;
/// group[0] = count;
/// }
/// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
/// ```
///
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[inline]
pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `rsplit`: wraps the forward `split_mut`
    // iterator and yields from the back (see `RSplitMut`'s impl).
    RSplitMut { inner: self.split_mut(pred) }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
/// `[20, 60, 50]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // `count: n` caps the number of yielded subslices at `n`, per the
    // contract documented above; the capping logic lives in
    // `GenericSplitN`.
    SplitN { inner: GenericSplitN { iter: self.split(pred), count: n } }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `splitn`: at most `n` subslices, enforced by
    // `GenericSplitN`'s `count`.
    SplitNMut { inner: GenericSplitN { iter: self.split_mut(pred), count: n } }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// Print the slice split once, starting from the end, by numbers divisible
/// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
///
/// ```
/// let v = [10, 40, 30, 20, 60, 50];
///
/// for group in v.rsplitn(2, |num| *num % 3 == 0) {
/// println!("{:?}", group);
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Reverse variant of `splitn`: wraps the backward splitter and caps
    // the yield count at `n` via `GenericSplitN`.
    RSplitN { inner: GenericSplitN { iter: self.rsplit(pred), count: n } }
}
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
/// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// slice.
///
/// # Examples
///
/// ```
/// let mut s = [10, 40, 30, 20, 60, 50];
///
/// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
/// group[0] = 1;
/// }
/// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
where
    F: FnMut(&T) -> bool,
{
    // Mutable counterpart of `rsplitn`: backward splitting, at most `n`
    // subslices.
    RSplitNMut { inner: GenericSplitN { iter: self.rsplit_mut(pred), count: n } }
}
/// Returns `true` if the slice contains an element with the given value.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.contains(&30));
/// assert!(!v.contains(&50));
/// ```
///
/// If you do not have an `&T`, but just an `&U` such that `T: Borrow<U>`
/// (e.g. `String: Borrow<str>`), you can use `iter().any`:
///
/// ```
/// let v = [String::from("hello"), String::from("world")]; // slice of `String`
/// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
/// assert!(!v.iter().any(|e| e == "hi"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn contains(&self, x: &T) -> bool
where
    T: PartialEq,
{
    // Dispatches through the internal `slice_contains` helper rather than
    // a plain `iter().any(..)` — presumably so specialized element types
    // (e.g. bytes) can use a faster search; NOTE(review): confirm against
    // the helper's definition elsewhere in this file.
    x.slice_contains(self)
}
/// Returns `true` if `needle` is a prefix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.starts_with(&[10]));
/// assert!(v.starts_with(&[10, 40]));
/// assert!(!v.starts_with(&[50]));
/// assert!(!v.starts_with(&[10, 50]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.starts_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn starts_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    // `get` yields `None` when the needle is longer than this slice, in
    // which case it cannot be a prefix. An empty needle always matches.
    match self.get(..needle.len()) {
        Some(head) => needle == head,
        None => false,
    }
}
/// Returns `true` if `needle` is a suffix of the slice.
///
/// # Examples
///
/// ```
/// let v = [10, 40, 30];
/// assert!(v.ends_with(&[30]));
/// assert!(v.ends_with(&[40, 30]));
/// assert!(!v.ends_with(&[50]));
/// assert!(!v.ends_with(&[50, 30]));
/// ```
///
/// Always returns `true` if `needle` is an empty slice:
///
/// ```
/// let v = &[10, 40, 30];
/// assert!(v.ends_with(&[]));
/// let v: &[u8] = &[];
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ends_with(&self, needle: &[T]) -> bool
where
    T: PartialEq,
{
    let n = needle.len();
    // A needle longer than the slice can never be a suffix; checking this
    // first also keeps the subtraction below from underflowing.
    if n > self.len() {
        return false;
    }
    needle == &self[self.len() - n..]
}
/// Returns a subslice with the prefix removed.
///
/// This method returns [`None`] if slice does not start with `prefix`.
/// Also it returns the original slice if `prefix` is an empty slice.
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
/// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
/// assert_eq!(v.strip_prefix(&[50]), None);
/// assert_eq!(v.strip_prefix(&[10, 50]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_prefix(&self, prefix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    // A prefix longer than the slice can never match.
    if prefix.len() > self.len() {
        return None;
    }
    let (head, tail) = self.split_at(prefix.len());
    // An empty prefix trivially matches and leaves the slice unchanged.
    if head == prefix { Some(tail) } else { None }
}
/// Returns a subslice with the suffix removed.
///
/// This method returns [`None`] if slice does not end with `suffix`.
/// Also it returns the original slice if `suffix` is an empty slice
///
/// # Examples
///
/// ```
/// #![feature(slice_strip)]
/// let v = &[10, 40, 30];
/// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
/// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
/// assert_eq!(v.strip_suffix(&[50]), None);
/// assert_eq!(v.strip_suffix(&[50, 30]), None);
/// ```
#[must_use = "returns the subslice without modifying the original"]
#[unstable(feature = "slice_strip", issue = "73413")]
pub fn strip_suffix(&self, suffix: &[T]) -> Option<&[T]>
where
    T: PartialEq,
{
    let n = suffix.len();
    // A suffix longer than the slice can never match.
    if n > self.len() {
        return None;
    }
    let (head, tail) = self.split_at(self.len() - n);
    // An empty suffix trivially matches and leaves the slice unchanged.
    if tail == suffix { Some(head) } else { None }
}
/// Binary searches this sorted slice for a given element.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
/// assert_eq!(s.binary_search(&4), Err(7));
/// assert_eq!(s.binary_search(&100), Err(13));
/// let r = s.binary_search(&1);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
///
/// If you want to insert an item to a sorted vector, while maintaining
/// sort order:
///
/// ```
/// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
/// let num = 42;
/// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
/// s.insert(idx, num);
/// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
    T: Ord,
{
    // Delegate to the comparator-based search, ordering each probe
    // element against `x` via its total order.
    self.binary_search_by(|p| p.cmp(x))
}
/// Binary searches this sorted slice with a comparator function.
///
/// The comparator function should implement an order consistent
/// with the sort order of the underlying slice, returning an
/// order code that indicates whether its argument is `Less`,
/// `Equal` or `Greater` the desired target.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// # Examples
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
/// found; the fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
/// let seek = 4;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
/// let seek = 100;
/// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
/// let seek = 1;
/// let r = s.binary_search_by(|probe| probe.cmp(&seek));
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> Ordering,
{
    let s = self;
    let mut size = s.len();
    // Empty slice: nothing can match, and the insertion point is 0.
    if size == 0 {
        return Err(0);
    }
    let mut base = 0usize;
    // Loop invariant: `base + size <= s.len()`, and the answer (match or
    // insertion point) lies within `[base, base + size]`. Each iteration
    // either keeps `base` (comparator returned `Greater`, so the target is
    // in the lower half) or advances `base` to `mid`; either way `size`
    // shrinks by `half`, so `base + size` never grows and the invariant is
    // preserved.
    while size > 1 {
        let half = size / 2;
        let mid = base + half;
        // SAFETY: `mid = base + half < base + size <= s.len()` by the loop
        // invariant above, so the unchecked access is in bounds.
        let cmp = f(unsafe { s.get_unchecked(mid) });
        base = if cmp == Greater { base } else { mid };
        size -= half;
    }
    // SAFETY: `base < s.len()` because `base + size <= s.len()` and
    // `size >= 1` after the loop.
    let cmp = f(unsafe { s.get_unchecked(base) });
    // On a miss: if the probe compared `Less` than the target, the target
    // would be inserted after `base`, hence the `+ 1`.
    if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
}
/// Binary searches this sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
/// [`sort_by_key`] using the same key extraction function.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
/// one of the matches could be returned. If the value is not found then
/// [`Result::Err`] is returned, containing the index where a matching
/// element could be inserted while maintaining sorted order.
///
/// [`sort_by_key`]: #method.sort_by_key
///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
/// fourth could match any position in `[1, 4]`.
///
/// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
/// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
/// (1, 21), (2, 34), (4, 55)];
///
/// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9));
/// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7));
/// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13));
/// let r = s.binary_search_by_key(&1, |&(a,b)| b);
/// assert!(match r { Ok(1..=4) => true, _ => false, });
/// ```
#[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
    F: FnMut(&'a T) -> B,
    B: Ord,
{
    // Compare extracted keys against `b`; correctness assumes the slice is
    // sorted by this same key function (see the docs above).
    self.binary_search_by(|k| f(k).cmp(b))
}
/// Sorts the slice, but may not preserve the order of equal elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort_unstable();
/// assert!(v == [-5, -3, 1, 2, 4]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable(&mut self)
where
    T: Ord,
{
    // `sort::quicksort` is the pattern-defeating quicksort described in
    // the docs above; an `is_less` predicate built from `Ord::lt` gives an
    // ascending sort.
    sort::quicksort(self, |a, b| a.lt(b));
}
/// Sorts the slice with a comparator function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
///
/// The comparator function must define a total ordering for the elements in the slice. If
/// the ordering is not total, the order of the elements is unspecified. An order is a
/// total order if it is (for all a, b and c):
///
/// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and
/// * transitive, a < b and b < c implies a < c. The same must hold for both == and >.
///
/// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
/// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
///
/// ```
/// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
/// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
/// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
/// ```
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// It is typically faster than stable sorting, except in a few special cases, e.g., when the
/// slice consists of several concatenated sorted sequences.
///
/// # Examples
///
/// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_unstable_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_unstable_by(|a, b| b.cmp(a));
/// assert!(v == [5, 4, 3, 2, 1]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by<F>(&mut self, mut compare: F)
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Adapt the caller's three-way comparator to the boolean "strictly
    // less" predicate that the quicksort implementation expects.
    sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
}
/// Sorts the slice with a key extraction function, but may not preserve the order of equal
/// elements.
///
/// This sort is unstable (i.e., may reorder equal elements), in-place
/// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
/// *O*(*m*).
///
/// # Current implementation
///
/// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
/// which combines the fast average case of randomized quicksort with the fast worst case of
/// heapsort, while achieving linear time on slices with certain patterns. It uses some
/// randomization to avoid degenerate cases, but with a fixed seed to always provide
/// deterministic behavior.
///
/// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
/// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
/// cases where the key function is expensive.
///
/// # Examples
///
/// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_unstable_by_key(|k| k.abs());
/// assert!(v == [1, 2, -3, 4, -5]);
/// ```
///
/// [pdqsort]: https://github.com/orlp/pdqsort
#[stable(feature = "sort_unstable", since = "1.20.0")]
#[inline]
pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Order elements by comparing freshly extracted keys. Note that `f`
    // runs on both operands of every comparison (see the doc note about
    // `sort_by_cached_key` for expensive key functions).
    sort::quicksort(self, |a, b| f(a) < f(b));
}
/// Reorder the slice such that the element at `index` is at its final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index`. Additionally, this reordering is
/// unstable (i.e. any number of equal elements may end up at position `index`), in-place
/// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
/// element" in other libraries. It returns a triplet of the following values: all elements less
/// than the one at the given index, the value at the given index, and all elements greater than
/// the one at the given index.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median
/// v.partition_at_index(2);
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [-3, -5, 1, 2, 4] ||
/// v == [-5, -3, 1, 2, 4] ||
/// v == [-3, -5, 1, 4, 2] ||
/// v == [-5, -3, 1, 4, 2]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
where
    T: Ord,
{
    // Use the natural `<` ordering as the partitioning predicate.
    let mut less_than = |x: &T, y: &T| x < y;
    sort::partition_at_index(self, index, &mut less_than)
}
/// Reorder the slice with a comparator function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the comparator function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index,
/// and all elements greater than the one at the given index, using the provided comparator
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Find the median as if the slice were sorted in descending order.
/// v.partition_at_index_by(2, |a, b| b.cmp(a));
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [2, 4, 1, -5, -3] ||
/// v == [2, 4, 1, -3, -5] ||
/// v == [4, 2, 1, -5, -3] ||
/// v == [4, 2, 1, -3, -5]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by<F>(
    &mut self,
    index: usize,
    mut compare: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> Ordering,
{
    // Translate the user's three-way comparator into the boolean
    // "is less than" predicate the quickselect routine expects.
    let mut is_less = |x: &T, y: &T| compare(x, y) == Ordering::Less;
    sort::partition_at_index(self, index, &mut is_less)
}
/// Reorder the slice with a key extraction function such that the element at `index` is at its
/// final sorted position.
///
/// This reordering has the additional property that any value at position `i < index` will be
/// less than or equal to any value at a position `j > index` using the key extraction function.
/// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
/// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
/// is also known as "kth element" in other libraries. It returns a triplet of the following
/// values: all elements less than the one at the given index, the value at the given index, and
/// all elements greater than the one at the given index, using the provided key extraction
/// function.
///
/// # Current implementation
///
/// The current algorithm is based on the quickselect portion of the same quicksort algorithm
/// used for [`sort_unstable`].
///
/// [`sort_unstable`]: #method.sort_unstable
///
/// # Panics
///
/// Panics when `index >= len()`, meaning it always panics on empty slices.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_at_index)]
///
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// // Return the median as if the array were sorted according to absolute value.
/// v.partition_at_index_by_key(2, |a| a.abs());
///
/// // We are only guaranteed the slice will be one of the following, based on the way we sort
/// // about the specified index.
/// assert!(v == [1, 2, -3, 4, -5] ||
/// v == [1, 2, -3, -5, 4] ||
/// v == [2, 1, -3, 4, -5] ||
/// v == [2, 1, -3, -5, 4]);
/// ```
#[unstable(feature = "slice_partition_at_index", issue = "55300")]
#[inline]
pub fn partition_at_index_by_key<K, F>(
    &mut self,
    index: usize,
    mut f: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T) -> K,
    K: Ord,
{
    // Compare elements by their extracted keys; `f` runs on both
    // operands of every comparison.
    let mut keys_less = |x: &T, y: &T| f(x) < f(y);
    sort::partition_at_index(self, index, &mut keys_less)
}
/// Moves all consecutive repeated elements to the end of the slice according to the
/// [`PartialEq`] trait implementation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
///
/// let (dedup, duplicates) = slice.partition_dedup();
///
/// assert_eq!(dedup, [1, 2, 3, 2, 1]);
/// assert_eq!(duplicates, [2, 3, 1]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
where
    T: PartialEq,
{
    // Two elements belong to the same "bucket" exactly when they
    // compare equal under `PartialEq`.
    self.partition_dedup_by(|x, y| x == y)
}
/// Moves all but the first of consecutive elements to the end of the slice satisfying
/// a given equality relation.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// The `same_bucket` function is passed references to two elements from the slice and
/// must determine if the elements compare equal. The elements are passed in opposite order
/// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
/// at the end of the slice.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
///
/// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
///
/// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
/// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T, &mut T) -> bool,
{
    // Although we have a mutable reference to `self`, we cannot make
    // *arbitrary* changes. The `same_bucket` calls could panic, so we
    // must ensure that the slice is in a valid state at all times.
    //
    // The way that we handle this is by using swaps; we iterate
    // over all the elements, swapping as we go so that at the end
    // the elements we wish to keep are in the front, and those we
    // wish to reject are at the back. We can then split the slice.
    // This operation is still `O(n)`.
    //
    // Example: We start in this state, where `r` represents "next
    // read" and `w` represents "next write".
    //
    //           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //           w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate, so
    // we swap self[r] and self[w] (no effect as r==w) and then increment both
    // r and w, leaving us with:
    //
    //               r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this value is a duplicate,
    // so we increment `r` but leave everything else unchanged:
    //
    //                   r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 1 | 2 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //               w
    //
    // Comparing self[r] against self[w-1], this is not a duplicate,
    // so swap self[r] and self[w] and advance r and w:
    //
    //                       r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 1 | 3 | 3 |
    //     +---+---+---+---+---+---+
    //                   w
    //
    // Not a duplicate, repeat:
    //
    //                           r
    //     +---+---+---+---+---+---+
    //     | 0 | 1 | 2 | 3 | 1 | 3 |
    //     +---+---+---+---+---+---+
    //                       w
    //
    // Duplicate, advance r. End of slice. Split at w.
    let len = self.len();
    // A slice of length 0 or 1 can contain no duplicates; the
    // "duplicates" half is trivially empty.
    if len <= 1 {
        return (self, &mut []);
    }
    let ptr = self.as_mut_ptr();
    let mut next_read: usize = 1;
    let mut next_write: usize = 1;
    // SAFETY: the loop maintains `1 <= next_write <= next_read`, and
    // `next_read < len` is checked before every dereference, so every
    // pointer formed below stays within the slice. `next_read != next_write`
    // guarantees the two `&mut` references passed to `mem::swap` never alias.
    unsafe {
        // Avoid bounds checks by using raw pointers.
        while next_read < len {
            let ptr_read = ptr.add(next_read);
            let prev_ptr_write = ptr.add(next_write - 1);
            if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
                if next_read != next_write {
                    let ptr_write = prev_ptr_write.offset(1);
                    mem::swap(&mut *ptr_read, &mut *ptr_write);
                }
                next_write += 1;
            }
            next_read += 1;
        }
    }
    self.split_at_mut(next_write)
}
/// Moves all but the first of consecutive elements to the end of the slice that resolve
/// to the same key.
///
/// Returns two slices. The first contains no consecutive repeated elements.
/// The second contains all the duplicates in no specified order.
///
/// If the slice is sorted, the first returned slice contains no duplicates.
///
/// # Examples
///
/// ```
/// #![feature(slice_partition_dedup)]
///
/// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
///
/// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
///
/// assert_eq!(dedup, [10, 20, 30, 20, 11]);
/// assert_eq!(duplicates, [21, 30, 13]);
/// ```
#[unstable(feature = "slice_partition_dedup", issue = "54279")]
#[inline]
pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
where
    F: FnMut(&mut T) -> K,
    K: PartialEq,
{
    // Elements fall into the same bucket exactly when their extracted
    // keys compare equal.
    self.partition_dedup_by(|x, y| key(x) == key(y))
}
/// Rotates the slice in-place such that the first `mid` elements of the
/// slice move to the end while the last `self.len() - mid` elements move to
/// the front. After calling `rotate_left`, the element previously at index
/// `mid` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `mid` is greater than the length of the
/// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_left(2);
/// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
/// ```
///
/// Rotating a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_left(1);
/// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_left(&mut self, mid: usize) {
    assert!(mid <= self.len());
    // Length of the right-hand part, i.e. the elements that end up
    // at the front after the rotation.
    let right = self.len() - mid;
    // SAFETY: `mid <= self.len()` was just asserted, so `base.add(mid)`
    // is in bounds and the two halves have lengths `mid` and `right`.
    unsafe {
        let base = self.as_mut_ptr();
        rotate::ptr_rotate(mid, base.add(mid), right);
    }
}
/// Rotates the slice in-place such that the first `self.len() - k`
/// elements of the slice move to the end while the last `k` elements move
/// to the front. After calling `rotate_right`, the element previously at
/// index `self.len() - k` will become the first element in the slice.
///
/// # Panics
///
/// This function will panic if `k` is greater than the length of the
/// slice. Note that `k == self.len()` does _not_ panic and is a no-op
/// rotation.
///
/// # Complexity
///
/// Takes linear (in `self.len()`) time.
///
/// # Examples
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a.rotate_right(2);
/// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
/// ```
///
/// Rotate a subslice:
///
/// ```
/// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
/// a[1..5].rotate_right(1);
/// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
/// ```
#[stable(feature = "slice_rotate", since = "1.26.0")]
pub fn rotate_right(&mut self, k: usize) {
    assert!(k <= self.len());
    // Rotating right by `k` is rotating left by `len - k`.
    let left = self.len() - k;
    // SAFETY: `k <= self.len()` was just asserted, so `base.add(left)`
    // is in bounds and the two halves have lengths `left` and `k`.
    unsafe {
        let base = self.as_mut_ptr();
        rotate::ptr_rotate(left, base.add(left), k);
    }
}
/// Fills `self` with elements by cloning `value`.
///
/// # Examples
///
/// ```
/// #![feature(slice_fill)]
///
/// let mut buf = vec![0; 10];
/// buf.fill(1);
/// assert_eq!(buf, vec![1; 10]);
/// ```
#[unstable(feature = "slice_fill", issue = "70758")]
pub fn fill(&mut self, value: T)
where
    T: Clone,
{
    // Clone into every slot except the last, then move `value` itself
    // into the last slot, saving one clone. An empty slice is a no-op.
    match self.split_last_mut() {
        Some((last, head)) => {
            for slot in head {
                slot.clone_from(&value);
            }
            *last = value
        }
        None => {}
    }
}
/// Copies the elements from `src` into `self`.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` implements `Copy`, it can be more performant to use
/// [`copy_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Cloning two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.clone_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `clone_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.clone_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`copy_from_slice`]: #method.copy_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "clone_from_slice", since = "1.7.0")]
pub fn clone_from_slice(&mut self, src: &[T])
where
    T: Clone,
{
    assert!(self.len() == src.len(), "destination and source slices have different lengths");
    // Lock-step iteration visits the same index pairs in the same order
    // as an index loop, without per-element bounds checks; `clone_from`
    // lets `T` reuse any resources already held by the destination.
    for (dst, s) in self.iter_mut().zip(src) {
        dst.clone_from(s);
    }
}
/// Copies all elements from `src` into `self`, using a memcpy.
///
/// The length of `src` must be the same as `self`.
///
/// If `T` does not implement `Copy`, use [`clone_from_slice`].
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Examples
///
/// Copying two elements from a slice into another:
///
/// ```
/// let src = [1, 2, 3, 4];
/// let mut dst = [0, 0];
///
/// // Because the slices have to be the same length,
/// // we slice the source slice from four elements
/// // to two. It will panic if we don't do this.
/// dst.copy_from_slice(&src[2..]);
///
/// assert_eq!(src, [1, 2, 3, 4]);
/// assert_eq!(dst, [3, 4]);
/// ```
///
/// Rust enforces that there can only be one mutable reference with no
/// immutable references to a particular piece of data in a particular
/// scope. Because of this, attempting to use `copy_from_slice` on a
/// single slice will result in a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
///
/// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.copy_from_slice(&right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 4, 5]);
/// ```
///
/// [`clone_from_slice`]: #method.clone_from_slice
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "copy_from_slice", since = "1.9.0")]
pub fn copy_from_slice(&mut self, src: &[T])
where
    T: Copy,
{
    assert_eq!(self.len(), src.len(), "destination and source slices have different lengths");
    let count = self.len();
    // SAFETY: both slices are exactly `count` elements long (checked
    // above), and they cannot overlap because `self` is accessed
    // through a unique mutable borrow while `src` is a shared borrow.
    unsafe {
        ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), count);
    }
}
/// Copies elements from one part of the slice to another part of itself,
/// using a memmove.
///
/// `src` is the range within `self` to copy from. `dest` is the starting
/// index of the range within `self` to copy to, which will have the same
/// length as `src`. The two ranges may overlap. The ends of the two ranges
/// must be less than or equal to `self.len()`.
///
/// # Panics
///
/// This function will panic if either range exceeds the end of the slice,
/// or if the end of `src` is before the start.
///
/// # Examples
///
/// Copying four bytes within a slice:
///
/// ```
/// let mut bytes = *b"Hello, World!";
///
/// bytes.copy_within(1..5, 8);
///
/// assert_eq!(&bytes, b"Hello, Wello!");
/// ```
#[stable(feature = "copy_within", since = "1.37.0")]
#[track_caller]
pub fn copy_within<R: ops::RangeBounds<usize>>(&mut self, src: R, dest: usize)
where
    T: Copy,
{
    // Resolve the generic range bounds into concrete start/end indices,
    // panicking if an included/excluded bound overflows `usize`.
    let start = match src.start_bound() {
        ops::Bound::Unbounded => 0,
        ops::Bound::Included(&n) => n,
        ops::Bound::Excluded(&n) => {
            n.checked_add(1).unwrap_or_else(|| slice_index_overflow_fail())
        }
    };
    let end = match src.end_bound() {
        ops::Bound::Unbounded => self.len(),
        ops::Bound::Included(&n) => {
            n.checked_add(1).unwrap_or_else(|| slice_index_overflow_fail())
        }
        ops::Bound::Excluded(&n) => n,
    };
    assert!(start <= end, "src end is before src start");
    assert!(end <= self.len(), "src is out of bounds");
    let count = end - start;
    // `count <= len`, so `len - count` cannot underflow.
    assert!(dest <= self.len() - count, "dest is out of bounds");
    // SAFETY: both the source range and the destination range were
    // validated to lie within the slice; `ptr::copy` permits overlap.
    unsafe {
        ptr::copy(self.as_ptr().add(start), self.as_mut_ptr().add(dest), count);
    }
}
/// Swaps all elements in `self` with those in `other`.
///
/// The length of `other` must be the same as `self`.
///
/// # Panics
///
/// This function will panic if the two slices have different lengths.
///
/// # Example
///
/// Swapping two elements across slices:
///
/// ```
/// let mut slice1 = [0, 0];
/// let mut slice2 = [1, 2, 3, 4];
///
/// slice1.swap_with_slice(&mut slice2[2..]);
///
/// assert_eq!(slice1, [3, 4]);
/// assert_eq!(slice2, [1, 2, 0, 0]);
/// ```
///
/// Rust enforces that there can only be one mutable reference to a
/// particular piece of data in a particular scope. Because of this,
/// attempting to use `swap_with_slice` on a single slice will result in
/// a compile failure:
///
/// ```compile_fail
/// let mut slice = [1, 2, 3, 4, 5];
/// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
/// ```
///
/// To work around this, we can use [`split_at_mut`] to create two distinct
/// mutable sub-slices from a slice:
///
/// ```
/// let mut slice = [1, 2, 3, 4, 5];
///
/// {
/// let (left, right) = slice.split_at_mut(2);
/// left.swap_with_slice(&mut right[1..]);
/// }
///
/// assert_eq!(slice, [4, 5, 3, 1, 2]);
/// ```
///
/// [`split_at_mut`]: #method.split_at_mut
#[stable(feature = "swap_with_slice", since = "1.27.0")]
pub fn swap_with_slice(&mut self, other: &mut [T]) {
    assert!(self.len() == other.len(), "destination and source slices have different lengths");
    let count = self.len();
    // SAFETY: equal lengths were checked above, and the two distinct
    // `&mut` borrows guarantee the regions do not overlap.
    unsafe {
        ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), count);
    }
}
/// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
fn align_to_offsets<U>(&self) -> (usize, usize) {
    // What we are going to do about `rest` is to figure out what multiple of `U`s we can put
    // in a lowest number of `T`s. And how many `T`s we need for each such "multiple".
    //
    // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
    // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
    // place of every 3 Ts in the `rest` slice. A bit more complicated.
    //
    // Formula to calculate this is:
    //
    // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
    // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
    //
    // Expanded and simplified:
    //
    // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
    // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
    //
    // Luckily since all this is constant-evaluated... performance here matters not!
    #[inline]
    fn gcd(a: usize, b: usize) -> usize {
        use crate::intrinsics;
        // iterative Stein's algorithm (binary GCD)
        // We should still make this `const fn` (and revert to recursive algorithm if we do)
        // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
        //
        // SAFETY: `cttz_nonzero` requires a nonzero argument; both zero
        // cases return early before the intrinsic is reached.
        let (ctz_a, mut ctz_b) = unsafe {
            if a == 0 {
                return b;
            }
            if b == 0 {
                return a;
            }
            (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
        };
        // `k` is the shared power of two of the inputs; it is factored
        // back in at the end via `a << k`.
        let k = ctz_a.min(ctz_b);
        let mut a = a >> ctz_a;
        let mut b = b;
        loop {
            // remove all factors of 2 from b
            b >>= ctz_b;
            if a > b {
                mem::swap(&mut a, &mut b);
            }
            b = b - a;
            // SAFETY: `b` is checked to be nonzero before `cttz_nonzero`
            // is invoked; the zero case breaks out of the loop instead.
            unsafe {
                if b == 0 {
                    break;
                }
                ctz_b = intrinsics::cttz_nonzero(b);
            }
        }
        a << k
    }
    let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
    // `ts` Ts occupy exactly the same number of bytes as `us` Us.
    let ts: usize = mem::size_of::<U>() / gcd;
    let us: usize = mem::size_of::<T>() / gcd;
    // Armed with this knowledge, we can find how many `U`s we can fit!
    let us_len = self.len() / ts * us;
    // And how many `T`s will be in the trailing slice!
    let ts_len = self.len() % ts;
    (us_len, ts_len)
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
/// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
/// let (prefix, shorts, suffix) = bytes.align_to::<u16>();
/// // less_efficient_algorithm_for_bytes(prefix);
/// // more_efficient_algorithm_for_aligned_shorts(shorts);
/// // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &[], &[]);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // NOTE(review): `mem::align_of::<U>()` is always a nonzero power of
    // two; presumably that satisfies `align_offset`'s contract — confirm
    // against the internal `crate::ptr::align_offset` documentation.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // No element of the slice is suitably aligned for `U`; return
        // everything as the prefix.
        (self, &[], &[])
    } else {
        let (left, rest) = self.split_at(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
        // since the caller guarantees that we can transmute `T` to `U` safely.
        unsafe {
            (
                left,
                from_raw_parts(rest.as_ptr() as *const U, us_len),
                from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
            )
        }
    }
}
/// Transmute the slice to a slice of another type, ensuring alignment of the types is
/// maintained.
///
/// This method splits the slice into three distinct slices: prefix, correctly aligned middle
/// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
/// length possible for a given type and input slice, but only your algorithm's performance
/// should depend on that, not its correctness. It is permissible for all of the input data to
/// be returned as the prefix or suffix slice.
///
/// This method has no purpose when either input element `T` or output element `U` are
/// zero-sized and will return the original slice without splitting anything.
///
/// # Safety
///
/// This method is essentially a `transmute` with respect to the elements in the returned
/// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// unsafe {
/// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
/// let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
/// // less_efficient_algorithm_for_bytes(prefix);
/// // more_efficient_algorithm_for_aligned_shorts(shorts);
/// // less_efficient_algorithm_for_bytes(suffix);
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
    // Note that most of this function will be constant-evaluated,
    if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
        // handle ZSTs specially, which is – don't handle them at all.
        return (self, &mut [], &mut []);
    }
    // First, find at what point do we split between the first and 2nd slice. Easy with
    // ptr.align_offset.
    let ptr = self.as_ptr();
    // NOTE(review): `mem::align_of::<U>()` is always a nonzero power of
    // two; presumably that satisfies `align_offset`'s contract — confirm
    // against the internal `crate::ptr::align_offset` documentation.
    let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
    if offset > self.len() {
        // No element of the slice is suitably aligned for `U`; return
        // everything as the prefix.
        (self, &mut [], &mut [])
    } else {
        let (left, rest) = self.split_at_mut(offset);
        let (us_len, ts_len) = rest.align_to_offsets::<U>();
        let rest_len = rest.len();
        let mut_ptr = rest.as_mut_ptr();
        // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
        // SAFETY: see comments for `align_to`.
        unsafe {
            (
                left,
                from_raw_parts_mut(mut_ptr as *mut U, us_len),
                from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
            )
        }
    }
}
/// Checks if the elements of this slice are sorted.
///
/// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
/// slice yields exactly zero or one element, `true` is returned.
///
/// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
/// implies that this function returns `false` if any two consecutive items are not
/// comparable.
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
/// let empty: [i32; 0] = [];
///
/// assert!([1, 2, 2, 9].is_sorted());
/// assert!(![1, 3, 2, 4].is_sorted());
/// assert!([0].is_sorted());
/// assert!(empty.is_sorted());
/// assert!(![0.0, 1.0, f32::NAN].is_sorted());
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted(&self) -> bool
where
T: PartialOrd,
{
self.is_sorted_by(|a, b| a.partial_cmp(b))
}
/// Checks if the elements of this slice are sorted using the given comparator function.
///
/// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
/// function to determine the ordering of two elements. Apart from that, it's equivalent to
/// [`is_sorted`]; see its documentation for more information.
///
/// [`is_sorted`]: #method.is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
    F: FnMut(&T, &T) -> Option<Ordering>,
{
    // The slice iterator yields `&T`, so the iterator-level check sees
    // `&&T` pairs; strip one level of reference before calling `compare`.
    let adjacent = |x: &&T, y: &&T| compare(*x, *y);
    self.iter().is_sorted_by(adjacent)
}
/// Checks if the elements of this slice are sorted using the given key extraction function.
///
/// Instead of comparing the slice's elements directly, this function compares the keys of the
/// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
/// documentation for more information.
///
/// [`is_sorted`]: #method.is_sorted
///
/// # Examples
///
/// ```
/// #![feature(is_sorted)]
///
/// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
/// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
    F: FnMut(&T) -> K,
    K: PartialOrd,
{
    // Forward directly to the iterator adapter, which compares the
    // keys extracted from adjacent elements.
    self.iter().is_sorted_by_key(f)
}
/// Returns the index of the partition point according to the given predicate
/// (the index of the first element of the second partition).
///
/// The slice is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the slice
/// and all elements for which the predicate returns false are at the end.
/// For example, [7, 15, 3, 5, 4, 12, 6] is partitioned under the predicate x % 2 != 0
/// (all odd numbers are at the start, all even at the end).
///
/// If this slice is not partitioned, the returned result is unspecified and meaningless,
/// as this method performs a kind of binary search.
///
/// # Examples
///
/// ```
/// #![feature(partition_point)]
///
/// let v = [1, 2, 3, 3, 5, 6, 7];
/// let i = v.partition_point(|&x| x < 5);
///
/// assert_eq!(i, 4);
/// assert!(v[..i].iter().all(|&x| x < 5));
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[unstable(feature = "partition_point", reason = "new API", issue = "73831")]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
    P: FnMut(&T) -> bool,
{
    // Binary search for the first index at which `pred` returns false.
    let mut left = 0;
    let mut right = self.len();
    while left != right {
        let mid = left + (right - left) / 2;
        // SAFETY:
        // When left < right, left <= mid < right.
        // Therefore left always increases and right always decreases,
        // and either of them is selected.
        // In both cases left <= right is satisfied.
        // Therefore if left < right in a step,
        // left <= right is satisfied in the next step.
        // Therefore as long as left != right, 0 <= left < right <= len is satisfied
        // and in this case 0 <= mid < len is satisfied too.
        let value = unsafe { self.get_unchecked(mid) };
        if pred(value) {
            left = mid + 1;
        } else {
            right = mid;
        }
    }
    left
}
}
#[lang = "slice_u8"]
#[cfg(not(test))]
impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn is_ascii(&self) -> bool {
        // Delegates to the optimized word-at-a-time free function.
        is_ascii(self)
    }
    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        // Slices of different lengths can never match.
        if self.len() != other.len() {
            return false;
        }
        self.iter().zip(other.iter()).all(|(a, b)| a.eq_ignore_ascii_case(b))
    }
    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn make_ascii_uppercase(&mut self) {
        self.iter_mut().for_each(|byte| byte.make_ascii_uppercase());
    }
    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn make_ascii_lowercase(&mut self) {
        self.iter_mut().for_each(|byte| byte.make_ascii_lowercase());
    }
}
/// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
/// from `../str/mod.rs`, which does something similar for utf8 validation.
#[inline]
fn contains_nonascii(v: usize) -> bool {
    // Every byte of the mask has only its top bit set; a word is all-ASCII
    // exactly when none of those bits survive the AND.
    const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    v & NONASCII_MASK != 0
}
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (above) returns true, then we know the answer is false.
#[inline]
fn is_ascii(s: &[u8]) -> bool {
const USIZE_SIZE: usize = mem::size_of::<usize>();
let len = s.len();
let align_offset = s.as_ptr().align_offset(USIZE_SIZE);
// If we wouldn't gain anything from the word-at-a-time implementation, fall
// back to a scalar loop.
//
// We also do this for architectures where `size_of::<usize>()` isn't
// sufficient alignment for `usize`, because it's a weird edge case.
if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < mem::align_of::<usize>() {
return s.iter().all(|b| b.is_ascii());
}
// We always read the first word unaligned; so if `align_offset` is 0, the
// first aligned read would load the same bytes again, and we skip a whole
// word ahead instead.
let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
let start = s.as_ptr();
// SAFETY: we verified `len >= USIZE_SIZE` above (the early return), so
// reading one `usize` from the start of the slice is in bounds.
let first_word = unsafe { (start as *const usize).read_unaligned() };
if contains_nonascii(first_word) {
return false;
}
// We checked this above, somewhat implicitly. Note that `offset_to_aligned`
// is either `align_offset` or `USIZE_SIZE`, both of which are explicitly checked
// above.
debug_assert!(offset_to_aligned <= len);
// word_ptr is the (properly aligned) usize ptr we use to read the middle chunk of the slice.
// SAFETY: `offset_to_aligned <= len` (asserted above), so the pointer stays
// within the slice.
let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };
// `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
let mut byte_pos = offset_to_aligned;
// Paranoia check about alignment, since we're about to do a bunch of
// reads that require alignment. In practice this should be impossible
// barring a bug in `align_offset` though.
debug_assert_eq!((word_ptr as usize) % mem::align_of::<usize>(), 0);
// Read subsequent words until the last aligned word, excluding the last
// aligned word by itself to be done in tail check later, so that the tail
// is always at most one `usize` and we avoid an extra `byte_pos == len` branch.
while byte_pos < len - USIZE_SIZE {
debug_assert!(
// Sanity check that the read is in bounds
(word_ptr as usize + USIZE_SIZE) <= (start.wrapping_add(len) as usize) &&
// And that our assumptions about `byte_pos` hold.
(word_ptr as usize) - (start as usize) == byte_pos
);
// SAFETY: We know `word_ptr` is properly aligned (because of
// `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
let word = unsafe { word_ptr.read() };
if contains_nonascii(word) {
return false;
}
byte_pos += USIZE_SIZE;
// SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
// after this `add`, `word_ptr` will be at most one-past-the-end.
word_ptr = unsafe { word_ptr.add(1) };
}
// Sanity check to ensure there really is only one `usize` left. This should
// be guaranteed by our loop condition.
debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);
// SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };
!contains_nonascii(last_word)
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, I> ops::Index<I> for [T]
where
I: SliceIndex<[T]>,
{
type Output = I::Output;
#[inline]
fn index(&self, index: I) -> &I::Output {
index.index(self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, I> ops::IndexMut<I> for [T]
where
I: SliceIndex<[T]>,
{
#[inline]
fn index_mut(&mut self, index: I) -> &mut I::Output {
index.index_mut(self)
}
}
// Out-of-line, cold panic helper for a range start index past the end of the
// slice; keeps formatting code off the hot indexing paths. `#[track_caller]`
// reports the caller's location in the panic message.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
panic!("range start index {} out of range for slice of length {}", index, len);
}
// Out-of-line, cold panic helper for a range end index past the end of the
// slice; keeps formatting code off the hot indexing paths.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
panic!("range end index {} out of range for slice of length {}", index, len);
}
// Out-of-line, cold panic helper for a backwards range (start > end); keeps
// formatting code off the hot indexing paths.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_index_order_fail(index: usize, end: usize) -> ! {
panic!("slice index starts at {} but ends at {}", index, end);
}
// Out-of-line, cold panic helper used when an inclusive range ends at
// `usize::MAX`, so the exclusive end `end + 1` would overflow.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_index_overflow_fail() -> ! {
panic!("attempted to index slice up to maximum usize");
}
// Seals `SliceIndex`: downstream crates can name and use the trait, but only
// the index types listed here can implement it, because `Sealed` is not
// reachable outside this module. This keeps the trait's unsafe contract under
// this module's control.
mod private_slice_index {
use super::ops;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub trait Sealed {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for usize {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::Range<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeTo<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeFrom<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeFull {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeInclusive<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeToInclusive<usize> {}
}
/// A helper trait used for indexing operations.
///
/// Implementations of this trait have to promise that if the argument
/// to `get_(mut_)unchecked` is a safe reference, then so is the result.
///
/// The trait is `unsafe` because unsafe code (e.g. the slice indexing
/// operators above) relies on implementations upholding that promise.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
#[rustc_on_unimplemented(
on(T = "str", label = "string indices are ranges of `usize`",),
on(
all(any(T = "str", T = "&str", T = "std::string::String"), _Self = "{integer}"),
note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
),
message = "the type `{T}` cannot be indexed by `{Self}`",
label = "slice indices are of type `usize` or ranges of `usize`"
)]
pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
/// The output type returned by methods.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
type Output: ?Sized;
/// Returns a shared reference to the output at this location, if in
/// bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
fn get(self, slice: &T) -> Option<&Self::Output>;
/// Returns a mutable reference to the output at this location, if in
/// bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
fn get_mut(self, slice: &mut T) -> Option<&mut Self::Output>;
/// Returns a shared reference to the output at this location, without
/// performing any bounds checking.
/// Calling this method with an out-of-bounds index or a dangling `slice` pointer
/// is *[undefined behavior]* even if the resulting reference is not used.
///
/// [undefined behavior]: ../../reference/behavior-considered-undefined.html
#[unstable(feature = "slice_index_methods", issue = "none")]
unsafe fn get_unchecked(self, slice: *const T) -> *const Self::Output;
/// Returns a mutable reference to the output at this location, without
/// performing any bounds checking.
/// Calling this method with an out-of-bounds index or a dangling `slice` pointer
/// is *[undefined behavior]* even if the resulting reference is not used.
///
/// [undefined behavior]: ../../reference/behavior-considered-undefined.html
#[unstable(feature = "slice_index_methods", issue = "none")]
unsafe fn get_unchecked_mut(self, slice: *mut T) -> *mut Self::Output;
/// Returns a shared reference to the output at this location, panicking
/// if out of bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
#[track_caller]
fn index(self, slice: &T) -> &Self::Output;
/// Returns a mutable reference to the output at this location, panicking
/// if out of bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
#[track_caller]
fn index_mut(self, slice: &mut T) -> &mut Self::Output;
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for usize {
    type Output = T;

    #[inline]
    fn get(self, slice: &[T]) -> Option<&T> {
        if self >= slice.len() {
            return None;
        }
        // SAFETY: `self < slice.len()` was verified just above.
        unsafe { Some(&*self.get_unchecked(slice)) }
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
        if self >= slice.len() {
            return None;
        }
        // SAFETY: `self < slice.len()` was verified just above.
        unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
        // SAFETY: the caller guarantees that `slice` is not dangling, so it
        // cannot be longer than `isize::MAX`. They also guarantee that
        // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
        // so the call to `add` is safe.
        unsafe { slice.as_ptr().add(self) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
        // SAFETY: see comments for `get_unchecked` above.
        unsafe { slice.as_mut_ptr().add(self) }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &T {
        // N.B., use intrinsic indexing
        &(*slice)[self]
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut T {
        // N.B., use intrinsic indexing
        &mut (*slice)[self]
    }
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
    type Output = [T];

    #[inline]
    fn get(self, slice: &[T]) -> Option<&[T]> {
        if self.start <= self.end && self.end <= slice.len() {
            // SAFETY: the range was verified in bounds just above.
            unsafe { Some(&*self.get_unchecked(slice)) }
        } else {
            None
        }
    }

    #[inline]
    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
        if self.start <= self.end && self.end <= slice.len() {
            // SAFETY: the range was verified in bounds just above.
            unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
        } else {
            None
        }
    }

    #[inline]
    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
        // SAFETY: the caller guarantees that `slice` is not dangling, so it
        // cannot be longer than `isize::MAX`. They also guarantee that
        // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
        // so the call to `add` is safe.
        unsafe { ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start) }
    }

    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        // SAFETY: see comments for `get_unchecked` above.
        unsafe {
            ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
        }
    }

    #[inline]
    fn index(self, slice: &[T]) -> &[T] {
        // Report a backwards range before an out-of-bounds end, so the panic
        // message names the actual mistake. Both helpers diverge.
        if self.start > self.end {
            slice_index_order_fail(self.start, self.end);
        }
        if self.end > slice.len() {
            slice_end_index_len_fail(self.end, slice.len());
        }
        // SAFETY: the range was verified in bounds just above.
        unsafe { &*self.get_unchecked(slice) }
    }

    #[inline]
    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
        // Report a backwards range before an out-of-bounds end, so the panic
        // message names the actual mistake. Both helpers diverge.
        if self.start > self.end {
            slice_index_order_fail(self.start, self.end);
        }
        if self.end > slice.len() {
            slice_end_index_len_fail(self.end, slice.len());
        }
        // SAFETY: the range was verified in bounds just above.
        unsafe { &mut *self.get_unchecked_mut(slice) }
    }
}
// `..end` indexing: every method simply delegates to the equivalent
// half-open range `0..end`.
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeTo<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
(0..self.end).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..self.end).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
(0..self.end).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..self.end).index_mut(slice)
}
}
// `start..` indexing: mostly delegates to the equivalent half-open range
// `start..slice.len()`; `index`/`index_mut` check the start themselves so the
// panic message blames the start index rather than the synthesized end.
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeFrom<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
(self.start..slice.len()).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(self.start..slice.len()).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..slice.len()).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
if self.start > slice.len() {
slice_start_index_len_fail(self.start, slice.len());
}
// SAFETY: `self.start <= slice.len()` was verified just above.
unsafe { &*self.get_unchecked(slice) }
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > slice.len() {
slice_start_index_len_fail(self.start, slice.len());
}
// SAFETY: `self.start <= slice.len()` was verified just above.
unsafe { &mut *self.get_unchecked_mut(slice) }
}
}
// `..` indexing is an identity operation: every method returns the whole
// slice unchanged, so no bounds checks (and no real unsafety) are involved.
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeFull {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
Some(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
Some(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// The input pointer is returned unchanged.
slice
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// The input pointer is returned unchanged.
slice
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
slice
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
slice
}
}
// `start..=end` indexing: converts to the half-open range `start..end + 1`.
// `end == usize::MAX` must be rejected first because `end + 1` would
// overflow; such a range can never be in bounds for a slice anyway.
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeInclusive<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
if *self.end() == usize::MAX { None } else { (*self.start()..self.end() + 1).get(slice) }
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if *self.end() == usize::MAX {
None
} else {
(*self.start()..self.end() + 1).get_mut(slice)
}
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (*self.start()..self.end() + 1).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (*self.start()..self.end() + 1).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
// Panic rather than overflow `end + 1` below.
if *self.end() == usize::MAX {
slice_index_overflow_fail();
}
(*self.start()..self.end() + 1).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
// Panic rather than overflow `end + 1` below.
if *self.end() == usize::MAX {
slice_index_overflow_fail();
}
(*self.start()..self.end() + 1).index_mut(slice)
}
}
// `..=end` indexing: every method simply delegates to the equivalent
// inclusive range `0..=end` (which handles the `end == usize::MAX` case).
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
(0..=self.end).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..=self.end).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..=self.end).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..=self.end).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
(0..=self.end).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..=self.end).index_mut(slice)
}
}
////////////////////////////////////////////////////////////////////////////////
// Common traits
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for &[T] {
/// Creates an empty slice.
fn default() -> Self {
// A literal empty slice; no allocation and no `T: Default` bound needed.
&[]
}
}
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
/// Creates a mutable empty slice.
fn default() -> Self {
// An empty slice has nothing to alias, so a fresh `&mut` is always fine.
&mut []
}
}
//
// Iterators
//
// `for x in &slice` iterates by shared reference, delegating to `iter`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a [T] {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
}
// `for x in &mut slice` iterates by mutable reference, delegating to `iter_mut`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut [T] {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
}
// Macro helper functions
// Returns `size_of::<T>()` with `T` inferred from the pointer argument; used
// by the `len!` macro below, where the element type is only available through
// `$self.ptr` and cannot be named directly.
#[inline(always)]
fn size_from_ptr<T>(_: *const T) -> usize {
mem::size_of::<T>()
}
// Inlining is_empty and len makes a huge performance difference
macro_rules! is_empty {
// The way we encode the length of a ZST iterator, this works both for ZST
// and non-ZST.
($self: ident) => {
// Non-ZST: start meeting end means no elements left. ZST: `end` stores
// `ptr + len`, so equality likewise means the remaining length is zero.
$self.ptr.as_ptr() as *const T == $self.end
};
}
// To get rid of some bounds checks (see `position`), we compute the length in a somewhat
// unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
macro_rules! len {
($self: ident) => {{
#![allow(unused_unsafe)] // we're sometimes used within an unsafe block
let start = $self.ptr;
let size = size_from_ptr(start.as_ptr());
if size == 0 {
// ZST iterators store their length directly in `end` (as `ptr + len`,
// see the `Iter`/`IterMut` field comments).
// This _cannot_ use `unchecked_sub` because we depend on wrapping
// to represent the length of long ZST slice iterators.
($self.end as usize).wrapping_sub(start.as_ptr() as usize)
} else {
// We know that `start <= end`, so can do better than `offset_from`,
// which needs to deal in signed. By setting appropriate flags here
// we can tell LLVM this, which helps it remove bounds checks.
// SAFETY: By the type invariant, `start <= end`
let diff = unsafe { unchecked_sub($self.end as usize, start.as_ptr() as usize) };
// By also telling LLVM that the pointers are apart by an exact
// multiple of the type size, it can optimize `len() == 0` down to
// `start == end` instead of `(end - start) < size`.
// SAFETY: By the type invariant, the pointers are aligned so the
// distance between them must be a multiple of pointee size
unsafe { exact_div(diff, size) }
}
}};
}
// The shared definition of the `Iter` and `IterMut` iterators
//
// Parameters:
// - `$name`: the iterator type being defined (`Iter` or `IterMut`)
// - `$ptr`: the raw pointer type matching the mutability (`*const T` / `*mut T`)
// - `$elem`: the item type yielded (`&'a T` / `&'a mut T`)
// - `$raw_mut`: the pointer mutability keyword used in casts (`const` / `mut`)
// - `{$( $mut_:tt )*}`: an optional `mut` token, so item creation expands to
//   `&*ptr` or `&mut *ptr` as appropriate
// - `{$($extra:tt)*}`: extra items spliced verbatim into the `Iterator` impl
macro_rules! iterator {
(
struct $name:ident -> $ptr:ty,
$elem:ty,
$raw_mut:tt,
{$( $mut_:tt )*},
{$($extra:tt)*}
) => {
// Returns the first element and moves the start of the iterator forwards by 1.
// Greatly improves performance compared to an inlined function. The iterator
// must not be empty.
macro_rules! next_unchecked {
($self: ident) => {& $( $mut_ )* *$self.post_inc_start(1)}
}
// Returns the last element and moves the end of the iterator backwards by 1.
// Greatly improves performance compared to an inlined function. The iterator
// must not be empty.
macro_rules! next_back_unchecked {
($self: ident) => {& $( $mut_ )* *$self.pre_dec_end(1)}
}
// Shrinks the iterator when T is a ZST, by moving the end of the iterator
// backwards by `n`. `n` must not exceed `self.len()`.
macro_rules! zst_shrink {
($self: ident, $n: ident) => {
$self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
}
}
impl<'a, T> $name<'a, T> {
// Helper function for creating a slice from the iterator.
#[inline(always)]
fn make_slice(&self) -> &'a [T] {
unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
}
// Helper function for moving the start of the iterator forwards by `offset` elements,
// returning the old start.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
if mem::size_of::<T>() == 0 {
// For ZSTs, "advancing the start" is encoded by shrinking `end`
// (which stores ptr + remaining len); `ptr` itself never moves.
zst_shrink!(self, offset);
self.ptr.as_ptr()
} else {
let old = self.ptr.as_ptr();
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
old
}
}
// Helper function for moving the end of the iterator backwards by `offset` elements,
// returning the new end.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
if mem::size_of::<T>() == 0 {
zst_shrink!(self, offset);
self.ptr.as_ptr()
} else {
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
self.end = unsafe { self.end.offset(-offset) };
self.end
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for $name<'_, T> {
#[inline(always)]
fn len(&self) -> usize {
len!(self)
}
#[inline(always)]
fn is_empty(&self) -> bool {
is_empty!(self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for $name<'a, T> {
type Item = $elem;
#[inline]
fn next(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
unsafe {
assume(!self.ptr.as_ptr().is_null());
if mem::size_of::<T>() != 0 {
assume(!self.end.is_null());
}
if is_empty!(self) {
None
} else {
Some(next_unchecked!(self))
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = len!(self);
(exact, Some(exact))
}
#[inline]
fn count(self) -> usize {
len!(self)
}
#[inline]
fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if mem::size_of::<T>() == 0 {
// We have to do it this way as `ptr` may never be 0, but `end`
// could be (due to wrapping).
self.end = self.ptr.as_ptr();
} else {
unsafe {
// End can't be 0 if T isn't ZST because ptr isn't 0 and end >= ptr
self.ptr = NonNull::new_unchecked(self.end as *mut T);
}
}
return None;
}
// We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
self.post_inc_start(n as isize);
Some(next_unchecked!(self))
}
}
#[inline]
fn last(mut self) -> Option<$elem> {
self.next_back()
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn for_each<F>(mut self, mut f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
while let Some(x) = self.next() {
f(x);
}
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn all<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if !f(x) {
return false;
}
}
true
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn any<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if f(x) {
return true;
}
}
false
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
while let Some(x) = self.next() {
if predicate(&x) {
return Some(x);
}
}
None
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile.
#[inline]
fn find_map<B, F>(&mut self, mut f: F) -> Option<B>
where
Self: Sized,
F: FnMut(Self::Item) -> Option<B>,
{
while let Some(x) = self.next() {
if let Some(y) = f(x) {
return Some(y);
}
}
None
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile. Also, the `assume` avoids a bounds check.
#[inline]
#[rustc_inherit_overflow_checks]
fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
let n = len!(self);
let mut i = 0;
while let Some(x) = self.next() {
if predicate(x) {
unsafe { assume(i < n) };
return Some(i);
}
i += 1;
}
None
}
// We override the default implementation, which uses `try_fold`,
// because this simple implementation generates less LLVM IR and is
// faster to compile. Also, the `assume` avoids a bounds check.
#[inline]
fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
P: FnMut(Self::Item) -> bool,
Self: Sized + ExactSizeIterator + DoubleEndedIterator
{
let n = len!(self);
let mut i = n;
while let Some(x) = self.next_back() {
i -= 1;
if predicate(x) {
unsafe { assume(i < n) };
return Some(i);
}
}
None
}
// Extra per-iterator items supplied by the invocation site.
$($extra)*
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for $name<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
unsafe {
assume(!self.ptr.as_ptr().is_null());
if mem::size_of::<T>() != 0 {
assume(!self.end.is_null());
}
if is_empty!(self) {
None
} else {
Some(next_back_unchecked!(self))
}
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
self.end = self.ptr.as_ptr();
return None;
}
// We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
unsafe {
self.pre_dec_end(n as isize);
Some(next_back_unchecked!(self))
}
}
}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for $name<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for $name<'_, T> {}
}
}
/// Immutable slice iterator
///
/// This struct is created by the [`iter`] method on [slices].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has an `iter` method to get the `Iter` struct (`&[usize]` here):
/// let slice = &[1, 2, 3];
///
/// // Then, we iterate over it:
/// for element in slice.iter() {
/// println!("{}", element);
/// }
/// ```
///
/// [`iter`]: ../../std/primitive.slice.html#method.iter
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
ptr: NonNull<T>,
end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: marker::PhantomData<&'a T>,
}
// Debug-print the iterator as `Iter([remaining, elements])`.
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Iter").field(&self.as_slice()).finish()
}
}
// SAFETY: `Iter<'a, T>` behaves like `&'a [T]`: it only ever hands out `&T`.
// A `&T` is both `Send` and `Sync` exactly when `T: Sync`, hence both marker
// impls use the `T: Sync` bound.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for Iter<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Send for Iter<'_, T> {}
impl<'a, T> Iter<'a, T> {
/// Views the underlying data as a subslice of the original data.
///
/// This has the same lifetime as the original slice, and so the
/// iterator can continue to be used while this exists.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has the `iter` method to get the `Iter`
/// // struct (`&[usize]` here):
/// let slice = &[1, 2, 3];
///
/// // Then, we get the iterator:
/// let mut iter = slice.iter();
/// // So if we print what `as_slice` method returns here, we have "[1, 2, 3]":
/// println!("{:?}", iter.as_slice());
///
/// // Next, we move to the second element of the slice:
/// iter.next();
/// // Now `as_slice` returns "[2, 3]":
/// println!("{:?}", iter.as_slice());
/// ```
#[stable(feature = "iter_to_slice", since = "1.4.0")]
pub fn as_slice(&self) -> &'a [T] {
// Shared iteration: handing out the remaining elements as a `&'a [T]`
// cannot alias mutably, so this needs only `&self`.
self.make_slice()
}
}
// Instantiate the shared iterator definition for `Iter`. The extra block
// specializes `is_sorted_by` to compare adjacent pairs via slice windows,
// which is possible here because shared iteration can view the remaining
// elements as a slice.
iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
fn is_sorted_by<F>(self, mut compare: F) -> bool
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
{
// Incomparable pairs (`None`) count as unsorted.
self.as_slice().windows(2).all(|w| {
compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false)
})
}
}}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
// Implemented manually (rather than derived) so `Iter<'_, T>` is `Clone`
// without requiring `T: Clone`; all fields are plain pointers/markers.
fn clone(&self) -> Self {
Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
}
}
// Allows an `Iter` to be passed wherever a `&[T]` view is accepted.
#[stable(feature = "slice_iter_as_ref", since = "1.13.0")]
impl<T> AsRef<[T]> for Iter<'_, T> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
/// Mutable slice iterator.
///
/// This struct is created by the [`iter_mut`] method on [slices].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has `iter_mut` method to get the `IterMut`
/// // struct (`&[usize]` here):
/// let mut slice = &mut [1, 2, 3];
///
/// // Then, we iterate over it and increment each element value:
/// for element in slice.iter_mut() {
/// *element += 1;
/// }
///
/// // We now have "[2, 3, 4]":
/// println!("{:?}", slice);
/// ```
///
/// [`iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
ptr: NonNull<T>,
end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: marker::PhantomData<&'a mut T>,
}
// Debug-print the iterator as `IterMut([remaining, elements])`; uses
// `make_slice` (a shared view) so formatting needs no mutable access.
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IterMut").field(&self.make_slice()).finish()
}
}
// SAFETY: `IterMut<'a, T>` behaves like `&'a mut [T]`: sharing it (`Sync`)
// only exposes shared data, so it requires `T: Sync`; sending it (`Send`)
// transfers exclusive access to the elements, so it requires `T: Send`.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for IterMut<'_, T> {}
impl<'a, T> IterMut<'a, T> {
/// Views the underlying data as a subslice of the original data.
///
/// To avoid creating `&mut` references that alias, this is forced
/// to consume the iterator.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // First, we declare a type which has `iter_mut` method to get the `IterMut`
/// // struct (`&[usize]` here):
/// let mut slice = &mut [1, 2, 3];
///
/// {
/// // Then, we get the iterator:
/// let mut iter = slice.iter_mut();
/// // We move to next element:
/// iter.next();
/// // So if we print what `into_slice` method returns here, we have "[2, 3]":
/// println!("{:?}", iter.into_slice());
/// }
///
/// // Now let's modify a value of the slice:
/// {
/// // First we get back the iterator:
/// let mut iter = slice.iter_mut();
/// // We change the value of the first element of the slice returned by the `next` method:
/// *iter.next().unwrap() += 1;
/// }
/// // Now slice is "[2, 2, 3]":
/// println!("{:?}", slice);
/// ```
#[stable(feature = "iter_to_slice", since = "1.4.0")]
pub fn into_slice(self) -> &'a mut [T] {
// SAFETY(note): taking `self` by value guarantees no outstanding `&mut`
// items alias the returned slice; `[ptr, ptr + len)` is the untraversed
// remainder of the original slice.
unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
}
/// Views the underlying data as a subslice of the original data.
///
/// To avoid creating `&mut [T]` references that alias, the returned slice
/// borrows its lifetime from the iterator the method is applied on.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// # #![feature(slice_iter_mut_as_slice)]
/// let mut slice: &mut [usize] = &mut [1, 2, 3];
///
/// // First, we get the iterator:
/// let mut iter = slice.iter_mut();
/// // So if we check what the `as_slice` method returns here, we have "[1, 2, 3]":
/// assert_eq!(iter.as_slice(), &[1, 2, 3]);
///
/// // Next, we move to the second element of the slice:
/// iter.next();
/// // Now `as_slice` returns "[2, 3]":
/// assert_eq!(iter.as_slice(), &[2, 3]);
/// ```
#[unstable(feature = "slice_iter_mut_as_slice", reason = "recently added", issue = "58957")]
pub fn as_slice(&self) -> &[T] {
// A shared view tied to `&self`, so it cannot outlive or alias the
// iterator's mutable items.
self.make_slice()
}
}
// Generates the `Iterator`, `DoubleEndedIterator`, etc. implementations for
// `IterMut` via the shared `iterator!` macro (defined earlier in this module).
iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
/// An internal abstraction over the splitting iterators, so that
/// splitn, splitn_mut etc can be implemented once.
#[doc(hidden)]
trait SplitIter: DoubleEndedIterator {
    /// Marks the underlying iterator as complete, extracting the remaining
    /// portion of the slice.
    fn finish(&mut self) -> Option<Self::Item>;
}
/// An iterator over subslices separated by elements that match a predicate
/// function.
///
/// This struct is created by the [`split`] method on [slices].
///
/// [`split`]: ../../std/primitive.slice.html#method.split
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Split<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // Remaining, not-yet-split portion of the slice.
    v: &'a [T],
    // Predicate identifying separator elements (separators are not yielded).
    pred: P,
    // Set once the final subslice has been yielded.
    finished: bool,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for Split<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    /// Formats the iterator state; the predicate is omitted as it is not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("Split");
        builder.field("v", &self.v);
        builder.field("finished", &self.finished);
        builder.finish()
    }
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, P> Clone for Split<'_, T, P>
where
    P: Clone + FnMut(&T) -> bool,
{
    /// Clones the predicate; the slice view and the flag are plain copies.
    fn clone(&self) -> Self {
        let pred = self.pred.clone();
        Split { pred, v: self.v, finished: self.finished }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for Split<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a [T];
    /// Yields the subslice before the next separator, or the tail when no
    /// separator remains.
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        if let Some(idx) = self.v.iter().position(|x| (self.pred)(x)) {
            // Yield everything before the separator; skip the separator itself.
            let head = &self.v[..idx];
            self.v = &self.v[idx + 1..];
            Some(head)
        } else {
            self.finish()
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // At least one subslice remains; at most one per element plus the tail.
        match self.finished {
            true => (0, Some(0)),
            false => (1, Some(self.v.len() + 1)),
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for Split<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    /// Yields the subslice after the last separator, or the remaining head
    /// when no separator is left.
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        if let Some(idx) = self.v.iter().rposition(|x| (self.pred)(x)) {
            // Yield everything after the separator; drop the separator itself.
            let tail = &self.v[idx + 1..];
            self.v = &self.v[..idx];
            Some(tail)
        } else {
            self.finish()
        }
    }
}
impl<'a, T, P> SplitIter for Split<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    /// Terminates iteration, yielding whatever is left as the final subslice.
    #[inline]
    fn finish(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        self.finished = true;
        Some(self.v)
    }
}
// Once `finished` is set, `next` always returns `None`, so fusing is sound.
#[stable(feature = "fused", since = "1.26.0")]
impl<T, P> FusedIterator for Split<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over subslices separated by elements that match a predicate
/// function. Unlike `Split`, it contains the matched part as a terminator
/// of the subslice.
///
/// This struct is created by the [`split_inclusive`] method on [slices].
///
/// [`split_inclusive`]: ../../std/primitive.slice.html#method.split_inclusive
/// [slices]: ../../std/primitive.slice.html
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusive<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // Remaining, not-yet-split portion of the slice.
    v: &'a [T],
    // Predicate identifying separator elements (separators ARE yielded, at the
    // end of each subslice).
    pred: P,
    // Set once the final subslice has been yielded.
    finished: bool,
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T: fmt::Debug, P> fmt::Debug for SplitInclusive<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The predicate is omitted: closures are not `Debug`.
        f.debug_struct("SplitInclusive")
            .field("v", &self.v)
            .field("finished", &self.finished)
            .finish()
    }
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> Clone for SplitInclusive<'_, T, P>
where
    P: Clone + FnMut(&T) -> bool,
{
    fn clone(&self) -> Self {
        SplitInclusive { v: self.v, pred: self.pred.clone(), finished: self.finished }
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> Iterator for SplitInclusive<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        // `idx` is one past the separator (so the separator is included in the
        // yielded chunk), or the slice length when no separator remains.
        let idx =
            self.v.iter().position(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(self.v.len());
        if idx == self.v.len() {
            self.finished = true;
        }
        let ret = Some(&self.v[..idx]);
        self.v = &self.v[idx..];
        ret
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> DoubleEndedIterator for SplitInclusive<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.finished {
            return None;
        }
        // The last index of self.v is already checked and found to match
        // by the last iteration, so we start searching a new match
        // one index to the left.
        let remainder = if self.v.is_empty() { &[] } else { &self.v[..(self.v.len() - 1)] };
        // `idx` is one past the previous separator (start of the final chunk),
        // or 0 when no further separator exists.
        let idx = remainder.iter().rposition(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(0);
        if idx == 0 {
            self.finished = true;
        }
        let ret = Some(&self.v[idx..]);
        self.v = &self.v[..idx];
        ret
    }
}
// After `finished` is set, both ends return `None` forever, so fusing is sound.
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> FusedIterator for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the mutable subslices of the vector which are separated
/// by elements that match `pred`.
///
/// This struct is created by the [`split_mut`] method on [slices].
///
/// [`split_mut`]: ../../std/primitive.slice.html#method.split_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // Remaining, not-yet-split portion of the slice (exclusive borrow).
    v: &'a mut [T],
    // Predicate identifying separator elements (separators are not yielded).
    pred: P,
    // Set once the final subslice has been yielded.
    finished: bool,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitMut").field("v", &self.v).field("finished", &self.finished).finish()
    }
}
impl<'a, T, P> SplitIter for SplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            None
        } else {
            self.finished = true;
            // `mem::replace` moves the `&'a mut [T]` out (leaving an empty
            // slice behind) so the full lifetime `'a` can be returned without
            // borrowing from `self`.
            Some(mem::replace(&mut self.v, &mut []))
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for SplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = {
            // work around borrowck limitations
            let pred = &mut self.pred;
            self.v.iter().position(|x| (*pred)(x))
        };
        match idx_opt {
            None => self.finish(),
            Some(idx) => {
                // Swap the slice out of `self` so it can be split with the
                // full lifetime `'a` rather than a reborrow of `self`.
                let tmp = mem::replace(&mut self.v, &mut []);
                let (head, tail) = tmp.split_at_mut(idx);
                // `tail[0]` is the separator; skip it.
                self.v = &mut tail[1..];
                Some(head)
            }
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.finished {
            (0, Some(0))
        } else {
            // if the predicate doesn't match anything, we yield one slice
            // if it matches every element, we yield len+1 empty slices.
            (1, Some(self.v.len() + 1))
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = {
            // work around borrowck limitations
            let pred = &mut self.pred;
            self.v.iter().rposition(|x| (*pred)(x))
        };
        match idx_opt {
            None => self.finish(),
            Some(idx) => {
                // Same swap-out trick as `next`, splitting at the last match.
                let tmp = mem::replace(&mut self.v, &mut []);
                let (head, tail) = tmp.split_at_mut(idx);
                self.v = head;
                // `tail[0]` is the separator; yield everything after it.
                Some(&mut tail[1..])
            }
        }
    }
}
// Once `finished` is set, `next` always returns `None`, so fusing is sound.
#[stable(feature = "fused", since = "1.26.0")]
impl<T, P> FusedIterator for SplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the mutable subslices of the vector which are separated
/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched
/// parts in the ends of the subslices.
///
/// This struct is created by the [`split_inclusive_mut`] method on [slices].
///
/// [`split_inclusive_mut`]: ../../std/primitive.slice.html#method.split_inclusive_mut
/// [slices]: ../../std/primitive.slice.html
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusiveMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // Remaining, not-yet-split portion of the slice (exclusive borrow).
    v: &'a mut [T],
    // Predicate identifying separator elements (yielded at chunk ends).
    pred: P,
    // Set once the final subslice has been yielded.
    finished: bool,
}
// NOTE: no `Clone` impl — the iterator holds a `&mut` borrow.
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T: fmt::Debug, P> fmt::Debug for SplitInclusiveMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitInclusiveMut")
            .field("v", &self.v)
            .field("finished", &self.finished)
            .finish()
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> Iterator for SplitInclusiveMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = {
            // work around borrowck limitations
            let pred = &mut self.pred;
            self.v.iter().position(|x| (*pred)(x))
        };
        // `idx` is one past the separator (included in the yielded chunk),
        // or the slice length when no separator remains.
        let idx = idx_opt.map(|idx| idx + 1).unwrap_or(self.v.len());
        if idx == self.v.len() {
            self.finished = true;
        }
        // Swap the slice out of `self` so it can be split with lifetime `'a`.
        let tmp = mem::replace(&mut self.v, &mut []);
        let (head, tail) = tmp.split_at_mut(idx);
        self.v = tail;
        Some(head)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.finished {
            (0, Some(0))
        } else {
            // if the predicate doesn't match anything, we yield one slice
            // if it matches every element, we yield len+1 empty slices.
            (1, Some(self.v.len() + 1))
        }
    }
}
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<'a, T, P> DoubleEndedIterator for SplitInclusiveMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.finished {
            return None;
        }
        let idx_opt = if self.v.is_empty() {
            None
        } else {
            // work around borrowck limitations
            let pred = &mut self.pred;
            // The last index of self.v is already checked and found to match
            // by the last iteration, so we start searching a new match
            // one index to the left.
            let remainder = &self.v[..(self.v.len() - 1)];
            remainder.iter().rposition(|x| (*pred)(x))
        };
        // `idx` is the start of the final chunk (one past the previous
        // separator), or 0 when no further separator exists.
        let idx = idx_opt.map(|idx| idx + 1).unwrap_or(0);
        if idx == 0 {
            self.finished = true;
        }
        let tmp = mem::replace(&mut self.v, &mut []);
        let (head, tail) = tmp.split_at_mut(idx);
        self.v = head;
        Some(tail)
    }
}
// After `finished` is set, both ends return `None` forever, so fusing is sound.
#[unstable(feature = "split_inclusive", issue = "72360")]
impl<T, P> FusedIterator for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over subslices separated by elements that match a predicate
/// function, starting from the end of the slice.
///
/// This struct is created by the [`rsplit`] method on [slices].
///
/// [`rsplit`]: ../../std/primitive.slice.html#method.rsplit
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "slice_rsplit", since = "1.27.0")]
#[derive(Clone)] // Is this correct, or does it incorrectly require `T: Clone`?
pub struct RSplit<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // A forward `Split`; all methods delegate to it with front/back swapped.
    inner: Split<'a, T, P>,
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplit<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Shows the inner `Split`'s fields directly, under the `RSplit` name.
        f.debug_struct("RSplit")
            .field("v", &self.inner.v)
            .field("finished", &self.inner.finished)
            .finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> Iterator for RSplit<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        // Forward iteration on `RSplit` is backward iteration on `Split`.
        self.inner.next_back()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        self.inner.next()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> SplitIter for RSplit<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a [T]> {
        self.inner.finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T, P> FusedIterator for RSplit<'_, T, P> where P: FnMut(&T) -> bool {}
/// An iterator over the subslices of the vector which are separated
/// by elements that match `pred`, starting from the end of the slice.
///
/// This struct is created by the [`rsplit_mut`] method on [slices].
///
/// [`rsplit_mut`]: ../../std/primitive.slice.html#method.rsplit_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "slice_rsplit", since = "1.27.0")]
pub struct RSplitMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // A forward `SplitMut`; all methods delegate with front/back swapped.
    inner: SplitMut<'a, T, P>,
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Shows the inner `SplitMut`'s fields directly, under the `RSplitMut` name.
        f.debug_struct("RSplitMut")
            .field("v", &self.inner.v)
            .field("finished", &self.inner.finished)
            .finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> SplitIter for RSplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn finish(&mut self) -> Option<&'a mut [T]> {
        self.inner.finish()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> Iterator for RSplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        // Forward iteration on `RSplitMut` is backward iteration on `SplitMut`.
        self.inner.next_back()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P>
where
    P: FnMut(&T) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        self.inner.next()
    }
}
#[stable(feature = "slice_rsplit", since = "1.27.0")]
impl<T, P> FusedIterator for RSplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
/// An private iterator over subslices separated by elements that
/// match a predicate function, splitting at most a fixed number of
/// times.
#[derive(Debug)]
struct GenericSplitN<I> {
    // The unlimited splitting iterator being capped.
    iter: I,
    // Number of items still allowed to be yielded (counts the final
    // `finish()`-produced remainder as one item).
    count: usize,
}
impl<T, I: SplitIter<Item = T>> Iterator for GenericSplitN<I> {
    type Item = T;
    /// Yields from the inner iterator until one item remains, then asks the
    /// inner iterator to `finish`, producing the whole remainder as the last item.
    #[inline]
    fn next(&mut self) -> Option<T> {
        if self.count == 0 {
            return None;
        }
        self.count -= 1;
        if self.count == 0 { self.iter.finish() } else { self.iter.next() }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (lower, upper) = self.iter.size_hint();
        // The cap can only shrink the upper bound, never the lower one.
        let capped = upper.map(|u| cmp::min(self.count, u));
        (lower, capped)
    }
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
///
/// This struct is created by the [`splitn`] method on [slices].
///
/// [`splitn`]: ../../std/primitive.slice.html#method.splitn
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitN<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `Split` capped by `GenericSplitN`'s counter.
    inner: GenericSplitN<Split<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitN<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitN").field("inner", &self.inner).finish()
    }
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
///
/// This struct is created by the [`rsplitn`] method on [slices].
///
/// [`rsplitn`]: ../../std/primitive.slice.html#method.rsplitn
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitN<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `RSplit` capped by `GenericSplitN`'s counter.
    inner: GenericSplitN<RSplit<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitN<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RSplitN").field("inner", &self.inner).finish()
    }
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
///
/// This struct is created by the [`splitn_mut`] method on [slices].
///
/// [`splitn_mut`]: ../../std/primitive.slice.html#method.splitn_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitNMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `SplitMut` capped by `GenericSplitN`'s counter.
    inner: GenericSplitN<SplitMut<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for SplitNMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitNMut").field("inner", &self.inner).finish()
    }
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
///
/// This struct is created by the [`rsplitn_mut`] method on [slices].
///
/// [`rsplitn_mut`]: ../../std/primitive.slice.html#method.rsplitn_mut
/// [slices]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitNMut<'a, T: 'a, P>
where
    P: FnMut(&T) -> bool,
{
    // `RSplitMut` capped by `GenericSplitN`'s counter.
    inner: GenericSplitN<RSplitMut<'a, T, P>>,
}
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<T: fmt::Debug, P> fmt::Debug for RSplitNMut<'_, T, P>
where
    P: FnMut(&T) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RSplitNMut").field("inner", &self.inner).finish()
    }
}
// Implements `Iterator` (and `FusedIterator`) for the four `*SplitN*` wrappers
// by forwarding to their `inner: GenericSplitN<_>` field.
//
// NOTE(review): the bounds below spell out `FnMut(&T)` with a literal `T`,
// so this macro only works when invoked with `$elem = T`, as all four
// invocations below do.
macro_rules! forward_iterator {
    ($name:ident: $elem:ident, $iter_of:ty) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, $elem, P> Iterator for $name<'a, $elem, P>
        where
            P: FnMut(&T) -> bool,
        {
            type Item = $iter_of;
            #[inline]
            fn next(&mut self) -> Option<$iter_of> {
                self.inner.next()
            }
            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.inner.size_hint()
            }
        }
        #[stable(feature = "fused", since = "1.26.0")]
        impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> where P: FnMut(&T) -> bool {}
    };
}
forward_iterator! { SplitN: T, &'a [T] }
forward_iterator! { RSplitN: T, &'a [T] }
forward_iterator! { SplitNMut: T, &'a mut [T] }
forward_iterator! { RSplitNMut: T, &'a mut [T] }
/// An iterator over overlapping subslices of length `size`.
///
/// This struct is created by the [`windows`] method on [slices].
///
/// [`windows`]: ../../std/primitive.slice.html#method.windows
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Windows<'a, T: 'a> {
    // Remaining portion of the slice; shrinks by one element per step.
    v: &'a [T],
    // Window length; `windows` guarantees this is non-zero — TODO confirm
    // against the constructor (not visible in this chunk).
    size: usize,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Windows<'_, T> {
    /// Copies the shared-slice view and the window size; `T: Clone` is not required.
    fn clone(&self) -> Self {
        let (v, size) = (self.v, self.size);
        Windows { v, size }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Windows<'a, T> {
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.size > self.v.len() {
            None
        } else {
            // Yield the leading window, then advance the view by one element
            // so consecutive windows overlap.
            let ret = Some(&self.v[..self.size]);
            self.v = &self.v[1..];
            ret
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.size > self.v.len() {
            (0, Some(0))
        } else {
            // Exactly one window per valid start position.
            let size = self.v.len() - self.size + 1;
            (size, Some(size))
        }
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // `end` is the exclusive end of the n-th window; `overflowing_add`
        // guards against `size + n` wrapping around.
        let (end, overflow) = self.size.overflowing_add(n);
        if end > self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            let nth = &self.v[n..end];
            self.v = &self.v[n + 1..];
            Some(nth)
        }
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.size > self.v.len() {
            None
        } else {
            // The final window occupies the last `size` elements.
            let start = self.v.len() - self.size;
            Some(&self.v[start..])
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Windows<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.size > self.v.len() {
            None
        } else {
            // Yield the trailing window, then shrink the view by one element
            // from the back.
            let ret = Some(&self.v[self.v.len() - self.size..]);
            self.v = &self.v[..self.v.len() - 1];
            ret
        }
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        // `end` is the exclusive end of the n-th window from the back;
        // `overflowing_sub` guards against `len - n` underflowing.
        let (end, overflow) = self.v.len().overflowing_sub(n);
        if end < self.size || overflow {
            self.v = &[];
            None
        } else {
            let ret = &self.v[end - self.size..end];
            self.v = &self.v[..end - 1];
            Some(ret)
        }
    }
}
// `size_hint` returns exact bounds, so the iterator is exact-sized and its
// length is trusted; it also never yields again after returning `None`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Windows<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Windows<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Windows<'_, T> {}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
        // SAFETY: since the caller guarantees that `i` is in bounds,
        // which means that `i` cannot overflow an `isize`, and the
        // slice created by `from_raw_parts` is a subslice of `self.v`
        // thus is guaranteed to be valid for the lifetime `'a` of `self.v`.
        unsafe { from_raw_parts(self.v.as_ptr().add(i), self.size) }
    }
    // Indexing does not advance the iterator or run user code.
    fn may_have_side_effect() -> bool {
        false
    }
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`chunks`] method on [slices].
///
/// [`chunks`]: ../../std/primitive.slice.html#method.chunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chunks<'a, T: 'a> {
    // Remaining, not-yet-yielded portion of the slice.
    v: &'a [T],
    // Requested chunk length; `chunks` guarantees this is non-zero — TODO
    // confirm against the constructor (not visible in this chunk).
    chunk_size: usize,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Chunks<'_, T> {
    /// Copies the shared-slice view and the chunk size; `T: Clone` is not required.
    fn clone(&self) -> Self {
        let (v, chunk_size) = (self.v, self.chunk_size);
        Chunks { v, chunk_size }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Chunks<'a, T> {
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The final chunk may be shorter than `chunk_size`.
            let chunksz = cmp::min(self.v.len(), self.chunk_size);
            let (fst, snd) = self.v.split_at(chunksz);
            self.v = snd;
            Some(fst)
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.v.is_empty() {
            (0, Some(0))
        } else {
            // Ceiling division: a partial trailing chunk still counts.
            let n = self.v.len() / self.chunk_size;
            let rem = self.v.len() % self.chunk_size;
            let n = if rem > 0 { n + 1 } else { n };
            (n, Some(n))
        }
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // `overflowing_mul` guards against `n * chunk_size` wrapping around.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            // Clamp `end` to the slice length (the n-th chunk may be partial).
            let end = match start.checked_add(self.chunk_size) {
                Some(sum) => cmp::min(self.v.len(), sum),
                None => self.v.len(),
            };
            let nth = &self.v[start..end];
            self.v = &self.v[end..];
            Some(nth)
        }
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.v.is_empty() {
            None
        } else {
            // Start of the final (possibly partial) chunk.
            let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
            Some(&self.v[start..])
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Chunks<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The chunk at the back is the remainder (if any), so that forward
            // and backward iteration yield the same set of chunks.
            let remainder = self.v.len() % self.chunk_size;
            let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
            let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
            self.v = fst;
            Some(snd)
        }
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &[];
            None
        } else {
            // Index chunks from the front: the n-th chunk from the back is
            // chunk `len - 1 - n`, which may be the partial trailing chunk.
            let start = (len - 1 - n) * self.chunk_size;
            let end = match start.checked_add(self.chunk_size) {
                Some(res) => cmp::min(res, self.v.len()),
                None => self.v.len(),
            };
            let nth_back = &self.v[start..end];
            self.v = &self.v[..start];
            Some(nth_back)
        }
    }
}
// `size_hint` returns exact bounds, so the iterator is exact-sized and its
// length is trusted; it also never yields again after returning `None`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Chunks<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for Chunks<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Chunks<'_, T> {}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
        let start = i * self.chunk_size;
        // Clamp to the slice length: the last chunk may be partial.
        let end = match start.checked_add(self.chunk_size) {
            None => self.v.len(),
            Some(end) => cmp::min(end, self.v.len()),
        };
        // SAFETY: the caller guarantees that `i` is in bounds,
        // which means that `start` must be in bounds of the
        // underlying `self.v` slice, and we made sure that `end`
        // is also in bounds of `self.v`. Thus, `start` cannot overflow
        // an `isize`, and the slice constructed by `from_raw_parts`
        // is a subslice of `self.v` which is guaranteed to be valid
        // for the lifetime `'a` of `self.v`.
        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
    }
    // Indexing does not advance the iterator or run user code.
    fn may_have_side_effect() -> bool {
        false
    }
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`chunks_mut`] method on [slices].
///
/// [`chunks_mut`]: ../../std/primitive.slice.html#method.chunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ChunksMut<'a, T: 'a> {
    // Remaining, not-yet-yielded portion of the slice (exclusive borrow, so
    // there is no `Clone` impl for this type).
    v: &'a mut [T],
    // Requested chunk length; `chunks_mut` guarantees this is non-zero — TODO
    // confirm against the constructor (not visible in this chunk).
    chunk_size: usize,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for ChunksMut<'a, T> {
    type Item = &'a mut [T];
    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The final chunk may be shorter than `chunk_size`.
            let sz = cmp::min(self.v.len(), self.chunk_size);
            // Swap the slice out of `self` so it can be split with the full
            // lifetime `'a` rather than a reborrow of `self`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(sz);
            self.v = tail;
            Some(head)
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.v.is_empty() {
            (0, Some(0))
        } else {
            // Ceiling division: a partial trailing chunk still counts.
            let n = self.v.len() / self.chunk_size;
            let rem = self.v.len() % self.chunk_size;
            let n = if rem > 0 { n + 1 } else { n };
            (n, Some(n))
        }
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
        // `overflowing_mul` guards against `n * chunk_size` wrapping around.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &mut [];
            None
        } else {
            // Clamp `end` to the slice length (the n-th chunk may be partial).
            let end = match start.checked_add(self.chunk_size) {
                Some(sum) => cmp::min(self.v.len(), sum),
                None => self.v.len(),
            };
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(end);
            let (_, nth) = head.split_at_mut(start);
            self.v = tail;
            Some(nth)
        }
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.v.is_empty() {
            None
        } else {
            // Start of the final (possibly partial) chunk.
            let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
            Some(&mut self.v[start..])
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The chunk at the back is the remainder (if any), so that forward
            // and backward iteration yield the same set of chunks.
            let remainder = self.v.len() % self.chunk_size;
            let sz = if remainder != 0 { remainder } else { self.chunk_size };
            // Swap the slice out of `self` so it can be split with lifetime `'a`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let tmp_len = tmp.len();
            let (head, tail) = tmp.split_at_mut(tmp_len - sz);
            self.v = head;
            Some(tail)
        }
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &mut [];
            None
        } else {
            // Index chunks from the front: the n-th chunk from the back is
            // chunk `len - 1 - n`, which may be the partial trailing chunk.
            let start = (len - 1 - n) * self.chunk_size;
            let end = match start.checked_add(self.chunk_size) {
                Some(res) => cmp::min(res, self.v.len()),
                None => self.v.len(),
            };
            let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
            let (head, nth_back) = temp.split_at_mut(start);
            self.v = head;
            Some(nth_back)
        }
    }
}
// `size_hint` returns exact bounds, so the iterator is exact-sized and its
// length is trusted; it also never yields again after returning `None`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for ChunksMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksMut<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for ChunksMut<'_, T> {}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
        let start = i * self.chunk_size;
        // Clamp to the slice length: the last chunk may be partial.
        let end = match start.checked_add(self.chunk_size) {
            None => self.v.len(),
            Some(end) => cmp::min(end, self.v.len()),
        };
        // SAFETY: see comments for `Chunks::get_unchecked`.
        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
    }
    // Indexing does not advance the iterator or run user code.
    fn may_have_side_effect() -> bool {
        false
    }
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`chunks_exact`] method on [slices].
///
/// [`chunks_exact`]: ../../std/primitive.slice.html#method.chunks_exact
/// [`remainder`]: ../../std/slice/struct.ChunksExact.html#method.remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExact<'a, T: 'a> {
    // Remaining full chunks; its length is presumably always a multiple of
    // `chunk_size` (the remainder is split off at construction) — TODO confirm
    // against the constructor (not visible in this chunk).
    v: &'a [T],
    // Trailing elements (fewer than `chunk_size`) never yielded by `next`.
    rem: &'a [T],
    // Exact chunk length.
    chunk_size: usize,
}
impl<'a, T> ChunksExact<'a, T> {
    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    #[stable(feature = "chunks_exact", since = "1.31.0")]
    pub fn remainder(&self) -> &'a [T] {
        self.rem
    }
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> Clone for ChunksExact<'_, T> {
    /// Copies both shared-slice views and the chunk size; `T: Clone` is not required.
    fn clone(&self) -> Self {
        let (v, rem, chunk_size) = (self.v, self.rem, self.chunk_size);
        ChunksExact { v, rem, chunk_size }
    }
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExact<'a, T> {
    type Item = &'a [T];
    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Always splits off a full `chunk_size` chunk — no partial chunks.
            let (fst, snd) = self.v.split_at(self.chunk_size);
            self.v = snd;
            Some(fst)
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Floor division: only full chunks are yielded.
        let n = self.v.len() / self.chunk_size;
        (n, Some(n))
    }
    #[inline]
    fn count(self) -> usize {
        self.len()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // `overflowing_mul` guards against `n * chunk_size` wrapping around.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            // Skip the first `n` chunks, then delegate to `next`.
            let (_, snd) = self.v.split_at(start);
            self.v = snd;
            self.next()
        }
    }
    #[inline]
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Split off a full chunk from the back.
            let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
            self.v = fst;
            Some(snd)
        }
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &[];
            None
        } else {
            // Every chunk is full-sized, so the bounds are exact.
            let start = (len - 1 - n) * self.chunk_size;
            let end = start + self.chunk_size;
            let nth_back = &self.v[start..end];
            self.v = &self.v[..start];
            Some(nth_back)
        }
    }
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExact<'_, T> {
    fn is_empty(&self) -> bool {
        // NOTE(review): correct only because `v.len()` stays a multiple of
        // `chunk_size` (remainder split off at construction) — TODO confirm
        // against the constructor (not visible in this chunk).
        self.v.is_empty()
    }
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExact<'_, T> {}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExact<'_, T> {}
#[doc(hidden)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
let start = i * self.chunk_size;
// SAFETY: mostly identical to `Chunks::get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`chunks_exact_mut`] method on [slices].
///
/// [`chunks_exact_mut`]: ../../std/primitive.slice.html#method.chunks_exact_mut
/// [`into_remainder`]: ../../std/slice/struct.ChunksExactMut.html#method.into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExactMut<'a, T: 'a> {
// Part of the slice still to be iterated; its length stays a multiple of
// `chunk_size` (assumes the constructor — outside this view — splits the
// remainder off up front).
v: &'a mut [T],
// Trailing `len % chunk_size` elements, never yielded; handed out once by
// `into_remainder`.
rem: &'a mut [T],
chunk_size: usize,
}
impl<'a, T> ChunksExactMut<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Consumes the iterator: handing out a `&'a mut` remainder while the
/// iterator still held the rest of the slice would alias mutable borrows.
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExactMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Move the slice out of `self` (leaving an empty one) so the split
// borrows for `'a` instead of reborrowing from `&mut self`.
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(self.chunk_size);
self.v = tail;
Some(head)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: only whole chunks are yielded.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// `overflow` guards against `n * chunk_size` wrapping for huge `n`.
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// Drop the first `n` chunks, then yield the next one via `next()`.
let tmp = mem::replace(&mut self.v, &mut []);
let (_, snd) = tmp.split_at_mut(start);
self.v = snd;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
// The last front-to-back chunk is exactly the first back-to-front one.
self.next_back()
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Same borrow dance as `next`, taking the chunk from the back.
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
self.v = head;
Some(tail)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// Chunk `n` from the back is chunk `len - 1 - n` from the front;
// split twice to carve `[start..end]` out of the unique borrow.
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (head, nth_back) = temp.split_at_mut(start);
self.v = head;
Some(nth_back)
}
}
}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExactMut<'_, T> {
fn is_empty(&self) -> bool {
// Valid shortcut for `len() == 0`: `v`'s length stays a multiple of
// `chunk_size`.
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExactMut<'_, T> {}
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExactMut<'_, T> {}
#[doc(hidden)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {
// Caller contract (`TrustedRandomAccess`): `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
let start = i * self.chunk_size;
// SAFETY: see comments for `ChunksMut::get_unchecked` (the previous
// comment pointed at this very function).
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `N-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`array_chunks`] method on [slices].
///
/// [`array_chunks`]: ../../std/primitive.slice.html#method.array_chunks
/// [`remainder`]: ../../std/slice/struct.ArrayChunks.html#method.remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
pub struct ArrayChunks<'a, T: 'a, const N: usize> {
// Iteration is fully delegated to an ordinary slice iterator over array
// references; the chunk size is the const generic `N`.
iter: Iter<'a, [T; N]>,
// Trailing `len % N` elements, never yielded; see `remainder()`.
rem: &'a [T],
}
impl<'a, T, const N: usize> ArrayChunks<'a, T, N> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `N-1`
/// elements.
#[unstable(feature = "array_chunks", issue = "74985")]
pub fn remainder(&self) -> &'a [T] {
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
// Manual impl: deriving would add an unnecessary `T: Clone` bound.
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> Clone for ArrayChunks<'_, T, N> {
fn clone(&self) -> Self {
ArrayChunks { iter: self.iter.clone(), rem: self.rem }
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> {
type Item = &'a [T; N];
// Every method below simply forwards to the inner `Iter<'a, [T; N]>`.
#[inline]
fn next(&mut self) -> Option<&'a [T; N]> {
self.iter.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
#[inline]
fn count(self) -> usize {
self.iter.count()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth(n)
}
#[inline]
fn last(self) -> Option<Self::Item> {
self.iter.last()
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T; N]> {
self.iter.next_back()
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
self.iter.nth_back(n)
}
}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> ExactSizeIterator for ArrayChunks<'_, T, N> {
fn is_empty(&self) -> bool {
self.iter.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, const N: usize> TrustedLen for ArrayChunks<'_, T, N> {}
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> FusedIterator for ArrayChunks<'_, T, N> {}
#[doc(hidden)]
#[unstable(feature = "array_chunks", issue = "74985")]
unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T; N] {
// SAFETY: the caller's `TrustedRandomAccess` obligation (`i` in-bounds)
// is forwarded unchanged to the inner iterator, which has the same
// length as `self`.
unsafe { self.iter.get_unchecked(i) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks`] method on [slices].
///
/// [`rchunks`]: ../../std/primitive.slice.html#method.rchunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunks<'a, T: 'a> {
// Part of the slice still to be iterated; chunks are taken from its back.
v: &'a [T],
chunk_size: usize,
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
// Manual impl: deriving would add an unnecessary `T: Clone` bound.
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> Clone for RChunks<'_, T> {
fn clone(&self) -> Self {
RChunks { v: self.v, chunk_size: self.chunk_size }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunks<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
// The final (front-most) chunk may be shorter than `chunk_size`.
let chunksz = cmp::min(self.v.len(), self.chunk_size);
let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
self.v = fst;
Some(snd)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
// Round up: a non-empty remainder forms one extra (short) chunk.
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
// `overflow` guards against `n * chunk_size` wrapping for huge `n`.
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &[];
None
} else {
// Can't underflow because of the check above
let end = self.v.len() - end;
// Clamp to the slice front for the (possibly short) remainder chunk.
let start = match end.checked_sub(self.chunk_size) {
Some(sum) => sum,
None => 0,
};
let nth = &self.v[start..end];
self.v = &self.v[0..start];
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
// The last chunk yielded is the (possibly short) front remainder.
let rem = self.v.len() % self.chunk_size;
let end = if rem == 0 { self.chunk_size } else { rem };
Some(&self.v[0..end])
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunks<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
// The back of the reverse iteration is the front of the slice; the
// first chunk from that end is the (possibly short) remainder.
let remainder = self.v.len() % self.chunk_size;
let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// can't underflow because `n < len`
let offset_from_end = (len - 1 - n) * self.chunk_size;
let end = self.v.len() - offset_from_end;
// `saturating_sub` clamps the front remainder chunk to start at 0.
let start = end.saturating_sub(self.chunk_size);
let nth_back = &self.v[start..end];
self.v = &self.v[end..];
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunks<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunks<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunks<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {
// Caller contract (`TrustedRandomAccess`): `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
let end = self.v.len() - i * self.chunk_size;
let start = match end.checked_sub(self.chunk_size) {
None => 0,
Some(start) => start,
};
// SAFETY: mostly identical to `Chunks::get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks_mut`] method on [slices].
///
/// [`rchunks_mut`]: ../../std/primitive.slice.html#method.rchunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksMut<'a, T: 'a> {
// Part of the slice still to be iterated; chunks are taken from its back.
v: &'a mut [T],
chunk_size: usize,
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
// The final (front-most) chunk may be shorter than `chunk_size`.
let sz = cmp::min(self.v.len(), self.chunk_size);
// Move the slice out of `self` so the split borrows for `'a` instead
// of reborrowing from `&mut self`.
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
// Round up: a non-empty remainder forms one extra (short) chunk.
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// `overflow` guards against `n * chunk_size` wrapping for huge `n`.
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// Can't underflow because of the check above
let end = self.v.len() - end;
// Clamp to the slice front for the (possibly short) remainder chunk.
let start = match end.checked_sub(self.chunk_size) {
Some(sum) => sum,
None => 0,
};
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(start);
let (nth, _) = tail.split_at_mut(end - start);
self.v = head;
Some(nth)
}
}
#[inline]
fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
// The last chunk yielded is the (possibly short) front remainder.
let rem = self.v.len() % self.chunk_size;
let end = if rem == 0 { self.chunk_size } else { rem };
Some(&mut self.v[0..end])
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
// The back of the reverse iteration is the front of the slice; the
// first chunk from that end is the (possibly short) remainder.
let remainder = self.v.len() % self.chunk_size;
let sz = if remainder != 0 { remainder } else { self.chunk_size };
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// can't underflow because `n < len`
let offset_from_end = (len - 1 - n) * self.chunk_size;
let end = self.v.len() - offset_from_end;
// `saturating_sub` clamps the front remainder chunk to start at 0.
let start = end.saturating_sub(self.chunk_size);
// Split twice to carve `[start..end]` out of the unique borrow.
let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (_, nth_back) = tmp.split_at_mut(start);
self.v = tail;
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksMut<'_, T> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksMut<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksMut<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {
// Caller contract (`TrustedRandomAccess`): `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
let end = self.v.len() - i * self.chunk_size;
let start = match end.checked_sub(self.chunk_size) {
None => 0,
Some(start) => start,
};
// SAFETY: see comments for `RChunks::get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact`] method on [slices].
///
/// [`rchunks_exact`]: ../../std/primitive.slice.html#method.rchunks_exact
/// [`remainder`]: ../../std/slice/struct.RChunksExact.html#method.remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExact<'a, T: 'a> {
// Part of the slice still to be iterated; its length stays a multiple of
// `chunk_size` (assumes the constructor — outside this view — splits the
// front remainder off).
v: &'a [T],
// The `len % chunk_size` elements never yielded (the front of the original
// slice, since chunks are counted from the back); see `remainder()`.
rem: &'a [T],
chunk_size: usize,
}
impl<'a, T> RChunksExact<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn remainder(&self) -> &'a [T] {
self.rem
}
}
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
// Manual impl: deriving would add an unnecessary `T: Clone` bound.
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Clone for RChunksExact<'a, T> {
fn clone(&self) -> RChunksExact<'a, T> {
RChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExact<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Yield the trailing chunk; keep the head for subsequent calls.
let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
self.v = fst;
Some(snd)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: only whole chunks are yielded.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<Self::Item> {
// `overflow` guards against `n * chunk_size` wrapping for huge `n`.
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &[];
None
} else {
// Drop the last `n` chunks, then yield the next one via `next()`.
let (fst, _) = self.v.split_at(self.v.len() - end);
self.v = fst;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
// The last back-to-front chunk is exactly the first front-to-back one.
self.next_back()
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExact<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Yield the front chunk; keep the tail for subsequent calls.
let (fst, snd) = self.v.split_at(self.chunk_size);
self.v = snd;
Some(fst)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &[];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let offset = (len - n) * self.chunk_size;
let start = self.v.len() - offset;
let end = start + self.chunk_size;
let nth_back = &self.v[start..end];
// Keep only the part behind the chunk just yielded.
self.v = &self.v[end..];
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> {
fn is_empty(&self) -> bool {
// Valid shortcut for `len() == 0`: `v`'s length stays a multiple of
// `chunk_size`.
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExact<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExact<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {
// Caller contract (`TrustedRandomAccess`): `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
let end = self.v.len() - i * self.chunk_size;
let start = end - self.chunk_size;
// SAFETY: mostly identical to `Chunks::get_unchecked`.
unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact_mut`] method on [slices].
///
/// [`rchunks_exact_mut`]: ../../std/primitive.slice.html#method.rchunks_exact_mut
/// [`into_remainder`]: ../../std/slice/struct.RChunksExactMut.html#method.into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExactMut<'a, T: 'a> {
// Part of the slice still to be iterated; its length stays a multiple of
// `chunk_size` (assumes the constructor — outside this view — splits the
// front remainder off).
v: &'a mut [T],
// The `len % chunk_size` elements never yielded; handed out once by
// `into_remainder`.
rem: &'a mut [T],
chunk_size: usize,
}
impl<'a, T> RChunksExactMut<'a, T> {
/// Returns the remainder of the original slice that is not going to be
/// returned by the iterator. The returned slice has at most `chunk_size-1`
/// elements.
///
/// Consumes the iterator: handing out a `&'a mut` remainder while the
/// iterator still held the rest of the slice would alias mutable borrows.
#[stable(feature = "rchunks", since = "1.31.0")]
pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExactMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Move the slice out of `self` so the split borrows for `'a` instead
// of reborrowing from `&mut self`.
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
self.v = head;
Some(tail)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// Exact: only whole chunks are yielded.
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
#[inline]
fn count(self) -> usize {
self.len()
}
#[inline]
fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
// `overflow` guards against `n * chunk_size` wrapping for huge `n`.
let (end, overflow) = n.overflowing_mul(self.chunk_size);
if end >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// Drop the last `n` chunks, then yield the next one via `next()`.
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (fst, _) = tmp.split_at_mut(tmp_len - end);
self.v = fst;
self.next()
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
// The last back-to-front chunk is exactly the first front-to-back one.
self.next_back()
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// Same borrow dance as `next`, taking the chunk from the front.
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(self.chunk_size);
self.v = tail;
Some(head)
}
}
#[inline]
fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n >= len {
self.v = &mut [];
None
} else {
// now that we know that `n` corresponds to a chunk,
// none of these operations can underflow/overflow
let offset = (len - n) * self.chunk_size;
let start = self.v.len() - offset;
let end = start + self.chunk_size;
// Split twice to carve `[start..end]` out of the unique borrow.
let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
let (_, nth_back) = tmp.split_at_mut(start);
self.v = tail;
Some(nth_back)
}
}
}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksExactMut<'_, T> {
fn is_empty(&self) -> bool {
// Valid shortcut for `len() == 0`: `v`'s length stays a multiple of
// `chunk_size`.
self.v.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExactMut<'_, T> {}
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExactMut<'_, T> {}
#[doc(hidden)]
#[stable(feature = "rchunks", since = "1.31.0")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {
// Caller contract (`TrustedRandomAccess`): `i < self.len()`.
unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
let end = self.v.len() - i * self.chunk_size;
let start = end - self.chunk_size;
// SAFETY: see comments for `RChunksExact::get_unchecked`.
unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
}
fn may_have_side_effect() -> bool {
false
}
}
//
// Free functions
//
/// Forms a slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be [valid] for reads for `len * mem::size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects. See [below](#incorrect-usage)
/// for an example incorrectly not taking this into account.
/// * `data` must be non-null and aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
/// (including slices of any length) being aligned and non-null to distinguish
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The memory referenced by the returned slice must not be mutated for the duration
/// of lifetime `'a`, except inside an `UnsafeCell`.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// # Caveat
///
/// The lifetime for the returned slice is inferred from its usage. To
/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
/// source lifetime is safe in the context, such as by providing a helper
/// function taking the lifetime of a host value for the slice, or by explicit
/// annotation.
///
/// # Examples
///
/// ```
/// use std::slice;
///
/// // manifest a slice for a single element
/// let x = 42;
/// let ptr = &x as *const _;
/// let slice = unsafe { slice::from_raw_parts(ptr, 1) };
/// assert_eq!(slice[0], 42);
/// ```
///
/// ### Incorrect usage
///
/// The following `join_slices` function is **unsound** ⚠️
///
/// ```rust,no_run
/// use std::slice;
///
/// fn join_slices<'a, T>(fst: &'a [T], snd: &'a [T]) -> &'a [T] {
/// let fst_end = fst.as_ptr().wrapping_add(fst.len());
/// let snd_start = snd.as_ptr();
/// assert_eq!(fst_end, snd_start, "Slices must be contiguous!");
/// unsafe {
/// // The assertion above ensures `fst` and `snd` are contiguous, but they might
/// // still be contained within _different allocated objects_, in which case
/// // creating this slice is undefined behavior.
/// slice::from_raw_parts(fst.as_ptr(), fst.len() + snd.len())
/// }
/// }
///
/// fn main() {
/// // `a` and `b` are different allocated objects...
/// let a = 42;
/// let b = 27;
/// // ... which may nevertheless be laid out contiguously in memory: | a | b |
/// let _ = join_slices(slice::from_ref(&a), slice::from_ref(&b)); // UB
/// }
/// ```
///
/// [valid]: ../../std/ptr/index.html#safety
/// [`NonNull::dangling()`]: ../../std/ptr/struct.NonNull.html#method.dangling
/// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// Best-effort checks of the documented safety contract; active only in
// builds with debug assertions enabled.
debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
// `saturating_mul` so the check itself cannot overflow.
debug_assert!(
mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
"attempt to create slice covering at least half the address space"
);
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe { &*ptr::slice_from_raw_parts(data, len) }
}
/// Performs the same functionality as [`from_raw_parts`], except that a
/// mutable slice is returned.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
/// and it must be properly aligned. This means in particular:
///
/// * The entire memory range of this slice must be contained within a single allocated object!
/// Slices can never span across multiple allocated objects.
/// * `data` must be non-null and aligned even for zero-length slices. One
/// reason for this is that enum layout optimizations may rely on references
/// (including slices of any length) being aligned and non-null to distinguish
/// them from other data. You can obtain a pointer that is usable as `data`
/// for zero-length slices using [`NonNull::dangling()`].
///
/// * The memory referenced by the returned slice must not be accessed through any other pointer
/// (not derived from the return value) for the duration of lifetime `'a`.
/// Both read and write accesses are forbidden.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// [valid]: ../../std/ptr/index.html#safety
/// [`NonNull::dangling()`]: ../../std/ptr/struct.NonNull.html#method.dangling
/// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
/// [`from_raw_parts`]: ../../std/slice/fn.from_raw_parts.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// Best-effort checks of the documented safety contract; active only in
// builds with debug assertions enabled.
debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
// `saturating_mul` so the check itself cannot overflow.
debug_assert!(
mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
"attempt to create slice covering at least half the address space"
);
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe { &mut *ptr::slice_from_raw_parts_mut(data, len) }
}
/// Converts a reference to T into a slice of length 1 (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
pub fn from_ref<T>(s: &T) -> &[T] {
// SAFETY: a `&T` is non-null, properly aligned, and valid for reads of one
// `T` for its whole lifetime — exactly what `from_raw_parts` requires.
unsafe { from_raw_parts(s, 1) }
}
/// Converts a mutable reference to T into a mutable slice of length 1
/// (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
pub fn from_mut<T>(s: &mut T) -> &mut [T] {
// SAFETY: a `&mut T` is non-null, properly aligned, unique, and valid for
// reads and writes of one `T` for its whole lifetime.
unsafe { from_raw_parts_mut(s, 1) }
}
// This function is public only because there is no other way to unit test heapsort.
#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
#[doc(hidden)]
/// Sorts `v` in place using heapsort, ordered by the `is_less` comparator
/// (a strict less-than predicate). Thin wrapper over `sort::heapsort`.
pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
sort::heapsort(v, &mut is_less);
}
//
// Comparison traits
//
extern "C" {
/// Calls implementation provided memcmp.
///
/// Interprets the data as u8.
///
/// Returns 0 for equal, < 0 for less than and > 0 for greater
/// than.
// Safety (callers): both pointers must be readable for `n` bytes.
// FIXME(#32610): Return type should be c_int
fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> PartialEq<[B]> for [A]
where
A: PartialEq<B>,
{
fn eq(&self, other: &[B]) -> bool {
// Delegated to a specializable helper trait (see `SlicePartialEq`).
SlicePartialEq::equal(self, other)
}
// Routed through the helper trait (instead of relying on the default
// `!eq`) so specialized impls may also override `not_equal`.
fn ne(&self, other: &[B]) -> bool {
SlicePartialEq::not_equal(self, other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq> Eq for [T] {}
/// Implements comparison of vectors lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for [T] {
fn cmp(&self, other: &[T]) -> Ordering {
// Delegated to a specializable helper so e.g. `[u8]` can use `memcmp`.
SliceOrd::compare(self, other)
}
}
/// Implements comparison of vectors lexicographically.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for [T] {
fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
SlicePartialOrd::partial_compare(self, other)
}
}
#[doc(hidden)]
// intermediate trait for specialization of slice's PartialEq
trait SlicePartialEq<B> {
fn equal(&self, other: &[B]) -> bool;
// Default in terms of `equal`; specializations may provide a faster path.
fn not_equal(&self, other: &[B]) -> bool {
!self.equal(other)
}
}
// Generic slice equality
impl<A, B> SlicePartialEq<B> for [A]
where
A: PartialEq<B>,
{
// `default` marks this as the base case; the narrower impls below
// specialize it (nightly `specialization` feature).
default fn equal(&self, other: &[B]) -> bool {
if self.len() != other.len() {
return false;
}
self.iter().zip(other.iter()).all(|(x, y)| x == y)
}
}
// Use an equal-pointer optimization when types are `Eq`
// (`Eq` guarantees reflexivity, so a slice always equals itself).
impl<A> SlicePartialEq<A> for [A]
where
A: PartialEq<A> + Eq,
{
default fn equal(&self, other: &[A]) -> bool {
if self.len() != other.len() {
return false;
}
// While performance would suffer if `guaranteed_eq` just returned `false`
// for all arguments, correctness and return value of this function are not affected.
if self.as_ptr().guaranteed_eq(other.as_ptr()) {
return true;
}
self.iter().zip(other.iter()).all(|(x, y)| x == y)
}
}
// Use memcmp for bytewise equality when the types allow
impl<A> SlicePartialEq<A> for [A]
where
A: PartialEq<A> + BytewiseEquality,
{
fn equal(&self, other: &[A]) -> bool {
if self.len() != other.len() {
return false;
}
// While performance would suffer if `guaranteed_eq` just returned `false`
// for all arguments, correctness and return value of this function are not affected.
if self.as_ptr().guaranteed_eq(other.as_ptr()) {
return true;
}
// SAFETY: the slices have equal lengths, so `size_of_val(self)` bytes
// are readable behind both pointers.
unsafe {
let size = mem::size_of_val(self);
memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
}
}
}
#[doc(hidden)]
// intermediate trait for specialization of slice's PartialOrd
trait SlicePartialOrd: Sized {
fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}
impl<A: PartialOrd> SlicePartialOrd for A {
// `default`: base case, specialized by `AlwaysApplicableOrd` types below.
default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
let l = cmp::min(left.len(), right.len());
// Slice to the loop iteration range to enable bound check
// elimination in the compiler
let lhs = &left[..l];
let rhs = &right[..l];
for i in 0..l {
match lhs[i].partial_cmp(&rhs[i]) {
Some(Ordering::Equal) => (),
non_eq => return non_eq,
}
}
// The shared prefix compared equal: break the tie by length.
left.len().partial_cmp(&right.len())
}
}
// This is the impl that we would like to have. Unfortunately it's not sound.
// See `partial_ord_slice.rs`.
/*
impl<A> SlicePartialOrd for A
where
A: Ord,
{
default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
Some(SliceOrd::compare(left, right))
}
}
*/
// Sound replacement for the unsound blanket `A: Ord` specialization shown
// (commented out) above: restricted to the closed `AlwaysApplicableOrd` set,
// where delegating to total ordering is known to be correct.
impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
    fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}
// Marker for types whose `Ord` can always stand in for `PartialOrd` when
// comparing slices (see impl above).
trait AlwaysApplicableOrd: SliceOrd + Ord {}
// Helper to stamp out the marker impls; each entry is `[generics] type`.
macro_rules! always_applicable_ord {
    ($([$($p:tt)*] $t:ty,)*) => {
        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
    }
}
// Primitives, plus references/`Option` of already-applicable types, which
// inherit the property structurally.
always_applicable_ord! {
    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
    [] bool, [] char,
    [T: ?Sized] *const T, [T: ?Sized] *mut T,
    [T: AlwaysApplicableOrd] &T,
    [T: AlwaysApplicableOrd] &mut T,
    [T: AlwaysApplicableOrd] Option<T>,
}
#[doc(hidden)]
// intermediate trait for specialization of slice's Ord
// As with `SlicePartialOrd`, implemented on the element type.
trait SliceOrd: Sized {
    fn compare(left: &[Self], right: &[Self]) -> Ordering;
}
// Fallback: lexicographic total order with length as the tiebreak.
impl<A: Ord> SliceOrd for A {
    default fn compare(left: &[Self], right: &[Self]) -> Ordering {
        let l = cmp::min(left.len(), right.len());
        // Slice to the loop iteration range to enable bound check
        // elimination in the compiler
        let lhs = &left[..l];
        let rhs = &right[..l];
        for i in 0..l {
            match lhs[i].cmp(&rhs[i]) {
                Ordering::Equal => (),
                non_eq => return non_eq,
            }
        }
        left.len().cmp(&right.len())
    }
}
// memcmp compares a sequence of unsigned bytes lexicographically.
// this matches the order we want for [u8], but no others (not even [i8]).
impl SliceOrd for u8 {
    #[inline]
    fn compare(left: &[Self], right: &[Self]) -> Ordering {
        // SAFETY: memcmp reads `min(left.len(), right.len())` bytes, which is
        // in bounds of both slices.
        let order =
            unsafe { memcmp(left.as_ptr(), right.as_ptr(), cmp::min(left.len(), right.len())) };
        if order == 0 {
            // Common prefix equal: the shorter slice orders first.
            left.len().cmp(&right.len())
        } else if order < 0 {
            Less
        } else {
            Greater
        }
    }
}
#[doc(hidden)]
/// Trait implemented for types that can be compared for equality using
/// their bytewise representation
trait BytewiseEquality: Eq + Copy {}
// Helper to stamp a marker trait onto a list of types.
macro_rules! impl_marker_for {
    ($traitname:ident, $($ty:ty)*) => {
        $(
            impl $traitname for $ty { }
        )*
    }
}
// NOTE: this set is fine for *equality* (unlike ordering above, where only
// `u8` matches memcmp semantics): equal values of these types always have
// identical byte representations.
impl_marker_for!(BytewiseEquality,
    u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a T {
        // SAFETY: the caller must guarantee that `i` is in bounds
        // of the underlying slice, so `i` cannot overflow an `isize`,
        // and the returned references is guaranteed to refer to an element
        // of the slice and thus guaranteed to be valid.
        unsafe { &*self.ptr.as_ptr().add(i) }
    }
    // Indexing into a slice runs no user code, so out-of-order or repeated
    // access cannot be observed.
    fn may_have_side_effect() -> bool {
        false
    }
}
#[doc(hidden)]
unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
    unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T {
        // SAFETY: see comments for `Iter::get_unchecked`.
        unsafe { &mut *self.ptr.as_ptr().add(i) }
    }
    // See `Iter`: plain element access has no observable side effects.
    fn may_have_side_effect() -> bool {
        false
    }
}
// Specialization hook for `[T]::contains`; implemented on the element type.
trait SliceContains: Sized {
    fn slice_contains(&self, x: &[Self]) -> bool;
}
impl<T> SliceContains for T
where
    T: PartialEq,
{
    // Fallback: linear scan.
    default fn slice_contains(&self, x: &[Self]) -> bool {
        x.iter().any(|y| *y == *self)
    }
}
// Byte search can use the vectorized memchr implementation.
impl SliceContains for u8 {
    fn slice_contains(&self, x: &[Self]) -> bool {
        memchr::memchr(*self, x).is_some()
    }
}
impl SliceContains for i8 {
    fn slice_contains(&self, x: &[Self]) -> bool {
        let byte = *self as u8;
        // SAFETY: `i8` and `u8` have the same size and alignment, so the
        // `[i8]` slice can be reinterpreted as `[u8]` of the same length,
        // and equality of the reinterpreted bytes matches `i8` equality.
        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
        memchr::memchr(byte, bytes).is_some()
    }
}
|
use std::char;
use std::cell::Cell;
use std::fmt;
/// Streaming lexer producing `Token`s one at a time via `next_token`.
///
/// Interior mutability (`Cell`) lets tokenization advance through `&self`.
pub struct Lexer {
    input: String,              // full source text (assumed ASCII — positions are used as both char and byte indices)
    position: Cell<usize>,      // index of the current character
    read_position: Cell<usize>, // index of the next character to read
    ch: Cell<char>              // current character; '\0' once input is exhausted
}
impl Lexer {
pub fn new(input: String) -> Lexer {
let l = Lexer {
input: input,
position: Cell::new(0),
read_position: Cell::new(0),
ch: Cell::new(' ')
};
l.read_char();
return l;
}
pub fn read_char(&self) {
if self.read_position.get() >= self.input.len() {
self.ch.set('\0');
} else {
let c = self.input.chars().nth(self.read_position.get()).unwrap();
self.ch.set(c);
}
self.position.set(self.read_position.get());
self.read_position.set(self.read_position.get() + 1);
}
pub fn next_token(&self) -> Token {
self.skip_white_space();
let ch = self.ch.get();
self.get_token(ch)
}
pub fn get_token(&self, s: char) -> Token {
let token = match s {
'=' => {
if self.peek_char() == '=' {
self.read_char();
Token {
token_type: TokenType::Eq,
value: Some(Value::Str(String::from("=="))) // TODO: 文字列を連結するように修正する
}
} else {
Token {
token_type: TokenType::Assign,
value: Some(Value::Str(String::from("=")))
}
}
}
'+' => {
Token {
token_type: TokenType::Plus,
value: Some(Value::Str(String::from("+")))
}
}
'-' => {
Token {
token_type: TokenType::Minus,
value: Some(Value::Str(String::from("-")))
}
}
'!' => {
if self.peek_char() == '=' {
self.read_char();
Token {
token_type: TokenType::NotEq,
value: Some(Value::Str(String::from("!=")))
}
} else {
Token {
token_type: TokenType::Bang,
value: Some(Value::Str(String::from("!")))
}
}
}
'/' => {
Token {
token_type: TokenType::Slash,
value: Some(Value::Str(String::from("/")))
}
}
'*' => {
Token {
token_type: TokenType::Asterisk,
value: Some(Value::Str(String::from("*")))
}
}
'<' => {
Token {
token_type: TokenType::Lt,
value: Some(Value::Str(String::from("<")))
}
}
'>' => {
Token {
token_type: TokenType::Gt,
value: Some(Value::Str(String::from(">")))
}
}
';' => {
Token {
token_type: TokenType::Semicolon,
value: Some(Value::Str(String::from(";")))
}
}
'(' => {
Token {
token_type: TokenType::Lparen,
value: Some(Value::Str(String::from("(")))
}
}
')' => {
Token {
token_type: TokenType::Rparen,
value: Some(Value::Str(String::from(")")))
}
}
',' => {
Token {
token_type: TokenType::Comma,
value: Some(Value::Str(String::from(",")))
}
}
'{' => {
Token {
token_type: TokenType::LBRACE,
value: Some(Value::Str(String::from("{")))
}
}
'}' => {
Token {
token_type: TokenType::Rbrace,
value: Some(Value::Str(String::from("}")))
}
}
'\0' => {
Token {
token_type: TokenType::Eof,
value: Some(Value::Str(String::from("\0")))
}
}
c => {
if is_letter(c) {
let identifier = unsafe {self.read_identifier()};
Token {
token_type: unsafe {self.lookup_ident(&identifier)},
value: Some(Value::Str(identifier))
}
} else if is_digit(c) {
Token {
token_type: TokenType::Int,
value: Some(Value::Str(unsafe { self.read_number()}))
}
} else {
Token {
token_type: TokenType::Illegal,
value: None
}
}
}
};
self.read_char();
return token;
}
pub unsafe fn read_identifier(&self) -> String {
let start_position = self.position.get();
while is_letter(self.ch.get()) {
self.read_char();
}
let end_position = self.position.get();
let s = self.input.slice_unchecked(start_position, end_position);
s.to_string()
}
pub unsafe fn lookup_ident(&self, s: &str) -> TokenType {
match s {
"fn" => TokenType::Function,
"let" => TokenType::Let,
"true" => TokenType::True,
"false" => TokenType::False,
"if" => TokenType::If,
"else" => TokenType::Else,
"return" => TokenType::Return,
_ => TokenType::Ident
}
}
pub unsafe fn read_number(&self) -> String {
let start_position = self.position.get();
while is_digit(self.ch.get()) {
self.read_char();
}
let end_position = self.position.get();
let s = self.input.slice_unchecked(start_position, end_position);
return s.to_string();
}
pub fn skip_white_space(&self) {
let ch = self.ch.get();
while ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' {
self.read_char();
}
}
pub fn peek_char(&self) -> char {
if self.read_position.get() >= self.input.len() {
return '0';
} else {
return self.input.chars().nth(self.read_position.get()).unwrap();
}
}
}
/// True for characters that may appear in an identifier: ASCII letters
/// and the underscore.
fn is_letter(ch: char) -> bool {
    ch.is_ascii_alphabetic() || ch == '_'
}
/// True for the ASCII digits '0'..='9'.
fn is_digit(ch: char) -> bool {
    ch.is_ascii_digit()
}
/// Payload carried by a token; currently only the raw string lexeme.
#[derive(Debug)]
pub enum Value {
    Str(String),
}
/// All token categories produced by the lexer.
#[derive(Debug, PartialEq)]
pub enum TokenType {
    Illegal,
    Eof,
    // Identifiers and literals
    Ident,
    Int,
    // Operators
    Assign,
    Plus,
    Minus,
    Bang,
    Asterisk,
    Slash,
    Eq,
    NotEq,
    Lt,
    Gt,
    // Delimiters
    Comma,
    Semicolon,
    Lparen,
    Rparen,
    LBRACE, // NOTE(review): casing is inconsistent with `Rbrace`; renaming would touch all users
    Rbrace,
    // Keywords (see `Lexer::lookup_ident`)
    Function,
    Let,
    True,
    False,
    If,
    Else,
    Return
}
/// A single lexed token: its category plus the raw lexeme (if any).
#[derive(Debug)]
pub struct Token {
    pub token_type: TokenType,
    // `None` only for `Illegal` tokens (see `Lexer::get_token`).
    pub value: Option<Value>,
}
impl fmt::Display for Token {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({})", 1.0)
}
}
// Test
#[test]
fn peek_char() {
    // Peeking must not consume input: two consecutive peeks see the same '='.
    let input = String::from("!=");
    let l = &Lexer::new(input);
    assert_eq!('=', l.peek_char());
    assert_eq!('=', l.peek_char());
}
#[test]
fn next_token_base() {
    // Single-character tokens come out in input order.
    let input = String::from("=()");
    let l = &Lexer::new(input);
    assert_eq!(l.next_token().token_type, TokenType::Assign);
    assert_eq!(l.next_token().token_type, TokenType::Lparen);
    assert_eq!(l.next_token().token_type, TokenType::Rparen);
}
#[test]
fn test_is_digit() {
    assert_eq!(is_digit('0'), true);
    assert_eq!(is_digit('1'), true);
    assert_eq!(is_digit('8'), true);
    assert_eq!(is_digit('9'), true);
}
#[test]
fn test_is_letter() {
    // Underscore counts as a letter so it can start identifiers.
    assert_eq!(is_letter('_'), true);
    assert_eq!(is_letter('a'), true);
    assert_eq!(is_letter('b'), true);
    assert_eq!(is_letter('y'), true);
    assert_eq!(is_letter('z'), true);
    assert_eq!(is_letter('A'), true);
    assert_eq!(is_letter('B'), true);
    assert_eq!(is_letter('Y'), true);
    assert_eq!(is_letter('Z'), true);
}
#[test]
fn next_token() {
    // Only the first statement's tokens are asserted; the rest of the
    // program exercises the lexer without assertions.
    let input = String::from("let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;");
    let l = &Lexer::new(input);
    assert_eq!(l.next_token().token_type, TokenType::Let);
    assert_eq!(l.next_token().token_type, TokenType::Ident);
    assert_eq!(l.next_token().token_type, TokenType::Assign);
    assert_eq!(l.next_token().token_type, TokenType::Int);
}
Refactor: remove explicit `return` statements in favor of tail expressions.
use std::char;
use std::cell::Cell;
use std::fmt;
/// Streaming lexer producing `Token`s one at a time via `next_token`.
///
/// Interior mutability (`Cell`) lets tokenization advance through `&self`.
pub struct Lexer {
    input: String,              // full source text (assumed ASCII — positions are used as both char and byte indices)
    position: Cell<usize>,      // index of the current character
    read_position: Cell<usize>, // index of the next character to read
    ch: Cell<char>              // current character; '\0' once input is exhausted
}
impl Lexer {
pub fn new(input: String) -> Lexer {
let l = Lexer {
input: input,
position: Cell::new(0),
read_position: Cell::new(0),
ch: Cell::new(' ')
};
l.read_char();
l
}
pub fn read_char(&self) {
if self.read_position.get() >= self.input.len() {
self.ch.set('\0');
} else {
let c = self.input.chars().nth(self.read_position.get()).unwrap();
self.ch.set(c);
}
self.position.set(self.read_position.get());
self.read_position.set(self.read_position.get() + 1);
}
pub fn next_token(&self) -> Token {
self.skip_white_space();
self.get_token(self.ch.get())
}
pub fn get_token(&self, s: char) -> Token {
let token = match s {
'=' => {
if self.peek_char() == '=' {
self.read_char();
Token {
token_type: TokenType::Eq,
value: Some(Value::Str(String::from("=="))) // TODO: 文字列を連結するように修正する
}
} else {
Token {
token_type: TokenType::Assign,
value: Some(Value::Str(String::from("=")))
}
}
}
'+' => {
Token {
token_type: TokenType::Plus,
value: Some(Value::Str(String::from("+")))
}
}
'-' => {
Token {
token_type: TokenType::Minus,
value: Some(Value::Str(String::from("-")))
}
}
'!' => {
if self.peek_char() == '=' {
self.read_char();
Token {
token_type: TokenType::NotEq,
value: Some(Value::Str(String::from("!=")))
}
} else {
Token {
token_type: TokenType::Bang,
value: Some(Value::Str(String::from("!")))
}
}
}
'/' => {
Token {
token_type: TokenType::Slash,
value: Some(Value::Str(String::from("/")))
}
}
'*' => {
Token {
token_type: TokenType::Asterisk,
value: Some(Value::Str(String::from("*")))
}
}
'<' => {
Token {
token_type: TokenType::Lt,
value: Some(Value::Str(String::from("<")))
}
}
'>' => {
Token {
token_type: TokenType::Gt,
value: Some(Value::Str(String::from(">")))
}
}
';' => {
Token {
token_type: TokenType::Semicolon,
value: Some(Value::Str(String::from(";")))
}
}
'(' => {
Token {
token_type: TokenType::Lparen,
value: Some(Value::Str(String::from("(")))
}
}
')' => {
Token {
token_type: TokenType::Rparen,
value: Some(Value::Str(String::from(")")))
}
}
',' => {
Token {
token_type: TokenType::Comma,
value: Some(Value::Str(String::from(",")))
}
}
'{' => {
Token {
token_type: TokenType::LBRACE,
value: Some(Value::Str(String::from("{")))
}
}
'}' => {
Token {
token_type: TokenType::Rbrace,
value: Some(Value::Str(String::from("}")))
}
}
'\0' => {
Token {
token_type: TokenType::Eof,
value: Some(Value::Str(String::from("\0")))
}
}
c => {
if is_letter(c) {
let identifier = unsafe {self.read_identifier()};
Token {
token_type: unsafe {self.lookup_ident(&identifier)},
value: Some(Value::Str(identifier))
}
} else if is_digit(c) {
Token {
token_type: TokenType::Int,
value: Some(Value::Str(unsafe { self.read_number()}))
}
} else {
Token {
token_type: TokenType::Illegal,
value: None
}
}
}
};
self.read_char();
token
}
pub unsafe fn read_identifier(&self) -> String {
let start_position = self.position.get();
while is_letter(self.ch.get()) {
self.read_char();
}
let end_position = self.position.get();
let s = self.input.slice_unchecked(start_position, end_position);
s.to_string()
}
pub unsafe fn lookup_ident(&self, s: &str) -> TokenType {
match s {
"fn" => TokenType::Function,
"let" => TokenType::Let,
"true" => TokenType::True,
"false" => TokenType::False,
"if" => TokenType::If,
"else" => TokenType::Else,
"return" => TokenType::Return,
_ => TokenType::Ident
}
}
pub unsafe fn read_number(&self) -> String {
let start_position = self.position.get();
while is_digit(self.ch.get()) {
self.read_char();
}
let end_position = self.position.get();
let s = self.input.slice_unchecked(start_position, end_position);
s.to_string()
}
pub fn skip_white_space(&self) {
let ch = self.ch.get();
while ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' {
self.read_char();
}
}
pub fn peek_char(&self) -> char {
if self.read_position.get() >= self.input.len() {
'0'
} else {
self.input.chars().nth(self.read_position.get()).unwrap()
}
}
}
/// True for characters that may appear in an identifier: ASCII letters
/// and the underscore.
fn is_letter(ch: char) -> bool {
    ch.is_ascii_alphabetic() || ch == '_'
}
/// True for the ASCII digits '0'..='9'.
fn is_digit(ch: char) -> bool {
    ch.is_ascii_digit()
}
/// Payload carried by a token; currently only the raw string lexeme.
#[derive(Debug)]
pub enum Value {
    Str(String),
}
/// All token categories produced by the lexer.
#[derive(Debug, PartialEq)]
pub enum TokenType {
    Illegal,
    Eof,
    // Identifiers and literals
    Ident,
    Int,
    // Operators
    Assign,
    Plus,
    Minus,
    Bang,
    Asterisk,
    Slash,
    Eq,
    NotEq,
    Lt,
    Gt,
    // Delimiters
    Comma,
    Semicolon,
    Lparen,
    Rparen,
    LBRACE, // NOTE(review): casing is inconsistent with `Rbrace`; renaming would touch all users
    Rbrace,
    // Keywords (see `Lexer::lookup_ident`)
    Function,
    Let,
    True,
    False,
    If,
    Else,
    Return
}
/// A single lexed token: its category plus the raw lexeme (if any).
#[derive(Debug)]
pub struct Token {
    pub token_type: TokenType,
    // `None` only for `Illegal` tokens (see `Lexer::get_token`).
    pub value: Option<Value>,
}
impl fmt::Display for Token {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({})", 1.0)
}
}
// Test
#[test]
fn peek_char() {
    // Peeking must not consume input: two consecutive peeks see the same '='.
    let input = String::from("!=");
    let l = &Lexer::new(input);
    assert_eq!('=', l.peek_char());
    assert_eq!('=', l.peek_char());
}
#[test]
fn next_token_base() {
    // Single-character tokens come out in input order.
    let input = String::from("=()");
    let l = &Lexer::new(input);
    assert_eq!(l.next_token().token_type, TokenType::Assign);
    assert_eq!(l.next_token().token_type, TokenType::Lparen);
    assert_eq!(l.next_token().token_type, TokenType::Rparen);
}
#[test]
fn test_is_digit() {
    assert_eq!(is_digit('0'), true);
    assert_eq!(is_digit('1'), true);
    assert_eq!(is_digit('8'), true);
    assert_eq!(is_digit('9'), true);
}
#[test]
fn test_is_letter() {
    // Underscore counts as a letter so it can start identifiers.
    assert_eq!(is_letter('_'), true);
    assert_eq!(is_letter('a'), true);
    assert_eq!(is_letter('b'), true);
    assert_eq!(is_letter('y'), true);
    assert_eq!(is_letter('z'), true);
    assert_eq!(is_letter('A'), true);
    assert_eq!(is_letter('B'), true);
    assert_eq!(is_letter('Y'), true);
    assert_eq!(is_letter('Z'), true);
}
#[test]
fn next_token() {
    // Only the first statement's tokens are asserted; the rest of the
    // program exercises the lexer without assertions.
    let input = String::from("let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;");
    let l = &Lexer::new(input);
    assert_eq!(l.next_token().token_type, TokenType::Let);
    assert_eq!(l.next_token().token_type, TokenType::Ident);
    assert_eq!(l.next_token().token_type, TokenType::Assign);
    // assert_eq!(l.next_token().token_type, TokenType::Int);
}
|
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::mem::drop;
use std::sync::Arc;
use super::error::*;
use super::usb_endpoint::UsbEndpoint;
use super::utils::{submit_transfer, update_transfer_state};
use crate::usb::xhci::scatter_gather_buffer::ScatterGatherBuffer;
use crate::usb::xhci::xhci_backend_device::{BackendType, UsbDeviceAddress, XhciBackendDevice};
use crate::usb::xhci::xhci_transfer::{XhciTransfer, XhciTransferState, XhciTransferType};
use crate::utils::AsyncJobQueue;
use crate::utils::FailHandle;
use data_model::DataInit;
use std::collections::HashMap;
use std::mem;
use sync::Mutex;
use sys_util::{error, warn};
use usb_util::{
ConfigDescriptorTree, ControlRequestDataPhaseTransferDirection, ControlRequestRecipient,
Device, StandardControlRequest, Transfer, TransferStatus, UsbRequestSetup,
};
/// Stage the control endpoint expects to receive next; transfers arriving
/// out of order are rejected by `HostDevice::handle_control_transfer`.
#[derive(PartialEq)]
pub enum ControlEndpointState {
    /// Control endpoint should receive setup stage next.
    SetupStage,
    /// Control endpoint should receive data stage next.
    DataStage,
    /// Control endpoint should receive status stage next.
    StatusStage,
}
/// Host device is a device connected to host.
pub struct HostDevice {
    fail_handle: Arc<dyn FailHandle>,
    // Endpoints only contains data endpoints (1 to 30). Control transfers are handled at device
    // level.
    endpoints: Vec<UsbEndpoint>,
    device: Arc<Mutex<Device>>,
    // Which control-transfer stage is expected next (setup/data/status).
    ctl_ep_state: ControlEndpointState,
    // interface number -> currently selected alternate setting.
    alt_settings: HashMap<u8, u8>,
    // Interfaces successfully claimed; released again on drop.
    claimed_interfaces: Vec<u8>,
    // Setup packet stored at SetupStage; executed at Data- or StatusStage.
    control_request_setup: UsbRequestSetup,
    // True once the pending request was executed during DataStage, so
    // StatusStage only needs to complete the transfer.
    executed: bool,
    job_queue: Arc<AsyncJobQueue>,
}
impl Drop for HostDevice {
    // Release every interface we claimed so the host can rebind drivers.
    fn drop(&mut self) {
        self.release_interfaces();
    }
}
impl HostDevice {
    /// Create a new host device.
    pub fn new(
        fail_handle: Arc<dyn FailHandle>,
        job_queue: Arc<AsyncJobQueue>,
        device: Arc<Mutex<Device>>,
    ) -> HostDevice {
        HostDevice {
            fail_handle,
            endpoints: vec![],
            device,
            // A fresh device expects a setup stage first.
            ctl_ep_state: ControlEndpointState::SetupStage,
            alt_settings: HashMap::new(),
            claimed_interfaces: vec![],
            control_request_setup: UsbRequestSetup::new(0, 0, 0, 0, 0),
            executed: false,
            job_queue,
        }
    }
    // Check for requests that should be intercepted and emulated using libusb
    // functions rather than passed directly to the device.
    // Returns true if the request has been intercepted or false if the request
    // should be passed through to the device.
    fn intercepted_control_transfer(&mut self, xhci_transfer: &XhciTransfer) -> Result<bool> {
        let direction = self.control_request_setup.get_direction();
        let recipient = self.control_request_setup.get_recipient();
        let standard_request = self.control_request_setup.get_standard_request();
        if direction != ControlRequestDataPhaseTransferDirection::HostToDevice {
            // Only host to device requests are intercepted currently.
            return Ok(false);
        }
        let status = match standard_request {
            Some(StandardControlRequest::SetAddress) => {
                if recipient != ControlRequestRecipient::Device {
                    return Ok(false);
                }
                usb_debug!("host device handling set address");
                let addr = self.control_request_setup.value as u32;
                self.set_address(addr);
                TransferStatus::Completed
            }
            Some(StandardControlRequest::SetConfiguration) => {
                if recipient != ControlRequestRecipient::Device {
                    return Ok(false);
                }
                usb_debug!("host device handling set config");
                self.set_config()?
            }
            Some(StandardControlRequest::SetInterface) => {
                if recipient != ControlRequestRecipient::Interface {
                    return Ok(false);
                }
                usb_debug!("host device handling set interface");
                self.set_interface()?
            }
            Some(StandardControlRequest::ClearFeature) => {
                if recipient != ControlRequestRecipient::Endpoint {
                    return Ok(false);
                }
                usb_debug!("host device handling clear feature");
                self.clear_feature()?
            }
            _ => {
                // Other requests will be passed through to the device.
                return Ok(false);
            }
        };
        // Intercepted requests complete immediately with no data phase.
        xhci_transfer
            .on_transfer_complete(&status, 0)
            .map_err(Error::TransferComplete)?;
        Ok(true)
    }
    // Issues the previously stored setup request to the device, copying guest
    // data in (HostToDevice) or arranging for it to be copied back out in the
    // completion callback (DeviceToHost).
    fn execute_control_transfer(
        &mut self,
        xhci_transfer: Arc<XhciTransfer>,
        buffer: Option<ScatterGatherBuffer>,
    ) -> Result<()> {
        if self.intercepted_control_transfer(&xhci_transfer)? {
            return Ok(());
        }
        // Default buffer size for control data transfer.
        // NOTE(review): this caps the data stage at 1024 bytes; larger
        // requests are not supported by this fixed-size layout.
        const CONTROL_DATA_BUFFER_SIZE: usize = 1024;
        // Buffer type for control transfer. The first 8 bytes is a UsbRequestSetup struct.
        #[derive(Copy, Clone)]
        #[repr(C, packed)]
        struct ControlTransferBuffer {
            pub setup: UsbRequestSetup,
            pub data: [u8; CONTROL_DATA_BUFFER_SIZE],
        }
        // Safe because it only has data and has no implicit padding.
        unsafe impl DataInit for ControlTransferBuffer {}
        let mut control_request = ControlTransferBuffer {
            setup: self.control_request_setup,
            data: [0; CONTROL_DATA_BUFFER_SIZE],
        };
        let direction = self.control_request_setup.get_direction();
        let buffer = if direction == ControlRequestDataPhaseTransferDirection::HostToDevice {
            if let Some(buffer) = buffer {
                buffer
                    .read(&mut control_request.data)
                    .map_err(Error::ReadBuffer)?;
            }
            // buffer is consumed here for HostToDevice transfers.
            None
        } else {
            // buffer will be used later in the callback for DeviceToHost transfers.
            buffer
        };
        let control_buffer = control_request.as_slice().to_vec();
        let mut control_transfer =
            Transfer::new_control(control_buffer).map_err(Error::CreateTransfer)?;
        let tmp_transfer = xhci_transfer.clone();
        // Completion callback: reconcile the host transfer result with the
        // xHCI transfer state and report completion to the guest.
        let callback = move |t: Transfer| {
            usb_debug!("setup token control transfer callback invoked");
            update_transfer_state(&xhci_transfer, &t)?;
            let state = xhci_transfer.state().lock();
            match *state {
                XhciTransferState::Cancelled => {
                    usb_debug!("transfer cancelled");
                    // Drop the state lock before completing the transfer.
                    drop(state);
                    xhci_transfer
                        .on_transfer_complete(&TransferStatus::Cancelled, 0)
                        .map_err(Error::TransferComplete)?;
                }
                XhciTransferState::Completed => {
                    let status = t.status();
                    let actual_length = t.actual_length();
                    if direction == ControlRequestDataPhaseTransferDirection::DeviceToHost {
                        // Copy the data that follows the 8-byte setup header
                        // back into the guest's scatter-gather buffer.
                        if let Some(control_request_data) =
                            t.buffer.get(mem::size_of::<UsbRequestSetup>()..)
                        {
                            if let Some(buffer) = &buffer {
                                buffer
                                    .write(&control_request_data)
                                    .map_err(Error::WriteBuffer)?;
                            }
                        }
                    }
                    drop(state);
                    usb_debug!("transfer completed with actual length {}", actual_length);
                    xhci_transfer
                        .on_transfer_complete(&status, actual_length as u32)
                        .map_err(Error::TransferComplete)?;
                }
                _ => {
                    // update_transfer_state is already invoked before match.
                    // This transfer could only be `Cancelled` or `Completed`.
                    // Any other state means there is a bug in crosvm implementation.
                    error!("should not take this branch");
                    return Err(Error::BadXhciTransferState);
                }
            }
            Ok(())
        };
        let fail_handle = self.fail_handle.clone();
        // Any error escaping the callback is fatal for the device model.
        control_transfer.set_callback(move |t: Transfer| match callback(t) {
            Ok(_) => {}
            Err(e) => {
                error!("control transfer callback failed {:?}", e);
                fail_handle.fail();
            }
        });
        submit_transfer(
            self.fail_handle.clone(),
            &self.job_queue,
            tmp_transfer,
            &mut self.device.lock(),
            control_transfer,
        )
    }
    // Drives the three-stage (setup/data/status) control transfer state
    // machine; out-of-order stages are logged and dropped.
    fn handle_control_transfer(&mut self, transfer: XhciTransfer) -> Result<()> {
        let xhci_transfer = Arc::new(transfer);
        match xhci_transfer
            .get_transfer_type()
            .map_err(Error::GetXhciTransferType)?
        {
            XhciTransferType::SetupStage(setup) => {
                if self.ctl_ep_state != ControlEndpointState::SetupStage {
                    error!("Control endpoint is in an inconsistant state");
                    return Ok(());
                }
                usb_debug!("setup stage setup buffer: {:?}", setup);
                // Remember the setup packet; it is executed at a later stage.
                self.control_request_setup = setup;
                xhci_transfer
                    .on_transfer_complete(&TransferStatus::Completed, 0)
                    .map_err(Error::TransferComplete)?;
                self.ctl_ep_state = ControlEndpointState::DataStage;
            }
            XhciTransferType::DataStage(buffer) => {
                if self.ctl_ep_state != ControlEndpointState::DataStage {
                    error!("Control endpoint is in an inconsistant state");
                    return Ok(());
                }
                // Requests with a DataStage will be executed here.
                // Requests without a DataStage will be executed in StatusStage.
                self.execute_control_transfer(xhci_transfer, Some(buffer))?;
                self.executed = true;
                self.ctl_ep_state = ControlEndpointState::StatusStage;
            }
            XhciTransferType::StatusStage => {
                if self.ctl_ep_state == ControlEndpointState::SetupStage {
                    error!("Control endpoint is in an inconsistant state");
                    return Ok(());
                }
                if self.executed {
                    // Request was already executed during DataStage.
                    // Just complete the StatusStage transfer.
                    xhci_transfer
                        .on_transfer_complete(&TransferStatus::Completed, 0)
                        .map_err(Error::TransferComplete)?;
                } else {
                    // Execute the request now since there was no DataStage.
                    self.execute_control_transfer(xhci_transfer, None)?;
                }
                self.executed = false;
                self.ctl_ep_state = ControlEndpointState::SetupStage;
            }
            _ => {
                // Non control transfer should not be handled in this function.
                error!("Non control (could be noop) transfer sent to control endpoint.");
                xhci_transfer
                    .on_transfer_complete(&TransferStatus::Completed, 0)
                    .map_err(Error::TransferComplete)?;
            }
        }
        Ok(())
    }
    // Emulates SET_CONFIGURATION: selects the configuration, then re-claims
    // interfaces and rebuilds the data endpoints for it.
    fn set_config(&mut self) -> Result<TransferStatus> {
        // It's a standard, set_config, device request.
        let config = (self.control_request_setup.value & 0xff) as u8;
        usb_debug!(
            "Set config control transfer is received with config: {}",
            config
        );
        self.release_interfaces();
        if self.device.lock().get_num_configurations() > 1 {
            let cur_config = match self.device.lock().get_active_configuration() {
                Ok(c) => Some(c),
                Err(e) => {
                    // The device may be in the default state, in which case
                    // GET_CONFIGURATION may fail. Assume the device needs to be
                    // reconfigured.
                    usb_debug!("Failed to get active configuration: {}", e);
                    error!("Failed to get active configuration: {}", e);
                    None
                }
            };
            // Only reconfigure when the requested config differs.
            if Some(config) != cur_config {
                self.device
                    .lock()
                    .set_active_configuration(config)
                    .map_err(Error::SetActiveConfig)?;
            }
        } else {
            usb_debug!("Only one configuration - not calling set_active_configuration");
        }
        let config_descriptor = self
            .device
            .lock()
            .get_config_descriptor(config)
            .map_err(Error::GetActiveConfig)?;
        self.claim_interfaces(&config_descriptor);
        self.create_endpoints(&config_descriptor)?;
        Ok(TransferStatus::Completed)
    }
    // Emulates SET_INTERFACE: selects an alternate setting and rebuilds the
    // endpoints, since alt settings can change the endpoint layout.
    fn set_interface(&mut self) -> Result<TransferStatus> {
        usb_debug!("set interface");
        // It's a standard, set_interface, interface request.
        let interface = self.control_request_setup.index as u8;
        let alt_setting = self.control_request_setup.value as u8;
        self.device
            .lock()
            .set_interface_alt_setting(interface, alt_setting)
            .map_err(Error::SetInterfaceAltSetting)?;
        self.alt_settings.insert(interface, alt_setting);
        let config = self
            .device
            .lock()
            .get_active_configuration()
            .map_err(Error::GetActiveConfig)?;
        let config_descriptor = self
            .device
            .lock()
            .get_config_descriptor(config)
            .map_err(Error::GetActiveConfig)?;
        self.create_endpoints(&config_descriptor)?;
        Ok(TransferStatus::Completed)
    }
    // Emulates CLEAR_FEATURE(ENDPOINT_HALT) by clearing the halt condition
    // on the endpoint named in the setup packet's index field.
    fn clear_feature(&mut self) -> Result<TransferStatus> {
        usb_debug!("clear feature");
        let request_setup = &self.control_request_setup;
        // It's a standard, clear_feature, endpoint request.
        const STD_FEATURE_ENDPOINT_HALT: u16 = 0;
        if request_setup.value == STD_FEATURE_ENDPOINT_HALT {
            self.device
                .lock()
                .clear_halt(request_setup.index as u8)
                .map_err(Error::ClearHalt)?;
        }
        Ok(TransferStatus::Completed)
    }
    // Claims every interface of the configuration; failures are logged but
    // do not abort, so partially claimed devices still function.
    fn claim_interfaces(&mut self, config_descriptor: &ConfigDescriptorTree) {
        for i in 0..config_descriptor.num_interfaces() {
            match self.device.lock().claim_interface(i) {
                Ok(()) => {
                    usb_debug!("claimed interface {}", i);
                    self.claimed_interfaces.push(i);
                }
                Err(e) => {
                    error!("unable to claim interface {}: {:?}", i, e);
                }
            }
        }
    }
    // Rebuilds `self.endpoints` from the endpoint descriptors of every
    // claimed interface (at its currently selected alt setting).
    fn create_endpoints(&mut self, config_descriptor: &ConfigDescriptorTree) -> Result<()> {
        self.endpoints = Vec::new();
        for i in &self.claimed_interfaces {
            // Default to alt setting 0 if none was explicitly selected.
            let alt_setting = self.alt_settings.get(i).unwrap_or(&0);
            let interface = config_descriptor
                .get_interface_descriptor(*i, *alt_setting)
                .ok_or(Error::GetInterfaceDescriptor((*i, *alt_setting)))?;
            for ep_idx in 0..interface.bNumEndpoints {
                let ep_dp = interface
                    .get_endpoint_descriptor(ep_idx)
                    .ok_or(Error::GetEndpointDescriptor(ep_idx))?;
                let ep_num = ep_dp.get_endpoint_number();
                // Endpoint 0 is the control endpoint, handled at device level.
                if ep_num == 0 {
                    usb_debug!("endpoint 0 in endpoint descriptors");
                    continue;
                }
                let direction = ep_dp.get_direction();
                let ty = ep_dp.get_endpoint_type().ok_or(Error::GetEndpointType)?;
                self.endpoints.push(UsbEndpoint::new(
                    self.fail_handle.clone(),
                    self.job_queue.clone(),
                    self.device.clone(),
                    ep_num,
                    direction,
                    ty,
                ));
            }
        }
        Ok(())
    }
    // Releases all claimed interfaces; errors are logged and ignored.
    fn release_interfaces(&mut self) {
        for i in &self.claimed_interfaces {
            if let Err(e) = self.device.lock().release_interface(*i) {
                error!("could not release interface: {:?}", e);
            }
        }
        self.claimed_interfaces = Vec::new();
    }
    // Routes a transfer either to the device-level control handling
    // (endpoint 0) or to the matching data endpoint.
    fn submit_transfer_helper(&mut self, transfer: XhciTransfer) -> Result<()> {
        if transfer.get_endpoint_number() == 0 {
            return self.handle_control_transfer(transfer);
        }
        for ep in &self.endpoints {
            if ep.match_ep(transfer.get_endpoint_number(), transfer.get_transfer_dir()) {
                return ep.handle_transfer(transfer);
            }
        }
        warn!("Could not find endpoint for transfer");
        transfer
            .on_transfer_complete(&TransferStatus::Error, 0)
            .map_err(Error::TransferComplete)
    }
}
impl XhciBackendDevice for HostDevice {
    // Reports USB2 vs USB3 from the device descriptor's bcdUSB field; a
    // failed descriptor read conservatively falls back to USB2.
    fn get_backend_type(&self) -> BackendType {
        let d = match self.device.lock().get_device_descriptor() {
            Ok(d) => d,
            Err(_) => return BackendType::Usb2,
        };
        // See definition of bcdUsb.
        const USB3_MASK: u16 = 0x0300;
        match d.bcdUSB & USB3_MASK {
            USB3_MASK => BackendType::Usb3,
            _ => BackendType::Usb2,
        }
    }
    // Vendor id from the device descriptor; 0 if the read fails.
    fn get_vid(&self) -> u16 {
        match self.device.lock().get_device_descriptor() {
            Ok(d) => d.idVendor,
            Err(e) => {
                error!("cannot get device descriptor: {:?}", e);
                0
            }
        }
    }
    // Product id from the device descriptor; 0 if the read fails.
    fn get_pid(&self) -> u16 {
        match self.device.lock().get_device_descriptor() {
            Ok(d) => d.idProduct,
            Err(e) => {
                error!("cannot get device descriptor: {:?}", e);
                0
            }
        }
    }
    // Submits a transfer, collapsing the detailed error into `()` after
    // logging it for the caller.
    fn submit_transfer(&mut self, transfer: XhciTransfer) -> std::result::Result<(), ()> {
        self.submit_transfer_helper(transfer).map_err(|e| {
            error!("failed to submit transfer: {}", e);
        })
    }
    fn set_address(&mut self, _address: UsbDeviceAddress) {
        // It's a standard, set_address, device request. We do nothing here. As described in XHCI
        // spec. See set address command ring trb.
        usb_debug!(
            "Set address control transfer is received with address: {}",
            _address
        );
    }
    // Resets the underlying host device; errors are logged and mapped to ().
    fn reset(&mut self) -> std::result::Result<(), ()> {
        usb_debug!("resetting host device");
        self.device
            .lock()
            .reset()
            .map_err(|e| error!("failed to reset device: {:?}", e))
    }
}
devices: usb: allow arbitrary control request size
Previously, the maximum control request length that could be passed
through the USB host_backend layer was limited to 1024 bytes.
To lift this limit, remove the fixed-length ControlTransferBuffer
structure definition and replace it with manual buffer allocation. This
mirrors the behavior of the later part of this function, which already
indexes into the control_buffer to copy the data back from a device to
host transfer.
BUG=chromium:1073503
TEST=Verify adb logcat still works
Change-Id: I7354f6fa237b4df6db7898f27be76ab10faed9f4
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2161440
Reviewed-by: Dylan Reid <c791bfe44996c1eed9c7149a0c5f12a86d58eb85@chromium.org>
Tested-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
Tested-by: Daniel Verkamp <72bc170b46ec491f7bdd4359a1c0bfed274de40c@chromium.org>
Commit-Queue: Daniel Verkamp <72bc170b46ec491f7bdd4359a1c0bfed274de40c@chromium.org>
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::mem::drop;
use std::sync::Arc;
use super::error::*;
use super::usb_endpoint::UsbEndpoint;
use super::utils::{submit_transfer, update_transfer_state};
use crate::usb::xhci::scatter_gather_buffer::ScatterGatherBuffer;
use crate::usb::xhci::xhci_backend_device::{BackendType, UsbDeviceAddress, XhciBackendDevice};
use crate::usb::xhci::xhci_transfer::{XhciTransfer, XhciTransferState, XhciTransferType};
use crate::utils::AsyncJobQueue;
use crate::utils::FailHandle;
use data_model::DataInit;
use std::collections::HashMap;
use std::mem;
use sync::Mutex;
use sys_util::{error, warn};
use usb_util::{
ConfigDescriptorTree, ControlRequestDataPhaseTransferDirection, ControlRequestRecipient,
Device, StandardControlRequest, Transfer, TransferStatus, UsbRequestSetup,
};
/// Stage the control endpoint expects to receive next; transfers arriving
/// out of order are rejected by the device's control-transfer handler.
#[derive(PartialEq)]
pub enum ControlEndpointState {
    /// Control endpoint should receive setup stage next.
    SetupStage,
    /// Control endpoint should receive data stage next.
    DataStage,
    /// Control endpoint should receive status stage next.
    StatusStage,
}
/// Host device is a device connected to host.
pub struct HostDevice {
    fail_handle: Arc<dyn FailHandle>,
    // Endpoints only contains data endpoints (1 to 30). Control transfers are handled at device
    // level.
    endpoints: Vec<UsbEndpoint>,
    device: Arc<Mutex<Device>>,
    // Which control-transfer stage is expected next (setup/data/status).
    ctl_ep_state: ControlEndpointState,
    // interface number -> currently selected alternate setting.
    alt_settings: HashMap<u8, u8>,
    // Interfaces successfully claimed; released again on drop.
    claimed_interfaces: Vec<u8>,
    // Setup packet stored at SetupStage; executed at Data- or StatusStage.
    control_request_setup: UsbRequestSetup,
    // True once the pending request was executed during DataStage, so
    // StatusStage only needs to complete the transfer.
    executed: bool,
    job_queue: Arc<AsyncJobQueue>,
}
impl Drop for HostDevice {
    // Release all claimed interfaces on drop so the host device is left
    // usable by other software.
    fn drop(&mut self) {
        self.release_interfaces();
    }
}
impl HostDevice {
/// Create a new host device.
///
/// * `fail_handle` - handle used to report unrecoverable failures.
/// * `job_queue` - queue on which async transfer jobs run.
/// * `device` - the underlying host USB device.
pub fn new(
    fail_handle: Arc<dyn FailHandle>,
    job_queue: Arc<AsyncJobQueue>,
    device: Arc<Mutex<Device>>,
) -> HostDevice {
    HostDevice {
        fail_handle,
        endpoints: vec![],
        device,
        // A freshly attached device starts by expecting a setup packet.
        ctl_ep_state: ControlEndpointState::SetupStage,
        alt_settings: HashMap::new(),
        claimed_interfaces: vec![],
        control_request_setup: UsbRequestSetup::new(0, 0, 0, 0, 0),
        executed: false,
        job_queue,
    }
}
// Check for requests that should be intercepted and emulated using libusb
// functions rather than passed directly to the device.
// Returns true if the request has been intercepted or false if the request
// should be passed through to the device.
fn intercepted_control_transfer(&mut self, xhci_transfer: &XhciTransfer) -> Result<bool> {
    let direction = self.control_request_setup.get_direction();
    let recipient = self.control_request_setup.get_recipient();
    let standard_request = self.control_request_setup.get_standard_request();
    if direction != ControlRequestDataPhaseTransferDirection::HostToDevice {
        // Only host to device requests are intercepted currently.
        return Ok(false);
    }
    // Each emulated request yields the TransferStatus used to complete the
    // xHCI transfer below; any other request falls through to the device.
    let status = match standard_request {
        Some(StandardControlRequest::SetAddress) => {
            if recipient != ControlRequestRecipient::Device {
                return Ok(false);
            }
            usb_debug!("host device handling set address");
            let addr = self.control_request_setup.value as u32;
            self.set_address(addr);
            TransferStatus::Completed
        }
        Some(StandardControlRequest::SetConfiguration) => {
            if recipient != ControlRequestRecipient::Device {
                return Ok(false);
            }
            usb_debug!("host device handling set config");
            self.set_config()?
        }
        Some(StandardControlRequest::SetInterface) => {
            if recipient != ControlRequestRecipient::Interface {
                return Ok(false);
            }
            usb_debug!("host device handling set interface");
            self.set_interface()?
        }
        Some(StandardControlRequest::ClearFeature) => {
            if recipient != ControlRequestRecipient::Endpoint {
                return Ok(false);
            }
            usb_debug!("host device handling clear feature");
            self.clear_feature()?
        }
        _ => {
            // Other requests will be passed through to the device.
            return Ok(false);
        }
    };
    // The request was emulated above; complete the transfer with zero bytes.
    xhci_transfer
        .on_transfer_complete(&status, 0)
        .map_err(Error::TransferComplete)?;
    Ok(true)
}
// Builds and submits the actual control transfer to the host device, unless
// the request is intercepted and emulated locally. For DeviceToHost requests
// the guest-provided `buffer` is kept alive and filled in by the completion
// callback; for HostToDevice requests it is consumed up front.
fn execute_control_transfer(
    &mut self,
    xhci_transfer: Arc<XhciTransfer>,
    buffer: Option<ScatterGatherBuffer>,
) -> Result<()> {
    if self.intercepted_control_transfer(&xhci_transfer)? {
        return Ok(());
    }
    // Allocate a buffer for the control transfer.
    // This buffer will hold a UsbRequestSetup struct followed by the data.
    let control_buffer_len =
        mem::size_of::<UsbRequestSetup>() + self.control_request_setup.length as usize;
    let mut control_buffer = vec![0u8; control_buffer_len];
    // Copy the control request header.
    control_buffer[..mem::size_of::<UsbRequestSetup>()]
        .copy_from_slice(self.control_request_setup.as_slice());
    let direction = self.control_request_setup.get_direction();
    let buffer = if direction == ControlRequestDataPhaseTransferDirection::HostToDevice {
        if let Some(buffer) = buffer {
            buffer
                .read(&mut control_buffer[mem::size_of::<UsbRequestSetup>()..])
                .map_err(Error::ReadBuffer)?;
        }
        // buffer is consumed here for HostToDevice transfers.
        None
    } else {
        // buffer will be used later in the callback for DeviceToHost transfers.
        buffer
    };
    let mut control_transfer =
        Transfer::new_control(control_buffer).map_err(Error::CreateTransfer)?;
    let tmp_transfer = xhci_transfer.clone();
    // Completion callback: mirrors the transfer's final state back to the
    // guest and, for DeviceToHost, copies the received payload into the
    // guest's scatter-gather buffer.
    let callback = move |t: Transfer| {
        usb_debug!("setup token control transfer callback invoked");
        update_transfer_state(&xhci_transfer, &t)?;
        let state = xhci_transfer.state().lock();
        match *state {
            XhciTransferState::Cancelled => {
                usb_debug!("transfer cancelled");
                // Drop the lock before completing to avoid holding it across
                // the completion path.
                drop(state);
                xhci_transfer
                    .on_transfer_complete(&TransferStatus::Cancelled, 0)
                    .map_err(Error::TransferComplete)?;
            }
            XhciTransferState::Completed => {
                let status = t.status();
                let actual_length = t.actual_length();
                if direction == ControlRequestDataPhaseTransferDirection::DeviceToHost {
                    // Skip the setup header; the payload follows it.
                    if let Some(control_request_data) =
                        t.buffer.get(mem::size_of::<UsbRequestSetup>()..)
                    {
                        if let Some(buffer) = &buffer {
                            buffer
                                .write(&control_request_data)
                                .map_err(Error::WriteBuffer)?;
                        }
                    }
                }
                drop(state);
                usb_debug!("transfer completed with actual length {}", actual_length);
                xhci_transfer
                    .on_transfer_complete(&status, actual_length as u32)
                    .map_err(Error::TransferComplete)?;
            }
            _ => {
                // update_transfer_state is already invoked before match.
                // This transfer could only be `Cancelled` or `Completed`.
                // Any other state means there is a bug in crosvm implementation.
                error!("should not take this branch");
                return Err(Error::BadXhciTransferState);
            }
        }
        Ok(())
    };
    let fail_handle = self.fail_handle.clone();
    // A callback failure is unrecoverable; signal it through fail_handle.
    control_transfer.set_callback(move |t: Transfer| match callback(t) {
        Ok(_) => {}
        Err(e) => {
            error!("control transfer callback failed {:?}", e);
            fail_handle.fail();
        }
    });
    submit_transfer(
        self.fail_handle.clone(),
        &self.job_queue,
        tmp_transfer,
        &mut self.device.lock(),
        control_transfer,
    )
}
fn handle_control_transfer(&mut self, transfer: XhciTransfer) -> Result<()> {
let xhci_transfer = Arc::new(transfer);
match xhci_transfer
.get_transfer_type()
.map_err(Error::GetXhciTransferType)?
{
XhciTransferType::SetupStage(setup) => {
if self.ctl_ep_state != ControlEndpointState::SetupStage {
error!("Control endpoint is in an inconsistant state");
return Ok(());
}
usb_debug!("setup stage setup buffer: {:?}", setup);
self.control_request_setup = setup;
xhci_transfer
.on_transfer_complete(&TransferStatus::Completed, 0)
.map_err(Error::TransferComplete)?;
self.ctl_ep_state = ControlEndpointState::DataStage;
}
XhciTransferType::DataStage(buffer) => {
if self.ctl_ep_state != ControlEndpointState::DataStage {
error!("Control endpoint is in an inconsistant state");
return Ok(());
}
// Requests with a DataStage will be executed here.
// Requests without a DataStage will be executed in StatusStage.
self.execute_control_transfer(xhci_transfer, Some(buffer))?;
self.executed = true;
self.ctl_ep_state = ControlEndpointState::StatusStage;
}
XhciTransferType::StatusStage => {
if self.ctl_ep_state == ControlEndpointState::SetupStage {
error!("Control endpoint is in an inconsistant state");
return Ok(());
}
if self.executed {
// Request was already executed during DataStage.
// Just complete the StatusStage transfer.
xhci_transfer
.on_transfer_complete(&TransferStatus::Completed, 0)
.map_err(Error::TransferComplete)?;
} else {
// Execute the request now since there was no DataStage.
self.execute_control_transfer(xhci_transfer, None)?;
}
self.executed = false;
self.ctl_ep_state = ControlEndpointState::SetupStage;
}
_ => {
// Non control transfer should not be handled in this function.
error!("Non control (could be noop) transfer sent to control endpoint.");
xhci_transfer
.on_transfer_complete(&TransferStatus::Completed, 0)
.map_err(Error::TransferComplete)?;
}
}
Ok(())
}
// Emulates a standard SET_CONFIGURATION device request: releases claimed
// interfaces, switches the active configuration if needed, then re-claims
// interfaces and rebuilds the endpoint list from the new descriptor.
fn set_config(&mut self) -> Result<TransferStatus> {
    // It's a standard, set_config, device request.
    let config = (self.control_request_setup.value & 0xff) as u8;
    usb_debug!(
        "Set config control transfer is received with config: {}",
        config
    );
    self.release_interfaces();
    if self.device.lock().get_num_configurations() > 1 {
        let cur_config = match self.device.lock().get_active_configuration() {
            Ok(c) => Some(c),
            Err(e) => {
                // The device may be in the default state, in which case
                // GET_CONFIGURATION may fail. Assume the device needs to be
                // reconfigured.
                usb_debug!("Failed to get active configuration: {}", e);
                error!("Failed to get active configuration: {}", e);
                None
            }
        };
        // Only switch configurations when the requested one differs; a
        // redundant SET_CONFIGURATION could reset device state.
        if Some(config) != cur_config {
            self.device
                .lock()
                .set_active_configuration(config)
                .map_err(Error::SetActiveConfig)?;
        }
    } else {
        usb_debug!("Only one configuration - not calling set_active_configuration");
    }
    let config_descriptor = self
        .device
        .lock()
        .get_config_descriptor(config)
        .map_err(Error::GetActiveConfig)?;
    self.claim_interfaces(&config_descriptor);
    self.create_endpoints(&config_descriptor)?;
    Ok(TransferStatus::Completed)
}
// Emulates a standard SET_INTERFACE request: selects the alternate setting
// on the host device, records it, and rebuilds the endpoint list since the
// endpoint layout can differ between alternate settings.
fn set_interface(&mut self) -> Result<TransferStatus> {
    usb_debug!("set interface");
    // It's a standard, set_interface, interface request.
    let interface = self.control_request_setup.index as u8;
    let alt_setting = self.control_request_setup.value as u8;
    self.device
        .lock()
        .set_interface_alt_setting(interface, alt_setting)
        .map_err(Error::SetInterfaceAltSetting)?;
    self.alt_settings.insert(interface, alt_setting);
    let config = self
        .device
        .lock()
        .get_active_configuration()
        .map_err(Error::GetActiveConfig)?;
    let config_descriptor = self
        .device
        .lock()
        .get_config_descriptor(config)
        .map_err(Error::GetActiveConfig)?;
    self.create_endpoints(&config_descriptor)?;
    Ok(TransferStatus::Completed)
}
// Emulates a standard CLEAR_FEATURE endpoint request. Only ENDPOINT_HALT is
// acted upon (by clearing the halt on the host endpoint); other feature
// selectors are silently reported as completed.
fn clear_feature(&mut self) -> Result<TransferStatus> {
    usb_debug!("clear feature");
    let request_setup = &self.control_request_setup;
    // It's a standard, clear_feature, endpoint request.
    const STD_FEATURE_ENDPOINT_HALT: u16 = 0;
    if request_setup.value == STD_FEATURE_ENDPOINT_HALT {
        self.device
            .lock()
            .clear_halt(request_setup.index as u8)
            .map_err(Error::ClearHalt)?;
    }
    Ok(TransferStatus::Completed)
}
// Attempts to claim every interface of the given configuration on the host
// device. Failures are logged but not fatal; only successfully claimed
// interface numbers are recorded for later release.
fn claim_interfaces(&mut self, config_descriptor: &ConfigDescriptorTree) {
    for i in 0..config_descriptor.num_interfaces() {
        match self.device.lock().claim_interface(i) {
            Ok(()) => {
                usb_debug!("claimed interface {}", i);
                self.claimed_interfaces.push(i);
            }
            Err(e) => {
                error!("unable to claim interface {}: {:?}", i, e);
            }
        }
    }
}
// Rebuilds the data-endpoint list from the given configuration descriptor,
// using the currently selected alternate setting (default 0) for each
// claimed interface. Endpoint 0 is skipped: control transfers are handled
// at device level.
fn create_endpoints(&mut self, config_descriptor: &ConfigDescriptorTree) -> Result<()> {
    self.endpoints = Vec::new();
    for i in &self.claimed_interfaces {
        let alt_setting = self.alt_settings.get(i).unwrap_or(&0);
        let interface = config_descriptor
            .get_interface_descriptor(*i, *alt_setting)
            .ok_or(Error::GetInterfaceDescriptor((*i, *alt_setting)))?;
        for ep_idx in 0..interface.bNumEndpoints {
            let ep_dp = interface
                .get_endpoint_descriptor(ep_idx)
                .ok_or(Error::GetEndpointDescriptor(ep_idx))?;
            let ep_num = ep_dp.get_endpoint_number();
            if ep_num == 0 {
                usb_debug!("endpoint 0 in endpoint descriptors");
                continue;
            }
            let direction = ep_dp.get_direction();
            let ty = ep_dp.get_endpoint_type().ok_or(Error::GetEndpointType)?;
            self.endpoints.push(UsbEndpoint::new(
                self.fail_handle.clone(),
                self.job_queue.clone(),
                self.device.clone(),
                ep_num,
                direction,
                ty,
            ));
        }
    }
    Ok(())
}
// Releases every interface previously claimed on the host device, leaving
// the claimed list empty. Release failures are logged and ignored.
fn release_interfaces(&mut self) {
    for iface in self.claimed_interfaces.drain(..) {
        if let Err(e) = self.device.lock().release_interface(iface) {
            error!("could not release interface: {:?}", e);
        }
    }
}
// Routes an incoming xHCI transfer: endpoint 0 goes to the control-transfer
// state machine, anything else to the matching data endpoint. Transfers with
// no matching endpoint are completed with an error status.
fn submit_transfer_helper(&mut self, transfer: XhciTransfer) -> Result<()> {
    let ep_num = transfer.get_endpoint_number();
    if ep_num == 0 {
        return self.handle_control_transfer(transfer);
    }
    let target = self
        .endpoints
        .iter()
        .find(|ep| ep.match_ep(ep_num, transfer.get_transfer_dir()));
    match target {
        Some(ep) => ep.handle_transfer(transfer),
        None => {
            warn!("Could not find endpoint for transfer");
            transfer
                .on_transfer_complete(&TransferStatus::Error, 0)
                .map_err(Error::TransferComplete)
        }
    }
}
}
impl XhciBackendDevice for HostDevice {
// Reports whether the backing device is USB 3 or USB 2, based on the bcdUSB
// field of its device descriptor. Falls back to USB 2 when the descriptor
// cannot be read.
fn get_backend_type(&self) -> BackendType {
    let descriptor = match self.device.lock().get_device_descriptor() {
        Ok(d) => d,
        Err(_) => return BackendType::Usb2,
    };
    // See definition of bcdUsb.
    const USB3_MASK: u16 = 0x0300;
    if descriptor.bcdUSB & USB3_MASK == USB3_MASK {
        BackendType::Usb3
    } else {
        BackendType::Usb2
    }
}
// Vendor ID from the device descriptor; 0 when the descriptor is unreadable.
fn get_vid(&self) -> u16 {
    self.device
        .lock()
        .get_device_descriptor()
        .map(|d| d.idVendor)
        .unwrap_or_else(|e| {
            error!("cannot get device descriptor: {:?}", e);
            0
        })
}
// Product ID from the device descriptor; 0 when the descriptor is unreadable.
fn get_pid(&self) -> u16 {
    self.device
        .lock()
        .get_device_descriptor()
        .map(|d| d.idProduct)
        .unwrap_or_else(|e| {
            error!("cannot get device descriptor: {:?}", e);
            0
        })
}
// Entry point used by the xHCI layer; logs and erases the error type since
// the trait signature only allows `Result<(), ()>`.
fn submit_transfer(&mut self, transfer: XhciTransfer) -> std::result::Result<(), ()> {
    self.submit_transfer_helper(transfer).map_err(|e| {
        error!("failed to submit transfer: {}", e);
    })
}
fn set_address(&mut self, _address: UsbDeviceAddress) {
    // It's a standard, set_address, device request. We do nothing here. As described in XHCI
    // spec. See set address command ring trb.
    usb_debug!(
        "Set address control transfer is received with address: {}",
        _address
    );
}
// Issues a reset to the underlying host device, logging any failure and
// mapping it to the trait's unit error type.
fn reset(&mut self) -> std::result::Result<(), ()> {
    usb_debug!("resetting host device");
    self.device
        .lock()
        .reset()
        .map_err(|e| error!("failed to reset device: {:?}", e))
}
}
|
#[macro_use]
extern crate nom;
pub mod tree;
pub mod parser;
use std::str::{self, FromStr};
use std::path::{PathBuf, Path};
use std::fs::File;
use std::io::Read;
use std::cmp::Ordering;
use std::borrow::Borrow;
use nom::{IResult, ErrorKind, Needed, FindSubstring, digit, space, multispace, line_ending};
use parser::escape_c_string;
// Parses a `/include/ "path"` directive, yielding the unescaped path string.
named!(pub parse_include<String>, preceded!(
    tag!("/include/"),
    preceded!( multispace,
        delimited!(
            char!('"'),
            escape_c_string,
            char!('"')
        ))
));
// Scans forward to the next `/include/` directive, returning the bytes
// preceding it together with the parsed include path.
named!(pub find_include<(&[u8], String)>, do_parse!(
    pre: take_until!("/include/") >>
    path: parse_include >>
    (pre, path)
));
/// Describes a region of the flattened output buffer that came from a single
/// source file.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IncludeBounds {
    /// File this region was read from.
    pub path: PathBuf,
    /// Offset of the region within the global (flattened) buffer.
    pub global_start: usize,
    /// Offset within the child file at which the region begins.
    pub child_start: usize,
    /// Length of the region in bytes.
    pub len: usize,
    /// How the file was pulled in (DTS `/include/` or preprocessor).
    pub method: IncludeMethod,
}
/// Mechanism by which a file was included into the flattened buffer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IncludeMethod {
    /// Device-tree `/include/` directive.
    DTS,
    /// C-preprocessor output (tracked via linemarkers).
    CPP,
}
impl PartialOrd for IncludeBounds {
    // Delegates to the total order defined by `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for IncludeBounds {
    // Orders bounds by start offset, breaking ties by end offset.
    fn cmp(&self, other: &Self) -> Ordering {
        use std::cmp::Ordering::*;
        match self.global_start.cmp(&other.global_start) {
            Equal => self.global_end().cmp(&other.global_end()),
            o => o,
        }
    }
}
impl IncludeBounds {
    /// End offset (exclusive) of this region in the global buffer.
    pub fn global_end(&self) -> usize {
        self.global_start + self.len
    }
    /// Splits existing bounds around an inserted region `[start, end)`.
    ///
    /// `offset` is the number of bytes consumed by the include directive
    /// itself. A bound spanning `start` is cut in two; a bound beginning
    /// exactly at `start` is shifted past the inserted region instead.
    pub fn split_bounds(bounds: &mut Vec<IncludeBounds>, start: usize, end: usize, offset: usize) {
        let mut remainders: Vec<IncludeBounds> = Vec::new();
        for b in bounds.iter_mut() {
            if b.global_start < start && b.global_end() >= start {
                // global_start -- start -- global_end
                let remainder = b.global_end() - start;
                // Tail of the split bound, re-anchored after the insertion.
                remainders.push(IncludeBounds {
                    path: b.path.clone(),
                    global_start: end,
                    child_start: b.child_start + start - b.global_start + offset,
                    len: remainder, // - offset,
                    method: b.method.clone(),
                });
                b.len = start - b.global_start;
            } else if b.global_start == start {
                // split is at beginning of the bound
                // offset the start
                {
                    let offset = end - start;
                    b.global_start += offset;
                }
                // shrink the len by the offset
                b.len -= offset;
            }
        }
        bounds.extend_from_slice(&remainders);
        bounds.sort();
    }
}
/// A parsed C-preprocessor linemarker, e.g. `# 12 "file.dtsi" 2`.
#[derive(Debug, PartialEq)]
pub struct Linemarker {
    // Line number within the referenced file.
    child_line: usize,
    // File the following lines originate from.
    path: PathBuf,
    // Optional flag digit (1-4) following the path.
    flag: Option<LinemarkerFlag>,
}
/// Flag digit carried by a preprocessor linemarker (parsed from values 1-4
/// in `parse_linemarker`).
#[derive(Debug, PartialEq)]
pub enum LinemarkerFlag {
    // Parsed from flag value 1.
    Start,
    // Parsed from flag value 2.
    Return,
    // Parsed from flag value 3.
    System,
    // Parsed from flag value 4.
    Extern,
}
// Parses a full linemarker line: `#` [`line`] <num> "<path>" [flag] <eol>.
// NOTE(review): a flag value outside 1-4 (e.g. a multi-digit number) hits
// `unreachable!()` and panics — confirm whether such input can occur.
named!(pub parse_linemarker<Linemarker>,
    complete!(do_parse!(
        tag!("#") >>
        opt!(tag!("line")) >>
        space >>
        line: map_res!(map_res!(digit, str::from_utf8), usize::from_str) >>
        space >>
        path: delimited!(
            char!('"'),
            map!(escape_c_string, PathBuf::from),
            char!('"')
        ) >>
        flag: opt!(preceded!(space, map_res!(map_res!(digit, str::from_utf8), u64::from_str))) >>
        line_ending >>
        (Linemarker {
            child_line: line,
            path: path,
            flag: flag.map(|f| match f {
                1 => LinemarkerFlag::Start,
                2 => LinemarkerFlag::Return,
                3 => LinemarkerFlag::System,
                4 => LinemarkerFlag::Extern,
                _ => unreachable!(),
            }),
        })
    ))
);
// Finds the earliest candidate linemarker ("# " or "#line ") in `input`.
// On success, returns the input starting at the marker plus the bytes that
// preceded it.
fn find_linemarker_start(input: &[u8]) -> IResult<&[u8], &[u8]> {
    if "# ".len() > input.len() {
        IResult::Incomplete(Needed::Size("# ".len()))
    } else {
        // Take whichever of the two candidate substrings occurs first.
        match input.find_substring("# ").iter().chain(input.find_substring("#line ").iter()).min() {
            None => {
                IResult::Error(error_position!(ErrorKind::TakeUntil, input))
            },
            Some(index) => {
                IResult::Done(&input[*index..], &input[0..*index])
            },
        }
    }
}
// Locates and parses the next linemarker, returning the preceding bytes
// alongside the parsed marker.
named!(find_linemarker<(&[u8], Linemarker)>, do_parse!(
    pre: find_linemarker_start >>
    marker: parse_linemarker >>
    (pre, marker)
));
// Scans `buf` for C-preprocessor linemarkers, closing the current bound at
// each marker and opening a new CPP bound for the file the marker names.
// `global_offset` is the offset of `buf` within the flattened buffer.
//
// Panics if a linemarker appears while the last bound is not CPP, since a
// linemarker inside a DTS `/include/` indicates mixed include styles.
//
// Fixes: replaced `loop { if let ... else { return } }` with `while let`,
// and removed a stray debug `println!` left in library code.
fn parse_linemarkers(buf: &[u8], bounds: &mut Vec<IncludeBounds>, global_offset: usize) {
    let end_offset = global_offset + buf.len();
    let mut buf = buf;
    // Consume one linemarker per iteration until none remain.
    while let IResult::Done(rem, (pre, marker)) = find_linemarker(buf) {
        // double check that last bound was from a linemarker
        match bounds.last() {
            Some(&IncludeBounds { method: IncludeMethod::CPP, .. }) => {}
            _ => {
                println!("{:#?}", bounds);
                panic!("Linemarker found within DTS include")
            }
        }
        // Close the previous bound at the start of the marker.
        bounds.last_mut().unwrap().len = pre.len();
        // Open a new bound for the file named by the marker.
        let new_bound = IncludeBounds {
            path: marker.path.clone(),
            global_start: end_offset - rem.len(),
            child_start: File::open(&marker.path)
                .map(|f| f.bytes().filter_map(|e| e.ok()))
                .map(|b| line_to_byte_offset(b, marker.child_line).unwrap()) //TODO: unwrapping is bad, SOK?
                .unwrap_or(0),
            len: rem.len(),
            method: IncludeMethod::CPP,
        };
        bounds.push(new_bound);
        buf = rem;
    }
}
/// Recursively flattens `path` and everything it `/include/`s into one
/// buffer, returning the buffer plus the `IncludeBounds` that map byte
/// ranges back to source files. `main_offset` is where this file's content
/// begins within the top-level flattened buffer.
///
/// Fixes: replaced `loop { if let ... else { return } }` with `while let`,
/// and replaced eager `.expect(&format!(...))` (allocates even on success)
/// with lazy `unwrap_or_else`.
pub fn include_files(path: &Path,
                     main_offset: usize)
                     -> Result<(Vec<u8>, Vec<IncludeBounds>), String> {
    // TODO: check from parent directory of root file
    let mut file = File::open(path).unwrap();
    let mut buffer: Vec<u8> = Vec::new();
    let mut bounds: Vec<IncludeBounds> = Vec::new();
    let mut string_buffer = String::new();
    file.read_to_string(&mut string_buffer).map_err(|_| "IO Error".to_string())?;
    let mut buf = string_buffer.as_bytes();
    // Matches a linemarker at the very start of input, returning both the raw
    // line and the parsed marker.
    named!(first_linemarker<(&[u8], Linemarker)>,
        do_parse!(
            marker: peek!(parse_linemarker) >>
            line: recognize!(parse_linemarker) >>
            (line, marker)
        )
    );
    let start_bound = if let IResult::Done(rem, (line, marker)) = first_linemarker(buf) {
        // Preprocessed input: the first bound tracks the file the marker names.
        let bound = IncludeBounds {
            path: marker.path.clone(),
            global_start: buf.len() - rem.len(),
            // TODO: check from parent directory of root file
            child_start: File::open(&marker.path)
                .map(|f| f.bytes().filter_map(|e| e.ok()))
                .map(|b| line_to_byte_offset(b, marker.child_line).unwrap()) //TODO: unwrapping is bad, SOK?
                .unwrap_or(0),
            len: File::open(&marker.path).unwrap().bytes().count(),
            method: IncludeMethod::CPP,
        };
        buffer.extend_from_slice(line);
        buf = rem;
        bound
    } else {
        // Plain DTS input: the first bound covers this file itself.
        IncludeBounds {
            path: path.to_path_buf(),
            global_start: main_offset,
            child_start: 0,
            // TODO: check from parent directory of root file
            len: File::open(path).unwrap().bytes().count(),
            method: IncludeMethod::DTS,
        }
    };
    bounds.push(start_bound);
    // Handle each `/include/` directive in turn; bytes before it are copied
    // through after being scanned for linemarkers.
    while let IResult::Done(rem, (pre, file)) = find_include(&buf[..]) {
        parse_linemarkers(pre, &mut bounds, buffer.len());
        buffer.extend_from_slice(pre);
        let offset = pre.len();
        let included_path = Path::new(&file);
        let total_len = buffer.len() + main_offset;
        let (sub_buf, sub_bounds) = include_files(included_path, total_len)?;
        buffer.extend(sub_buf);
        let inc_start = sub_bounds.first()
            .map(|b| b.global_start)
            .unwrap_or_else(|| panic!("No bounds returned: {}",
                                      included_path.to_string_lossy()));
        let inc_end = sub_bounds.last()
            .map(|b| b.global_end())
            .unwrap_or_else(|| panic!("No bounds returned: {}",
                                      included_path.to_string_lossy()));
        // Bytes consumed by the include directive itself.
        let eaten_len = (buf.len() - offset) - rem.len();
        IncludeBounds::split_bounds(&mut bounds, inc_start, inc_end, eaten_len);
        bounds.extend_from_slice(&sub_bounds);
        bounds.sort();
        buf = rem;
    }
    // No more includes: scan the tail for linemarkers, append it, and return.
    parse_linemarkers(buf, &mut bounds, buffer.len());
    buffer.extend(buf);
    Ok((buffer, bounds))
}
/// Converts a 1-based line number into the byte offset of that line's first
/// character. Returns an error when the input has fewer lines than `line`.
pub fn line_to_byte_offset<K, I>(bytes: I, line: usize) -> Result<usize, String>
    where K: Borrow<u8> + Eq,
          I: Iterator<Item = K>
{
    // Line 1 always starts at offset zero.
    if line == 1 {
        return Ok(0);
    }
    let mut newlines_seen = 0;
    for (offset, byte) in bytes.enumerate() {
        if byte.borrow() == &b'\n' {
            newlines_seen += 1;
            // The requested line begins right after its (line - 1)th newline.
            if newlines_seen == line - 1 {
                return Ok(offset + 1);
            }
        }
    }
    Err("Failed converting from line to byte offset".to_string())
}
/// Converts a byte offset into a 1-based `(line, column)` pair by counting
/// the newlines that occur strictly before `offset`.
pub fn byte_offset_to_line_col<K, I>(bytes: I, offset: usize) -> (usize, usize)
    where K: Borrow<u8> + Eq,
          I: Iterator<Item = K>
{
    let mut line = 1;
    let mut last_newline: Option<usize> = None;
    for (off, byte) in bytes.enumerate() {
        if off < offset && byte.borrow() == &b'\n' {
            line += 1;
            last_newline = Some(off);
        }
    }
    match last_newline {
        // Column is measured from the byte after the last newline.
        Some(start) => (line, offset - start),
        // No newline before the offset: still on line 1.
        None => (1, offset + 1),
    }
}
// Unit tests for line/offset conversion and linemarker parsing.
#[cfg(test)]
mod tests {
    use super::*;
    use nom::IResult;
    // line -> byte offset for each line of a small fixture.
    #[test]
    fn lines_to_bytes() {
        let string = "Howdy\nHow goes it\n\nI'm doing fine\n";
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 1).unwrap(),
                   0);
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 2).unwrap(),
                   6);
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 3).unwrap(),
                   18);
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 4).unwrap(),
                   19);
    }
    // byte offset -> (line, col), including an offset on an empty line.
    #[test]
    fn bytes_to_lines() {
        let string = "Howdy\nHow goes it\n\nI'm doing fine\n";
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 0),
                   (1, 1));
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 8),
                   (2, 3));
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 20),
                   (4, 2));
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 18),
                   (3, 1));
    }
    // Linemarker without the optional trailing flag digit.
    #[test]
    fn linemarker_no_flag() {
        let input = b"# 1 \"<built-in>\"\n";
        assert_eq!(
            parse_linemarker(input),
            IResult::Done(
                &b""[..],
                Linemarker {
                    child_line: 1,
                    path: PathBuf::from("<built-in>"),
                    flag: None,
                }
            )
        );
    }
    // Linemarker with flag 2 (maps to LinemarkerFlag::Return).
    #[test]
    fn linemarker_flag() {
        let input = b"# 12 \"am33xx.dtsi\" 2\n";
        assert_eq!(
            parse_linemarker(input),
            IResult::Done(
                &b""[..],
                Linemarker {
                    child_line: 12,
                    path: PathBuf::from("am33xx.dtsi"),
                    flag: Some(LinemarkerFlag::Return),
                }
            )
        );
    }
}
Replace `loop if let` with `while let` to improve code readability
#[macro_use]
extern crate nom;
pub mod tree;
pub mod parser;
use std::str::{self, FromStr};
use std::path::{PathBuf, Path};
use std::fs::File;
use std::io::Read;
use std::cmp::Ordering;
use std::borrow::Borrow;
use nom::{IResult, ErrorKind, Needed, FindSubstring, digit, space, multispace, line_ending};
use parser::escape_c_string;
// Parses a `/include/ "path"` directive, yielding the unescaped path string.
named!(pub parse_include<String>, preceded!(
    tag!("/include/"),
    preceded!( multispace,
        delimited!(
            char!('"'),
            escape_c_string,
            char!('"')
        ))
));
// Scans forward to the next `/include/` directive, returning the bytes
// preceding it together with the parsed include path.
named!(pub find_include<(&[u8], String)>, do_parse!(
    pre: take_until!("/include/") >>
    path: parse_include >>
    (pre, path)
));
/// Describes a region of the flattened output buffer that came from a single
/// source file.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IncludeBounds {
    /// File this region was read from.
    pub path: PathBuf,
    /// Offset of the region within the global (flattened) buffer.
    pub global_start: usize,
    /// Offset within the child file at which the region begins.
    pub child_start: usize,
    /// Length of the region in bytes.
    pub len: usize,
    /// How the file was pulled in (DTS `/include/` or preprocessor).
    pub method: IncludeMethod,
}
/// Mechanism by which a file was included into the flattened buffer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum IncludeMethod {
    /// Device-tree `/include/` directive.
    DTS,
    /// C-preprocessor output (tracked via linemarkers).
    CPP,
}
impl PartialOrd for IncludeBounds {
    // Delegates to the total order defined by `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for IncludeBounds {
    // Orders bounds by start offset, breaking ties by end offset.
    fn cmp(&self, other: &Self) -> Ordering {
        use std::cmp::Ordering::*;
        match self.global_start.cmp(&other.global_start) {
            Equal => self.global_end().cmp(&other.global_end()),
            o => o,
        }
    }
}
impl IncludeBounds {
    /// End offset (exclusive) of this region in the global buffer.
    pub fn global_end(&self) -> usize {
        self.global_start + self.len
    }
    /// Splits existing bounds around an inserted region `[start, end)`.
    ///
    /// `offset` is the number of bytes consumed by the include directive
    /// itself. A bound spanning `start` is cut in two; a bound beginning
    /// exactly at `start` is shifted past the inserted region instead.
    pub fn split_bounds(bounds: &mut Vec<IncludeBounds>, start: usize, end: usize, offset: usize) {
        let mut remainders: Vec<IncludeBounds> = Vec::new();
        for b in bounds.iter_mut() {
            if b.global_start < start && b.global_end() >= start {
                // global_start -- start -- global_end
                let remainder = b.global_end() - start;
                // Tail of the split bound, re-anchored after the insertion.
                remainders.push(IncludeBounds {
                    path: b.path.clone(),
                    global_start: end,
                    child_start: b.child_start + start - b.global_start + offset,
                    len: remainder, // - offset,
                    method: b.method.clone(),
                });
                b.len = start - b.global_start;
            } else if b.global_start == start {
                // split is at beginning of the bound
                // offset the start
                {
                    let offset = end - start;
                    b.global_start += offset;
                }
                // shrink the len by the offset
                b.len -= offset;
            }
        }
        bounds.extend_from_slice(&remainders);
        bounds.sort();
    }
}
/// A parsed C-preprocessor linemarker, e.g. `# 12 "file.dtsi" 2`.
#[derive(Debug, PartialEq)]
pub struct Linemarker {
    // Line number within the referenced file.
    child_line: usize,
    // File the following lines originate from.
    path: PathBuf,
    // Optional flag digit (1-4) following the path.
    flag: Option<LinemarkerFlag>,
}
/// Flag digit carried by a preprocessor linemarker (parsed from values 1-4
/// in `parse_linemarker`).
#[derive(Debug, PartialEq)]
pub enum LinemarkerFlag {
    // Parsed from flag value 1.
    Start,
    // Parsed from flag value 2.
    Return,
    // Parsed from flag value 3.
    System,
    // Parsed from flag value 4.
    Extern,
}
// Parses a full linemarker line: `#` [`line`] <num> "<path>" [flag] <eol>.
// NOTE(review): a flag value outside 1-4 (e.g. a multi-digit number) hits
// `unreachable!()` and panics — confirm whether such input can occur.
named!(pub parse_linemarker<Linemarker>,
    complete!(do_parse!(
        tag!("#") >>
        opt!(tag!("line")) >>
        space >>
        line: map_res!(map_res!(digit, str::from_utf8), usize::from_str) >>
        space >>
        path: delimited!(
            char!('"'),
            map!(escape_c_string, PathBuf::from),
            char!('"')
        ) >>
        flag: opt!(preceded!(space, map_res!(map_res!(digit, str::from_utf8), u64::from_str))) >>
        line_ending >>
        (Linemarker {
            child_line: line,
            path: path,
            flag: flag.map(|f| match f {
                1 => LinemarkerFlag::Start,
                2 => LinemarkerFlag::Return,
                3 => LinemarkerFlag::System,
                4 => LinemarkerFlag::Extern,
                _ => unreachable!(),
            }),
        })
    ))
);
// Finds the earliest candidate linemarker ("# " or "#line ") in `input`.
// On success, returns the input starting at the marker plus the bytes that
// preceded it.
fn find_linemarker_start(input: &[u8]) -> IResult<&[u8], &[u8]> {
    if "# ".len() > input.len() {
        IResult::Incomplete(Needed::Size("# ".len()))
    } else {
        // Take whichever of the two candidate substrings occurs first.
        match input.find_substring("# ").iter().chain(input.find_substring("#line ").iter()).min() {
            None => {
                IResult::Error(error_position!(ErrorKind::TakeUntil, input))
            },
            Some(index) => {
                IResult::Done(&input[*index..], &input[0..*index])
            },
        }
    }
}
// Locates and parses the next linemarker, returning the preceding bytes
// alongside the parsed marker.
named!(find_linemarker<(&[u8], Linemarker)>, do_parse!(
    pre: find_linemarker_start >>
    marker: parse_linemarker >>
    (pre, marker)
));
// Scans `buf` for C-preprocessor linemarkers, closing the current bound at
// each marker and opening a new CPP bound for the file the marker names.
// `global_offset` is the offset of `buf` within the flattened buffer.
//
// Panics if a linemarker appears while the last bound is not CPP, since a
// linemarker inside a DTS `/include/` indicates mixed include styles.
fn parse_linemarkers(buf: &[u8], bounds: &mut Vec<IncludeBounds>, global_offset: usize) {
    let end_offset = global_offset + buf.len();
    let mut buf = buf;
    // Consume one linemarker per iteration until none remain.
    while let IResult::Done(rem, (pre, marker)) = find_linemarker(buf) {
        // double check that last bound was from a linemarker
        match bounds.last() {
            Some(&IncludeBounds { method: IncludeMethod::CPP, .. }) => {}
            _ => {
                println!("{:#?}", bounds);
                panic!("Linemarker found within DTS include")
            }
        }
        // Close the previous bound at the start of the marker.
        bounds.last_mut().unwrap().len = pre.len();
        // Open a new bound for the file named by the marker.
        let new_bound = IncludeBounds {
            path: marker.path.clone(),
            global_start: end_offset - rem.len(),
            child_start: File::open(&marker.path)
                .map(|f| f.bytes().filter_map(|e| e.ok()))
                .map(|b| line_to_byte_offset(b, marker.child_line).unwrap()) //TODO: unwrapping is bad, SOK?
                .unwrap_or(0),
            len: rem.len(),
            method: IncludeMethod::CPP,
        };
        bounds.push(new_bound);
        buf = rem;
    }
}
/// Recursively flattens `path` and everything it `/include/`s into one
/// buffer, returning the buffer plus the `IncludeBounds` that map byte
/// ranges back to source files. `main_offset` is where this file's content
/// begins within the top-level flattened buffer.
///
/// Fix: eager `.expect(&format!(...))` allocated the message even on
/// success (clippy `expect_fun_call`); replaced with lazy `unwrap_or_else`.
pub fn include_files(path: &Path,
                     main_offset: usize)
                     -> Result<(Vec<u8>, Vec<IncludeBounds>), String> {
    // TODO: check from parent directory of root file
    let mut file = File::open(path).unwrap();
    let mut buffer: Vec<u8> = Vec::new();
    let mut bounds: Vec<IncludeBounds> = Vec::new();
    let mut string_buffer = String::new();
    file.read_to_string(&mut string_buffer).map_err(|_| "IO Error".to_string())?;
    let mut buf = string_buffer.as_bytes();
    // Matches a linemarker at the very start of input, returning both the raw
    // line and the parsed marker.
    named!(first_linemarker<(&[u8], Linemarker)>,
        do_parse!(
            marker: peek!(parse_linemarker) >>
            line: recognize!(parse_linemarker) >>
            (line, marker)
        )
    );
    let start_bound = if let IResult::Done(rem, (line, marker)) = first_linemarker(buf) {
        // Preprocessed input: the first bound tracks the file the marker names.
        let bound = IncludeBounds {
            path: marker.path.clone(),
            global_start: buf.len() - rem.len(),
            // TODO: check from parent directory of root file
            child_start: File::open(&marker.path)
                .map(|f| f.bytes().filter_map(|e| e.ok()))
                .map(|b| line_to_byte_offset(b, marker.child_line).unwrap()) //TODO: unwrapping is bad, SOK?
                .unwrap_or(0),
            len: File::open(&marker.path).unwrap().bytes().count(),
            method: IncludeMethod::CPP,
        };
        buffer.extend_from_slice(line);
        buf = rem;
        bound
    } else {
        // Plain DTS input: the first bound covers this file itself.
        IncludeBounds {
            path: path.to_path_buf(),
            global_start: main_offset,
            child_start: 0,
            // TODO: check from parent directory of root file
            len: File::open(path).unwrap().bytes().count(),
            method: IncludeMethod::DTS,
        }
    };
    bounds.push(start_bound);
    // Handle each `/include/` directive in turn; bytes before it are copied
    // through after being scanned for linemarkers.
    while let IResult::Done(rem, (pre, file)) = find_include(&buf[..]) {
        parse_linemarkers(pre, &mut bounds, buffer.len());
        buffer.extend_from_slice(pre);
        let offset = pre.len();
        let included_path = Path::new(&file);
        let total_len = buffer.len() + main_offset;
        let (sub_buf, sub_bounds) = include_files(included_path, total_len)?;
        buffer.extend(sub_buf);
        let inc_start = sub_bounds.first()
            .map(|b| b.global_start)
            .unwrap_or_else(|| panic!("No bounds returned: {}",
                                      included_path.to_string_lossy()));
        let inc_end = sub_bounds.last()
            .map(|b| b.global_end())
            .unwrap_or_else(|| panic!("No bounds returned: {}",
                                      included_path.to_string_lossy()));
        // Bytes consumed by the include directive itself.
        let eaten_len = (buf.len() - offset) - rem.len();
        IncludeBounds::split_bounds(&mut bounds, inc_start, inc_end, eaten_len);
        bounds.extend_from_slice(&sub_bounds);
        bounds.sort();
        buf = rem;
    }
    // no more includes, just add the rest and return
    parse_linemarkers(buf, &mut bounds, buffer.len());
    buffer.extend(buf);
    Ok((buffer, bounds))
}
/// Converts a 1-based line number into the byte offset of that line's first
/// character. Returns an error when the input has fewer lines than `line`.
pub fn line_to_byte_offset<K, I>(bytes: I, line: usize) -> Result<usize, String>
    where K: Borrow<u8> + Eq,
          I: Iterator<Item = K>
{
    // Line 1 always starts at offset zero.
    if line == 1 {
        return Ok(0);
    }
    let mut newlines_seen = 0;
    for (offset, byte) in bytes.enumerate() {
        if byte.borrow() == &b'\n' {
            newlines_seen += 1;
            // The requested line begins right after its (line - 1)th newline.
            if newlines_seen == line - 1 {
                return Ok(offset + 1);
            }
        }
    }
    Err("Failed converting from line to byte offset".to_string())
}
/// Converts a byte offset into a 1-based `(line, column)` pair by counting
/// the newlines that occur strictly before `offset`.
pub fn byte_offset_to_line_col<K, I>(bytes: I, offset: usize) -> (usize, usize)
    where K: Borrow<u8> + Eq,
          I: Iterator<Item = K>
{
    let mut line = 1;
    let mut last_newline: Option<usize> = None;
    for (off, byte) in bytes.enumerate() {
        if off < offset && byte.borrow() == &b'\n' {
            line += 1;
            last_newline = Some(off);
        }
    }
    match last_newline {
        // Column is measured from the byte after the last newline.
        Some(start) => (line, offset - start),
        // No newline before the offset: still on line 1.
        None => (1, offset + 1),
    }
}
// Unit tests for line/offset conversion and linemarker parsing.
#[cfg(test)]
mod tests {
    use super::*;
    use nom::IResult;
    // line -> byte offset for each line of a small fixture.
    #[test]
    fn lines_to_bytes() {
        let string = "Howdy\nHow goes it\n\nI'm doing fine\n";
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 1).unwrap(),
                   0);
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 2).unwrap(),
                   6);
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 3).unwrap(),
                   18);
        assert_eq!(line_to_byte_offset(string.as_bytes().iter(), 4).unwrap(),
                   19);
    }
    // byte offset -> (line, col), including an offset on an empty line.
    #[test]
    fn bytes_to_lines() {
        let string = "Howdy\nHow goes it\n\nI'm doing fine\n";
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 0),
                   (1, 1));
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 8),
                   (2, 3));
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 20),
                   (4, 2));
        assert_eq!(byte_offset_to_line_col(string.as_bytes().iter(), 18),
                   (3, 1));
    }
    // Linemarker without the optional trailing flag digit.
    #[test]
    fn linemarker_no_flag() {
        let input = b"# 1 \"<built-in>\"\n";
        assert_eq!(
            parse_linemarker(input),
            IResult::Done(
                &b""[..],
                Linemarker {
                    child_line: 1,
                    path: PathBuf::from("<built-in>"),
                    flag: None,
                }
            )
        );
    }
    // Linemarker with flag 2 (maps to LinemarkerFlag::Return).
    #[test]
    fn linemarker_flag() {
        let input = b"# 12 \"am33xx.dtsi\" 2\n";
        assert_eq!(
            parse_linemarker(input),
            IResult::Done(
                &b""[..],
                Linemarker {
                    child_line: 12,
                    path: PathBuf::from("am33xx.dtsi"),
                    flag: Some(LinemarkerFlag::Return),
                }
            )
        );
    }
}
|
extern crate epoll;
extern crate libc;
#[macro_use(defer)]
extern crate scopeguard;
extern crate api_server;
extern crate devices;
extern crate kernel_loader;
extern crate kvm;
extern crate kvm_sys;
extern crate net_util;
#[macro_use]
extern crate sys_util;
extern crate x86_64;
pub mod device_config;
pub mod device_manager;
pub mod kernel_cmdline;
mod vm_control;
mod vstate;
use std::ffi::CString;
use std::fs::{File, OpenOptions};
use std::io::{self, stdout};
use std::os::unix::io::{AsRawFd, RawFd};
use std::result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{Arc, Barrier, Mutex};
use std::thread;
use api_server::ApiRequest;
use api_server::request::async::{AsyncOutcome, AsyncRequest};
use api_server::request::sync::{DriveError, Error as SyncError, GenerateResponse,
NetworkInterfaceBody, OkStatus as SyncOkStatus, PutDriveOutcome,
SyncRequest, VsockJsonBody};
use api_server::request::sync::boot_source::{PutBootSourceConfigError, PutBootSourceOutcome};
use api_server::request::sync::machine_configuration::{PutMachineConfigurationError,
PutMachineConfigurationOutcome};
use device_config::*;
use device_manager::*;
use devices::virtio;
use devices::{DeviceEventT, EpollHandler};
use kvm::*;
use sys_util::{register_signal_handler, EventFd, GuestAddress, GuestMemory, Killable, Terminal};
use vm_control::VmResponse;
use vstate::{Vcpu, Vm};
/// Guest address the kernel image is loaded at (see `boot_kernel`).
pub const KERNEL_START_OFFSET: usize = 0x200000;
/// Guest address the kernel command line is loaded at.
pub const CMDLINE_OFFSET: usize = 0x20000;
/// The command line may grow at most up to the start of the kernel image.
pub const CMDLINE_MAX_SIZE: usize = KERNEL_START_OFFSET - CMDLINE_OFFSET;
/// Used when an API boot request carries no `boot_args`.
pub const DEFAULT_KERNEL_CMDLINE: &str = "console=ttyS0 noapic reboot=k panic=1 pci=off nomodules";
/// Errors raised while setting up or running the microVM. Most variants wrap
/// the error type of the subsystem they originate from.
#[derive(Debug)]
pub enum Error {
    /// Writing boot parameters into guest memory failed.
    ConfigureSystem(x86_64::Error),
    /// epoll fd creation or epoll_ctl failed.
    EpollFd(std::io::Error),
    /// An eventfd operation failed.
    EventFd(sys_util::Error),
    /// Allocating/mapping guest memory failed.
    GuestMemory(sys_util::GuestMemoryError),
    /// Kernel image I/O error.
    Kernel(std::io::Error),
    /// Loading the kernel image or cmdline into guest memory failed.
    KernelLoader(kernel_loader::Error),
    InvalidKernelPath,
    /// An operation needed a kernel config but none was set yet.
    MissingKernelConfig,
    /// `Kvm::new()` failed.
    Kvm(sys_util::Error),
    /// epoll_wait failed.
    Poll(std::io::Error),
    /// Queueing guest console input to the emulated serial failed.
    Serial(sys_util::Error),
    /// Switching the host terminal raw/canonical mode failed.
    Terminal(sys_util::Error),
    // vcpu lifecycle failures
    Vcpu(vstate::Error),
    VcpuConfigure(vstate::Error),
    VcpuSpawn(std::io::Error),
    // VM setup failures
    Vm(vstate::Error),
    VmSetup(vstate::Error),
    VmIOBus(vstate::Error),
    // device creation / registration failures
    RootDiskImage(std::io::Error),
    RootBlockDeviceNew(sys_util::Error),
    RegisterBlock(device_manager::Error),
    NetDeviceNew(devices::virtio::NetError),
    RegisterNet(device_manager::Error),
    CreateVirtioVsock(devices::virtio::vhost::Error),
    RegisterMMIOVsockDevice(device_manager::Error),
    DeviceVmRequest(sys_util::Error),
    DriveError(DriveError),
    /// The API eventfd fired but the request channel was empty.
    ApiChannel,
}
impl std::convert::From<kernel_loader::Error> for Error {
fn from(e: kernel_loader::Error) -> Error {
Error::KernelLoader(e)
}
}
impl std::convert::From<x86_64::Error> for Error {
fn from(e: x86_64::Error) -> Error {
Error::ConfigureSystem(e)
}
}
/// Shorthand result type used throughout this crate.
type Result<T> = std::result::Result<T, Error>;

/// Token stored in the epoll dispatch table, identifying the owner of a
/// triggered file descriptor.
#[derive(Debug, Clone, Copy)]
pub enum EpollDispatch {
    /// A vcpu thread wrote to the exit eventfd.
    Exit,
    /// Guest console input is available on stdin.
    Stdin,
    /// Event for device handler `usize`, device event number `DeviceEventT`.
    DeviceHandler(usize, DeviceEventT),
    /// The API thread notified its eventfd.
    ApiRequest,
}
// A device's `EpollHandler` arrives over a channel once the device is activated;
// until then only the receiving end is held.
struct MaybeHandler {
    handler: Option<Box<EpollHandler>>,
    receiver: Receiver<Box<EpollHandler>>,
}

impl MaybeHandler {
    // Starts unresolved; the handler is pulled lazily from `receiver` on first use
    // (see `EpollContext::get_device_handler`).
    fn new(receiver: Receiver<Box<EpollHandler>>) -> Self {
        MaybeHandler {
            handler: None,
            receiver,
        }
    }
}
/// Pairs an eventfd with the dispatch-table slot it was registered under, so the
/// registration can later be undone via `EpollContext::remove_event`.
pub struct EpollEvent {
    dispatch_index: u64,
    event_fd: EventFd,
}
//This should handle epoll related business from now on. A glaring shortcoming of the current
//design is the liberal passing around of raw_fds, and duping of file descriptors. This issue
//will be solved when we also implement device removal.
pub struct EpollContext {
    // Owned epoll fd; closed in the Drop impl.
    epoll_raw_fd: RawFd,
    // Slot in `dispatch_table` reserved for stdin at construction time.
    stdin_index: u64,
    // FIXME: find a different design as this does not scale. This Vec can only grow.
    // Maps the u64 payload of an epoll event back to its owner.
    dispatch_table: Vec<Option<EpollDispatch>>,
    // One entry per device, indexed by the usize in `EpollDispatch::DeviceHandler`.
    device_handlers: Vec<MaybeHandler>,
}
impl EpollContext {
    /// Creates the epoll fd and a dispatch table with slot 0 pre-reserved for
    /// stdin (initially disabled, i.e. `None`).
    pub fn new() -> Result<Self> {
        let epoll_raw_fd = epoll::create(true).map_err(Error::EpollFd)?;
        // initial capacity large enough to hold 1 exit and 1 stdin events, plus 2 queue events
        // for virtio block, another 4 for virtio net and another 2 for vsock. The total is 10
        // elements. Allowing spare capacity to avoid reallocations.
        let mut dispatch_table = Vec::with_capacity(20);
        let stdin_index = dispatch_table.len() as u64;
        dispatch_table.push(None);
        Ok(EpollContext {
            epoll_raw_fd,
            stdin_index,
            dispatch_table,
            device_handlers: Vec::with_capacity(6),
        })
    }

    /// Starts watching stdin, routing its events to `EpollDispatch::Stdin`.
    pub fn enable_stdin_event(&mut self) -> Result<()> {
        epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_ADD,
            libc::STDIN_FILENO,
            epoll::Event::new(epoll::EPOLLIN, self.stdin_index),
        ).map_err(Error::EpollFd)?;
        self.dispatch_table[self.stdin_index as usize] = Some(EpollDispatch::Stdin);
        Ok(())
    }

    /// Stops watching stdin; the reserved dispatch slot is cleared but kept.
    pub fn disable_stdin_event(&mut self) -> Result<()> {
        // ignore failure to remove from epoll, only reason for failure is
        // that stdin has closed or changed - in which case we won't get
        // any more events on the original event_fd anyway.
        let _ = epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_DEL,
            libc::STDIN_FILENO,
            epoll::Event::new(epoll::EPOLLIN, self.stdin_index),
        ).map_err(Error::EpollFd);
        self.dispatch_table[self.stdin_index as usize] = None;
        Ok(())
    }

    /// Registers `evfd` under a fresh dispatch slot carrying `token`. The returned
    /// `EpollEvent` owns the fd and remembers the slot for later removal.
    pub fn add_event(&mut self, evfd: EventFd, token: EpollDispatch) -> Result<EpollEvent> {
        let index = self.dispatch_table.len() as u64;
        epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_ADD,
            evfd.as_raw_fd(),
            epoll::Event::new(epoll::EPOLLIN, index),
        ).map_err(Error::EpollFd)?;
        self.dispatch_table.push(Some(token));
        Ok(EpollEvent {
            dispatch_index: index,
            event_fd: evfd,
        })
    }

    /// Unregisters an event previously returned by `add_event`; the slot is set
    /// to `None` but never reused (see the FIXME on `dispatch_table`).
    pub fn remove_event(&mut self, epoll_event: EpollEvent) -> Result<()> {
        epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_DEL,
            epoll_event.event_fd.as_raw_fd(),
            epoll::Event::new(epoll::EPOLLIN, epoll_event.dispatch_index),
        ).map_err(Error::EpollFd)?;
        self.dispatch_table[epoll_event.dispatch_index as usize] = None;
        Ok(())
    }

    // Reserves dispatch slots for a device's events and returns the base slot
    // index plus the sender over which the device will deliver its EpollHandler.
    // NOTE(review): the loop pushes only `count - 1` entries (event numbers
    // 0..count-2), while callers pass 2/4/2 — the same queue-event counts the
    // capacity comment in `new()` cites. Confirm the last token is intentionally
    // left unallocated; otherwise this is an off-by-one.
    fn allocate_tokens(&mut self, count: usize) -> (u64, Sender<Box<EpollHandler>>) {
        let dispatch_base = self.dispatch_table.len() as u64;
        let device_idx = self.device_handlers.len();
        let (sender, receiver) = channel();
        for x in 0..count - 1 {
            self.dispatch_table.push(Some(EpollDispatch::DeviceHandler(
                device_idx,
                x as DeviceEventT,
            )));
        }
        self.device_handlers.push(MaybeHandler::new(receiver));
        (dispatch_base, sender)
    }

    /// Reserves epoll tokens for a virtio block device.
    pub fn allocate_virtio_block_tokens(&mut self) -> virtio::block::EpollConfig {
        let (dispatch_base, sender) = self.allocate_tokens(2);
        virtio::block::EpollConfig::new(dispatch_base, self.epoll_raw_fd, sender)
    }

    /// Reserves epoll tokens for a virtio net device.
    pub fn allocate_virtio_net_tokens(&mut self) -> virtio::net::EpollConfig {
        let (dispatch_base, sender) = self.allocate_tokens(4);
        virtio::net::EpollConfig::new(dispatch_base, self.epoll_raw_fd, sender)
    }

    /// Reserves epoll tokens for a vhost vsock device.
    pub fn allocate_virtio_vsock_tokens(&mut self) -> virtio::vhost::handle::VhostEpollConfig {
        let (dispatch_base, sender) = self.allocate_tokens(2);
        virtio::vhost::handle::VhostEpollConfig::new(dispatch_base, self.epoll_raw_fd, sender)
    }

    // Returns the handler for `device_idx`, receiving it from the device's
    // channel on first use.
    fn get_device_handler(&mut self, device_idx: usize) -> &mut EpollHandler {
        let ref mut maybe = self.device_handlers[device_idx];
        match maybe.handler {
            Some(ref mut v) => v.as_mut(),
            None => {
                //this should only be called in response to an epoll trigger, and the channel
                //should always contain a message after the events were added to epoll
                //by the activate() call
                maybe
                    .handler
                    .get_or_insert(maybe.receiver.try_recv().unwrap())
                    .as_mut()
            }
        }
    }
}
impl Drop for EpollContext {
    // Best-effort close of the owned epoll fd; a failure is only logged since
    // nothing useful can be done about it during drop.
    fn drop(&mut self) {
        if unsafe { libc::close(self.epoll_raw_fd) } != 0 {
            warn!("Cannot close epoll");
        }
    }
}
/// Everything needed to load and boot a guest kernel: the image file, the
/// command line, and the guest addresses to load each of them at.
pub struct KernelConfig {
    pub cmdline: kernel_cmdline::Cmdline,
    pub kernel_file: File,
    pub kernel_start_addr: GuestAddress,
    pub cmdline_addr: GuestAddress,
}
// This structure should replace MachineCfg; For now it is safer to duplicate the work as the
// net support is not fully integrated.
/// Machine-level settings: number of vcpus and memory size in MiB.
pub struct VirtualMachineConfig {
    vcpu_count: u8,
    mem_size_mib: usize,
}
impl Default for VirtualMachineConfig {
fn default() -> Self {
VirtualMachineConfig {
vcpu_count: 1,
mem_size_mib: 128,
}
}
}
/// The VMM: owns the guest's resources and serves requests from the API thread.
pub struct Vmm {
    vm_config: VirtualMachineConfig,
    // guest VM core resources
    kernel_config: Option<KernelConfig>,
    // Set to true in stop() so vcpu threads exit their run loops.
    kill_signaled: Option<Arc<AtomicBool>>,
    vcpu_handles: Option<Vec<thread::JoinHandle<()>>>,
    // Eventfd the vcpu threads write to when the guest halts or shuts down.
    exit_evt: Option<EpollEvent>,
    stdio_serial: Option<Arc<Mutex<devices::Serial>>>,
    vm: Option<Vm>,
    // guest VM devices
    // If there is a Root Block Device, this should be added as the first element of the list
    // This is necessary because we want the root to always be mounted on /dev/vda
    block_device_configs: BlockDeviceConfigs,
    network_interface_configs: NetworkInterfaceConfigs,
    vsock_device_configs: VsockDeviceConfigs,
    epoll_context: EpollContext,
    // api resources
    api_event: EpollEvent,
    from_api: Receiver<Box<ApiRequest>>,
}
impl Vmm {
/// Creates a Vmm that serves API requests delivered over `from_api` and
/// signalled through `api_event_fd`.
pub fn new(api_event_fd: EventFd, from_api: Receiver<Box<ApiRequest>>) -> Result<Self> {
    let mut epoll_context = EpollContext::new()?;
    // If the API eventfd cannot be polled the VMM can never be driven: fatal.
    let api_event = epoll_context
        .add_event(api_event_fd, EpollDispatch::ApiRequest)
        .expect("cannot add API eventfd to epoll");
    let block_device_configs = BlockDeviceConfigs::new();
    Ok(Vmm {
        vm_config: VirtualMachineConfig::default(),
        kernel_config: None,
        kill_signaled: None,
        vcpu_handles: None,
        exit_evt: None,
        stdio_serial: None,
        vm: None,
        block_device_configs,
        network_interface_configs: NetworkInterfaceConfigs::new(),
        vsock_device_configs: VsockDeviceConfigs::new(),
        epoll_context,
        api_event,
        from_api,
    })
}
/// only call this function as part of the API
/// If the drive_id does not exist, a new Block Device Config is added to the
/// list; updating an existing drive is not implemented yet.
pub fn put_block_device(
    &mut self,
    block_device_config: BlockDeviceConfig,
) -> result::Result<(), DriveError> {
    // A drive_id that is already present would make this an update operation.
    let is_update = self.block_device_configs
        .contains_drive_id(block_device_config.drive_id.clone());
    if is_update {
        Err(DriveError::NotImplemented)
    } else {
        self.block_device_configs.add(block_device_config)
    }
}
/// Applies a machine configuration received over the API.
///
/// Each field is optional; a `None` leaves the current value untouched.
/// Zero vcpus or zero MiB of memory are rejected without changing anything.
pub fn put_virtual_machine_configuration(
    &mut self,
    vcpu_count: Option<u8>,
    mem_size_mib: Option<usize>,
) -> std::result::Result<(), PutMachineConfigurationError> {
    // `if let` replaces the previous is_some()/unwrap() pattern and removes a
    // redundant second unwrap() of mem_size_mib.
    if let Some(vcpu_count_value) = vcpu_count {
        // TODO: also enforce an upper limit
        if vcpu_count_value == 0 {
            return Err(PutMachineConfigurationError::InvalidVcpuCount);
        }
        self.vm_config.vcpu_count = vcpu_count_value;
    }
    if let Some(mem_size_mib_value) = mem_size_mib {
        // TODO: add other memory checks
        if mem_size_mib_value == 0 {
            return Err(PutMachineConfigurationError::InvalidMemorySize);
        }
        self.vm_config.mem_size_mib = mem_size_mib_value;
    }
    Ok(())
}
/// Attach all block devices from the BlockDevicesConfig
/// If there is no root block device, no other devices are attached.The root device should be
/// the first to be attached as a way to make sure it ends up on /dev/vda
/// This function is to be called only from boot_source
fn attach_block_devices(&mut self, device_manager: &mut DeviceManager) -> Result<()> {
    // If there's no root device, do not attach any other devices
    let block_dev = &self.block_device_configs;
    let kernel_config = match self.kernel_config.as_mut() {
        Some(x) => x,
        None => return Err(Error::MissingKernelConfig),
    };
    if block_dev.has_root_block_device() {
        // this is a simple solution to add a block as a root device; should be improved
        kernel_config.cmdline.insert_str(" root=/dev/vda").unwrap();
        if block_dev.has_read_only_root() {
            kernel_config.cmdline.insert_str(" ro").unwrap();
        }
        let epoll_context = &mut self.epoll_context;
        for drive_config in self.block_device_configs.config_list.iter() {
            // open the disk image; write access is requested only for writable drives
            let root_image = OpenOptions::new()
                .read(true)
                .write(!drive_config.is_read_only)
                .open(&drive_config.path_on_host)
                .map_err(Error::RootDiskImage)?;
            let epoll_config = epoll_context.allocate_virtio_block_tokens();
            let block_box = Box::new(devices::virtio::Block::new(root_image, epoll_config)
                .map_err(Error::RootBlockDeviceNew)?);
            device_manager
                .register_mmio(block_box, &mut kernel_config.cmdline)
                .map_err(Error::RegisterBlock)?;
        }
    }
    Ok(())
}
/// Adds or updates a network interface configuration from an API request body.
pub fn put_net_device(
    &mut self,
    body: NetworkInterfaceBody,
) -> result::Result<SyncOkStatus, SyncError> {
    self.network_interface_configs.put(body)
}
// Registers a virtio-net MMIO device for every configured interface; requires
// the kernel config to be set (the cmdline gains the MMIO entries).
fn attach_net_devices(&mut self, device_manager: &mut DeviceManager) -> Result<()> {
    let kernel_config = match self.kernel_config.as_mut() {
        Some(x) => x,
        None => return Err(Error::MissingKernelConfig),
    };
    for cfg in self.network_interface_configs.iter_mut() {
        let epoll_config = self.epoll_context.allocate_virtio_net_tokens();
        // The following take_tap() should only be called once, on valid NetworkInterfaceConfig
        // objects, so the unwrap() shouldn't panic.
        let net_box = Box::new(devices::virtio::Net::new_with_tap(
            cfg.take_tap().unwrap(),
            cfg.guest_mac(),
            epoll_config,
        ).map_err(Error::NetDeviceNew)?);
        device_manager
            .register_mmio(net_box, &mut kernel_config.cmdline)
            .map_err(Error::RegisterNet)?;
    }
    Ok(())
}
/// Adds or updates a vsock device configuration from an API request body.
pub fn put_vsock_device(
    &mut self,
    body: VsockJsonBody,
) -> result::Result<SyncOkStatus, SyncError> {
    self.vsock_device_configs.put(body)
}
// Registers a vhost-vsock MMIO device for every configured vsock; requires the
// kernel config to be set (the cmdline gains the MMIO entries).
fn attach_vsock_devices(
    &mut self,
    guest_mem: &GuestMemory,
    device_manager: &mut DeviceManager,
) -> Result<()> {
    let kernel_config = match self.kernel_config.as_mut() {
        Some(x) => x,
        None => return Err(Error::MissingKernelConfig),
    };
    for cfg in self.vsock_device_configs.iter() {
        let epoll_config = self.epoll_context.allocate_virtio_vsock_tokens();
        let vsock_box = Box::new(devices::virtio::Vsock::new(
            cfg.get_guest_cid() as u64,
            guest_mem,
            epoll_config,
        ).map_err(Error::CreateVirtioVsock)?);
        device_manager
            .register_mmio(vsock_box, &mut kernel_config.cmdline)
            .map_err(Error::RegisterMMIOVsockDevice)?;
    }
    Ok(())
}
/// Stores the kernel configuration used by the next `boot_kernel` call.
pub fn configure_kernel(&mut self, kernel_config: KernelConfig) {
    self.kernel_config = Some(kernel_config);
}
/// make sure to check Result of this function and call self.stop() in case of Err
///
/// Builds the guest (memory, devices, KVM vm), loads kernel and cmdline, then
/// spawns one thread per vcpu and releases them together through a barrier.
pub fn boot_kernel(&mut self) -> Result<()> {
    if self.kernel_config.is_none() {
        return Err(Error::MissingKernelConfig);
    }
    // MiB -> bytes.
    let mem_size = self.vm_config.mem_size_mib << 20;
    let arch_mem_regions = x86_64::arch_memory_regions(mem_size);
    let guest_mem = GuestMemory::new(&arch_mem_regions).map_err(Error::GuestMemory)?;
    let vcpu_count = self.vm_config.vcpu_count;
    /* Instantiating MMIO device manager
    'mmio_base' address has to be an address which is protected by the kernel, in this case
    the start of the x86 specific gap of memory (currently hardcoded at 768MiB)
    */
    let mut device_manager =
        DeviceManager::new(guest_mem.clone(), x86_64::get_32bit_gap_start() as u64);
    // Attaching devices also appends the matching entries to the kernel cmdline.
    self.attach_block_devices(&mut device_manager)?;
    self.attach_net_devices(&mut device_manager)?;
    self.attach_vsock_devices(&guest_mem, &mut device_manager)?;
    // safe to unwrap since we've already validated it's Some()
    let kernel_config = self.kernel_config.as_mut().unwrap();
    let kvm = Kvm::new().map_err(Error::Kvm)?;
    self.vm = Some(Vm::new(&kvm, guest_mem).map_err(Error::Vm)?);
    let vm = self.vm.as_mut().unwrap();
    vm.setup().map_err(Error::VmSetup)?;
    // Apply the MMIO registration requests the device manager queued up.
    for request in device_manager.vm_requests {
        if let VmResponse::Err(e) = request.execute(vm.get_fd()) {
            return Err(Error::DeviceVmRequest(e));
        }
    }
    // This is the easy way out of consuming the value of the kernel_cmdline.
    // TODO: refactor the kernel_cmdline struct in order to have a CString instead of a String.
    let cmdline_cstring = CString::new(kernel_config.cmdline.clone()).unwrap();
    kernel_loader::load_kernel(
        vm.get_memory(),
        kernel_config.kernel_start_addr,
        &mut kernel_config.kernel_file,
    )?;
    kernel_loader::load_cmdline(
        vm.get_memory(),
        kernel_config.cmdline_addr,
        &cmdline_cstring,
    )?;
    x86_64::configure_system(
        vm.get_memory(),
        kernel_config.kernel_start_addr,
        kernel_config.cmdline_addr,
        cmdline_cstring.to_bytes().len() + 1,
        vcpu_count,
    )?;
    // Eventfd the vcpu threads use to announce guest halt/shutdown.
    let event_fd = EventFd::new().map_err(Error::EventFd)?;
    let exit_epoll_evt = self.epoll_context.add_event(event_fd, EpollDispatch::Exit)?;
    self.exit_evt = Some(exit_epoll_evt);
    let exit_evt = &self.exit_evt.as_mut().unwrap().event_fd;
    let mut io_bus = devices::Bus::new();
    let com_evt_1_3 = EventFd::new().map_err(Error::EventFd)?;
    let com_evt_2_4 = EventFd::new().map_err(Error::EventFd)?;
    // Serial console writes to the VMM's stdout; guest input is fed in run_control.
    self.stdio_serial = Some(Arc::new(Mutex::new(devices::Serial::new_out(
        com_evt_1_3.try_clone().map_err(Error::EventFd)?,
        Box::new(stdout()),
    ))));
    let stdio_serial = self.stdio_serial.as_mut().unwrap();
    self.epoll_context.enable_stdin_event()?;
    //TODO: put all these things related to setting up io bus in a struct or something
    vm.set_io_bus(
        &mut io_bus,
        &stdio_serial,
        &com_evt_1_3,
        &com_evt_2_4,
        exit_evt,
    ).map_err(Error::VmIOBus)?;
    self.vcpu_handles = Some(Vec::with_capacity(vcpu_count as usize));
    let vcpu_handles = self.vcpu_handles.as_mut().unwrap();
    self.kill_signaled = Some(Arc::new(AtomicBool::new(false)));
    let kill_signaled = self.kill_signaled.as_mut().unwrap();
    // +1 so this thread can release every vcpu at once below.
    let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count + 1) as usize));
    for cpu_id in 0..vcpu_count {
        let io_bus = io_bus.clone();
        let mmio_bus = device_manager.bus.clone();
        let kill_signaled = kill_signaled.clone();
        let vcpu_thread_barrier = vcpu_thread_barrier.clone();
        let vcpu_exit_evt = exit_evt.try_clone().map_err(Error::EventFd)?;
        let mut vcpu = Vcpu::new(cpu_id, &vm).map_err(Error::Vcpu)?;
        vcpu.configure(vcpu_count, kernel_config.kernel_start_addr, &vm)
            .map_err(Error::VcpuConfigure)?;
        vcpu_handles.push(thread::Builder::new()
            .name(format!("fc_vcpu{}", cpu_id))
            .spawn(move || {
                unsafe {
                    extern "C" fn handle_signal() {}
                    // Our signal handler does nothing and is trivially async signal safe.
                    register_signal_handler(0, handle_signal)
                        .expect("failed to register vcpu signal handler");
                }
                vcpu_thread_barrier.wait();
                loop {
                    match vcpu.run() {
                        Ok(run) => match run {
                            VcpuExit::IoIn(addr, data) => {
                                io_bus.read(addr as u64, data);
                            }
                            VcpuExit::IoOut(addr, data) => {
                                io_bus.write(addr as u64, data);
                            }
                            VcpuExit::MmioRead(addr, data) => {
                                mmio_bus.read(addr, data);
                            }
                            VcpuExit::MmioWrite(addr, data) => {
                                mmio_bus.write(addr, data);
                            }
                            VcpuExit::Hlt => {
                                info!("KVM_EXIT_HLT");
                                break;
                            }
                            VcpuExit::Shutdown => {
                                info!("KVM_EXIT_SHUTDOWN");
                                break;
                            }
                            r => {
                                error!("unexpected exit reason: {:?}", r);
                                break;
                            }
                        },
                        Err(e) => match e {
                            vstate::Error::VcpuRun(ref v) => match v.errno() {
                                // interrupted runs are expected; just re-enter
                                libc::EAGAIN | libc::EINTR => {}
                                _ => {
                                    error!("vcpu hit unknown error: {:?}", e);
                                    break;
                                }
                            },
                            _ => {
                                error!("unrecognized error type for vcpu run");
                                break;
                            }
                        },
                    }
                    if kill_signaled.load(Ordering::SeqCst) {
                        break;
                    }
                }
                // Whatever ended the loop, report this vcpu as finished.
                vcpu_exit_evt
                    .write(1)
                    .expect("failed to signal vcpu exit eventfd");
            })
            .map_err(Error::VcpuSpawn)?);
    }
    // Release all vcpu threads together.
    vcpu_thread_barrier.wait();
    Ok(())
}
// Tears the guest down: signals and joins the vcpu threads, unregisters the
// exit event, disables stdin polling, and drops the serial and the VM. The
// take()s make it safe to call after a partially failed boot.
fn stop(&mut self) -> Result<()> {
    if let Some(v) = self.kill_signaled.take() {
        v.store(true, Ordering::SeqCst);
    };
    if let Some(handles) = self.vcpu_handles.take() {
        for handle in handles {
            // kill(0) sends the signal whose no-op handler was registered in
            // boot_kernel — presumably to interrupt a blocked vcpu.run(); confirm.
            match handle.kill(0) {
                Ok(_) => {
                    if let Err(e) = handle.join() {
                        warn!("failed to join vcpu thread: {:?}", e);
                    }
                }
                Err(e) => warn!("failed to kill vcpu thread: {:?}", e),
            }
        }
    };
    if let Some(evt) = self.exit_evt.take() {
        self.epoll_context.remove_event(evt)?;
    }
    self.epoll_context.disable_stdin_event()?;
    self.stdio_serial.take();
    self.vm.take();
    //TODO:
    // - clean epoll_context:
    // - remove block, net
    Ok(())
}
/// Main event loop: dispatches epoll events (vcpu exit, stdin, device queues,
/// API notifications). With `api_enabled` the loop keeps serving the API even
/// after the guest stops; otherwise the first Exit event breaks it.
pub fn run_control(&mut self, api_enabled: bool) -> Result<()> {
    // Guest console input needs the terminal in raw mode; the deferred block
    // restores canonical mode on every exit path.
    let stdin_handle = io::stdin();
    let stdin_lock = stdin_handle.lock();
    stdin_lock.set_raw_mode().map_err(Error::Terminal)?;
    defer! {{
        if let Err(e) = stdin_lock.set_canon_mode() {
            warn!("cannot set canon mode for stdin: {:?}", e);
        }
    }};
    const EPOLL_EVENTS_LEN: usize = 100;
    let mut events = Vec::<epoll::Event>::with_capacity(EPOLL_EVENTS_LEN);
    // Safe as we pass to set_len the value passed to with_capacity.
    unsafe { events.set_len(EPOLL_EVENTS_LEN) };
    let epoll_raw_fd = self.epoll_context.epoll_raw_fd;
    // TODO: try handling of errors/failures without breaking this main loop.
    'poll: loop {
        let num_events = epoll::wait(epoll_raw_fd, -1, &mut events[..]).map_err(Error::Poll)?;
        for i in 0..num_events {
            let dispatch_idx = events[i].data() as usize;
            if let Some(dispatch_type) = self.epoll_context.dispatch_table[dispatch_idx] {
                match dispatch_type {
                    EpollDispatch::Exit => {
                        info!("vcpu requested shutdown");
                        // Drain the eventfd so it does not stay readable.
                        match self.exit_evt {
                            Some(ref ev) => {
                                ev.event_fd.read().map_err(Error::EventFd)?;
                            }
                            None => warn!("leftover exit-evt in epollcontext!"),
                        }
                        self.stop()?;
                        if !api_enabled {
                            break 'poll;
                        }
                    }
                    EpollDispatch::Stdin => {
                        let mut out = [0u8; 64];
                        match stdin_lock.read_raw(&mut out[..]) {
                            Ok(0) => {
                                // Zero-length read indicates EOF. Remove from pollables.
                                self.epoll_context.disable_stdin_event()?;
                            }
                            Err(e) => {
                                warn!("error while reading stdin: {:?}", e);
                                self.epoll_context.disable_stdin_event()?;
                            }
                            // Forward console input to the emulated serial port.
                            Ok(count) => match self.stdio_serial {
                                Some(ref mut serial) => {
                                    serial
                                        .lock()
                                        .unwrap()
                                        .queue_input_bytes(&out[..count])
                                        .map_err(Error::Serial)?;
                                }
                                None => warn!("leftover stdin event in epollcontext!"),
                            },
                        }
                    }
                    EpollDispatch::DeviceHandler(device_idx, device_token) => {
                        let handler = self.epoll_context.get_device_handler(device_idx);
                        handler.handle_event(device_token, events[i].events().bits());
                    }
                    EpollDispatch::ApiRequest => {
                        self.api_event.event_fd.read().map_err(Error::EventFd)?;
                        self.run_api_cmd().unwrap_or_else(|_| {
                            warn!("got spurious notification from api thread");
                            ()
                        });
                    }
                }
            }
        }
    }
    Ok(())
}
fn run_api_cmd(&mut self) -> Result<()> {
let request = match self.from_api.try_recv() {
Ok(t) => t,
Err(TryRecvError::Empty) => {
return Err(Error::ApiChannel);
}
Err(TryRecvError::Disconnected) => {
panic!();
}
};
match *request {
ApiRequest::Async(req) => {
match req {
AsyncRequest::StartInstance(sender) => {
let result = match self.boot_kernel() {
Ok(_) => AsyncOutcome::Ok(0),
Err(e) => {
let _ = self.stop();
AsyncOutcome::Error(format!("cannot boot kernel: {:?}", e))
}
};
sender.send(result).expect("one-shot channel closed");
}
AsyncRequest::StopInstance(sender) => {
let result = match self.stop() {
Ok(_) => AsyncOutcome::Ok(0),
Err(e) => AsyncOutcome::Error(format!(
"Errors detected during instance stop()! err: {:?}",
e
)),
};
sender.send(result).expect("one-shot channel closed");
}
};
}
ApiRequest::Sync(req) => {
match req {
SyncRequest::PutDrive(drive_description, sender) => {
match self.put_block_device(BlockDeviceConfig::from(drive_description)) {
Ok(_) => sender
.send(Box::new(PutDriveOutcome::Created))
.map_err(|_| ())
.expect("one-shot channel closed"),
Err(e) => sender
.send(Box::new(e))
.map_err(|_| ())
.expect("one-shot channel closed"),
}
}
SyncRequest::PutBootSource(boot_source_body, sender) => {
// check that the kernel path exists and it is valid
let box_response: Box<GenerateResponse + Send> = match boot_source_body
.local_image
{
Some(image) => match File::open(image.kernel_image_path) {
Ok(kernel_file) => {
let mut cmdline =
kernel_cmdline::Cmdline::new(CMDLINE_MAX_SIZE);
match cmdline.insert_str(
boot_source_body
.boot_args
.unwrap_or(String::from(DEFAULT_KERNEL_CMDLINE)),
) {
Ok(_) => {
let kernel_config = KernelConfig {
kernel_file,
cmdline,
kernel_start_addr: GuestAddress(
KERNEL_START_OFFSET,
),
cmdline_addr: GuestAddress(CMDLINE_OFFSET),
};
// if the kernel was already configure, we have an update operation
let outcome = match self.kernel_config {
Some(_) => PutBootSourceOutcome::Updated,
None => PutBootSourceOutcome::Created,
};
self.configure_kernel(kernel_config);
Box::new(outcome)
}
Err(_) => Box::new(
PutBootSourceConfigError::InvalidKernelCommandLine,
),
}
}
Err(_e) => Box::new(PutBootSourceConfigError::InvalidKernelPath),
},
None => Box::new(PutBootSourceConfigError::InvalidKernelPath),
};
sender
.send(box_response)
.map_err(|_| ())
.expect("one-shot channel closed");
}
SyncRequest::PutMachineConfiguration(machine_config_body, sender) => {
let boxed_response = match self.put_virtual_machine_configuration(
machine_config_body.vcpu_count,
machine_config_body.mem_size_mib,
) {
Ok(_) => Box::new(PutMachineConfigurationOutcome::Updated),
Err(e) => Box::new(PutMachineConfigurationOutcome::Error(e)),
};
sender
.send(boxed_response)
.map_err(|_| ())
.expect("one-shot channel closed");;
}
SyncRequest::PutNetworkInterface(body, outcome_sender) => outcome_sender
.send(Box::new(self.put_net_device(body)))
.map_err(|_| ())
.expect("one-shot channel closed"),
SyncRequest::PutVsock(body, outcome_sender) => outcome_sender
.send(Box::new(self.put_vsock_device(body)))
.map_err(|_| ())
.expect("one-shot channel closed"),
}
}
}
Ok(())
}
}
/// Spawns the VMM control thread: builds the `Vmm` and runs its event loop
/// with the API enabled.
pub fn start_vmm_thread(
    api_event_fd: EventFd,
    from_api: Receiver<Box<ApiRequest>>,
) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        // Both failures here leave the process without a functioning VMM; fatal.
        let mut vmm = Vmm::new(api_event_fd, from_api).expect("cannot create VMM");
        vmm.run_control(true).expect("VMM thread fail");
        // TODO: maybe offer through API: an instance status reporting error messages (r)
    })
}
vmm: propagate errors where possible instead of unwrap()/expect()
Document remaining unwrap()s and expect()s.
Run rustfmt 0.3.4-nightly on vmm crate.
Signed-off-by: Adrian Catangiu <43487eb7d05f4ed2cf28dd3e748821285e5f7796@amazon.com>
extern crate epoll;
extern crate libc;
#[macro_use(defer)]
extern crate scopeguard;
extern crate api_server;
extern crate devices;
extern crate kernel_loader;
extern crate kvm;
extern crate kvm_sys;
extern crate net_util;
#[macro_use]
extern crate sys_util;
extern crate x86_64;
pub mod device_config;
pub mod device_manager;
pub mod kernel_cmdline;
mod vm_control;
mod vstate;
use std::ffi::CString;
use std::fs::{File, OpenOptions};
use std::io::{self, stdout};
use std::os::unix::io::{AsRawFd, RawFd};
use std::result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{Arc, Barrier, Mutex};
use std::thread;
use api_server::ApiRequest;
use api_server::request::async::{AsyncOutcome, AsyncRequest};
use api_server::request::sync::{DriveError, Error as SyncError, GenerateResponse,
NetworkInterfaceBody, OkStatus as SyncOkStatus, PutDriveOutcome,
SyncRequest, VsockJsonBody};
use api_server::request::sync::boot_source::{PutBootSourceConfigError, PutBootSourceOutcome};
use api_server::request::sync::machine_configuration::{PutMachineConfigurationError,
PutMachineConfigurationOutcome};
use device_config::*;
use device_manager::*;
use devices::virtio;
use devices::{DeviceEventT, EpollHandler};
use kvm::*;
use sys_util::{register_signal_handler, EventFd, GuestAddress, GuestMemory, Killable, Terminal};
use vm_control::VmResponse;
use vstate::{Vcpu, Vm};
/// Guest address the kernel image is loaded at (see `boot_kernel`).
pub const KERNEL_START_OFFSET: usize = 0x200000;
/// Guest address the kernel command line is loaded at.
pub const CMDLINE_OFFSET: usize = 0x20000;
/// The command line may grow at most up to the start of the kernel image.
pub const CMDLINE_MAX_SIZE: usize = KERNEL_START_OFFSET - CMDLINE_OFFSET;
/// Used when an API boot request carries no `boot_args`.
pub const DEFAULT_KERNEL_CMDLINE: &str = "console=ttyS0 noapic reboot=k panic=1 pci=off nomodules";
/// Errors raised while setting up or running the microVM. Most variants wrap
/// the error type of the subsystem they originate from.
#[derive(Debug)]
pub enum Error {
    /// Writing boot parameters into guest memory failed.
    ConfigureSystem(x86_64::Error),
    /// epoll fd creation or epoll_ctl failed.
    EpollFd(std::io::Error),
    /// An eventfd operation failed.
    EventFd(sys_util::Error),
    /// Allocating/mapping guest memory failed.
    GuestMemory(sys_util::GuestMemoryError),
    /// Kernel image I/O error.
    Kernel(std::io::Error),
    /// A kernel command line operation failed (wraps `kernel_cmdline::Error`).
    KernelCmdLine(kernel_cmdline::Error),
    /// Loading the kernel image or cmdline into guest memory failed.
    KernelLoader(kernel_loader::Error),
    InvalidKernelPath,
    /// An operation needed a kernel config but none was set yet.
    MissingKernelConfig,
    /// `Kvm::new()` failed.
    Kvm(sys_util::Error),
    /// epoll_wait failed.
    Poll(std::io::Error),
    /// Queueing guest console input to the emulated serial failed.
    Serial(sys_util::Error),
    /// Switching the host terminal raw/canonical mode failed.
    Terminal(sys_util::Error),
    // vcpu lifecycle failures
    Vcpu(vstate::Error),
    VcpuConfigure(vstate::Error),
    VcpuSpawn(std::io::Error),
    // VM setup failures
    Vm(vstate::Error),
    VmSetup(vstate::Error),
    VmIOBus(vstate::Error),
    // device creation / registration failures
    RootDiskImage(std::io::Error),
    RootBlockDeviceNew(sys_util::Error),
    RegisterBlock(device_manager::Error),
    NetDeviceNew(devices::virtio::NetError),
    RegisterNet(device_manager::Error),
    CreateVirtioVsock(devices::virtio::vhost::Error),
    RegisterMMIOVsockDevice(device_manager::Error),
    DeviceVmRequest(sys_util::Error),
    DriveError(DriveError),
    /// The API eventfd fired but the request channel was empty.
    ApiChannel,
}
impl std::convert::From<kernel_loader::Error> for Error {
fn from(e: kernel_loader::Error) -> Error {
Error::KernelLoader(e)
}
}
impl std::convert::From<x86_64::Error> for Error {
fn from(e: x86_64::Error) -> Error {
Error::ConfigureSystem(e)
}
}
/// Shorthand result type used throughout this crate.
type Result<T> = std::result::Result<T, Error>;

/// Token stored in the epoll dispatch table, identifying the owner of a
/// triggered file descriptor.
#[derive(Debug, Clone, Copy)]
pub enum EpollDispatch {
    /// A vcpu thread wrote to the exit eventfd.
    Exit,
    /// Guest console input is available on stdin.
    Stdin,
    /// Event for device handler `usize`, device event number `DeviceEventT`.
    DeviceHandler(usize, DeviceEventT),
    /// The API thread notified its eventfd.
    ApiRequest,
}
// A device's `EpollHandler` arrives over a channel once the device is activated;
// until then only the receiving end is held.
struct MaybeHandler {
    handler: Option<Box<EpollHandler>>,
    receiver: Receiver<Box<EpollHandler>>,
}

impl MaybeHandler {
    // Starts unresolved; the handler is pulled lazily from `receiver` on first use
    // (see `EpollContext::get_device_handler`).
    fn new(receiver: Receiver<Box<EpollHandler>>) -> Self {
        MaybeHandler {
            handler: None,
            receiver,
        }
    }
}
/// Pairs an eventfd with the dispatch-table slot it was registered under, so the
/// registration can later be undone via `EpollContext::remove_event`.
pub struct EpollEvent {
    dispatch_index: u64,
    event_fd: EventFd,
}
//This should handle epoll related business from now on. A glaring shortcoming of the current
//design is the liberal passing around of raw_fds, and duping of file descriptors. This issue
//will be solved when we also implement device removal.
pub struct EpollContext {
    // Owned epoll fd; closed in the Drop impl.
    epoll_raw_fd: RawFd,
    // Slot in `dispatch_table` reserved for stdin at construction time.
    stdin_index: u64,
    // FIXME: find a different design as this does not scale. This Vec can only grow.
    // Maps the u64 payload of an epoll event back to its owner.
    dispatch_table: Vec<Option<EpollDispatch>>,
    // One entry per device, indexed by the usize in `EpollDispatch::DeviceHandler`.
    device_handlers: Vec<MaybeHandler>,
}
impl EpollContext {
    /// Creates the epoll fd and a dispatch table with slot 0 pre-reserved for
    /// stdin (initially disabled, i.e. `None`).
    pub fn new() -> Result<Self> {
        let epoll_raw_fd = epoll::create(true).map_err(Error::EpollFd)?;
        // initial capacity large enough to hold 1 exit and 1 stdin events, plus 2 queue events
        // for virtio block, another 4 for virtio net and another 2 for vsock. The total is 10
        // elements. Allowing spare capacity to avoid reallocations.
        let mut dispatch_table = Vec::with_capacity(20);
        let stdin_index = dispatch_table.len() as u64;
        dispatch_table.push(None);
        Ok(EpollContext {
            epoll_raw_fd,
            stdin_index,
            dispatch_table,
            device_handlers: Vec::with_capacity(6),
        })
    }

    /// Starts watching stdin, routing its events to `EpollDispatch::Stdin`.
    pub fn enable_stdin_event(&mut self) -> Result<()> {
        epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_ADD,
            libc::STDIN_FILENO,
            epoll::Event::new(epoll::EPOLLIN, self.stdin_index),
        ).map_err(Error::EpollFd)?;
        self.dispatch_table[self.stdin_index as usize] = Some(EpollDispatch::Stdin);
        Ok(())
    }

    /// Stops watching stdin; the reserved dispatch slot is cleared but kept.
    pub fn disable_stdin_event(&mut self) -> Result<()> {
        // ignore failure to remove from epoll, only reason for failure is
        // that stdin has closed or changed - in which case we won't get
        // any more events on the original event_fd anyway.
        let _ = epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_DEL,
            libc::STDIN_FILENO,
            epoll::Event::new(epoll::EPOLLIN, self.stdin_index),
        ).map_err(Error::EpollFd);
        self.dispatch_table[self.stdin_index as usize] = None;
        Ok(())
    }

    /// Registers `evfd` under a fresh dispatch slot carrying `token`. The returned
    /// `EpollEvent` owns the fd and remembers the slot for later removal.
    pub fn add_event(&mut self, evfd: EventFd, token: EpollDispatch) -> Result<EpollEvent> {
        let index = self.dispatch_table.len() as u64;
        epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_ADD,
            evfd.as_raw_fd(),
            epoll::Event::new(epoll::EPOLLIN, index),
        ).map_err(Error::EpollFd)?;
        self.dispatch_table.push(Some(token));
        Ok(EpollEvent {
            dispatch_index: index,
            event_fd: evfd,
        })
    }

    /// Unregisters an event previously returned by `add_event`; the slot is set
    /// to `None` but never reused (see the FIXME on `dispatch_table`).
    pub fn remove_event(&mut self, epoll_event: EpollEvent) -> Result<()> {
        epoll::ctl(
            self.epoll_raw_fd,
            epoll::EPOLL_CTL_DEL,
            epoll_event.event_fd.as_raw_fd(),
            epoll::Event::new(epoll::EPOLLIN, epoll_event.dispatch_index),
        ).map_err(Error::EpollFd)?;
        self.dispatch_table[epoll_event.dispatch_index as usize] = None;
        Ok(())
    }

    // Reserves dispatch slots for a device's events and returns the base slot
    // index plus the sender over which the device will deliver its EpollHandler.
    // NOTE(review): the loop pushes only `count - 1` entries (event numbers
    // 0..count-2), while callers pass 2/4/2 — the same queue-event counts the
    // capacity comment in `new()` cites. Confirm the last token is intentionally
    // left unallocated; otherwise this is an off-by-one.
    fn allocate_tokens(&mut self, count: usize) -> (u64, Sender<Box<EpollHandler>>) {
        let dispatch_base = self.dispatch_table.len() as u64;
        let device_idx = self.device_handlers.len();
        let (sender, receiver) = channel();
        for x in 0..count - 1 {
            self.dispatch_table.push(Some(EpollDispatch::DeviceHandler(
                device_idx,
                x as DeviceEventT,
            )));
        }
        self.device_handlers.push(MaybeHandler::new(receiver));
        (dispatch_base, sender)
    }

    /// Reserves epoll tokens for a virtio block device.
    pub fn allocate_virtio_block_tokens(&mut self) -> virtio::block::EpollConfig {
        let (dispatch_base, sender) = self.allocate_tokens(2);
        virtio::block::EpollConfig::new(dispatch_base, self.epoll_raw_fd, sender)
    }

    /// Reserves epoll tokens for a virtio net device.
    pub fn allocate_virtio_net_tokens(&mut self) -> virtio::net::EpollConfig {
        let (dispatch_base, sender) = self.allocate_tokens(4);
        virtio::net::EpollConfig::new(dispatch_base, self.epoll_raw_fd, sender)
    }

    /// Reserves epoll tokens for a vhost vsock device.
    pub fn allocate_virtio_vsock_tokens(&mut self) -> virtio::vhost::handle::VhostEpollConfig {
        let (dispatch_base, sender) = self.allocate_tokens(2);
        virtio::vhost::handle::VhostEpollConfig::new(dispatch_base, self.epoll_raw_fd, sender)
    }

    // Returns the handler for `device_idx`, receiving it from the device's
    // channel on first use.
    fn get_device_handler(&mut self, device_idx: usize) -> &mut EpollHandler {
        let ref mut maybe = self.device_handlers[device_idx];
        match maybe.handler {
            Some(ref mut v) => v.as_mut(),
            None => {
                //this should only be called in response to an epoll trigger, and the channel
                //should always contain a message after the events were added to epoll
                //by the activate() call
                maybe
                    .handler
                    .get_or_insert(maybe.receiver.try_recv().unwrap())
                    .as_mut()
            }
        }
    }
}
impl Drop for EpollContext {
    // Best-effort close of the owned epoll fd; a failure is only logged since
    // nothing useful can be done about it during drop.
    fn drop(&mut self) {
        if unsafe { libc::close(self.epoll_raw_fd) } != 0 {
            warn!("Cannot close epoll");
        }
    }
}
/// Everything needed to load and boot a guest kernel.
pub struct KernelConfig {
    // Boot command line handed to the guest kernel.
    pub cmdline: kernel_cmdline::Cmdline,
    // Open handle to the kernel image on the host.
    pub kernel_file: File,
    // Guest physical address the kernel image is loaded at.
    pub kernel_start_addr: GuestAddress,
    // Guest physical address the command line is written at.
    pub cmdline_addr: GuestAddress,
}
// This structure should replace MachineCfg; for now it is safer to duplicate the work,
// as the net support is not fully integrated.
pub struct VirtualMachineConfig {
    // Number of vCPUs; must be > 0 (enforced in put_virtual_machine_configuration).
    vcpu_count: u8,
    // Guest memory size in MiB; must be > 0 (enforced in put_virtual_machine_configuration).
    mem_size_mib: usize,
}
impl Default for VirtualMachineConfig {
    // Defaults: 1 vCPU and 128 MiB of guest memory.
    fn default() -> Self {
        VirtualMachineConfig {
            vcpu_count: 1,
            mem_size_mib: 128,
        }
    }
}
/// Owns every resource of a single microVM instance plus the channel used to
/// receive API requests.
pub struct Vmm {
    vm_config: VirtualMachineConfig,
    /// guest VM core resources
    kernel_config: Option<KernelConfig>,
    // Set while the guest runs; vCPU threads check it to know when to exit.
    kill_signaled: Option<Arc<AtomicBool>>,
    vcpu_handles: Option<Vec<thread::JoinHandle<()>>>,
    // Eventfd written by vCPU threads when they stop (registered as EpollDispatch::Exit).
    exit_evt: Option<EpollEvent>,
    stdio_serial: Option<Arc<Mutex<devices::Serial>>>,
    vm: Option<Vm>,
    /// guest VM devices
    // If there is a Root Block Device, this should be added as the first element of the list
    // This is necessary because we want the root to always be mounted on /dev/vda
    block_device_configs: BlockDeviceConfigs,
    network_interface_configs: NetworkInterfaceConfigs,
    vsock_device_configs: VsockDeviceConfigs,
    epoll_context: EpollContext,
    /// api resources
    api_event: EpollEvent,
    from_api: Receiver<Box<ApiRequest>>,
}
impl Vmm {
/// Creates a Vmm with default machine configuration and registers the API
/// notification eventfd with the internal epoll context.
pub fn new(api_event_fd: EventFd, from_api: Receiver<Box<ApiRequest>>) -> Result<Self> {
    let mut epoll_context = EpollContext::new()?;
    // Failing to wire up the API notification channel is fatal, hence expect().
    let api_event = epoll_context
        .add_event(api_event_fd, EpollDispatch::ApiRequest)
        .expect("cannot add API eventfd to epoll");
    Ok(Vmm {
        vm_config: VirtualMachineConfig::default(),
        kernel_config: None,
        kill_signaled: None,
        vcpu_handles: None,
        exit_evt: None,
        stdio_serial: None,
        vm: None,
        block_device_configs: BlockDeviceConfigs::new(),
        network_interface_configs: NetworkInterfaceConfigs::new(),
        vsock_device_configs: VsockDeviceConfigs::new(),
        epoll_context,
        api_event,
        from_api,
    })
}
/// Only call this function as part of the API.
/// If the drive_id is not yet known, a new Block Device Config is added to the
/// list; updating an existing drive is not implemented yet and returns
/// `DriveError::NotImplemented`.
pub fn put_block_device(
    &mut self,
    block_device_config: BlockDeviceConfig,
) -> result::Result<(), DriveError> {
    // An already-known id means the caller is attempting an update.
    let already_exists = self.block_device_configs
        .contains_drive_id(block_device_config.drive_id.clone());
    if already_exists {
        Err(DriveError::NotImplemented)
    } else {
        self.block_device_configs.add(block_device_config)
    }
}
/// Applies machine configuration coming from the API. Each field is optional;
/// only the provided ones are validated and stored.
///
/// # Errors
/// Returns `InvalidVcpuCount` / `InvalidMemorySize` when a provided value is 0.
/// Note: a valid vcpu_count is stored even if mem_size_mib then fails
/// validation (same as the original behavior).
pub fn put_virtual_machine_configuration(
    &mut self,
    vcpu_count: Option<u8>,
    mem_size_mib: Option<usize>,
) -> std::result::Result<(), PutMachineConfigurationError> {
    if let Some(vcpu_count_value) = vcpu_count {
        // TODO: also enforce an upper limit
        if vcpu_count_value == 0 {
            return Err(PutMachineConfigurationError::InvalidVcpuCount);
        }
        self.vm_config.vcpu_count = vcpu_count_value;
    }
    if let Some(mem_size_mib_value) = mem_size_mib {
        // TODO: add other memory checks
        if mem_size_mib_value == 0 {
            return Err(PutMachineConfigurationError::InvalidMemorySize);
        }
        self.vm_config.mem_size_mib = mem_size_mib_value;
    }
    Ok(())
}
/// Attach all block devices from the BlockDevicesConfig.
/// If there is no root block device, no other devices are attached. The root
/// device should be the first to be attached as a way to make sure it ends up
/// on /dev/vda.
/// This function is to be called only from boot_source.
fn attach_block_devices(&mut self, device_manager: &mut DeviceManager) -> Result<()> {
    // If there's no root device, do not attach any other devices
    let block_dev = &self.block_device_configs;
    let kernel_config = match self.kernel_config.as_mut() {
        Some(x) => x,
        None => return Err(Error::MissingKernelConfig),
    };
    if block_dev.has_root_block_device() {
        // this is a simple solution to add a block as a root device; should be improved
        kernel_config
            .cmdline
            .insert_str(" root=/dev/vda")
            .map_err(|e| Error::RegisterBlock(device_manager::Error::Cmdline(e)))?;
        if block_dev.has_read_only_root() {
            kernel_config
                .cmdline
                .insert_str(" ro")
                .map_err(|e| Error::RegisterBlock(device_manager::Error::Cmdline(e)))?;
        }
        // Re-borrow epoll_context separately so it can be used while
        // kernel_config (a different field of self) stays mutably borrowed.
        let epoll_context = &mut self.epoll_context;
        for drive_config in self.block_device_configs.config_list.iter() {
            // adding root blk device from file (currently always opened as read + write)
            let root_image = OpenOptions::new()
                .read(true)
                .write(!drive_config.is_read_only)
                .open(&drive_config.path_on_host)
                .map_err(Error::RootDiskImage)?;
            let epoll_config = epoll_context.allocate_virtio_block_tokens();
            let block_box = Box::new(devices::virtio::Block::new(root_image, epoll_config)
                .map_err(Error::RootBlockDeviceNew)?);
            device_manager
                .register_mmio(block_box, &mut kernel_config.cmdline)
                .map_err(Error::RegisterBlock)?;
        }
    }
    Ok(())
}
/// Only call this function as part of the API.
/// Records a network interface configuration to be attached at boot time;
/// create-vs-update semantics are delegated to NetworkInterfaceConfigs::put.
pub fn put_net_device(
    &mut self,
    body: NetworkInterfaceBody,
) -> result::Result<SyncOkStatus, SyncError> {
    self.network_interface_configs.put(body)
}
/// Registers every configured network interface as a virtio-net MMIO device.
/// Must only be called while `kernel_config` is set (i.e. from the boot path);
/// register_mmio receives the kernel cmdline, presumably to advertise the
/// device to the guest — confirm against DeviceManager.
fn attach_net_devices(&mut self, device_manager: &mut DeviceManager) -> Result<()> {
    let kernel_config = match self.kernel_config.as_mut() {
        Some(x) => x,
        None => return Err(Error::MissingKernelConfig),
    };
    for cfg in self.network_interface_configs.iter_mut() {
        let epoll_config = self.epoll_context.allocate_virtio_net_tokens();
        // The following take_tap() should only be called once, on valid NetworkInterfaceConfig
        // objects, so the unwrap() shouldn't panic.
        let net_box = Box::new(devices::virtio::Net::new_with_tap(
            cfg.take_tap().unwrap(),
            cfg.guest_mac(),
            epoll_config,
        ).map_err(Error::NetDeviceNew)?);
        device_manager
            .register_mmio(net_box, &mut kernel_config.cmdline)
            .map_err(Error::RegisterNet)?;
    }
    Ok(())
}
/// Only call this function as part of the API.
/// Records a vsock device configuration to be attached at boot time;
/// create-vs-update semantics are delegated to VsockDeviceConfigs::put.
pub fn put_vsock_device(
    &mut self,
    body: VsockJsonBody,
) -> result::Result<SyncOkStatus, SyncError> {
    self.vsock_device_configs.put(body)
}
/// Registers every configured vsock device as a virtio MMIO device.
/// Must only be called while `kernel_config` is set (i.e. from the boot path).
fn attach_vsock_devices(
    &mut self,
    guest_mem: &GuestMemory,
    device_manager: &mut DeviceManager,
) -> Result<()> {
    let kernel_config = match self.kernel_config.as_mut() {
        Some(x) => x,
        None => return Err(Error::MissingKernelConfig),
    };
    for cfg in self.vsock_device_configs.iter() {
        let epoll_config = self.epoll_context.allocate_virtio_vsock_tokens();
        let vsock_box = Box::new(devices::virtio::Vsock::new(
            cfg.get_guest_cid() as u64,
            guest_mem,
            epoll_config,
        ).map_err(Error::CreateVirtioVsock)?);
        device_manager
            .register_mmio(vsock_box, &mut kernel_config.cmdline)
            .map_err(Error::RegisterMMIOVsockDevice)?;
    }
    Ok(())
}
/// Stores the kernel boot configuration, replacing any previous one.
pub fn configure_kernel(&mut self, kernel_config: KernelConfig) {
    self.kernel_config = Some(kernel_config);
}
/// Builds guest memory, attaches devices, loads the kernel and command line,
/// then spawns one thread per vCPU and releases them all at once.
/// Make sure to check the Result of this function and call self.stop() in case of Err.
pub fn boot_kernel(&mut self) -> Result<()> {
    if self.kernel_config.is_none() {
        return Err(Error::MissingKernelConfig);
    }
    // mem_size_mib is in MiB; shifting by 20 converts to bytes.
    let mem_size = self.vm_config.mem_size_mib << 20;
    let arch_mem_regions = x86_64::arch_memory_regions(mem_size);
    let guest_mem = GuestMemory::new(&arch_mem_regions).map_err(Error::GuestMemory)?;
    let vcpu_count = self.vm_config.vcpu_count;
    /* Instantiating MMIO device manager
    'mmio_base' address has to be an address which is protected by the kernel, in this case
    the start of the x86 specific gap of memory (currently hardcoded at 768MiB)
    */
    let mut device_manager =
        DeviceManager::new(guest_mem.clone(), x86_64::get_32bit_gap_start() as u64);
    self.attach_block_devices(&mut device_manager)?;
    self.attach_net_devices(&mut device_manager)?;
    self.attach_vsock_devices(&guest_mem, &mut device_manager)?;
    // safe to unwrap since we've already validated it's Some()
    let kernel_config = self.kernel_config.as_mut().unwrap();
    let kvm = Kvm::new().map_err(Error::Kvm)?;
    self.vm = Some(Vm::new(&kvm, guest_mem).map_err(Error::Vm)?);
    // safe to unwrap since it's set just above
    let vm = self.vm.as_mut().unwrap();
    vm.setup().map_err(Error::VmSetup)?;
    // Execute the requests the device manager queued while registering
    // MMIO devices against the in-kernel VM.
    for request in device_manager.vm_requests {
        if let VmResponse::Err(e) = request.execute(vm.get_fd()) {
            return Err(Error::DeviceVmRequest(e));
        }
    }
    // This is the easy way out of consuming the value of the kernel_cmdline.
    // TODO: refactor the kernel_cmdline struct in order to have a CString instead of a String.
    let cmdline_cstring = CString::new(kernel_config.cmdline.clone())
        .map_err(|_| Error::KernelCmdLine(kernel_cmdline::Error::InvalidAscii))?;
    kernel_loader::load_kernel(
        vm.get_memory(),
        kernel_config.kernel_start_addr,
        &mut kernel_config.kernel_file,
    )?;
    kernel_loader::load_cmdline(
        vm.get_memory(),
        kernel_config.cmdline_addr,
        &cmdline_cstring,
    )?;
    // The +1 accounts for the command line's trailing NUL byte.
    x86_64::configure_system(
        vm.get_memory(),
        kernel_config.kernel_start_addr,
        kernel_config.cmdline_addr,
        cmdline_cstring.to_bytes().len() + 1,
        vcpu_count,
    )?;
    // Exit eventfd: vCPU threads write to it when they stop running.
    let event_fd = EventFd::new().map_err(Error::EventFd)?;
    let exit_epoll_evt = self.epoll_context.add_event(event_fd, EpollDispatch::Exit)?;
    self.exit_evt = Some(exit_epoll_evt);
    // safe to unwrap since it's set just above
    let exit_evt = &self.exit_evt.as_mut().unwrap().event_fd;
    let mut io_bus = devices::Bus::new();
    let com_evt_1_3 = EventFd::new().map_err(Error::EventFd)?;
    let com_evt_2_4 = EventFd::new().map_err(Error::EventFd)?;
    self.stdio_serial = Some(Arc::new(Mutex::new(devices::Serial::new_out(
        com_evt_1_3.try_clone().map_err(Error::EventFd)?,
        Box::new(stdout()),
    ))));
    // safe to unwrap since it's set just above
    let stdio_serial = self.stdio_serial.as_mut().unwrap();
    self.epoll_context.enable_stdin_event()?;
    //TODO: put all these things related to setting up io bus in a struct or something
    vm.set_io_bus(
        &mut io_bus,
        &stdio_serial,
        &com_evt_1_3,
        &com_evt_2_4,
        exit_evt,
    ).map_err(Error::VmIOBus)?;
    self.vcpu_handles = Some(Vec::with_capacity(vcpu_count as usize));
    // safe to unwrap since it's set just above
    let vcpu_handles = self.vcpu_handles.as_mut().unwrap();
    self.kill_signaled = Some(Arc::new(AtomicBool::new(false)));
    // safe to unwrap since it's set just above
    let kill_signaled = self.kill_signaled.as_mut().unwrap();
    // +1 so this thread can release all vCPU threads at once after spawning them.
    let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count + 1) as usize));
    for cpu_id in 0..vcpu_count {
        let io_bus = io_bus.clone();
        let mmio_bus = device_manager.bus.clone();
        let kill_signaled = kill_signaled.clone();
        let vcpu_thread_barrier = vcpu_thread_barrier.clone();
        let vcpu_exit_evt = exit_evt.try_clone().map_err(Error::EventFd)?;
        let mut vcpu = Vcpu::new(cpu_id, &vm).map_err(Error::Vcpu)?;
        vcpu.configure(vcpu_count, kernel_config.kernel_start_addr, &vm)
            .map_err(Error::VcpuConfigure)?;
        vcpu_handles.push(thread::Builder::new()
            .name(format!("fc_vcpu{}", cpu_id))
            .spawn(move || {
                unsafe {
                    extern "C" fn handle_signal() {}
                    // Our signal handler does nothing and is trivially async signal safe.
                    register_signal_handler(0, handle_signal)
                        .expect("failed to register vcpu signal handler");
                }
                vcpu_thread_barrier.wait();
                loop {
                    match vcpu.run() {
                        Ok(run) => match run {
                            VcpuExit::IoIn(addr, data) => {
                                io_bus.read(addr as u64, data);
                            }
                            VcpuExit::IoOut(addr, data) => {
                                io_bus.write(addr as u64, data);
                            }
                            VcpuExit::MmioRead(addr, data) => {
                                mmio_bus.read(addr, data);
                            }
                            VcpuExit::MmioWrite(addr, data) => {
                                mmio_bus.write(addr, data);
                            }
                            VcpuExit::Hlt => {
                                info!("KVM_EXIT_HLT");
                                break;
                            }
                            VcpuExit::Shutdown => {
                                info!("KVM_EXIT_SHUTDOWN");
                                break;
                            }
                            r => {
                                error!("unexpected exit reason: {:?}", r);
                                break;
                            }
                        },
                        Err(e) => match e {
                            vstate::Error::VcpuRun(ref v) => match v.errno() {
                                // Benign interruptions of KVM_RUN; just retry.
                                libc::EAGAIN | libc::EINTR => {}
                                _ => {
                                    error!("vcpu hit unknown error: {:?}", e);
                                    break;
                                }
                            },
                            _ => {
                                error!("unrecognized error type for vcpu run");
                                break;
                            }
                        },
                    }
                    if kill_signaled.load(Ordering::SeqCst) {
                        break;
                    }
                }
                // TODO: find a way to report vCPU errors to the user,
                // for now ignoring this result as there's nothing we can do
                // for the failure case.
                let _ = vcpu_exit_evt.write(1);
            })
            .map_err(Error::VcpuSpawn)?);
    }
    // Release all vCPU threads together.
    vcpu_thread_barrier.wait();
    Ok(())
}
/// Tears down the running guest: signals and joins the vCPU threads, then
/// releases the exit event, stdin epoll registration, serial and VM handles.
fn stop(&mut self) -> Result<()> {
    if let Some(v) = self.kill_signaled.take() {
        v.store(true, Ordering::SeqCst);
    };
    if let Some(handles) = self.vcpu_handles.take() {
        for handle in handles {
            // kill(0) presumably interrupts a vCPU blocked in KVM_RUN via the
            // registered no-op signal handler — confirm against sys_util::Killable.
            match handle.kill(0) {
                Ok(_) => {
                    if let Err(e) = handle.join() {
                        warn!("failed to join vcpu thread: {:?}", e);
                    }
                }
                Err(e) => warn!("failed to kill vcpu thread: {:?}", e),
            }
        }
    };
    if let Some(evt) = self.exit_evt.take() {
        self.epoll_context.remove_event(evt)?;
    }
    self.epoll_context.disable_stdin_event()?;
    self.stdio_serial.take();
    self.vm.take();
    //TODO:
    // - clean epoll_context:
    // - remove block, net
    Ok(())
}
/// Main event loop: services epoll events (vCPU exit, stdin input, device
/// activity, API requests). With `api_enabled` the loop runs forever, ready to
/// boot again after a stop; otherwise it returns once the guest shuts down.
pub fn run_control(&mut self, api_enabled: bool) -> Result<()> {
    let stdin_handle = io::stdin();
    let stdin_lock = stdin_handle.lock();
    stdin_lock.set_raw_mode().map_err(Error::Terminal)?;
    // Restore the terminal on every exit path from this function.
    defer! {{
        if let Err(e) = stdin_lock.set_canon_mode() {
            warn!("cannot set canon mode for stdin: {:?}", e);
        }
    }};
    const EPOLL_EVENTS_LEN: usize = 100;
    let mut events = Vec::<epoll::Event>::with_capacity(EPOLL_EVENTS_LEN);
    // Safe as we pass to set_len the value passed to with_capacity.
    unsafe { events.set_len(EPOLL_EVENTS_LEN) };
    let epoll_raw_fd = self.epoll_context.epoll_raw_fd;
    // TODO: try handling of errors/failures without breaking this main loop.
    'poll: loop {
        let num_events = epoll::wait(epoll_raw_fd, -1, &mut events[..]).map_err(Error::Poll)?;
        for i in 0..num_events {
            // Each event's user data is an index into the dispatch table;
            // tombstoned (None) slots are silently skipped.
            let dispatch_idx = events[i].data() as usize;
            if let Some(dispatch_type) = self.epoll_context.dispatch_table[dispatch_idx] {
                match dispatch_type {
                    EpollDispatch::Exit => {
                        info!("vcpu requested shutdown");
                        match self.exit_evt {
                            Some(ref ev) => {
                                // Drain the eventfd counter so it doesn't re-trigger.
                                ev.event_fd.read().map_err(Error::EventFd)?;
                            }
                            None => warn!("leftover exit-evt in epollcontext!"),
                        }
                        self.stop()?;
                        if !api_enabled {
                            break 'poll;
                        }
                    }
                    EpollDispatch::Stdin => {
                        let mut out = [0u8; 64];
                        match stdin_lock.read_raw(&mut out[..]) {
                            Ok(0) => {
                                // Zero-length read indicates EOF. Remove from pollables.
                                self.epoll_context.disable_stdin_event()?;
                            }
                            Err(e) => {
                                warn!("error while reading stdin: {:?}", e);
                                self.epoll_context.disable_stdin_event()?;
                            }
                            Ok(count) => match self.stdio_serial {
                                Some(ref mut serial) => {
                                    // unwrap() to panic if another thread panicked
                                    // while holding the lock
                                    serial
                                        .lock()
                                        .unwrap()
                                        .queue_input_bytes(&out[..count])
                                        .map_err(Error::Serial)?;
                                }
                                None => warn!("leftover stdin event in epollcontext!"),
                            },
                        }
                    }
                    EpollDispatch::DeviceHandler(device_idx, device_token) => {
                        let handler = self.epoll_context.get_device_handler(device_idx);
                        handler.handle_event(device_token, events[i].events().bits());
                    }
                    EpollDispatch::ApiRequest => {
                        // Drain the API eventfd, then try to service the request.
                        self.api_event.event_fd.read().map_err(Error::EventFd)?;
                        self.run_api_cmd().unwrap_or_else(|_| {
                            warn!("got spurious notification from api thread");
                            ()
                        });
                    }
                }
            }
        }
    }
    Ok(())
}
fn run_api_cmd(&mut self) -> Result<()> {
let request = match self.from_api.try_recv() {
Ok(t) => t,
Err(TryRecvError::Empty) => {
return Err(Error::ApiChannel);
}
Err(TryRecvError::Disconnected) => {
panic!();
}
};
match *request {
ApiRequest::Async(req) => {
match req {
AsyncRequest::StartInstance(sender) => {
let result = match self.boot_kernel() {
Ok(_) => AsyncOutcome::Ok(0),
Err(e) => {
let _ = self.stop();
AsyncOutcome::Error(format!("cannot boot kernel: {:?}", e))
}
};
// doing expect() to crash this thread as well if the other thread crashed
sender.send(result).expect("one-shot channel closed");
}
AsyncRequest::StopInstance(sender) => {
let result = match self.stop() {
Ok(_) => AsyncOutcome::Ok(0),
Err(e) => AsyncOutcome::Error(format!(
"Errors detected during instance stop()! err: {:?}",
e
)),
};
// doing expect() to crash this thread as well if the other thread crashed
sender.send(result).expect("one-shot channel closed");
}
};
}
ApiRequest::Sync(req) => {
match req {
SyncRequest::PutDrive(drive_description, sender) => {
match self.put_block_device(BlockDeviceConfig::from(drive_description)) {
Ok(_) =>
// doing expect() to crash this thread if the other thread crashed
sender.send(Box::new(PutDriveOutcome::Created))
.map_err(|_| ())
.expect("one-shot channel closed"),
Err(e) =>
// doing expect() to crash this thread if the other thread crashed
sender.send(Box::new(e))
.map_err(|_| ())
.expect("one-shot channel closed"),
}
}
SyncRequest::PutBootSource(boot_source_body, sender) => {
// check that the kernel path exists and it is valid
let box_response: Box<GenerateResponse + Send> = match boot_source_body
.local_image
{
Some(image) => match File::open(image.kernel_image_path) {
Ok(kernel_file) => {
let mut cmdline =
kernel_cmdline::Cmdline::new(CMDLINE_MAX_SIZE);
match cmdline.insert_str(
boot_source_body
.boot_args
.unwrap_or(String::from(DEFAULT_KERNEL_CMDLINE)),
) {
Ok(_) => {
let kernel_config = KernelConfig {
kernel_file,
cmdline,
kernel_start_addr: GuestAddress(
KERNEL_START_OFFSET,
),
cmdline_addr: GuestAddress(CMDLINE_OFFSET),
};
// if the kernel was already configure, we have an update operation
let outcome = match self.kernel_config {
Some(_) => PutBootSourceOutcome::Updated,
None => PutBootSourceOutcome::Created,
};
self.configure_kernel(kernel_config);
Box::new(outcome)
}
Err(_) => Box::new(
PutBootSourceConfigError::InvalidKernelCommandLine,
),
}
}
Err(_e) => Box::new(PutBootSourceConfigError::InvalidKernelPath),
},
None => Box::new(PutBootSourceConfigError::InvalidKernelPath),
};
sender
.send(box_response)
.map_err(|_| ())
.expect("one-shot channel closed");
}
SyncRequest::PutMachineConfiguration(machine_config_body, sender) => {
let boxed_response = match self.put_virtual_machine_configuration(
machine_config_body.vcpu_count,
machine_config_body.mem_size_mib,
) {
Ok(_) => Box::new(PutMachineConfigurationOutcome::Updated),
Err(e) => Box::new(PutMachineConfigurationOutcome::Error(e)),
};
sender
.send(boxed_response)
.map_err(|_| ())
.expect("one-shot channel closed");;
}
SyncRequest::PutNetworkInterface(body, outcome_sender) => outcome_sender
.send(Box::new(self.put_net_device(body)))
.map_err(|_| ())
.expect("one-shot channel closed"),
SyncRequest::PutVsock(body, outcome_sender) => outcome_sender
.send(Box::new(self.put_vsock_device(body)))
.map_err(|_| ())
.expect("one-shot channel closed"),
}
}
}
Ok(())
}
}
/// Spawns the VMM control thread, which owns the `Vmm` instance and services
/// API requests until the process exits.
pub fn start_vmm_thread(
    api_event_fd: EventFd,
    from_api: Receiver<Box<ApiRequest>>,
) -> thread::JoinHandle<()> {
    // Both failures below are currently irrecoverable, hence the expect()s:
    // creating the VMM and running its control loop have no fallback story.
    // TODO: maybe offer through API: an instance status reporting error messages (r)
    thread::spawn(move || {
        Vmm::new(api_event_fd, from_api)
            .expect("cannot create VMM")
            .run_control(true)
            .expect("VMM thread fail");
    })
}
|
extern crate libc;
extern crate sys_util;
extern crate kvm_sys;
extern crate kvm;
extern crate kernel_loader;
extern crate x86_64;
extern crate clap;
extern crate devices;
pub mod machine;
use std::ffi::{CStr, CString};
use std::fs::File;
use std::io::{self, stdout, Write};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex};
use std::thread;
use std::thread::{JoinHandle};
use kvm::*;
use kvm_sys::kvm_regs;
use sys_util::{register_signal_handler, EventFd, GuestAddress, GuestMemory,
Killable, Pollable, Poller, Terminal};
use machine::MachineCfg;
// Guest physical address the kernel image is loaded at (2 MiB).
const KERNEL_START_OFFSET: usize = 0x200000;
// Guest physical address the kernel command line is written at (128 KiB).
const CMDLINE_OFFSET: usize = 0x20000;
/// Top-level error type for the boot path; each variant wraps the error of
/// the subsystem it originated from.
#[derive(Debug)]
pub enum Error {
    ConfigureSystem(x86_64::Error),
    EventFd(sys_util::Error),
    GuestMemory(sys_util::GuestMemoryError),
    Irq(sys_util::Error),
    Kernel(std::io::Error),
    KernelLoader(kernel_loader::Error),
    Kvm(sys_util::Error),
    Vcpu(sys_util::Error),
    VcpuSpawn(std::io::Error),
    Vm(sys_util::Error),
}
// Allows `?` on kernel_loader results inside functions returning our Result.
impl std::convert::From<kernel_loader::Error> for Error {
    fn from(e: kernel_loader::Error) -> Error {
        Error::KernelLoader(e)
    }
}
// Allows `?` on x86_64 results inside functions returning our Result.
impl std::convert::From<x86_64::Error> for Error {
    fn from(e: x86_64::Error) -> Error {
        Error::ConfigureSystem(e)
    }
}
// Module-wide shorthand over the local Error type.
type Result<T> = std::result::Result<T, Error>;
/// One-shot boot path driven by `MachineCfg`: builds a 128 MiB guest, wires up
/// four serial ports, loads the kernel, then runs the vCPU loop until shutdown.
pub fn boot_kernel(cfg: &MachineCfg) -> Result<()> {
    // FIXME branciog@ do not hardcode the vm mem size
    // Hardcoding the vm memory size to 128MB
    let mem_size = 128 << 20;
    let arch_mem_regions = x86_64::arch_memory_regions(mem_size);
    let mut kernel_file;
    match cfg.kernel_path {
        Some(ref kernel_path) => {
            kernel_file = File::open(kernel_path.as_path())
                .map_err(Error::Kernel)?
        },
        None => {
            return Err(Error::Kernel(
                io::Error::new(io::ErrorKind::NotFound,
                "missing kernel path")))
        }
    }
    let cmdline: CString = match cfg.kernel_cmdline {
        Some(ref v) => CString::new(v.as_bytes()).unwrap(),
        _ => return Err(Error::Kernel(
            io::Error::new(io::ErrorKind::NotFound,
            "missing kernel cmdline")))
    };
    let cmdline: &CStr = &cmdline;
    let vcpu_count = 1;
    let kernel_start_addr = GuestAddress(KERNEL_START_OFFSET);
    let cmdline_addr = GuestAddress(CMDLINE_OFFSET);
    let guest_mem = GuestMemory::new(&arch_mem_regions)
        .map_err(Error::GuestMemory)?;
    let kvm = Kvm::new().map_err(Error::Kvm)?;
    let vm = Vm::new(&kvm, guest_mem).map_err(Error::Vm)?;
    // TSS placed near the top of the 32-bit address space — presumably to stay
    // out of the guest's way per KVM requirements; confirm against KVM docs.
    let tss_addr = GuestAddress(0xfffbd000);
    vm.set_tss_addr(tss_addr).map_err(Error::Vm)?;
    vm.create_pit().map_err(Error::Vm)?;
    vm.create_irq_chip().map_err(Error::Vm)?;
    kernel_loader::load_kernel(vm.get_memory(), kernel_start_addr,
        &mut kernel_file)?;
    kernel_loader::load_cmdline(vm.get_memory(), cmdline_addr, cmdline)?;
    // The +1 accounts for the command line's trailing NUL byte.
    x86_64::configure_system(vm.get_memory(),
        kernel_start_addr,
        cmdline_addr,
        cmdline.to_bytes().len() + 1,
        vcpu_count as u8)?;
    let mut io_bus = devices::Bus::new();
    let com_evt_1_3 = EventFd::new().map_err(Error::EventFd)?;
    let com_evt_2_4 = EventFd::new().map_err(Error::EventFd)?;
    let stdio_serial = Arc::new(Mutex::new(devices::Serial::new_out(
        com_evt_1_3.try_clone().map_err(Error::EventFd)?,
        Box::new(stdout()))));
    // COM1 (0x3f8) is the interactive console; COM2-COM4 are sinks.
    io_bus.insert(stdio_serial.clone(), 0x3f8, 0x8).unwrap();
    io_bus.insert(Arc::new(Mutex::new(devices::Serial::new_sink(
        com_evt_2_4.try_clone().map_err(Error::EventFd)?))), 0x2f8, 0x8)
        .unwrap();
    io_bus.insert(Arc::new(Mutex::new(devices::Serial::new_sink(
        com_evt_1_3.try_clone().map_err(Error::EventFd)?))), 0x3e8, 0x8)
        .unwrap();
    io_bus.insert(Arc::new(Mutex::new(devices::Serial::new_sink(
        com_evt_2_4.try_clone().map_err(Error::EventFd)?))), 0x2e8, 0x8)
        .unwrap();
    // Standard PC serial IRQ lines: COM1/COM3 -> IRQ4, COM2/COM4 -> IRQ3.
    vm.register_irqfd(&com_evt_1_3, 4)
        .map_err(Error::Irq)?;
    vm.register_irqfd(&com_evt_2_4, 3)
        .map_err(Error::Irq)?;
    let exit_evt = EventFd::new().map_err(Error::EventFd)?;
    let mut vcpu_handles = Vec::with_capacity(vcpu_count as usize);
    // +1 so this thread can release all vcpu threads at once after spawning them.
    let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count + 1) as usize));
    let kill_signaled = Arc::new(AtomicBool::new(false));
    for cpu_id in 0..vcpu_count {
        let io_bus = io_bus.clone();
        let kill_signaled = kill_signaled.clone();
        let vcpu_thread_barrier = vcpu_thread_barrier.clone();
        let vcpu_exit_evt = exit_evt.try_clone().map_err(Error::EventFd)?;
        let vcpu = Vcpu::new(cpu_id as libc::c_ulong, &kvm, &vm).map_err(Error::Vcpu)?;
        x86_64::configure_vcpu(vm.get_memory(), kernel_start_addr, &kvm,
            &vcpu, cpu_id as u64, vcpu_count as u64)?;
        vcpu_handles.push(thread::Builder::new()
            .name(format!("fc_vcpu{}", cpu_id))
            .spawn(move || {
                unsafe {
                    extern "C" fn handle_signal() {}
                    // Our signal handler does nothing and is trivially async signal safe.
                    register_signal_handler(0, handle_signal)
                        .expect("failed to register vcpu signal handler");
                }
                vcpu_thread_barrier.wait();
                loop {
                    match vcpu.run() {
                        Ok(run) => match run {
                            VcpuExit::IoIn(addr, data) => {
                                io_bus.read(addr as u64, data);
                            },
                            VcpuExit::IoOut(addr, data) => {
                                io_bus.write(addr as u64, data);
                            },
                            VcpuExit::MmioRead(_, _) => {},
                            VcpuExit::MmioWrite(_, _) => {},
                            VcpuExit::Hlt => {
                                println!("KVM_EXIT_HLT");
                                break;
                            },
                            VcpuExit::Shutdown => {
                                println!("KVM_EXIT_SHUTDOWN");
                                break;
                            },
                            r => {
                                println!("unexpected exit reason: {:?}", r);
                                break;
                            }
                        },
                        Err(e) => {
                            match e.errno() {
                                // Benign interruptions of KVM_RUN; just retry.
                                libc::EAGAIN | libc::EINTR => {},
                                _ => {
                                    println!("vcpu hit unknown error: {:?}", e);
                                    break;
                                }
                            }
                        }
                    }
                    if kill_signaled.load(Ordering::SeqCst) {
                        break;
                    }
                }
                vcpu_exit_evt
                    .write(1)
                    .expect("failed to signal vcpu exit eventfd");
            }).map_err(Error::VcpuSpawn)?);
    }
    // Release all vcpu threads together.
    vcpu_thread_barrier.wait();
    run_control(stdio_serial, exit_evt, kill_signaled, vcpu_handles)
}
/// Polls the vcpu exit eventfd and stdin: forwards console bytes to the guest
/// serial, and on shutdown signals the vcpu threads, joins them and restores
/// the terminal to canonical mode.
fn run_control(stdio_serial: Arc<Mutex<devices::Serial>>,
    exit_evt: EventFd,
    kill_signaled: Arc<AtomicBool>,
    vcpu_handles: Vec<JoinHandle<()>>) -> Result<()> {
    // Poller token ids.
    const EXIT: u32 = 0;
    const STDIN: u32 = 1;
    let stdin_handle = io::stdin();
    let stdin_lock = stdin_handle.lock();
    stdin_lock
        .set_raw_mode()
        .expect("failed to set terminal raw mode");
    let mut pollables = Vec::new();
    pollables.push((EXIT, &exit_evt as &Pollable));
    pollables.push((STDIN, &stdin_lock as &Pollable));
    let mut poller = Poller::new(pollables.len());
    'poll: loop {
        let tokens = match poller.poll(&pollables[..]) {
            Ok(v) => v,
            Err(e) => {
                println!("failed to poll: {:?}", e);
                break;
            }
        };
        for &token in tokens {
            match token {
                EXIT => {
                    println!("vcpu requested shutdown");
                    break 'poll;
                },
                STDIN => {
                    let mut out = [0u8; 64];
                    match stdin_lock.read_raw(&mut out[..]) {
                        Ok(0) => {
                            // Zero-length read indicates EOF. Remove from pollables.
                            pollables.retain(|&pollable| pollable.0 != STDIN);
                        },
                        Err(e) => {
                            println!("error while reading stdin: {:?}", e);
                            pollables.retain(|&pollable| pollable.0 != STDIN);
                        },
                        Ok(count) => {
                            stdio_serial
                                .lock()
                                .unwrap()
                                .queue_input_bytes(&out[..count])
                                .expect("failed to queue bytes into serial port");
                        },
                    }
                }
                _ => {}
            }
        }
    }
    // Shutdown: flag the vcpu loops, signal each thread (presumably to break
    // it out of KVM_RUN — confirm Killable semantics), then join.
    kill_signaled.store(true, Ordering::SeqCst);
    for handle in vcpu_handles {
        match handle.kill(0) {
            Ok(_) => {
                if let Err(e) = handle.join() {
                    println!("failed to join vcpu thread: {:?}", e);
                }
            },
            Err(e) => println!("failed to kill vcpu thread: {:?}", e),
        }
    }
    stdin_lock
        .set_canon_mode()
        .expect("failed to restore canonical mode for terminal");
    Ok(())
}
/// Smoke test: runs a tiny real-mode program that adds %bl to %al (2+2) and
/// writes the resulting digit plus a newline to port 0x3f8, then halts.
pub fn run_x86_code() {
    // This example based on https://lwn.net/Articles/658511/
    let code = [
        0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */
        0x00, 0xd8, /* add %bl, %al */
        0x04, '0' as u8, /* add $'0', %al */
        0xee, /* out %al, (%dx) */
        0xb0, '\n' as u8, /* mov $'\n', %al */
        0xee, /* out %al, (%dx) */
        0xf4, /* hlt */
    ];
    let mem_size = 0x1000;
    let load_addr = GuestAddress(0x1000);
    let mem = GuestMemory::new(&vec![(load_addr, mem_size)]).unwrap();
    let kvm = Kvm::new().expect("new kvm failed");
    let vm = Vm::new(&kvm, mem).expect("new vm failed");
    let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
    vm.get_memory()
        .write_slice_at_addr(&code, load_addr)
        .expect("Writing code to memory failed.");
    let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
    // The reset cs base/selector are nonzero; rebase cs to 0 so rip = 0x1000
    // addresses our code linearly.
    assert_ne!(vcpu_sregs.cs.base, 0);
    assert_ne!(vcpu_sregs.cs.selector, 0);
    vcpu_sregs.cs.base = 0;
    vcpu_sregs.cs.selector = 0;
    vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
    let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() };
    vcpu_regs.rip = 0x1000;
    vcpu_regs.rax = 2;
    vcpu_regs.rbx = 2;
    // Bit 1 of rflags is architecturally reserved and must be set.
    vcpu_regs.rflags = 2;
    vcpu.set_regs(&vcpu_regs).expect("set regs failed");
    loop {
        match vcpu.run().expect("run failed") {
            VcpuExit::IoOut(0x3f8, data) => {
                assert_eq!(data.len(), 1);
                io::stdout().write(data).unwrap();
            },
            VcpuExit::Hlt => {
                io::stdout().write(b"KVM_EXIT_HLT\n").unwrap();
                break
            },
            r => panic!("unexpected exit reason: {:?}", r),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Requires access to /dev/kvm on the host; exercises the full smoke test.
    #[test]
    fn run_code() {
        run_x86_code();
    }
}
vmm: fix style manually as cargo fmt messes up the code
Signed-off-by: Constantin Musca <e6b0bba089ab6d3e8fde8b62faae63e6b21d7743@amazon.com>
extern crate libc;
extern crate sys_util;
extern crate kvm_sys;
extern crate kvm;
extern crate kernel_loader;
extern crate x86_64;
extern crate clap;
extern crate devices;
pub mod machine;
use std::ffi::{CStr, CString};
use std::fs::File;
use std::io::{self, stdout, Write};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex};
use std::thread;
use std::thread::{JoinHandle};
use kvm::*;
use kvm_sys::kvm_regs;
use sys_util::{register_signal_handler, EventFd, GuestAddress, GuestMemory,
Killable, Pollable, Poller, Terminal};
use machine::MachineCfg;
// Guest physical address the kernel image is loaded at (2 MiB).
const KERNEL_START_OFFSET: usize = 0x200000;
// Guest physical address the kernel command line is written at (128 KiB).
const CMDLINE_OFFSET: usize = 0x20000;
/// Top-level error type for the boot path; each variant wraps the error of
/// the subsystem it originated from.
#[derive(Debug)]
pub enum Error {
    ConfigureSystem(x86_64::Error),
    EventFd(sys_util::Error),
    GuestMemory(sys_util::GuestMemoryError),
    Irq(sys_util::Error),
    Kernel(std::io::Error),
    KernelLoader(kernel_loader::Error),
    Kvm(sys_util::Error),
    Vcpu(sys_util::Error),
    VcpuSpawn(std::io::Error),
    Vm(sys_util::Error),
}
// Allows `?` on kernel_loader results inside functions returning our Result.
impl std::convert::From<kernel_loader::Error> for Error {
    fn from(e: kernel_loader::Error) -> Error {
        Error::KernelLoader(e)
    }
}
// Allows `?` on x86_64 results inside functions returning our Result.
impl std::convert::From<x86_64::Error> for Error {
    fn from(e: x86_64::Error) -> Error {
        Error::ConfigureSystem(e)
    }
}
// Module-wide shorthand over the local Error type.
type Result<T> = std::result::Result<T, Error>;
/// One-shot boot path driven by `MachineCfg`: builds a 128 MiB guest, wires up
/// four serial ports, loads the kernel, then runs the vCPU loop until shutdown.
pub fn boot_kernel(cfg: &MachineCfg) -> Result<()> {
    // FIXME branciog@ do not hardcode the vm mem size
    // Hardcoding the vm memory size to 128MB
    let mem_size = 128 << 20;
    let arch_mem_regions = x86_64::arch_memory_regions(mem_size);
    let mut kernel_file;
    match cfg.kernel_path {
        Some(ref kernel_path) => {
            kernel_file = File::open(kernel_path.as_path())
                .map_err(Error::Kernel)?
        },
        None => {
            return Err(Error::Kernel(
                io::Error::new(io::ErrorKind::NotFound,
                "missing kernel path")))
        }
    }
    let cmdline: CString = match cfg.kernel_cmdline {
        Some(ref v) => CString::new(v.as_bytes()).unwrap(),
        _ => return Err(Error::Kernel(
            io::Error::new(io::ErrorKind::NotFound,
            "missing kernel cmdline")))
    };
    let cmdline: &CStr = &cmdline;
    let vcpu_count = 1;
    let kernel_start_addr = GuestAddress(KERNEL_START_OFFSET);
    let cmdline_addr = GuestAddress(CMDLINE_OFFSET);
    let guest_mem = GuestMemory::new(&arch_mem_regions)
        .map_err(Error::GuestMemory)?;
    let kvm = Kvm::new().map_err(Error::Kvm)?;
    let vm = Vm::new(&kvm, guest_mem).map_err(Error::Vm)?;
    // TSS placed near the top of the 32-bit address space — presumably to stay
    // out of the guest's way per KVM requirements; confirm against KVM docs.
    let tss_addr = GuestAddress(0xfffbd000);
    vm.set_tss_addr(tss_addr).map_err(Error::Vm)?;
    vm.create_pit().map_err(Error::Vm)?;
    vm.create_irq_chip().map_err(Error::Vm)?;
    kernel_loader::load_kernel(vm.get_memory(), kernel_start_addr,
        &mut kernel_file)?;
    kernel_loader::load_cmdline(vm.get_memory(), cmdline_addr, cmdline)?;
    // The +1 accounts for the command line's trailing NUL byte.
    x86_64::configure_system(vm.get_memory(),
        kernel_start_addr,
        cmdline_addr,
        cmdline.to_bytes().len() + 1,
        vcpu_count as u8)?;
    let mut io_bus = devices::Bus::new();
    let com_evt_1_3 = EventFd::new().map_err(Error::EventFd)?;
    let com_evt_2_4 = EventFd::new().map_err(Error::EventFd)?;
    let stdio_serial = Arc::new(Mutex::new(devices::Serial::new_out(
        com_evt_1_3.try_clone().map_err(Error::EventFd)?,
        Box::new(stdout()))));
    // COM1 (0x3f8) is the interactive console; COM2-COM4 are sinks.
    io_bus.insert(stdio_serial.clone(), 0x3f8, 0x8).unwrap();
    io_bus.insert(Arc::new(Mutex::new(devices::Serial::new_sink(
        com_evt_2_4.try_clone().map_err(Error::EventFd)?))), 0x2f8, 0x8)
        .unwrap();
    io_bus.insert(Arc::new(Mutex::new(devices::Serial::new_sink(
        com_evt_1_3.try_clone().map_err(Error::EventFd)?))), 0x3e8, 0x8)
        .unwrap();
    io_bus.insert(Arc::new(Mutex::new(devices::Serial::new_sink(
        com_evt_2_4.try_clone().map_err(Error::EventFd)?))), 0x2e8, 0x8)
        .unwrap();
    // Standard PC serial IRQ lines: COM1/COM3 -> IRQ4, COM2/COM4 -> IRQ3.
    vm.register_irqfd(&com_evt_1_3, 4)
        .map_err(Error::Irq)?;
    vm.register_irqfd(&com_evt_2_4, 3)
        .map_err(Error::Irq)?;
    let exit_evt = EventFd::new().map_err(Error::EventFd)?;
    let mut vcpu_handles = Vec::with_capacity(vcpu_count as usize);
    // +1 so this thread can release all vcpu threads at once after spawning them.
    let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count + 1) as usize));
    let kill_signaled = Arc::new(AtomicBool::new(false));
    for cpu_id in 0..vcpu_count {
        let io_bus = io_bus.clone();
        let kill_signaled = kill_signaled.clone();
        let vcpu_thread_barrier = vcpu_thread_barrier.clone();
        let vcpu_exit_evt = exit_evt.try_clone().map_err(Error::EventFd)?;
        let vcpu = Vcpu::new(cpu_id as libc::c_ulong, &kvm, &vm).map_err(Error::Vcpu)?;
        x86_64::configure_vcpu(vm.get_memory(), kernel_start_addr, &kvm,
            &vcpu, cpu_id as u64, vcpu_count as u64)?;
        vcpu_handles.push(thread::Builder::new()
            .name(format!("fc_vcpu{}", cpu_id))
            .spawn(move || {
                unsafe {
                    extern "C" fn handle_signal() {}
                    // Our signal handler does nothing and is trivially async signal safe.
                    register_signal_handler(0, handle_signal)
                        .expect("failed to register vcpu signal handler");
                }
                vcpu_thread_barrier.wait();
                loop {
                    match vcpu.run() {
                        Ok(run) => match run {
                            VcpuExit::IoIn(addr, data) => {
                                io_bus.read(addr as u64, data);
                            },
                            VcpuExit::IoOut(addr, data) => {
                                io_bus.write(addr as u64, data);
                            },
                            VcpuExit::MmioRead(_, _) => {},
                            VcpuExit::MmioWrite(_, _) => {},
                            VcpuExit::Hlt => {
                                println!("KVM_EXIT_HLT");
                                break;
                            },
                            VcpuExit::Shutdown => {
                                println!("KVM_EXIT_SHUTDOWN");
                                break;
                            },
                            r => {
                                println!("unexpected exit reason: {:?}", r);
                                break;
                            }
                        },
                        Err(e) => {
                            match e.errno() {
                                // Benign interruptions of KVM_RUN; just retry.
                                libc::EAGAIN | libc::EINTR => {},
                                _ => {
                                    println!("vcpu hit unknown error: {:?}", e);
                                    break;
                                }
                            }
                        }
                    }
                    if kill_signaled.load(Ordering::SeqCst) {
                        break;
                    }
                }
                vcpu_exit_evt
                    .write(1)
                    .expect("failed to signal vcpu exit eventfd");
            }).map_err(Error::VcpuSpawn)?);
    }
    // Release all vcpu threads together.
    vcpu_thread_barrier.wait();
    run_control(stdio_serial, exit_evt, kill_signaled, vcpu_handles)
}
/// Control loop for the VMM main thread: forwards host stdin bytes to the
/// guest's serial device and waits for any vcpu to signal shutdown via
/// `exit_evt`. On exit it sets `kill_signaled`, kicks and joins every vcpu
/// thread, and restores the terminal to canonical mode.
fn run_control(stdio_serial: Arc<Mutex<devices::Serial>>,
               exit_evt: EventFd,
               kill_signaled: Arc<AtomicBool>,
               vcpu_handles: Vec<JoinHandle<()>>) -> Result<()> {
    // Poll tokens identifying which pollable fired.
    const EXIT: u32 = 0;
    const STDIN: u32 = 1;
    let stdin_handle = io::stdin();
    let stdin_lock = stdin_handle.lock();
    // Raw mode so guest software receives keystrokes unbuffered and unechoed.
    stdin_lock
        .set_raw_mode()
        .expect("failed to set terminal raw mode");
    let mut pollables = Vec::new();
    pollables.push((EXIT, &exit_evt as &Pollable));
    pollables.push((STDIN, &stdin_lock as &Pollable));
    let mut poller = Poller::new(pollables.len());
    'poll: loop {
        let tokens = match poller.poll(&pollables[..]) {
            Ok(v) => v,
            Err(e) => {
                println!("failed to poll: {:?}", e);
                break;
            }
        };
        for &token in tokens {
            match token {
                EXIT => {
                    println!("vcpu requested shutdown");
                    break 'poll;
                },
                STDIN => {
                    let mut out = [0u8; 64];
                    match stdin_lock.read_raw(&mut out[..]) {
                        Ok(0) => {
                            // Zero-length read indicates EOF. Remove from pollables.
                            pollables.retain(|&pollable| pollable.0 != STDIN);
                        },
                        Err(e) => {
                            println!("error while reading stdin: {:?}", e);
                            pollables.retain(|&pollable| pollable.0 != STDIN);
                        },
                        Ok(count) => {
                            stdio_serial
                                .lock()
                                .unwrap()
                                .queue_input_bytes(&out[..count])
                                .expect("failed to queue bytes into serial port");
                        },
                    }
                }
                _ => {}
            }
        }
    }
    // Tell vcpu loops to stop at the next exit, then interrupt each thread.
    kill_signaled.store(true, Ordering::SeqCst);
    for handle in vcpu_handles {
        // NOTE(review): kill(0) appears to nudge a vcpu blocked in KVM_RUN so
        // it observes kill_signaled — confirm the signal semantics in the
        // thread extension trait.
        match handle.kill(0) {
            Ok(_) => {
                if let Err(e) = handle.join() {
                    println!("failed to join vcpu thread: {:?}", e);
                }
            },
            Err(e) => println!("failed to kill vcpu thread: {:?}", e),
        }
    }
    stdin_lock
        .set_canon_mode()
        .expect("failed to restore canonical mode for terminal");
    Ok(())
}
/// Boots a minimal real-mode guest that computes 2 + 2 and emits the digit
/// plus a newline through port 0x3f8, then halts. Requires /dev/kvm.
///
/// Panics (via `expect`/`unwrap`) on any KVM setup failure or unexpected
/// vcpu exit; intended to be driven from the test below.
pub fn run_x86_code() {
    // This example based on https://lwn.net/Articles/658511/
    let code = [
        /* mov $0x3f8, %dx */
        0xba,
        0xf8,
        0x03,
        /* add %bl, %al */
        0x00,
        0xd8,
        /* add $'0', %al */
        0x04,
        b'0', // byte literal is clearer than `'0' as u8`
        /* out %al, (%dx) */
        0xee,
        /* mov $'\n', %al */
        0xb0,
        b'\n',
        /* out %al, (%dx) */
        0xee,
        /* hlt */
        0xf4,
    ];
    let mem_size = 0x1000;
    let load_addr = GuestAddress(0x1000);
    let mem = GuestMemory::new(&vec![(load_addr, mem_size)]).unwrap();
    let kvm = Kvm::new().expect("new kvm failed");
    let vm = Vm::new(&kvm, mem).expect("new vm failed");
    let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
    vm.get_memory()
        .write_slice_at_addr(&code, load_addr)
        .expect("Writing code to memory failed.");
    // Use a flat, zero-based code segment so RIP is a plain physical address.
    let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
    assert_ne!(vcpu_sregs.cs.base, 0);
    assert_ne!(vcpu_sregs.cs.selector, 0);
    vcpu_sregs.cs.base = 0;
    vcpu_sregs.cs.selector = 0;
    vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
    let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() };
    vcpu_regs.rip = 0x1000;
    vcpu_regs.rax = 2;
    vcpu_regs.rbx = 2;
    // Bit 1 of RFLAGS is reserved and must be set.
    vcpu_regs.rflags = 2;
    vcpu.set_regs(&vcpu_regs).expect("set regs failed");
    loop {
        match vcpu.run().expect("run failed") {
            VcpuExit::IoOut(0x3f8, data) => {
                assert_eq!(data.len(), 1);
                // write_all: a bare `write` may perform a short write and
                // silently drop guest output.
                io::stdout().write_all(data).unwrap();
            },
            VcpuExit::Hlt => {
                io::stdout().write_all(b"KVM_EXIT_HLT\n").unwrap();
                break
            },
            r => panic!("unexpected exit reason: {:?}", r),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // End-to-end smoke test: boots the tiny guest in `run_x86_code`.
    // Requires access to /dev/kvm on the host running the tests.
    #[test]
    fn run_code() {
        run_x86_code();
    }
}
|
extern crate rspgod;
extern crate postgres;
extern crate rustc_serialize;
use postgres::{Connection, SslMode};
use rspgod::types::{Change};
use rustc_serialize::json;
/// In-memory mirror of a `test_table` row; see `create_database` for the schema.
#[derive(Debug)]
struct TestRecord {
    id: i32,      // primary key
    name: String, // free-form text column
}
/// Smoke test: inserting rows and reading them back must not error.
#[test]
fn sanity_test() {
    with_clean_database(|c| {
        let records = [
            TestRecord { id: 1, name: "Michael Baker".to_string() },
            TestRecord { id: 2, name: "Josh Cheek".to_string() },
        ];
        for record in &records {
            create_record(c, record);
        }
        fetch_records(c);
    });
}
/// A single INSERT should surface exactly one change, decoding to `Change::Insert`.
#[test]
fn basic_insert() {
    with_slot(|c| {
        create_record(c, &TestRecord { id: 1, name: "Michael Baker".to_string() });
        let updates = fetch_updates(c);
        assert_eq!(updates.len(), 1);
        let change: Change = json::decode(&updates[0][..]).unwrap();
        let is_insert = match change {
            Change::Insert { .. } => true,
            _ => false,
        };
        assert!(is_insert);
    });
}
/// INSERT then DELETE yields two changes; the second decodes to `Change::Delete`.
#[test]
fn basic_delete() {
    with_slot(|c| {
        create_record(c, &TestRecord { id: 1, name: "Michael Baker".to_string() });
        delete_record(c, 1);
        let updates = fetch_updates(c);
        assert_eq!(updates.len(), 2);
        let change: Change = json::decode(&updates[1][..]).unwrap();
        let is_delete = match change {
            Change::Delete { .. } => true,
            _ => false,
        };
        assert!(is_delete);
    });
}
/// INSERT then UPDATE yields two changes; the second decodes to `Change::Update`.
#[test]
fn basic_update() {
    with_slot(|c| {
        create_record(c, &TestRecord { id: 1, name: "Michael Baker".to_string() });
        update_record(c, TestRecord { id: 1, name: "Bichael Maker".to_string() });
        let updates = fetch_updates(c);
        assert_eq!(updates.len(), 2);
        let change: Change = json::decode(&updates[1][..]).unwrap();
        let is_update = match change {
            Change::Update { .. } => true,
            _ => false,
        };
        assert!(is_update);
    });
}
//
// [TODO] I want to move a lot of these into a utility module when I can figure out how to do that
//
/// Peeks (without consuming) all pending logical-decoding changes on the
/// replication slot named `slot` and returns their decoded payloads.
fn fetch_updates(c: &Connection) -> Vec<String> {
    let stmt = c.prepare("SELECT * FROM pg_logical_slot_peek_changes('slot', NULL, NULL)").unwrap();
    let mut result = vec![];
    for r in stmt.query(&[]).unwrap() {
        // Column 2 of pg_logical_slot_peek_changes is the change data itself.
        result.push(r.get(2));
    }
    result
}
/// Creates the logical replication slot `slot` with the `thingy` output plugin.
fn create_slot(c: &Connection) {
    c.prepare("select * from pg_create_logical_replication_slot('slot', 'thingy')")
        .unwrap()
        .execute(&[])
        .unwrap();
}
/// Drops the replication slot `slot`, ignoring failure (the slot may not
/// exist yet on a fresh database).
fn drop_slot(c: &Connection) {
    let stmt = c.prepare("select pg_drop_replication_slot('slot')").unwrap();
    // Best-effort cleanup: a single-arm `match _ => {}` is just an obscure
    // way to discard the Result, so discard it explicitly.
    let _ = stmt.execute(&[]);
}
/// Runs `f` against a clean database with a freshly created replication
/// slot, dropping the slot again afterwards.
fn with_slot<F>(f: F) where F: Fn(&Connection) -> () {
    with_clean_database(|conn| {
        // Recreate the slot from scratch so stale changes never leak in.
        drop_slot(conn);
        create_slot(conn);
        f(conn);
        drop_slot(conn);
    });
}
/// Runs `f` against a freshly reset `test_table`, dropping it afterwards.
fn with_clean_database<F>(f: F) where F: Fn(&Connection) -> () {
    let conn = connection();
    reset_database(&conn);
    f(&conn);
    drop_database(&conn);
}
/// Drops and recreates `test_table` so each test starts from a known state.
fn reset_database(c: &Connection) {
    // `c` is already a `&Connection`; `&c` created a needless `&&Connection`
    // that only compiled via deref coercion.
    drop_database(c);
    create_database(c);
}
/// Creates the `test_table` schema used by every test.
fn create_database(c: &Connection) {
    c.prepare("create table test_table (id int primary key, name text)")
        .unwrap()
        .execute(&[])
        .unwrap();
}
/// Loads every row of `test_table` into `TestRecord` values.
fn fetch_records(c: &Connection) -> Vec<TestRecord> {
    let stmt = c.prepare("select id, name from test_table").unwrap();
    let results = stmt.query(&[]).unwrap();
    results.iter().map(|r| {
        TestRecord {
            id: r.get(0),   // column 0: id
            name: r.get(1), // column 1: name
        }
    }).collect()
}
/// Inserts `r` into `test_table`.
fn create_record(c: &Connection, r: &TestRecord) {
    c.prepare("insert into test_table (id, name) values ($1, $2)")
        .unwrap()
        .execute(&[&r.id, &r.name])
        .unwrap();
}
/// Deletes the row whose primary key equals `id`.
fn delete_record(c: &Connection, id: i32) {
    c.prepare("delete from test_table where id = $1")
        .unwrap()
        .execute(&[&id])
        .unwrap();
}
/// Rewrites the `name` column of the row matching `new_record.id`.
fn update_record(c: &Connection, new_record: TestRecord) {
    c.prepare("update test_table set name = $2 where id = $1")
        .unwrap()
        .execute(&[&new_record.id, &new_record.name])
        .unwrap();
}
/// Removes `test_table` if present; safe to call on a fresh database.
fn drop_database(c: &Connection) {
    c.prepare("drop table if exists test_table")
        .unwrap()
        .execute(&[])
        .unwrap();
}
/// Reads the test-database connection string from the `POSTGRES_URL`
/// environment variable.
///
/// Panics with an explanatory message when the variable is unset or not
/// valid unicode, since no test can run without a database.
fn connection_string() -> String {
    match std::env::var("POSTGRES_URL") {
        Ok(val) => val,
        // `panic!` formats its arguments itself; wrapping the message in an
        // inner `format!` was redundant.
        Err(e) => panic!("You must set the POSTGRES_URL environment variable to point to a running Postgres test database when running automated tests: {}", e),
    }
}
/// Opens a connection to the test database named by `POSTGRES_URL`.
///
/// Panics with a descriptive message when the connection cannot be
/// established, since no test can proceed without it.
fn connection() -> Connection {
    match Connection::connect(&connection_string()[..], &SslMode::None) {
        Ok(c) => c,
        // `panic!` accepts format arguments directly; the inner `format!`
        // was redundant.
        Err(e) => panic!("Could not connect to the test database: {}", e),
    }
}
Removes the dependency on rspgod to make the tests faster
extern crate postgres;
extern crate rustc_serialize;
use postgres::{Connection, SslMode};
use rustc_serialize::json::Json;
/// In-memory mirror of a `test_table` row; see `create_database` for the schema.
#[derive(Debug)]
struct TestRecord {
    id: i32,      // primary key
    name: String, // free-form text column
}
/// Smoke test: inserting rows and reading them back must not error.
#[test]
fn sanity_test() {
    with_clean_database(|c| {
        let records = [
            TestRecord { id: 1, name: "Michael Baker".to_string() },
            TestRecord { id: 2, name: "Josh Cheek".to_string() },
        ];
        for record in &records {
            create_record(c, record);
        }
        fetch_records(c);
    });
}
/// A single INSERT should surface one change whose JSON `variant` is "Insert".
#[test]
fn basic_insert() {
    with_slot(|c| {
        create_record(c, &TestRecord { id: 1, name: "Michael Baker".to_string() });
        let updates = fetch_updates(c);
        assert_eq!(updates.len(), 1);
        let data = Json::from_str(&updates[0][..]).unwrap();
        let variant = data.as_object().unwrap()
            .get("variant").unwrap()
            .as_string().unwrap();
        assert_eq!(variant, "Insert");
    });
}
/// INSERT then DELETE yields two changes; the second has `variant` "Delete".
#[test]
fn basic_delete() {
    with_slot(|c| {
        create_record(c, &TestRecord { id: 1, name: "Michael Baker".to_string() });
        delete_record(c, 1);
        let updates = fetch_updates(c);
        assert_eq!(updates.len(), 2);
        let data = Json::from_str(&updates[1][..]).unwrap();
        let variant = data.as_object().unwrap()
            .get("variant").unwrap()
            .as_string().unwrap();
        assert_eq!(variant, "Delete");
    });
}
/// INSERT then UPDATE yields two changes; the second has `variant` "Update".
#[test]
fn basic_update() {
    with_slot(|c| {
        create_record(c, &TestRecord { id: 1, name: "Michael Baker".to_string() });
        update_record(c, TestRecord { id: 1, name: "Bichael Maker".to_string() });
        let updates = fetch_updates(c);
        assert_eq!(updates.len(), 2);
        let data = Json::from_str(&updates[1][..]).unwrap();
        let variant = data.as_object().unwrap()
            .get("variant").unwrap()
            .as_string().unwrap();
        assert_eq!(variant, "Update");
    });
}
//
// [TODO] I want to move a lot of these into a utility module when I can figure out how to do that
//
/// Peeks (without consuming) all pending logical-decoding changes on the
/// replication slot named `slot` and returns their decoded payloads.
fn fetch_updates(c: &Connection) -> Vec<String> {
    let stmt = c.prepare("SELECT * FROM pg_logical_slot_peek_changes('slot', NULL, NULL)").unwrap();
    let mut result = vec![];
    for r in stmt.query(&[]).unwrap() {
        // Column 2 of pg_logical_slot_peek_changes is the change data itself.
        result.push(r.get(2));
    }
    result
}
/// Creates the logical replication slot `slot` with the `thingy` output plugin.
fn create_slot(c: &Connection) {
    c.prepare("select * from pg_create_logical_replication_slot('slot', 'thingy')")
        .unwrap()
        .execute(&[])
        .unwrap();
}
/// Drops the replication slot `slot`, ignoring failure (the slot may not
/// exist yet on a fresh database).
fn drop_slot(c: &Connection) {
    let stmt = c.prepare("select pg_drop_replication_slot('slot')").unwrap();
    // Best-effort cleanup: a single-arm `match _ => {}` is just an obscure
    // way to discard the Result, so discard it explicitly.
    let _ = stmt.execute(&[]);
}
/// Runs `f` against a clean database with a freshly created replication
/// slot, dropping the slot again afterwards.
fn with_slot<F>(f: F) where F: Fn(&Connection) -> () {
    with_clean_database(|conn| {
        // Recreate the slot from scratch so stale changes never leak in.
        drop_slot(conn);
        create_slot(conn);
        f(conn);
        drop_slot(conn);
    });
}
/// Runs `f` against a freshly reset `test_table`, dropping it afterwards.
fn with_clean_database<F>(f: F) where F: Fn(&Connection) -> () {
    let conn = connection();
    reset_database(&conn);
    f(&conn);
    drop_database(&conn);
}
/// Drops and recreates `test_table` so each test starts from a known state.
fn reset_database(c: &Connection) {
    // `c` is already a `&Connection`; `&c` created a needless `&&Connection`
    // that only compiled via deref coercion.
    drop_database(c);
    create_database(c);
}
/// Creates the `test_table` schema used by every test.
fn create_database(c: &Connection) {
    c.prepare("create table test_table (id int primary key, name text)")
        .unwrap()
        .execute(&[])
        .unwrap();
}
/// Loads every row of `test_table` into `TestRecord` values.
fn fetch_records(c: &Connection) -> Vec<TestRecord> {
    let stmt = c.prepare("select id, name from test_table").unwrap();
    let results = stmt.query(&[]).unwrap();
    results.iter().map(|r| {
        TestRecord {
            id: r.get(0),   // column 0: id
            name: r.get(1), // column 1: name
        }
    }).collect()
}
/// Inserts `r` into `test_table`.
fn create_record(c: &Connection, r: &TestRecord) {
    c.prepare("insert into test_table (id, name) values ($1, $2)")
        .unwrap()
        .execute(&[&r.id, &r.name])
        .unwrap();
}
/// Deletes the row whose primary key equals `id`.
fn delete_record(c: &Connection, id: i32) {
    c.prepare("delete from test_table where id = $1")
        .unwrap()
        .execute(&[&id])
        .unwrap();
}
/// Rewrites the `name` column of the row matching `new_record.id`.
fn update_record(c: &Connection, new_record: TestRecord) {
    c.prepare("update test_table set name = $2 where id = $1")
        .unwrap()
        .execute(&[&new_record.id, &new_record.name])
        .unwrap();
}
/// Removes `test_table` if present; safe to call on a fresh database.
fn drop_database(c: &Connection) {
    c.prepare("drop table if exists test_table")
        .unwrap()
        .execute(&[])
        .unwrap();
}
/// Reads the test-database connection string from the `POSTGRES_URL`
/// environment variable.
///
/// Panics with an explanatory message when the variable is unset or not
/// valid unicode, since no test can run without a database.
fn connection_string() -> String {
    match std::env::var("POSTGRES_URL") {
        Ok(val) => val,
        // `panic!` formats its arguments itself; wrapping the message in an
        // inner `format!` was redundant.
        Err(e) => panic!("You must set the POSTGRES_URL environment variable to point to a running Postgres test database when running automated tests: {}", e),
    }
}
/// Opens a connection to the test database named by `POSTGRES_URL`.
///
/// Panics with a descriptive message when the connection cannot be
/// established, since no test can proceed without it.
fn connection() -> Connection {
    match Connection::connect(&connection_string()[..], &SslMode::None) {
        Ok(c) => c,
        // `panic!` accepts format arguments directly; the inner `format!`
        // was redundant.
        Err(e) => panic!("Could not connect to the test database: {}", e),
    }
}
|
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_hir::{HirId, Item, ItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::Const;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::sym;
declare_clippy_lint! {
    /// ### What it does
    /// Displays a warning when a struct with a trailing zero-sized array is declared without a `repr` attribute.
    ///
    /// ### Why is this bad?
    /// Zero-sized arrays aren't very useful in Rust itself, so such a struct is likely being created to pass to C code or in some other situation where control over memory layout matters (for example, in conjunction with manual allocation to make it easy to compute the offset of the array). Either way, `#[repr(C)]` (or another `repr` attribute) is needed.
    ///
    /// ### Example
    /// ```rust
    /// struct RarelyUseful {
    ///     some_field: u32,
    ///     last: [u32; 0],
    /// }
    /// ```
    ///
    /// Use instead:
    /// ```rust
    /// #[repr(C)]
    /// struct MoreOftenUseful {
    ///     some_field: usize,
    ///     last: [u32; 0],
    /// }
    /// ```
    pub TRAILING_ZERO_SIZED_ARRAY_WITHOUT_REPR,
    nursery,
    "struct with a trailing zero-sized array but without `#[repr(C)]` or another `repr` attribute"
}
declare_lint_pass!(TrailingZeroSizedArrayWithoutRepr => [TRAILING_ZERO_SIZED_ARRAY_WITHOUT_REPR]);
impl<'tcx> LateLintPass<'tcx> for TrailingZeroSizedArrayWithoutRepr {
    fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'tcx>) {
        // Lint only when both hold: trailing zero-sized array AND no `repr` attribute.
        if is_struct_with_trailing_zero_sized_array(cx, item) && !has_repr_attr(cx, item.hir_id()) {
            span_lint_and_help(
                cx,
                TRAILING_ZERO_SIZED_ARRAY_WITHOUT_REPR,
                item.span,
                "trailing zero-sized array in a struct which is not marked with a `repr` attribute",
                None,
                &format!(
                    "consider annotating `{}` with `#[repr(C)]` or another `repr` attribute",
                    cx.tcx.def_path_str(item.def_id.to_def_id())
                ),
            );
        }
    }
}
/// Returns `true` iff `item` is a struct whose final field is an array whose
/// length constant-evaluates to zero.
fn is_struct_with_trailing_zero_sized_array(cx: &LateContext<'tcx>, item: &'tcx Item<'tcx>) -> bool {
    if_chain! {
        // First check if last field is an array
        if let ItemKind::Struct(data, _) = &item.kind;
        if let Some(last_field) = data.fields().last();
        if let rustc_hir::TyKind::Array(_, length) = last_field.ty.kind;
        // Then check if that array is zero-sized
        let length_ldid = cx.tcx.hir().local_def_id(length.hir_id);
        let length = Const::from_anon_const(cx.tcx, length_ldid);
        let length = length.try_eval_usize(cx.tcx, cx.param_env);
        if let Some(length) = length;
        then {
            length == 0
        } else {
            false
        }
    }
}
/// Whether any attribute on `hir_id` is a `#[repr(...)]` attribute.
fn has_repr_attr(cx: &LateContext<'_>, hir_id: HirId) -> bool {
    cx.tcx.hir().attrs(hir_id).iter().any(|attr| attr.has_name(sym::repr))
}
formatting 🙃
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_hir::{HirId, Item, ItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty::Const;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::sym;
declare_clippy_lint! {
    /// ### What it does
    /// Displays a warning when a struct with a trailing zero-sized array is declared without a `repr` attribute.
    ///
    /// ### Why is this bad?
    /// Zero-sized arrays aren't very useful in Rust itself, so such a struct is likely being created to pass to C code or in some other situation where control over memory layout matters (for example, in conjunction with manual allocation to make it easy to compute the offset of the array). Either way, `#[repr(C)]` (or another `repr` attribute) is needed.
    ///
    /// ### Example
    /// ```rust
    /// struct RarelyUseful {
    ///     some_field: u32,
    ///     last: [u32; 0],
    /// }
    /// ```
    ///
    /// Use instead:
    /// ```rust
    /// #[repr(C)]
    /// struct MoreOftenUseful {
    ///     some_field: usize,
    ///     last: [u32; 0],
    /// }
    /// ```
    pub TRAILING_ZERO_SIZED_ARRAY_WITHOUT_REPR,
    nursery,
    "struct with a trailing zero-sized array but without `#[repr(C)]` or another `repr` attribute"
}
declare_lint_pass!(TrailingZeroSizedArrayWithoutRepr => [TRAILING_ZERO_SIZED_ARRAY_WITHOUT_REPR]);
impl<'tcx> LateLintPass<'tcx> for TrailingZeroSizedArrayWithoutRepr {
    fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'tcx>) {
        // Lint only when both hold: trailing zero-sized array AND no `repr` attribute.
        if is_struct_with_trailing_zero_sized_array(cx, item) && !has_repr_attr(cx, item.hir_id()) {
            span_lint_and_help(
                cx,
                TRAILING_ZERO_SIZED_ARRAY_WITHOUT_REPR,
                item.span,
                "trailing zero-sized array in a struct which is not marked with a `repr` attribute",
                None,
                &format!(
                    "consider annotating `{}` with `#[repr(C)]` or another `repr` attribute",
                    cx.tcx.def_path_str(item.def_id.to_def_id())
                ),
            );
        }
    }
}
/// Returns `true` iff `item` is a struct whose final field is an array whose
/// length constant-evaluates to zero.
fn is_struct_with_trailing_zero_sized_array(cx: &LateContext<'tcx>, item: &'tcx Item<'tcx>) -> bool {
    if_chain! {
        // First check if last field is an array
        if let ItemKind::Struct(data, _) = &item.kind;
        if let Some(last_field) = data.fields().last();
        if let rustc_hir::TyKind::Array(_, length) = last_field.ty.kind;
        // Then check if that array is zero-sized
        let length_ldid = cx.tcx.hir().local_def_id(length.hir_id);
        let length = Const::from_anon_const(cx.tcx, length_ldid);
        let length = length.try_eval_usize(cx.tcx, cx.param_env);
        if let Some(length) = length;
        then {
            length == 0
        } else {
            false
        }
    }
}
/// Whether any attribute on `hir_id` is a `#[repr(...)]` attribute.
fn has_repr_attr(cx: &LateContext<'_>, hir_id: HirId) -> bool {
    cx.tcx.hir().attrs(hir_id).iter().any(|attr| attr.has_name(sym::repr))
}
|
// Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
use chrono::{TimeZone, Utc};
use failure::Fail;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::ops::Deref;
use std::path::Path;
use minidump::{self, *};
use crate::process_state::{CallStack, CallStackInfo, LinuxStandardBase, ProcessState};
use crate::stackwalker;
use crate::symbols::*;
use crate::system_info::SystemInfo;
/// An error encountered during minidump processing.
#[derive(Debug, Fail)]
pub enum ProcessError {
    /// The minidump could not be read or parsed; wraps the underlying error.
    #[fail(display = "Failed to read minidump")]
    MinidumpReadError(minidump::Error),
    /// Catch-all for failures that have no more specific variant.
    #[fail(display = "An unknown error occurred")]
    UnknownError,
    /// The required `MinidumpSystemInfo` stream was absent.
    #[fail(display = "The system information stream was not found")]
    MissingSystemInfo,
    /// The required `MinidumpThreadList` stream was absent.
    #[fail(display = "The thread list stream was not found")]
    MissingThreadList,
}
// Allows `?` to be used directly on minidump read operations in this crate.
impl From<minidump::Error> for ProcessError {
    fn from(err: minidump::Error) -> ProcessError {
        ProcessError::MinidumpReadError(err)
    }
}
/// Unwind all threads in `dump` and return a `ProcessState`.
///
/// # Examples
///
/// ```
/// use minidump::Minidump;
/// use std::path::PathBuf;
/// use breakpad_symbols::{Symbolizer, SimpleSymbolSupplier};
///
/// # std::env::set_current_dir(env!("CARGO_MANIFEST_DIR"));
/// # fn foo() -> Result<(), minidump_processor::ProcessError> {
/// let mut dump = Minidump::read_path("../testdata/test.dmp")?;
/// let supplier = SimpleSymbolSupplier::new(vec!(PathBuf::from("../testdata/symbols")));
/// let symbolizer = Symbolizer::new(supplier);
/// let state = minidump_processor::process_minidump(&mut dump, &symbolizer)?;
/// assert_eq!(state.threads.len(), 2);
/// println!("Processed {} threads", state.threads.len());
/// # Ok(())
/// # }
/// # fn main() { foo().unwrap() }
/// ```
pub fn process_minidump<'a, T, P>(
    dump: &Minidump<'a, T>,
    symbol_provider: &P,
) -> Result<ProcessState, ProcessError>
where
    T: Deref<Target = [u8]> + 'a,
    P: SymbolProvider,
{
    // No Evil JSON Here!
    // Delegate to the full entry point with no extra JSON file.
    process_minidump_with_evil(dump, symbol_provider, None)
}
/// The same as `process_minidump` but with an extra evil little json file.
///
/// This is a hack to support mozilla's legacy workflow, just use `process_minidump`.
pub fn process_minidump_with_evil<'a, T, P>(
    dump: &Minidump<'a, T>,
    symbol_provider: &P,
    evil_json: Option<&Path>,
) -> Result<ProcessState, ProcessError>
where
    T: Deref<Target = [u8]> + 'a,
    P: SymbolProvider,
{
    // Thread list is required for processing.
    let thread_list = dump
        .get_stream::<MinidumpThreadList>()
        .or(Err(ProcessError::MissingThreadList))?;
    // Try to get thread names, but it's only a nice-to-have.
    let thread_names = dump
        .get_stream::<MinidumpThreadNames>()
        .unwrap_or_else(|_| MinidumpThreadNames::default());
    // System info is required for processing.
    let dump_system_info = dump
        .get_stream::<MinidumpSystemInfo>()
        .or(Err(ProcessError::MissingSystemInfo))?;
    let mut os_version = format!(
        "{}.{}.{}",
        dump_system_info.raw.major_version,
        dump_system_info.raw.minor_version,
        dump_system_info.raw.build_number
    );
    if let Some(csd_version) = dump_system_info.csd_version() {
        os_version.push(' ');
        os_version.push_str(&csd_version);
    }
    // Linux-only streams; all optional.
    let linux_standard_base = dump.get_stream::<MinidumpLinuxLsbRelease>().ok();
    let linux_cpu_info = dump
        .get_stream::<MinidumpLinuxCpuInfo>()
        .unwrap_or_default();
    let _linux_environ = dump.get_stream::<MinidumpLinuxEnviron>().ok();
    let _linux_proc_status = dump.get_stream::<MinidumpLinuxProcStatus>().ok();
    // Extract everything we care about from linux streams here.
    // We don't eagerly process them in the minidump crate because there's just
    // tons of random information in there and it's not obvious what anyone
    // would care about. So just providing an iterator and letting minidump-processor
    // pull out the things it cares about is simple and effective.
    let mut cpu_microcode_version = None;
    for (key, val) in linux_cpu_info.iter() {
        if key.as_bytes() == b"microcode" {
            // The value is a hex string like "0xde"; strip the prefix and parse.
            cpu_microcode_version = val
                .to_str()
                .ok()
                .and_then(|val| val.strip_prefix("0x"))
                .and_then(|val| u64::from_str_radix(val, 16).ok());
            break;
        }
    }
    let linux_standard_base = linux_standard_base.map(|linux_standard_base| {
        let mut lsb = LinuxStandardBase::default();
        for (key, val) in linux_standard_base.iter() {
            // Accept both lsb-release (DISTRIB_*) and os-release key spellings.
            match key.as_bytes() {
                b"DISTRIB_ID" | b"ID" => lsb.id = val.to_string_lossy().into_owned(),
                b"DISTRIB_RELEASE" | b"VERSION_ID" => {
                    lsb.release = val.to_string_lossy().into_owned()
                }
                b"DISTRIB_CODENAME" | b"VERSION_CODENAME" => {
                    lsb.codename = val.to_string_lossy().into_owned()
                }
                b"DISTRIB_DESCRIPTION" | b"PRETTY_NAME" => {
                    lsb.description = val.to_string_lossy().into_owned()
                }
                _ => {}
            }
        }
        lsb
    });
    let cpu_info = dump_system_info
        .cpu_info()
        .map(|string| string.into_owned());
    let system_info = SystemInfo {
        os: dump_system_info.os,
        os_version: Some(os_version),
        cpu: dump_system_info.cpu,
        cpu_info,
        cpu_microcode_version,
        cpu_count: dump_system_info.raw.number_of_processors as usize,
    };
    let mac_crash_info = dump
        .get_stream::<MinidumpMacCrashInfo>()
        .ok()
        .map(|info| info.raw);
    // Process create time is optional.
    let (process_id, process_create_time) =
        if let Ok(misc_info) = dump.get_stream::<MinidumpMiscInfo>() {
            (
                misc_info.raw.process_id().cloned(),
                misc_info.process_create_time(),
            )
        } else {
            (None, None)
        };
    // If Breakpad info exists in dump, get dump and requesting thread ids.
    let breakpad_info = dump.get_stream::<MinidumpBreakpadInfo>();
    let (dump_thread_id, requesting_thread_id) = if let Ok(info) = breakpad_info {
        (info.dump_thread_id, info.requesting_thread_id)
    } else {
        (None, None)
    };
    // Get exception info if it exists.
    let exception_stream = dump.get_stream::<MinidumpException>().ok();
    let exception_ref = exception_stream.as_ref();
    let (crash_reason, crash_address, crashing_thread_id) = if let Some(exception) = exception_ref {
        (
            Some(exception.get_crash_reason(system_info.os, system_info.cpu)),
            Some(exception.get_crash_address(system_info.os)),
            Some(exception.get_crashing_thread_id()),
        )
    } else {
        (None, None, None)
    };
    let exception_context = exception_ref.and_then(|e| e.context.as_ref());
    // Get assertion
    let assertion = None;
    let modules = match dump.get_stream::<MinidumpModuleList>() {
        Ok(module_list) => module_list,
        // Just give an empty list, simplifies things.
        Err(_) => MinidumpModuleList::new(),
    };
    let unloaded_modules = match dump.get_stream::<MinidumpUnloadedModuleList>() {
        Ok(module_list) => module_list,
        // Just give an empty list, simplifies things.
        Err(_) => MinidumpUnloadedModuleList::new(),
    };
    let memory_list = dump.get_stream::<MinidumpMemoryList>().unwrap_or_default();
    let memory_info_list = dump.get_stream::<MinidumpMemoryInfoList>().ok();
    let linux_maps = dump.get_stream::<MinidumpLinuxMaps>().ok();
    let _memory_info = UnifiedMemoryInfoList::new(memory_info_list, linux_maps).unwrap_or_default();
    // Get memory list
    let mut threads = vec![];
    let mut requesting_thread = None;
    for (i, thread) in thread_list.threads.iter().enumerate() {
        // If this is the thread that wrote the dump, skip processing it.
        if dump_thread_id.is_some() && dump_thread_id.unwrap() == thread.raw.thread_id {
            threads.push(CallStack::with_info(CallStackInfo::DumpThreadSkipped));
            continue;
        }
        // If this thread requested the dump then try to use the exception
        // context if it exists. (prefer the exception stream's thread id over
        // the breakpad info stream's thread id.)
        let context = if crashing_thread_id
            .or(requesting_thread_id)
            .map(|id| id == thread.raw.thread_id)
            .unwrap_or(false)
        {
            requesting_thread = Some(i);
            exception_context.or_else(|| thread.context.as_ref())
        } else {
            thread.context.as_ref()
        };
        let stack = thread.stack.as_ref().or_else(|| {
            // Windows probably gave us null RVAs for our stack memory descriptors.
            // If this happens, then we need to look up the memory region by address.
            let stack_addr = thread.raw.stack.start_of_memory_range;
            memory_list.memory_at_address(stack_addr)
        });
        let mut stack = stackwalker::walk_stack(&context, stack, &modules, symbol_provider);
        let name = thread_names
            .get_name(thread.raw.thread_id)
            .map(|cow| cow.into_owned());
        stack.thread_name = name;
        stack.last_error_value = thread.last_error(system_info.cpu, &memory_list);
        threads.push(stack);
    }
    // Collect up info on unimplemented/unknown modules
    let unknown_streams = dump.unknown_streams().collect();
    let unimplemented_streams = dump.unimplemented_streams().collect();
    // Get symbol stats from the symbolizer
    let symbol_stats = symbol_provider.stats();
    // Finally, handle the evil JSON file (get module signing certs)
    let cert_info = evil_json.and_then(handle_evil).unwrap_or_else(HashMap::new);
    Ok(ProcessState {
        process_id,
        time: Utc.timestamp(dump.header.time_date_stamp as i64, 0),
        process_create_time,
        cert_info,
        crash_reason,
        crash_address,
        assertion,
        requesting_thread,
        system_info,
        linux_standard_base,
        mac_crash_info,
        threads,
        modules,
        unloaded_modules,
        unknown_streams,
        unimplemented_streams,
        symbol_stats,
    })
}
/// Parses the "evil" extra JSON file and extracts a module-name →
/// certificate map from its `ModuleSignatureInfo` entry.
///
/// Logs and returns `None` on any I/O or parse failure.
fn handle_evil(evil_path: &Path) -> Option<HashMap<String, String>> {
    use log::{error, warn};
    use serde_json::Value::{self, *};
    // Get the evil json
    let evil_json = File::open(evil_path)
        .map_err(|e| {
            error!("Could not load Extra JSON at {:?}", evil_path);
            e
        })
        .ok()?;
    let buf = BufReader::new(evil_json);
    let json: Value = serde_json::from_reader(buf)
        .map_err(|e| {
            error!("Could not parse Extra JSON (was not valid JSON)");
            e
        })
        .ok()?;
    // Get module signing info
    // `temp_obj` exists only to extend the lifetime of a re-parsed object so
    // both match arms can yield a reference.
    let temp_obj;
    let certs = match json.get("ModuleSignatureInfo") {
        Some(Object(obj)) => obj,
        Some(String(string)) => {
            // Possible the signature info was wrapped in a string by mistake,
            // So try to parse that string as an object.
            temp_obj = serde_json::from_str(string)
                .map_err(|e| {
                    error!("Could not parse Extra JSON's ModuleSignatureInfo (not an object)");
                    error!("ModuleSignatureInfo: {}", string);
                    e
                })
                .ok()?;
            &temp_obj
        }
        _ => {
            error!("Could not parse Extra JSON's ModuleSignatureInfo (not an object)");
            return None;
        }
    };
    // Each certificate lists the modules it applies to, but we want the
    // reverse mapping -- module names to certificates. Invert the map.
    let mut cert_map = HashMap::new();
    for (cert, modules) in certs {
        if let Array(modules) = modules {
            for module in modules {
                if let String(module) = module {
                    cert_map.insert(module.clone(), cert.clone());
                }
            }
        } else {
            warn!(
                "Extra JSON had corrupt entry -- \"{}\": {:?}",
                cert, modules
            );
        }
    }
    Some(cert_map)
}
Use the thread names in the evil json
Fixes #284
// Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
use chrono::{TimeZone, Utc};
use failure::Fail;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufReader;
use std::ops::Deref;
use std::path::Path;
use minidump::{self, *};
use crate::process_state::{CallStack, CallStackInfo, LinuxStandardBase, ProcessState};
use crate::stackwalker;
use crate::symbols::*;
use crate::system_info::SystemInfo;
/// An error encountered during minidump processing.
#[derive(Debug, Fail)]
pub enum ProcessError {
    /// The minidump could not be read or parsed; wraps the underlying error.
    #[fail(display = "Failed to read minidump")]
    MinidumpReadError(minidump::Error),
    /// Catch-all for failures that have no more specific variant.
    #[fail(display = "An unknown error occurred")]
    UnknownError,
    /// The required `MinidumpSystemInfo` stream was absent.
    #[fail(display = "The system information stream was not found")]
    MissingSystemInfo,
    /// The required `MinidumpThreadList` stream was absent.
    #[fail(display = "The thread list stream was not found")]
    MissingThreadList,
}
// Allows `?` to be used directly on minidump read operations in this crate.
impl From<minidump::Error> for ProcessError {
    fn from(err: minidump::Error) -> ProcessError {
        ProcessError::MinidumpReadError(err)
    }
}
/// Unwind all threads in `dump` and return a `ProcessState`.
///
/// # Examples
///
/// ```
/// use minidump::Minidump;
/// use std::path::PathBuf;
/// use breakpad_symbols::{Symbolizer, SimpleSymbolSupplier};
///
/// # std::env::set_current_dir(env!("CARGO_MANIFEST_DIR"));
/// # fn foo() -> Result<(), minidump_processor::ProcessError> {
/// let mut dump = Minidump::read_path("../testdata/test.dmp")?;
/// let supplier = SimpleSymbolSupplier::new(vec!(PathBuf::from("../testdata/symbols")));
/// let symbolizer = Symbolizer::new(supplier);
/// let state = minidump_processor::process_minidump(&mut dump, &symbolizer)?;
/// assert_eq!(state.threads.len(), 2);
/// println!("Processed {} threads", state.threads.len());
/// # Ok(())
/// # }
/// # fn main() { foo().unwrap() }
/// ```
pub fn process_minidump<'a, T, P>(
    dump: &Minidump<'a, T>,
    symbol_provider: &P,
) -> Result<ProcessState, ProcessError>
where
    T: Deref<Target = [u8]> + 'a,
    P: SymbolProvider,
{
    // No Evil JSON Here!
    // Delegate to the full entry point with no extra JSON file.
    process_minidump_with_evil(dump, symbol_provider, None)
}
/// The same as `process_minidump` but with an extra evil little json file.
///
/// This is a hack to support mozilla's legacy workflow, just use `process_minidump`.
pub fn process_minidump_with_evil<'a, T, P>(
    dump: &Minidump<'a, T>,
    symbol_provider: &P,
    evil_json: Option<&Path>,
) -> Result<ProcessState, ProcessError>
where
    T: Deref<Target = [u8]> + 'a,
    P: SymbolProvider,
{
    // Thread list is required for processing.
    let thread_list = dump
        .get_stream::<MinidumpThreadList>()
        .or(Err(ProcessError::MissingThreadList))?;
    // Try to get thread names, but it's only a nice-to-have.
    let thread_names = dump
        .get_stream::<MinidumpThreadNames>()
        .unwrap_or_else(|_| MinidumpThreadNames::default());
    // System info is required for processing.
    let dump_system_info = dump
        .get_stream::<MinidumpSystemInfo>()
        .or(Err(ProcessError::MissingSystemInfo))?;
    // OS version string: "major.minor.build", followed by the CSD
    // (service pack) string when one is present.
    let mut os_version = format!(
        "{}.{}.{}",
        dump_system_info.raw.major_version,
        dump_system_info.raw.minor_version,
        dump_system_info.raw.build_number
    );
    if let Some(csd_version) = dump_system_info.csd_version() {
        os_version.push(' ');
        os_version.push_str(&csd_version);
    }
    // Linux-specific streams; all of them are optional.
    let linux_standard_base = dump.get_stream::<MinidumpLinuxLsbRelease>().ok();
    let linux_cpu_info = dump
        .get_stream::<MinidumpLinuxCpuInfo>()
        .unwrap_or_default();
    let _linux_environ = dump.get_stream::<MinidumpLinuxEnviron>().ok();
    let _linux_proc_status = dump.get_stream::<MinidumpLinuxProcStatus>().ok();
    // Extract everything we care about from linux streams here.
    // We don't eagerly process them in the minidump crate because there's just
    // tons of random information in there and it's not obvious what anyone
    // would care about. So just providing an iterator and letting minidump-processor
    // pull out the things it cares about is simple and effective.
    let mut cpu_microcode_version = None;
    for (key, val) in linux_cpu_info.iter() {
        if key.as_bytes() == b"microcode" {
            // Value is expected to look like "0x<hex>"; parse the hex payload.
            cpu_microcode_version = val
                .to_str()
                .ok()
                .and_then(|val| val.strip_prefix("0x"))
                .and_then(|val| u64::from_str_radix(val, 16).ok());
            break;
        }
    }
    // Normalize lsb-release (DISTRIB_*) and os-release style keys into one
    // LinuxStandardBase struct; either naming scheme is accepted.
    let linux_standard_base = linux_standard_base.map(|linux_standard_base| {
        let mut lsb = LinuxStandardBase::default();
        for (key, val) in linux_standard_base.iter() {
            match key.as_bytes() {
                b"DISTRIB_ID" | b"ID" => lsb.id = val.to_string_lossy().into_owned(),
                b"DISTRIB_RELEASE" | b"VERSION_ID" => {
                    lsb.release = val.to_string_lossy().into_owned()
                }
                b"DISTRIB_CODENAME" | b"VERSION_CODENAME" => {
                    lsb.codename = val.to_string_lossy().into_owned()
                }
                b"DISTRIB_DESCRIPTION" | b"PRETTY_NAME" => {
                    lsb.description = val.to_string_lossy().into_owned()
                }
                _ => {}
            }
        }
        lsb
    });
    let cpu_info = dump_system_info
        .cpu_info()
        .map(|string| string.into_owned());
    let system_info = SystemInfo {
        os: dump_system_info.os,
        os_version: Some(os_version),
        cpu: dump_system_info.cpu,
        cpu_info,
        cpu_microcode_version,
        cpu_count: dump_system_info.raw.number_of_processors as usize,
    };
    let mac_crash_info = dump
        .get_stream::<MinidumpMacCrashInfo>()
        .ok()
        .map(|info| info.raw);
    // Process create time is optional.
    let (process_id, process_create_time) =
        if let Ok(misc_info) = dump.get_stream::<MinidumpMiscInfo>() {
            (
                misc_info.raw.process_id().cloned(),
                misc_info.process_create_time(),
            )
        } else {
            (None, None)
        };
    // If Breakpad info exists in dump, get dump and requesting thread ids.
    let breakpad_info = dump.get_stream::<MinidumpBreakpadInfo>();
    let (dump_thread_id, requesting_thread_id) = if let Ok(info) = breakpad_info {
        (info.dump_thread_id, info.requesting_thread_id)
    } else {
        (None, None)
    };
    // Get exception info if it exists.
    let exception_stream = dump.get_stream::<MinidumpException>().ok();
    let exception_ref = exception_stream.as_ref();
    let (crash_reason, crash_address, crashing_thread_id) = if let Some(exception) = exception_ref {
        (
            Some(exception.get_crash_reason(system_info.os, system_info.cpu)),
            Some(exception.get_crash_address(system_info.os)),
            Some(exception.get_crashing_thread_id()),
        )
    } else {
        (None, None, None)
    };
    let exception_context = exception_ref.and_then(|e| e.context.as_ref());
    // Get assertion
    // NOTE(review): assertion info is never extracted here; always None.
    let assertion = None;
    let modules = match dump.get_stream::<MinidumpModuleList>() {
        Ok(module_list) => module_list,
        // Just give an empty list, simplifies things.
        Err(_) => MinidumpModuleList::new(),
    };
    let unloaded_modules = match dump.get_stream::<MinidumpUnloadedModuleList>() {
        Ok(module_list) => module_list,
        // Just give an empty list, simplifies things.
        Err(_) => MinidumpUnloadedModuleList::new(),
    };
    let memory_list = dump.get_stream::<MinidumpMemoryList>().unwrap_or_default();
    let memory_info_list = dump.get_stream::<MinidumpMemoryInfoList>().ok();
    let linux_maps = dump.get_stream::<MinidumpLinuxMaps>().ok();
    // Currently unused beyond validating the streams — TODO confirm intent.
    let _memory_info = UnifiedMemoryInfoList::new(memory_info_list, linux_maps).unwrap_or_default();
    // Get the evil JSON file (thread names and module certificates)
    let evil = evil_json.and_then(handle_evil).unwrap_or_default();
    // Walk every thread's stack, producing one CallStack per thread.
    let mut threads = vec![];
    let mut requesting_thread = None;
    for (i, thread) in thread_list.threads.iter().enumerate() {
        // If this is the thread that wrote the dump, skip processing it.
        if dump_thread_id.is_some() && dump_thread_id.unwrap() == thread.raw.thread_id {
            threads.push(CallStack::with_info(CallStackInfo::DumpThreadSkipped));
            continue;
        }
        // If this thread requested the dump then try to use the exception
        // context if it exists. (prefer the exception stream's thread id over
        // the breakpad info stream's thread id.)
        let context = if crashing_thread_id
            .or(requesting_thread_id)
            .map(|id| id == thread.raw.thread_id)
            .unwrap_or(false)
        {
            requesting_thread = Some(i);
            exception_context.or_else(|| thread.context.as_ref())
        } else {
            thread.context.as_ref()
        };
        let stack = thread.stack.as_ref().or_else(|| {
            // Windows probably gave us null RVAs for our stack memory descriptors.
            // If this happens, then we need to look up the memory region by address.
            let stack_addr = thread.raw.stack.start_of_memory_range;
            memory_list.memory_at_address(stack_addr)
        });
        let mut stack = stackwalker::walk_stack(&context, stack, &modules, symbol_provider);
        // Prefer the dump's own thread names, fall back to the evil JSON's.
        let name = thread_names
            .get_name(thread.raw.thread_id)
            .map(|cow| cow.into_owned())
            .or_else(|| evil.thread_names.get(&thread.raw.thread_id).cloned());
        stack.thread_name = name;
        stack.last_error_value = thread.last_error(system_info.cpu, &memory_list);
        threads.push(stack);
    }
    // Collect up info on unimplemented/unknown modules
    let unknown_streams = dump.unknown_streams().collect();
    let unimplemented_streams = dump.unimplemented_streams().collect();
    // Get symbol stats from the symbolizer
    let symbol_stats = symbol_provider.stats();
    Ok(ProcessState {
        process_id,
        time: Utc.timestamp(dump.header.time_date_stamp as i64, 0),
        process_create_time,
        cert_info: evil.certs,
        crash_reason,
        crash_address,
        assertion,
        requesting_thread,
        system_info,
        linux_standard_base,
        mac_crash_info,
        threads,
        modules,
        unloaded_modules,
        unknown_streams,
        unimplemented_streams,
        symbol_stats,
    })
}
/// Things extracted from the Evil JSON File
/// (mozilla's legacy "extra" file; see `handle_evil`).
#[derive(Debug, Default)]
struct Evil {
    /// module name => cert
    certs: HashMap<String, String>,
    /// thread id => thread name
    thread_names: HashMap<u32, String>,
}
/// Parse mozilla's legacy "extra" JSON file, extracting module certificate
/// info and thread names. Returns `None` (after logging an error) when the
/// file is missing or is not valid JSON.
fn handle_evil(evil_path: &Path) -> Option<Evil> {
    use log::error;
    use serde_json::map::Map;
    use serde_json::Value;
    use std::str::FromStr;
    // Get the evil json
    let evil_json = File::open(evil_path)
        .map_err(|e| {
            error!("Could not load Extra JSON at {:?}", evil_path);
            e
        })
        .ok()?;
    let buf = BufReader::new(evil_json);
    let mut json: Map<String, Value> = serde_json::from_reader(buf)
        .map_err(|e| {
            error!("Could not parse Extra JSON (was not valid JSON)");
            e
        })
        .ok()?;
    // Of course evil json contains a string-that-can-be-parsed-as-a-json-object
    // instead of having a normal json object!
    fn evil_obj<K, V>(json: &mut Map<String, Value>, field_name: &str) -> Option<HashMap<K, V>>
    where
        K: for<'de> serde::de::Deserialize<'de> + Eq + std::hash::Hash,
        V: for<'de> serde::de::Deserialize<'de>,
    {
        json.remove(field_name).and_then(|val| {
            match val {
                Value::Object(_) => serde_json::from_value(val).ok(),
                Value::String(string) => serde_json::from_str(&string).ok(),
                _ => None,
            }
            .or_else(|| {
                error!("Could not parse Evil JSON's {} (not an object)", field_name);
                None
            })
        })
    }
    // Convert certs from
    // "cert_name1": ["module1", "module2", ...], "cert_name2": ...
    // to
    // "module1": "cert_name1", "module2": "cert_name1", ...
    let certs = evil_obj(&mut json, "ModuleSignatureInfo")
        .map(|certs: HashMap<String, Vec<String>>| {
            let mut cert_map = HashMap::new();
            for (cert, modules) in certs {
                for module in modules {
                    cert_map.insert(module, cert.clone());
                }
            }
            cert_map
        })
        .unwrap_or_default();
    // Get thread name mappings
    // In typical evil json fashion, this list doesn't conform to even the evil_obj format!
    // It's just a set of comma-separated int:string pairs, with a trailing comma.
    // This cannot be parsed as JSON at all, since the keys are not strings. So we just
    // do a sloppy `split` based parse and hope we don't encounter thread names with commas
    // in them because I hate this JSON file with a passion.
    //
    // ex: 123: "name1", 456: "name",
    let thread_names = json
        .remove("ThreadIdNameMapping")
        .unwrap_or_default()
        .as_str()
        .unwrap_or_default()
        .split(',')
        .filter_map(|entry| {
            entry.split_once(':').and_then(|(key, val)| {
                // FIX: entries are whitespace-padded (see example above:
                // `123: "name1", 456: "name",`), so trim both halves before
                // parsing. Without the trim, the leading space makes
                // `u32::from_str` and `strip_prefix('"')` fail, silently
                // dropping every padded entry.
                let key = u32::from_str(key.trim()).ok();
                let val = val
                    .trim()
                    .strip_prefix('"')
                    .and_then(|val| val.strip_suffix('"'))
                    .map(String::from);
                key.zip(val)
            })
        })
        .collect();
    Some(Evil {
        certs,
        thread_names,
    })
}
|
use std::{cmp::min, ffi::OsStr, fs::File, io, mem, num::Wrapping, path::PathBuf, time::Duration};
use clap::Parser;
use pgn_reader::{BufferedReader, Color, RawHeader, SanPlus, Skip, Visitor};
use rand::{distributions::OpenClosed01, rngs::SmallRng, Rng, SeedableRng};
use serde::Serialize;
use serde_with::{serde_as, DisplayFromStr, SpaceSeparator, StringWithSeparator};
// Number of parsed games buffered before each upload to the endpoint.
const BATCH_SIZE: usize = 100;
// Lichess time-control buckets, fastest to slowest. Serialized in
// camelCase (e.g. "ultraBullet") to match what the endpoint expects.
#[derive(Debug, Serialize, Copy, Clone)]
#[serde(rename_all = "camelCase")]
enum Speed {
    UltraBullet,
    Bullet,
    Blitz,
    Rapid,
    Classical,
    Correspondence,
}
impl Speed {
    /// Bucket a clock into a speed category using the estimated game
    /// duration: initial seconds plus 40 moves' worth of increment.
    fn from_seconds_and_increment(seconds: u64, increment: u64) -> Speed {
        match seconds + 40 * increment {
            0..=29 => Speed::UltraBullet,
            30..=179 => Speed::Bullet,
            180..=479 => Speed::Blitz,
            480..=1499 => Speed::Rapid,
            1500..=21_599 => Speed::Classical,
            _ => Speed::Correspondence,
        }
    }

    /// Parse a PGN `TimeControl` tag: `"seconds+increment"`, or `"-"` for
    /// clockless (correspondence) games. Any malformed tag yields `Err(())`.
    fn from_bytes(bytes: &[u8]) -> Result<Speed, ()> {
        if bytes == b"-" {
            return Ok(Speed::Correspondence);
        }
        let mut fields = bytes.splitn(2, |ch| *ch == b'+');
        let mut next_number =
            || -> Result<u64, ()> { btoi::btou(fields.next().ok_or(())?).map_err(|_| ()) };
        let seconds = next_number()?;
        let increment = next_number()?;
        Ok(Speed::from_seconds_and_increment(seconds, increment))
    }
}
// Streaming PGN visitor that samples games and uploads them to the
// import endpoint in batches of BATCH_SIZE.
struct Importer {
    endpoint: String,                  // base URL of the import service
    filename: PathBuf,                 // source file, shown in progress output
    client: reqwest::blocking::Client, // blocking HTTP client with timeout
    rng: SmallRng,                     // sampling RNG (seeded deterministically)
    spinner_idx: Wrapping<usize>,      // rotating progress-spinner position
    current: Game,                     // game currently being parsed
    skip: bool,                        // true when `current` is being discarded
    batch: Vec<Game>,                  // parsed games awaiting upload
}
// One game in the JSON shape expected by the `/import/lichess` endpoint.
#[serde_as]
#[derive(Default, Serialize, Debug)]
struct Game {
    variant: Option<String>, // PGN `Variant` tag, if any
    speed: Option<Speed>,    // derived from the `TimeControl` tag
    fen: Option<String>,     // custom starting position, if any
    id: Option<String>,      // last path segment of the `Site` URL
    date: Option<String>,    // `Date` or `UTCDate` tag
    white: Player,
    black: Player,
    // Serialized through Display/FromStr; None for draws or unknown results.
    #[serde_as(as = "Option<DisplayFromStr>")]
    winner: Option<Color>,
    // Serialized as a single space-separated SAN string.
    #[serde_as(as = "StringWithSeparator<SpaceSeparator, SanPlus>")]
    moves: Vec<SanPlus>,
}
// Per-player metadata from the PGN headers.
#[derive(Default, Serialize, Debug)]
struct Player {
    name: Option<String>, // `White`/`Black` tag
    rating: Option<u16>,  // `WhiteElo`/`BlackElo`; None when the tag is "?"
}
impl Importer {
    /// Create an importer uploading to `endpoint`, tagging progress output
    /// with `filename`.
    fn new(endpoint: &str, filename: PathBuf) -> Importer {
        Importer {
            endpoint: endpoint.to_owned(),
            filename,
            client: reqwest::blocking::Client::builder()
                .timeout(Duration::from_secs(60))
                .build()
                .expect("client"),
            // Fixed seed: sampling decisions in `end_headers` are
            // deterministic across runs — presumably so repeated imports
            // select the same games. TODO confirm.
            rng: SmallRng::from_seed([
                0x19, 0x29, 0xab, 0x17, 0xc6, 0xfa, 0xb0, 0xe9, 0x4b, 0x44, 0xd8, 0x07, 0x09, 0xbf,
                0x1d, 0x87, 0xbd, 0xd8, 0xb3, 0x2f, 0xe1, 0xe2, 0xa0, 0x1a, 0x9e, 0x30, 0x98, 0xd7,
                0xef, 0xd5, 0x7a, 0x1d,
            ]),
            spinner_idx: Wrapping(0),
            current: Game::default(),
            skip: false,
            batch: Vec::with_capacity(BATCH_SIZE),
        }
    }

    /// PUT the buffered batch to the import endpoint, print one progress
    /// line (spinner, file, latest game date, HTTP status, response body),
    /// then clear the batch. Panics if the request fails.
    pub fn send(&mut self) {
        // println!("{}", serde_json::to_string(&self.batch).expect("serialize"));
        let res = self
            .client
            .put(format!("{}/import/lichess", self.endpoint))
            .json(&self.batch)
            .send()
            .expect("send batch");
        self.spinner_idx += Wrapping(1);
        let spinner = &['⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷'];
        println!(
            "{} {:?}: {}: {} - {}",
            spinner[self.spinner_idx.0 % spinner.len()],
            self.filename,
            self.batch
                .last()
                .and_then(|g| g.date.as_ref())
                .unwrap_or(&String::new()),
            res.status(),
            res.text().expect("decode response")
        );
        self.batch.clear();
    }
}
impl Visitor for Importer {
    type Result = ();

    /// Reset per-game state; the next game's headers follow.
    fn begin_game(&mut self) {
        self.skip = false;
        self.current = Game::default();
    }

    /// Collect the PGN tags we care about; marks the game skipped for BOT
    /// players and unknown results.
    fn header(&mut self, key: &[u8], value: RawHeader<'_>) {
        if key == b"White" {
            self.current.white.name = Some(value.decode_utf8().expect("White").into_owned());
        } else if key == b"Black" {
            self.current.black.name = Some(value.decode_utf8().expect("Black").into_owned());
        } else if key == b"WhiteElo" {
            if value.as_bytes() != b"?" {
                self.current.white.rating = Some(btoi::btoi(value.as_bytes()).expect("WhiteElo"));
            }
        } else if key == b"BlackElo" {
            if value.as_bytes() != b"?" {
                self.current.black.rating = Some(btoi::btoi(value.as_bytes()).expect("BlackElo"));
            }
        } else if key == b"TimeControl" {
            self.current.speed = Some(Speed::from_bytes(value.as_bytes()).expect("TimeControl"));
        } else if key == b"Variant" {
            self.current.variant = Some(value.decode_utf8().expect("Variant").into_owned());
        } else if key == b"Date" || key == b"UTCDate" {
            self.current.date = Some(value.decode_utf8().expect("Date").into_owned());
        } else if key == b"WhiteTitle" || key == b"BlackTitle" {
            if value.as_bytes() == b"BOT" {
                self.skip = true;
            }
        } else if key == b"Site" {
            // The game id is the last path segment of the Site URL.
            self.current.id = Some(
                String::from_utf8(
                    value
                        .as_bytes()
                        .rsplitn(2, |ch| *ch == b'/')
                        .next()
                        .expect("Site")
                        .to_owned(),
                )
                .expect("Site"),
            );
        } else if key == b"Result" {
            match value.as_bytes() {
                b"1-0" => self.current.winner = Some(Color::White),
                b"0-1" => self.current.winner = Some(Color::Black),
                b"1/2-1/2" => self.current.winner = None,
                _ => self.skip = true, // unterminated or unknown result
            }
        } else if key == b"FEN" {
            if value.as_bytes() == b"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1" {
                // https://github.com/ornicar/lichess-db/issues/40
                self.current.fen = None;
            } else {
                self.current.fen = Some(value.decode_utf8().expect("FEN").into_owned());
            }
        }
    }

    /// Decide whether to keep this game, sampling by speed and rating.
    fn end_headers(&mut self) -> Skip {
        // FIX: average in u32 — summing two u16 ratings can exceed
        // u16::MAX on malformed Elo tags, panicking in debug builds.
        let rating = (u32::from(self.current.white.rating.unwrap_or(0))
            + u32::from(self.current.black.rating.unwrap_or(0)))
            / 2;
        let standard = self
            .current
            .variant
            .as_ref()
            .map_or(true, |name| name == "Standard");
        let probability = if standard {
            // Sampling table: keep slower/stronger games more often.
            match self.current.speed.unwrap_or(Speed::Correspondence) {
                Speed::Correspondence | Speed::Classical => 1.00,
                _ if rating >= 2500 => 1.00,
                Speed::Rapid if rating >= 2200 => 1.00,
                Speed::Rapid if rating >= 2000 => 0.83,
                Speed::Rapid if rating >= 1800 => 0.46,
                Speed::Rapid if rating >= 1600 => 0.39,
                Speed::Blitz if rating >= 2200 => 0.38,
                Speed::Blitz if rating >= 2000 => 0.18,
                Speed::Blitz if rating >= 1600 => 0.13,
                Speed::Bullet if rating >= 2200 => 0.48,
                Speed::Bullet if rating >= 2000 => 0.27,
                Speed::Bullet if rating >= 1800 => 0.19,
                Speed::Bullet if rating >= 1600 => 0.18,
                Speed::UltraBullet => 1.00,
                _ => 0.02,
            }
        } else {
            // variant games
            if rating >= 1600 {
                1.00
            } else {
                0.50
            }
        };
        // Require both players >= 1501 and a successful probabilistic draw.
        let accept = min(
            self.current.white.rating.unwrap_or(0),
            self.current.black.rating.unwrap_or(0),
        ) >= 1501
            && probability >= self.rng.sample(OpenClosed01)
            && !self.skip;
        self.skip = !accept;
        Skip(self.skip)
    }

    fn san(&mut self, san: SanPlus) {
        self.current.moves.push(san);
    }

    fn begin_variation(&mut self) -> Skip {
        Skip(true) // stay in the mainline
    }

    /// Buffer the finished game; flush a full batch immediately.
    fn end_game(&mut self) {
        if !self.skip {
            // mem::take leaves a fresh default Game for the next game.
            self.batch.push(mem::take(&mut self.current));
            if self.batch.len() >= BATCH_SIZE {
                self.send();
            }
        }
    }
}
// Command-line arguments (plain comments, not ///, so clap's generated
// help text is unchanged).
#[derive(Parser)]
struct Args {
    // Base URL of the import service.
    #[clap(long, default_value = "http://localhost:9002")]
    endpoint: String,
    // PGN files to import; `.bz2` files are decompressed on the fly.
    pgns: Vec<PathBuf>,
}
/// Import every PGN file given on the command line, transparently
/// decompressing `.bz2` inputs, and flush the final partial batch per file.
fn main() -> Result<(), io::Error> {
    let args = Args::parse();
    for path in args.pgns {
        let file = File::open(&path)?;
        let compressed = path.extension() == Some(OsStr::new("bz2"));
        let input: Box<dyn io::Read> = if compressed {
            println!("Reading compressed {:?} ...", path);
            Box::new(bzip2::read::MultiBzDecoder::new(file))
        } else {
            println!("Reading {:?} ...", path);
            Box::new(file)
        };
        let mut pgn_reader = BufferedReader::new(input);
        let mut importer = Importer::new(&args.endpoint, path);
        pgn_reader.read_all(&mut importer)?;
        importer.send(); // flush whatever is left in the last batch
    }
    Ok(())
}
use new Outcome::from_ascii()
use std::{cmp::min, ffi::OsStr, fs::File, io, mem, num::Wrapping, path::PathBuf, time::Duration};
use clap::Parser;
use pgn_reader::{BufferedReader, Color, Outcome, RawHeader, SanPlus, Skip, Visitor};
use rand::{distributions::OpenClosed01, rngs::SmallRng, Rng, SeedableRng};
use serde::Serialize;
use serde_with::{serde_as, DisplayFromStr, SpaceSeparator, StringWithSeparator};
// Number of parsed games buffered before each upload to the endpoint.
const BATCH_SIZE: usize = 100;
// Lichess time-control buckets, fastest to slowest. Serialized in
// camelCase (e.g. "ultraBullet") to match what the endpoint expects.
#[derive(Debug, Serialize, Copy, Clone)]
#[serde(rename_all = "camelCase")]
enum Speed {
    UltraBullet,
    Bullet,
    Blitz,
    Rapid,
    Classical,
    Correspondence,
}
impl Speed {
    /// Bucket a clock into a speed category using the estimated game
    /// duration: initial seconds plus 40 moves' worth of increment.
    fn from_seconds_and_increment(seconds: u64, increment: u64) -> Speed {
        match seconds + 40 * increment {
            0..=29 => Speed::UltraBullet,
            30..=179 => Speed::Bullet,
            180..=479 => Speed::Blitz,
            480..=1499 => Speed::Rapid,
            1500..=21_599 => Speed::Classical,
            _ => Speed::Correspondence,
        }
    }

    /// Parse a PGN `TimeControl` tag: `"seconds+increment"`, or `"-"` for
    /// clockless (correspondence) games. Any malformed tag yields `Err(())`.
    fn from_bytes(bytes: &[u8]) -> Result<Speed, ()> {
        if bytes == b"-" {
            return Ok(Speed::Correspondence);
        }
        let mut fields = bytes.splitn(2, |ch| *ch == b'+');
        let mut next_number =
            || -> Result<u64, ()> { btoi::btou(fields.next().ok_or(())?).map_err(|_| ()) };
        let seconds = next_number()?;
        let increment = next_number()?;
        Ok(Speed::from_seconds_and_increment(seconds, increment))
    }
}
// Streaming PGN visitor that samples games and uploads them to the
// import endpoint in batches of BATCH_SIZE.
struct Importer {
    endpoint: String,                  // base URL of the import service
    filename: PathBuf,                 // source file, shown in progress output
    client: reqwest::blocking::Client, // blocking HTTP client with timeout
    rng: SmallRng,                     // sampling RNG (seeded deterministically)
    spinner_idx: Wrapping<usize>,      // rotating progress-spinner position
    current: Game,                     // game currently being parsed
    skip: bool,                        // true when `current` is being discarded
    batch: Vec<Game>,                  // parsed games awaiting upload
}
// One game in the JSON shape expected by the `/import/lichess` endpoint.
#[serde_as]
#[derive(Default, Serialize, Debug)]
struct Game {
    variant: Option<String>, // PGN `Variant` tag, if any
    speed: Option<Speed>,    // derived from the `TimeControl` tag
    fen: Option<String>,     // custom starting position, if any
    id: Option<String>,      // last path segment of the `Site` URL
    date: Option<String>,    // `Date` or `UTCDate` tag
    white: Player,
    black: Player,
    // Serialized through Display/FromStr; None for draws or unknown results.
    #[serde_as(as = "Option<DisplayFromStr>")]
    winner: Option<Color>,
    // Serialized as a single space-separated SAN string.
    #[serde_as(as = "StringWithSeparator<SpaceSeparator, SanPlus>")]
    moves: Vec<SanPlus>,
}
// Per-player metadata from the PGN headers.
#[derive(Default, Serialize, Debug)]
struct Player {
    name: Option<String>, // `White`/`Black` tag
    rating: Option<u16>,  // `WhiteElo`/`BlackElo`; None when the tag is "?"
}
impl Importer {
    /// Build an importer that uploads to `endpoint`, tagging progress
    /// output with `filename`.
    fn new(endpoint: &str, filename: PathBuf) -> Importer {
        let client = reqwest::blocking::Client::builder()
            .timeout(Duration::from_secs(60))
            .build()
            .expect("client");
        Importer {
            endpoint: endpoint.to_owned(),
            filename,
            client,
            // Fixed seed so the sampling in `end_headers` is deterministic.
            rng: SmallRng::from_seed([
                0x19, 0x29, 0xab, 0x17, 0xc6, 0xfa, 0xb0, 0xe9, 0x4b, 0x44, 0xd8, 0x07, 0x09, 0xbf,
                0x1d, 0x87, 0xbd, 0xd8, 0xb3, 0x2f, 0xe1, 0xe2, 0xa0, 0x1a, 0x9e, 0x30, 0x98, 0xd7,
                0xef, 0xd5, 0x7a, 0x1d,
            ]),
            spinner_idx: Wrapping(0),
            current: Game::default(),
            skip: false,
            batch: Vec::with_capacity(BATCH_SIZE),
        }
    }

    /// PUT the buffered batch to the import endpoint, print one progress
    /// line, then clear the batch. Panics if the request fails.
    pub fn send(&mut self) {
        const SPINNER: [char; 8] = ['⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷'];
        let response = self
            .client
            .put(format!("{}/import/lichess", self.endpoint))
            .json(&self.batch)
            .send()
            .expect("send batch");
        self.spinner_idx += Wrapping(1);
        let latest_date = self
            .batch
            .last()
            .and_then(|game| game.date.as_ref())
            .cloned()
            .unwrap_or_default();
        println!(
            "{} {:?}: {}: {} - {}",
            SPINNER[self.spinner_idx.0 % SPINNER.len()],
            self.filename,
            latest_date,
            response.status(),
            response.text().expect("decode response")
        );
        self.batch.clear();
    }
}
impl Visitor for Importer {
    type Result = ();

    /// Reset per-game state; the next game's headers follow.
    fn begin_game(&mut self) {
        self.skip = false;
        self.current = Game::default();
    }

    /// Collect the PGN tags we care about; marks the game skipped for BOT
    /// players and unparsable results.
    fn header(&mut self, key: &[u8], value: RawHeader<'_>) {
        if key == b"White" {
            self.current.white.name = Some(value.decode_utf8().expect("White").into_owned());
        } else if key == b"Black" {
            self.current.black.name = Some(value.decode_utf8().expect("Black").into_owned());
        } else if key == b"WhiteElo" {
            if value.as_bytes() != b"?" {
                self.current.white.rating = Some(btoi::btoi(value.as_bytes()).expect("WhiteElo"));
            }
        } else if key == b"BlackElo" {
            if value.as_bytes() != b"?" {
                self.current.black.rating = Some(btoi::btoi(value.as_bytes()).expect("BlackElo"));
            }
        } else if key == b"TimeControl" {
            self.current.speed = Some(Speed::from_bytes(value.as_bytes()).expect("TimeControl"));
        } else if key == b"Variant" {
            self.current.variant = Some(value.decode_utf8().expect("Variant").into_owned());
        } else if key == b"Date" || key == b"UTCDate" {
            self.current.date = Some(value.decode_utf8().expect("Date").into_owned());
        } else if key == b"WhiteTitle" || key == b"BlackTitle" {
            if value.as_bytes() == b"BOT" {
                self.skip = true;
            }
        } else if key == b"Site" {
            // The game id is the last path segment of the Site URL.
            self.current.id = Some(
                String::from_utf8(
                    value
                        .as_bytes()
                        .rsplitn(2, |ch| *ch == b'/')
                        .next()
                        .expect("Site")
                        .to_owned(),
                )
                .expect("Site"),
            );
        } else if key == b"Result" {
            match Outcome::from_ascii(value.as_bytes()) {
                Ok(outcome) => self.current.winner = outcome.winner(),
                Err(_) => self.skip = true, // unterminated or unknown result
            }
        } else if key == b"FEN" {
            if value.as_bytes() == b"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1" {
                // https://github.com/ornicar/lichess-db/issues/40
                self.current.fen = None;
            } else {
                self.current.fen = Some(value.decode_utf8().expect("FEN").into_owned());
            }
        }
    }

    /// Decide whether to keep this game, sampling by speed and rating.
    fn end_headers(&mut self) -> Skip {
        // FIX: average in u32 — summing two u16 ratings can exceed
        // u16::MAX on malformed Elo tags, panicking in debug builds.
        let rating = (u32::from(self.current.white.rating.unwrap_or(0))
            + u32::from(self.current.black.rating.unwrap_or(0)))
            / 2;
        let standard = self
            .current
            .variant
            .as_ref()
            .map_or(true, |name| name == "Standard");
        let probability = if standard {
            // Sampling table: keep slower/stronger games more often.
            match self.current.speed.unwrap_or(Speed::Correspondence) {
                Speed::Correspondence | Speed::Classical => 1.00,
                _ if rating >= 2500 => 1.00,
                Speed::Rapid if rating >= 2200 => 1.00,
                Speed::Rapid if rating >= 2000 => 0.83,
                Speed::Rapid if rating >= 1800 => 0.46,
                Speed::Rapid if rating >= 1600 => 0.39,
                Speed::Blitz if rating >= 2200 => 0.38,
                Speed::Blitz if rating >= 2000 => 0.18,
                Speed::Blitz if rating >= 1600 => 0.13,
                Speed::Bullet if rating >= 2200 => 0.48,
                Speed::Bullet if rating >= 2000 => 0.27,
                Speed::Bullet if rating >= 1800 => 0.19,
                Speed::Bullet if rating >= 1600 => 0.18,
                Speed::UltraBullet => 1.00,
                _ => 0.02,
            }
        } else {
            // variant games
            if rating >= 1600 {
                1.00
            } else {
                0.50
            }
        };
        // Require both players >= 1501 and a successful probabilistic draw.
        let accept = min(
            self.current.white.rating.unwrap_or(0),
            self.current.black.rating.unwrap_or(0),
        ) >= 1501
            && probability >= self.rng.sample(OpenClosed01)
            && !self.skip;
        self.skip = !accept;
        Skip(self.skip)
    }

    fn san(&mut self, san: SanPlus) {
        self.current.moves.push(san);
    }

    fn begin_variation(&mut self) -> Skip {
        Skip(true) // stay in the mainline
    }

    /// Buffer the finished game; flush a full batch immediately.
    fn end_game(&mut self) {
        if !self.skip {
            // mem::take leaves a fresh default Game for the next game.
            self.batch.push(mem::take(&mut self.current));
            if self.batch.len() >= BATCH_SIZE {
                self.send();
            }
        }
    }
}
// Command-line arguments (plain comments, not ///, so clap's generated
// help text is unchanged).
#[derive(Parser)]
struct Args {
    // Base URL of the import service.
    #[clap(long, default_value = "http://localhost:9002")]
    endpoint: String,
    // PGN files to import; `.bz2` files are decompressed on the fly.
    pgns: Vec<PathBuf>,
}
/// Import every PGN file given on the command line, transparently
/// decompressing `.bz2` inputs, and flush the final partial batch per file.
fn main() -> Result<(), io::Error> {
    let args = Args::parse();
    for path in args.pgns {
        let file = File::open(&path)?;
        let compressed = path.extension() == Some(OsStr::new("bz2"));
        let input: Box<dyn io::Read> = if compressed {
            println!("Reading compressed {:?} ...", path);
            Box::new(bzip2::read::MultiBzDecoder::new(file))
        } else {
            println!("Reading {:?} ...", path);
            Box::new(file)
        };
        let mut pgn_reader = BufferedReader::new(input);
        let mut importer = Importer::new(&args.endpoint, path);
        pgn_reader.read_all(&mut importer)?;
        importer.send(); // flush whatever is left in the last batch
    }
    Ok(())
}
|
use std::{cmp::min, env, fs::File, io, mem, time::Duration};
use pgn_reader::{BufferedReader, Color, RawHeader, SanPlus, Skip, Visitor};
use rand::{distributions::OpenClosed01, rngs::SmallRng, Rng, SeedableRng};
use serde::Serialize;
use serde_with::{serde_as, DisplayFromStr, SpaceSeparator, StringWithSeparator};
// Number of parsed games buffered before each upload to the endpoint.
const BATCH_SIZE: usize = 50;
// Lichess time-control buckets, fastest to slowest. Serialized in
// camelCase (e.g. "ultraBullet") to match what the endpoint expects.
#[derive(Debug, Serialize, Copy, Clone)]
#[serde(rename_all = "camelCase")]
enum Speed {
    UltraBullet,
    Bullet,
    Blitz,
    Rapid,
    Classical,
    Correspondence,
}
impl Speed {
    /// Bucket a clock into a speed category using the estimated game
    /// duration: initial seconds plus 40 moves' worth of increment.
    fn from_seconds_and_increment(seconds: u64, increment: u64) -> Speed {
        match seconds + 40 * increment {
            0..=29 => Speed::UltraBullet,
            30..=179 => Speed::Bullet,
            180..=479 => Speed::Blitz,
            480..=1499 => Speed::Rapid,
            1500..=21_599 => Speed::Classical,
            _ => Speed::Correspondence,
        }
    }

    /// Parse a PGN `TimeControl` tag: `"seconds+increment"`, or `"-"` for
    /// clockless (correspondence) games. Any malformed tag yields `Err(())`.
    fn from_bytes(bytes: &[u8]) -> Result<Speed, ()> {
        if bytes == b"-" {
            return Ok(Speed::Correspondence);
        }
        let mut fields = bytes.splitn(2, |ch| *ch == b'+');
        let mut next_number =
            || -> Result<u64, ()> { btoi::btou(fields.next().ok_or(())?).map_err(|_| ()) };
        let seconds = next_number()?;
        let increment = next_number()?;
        Ok(Speed::from_seconds_and_increment(seconds, increment))
    }
}
// Streaming PGN visitor that samples games and uploads them to a
// hard-coded local endpoint in batches of BATCH_SIZE.
struct Importer {
    filename: String,                  // source file, shown in progress output
    client: reqwest::blocking::Client, // blocking HTTP client with timeout
    rng: SmallRng,                     // sampling RNG (seeded deterministically)
    current: Game,                     // game currently being parsed
    skip: bool,                        // true when `current` is being discarded
    batch: Vec<Game>,                  // parsed games awaiting upload
}
// One game in the JSON shape expected by the `/import/lichess` endpoint.
#[serde_as]
#[derive(Default, Serialize, Debug)]
struct Game {
    variant: Option<String>, // PGN `Variant` tag, if any
    speed: Option<Speed>,    // derived from the `TimeControl` tag
    fen: Option<String>,     // custom starting position, if any
    id: Option<String>,      // last path segment of the `Site` URL
    date: Option<String>,    // `Date` or `UTCDate` tag
    white: Player,
    black: Player,
    // Serialized through Display/FromStr; None for draws or unknown results.
    #[serde_as(as = "Option<DisplayFromStr>")]
    winner: Option<Color>,
    // Serialized as a single space-separated SAN string.
    #[serde_as(as = "StringWithSeparator<SpaceSeparator, SanPlus>")]
    moves: Vec<SanPlus>,
}
// Per-player metadata from the PGN headers.
#[derive(Default, Serialize, Debug)]
struct Player {
    name: Option<String>, // `White`/`Black` tag
    rating: Option<u16>,  // `WhiteElo`/`BlackElo`; None when the tag is "?"
}
impl Importer {
    /// Create an importer for `filename` with a 60-second HTTP timeout.
    fn new(filename: String) -> Importer {
        Importer {
            filename,
            client: reqwest::blocking::Client::builder()
                .timeout(Duration::from_secs(60))
                .build()
                .expect("client"),
            // Fixed seed: sampling decisions in `end_headers` are
            // deterministic across runs — presumably so repeated imports
            // select the same games. TODO confirm.
            rng: SmallRng::from_seed([
                0x19, 0x29, 0xab, 0x17, 0xc6, 0xfa, 0xb0, 0xe9, 0x4b, 0x44, 0xd8, 0x07, 0x09, 0xbf,
                0x1d, 0x87, 0xbd, 0xd8, 0xb3, 0x2f, 0xe1, 0xe2, 0xa0, 0x1a, 0x9e, 0x30, 0x98, 0xd7,
                0xef, 0xd5, 0x7a, 0x1d,
            ]),
            current: Game::default(),
            skip: false,
            batch: Vec::with_capacity(BATCH_SIZE),
        }
    }

    /// PUT the buffered batch to the (hard-coded) local import endpoint,
    /// print one progress line (file, latest game date, HTTP status,
    /// response body), then clear the batch. Panics if the request fails.
    pub fn send(&mut self) {
        // println!("{}", serde_json::to_string(&self.batch).expect("serialize"));
        let res = self
            .client
            .put("http://127.0.0.1:9001/import/lichess")
            .json(&self.batch)
            .send()
            .expect("send batch");
        println!(
            "{}: {}: {} - {}",
            self.filename,
            self.batch
                .last()
                .and_then(|g| g.date.as_ref())
                .unwrap_or(&String::new()),
            res.status(),
            res.text().expect("decode response")
        );
        self.batch.clear();
    }
}
impl Visitor for Importer {
    type Result = ();

    /// Reset per-game state; the next game's headers follow.
    fn begin_game(&mut self) {
        self.skip = false;
        self.current = Game::default();
    }

    /// Collect the PGN tags we care about; marks the game skipped for BOT
    /// players and unknown results.
    fn header(&mut self, key: &[u8], value: RawHeader<'_>) {
        if key == b"White" {
            self.current.white.name = Some(value.decode_utf8().expect("White").into_owned());
        } else if key == b"Black" {
            self.current.black.name = Some(value.decode_utf8().expect("Black").into_owned());
        } else if key == b"WhiteElo" {
            if value.as_bytes() != b"?" {
                self.current.white.rating = Some(btoi::btoi(value.as_bytes()).expect("WhiteElo"));
            }
        } else if key == b"BlackElo" {
            if value.as_bytes() != b"?" {
                self.current.black.rating = Some(btoi::btoi(value.as_bytes()).expect("BlackElo"));
            }
        } else if key == b"TimeControl" {
            self.current.speed = Some(Speed::from_bytes(value.as_bytes()).expect("TimeControl"));
        } else if key == b"Variant" {
            self.current.variant = Some(value.decode_utf8().expect("Variant").into_owned());
        } else if key == b"Date" || key == b"UTCDate" {
            self.current.date = Some(value.decode_utf8().expect("Date").into_owned());
        } else if key == b"WhiteTitle" || key == b"BlackTitle" {
            if value.as_bytes() == b"BOT" {
                self.skip = true;
            }
        } else if key == b"Site" {
            // The game id is the last path segment of the Site URL.
            self.current.id = Some(
                String::from_utf8(
                    value
                        .as_bytes()
                        .rsplitn(2, |ch| *ch == b'/')
                        .next()
                        .expect("Site")
                        .to_owned(),
                )
                .expect("Site"),
            );
        } else if key == b"Result" {
            match value.as_bytes() {
                b"1-0" => self.current.winner = Some(Color::White),
                b"0-1" => self.current.winner = Some(Color::Black),
                b"1/2-1/2" => self.current.winner = None,
                _ => self.skip = true, // unterminated or unknown result
            }
        } else if key == b"FEN" {
            if value.as_bytes() == b"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1" {
                // FIX: some games carry a FEN tag equal to the normal start
                // position; treat that as "no custom position".
                // Works around https://github.com/ornicar/lichess-db/issues/40
                self.current.fen = None;
            } else {
                self.current.fen = Some(value.decode_utf8().expect("FEN").into_owned());
            }
        }
    }

    /// Decide whether to keep this game, sampling by speed and rating.
    fn end_headers(&mut self) -> Skip {
        // Average in u32 — summing two u16 ratings can exceed u16::MAX on
        // malformed Elo tags, panicking in debug builds.
        let rating = (u32::from(self.current.white.rating.unwrap_or(0))
            + u32::from(self.current.black.rating.unwrap_or(0)))
            / 2;
        // BUG FIX: this was `map_or(false, |name| name != "Standard")`,
        // which is true for *variant* games and false for standard ones,
        // swapping the two probability branches below. A standard game is
        // one with no Variant tag or an explicit "Standard".
        let standard = self
            .current
            .variant
            .as_ref()
            .map_or(true, |name| name == "Standard");
        let probability = if standard {
            // Sampling table: keep slower/stronger games more often.
            match self.current.speed.unwrap_or(Speed::Correspondence) {
                Speed::Correspondence | Speed::Classical => 1.0,
                _ if rating >= 2500 => 1.0,
                Speed::Rapid if rating >= 2200 => 1.0,
                Speed::Rapid if rating >= 2000 => 0.30,
                Speed::Rapid if rating >= 1800 => 0.18,
                Speed::Rapid if rating >= 1600 => 0.16,
                Speed::Blitz if rating >= 2200 => 0.16,
                Speed::Blitz if rating >= 2000 => 0.07,
                Speed::Blitz if rating >= 1800 => 0.05,
                Speed::Blitz if rating >= 1600 => 0.05,
                Speed::Bullet if rating >= 2200 => 0.20,
                Speed::Bullet if rating >= 2000 => 0.11,
                Speed::Bullet if rating >= 1800 => 0.07,
                Speed::Bullet if rating >= 1600 => 0.07,
                Speed::UltraBullet => 1.0,
                _ => 0.02,
            }
        } else {
            // variant games
            if rating >= 1600 {
                1.0
            } else {
                0.5
            }
        };
        // Require both players >= 1501 and a successful probabilistic draw.
        let accept = min(
            self.current.white.rating.unwrap_or(0),
            self.current.black.rating.unwrap_or(0),
        ) >= 1501
            && probability >= self.rng.sample(OpenClosed01)
            && !self.skip;
        self.skip = !accept;
        Skip(self.skip)
    }

    fn san(&mut self, san: SanPlus) {
        self.current.moves.push(san);
    }

    fn begin_variation(&mut self) -> Skip {
        Skip(true) // stay in the mainline
    }

    /// Buffer the finished game; flush a full batch immediately.
    fn end_game(&mut self) {
        if !self.skip {
            // mem::take leaves a fresh default Game for the next game.
            self.batch.push(mem::take(&mut self.current));
            if self.batch.len() >= BATCH_SIZE {
                self.send();
            }
        }
    }
}
/// Import every PGN file given on the command line, transparently
/// decompressing `.bz2` inputs.
fn main() -> Result<(), io::Error> {
    for arg in env::args().skip(1) {
        let file = File::open(&arg)?;
        let uncompressed: Box<dyn io::Read> = if arg.ends_with(".bz2") {
            Box::new(bzip2::read::MultiBzDecoder::new(file))
        } else {
            Box::new(file)
        };
        let mut reader = BufferedReader::new(uncompressed);
        let mut importer = Importer::new(arg);
        reader.read_all(&mut importer)?;
        // FIX: flush the final partial batch. Without this, up to
        // BATCH_SIZE - 1 games at the end of each file were parsed but
        // never uploaded (send() only fires when a batch fills up).
        importer.send();
    }
    Ok(())
}
work around ornicar/lichess-db#40
use std::{cmp::min, env, fs::File, io, mem, time::Duration};
use pgn_reader::{BufferedReader, Color, RawHeader, SanPlus, Skip, Visitor};
use rand::{distributions::OpenClosed01, rngs::SmallRng, Rng, SeedableRng};
use serde::Serialize;
use serde_with::{serde_as, DisplayFromStr, SpaceSeparator, StringWithSeparator};
// Number of parsed games buffered before each upload to the endpoint.
const BATCH_SIZE: usize = 50;
// Lichess time-control buckets, fastest to slowest. Serialized in
// camelCase (e.g. "ultraBullet") to match what the endpoint expects.
#[derive(Debug, Serialize, Copy, Clone)]
#[serde(rename_all = "camelCase")]
enum Speed {
    UltraBullet,
    Bullet,
    Blitz,
    Rapid,
    Classical,
    Correspondence,
}
impl Speed {
    /// Bucket a clock into a speed category using the estimated game
    /// duration: initial seconds plus 40 moves' worth of increment.
    fn from_seconds_and_increment(seconds: u64, increment: u64) -> Speed {
        match seconds + 40 * increment {
            0..=29 => Speed::UltraBullet,
            30..=179 => Speed::Bullet,
            180..=479 => Speed::Blitz,
            480..=1499 => Speed::Rapid,
            1500..=21_599 => Speed::Classical,
            _ => Speed::Correspondence,
        }
    }

    /// Parse a PGN `TimeControl` tag: `"seconds+increment"`, or `"-"` for
    /// clockless (correspondence) games. Any malformed tag yields `Err(())`.
    fn from_bytes(bytes: &[u8]) -> Result<Speed, ()> {
        if bytes == b"-" {
            return Ok(Speed::Correspondence);
        }
        let mut fields = bytes.splitn(2, |ch| *ch == b'+');
        let mut next_number =
            || -> Result<u64, ()> { btoi::btou(fields.next().ok_or(())?).map_err(|_| ()) };
        let seconds = next_number()?;
        let increment = next_number()?;
        Ok(Speed::from_seconds_and_increment(seconds, increment))
    }
}
// Streaming PGN visitor that samples games and uploads them to a
// hard-coded local endpoint in batches of BATCH_SIZE.
struct Importer {
    filename: String,                  // source file, shown in progress output
    client: reqwest::blocking::Client, // blocking HTTP client with timeout
    rng: SmallRng,                     // sampling RNG (seeded deterministically)
    current: Game,                     // game currently being parsed
    skip: bool,                        // true when `current` is being discarded
    batch: Vec<Game>,                  // parsed games awaiting upload
}
// One game in the JSON shape expected by the `/import/lichess` endpoint.
#[serde_as]
#[derive(Default, Serialize, Debug)]
struct Game {
    variant: Option<String>, // PGN `Variant` tag, if any
    speed: Option<Speed>,    // derived from the `TimeControl` tag
    fen: Option<String>,     // custom starting position, if any
    id: Option<String>,      // last path segment of the `Site` URL
    date: Option<String>,    // `Date` or `UTCDate` tag
    white: Player,
    black: Player,
    // Serialized through Display/FromStr; None for draws or unknown results.
    #[serde_as(as = "Option<DisplayFromStr>")]
    winner: Option<Color>,
    // Serialized as a single space-separated SAN string.
    #[serde_as(as = "StringWithSeparator<SpaceSeparator, SanPlus>")]
    moves: Vec<SanPlus>,
}
// Per-player metadata from the PGN headers.
#[derive(Default, Serialize, Debug)]
struct Player {
    name: Option<String>, // `White`/`Black` tag
    rating: Option<u16>,  // `WhiteElo`/`BlackElo`; None when the tag is "?"
}
impl Importer {
    /// Create an importer for `filename` with a 60-second HTTP timeout.
    fn new(filename: String) -> Importer {
        Importer {
            filename,
            client: reqwest::blocking::Client::builder()
                .timeout(Duration::from_secs(60))
                .build()
                .expect("client"),
            // Fixed seed: sampling decisions in `end_headers` are
            // deterministic across runs — presumably so repeated imports
            // select the same games. TODO confirm.
            rng: SmallRng::from_seed([
                0x19, 0x29, 0xab, 0x17, 0xc6, 0xfa, 0xb0, 0xe9, 0x4b, 0x44, 0xd8, 0x07, 0x09, 0xbf,
                0x1d, 0x87, 0xbd, 0xd8, 0xb3, 0x2f, 0xe1, 0xe2, 0xa0, 0x1a, 0x9e, 0x30, 0x98, 0xd7,
                0xef, 0xd5, 0x7a, 0x1d,
            ]),
            current: Game::default(),
            skip: false,
            batch: Vec::with_capacity(BATCH_SIZE),
        }
    }

    /// PUT the buffered batch to the (hard-coded) local import endpoint,
    /// print one progress line (file, latest game date, HTTP status,
    /// response body), then clear the batch. Panics if the request fails.
    pub fn send(&mut self) {
        // println!("{}", serde_json::to_string(&self.batch).expect("serialize"));
        let res = self
            .client
            .put("http://127.0.0.1:9001/import/lichess")
            .json(&self.batch)
            .send()
            .expect("send batch");
        println!(
            "{}: {}: {} - {}",
            self.filename,
            self.batch
                .last()
                .and_then(|g| g.date.as_ref())
                .unwrap_or(&String::new()),
            res.status(),
            res.text().expect("decode response")
        );
        self.batch.clear();
    }
}
impl Visitor for Importer {
    type Result = ();

    /// Reset per-game state; the next game's headers follow.
    fn begin_game(&mut self) {
        self.skip = false;
        self.current = Game::default();
    }

    /// Collect the PGN tags we care about; marks the game skipped for BOT
    /// players and unknown results.
    fn header(&mut self, key: &[u8], value: RawHeader<'_>) {
        if key == b"White" {
            self.current.white.name = Some(value.decode_utf8().expect("White").into_owned());
        } else if key == b"Black" {
            self.current.black.name = Some(value.decode_utf8().expect("Black").into_owned());
        } else if key == b"WhiteElo" {
            if value.as_bytes() != b"?" {
                self.current.white.rating = Some(btoi::btoi(value.as_bytes()).expect("WhiteElo"));
            }
        } else if key == b"BlackElo" {
            if value.as_bytes() != b"?" {
                self.current.black.rating = Some(btoi::btoi(value.as_bytes()).expect("BlackElo"));
            }
        } else if key == b"TimeControl" {
            self.current.speed = Some(Speed::from_bytes(value.as_bytes()).expect("TimeControl"));
        } else if key == b"Variant" {
            self.current.variant = Some(value.decode_utf8().expect("Variant").into_owned());
        } else if key == b"Date" || key == b"UTCDate" {
            self.current.date = Some(value.decode_utf8().expect("Date").into_owned());
        } else if key == b"WhiteTitle" || key == b"BlackTitle" {
            if value.as_bytes() == b"BOT" {
                self.skip = true;
            }
        } else if key == b"Site" {
            // The game id is the last path segment of the Site URL.
            self.current.id = Some(
                String::from_utf8(
                    value
                        .as_bytes()
                        .rsplitn(2, |ch| *ch == b'/')
                        .next()
                        .expect("Site")
                        .to_owned(),
                )
                .expect("Site"),
            );
        } else if key == b"Result" {
            match value.as_bytes() {
                b"1-0" => self.current.winner = Some(Color::White),
                b"0-1" => self.current.winner = Some(Color::Black),
                b"1/2-1/2" => self.current.winner = None,
                _ => self.skip = true, // unterminated or unknown result
            }
        } else if key == b"FEN" {
            if value.as_bytes() == b"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1" {
                // https://github.com/ornicar/lichess-db/issues/40
                self.current.fen = None;
            } else {
                self.current.fen = Some(value.decode_utf8().expect("FEN").into_owned());
            }
        }
    }

    /// Decide whether to keep this game, sampling by speed and rating.
    fn end_headers(&mut self) -> Skip {
        // Average in u32 — summing two u16 ratings can exceed u16::MAX on
        // malformed Elo tags, panicking in debug builds.
        let rating = (u32::from(self.current.white.rating.unwrap_or(0))
            + u32::from(self.current.black.rating.unwrap_or(0)))
            / 2;
        // BUG FIX: this was `map_or(false, |name| name != "Standard")`,
        // which is true for *variant* games and false for standard ones,
        // swapping the two probability branches below. A standard game is
        // one with no Variant tag or an explicit "Standard".
        let standard = self
            .current
            .variant
            .as_ref()
            .map_or(true, |name| name == "Standard");
        let probability = if standard {
            // Sampling table: keep slower/stronger games more often.
            match self.current.speed.unwrap_or(Speed::Correspondence) {
                Speed::Correspondence | Speed::Classical => 1.0,
                _ if rating >= 2500 => 1.0,
                Speed::Rapid if rating >= 2200 => 1.0,
                Speed::Rapid if rating >= 2000 => 0.30,
                Speed::Rapid if rating >= 1800 => 0.18,
                Speed::Rapid if rating >= 1600 => 0.16,
                Speed::Blitz if rating >= 2200 => 0.16,
                Speed::Blitz if rating >= 2000 => 0.07,
                Speed::Blitz if rating >= 1800 => 0.05,
                Speed::Blitz if rating >= 1600 => 0.05,
                Speed::Bullet if rating >= 2200 => 0.20,
                Speed::Bullet if rating >= 2000 => 0.11,
                Speed::Bullet if rating >= 1800 => 0.07,
                Speed::Bullet if rating >= 1600 => 0.07,
                Speed::UltraBullet => 1.0,
                _ => 0.02,
            }
        } else {
            // variant games
            if rating >= 1600 {
                1.0
            } else {
                0.5
            }
        };
        // Require both players >= 1501 and a successful probabilistic draw.
        let accept = min(
            self.current.white.rating.unwrap_or(0),
            self.current.black.rating.unwrap_or(0),
        ) >= 1501
            && probability >= self.rng.sample(OpenClosed01)
            && !self.skip;
        self.skip = !accept;
        Skip(self.skip)
    }

    fn san(&mut self, san: SanPlus) {
        self.current.moves.push(san);
    }

    fn begin_variation(&mut self) -> Skip {
        Skip(true) // stay in the mainline
    }

    /// Buffer the finished game; flush a full batch immediately.
    fn end_game(&mut self) {
        if !self.skip {
            // mem::take leaves a fresh default Game for the next game.
            self.batch.push(mem::take(&mut self.current));
            if self.batch.len() >= BATCH_SIZE {
                self.send();
            }
        }
    }
}
/// Entry point: imports every PGN file named on the command line,
/// transparently decompressing `.bz2` archives before parsing.
fn main() -> Result<(), io::Error> {
    for path in env::args().skip(1) {
        let file = File::open(&path)?;
        // Compressed inputs get a streaming decoder; plain files pass through.
        let input: Box<dyn io::Read> = if path.ends_with(".bz2") {
            Box::new(bzip2::read::MultiBzDecoder::new(file))
        } else {
            Box::new(file)
        };
        let mut pgn_reader = BufferedReader::new(input);
        let mut importer = Importer::new(path);
        pgn_reader.read_all(&mut importer)?;
    }
    Ok(())
}
|
use super::WinningRootHashSet;
use crate::common::get_attesting_indices_unsorted;
use types::*;
/// Promotes the boolean field `var` on `self` to `true` whenever it is `true`
/// on `other`; a `true` on `self` is never cleared.
macro_rules! set_self_if_other_is_true {
    ($self_: ident, $other: ident, $var: ident) => {
        $self_.$var = $self_.$var || $other.$var;
    };
}
/// The information required to reward some validator for their participation in a "winning"
/// crosslink root.
///
/// Both fields are summed effective balances over the crosslink committee.
#[derive(Default, Clone)]
pub struct WinningRootInfo {
    /// The total balance of the crosslink committee.
    pub total_committee_balance: u64,
    /// The total balance of the crosslink committee that attested for the "winning" root.
    pub total_attesting_balance: u64,
}
/// The information required to reward a block producer for including an attestation in a block.
///
/// The `Default` impl uses max-value sentinels so any real inclusion compares
/// as earlier (see `InclusionInfo::update`).
#[derive(Clone, Copy)]
pub struct InclusionInfo {
    /// The earliest slot a validator had an attestation included in the previous epoch.
    pub slot: Slot,
    /// The distance between the attestation slot and the slot that attestation was included in a
    /// block.
    pub distance: u64,
    /// The index of the proposer at the slot where the attestation was included.
    pub proposer_index: usize,
}
impl Default for InclusionInfo {
/// Defaults to `slot` and `distance` at their maximum values and `proposer_index` at zero.
fn default() -> Self {
Self {
slot: Slot::max_value(),
distance: u64::max_value(),
proposer_index: 0,
}
}
}
impl InclusionInfo {
    /// Replaces `self` wholesale with `other` when `other` records a strictly
    /// earlier inclusion slot; otherwise leaves `self` untouched.
    pub fn update(&mut self, other: &Self) {
        if other.slot >= self.slot {
            return;
        }
        self.slot = other.slot;
        self.distance = other.distance;
        self.proposer_index = other.proposer_index;
    }
}
/// Information required to reward some validator during the current and previous epoch.
#[derive(Default, Clone)]
pub struct ValidatorStatus {
    /// True if the validator has been slashed, ever.
    pub is_slashed: bool,
    /// True if the validator can withdraw in the current epoch.
    pub is_withdrawable_in_current_epoch: bool,
    /// True if the validator was active in the state's _current_ epoch.
    pub is_active_in_current_epoch: bool,
    /// True if the validator was active in the state's _previous_ epoch.
    pub is_active_in_previous_epoch: bool,
    /// The validator's effective balance in the _current_ epoch.
    pub current_epoch_effective_balance: u64,
    /// True if the validator had an attestation included in the _current_ epoch.
    pub is_current_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _current_
    /// epoch matches the block root known to the state.
    pub is_current_epoch_target_attester: bool,
    /// True if the validator had an attestation included in the _previous_ epoch.
    pub is_previous_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _previous_
    /// epoch matches the block root known to the state.
    pub is_previous_epoch_target_attester: bool,
    /// True if the validator's beacon block root attestation in the _previous_ epoch at the
    /// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
    pub is_previous_epoch_head_attester: bool,
    /// Information used to reward the block producer of this validator's earliest-included
    /// attestation.
    pub inclusion_info: Option<InclusionInfo>,
    /// Information used to reward/penalize the validator if they voted in the super-majority for
    /// some shard block.
    pub winning_root_info: Option<WinningRootInfo>,
}
impl ValidatorStatus {
    /// Merges `other` into `self`.
    ///
    /// Boolean fields are only ever promoted from `false` to `true`; a `true`
    /// on `self` is never cleared.
    ///
    /// Note: does not update the winning root info, this is done manually.
    pub fn update(&mut self, other: &Self) {
        // Promote every flag that is set on `other` (never clears a flag).
        set_self_if_other_is_true!(self, other, is_slashed);
        set_self_if_other_is_true!(self, other, is_withdrawable_in_current_epoch);
        set_self_if_other_is_true!(self, other, is_active_in_current_epoch);
        set_self_if_other_is_true!(self, other, is_active_in_previous_epoch);
        set_self_if_other_is_true!(self, other, is_current_epoch_attester);
        set_self_if_other_is_true!(self, other, is_current_epoch_target_attester);
        set_self_if_other_is_true!(self, other, is_previous_epoch_attester);
        set_self_if_other_is_true!(self, other, is_previous_epoch_target_attester);
        set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester);
        // Keep whichever inclusion info records the earliest slot.
        match self.inclusion_info.as_mut() {
            Some(mine) => {
                if let Some(theirs) = &other.inclusion_info {
                    mine.update(theirs);
                }
            }
            None => self.inclusion_info = other.inclusion_info,
        }
    }
}
/// The total effective balances for different sets of validators during the previous and current
/// epochs.
///
/// NOTE(review): the `*_attesters` totals only include unslashed validators —
/// see `ValidatorStatuses::process_attestations`.
#[derive(Default, Clone)]
pub struct TotalBalances {
    /// The total effective balance of all active validators during the _current_ epoch.
    pub current_epoch: u64,
    /// The total effective balance of all active validators during the _previous_ epoch.
    pub previous_epoch: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch.
    pub current_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _current_ epoch.
    pub current_epoch_target_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch.
    pub previous_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
    pub previous_epoch_target_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the time of attestation.
    pub previous_epoch_head_attesters: u64,
}
/// Summarised information about validator participation in the _previous_ and _current_ epochs of
/// some `BeaconState`.
#[derive(Clone)]
pub struct ValidatorStatuses {
    /// Information about each individual validator from the state's validator registry.
    pub statuses: Vec<ValidatorStatus>,
    /// Summed balances for various sets of validators.
    pub total_balances: TotalBalances,
}
impl ValidatorStatuses {
    /// Initializes a new instance, determining:
    ///
    /// - Active validators
    /// - Total balances for the current and previous epochs.
    ///
    /// Spec v0.6.1
    pub fn new<T: EthSpec>(
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Result<Self, BeaconStateError> {
        let mut statuses = Vec::with_capacity(state.validator_registry.len());
        let mut total_balances = TotalBalances::default();
        for (i, validator) in state.validator_registry.iter().enumerate() {
            let effective_balance = state.get_effective_balance(i, spec)?;
            let mut status = ValidatorStatus {
                is_slashed: validator.slashed,
                is_withdrawable_in_current_epoch: validator
                    .is_withdrawable_at(state.current_epoch()),
                current_epoch_effective_balance: effective_balance,
                ..ValidatorStatus::default()
            };
            if validator.is_active_at(state.current_epoch()) {
                status.is_active_in_current_epoch = true;
                total_balances.current_epoch += effective_balance;
            }
            if validator.is_active_at(state.previous_epoch()) {
                status.is_active_in_previous_epoch = true;
                total_balances.previous_epoch += effective_balance;
            }
            statuses.push(status);
        }
        Ok(Self {
            statuses,
            total_balances,
        })
    }
    /// Process some attestations from the given `state` updating the `statuses` and
    /// `total_balances` fields.
    ///
    /// Spec v0.6.1
    pub fn process_attestations<T: EthSpec>(
        &mut self,
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        for a in state
            .previous_epoch_attestations
            .iter()
            .chain(state.current_epoch_attestations.iter())
        {
            let attesting_indices =
                get_attesting_indices_unsorted(state, &a.data, &a.aggregation_bitfield)?;
            let mut status = ValidatorStatus::default();
            // Profile this attestation, updating the total balances and generating an
            // `ValidatorStatus` object that applies to all participants in the attestation.
            if is_from_epoch(a, state.current_epoch()) {
                status.is_current_epoch_attester = true;
                if target_matches_epoch_start_block(a, state, state.current_epoch(), spec)? {
                    status.is_current_epoch_target_attester = true;
                }
            } else if is_from_epoch(a, state.previous_epoch()) {
                status.is_previous_epoch_attester = true;
                // The inclusion slot and distance are only required for previous epoch attesters.
                let attestation_slot = state.get_attestation_slot(&a.data)?;
                let inclusion_slot = attestation_slot + a.inclusion_delay;
                let relative_epoch =
                    RelativeEpoch::from_slot(state.slot, inclusion_slot, spec.slots_per_epoch)?;
                status.inclusion_info = Some(InclusionInfo {
                    slot: inclusion_slot,
                    distance: a.inclusion_delay,
                    // BUG FIX: the proposer to reward is the one at the slot where the
                    // attestation was *included* (see `InclusionInfo::proposer_index`),
                    // not at the attestation's own slot. `relative_epoch` above is
                    // already derived from `inclusion_slot`; passing `attestation_slot`
                    // here was inconsistent with it.
                    proposer_index: state.get_beacon_proposer_index(
                        inclusion_slot,
                        relative_epoch,
                        spec,
                    )?,
                });
                if target_matches_epoch_start_block(a, state, state.previous_epoch(), spec)? {
                    status.is_previous_epoch_target_attester = true;
                }
                if has_common_beacon_block_root(a, state)? {
                    status.is_previous_epoch_head_attester = true;
                }
            }
            // Loop through the participating validator indices and update the status vec.
            for validator_index in attesting_indices {
                self.statuses[validator_index].update(&status);
            }
        }
        // Compute the total balances
        for (index, v) in self.statuses.iter().enumerate() {
            // According to the spec, we only count unslashed validators towards the totals.
            if !v.is_slashed {
                let validator_balance = state.get_effective_balance(index, spec)?;
                if v.is_current_epoch_attester {
                    self.total_balances.current_epoch_attesters += validator_balance;
                }
                if v.is_current_epoch_target_attester {
                    self.total_balances.current_epoch_target_attesters += validator_balance;
                }
                if v.is_previous_epoch_attester {
                    self.total_balances.previous_epoch_attesters += validator_balance;
                }
                if v.is_previous_epoch_target_attester {
                    self.total_balances.previous_epoch_target_attesters += validator_balance;
                }
                if v.is_previous_epoch_head_attester {
                    self.total_balances.previous_epoch_head_attesters += validator_balance;
                }
            }
        }
        Ok(())
    }
    /// Update the `statuses` for each validator based upon whether or not they attested to the
    /// "winning" shard block root for the previous epoch.
    ///
    /// Spec v0.6.1
    pub fn process_winning_roots<T: EthSpec>(
        &mut self,
        state: &BeaconState<T>,
        winning_roots: &WinningRootHashSet,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        // Loop through each slot in the previous epoch.
        for slot in state.previous_epoch().slot_iter(spec.slots_per_epoch) {
            let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?;
            // Loop through each committee in the slot.
            for c in crosslink_committees_at_slot {
                // If there was some winning crosslink root for the committee's shard.
                if let Some(winning_root) = winning_roots.get(&c.shard) {
                    let total_committee_balance = state.get_total_balance(&c.committee, spec)?;
                    for &validator_index in &winning_root.attesting_validator_indices {
                        // Take note of the balance information for the winning root, it will be
                        // used later to calculate rewards for that validator.
                        self.statuses[validator_index].winning_root_info = Some(WinningRootInfo {
                            total_committee_balance,
                            total_attesting_balance: winning_root.total_attesting_balance,
                        })
                    }
                }
            }
        }
        Ok(())
    }
}
/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`.
///
/// Spec v0.6.1
fn is_from_epoch(a: &PendingAttestation, epoch: Epoch) -> bool {
    // The attestation's FFG target epoch identifies the epoch it votes for.
    a.data.target_epoch == epoch
}
/// Returns `true` if the attestation's FFG target is equal to the hash of the `state`'s first
/// beacon block in the given `epoch`.
///
/// Spec v0.6.1
fn target_matches_epoch_start_block<T: EthSpec>(
    a: &PendingAttestation,
    state: &BeaconState<T>,
    epoch: Epoch,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    // The epoch boundary block is the one at the epoch's first slot.
    let boundary_slot = epoch.start_slot(spec.slots_per_epoch);
    Ok(*state.get_block_root(boundary_slot)? == a.data.target_root)
}
/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
/// the current slot of the `PendingAttestation`.
///
/// Spec v0.6.1
fn has_common_beacon_block_root<T: EthSpec>(
    a: &PendingAttestation,
    state: &BeaconState<T>,
) -> Result<bool, BeaconStateError> {
    // Compare against the block root the state records for the attestation's slot.
    let slot = state.get_attestation_slot(&a.data)?;
    Ok(a.data.beacon_block_root == *state.get_block_root(slot)?)
}
epoch processing: fix inclusion info proposer idx
use super::WinningRootHashSet;
use crate::common::get_attesting_indices_unsorted;
use types::*;
/// Promotes the boolean field `var` on `self` to `true` whenever it is `true`
/// on `other`; a `true` on `self` is never cleared.
macro_rules! set_self_if_other_is_true {
    ($self_: ident, $other: ident, $var: ident) => {
        $self_.$var = $self_.$var || $other.$var;
    };
}
/// The information required to reward some validator for their participation in a "winning"
/// crosslink root.
///
/// Both fields are summed effective balances over the crosslink committee.
#[derive(Default, Clone)]
pub struct WinningRootInfo {
    /// The total balance of the crosslink committee.
    pub total_committee_balance: u64,
    /// The total balance of the crosslink committee that attested for the "winning" root.
    pub total_attesting_balance: u64,
}
/// The information required to reward a block producer for including an attestation in a block.
///
/// The `Default` impl uses max-value sentinels so any real inclusion compares
/// as earlier (see `InclusionInfo::update`).
#[derive(Clone, Copy)]
pub struct InclusionInfo {
    /// The earliest slot a validator had an attestation included in the previous epoch.
    pub slot: Slot,
    /// The distance between the attestation slot and the slot that attestation was included in a
    /// block.
    pub distance: u64,
    /// The index of the proposer at the slot where the attestation was included.
    pub proposer_index: usize,
}
impl Default for InclusionInfo {
/// Defaults to `slot` and `distance` at their maximum values and `proposer_index` at zero.
fn default() -> Self {
Self {
slot: Slot::max_value(),
distance: u64::max_value(),
proposer_index: 0,
}
}
}
impl InclusionInfo {
    /// Replaces `self` wholesale with `other` when `other` records a strictly
    /// earlier inclusion slot; otherwise leaves `self` untouched.
    pub fn update(&mut self, other: &Self) {
        if other.slot >= self.slot {
            return;
        }
        self.slot = other.slot;
        self.distance = other.distance;
        self.proposer_index = other.proposer_index;
    }
}
/// Information required to reward some validator during the current and previous epoch.
#[derive(Default, Clone)]
pub struct ValidatorStatus {
    /// True if the validator has been slashed, ever.
    pub is_slashed: bool,
    /// True if the validator can withdraw in the current epoch.
    pub is_withdrawable_in_current_epoch: bool,
    /// True if the validator was active in the state's _current_ epoch.
    pub is_active_in_current_epoch: bool,
    /// True if the validator was active in the state's _previous_ epoch.
    pub is_active_in_previous_epoch: bool,
    /// The validator's effective balance in the _current_ epoch.
    pub current_epoch_effective_balance: u64,
    /// True if the validator had an attestation included in the _current_ epoch.
    pub is_current_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _current_
    /// epoch matches the block root known to the state.
    pub is_current_epoch_target_attester: bool,
    /// True if the validator had an attestation included in the _previous_ epoch.
    pub is_previous_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _previous_
    /// epoch matches the block root known to the state.
    pub is_previous_epoch_target_attester: bool,
    /// True if the validator's beacon block root attestation in the _previous_ epoch at the
    /// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
    pub is_previous_epoch_head_attester: bool,
    /// Information used to reward the block producer of this validator's earliest-included
    /// attestation.
    pub inclusion_info: Option<InclusionInfo>,
    /// Information used to reward/penalize the validator if they voted in the super-majority for
    /// some shard block.
    pub winning_root_info: Option<WinningRootInfo>,
}
impl ValidatorStatus {
    /// Merges `other` into `self`.
    ///
    /// Boolean fields are only ever promoted from `false` to `true`; a `true`
    /// on `self` is never cleared.
    ///
    /// Note: does not update the winning root info, this is done manually.
    pub fn update(&mut self, other: &Self) {
        // Promote every flag that is set on `other` (never clears a flag).
        set_self_if_other_is_true!(self, other, is_slashed);
        set_self_if_other_is_true!(self, other, is_withdrawable_in_current_epoch);
        set_self_if_other_is_true!(self, other, is_active_in_current_epoch);
        set_self_if_other_is_true!(self, other, is_active_in_previous_epoch);
        set_self_if_other_is_true!(self, other, is_current_epoch_attester);
        set_self_if_other_is_true!(self, other, is_current_epoch_target_attester);
        set_self_if_other_is_true!(self, other, is_previous_epoch_attester);
        set_self_if_other_is_true!(self, other, is_previous_epoch_target_attester);
        set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester);
        // Keep whichever inclusion info records the earliest slot.
        match self.inclusion_info.as_mut() {
            Some(mine) => {
                if let Some(theirs) = &other.inclusion_info {
                    mine.update(theirs);
                }
            }
            None => self.inclusion_info = other.inclusion_info,
        }
    }
}
/// The total effective balances for different sets of validators during the previous and current
/// epochs.
///
/// NOTE(review): the `*_attesters` totals only include unslashed validators —
/// see `ValidatorStatuses::process_attestations`.
#[derive(Default, Clone)]
pub struct TotalBalances {
    /// The total effective balance of all active validators during the _current_ epoch.
    pub current_epoch: u64,
    /// The total effective balance of all active validators during the _previous_ epoch.
    pub previous_epoch: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch.
    pub current_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _current_ epoch.
    pub current_epoch_target_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch.
    pub previous_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
    pub previous_epoch_target_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the time of attestation.
    pub previous_epoch_head_attesters: u64,
}
/// Summarised information about validator participation in the _previous_ and _current_ epochs of
/// some `BeaconState`.
#[derive(Clone)]
pub struct ValidatorStatuses {
    /// Information about each individual validator from the state's validator registry.
    pub statuses: Vec<ValidatorStatus>,
    /// Summed balances for various sets of validators.
    pub total_balances: TotalBalances,
}
impl ValidatorStatuses {
    /// Initializes a new instance, determining:
    ///
    /// - Active validators
    /// - Total balances for the current and previous epochs.
    ///
    /// Spec v0.6.1
    pub fn new<T: EthSpec>(
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Result<Self, BeaconStateError> {
        let mut statuses = Vec::with_capacity(state.validator_registry.len());
        let mut total_balances = TotalBalances::default();
        for (i, validator) in state.validator_registry.iter().enumerate() {
            let effective_balance = state.get_effective_balance(i, spec)?;
            let mut status = ValidatorStatus {
                is_slashed: validator.slashed,
                is_withdrawable_in_current_epoch: validator
                    .is_withdrawable_at(state.current_epoch()),
                current_epoch_effective_balance: effective_balance,
                ..ValidatorStatus::default()
            };
            // A validator may be active in either, both, or neither epoch;
            // the per-epoch totals are accumulated independently.
            if validator.is_active_at(state.current_epoch()) {
                status.is_active_in_current_epoch = true;
                total_balances.current_epoch += effective_balance;
            }
            if validator.is_active_at(state.previous_epoch()) {
                status.is_active_in_previous_epoch = true;
                total_balances.previous_epoch += effective_balance;
            }
            statuses.push(status);
        }
        Ok(Self {
            statuses,
            total_balances,
        })
    }
    /// Process some attestations from the given `state` updating the `statuses` and
    /// `total_balances` fields.
    ///
    /// Spec v0.6.1
    pub fn process_attestations<T: EthSpec>(
        &mut self,
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        for a in state
            .previous_epoch_attestations
            .iter()
            .chain(state.current_epoch_attestations.iter())
        {
            let attesting_indices =
                get_attesting_indices_unsorted(state, &a.data, &a.aggregation_bitfield)?;
            let mut status = ValidatorStatus::default();
            // Profile this attestation, updating the total balances and generating an
            // `ValidatorStatus` object that applies to all participants in the attestation.
            if is_from_epoch(a, state.current_epoch()) {
                status.is_current_epoch_attester = true;
                if target_matches_epoch_start_block(a, state, state.current_epoch(), spec)? {
                    status.is_current_epoch_target_attester = true;
                }
            } else if is_from_epoch(a, state.previous_epoch()) {
                status.is_previous_epoch_attester = true;
                // The inclusion slot and distance are only required for previous epoch attesters.
                let attestation_slot = state.get_attestation_slot(&a.data)?;
                let inclusion_slot = attestation_slot + a.inclusion_delay;
                let relative_epoch =
                    RelativeEpoch::from_slot(state.slot, inclusion_slot, spec.slots_per_epoch)?;
                status.inclusion_info = Some(InclusionInfo {
                    slot: inclusion_slot,
                    distance: a.inclusion_delay,
                    // Reward the proposer at the *inclusion* slot, consistent
                    // with `relative_epoch` derived from `inclusion_slot` above.
                    proposer_index: state.get_beacon_proposer_index(
                        inclusion_slot,
                        relative_epoch,
                        spec,
                    )?,
                });
                if target_matches_epoch_start_block(a, state, state.previous_epoch(), spec)? {
                    status.is_previous_epoch_target_attester = true;
                }
                if has_common_beacon_block_root(a, state)? {
                    status.is_previous_epoch_head_attester = true;
                }
            }
            // Loop through the participating validator indices and update the status vec.
            for validator_index in attesting_indices {
                self.statuses[validator_index].update(&status);
            }
        }
        // Compute the total balances
        for (index, v) in self.statuses.iter().enumerate() {
            // According to the spec, we only count unslashed validators towards the totals.
            if !v.is_slashed {
                let validator_balance = state.get_effective_balance(index, spec)?;
                if v.is_current_epoch_attester {
                    self.total_balances.current_epoch_attesters += validator_balance;
                }
                if v.is_current_epoch_target_attester {
                    self.total_balances.current_epoch_target_attesters += validator_balance;
                }
                if v.is_previous_epoch_attester {
                    self.total_balances.previous_epoch_attesters += validator_balance;
                }
                if v.is_previous_epoch_target_attester {
                    self.total_balances.previous_epoch_target_attesters += validator_balance;
                }
                if v.is_previous_epoch_head_attester {
                    self.total_balances.previous_epoch_head_attesters += validator_balance;
                }
            }
        }
        Ok(())
    }
    /// Update the `statuses` for each validator based upon whether or not they attested to the
    /// "winning" shard block root for the previous epoch.
    ///
    /// Spec v0.6.1
    pub fn process_winning_roots<T: EthSpec>(
        &mut self,
        state: &BeaconState<T>,
        winning_roots: &WinningRootHashSet,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        // Loop through each slot in the previous epoch.
        for slot in state.previous_epoch().slot_iter(spec.slots_per_epoch) {
            let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?;
            // Loop through each committee in the slot.
            for c in crosslink_committees_at_slot {
                // If there was some winning crosslink root for the committee's shard.
                if let Some(winning_root) = winning_roots.get(&c.shard) {
                    let total_committee_balance = state.get_total_balance(&c.committee, spec)?;
                    for &validator_index in &winning_root.attesting_validator_indices {
                        // Take note of the balance information for the winning root, it will be
                        // used later to calculate rewards for that validator.
                        self.statuses[validator_index].winning_root_info = Some(WinningRootInfo {
                            total_committee_balance,
                            total_attesting_balance: winning_root.total_attesting_balance,
                        })
                    }
                }
            }
        }
        Ok(())
    }
}
/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`.
///
/// Spec v0.6.1
fn is_from_epoch(a: &PendingAttestation, epoch: Epoch) -> bool {
    // The attestation's FFG target epoch identifies the epoch it votes for.
    a.data.target_epoch == epoch
}
/// Returns `true` if the attestation's FFG target is equal to the hash of the `state`'s first
/// beacon block in the given `epoch`.
///
/// Spec v0.6.1
fn target_matches_epoch_start_block<T: EthSpec>(
    a: &PendingAttestation,
    state: &BeaconState<T>,
    epoch: Epoch,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    // The epoch boundary block is the one at the epoch's first slot.
    let boundary_slot = epoch.start_slot(spec.slots_per_epoch);
    Ok(*state.get_block_root(boundary_slot)? == a.data.target_root)
}
/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
/// the current slot of the `PendingAttestation`.
///
/// Spec v0.6.1
fn has_common_beacon_block_root<T: EthSpec>(
    a: &PendingAttestation,
    state: &BeaconState<T>,
) -> Result<bool, BeaconStateError> {
    // Compare against the block root the state records for the attestation's slot.
    let slot = state.get_attestation_slot(&a.data)?;
    Ok(a.data.beacon_block_root == *state.get_block_root(slot)?)
}
|
//! Types and traits related to deserializing values from the database
use std::error::Error;
use std::result;
use backend::Backend;
use row::{NamedRow, Row};
/// A specialized result type representing the result of deserializing
/// a value from the database.
///
/// The boxed trait-object error lets every deserialization impl report its
/// own error type without this alias committing to a concrete one.
pub type Result<T> = result::Result<T, Box<Error + Send + Sync>>;
/// Trait indicating that a record can be queried from the database.
///
/// Types which implement `Queryable` represent the result of a SQL query. This
/// does not necessarily mean they represent a single database table.
///
/// This trait can be derived automatically using `#[derive(Queryable)]`. This
/// trait can only be derived for structs, not enums.
///
/// Diesel represents the return type of a query as a tuple. The purpose of this
/// trait is to convert from a tuple of Rust values that have been deserialized
/// into your struct.
///
/// When this trait is derived, it will assume that the order of fields on your
/// struct match the order of the fields in the query. This means that field
/// order is significant if you are using `#[derive(Queryable)]`. Field name has
/// no affect.
///
/// # Examples
///
/// If we just want to map a query to our struct, we can use `derive`.
///
/// ```rust
/// # #[macro_use] extern crate diesel;
/// # include!("doctest_setup.rs");
/// #
/// #[derive(Queryable, PartialEq, Debug)]
/// struct User {
/// id: i32,
/// name: String,
/// }
///
/// # fn main() {
/// # run_test();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let connection = establish_connection();
/// let first_user = users.first(&connection)?;
/// let expected = User { id: 1, name: "Sean".into() };
/// assert_eq!(expected, first_user);
/// # Ok(())
/// # }
/// ```
///
/// If we want to do additional work during deserialization, we can implement
/// the trait ourselves.
///
/// ```rust
/// # #[macro_use] extern crate diesel;
/// # include!("doctest_setup.rs");
/// #
/// use schema::users;
/// use diesel::deserialize::Queryable;
///
/// # /*
/// type DB = diesel::sqlite::Sqlite;
/// # */
///
/// #[derive(PartialEq, Debug)]
/// struct User {
/// id: i32,
/// name: String,
/// }
///
/// impl Queryable<users::SqlType, DB> for User {
/// type Row = (i32, String);
///
/// fn build(row: Self::Row) -> Self {
/// User {
/// id: row.0,
/// name: row.1.to_lowercase(),
/// }
/// }
/// }
///
/// # fn main() {
/// # run_test();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let connection = establish_connection();
/// let first_user = users.first(&connection)?;
/// let expected = User { id: 1, name: "sean".into() };
/// assert_eq!(expected, first_user);
/// # Ok(())
/// # }
/// ```
pub trait Queryable<ST, DB>
where
    DB: Backend,
{
    /// The Rust type you'd like to map from.
    ///
    /// This is typically a tuple of all of your struct's fields.
    /// It must itself be deserializable via `FromSqlRow` for the same
    /// SQL type `ST` and backend `DB`.
    type Row: FromSqlRow<ST, DB>;
    /// Construct an instance of this type
    /// from the already-deserialized `Row` tuple.
    fn build(row: Self::Row) -> Self;
}
// Reasons we can't write this:
//
// impl<T, ST, DB> Queryable<ST, DB> for T
// where
// DB: Backend,
// T: FromSqlRow<ST, DB>,
// {
// type Row = Self;
//
// fn build(row: Self::Row) -> Self {
// row
// }
// }
//
// (this is mostly a reference for @sgrif so he has a better reference every
// time he thinks he has a breakthrough on this problem).
//
// See the comment under `FromSqlRow`. All of the same impls conflict there that
// would here. If we had `#[derive(FromSqlRow)]`, we would also have that
// implement `Queryable`. I know it doesn't look like the `Option` impl
// conflicts, but it definitely does -- It covers types which implement
// `Queryable` but not `FromSqlRow`, while this impl wouldn't.
//
// The same "we could remove one of these traits" applies. Really `FromSqlRow`
// is the only trait that *needs* to exist. At the end of the day, `FromSql` is
// meant to be "I only want to deal with deserializing a single field" case, and
// `Queryable` is meant to be easier to implement by hand than `FromSqlRow`
// would be.
/// Deserializes the result of a query constructed with [`sql_query`].
///
/// # Deriving
///
/// To derive this trait, Diesel needs to know the SQL type of each field. You
/// can do this by either annotating your struct with `#[table_name =
/// "some_table"]` (in which case the SQL type will be
/// `diesel::dsl::SqlTypeOf<table_name::column_name>`), or by annotating each
/// field with `#[sql_type = "SomeType"]`.
///
/// If you are using `#[table_name]`, the module for that table must be in
/// scope. For example, to derive this for a struct called `User`, you will
/// likely need a line such as `use schema::users;`
///
/// If the name of a field on your struct is different than the column in your
/// `table!` declaration, or if you are deriving this trait on a tuple struct,
/// you can annotate the field with `#[column_name = "some_column"]`. For tuple
/// structs, all fields must have this annotation.
///
/// If a field is another struct which implements `QueryableByName`, instead of
/// a column, you can annotate that struct with `#[diesel(embed)]`
///
/// [`sql_query`]: ../fn.sql_query.html
pub trait QueryableByName<DB>
where
    Self: Sized,
    DB: Backend,
{
    /// Construct an instance of `Self` from the database row.
    ///
    /// Fields are looked up through the `NamedRow` API (by name rather than
    /// by position), and deserialization may fail, hence the `Result`.
    fn build<R: NamedRow<DB>>(row: &R) -> Result<Self>;
}
/// Deserialize a single field of a given SQL type.
///
/// When possible, implementations of this trait should prefer to use an
/// existing implementation, rather than reading from `bytes`. (For example, if
/// you are implementing this for an enum which is represented as an integer in
/// the database, prefer `i32::from_sql(bytes)` over reading from `bytes`
/// directly)
///
/// Types which implement this trait should also have `#[derive(FromSqlRow)]`
///
/// ### Backend specific details
///
/// - For PostgreSQL, the bytes will be sent using the binary protocol, not text.
/// - For SQLite, the actual type of `DB::RawValue` is private API. All
/// implementations of this trait must be written in terms of an existing
/// primitive.
/// - For MySQL, the value of `bytes` will depend on the return value of
/// `type_metadata` for the given SQL type. See [`MysqlType`] for details.
/// - For third party backends, consult that backend's documentation.
///
/// [`MysqlType`]: ../mysql/enum.MysqlType.html
pub trait FromSql<A, DB: Backend>: Sized {
    /// See the trait documentation.
    // NOTE(review): `bytes` is an `Option` — `None` presumably represents a
    // SQL NULL value; confirm against the backend implementations.
    fn from_sql(bytes: Option<&DB::RawValue>) -> Result<Self>;
}
/// Deserialize one or more fields.
///
/// All types which implement `FromSql` should also implement this trait. This
/// trait differs from `FromSql` in that it is also implemented by tuples.
/// Implementations of this trait are usually derived.
///
/// In the future, we hope to be able to provide a blanket impl of this trait
/// for all types which implement `FromSql`. However, as of Diesel 1.0, such an
/// impl would conflict with our impl for tuples.
///
/// ## Deriving
///
/// This trait can be automatically derived by Diesel
/// for any type which implements `FromSql`.
/// There are no options or special considerations needed for this derive.
/// Note that `#[derive(FromSqlRow)]` will also generate a `Queryable` implementation.
pub trait FromSqlRow<A, DB: Backend>: Sized {
    /// The number of fields that this type will consume. Must be equal to
    /// the number of times you would call `row.take()` in `build_from_row`
    /// (defaults to a single field).
    const FIELDS_NEEDED: usize = 1;
    /// See the trait documentation.
    fn build_from_row<T: Row<DB>>(row: &mut T) -> Result<Self>;
}
// Reasons we can't write this:
//
// impl<T, ST, DB> FromSqlRow<ST, DB> for T
// where
// DB: Backend + HasSqlType<ST>,
// T: FromSql<ST, DB>,
// {
// fn build_from_row<T: Row<DB>>(row: &mut T) -> Result<Self> {
// Self::from_sql(row.take())
// }
// }
//
// (this is mostly here so @sgrif has a better reference every time he thinks
// he's somehow had a breakthrough on solving this problem):
//
// - It conflicts with our impl for tuples, because `DB` is a bare type
// parameter, it could in theory be a local type for some other impl.
// - This is fixed by replacing our impl with 3 impls, where `DB` is changed
// concrete backends. This would mean that any third party crates adding new
// backends would need to add the tuple impls, which sucks but is fine.
// - It conflicts with our impl for `Option`
// - So we could in theory fix this by both splitting the generic impl into
// backend specific impls, and removing the `FromSql` impls. In theory there
// is no reason that it needs to implement `FromSql`, since everything
// requires `FromSqlRow`, but it really feels like it should.
// - Specialization might also fix this one. The impl isn't quite a strict
// subset (the `FromSql` impl has `T: FromSql`, and the `FromSqlRow` impl
// has `T: FromSqlRow`), but if `FromSql` implies `FromSqlRow`,
// specialization might consider that a subset?
// - I don't know that we really need it. `#[derive(FromSqlRow)]` is probably
// good enough. That won't improve our own codebase, since 99% of our
// `FromSqlRow` impls are for types from another crate, but it's almost
// certainly good enough for user types.
// - Still, it really feels like `FromSql` *should* be able to imply both
// `FromSqlRow` and `Queryable`
Remove duplicated comment

Yes, I do still frequently think that I've had some breakthrough to
"fix" this, and then have to look at this comment again. However, it
appears in the file twice, and I don't need to be reminded that often.
//! Types and traits related to deserializing values from the database
use std::error::Error;
use std::result;
use backend::Backend;
use row::{NamedRow, Row};
/// A specialized result type representing the result of deserializing
/// a value from the database.
pub type Result<T> = result::Result<T, Box<Error + Send + Sync>>;
/// Trait indicating that a record can be queried from the database.
///
/// Types which implement `Queryable` represent the result of a SQL query. This
/// does not necessarily mean they represent a single database table.
///
/// This trait can be derived automatically using `#[derive(Queryable)]`. This
/// trait can only be derived for structs, not enums.
///
/// Diesel represents the return type of a query as a tuple. The purpose of this
/// trait is to convert from a tuple of Rust values that have been deserialized
/// into your struct.
///
/// When this trait is derived, it will assume that the order of fields on your
/// struct match the order of the fields in the query. This means that field
/// order is significant if you are using `#[derive(Queryable)]`. Field name has
/// no affect.
///
/// # Examples
///
/// If we just want to map a query to our struct, we can use `derive`.
///
/// ```rust
/// # #[macro_use] extern crate diesel;
/// # include!("doctest_setup.rs");
/// #
/// #[derive(Queryable, PartialEq, Debug)]
/// struct User {
/// id: i32,
/// name: String,
/// }
///
/// # fn main() {
/// # run_test();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let connection = establish_connection();
/// let first_user = users.first(&connection)?;
/// let expected = User { id: 1, name: "Sean".into() };
/// assert_eq!(expected, first_user);
/// # Ok(())
/// # }
/// ```
///
/// If we want to do additional work during deserialization, we can implement
/// the trait ourselves.
///
/// ```rust
/// # #[macro_use] extern crate diesel;
/// # include!("doctest_setup.rs");
/// #
/// use schema::users;
/// use diesel::deserialize::Queryable;
///
/// # /*
/// type DB = diesel::sqlite::Sqlite;
/// # */
///
/// #[derive(PartialEq, Debug)]
/// struct User {
/// id: i32,
/// name: String,
/// }
///
/// impl Queryable<users::SqlType, DB> for User {
/// type Row = (i32, String);
///
/// fn build(row: Self::Row) -> Self {
/// User {
/// id: row.0,
/// name: row.1.to_lowercase(),
/// }
/// }
/// }
///
/// # fn main() {
/// # run_test();
/// # }
/// #
/// # fn run_test() -> QueryResult<()> {
/// # use schema::users::dsl::*;
/// # let connection = establish_connection();
/// let first_user = users.first(&connection)?;
/// let expected = User { id: 1, name: "sean".into() };
/// assert_eq!(expected, first_user);
/// # Ok(())
/// # }
pub trait Queryable<ST, DB>
where
    DB: Backend,
{
    /// The Rust type you'd like to map from.
    ///
    /// This is typically a tuple of all of your struct's fields.
    ///
    /// `Row` must implement `FromSqlRow` for the same SQL type `ST` and
    /// backend `DB`, so Diesel can deserialize it before calling `build`.
    type Row: FromSqlRow<ST, DB>;
    /// Construct an instance of this type from the deserialized row.
    fn build(row: Self::Row) -> Self;
}
/// Deserializes the result of a query constructed with [`sql_query`].
///
/// # Deriving
///
/// To derive this trait, Diesel needs to know the SQL type of each field. You
/// can do this by either annotating your struct with `#[table_name =
/// "some_table"]` (in which case the SQL type will be
/// `diesel::dsl::SqlTypeOf<table_name::column_name>`), or by annotating each
/// field with `#[sql_type = "SomeType"]`.
///
/// If you are using `#[table_name]`, the module for that table must be in
/// scope. For example, to derive this for a struct called `User`, you will
/// likely need a line such as `use schema::users;`
///
/// If the name of a field on your struct is different than the column in your
/// `table!` declaration, or if you are deriving this trait on a tuple struct,
/// you can annotate the field with `#[column_name = "some_column"]`. For tuple
/// structs, all fields must have this annotation.
///
/// If a field is another struct which implements `QueryableByName`, instead of
/// a column, you can annotate that struct with `#[diesel(embed)]`
///
/// [`sql_query`]: ../fn.sql_query.html
pub trait QueryableByName<DB>
where
    Self: Sized,
    DB: Backend,
{
    /// Construct an instance of `Self` from the database row.
    ///
    /// Unlike `Queryable`, fields are retrieved through the `NamedRow`
    /// API; the operation is fallible, hence the `Result`.
    fn build<R: NamedRow<DB>>(row: &R) -> Result<Self>;
}
/// Deserialize a single field of a given SQL type.
///
/// When possible, implementations of this trait should prefer to use an
/// existing implementation, rather than reading from `bytes`. (For example, if
/// you are implementing this for an enum which is represented as an integer in
/// the database, prefer `i32::from_sql(bytes)` over reading from `bytes`
/// directly)
///
/// Types which implement this trait should also have `#[derive(FromSqlRow)]`
///
/// ### Backend specific details
///
/// - For PostgreSQL, the bytes will be sent using the binary protocol, not text.
/// - For SQLite, the actual type of `DB::RawValue` is private API. All
/// implementations of this trait must be written in terms of an existing
/// primitive.
/// - For MySQL, the value of `bytes` will depend on the return value of
/// `type_metadata` for the given SQL type. See [`MysqlType`] for details.
/// - For third party backends, consult that backend's documentation.
///
/// [`MysqlType`]: ../mysql/enum.MysqlType.html
pub trait FromSql<A, DB: Backend>: Sized {
    /// See the trait documentation.
    // NOTE(review): the `Option` in `bytes` likely encodes SQL NULL as
    // `None` — verify against the concrete backend implementations.
    fn from_sql(bytes: Option<&DB::RawValue>) -> Result<Self>;
}
/// Deserialize one or more fields.
///
/// All types which implement `FromSql` should also implement this trait. This
/// trait differs from `FromSql` in that it is also implemented by tuples.
/// Implementations of this trait are usually derived.
///
/// In the future, we hope to be able to provide a blanket impl of this trait
/// for all types which implement `FromSql`. However, as of Diesel 1.0, such an
/// impl would conflict with our impl for tuples.
///
/// ## Deriving
///
/// This trait can be automatically derived by Diesel
/// for any type which implements `FromSql`.
/// There are no options or special considerations needed for this derive.
/// Note that `#[derive(FromSqlRow)]` will also generate a `Queryable` implementation.
pub trait FromSqlRow<A, DB: Backend>: Sized {
    /// The number of fields that this type will consume. Must be equal to
    /// the number of times you would call `row.take()` in `build_from_row`
    /// (the default of 1 covers single-field types).
    const FIELDS_NEEDED: usize = 1;
    /// See the trait documentation.
    fn build_from_row<T: Row<DB>>(row: &mut T) -> Result<Self>;
}
// Reasons we can't write this:
//
// impl<T, ST, DB> FromSqlRow<ST, DB> for T
// where
// DB: Backend + HasSqlType<ST>,
// T: FromSql<ST, DB>,
// {
// fn build_from_row<T: Row<DB>>(row: &mut T) -> Result<Self> {
// Self::from_sql(row.take())
// }
// }
//
// (this is mostly here so @sgrif has a better reference every time he thinks
// he's somehow had a breakthrough on solving this problem):
//
// - It conflicts with our impl for tuples, because `DB` is a bare type
// parameter, it could in theory be a local type for some other impl.
// - This is fixed by replacing our impl with 3 impls, where `DB` is changed
// concrete backends. This would mean that any third party crates adding new
// backends would need to add the tuple impls, which sucks but is fine.
// - It conflicts with our impl for `Option`
// - So we could in theory fix this by both splitting the generic impl into
// backend specific impls, and removing the `FromSql` impls. In theory there
// is no reason that it needs to implement `FromSql`, since everything
// requires `FromSqlRow`, but it really feels like it should.
// - Specialization might also fix this one. The impl isn't quite a strict
// subset (the `FromSql` impl has `T: FromSql`, and the `FromSqlRow` impl
// has `T: FromSqlRow`), but if `FromSql` implies `FromSqlRow`,
// specialization might consider that a subset?
// - I don't know that we really need it. `#[derive(FromSqlRow)]` is probably
// good enough. That won't improve our own codebase, since 99% of our
// `FromSqlRow` impls are for types from another crate, but it's almost
// certainly good enough for user types.
// - Still, it really feels like `FromSql` *should* be able to imply both
// `FromSqlRow` and `Queryable`
|
#![feature(plugin)]
#![warn(clippy::indexing_slicing)]
// We also check the out_of_bounds_indexing lint here, because it lints similar things and
// we want to avoid false positives.
#![warn(clippy::out_of_bounds_indexing)]
#![allow(clippy::no_effect, clippy::unnecessary_operation)]
// UI test for `clippy::indexing_slicing` / `clippy::out_of_bounds_indexing`:
// unannotated indexing and slicing expressions below are expected to lint;
// lines marked `// Ok` must not produce stderr.
fn main() {
    let x = [1, 2, 3, 4];
    // Non-constant indices: the lints cannot prove these are in bounds.
    let index: usize = 1;
    let index_from: usize = 2;
    let index_to: usize = 3;
    x[index];
    &x[index..];
    &x[..index];
    &x[index_from..index_to];
    &x[index_from..][..index_to]; // Two lint reports, one for [index_from..] and another for [..index_to].
    x[4]; // Ok, let rustc's `const_err` lint handle `usize` indexing on arrays.
    x[1 << 3]; // Ok, let rustc's `const_err` lint handle `usize` indexing on arrays.
    &x[5..][..10]; // Two lint reports, one for out of bounds [5..] and another for slicing [..10].
    &x[0..][..3];
    &x[1..][..5];
    &x[0..].get(..3); // Ok, should not produce stderr.
    x[0]; // Ok, should not produce stderr.
    x[3]; // Ok, should not produce stderr.
    &x[0..3]; // Ok, should not produce stderr.
    // The same checks through a reference to the array.
    let y = &x;
    y[0];
    &y[1..2];
    &y[0..=4];
    &y[..=4];
    &y[..]; // Ok, should not produce stderr.
    // `Vec` length is not known at compile time, so constant indices lint too.
    let v = vec![0; 5];
    v[0];
    v[10];
    v[1 << 3];
    &v[10..100];
    &x[10..][..100]; // Two lint reports, one for [10..] and another for [..100].
    &v[10..];
    &v[..100];
    &v[..]; // Ok, should not produce stderr.
    //
    // Continue tests at end function to minimize the changes to this file's corresponding stderr.
    //
    const N: usize = 15; // Out of bounds
    const M: usize = 3; // In bounds
    x[N]; // Ok, let rustc's `const_err` lint handle `usize` indexing on arrays.
    x[M]; // Ok, should not produce stderr.
    v[N];
    v[M];
}
cargo fmt
#![feature(plugin)]
#![warn(clippy::indexing_slicing)]
// We also check the out_of_bounds_indexing lint here, because it lints similar things and
// we want to avoid false positives.
#![warn(clippy::out_of_bounds_indexing)]
#![allow(clippy::no_effect, clippy::unnecessary_operation)]
// UI test for `clippy::indexing_slicing` / `clippy::out_of_bounds_indexing`:
// unannotated indexing and slicing expressions below are expected to lint;
// lines marked `// Ok` must not produce stderr.
fn main() {
    let x = [1, 2, 3, 4];
    // Non-constant indices: the lints cannot prove these are in bounds.
    let index: usize = 1;
    let index_from: usize = 2;
    let index_to: usize = 3;
    x[index];
    &x[index..];
    &x[..index];
    &x[index_from..index_to];
    &x[index_from..][..index_to]; // Two lint reports, one for [index_from..] and another for [..index_to].
    x[4]; // Ok, let rustc's `const_err` lint handle `usize` indexing on arrays.
    x[1 << 3]; // Ok, let rustc's `const_err` lint handle `usize` indexing on arrays.
    &x[5..][..10]; // Two lint reports, one for out of bounds [5..] and another for slicing [..10].
    &x[0..][..3];
    &x[1..][..5];
    &x[0..].get(..3); // Ok, should not produce stderr.
    x[0]; // Ok, should not produce stderr.
    x[3]; // Ok, should not produce stderr.
    &x[0..3]; // Ok, should not produce stderr.
    // The same checks through a reference to the array.
    let y = &x;
    y[0];
    &y[1..2];
    &y[0..=4];
    &y[..=4];
    &y[..]; // Ok, should not produce stderr.
    // `Vec` length is not known at compile time, so constant indices lint too.
    let v = vec![0; 5];
    v[0];
    v[10];
    v[1 << 3];
    &v[10..100];
    &x[10..][..100]; // Two lint reports, one for [10..] and another for [..100].
    &v[10..];
    &v[..100];
    &v[..]; // Ok, should not produce stderr.
    //
    // Continue tests at end function to minimize the changes to this file's corresponding stderr.
    //
    const N: usize = 15; // Out of bounds
    const M: usize = 3; // In bounds
    x[N]; // Ok, let rustc's `const_err` lint handle `usize` indexing on arrays.
    x[M]; // Ok, should not produce stderr.
    v[N];
    v[M];
}
|
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* automatically generated by rust-bindgen */
use glut::bindgen::{glutCreateWindow, glutDestroyWindow, glutDisplayFunc, glutMouseFunc};
use glut::bindgen::{glutKeyboardFunc, glutGetModifiers, glutMotionFunc, glutPassiveMotionFunc};
use glut::bindgen::{glutGet, glutGetWindow};
use glut::bindgen::{glutInit, glutInitDisplayMode, glutPostRedisplay, glutReshapeFunc};
use glut::bindgen::{glutReshapeWindow, glutSetWindow, glutSetWindowTitle, glutSwapBuffers};
use glut::bindgen::{glutTimerFunc, glutIdleFunc};
use core::libc::*;
use core::local_data::{local_data_get, local_data_set};
use core::ptr::{null, to_unsafe_ptr};
use core::str::to_bytes;
use core::cast::transmute;
use core::vec::raw::to_ptr;
/* FIXME: global variable glutStrokeRoman */
/* FIXME: global variable glutStrokeMonoRoman */
/* FIXME: global variable glutBitmap9By15 */
/* FIXME: global variable glutBitmap8By13 */
/* FIXME: global variable glutBitmapTimesRoman10 */
/* FIXME: global variable glutBitmapTimesRoman24 */
/* FIXME: global variable glutBitmapHelvetica10 */
/* FIXME: global variable glutBitmapHelvetica12 */
/* FIXME: global variable glutBitmapHelvetica18 */
pub type GLenum = i32;
pub type GLint = i32;
pub type GLfloat = f32;
pub type GLdouble = f64;
pub struct Window(c_int);
pub static DOUBLE: c_uint = 2 as c_uint;
pub static ACTIVE_SHIFT: c_int = 1;
pub static ACTIVE_CTRL: c_int = 2;
pub static ACTIVE_ALT: c_int = 4;
// mouse buttons
pub static LEFT_BUTTON: c_int = 0;
pub static MIDDLE_BUTTON: c_int = 1;
pub static RIGHT_BUTTON: c_int = 2;
// mouse button callback state
pub static MOUSE_DOWN: c_int = 0;
pub static MOUSE_UP: c_int = 1;
static WINDOW_WIDTH: GLenum = 102;
static WINDOW_HEIGHT: GLenum = 103;
#[cfg(target_os="linux")]
pub static HAVE_PRECISE_MOUSE_WHEEL: bool = false;
#[cfg(target_os="macos")]
pub static HAVE_PRECISE_MOUSE_WHEEL: bool = true;
pub enum State {
WindowWidth,
WindowHeight
}
// Consume an owned vector so it is dropped here; used to release the byte
// buffers that must stay alive across the preceding FFI call.
pub fn destroy<T>(_value: ~[T]) {
    // let it drop
}
// Initialize GLUT with a synthetic `argc`/`argv` ("glut" as the program
// name), since the real command line is not threaded through this wrapper.
pub fn init() {
    unsafe {
        let argc = 0 as c_int;
        let command = to_bytes(~"glut");
        let argv: (*u8, *u8) = (to_ptr(command), null());
        let argv_p = transmute(to_unsafe_ptr(&argv));
        glutInit(to_unsafe_ptr(&argc), argv_p);
        // `command` must outlive the glutInit call; release it explicitly.
        destroy(command);
    }
}
// Create a top-level GLUT window with the given title and return its handle.
pub fn create_window(name: ~str) -> Window {
    unsafe {
        // NOTE(review): `to_bytes` is not obviously NUL-terminated, while
        // `glutCreateWindow` takes a C string — confirm termination upstream.
        let bytes = to_bytes(name);
        return Window(glutCreateWindow(to_ptr(bytes) as *c_char));
    }
}
pub fn destroy_window(window: Window) {
unsafe {
glutDestroyWindow(*window);
}
}
pub fn set_window(window: Window) {
unsafe {
glutSetWindow(*window);
}
}
pub fn set_window_title(window: Window, title: &str) {
unsafe {
let bytes = to_bytes(title);
glutSetWindowTitle(to_ptr(bytes) as *c_char);
}
}
pub fn reshape_window(window: Window, width: c_int, height: c_int) {
unsafe {
let current_window = glutGetWindow();
glutSetWindow(*window);
glutReshapeWindow(width, height);
glutSetWindow(current_window);
}
}
pub fn display_callback_tls_key(_callback: @@fn()) {
// Empty.
}
pub extern fn display_callback() {
unsafe {
let callback = local_data_get(display_callback_tls_key).get();
(*callback)();
}
}
pub fn display_func(callback: @fn()) {
unsafe {
local_data_set(display_callback_tls_key, @callback);
glutDisplayFunc(display_callback);
}
}
pub fn keyboard_callback_tls_key(_: @@fn(key: c_uchar, x: c_int, y: c_int)) {
// Empty.
}
pub extern fn keyboard_callback(key: c_uchar, x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(keyboard_callback_tls_key).get();
(*callback)(key, x, y)
}
}
pub fn keyboard_func(callback: @fn(key: c_uchar, x: c_int, y: c_int)) {
unsafe {
local_data_set(keyboard_callback_tls_key, @callback);
glutKeyboardFunc(keyboard_callback);
}
}
pub fn mouse_callback_tls_key(_callback: @@fn(button: c_int, state: c_int, x: c_int, y: c_int)) {
// Empty.
}
pub extern fn mouse_callback(button: c_int, state: c_int, x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(mouse_callback_tls_key).get();
(*callback)(button, state, x, y);
}
}
pub fn mouse_func(callback: @fn(button: c_int, state: c_int, x: c_int, y: c_int)) {
unsafe {
local_data_set(mouse_callback_tls_key, @callback);
glutMouseFunc(mouse_callback);
}
}
pub fn motion_callback_tls_key(_callback: @@fn(x: c_int, y: c_int)) {
// Empty.
}
pub extern fn motion_callback(x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(motion_callback_tls_key).get();
(*callback)(x, y);
}
}
pub fn motion_func(callback: @fn(x: c_int, y: c_int)) {
unsafe {
local_data_set(motion_callback_tls_key, @callback);
glutMotionFunc(motion_callback);
}
}
pub fn passive_motion_callback_tls_key(_callback: @@fn(x: c_int, y: c_int)) {
// Empty.
}
pub extern fn passive_motion_callback(x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(passive_motion_callback_tls_key).get();
(*callback)(x, y);
}
}
pub fn passive_motion_func(callback: @fn(x: c_int, y: c_int)) {
unsafe {
local_data_set(passive_motion_callback_tls_key, @callback);
glutPassiveMotionFunc(passive_motion_callback);
}
}
pub fn timer_callback_tls_key(_callback: @~[@fn()]) {
// Empty.
}
pub extern fn timer_callback(index: int) {
unsafe {
let callbacks = local_data_get(timer_callback_tls_key).get();
((*callbacks)[index as uint])();
}
}
// Schedule `callback` to fire after `msecs` milliseconds. Callbacks are kept
// in a TLS-resident vector; the vector index is passed to GLUT as the timer's
// `value` so `timer_callback` can locate the right closure when it fires.
pub fn timer_func(msecs: u32, callback: @fn()) {
    unsafe {
        let callbacks;
        match local_data_get(timer_callback_tls_key) {
            None => {
                callbacks = @mut ~[];
                // NOTE(review): this uses the path `cast::transmute`, but the
                // top of the file imports only `core::cast::transmute` — confirm
                // the `cast::` path actually resolves in this dialect.
                local_data_set(timer_callback_tls_key, cast::transmute(callbacks));
            }
            Some(existing_callbacks) => {
                callbacks = cast::transmute(existing_callbacks);
            }
        }
        callbacks.push(callback);
        let index = (callbacks.len() - 1) as c_int;
        glutTimerFunc(msecs, timer_callback, index);
    }
}
pub fn reshape_callback_tls_key(_callback: @@fn(x: c_int, y: c_int)) {
// Empty.
}
pub extern fn reshape_callback(width: c_int, height: c_int) {
unsafe {
let callback = local_data_get(reshape_callback_tls_key).get();
(*callback)(width, height);
}
}
pub fn reshape_func(_window: Window, callback: @fn(x: c_int, y: c_int)) {
unsafe {
local_data_set(reshape_callback_tls_key, @callback);
glutReshapeFunc(reshape_callback);
}
}
pub fn idle_callback_tls_key(_callback: @@fn()) {
// Empty.
}
pub extern fn idle_callback() {
unsafe {
let callback = local_data_get(idle_callback_tls_key).get();
(*callback)();
}
}
pub fn idle_func(callback: @fn()) {
unsafe {
local_data_set(idle_callback_tls_key, @callback);
glutIdleFunc(idle_callback);
}
}
// Mouse wheel handling.
//
// This is not part of the standard, but it's supported by freeglut and our Mac hack.
pub fn mouse_wheel_callback_tls_key(_callback: @@fn(wheel: c_int,
direction: c_int,
x: c_int,
y: c_int)) {
// Empty.
}
#[cfg(target_os="linux")]
pub extern fn mouse_wheel_callback(wheel: c_int, direction: c_int, x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(wheel_callback_tls_key).get();
(*callback)(wheel, direction, x, y)
}
}
#[cfg(target_os="linux")]
// Register a mouse-wheel callback (a freeglut extension on Linux).
pub fn mouse_wheel_func(callback: @fn(wheel: c_int, direction: c_int, x: c_int, y: c_int)) {
    unsafe {
        // Fixed: store under `mouse_wheel_callback_tls_key` — the key the
        // trampoline reads and the macos variant writes — instead of the
        // undefined `wheel_callback_tls_key`.
        local_data_set(mouse_wheel_callback_tls_key, @callback);
        // NOTE(review): `glutMouseWheelFunc` is not declared in the `bindgen`
        // module below — confirm it is brought into scope elsewhere.
        glutMouseWheelFunc(mouse_wheel_callback);
    }
}
#[cfg(target_os="macos")]
pub fn mouse_wheel_func(callback: @fn(wheel: c_int, direction: c_int, x: c_int, y: c_int)) {
unsafe {
local_data_set(mouse_wheel_callback_tls_key, @callback);
}
}
#[cfg(target_os="macos")]
pub fn check_loop() {
unsafe {
ext::glutCheckLoop();
}
}
#[cfg(target_os="linux")]
pub fn check_loop() {
unsafe {
ext::glutMainLoopEvent();
}
}
pub fn init_display_mode(mode: c_uint) {
unsafe {
glutInitDisplayMode(mode);
}
}
pub fn swap_buffers() {
unsafe {
glutSwapBuffers();
}
}
pub fn post_redisplay() {
unsafe {
glutPostRedisplay();
}
}
// Query a GLUT state value, mapping the typed `State` enum onto the raw
// GLUT enum constants (WINDOW_WIDTH = 102, WINDOW_HEIGHT = 103).
pub fn get(state: State) -> c_int {
    unsafe {
        let glut_state;
        match state {
            WindowWidth => glut_state = WINDOW_WIDTH,
            WindowHeight => glut_state = WINDOW_HEIGHT
        }
        glutGet(glut_state)
    }
}
pub fn get_modifiers() -> c_int {
unsafe {
glutGetModifiers()
}
}
#[cfg(target_os="macos")]
#[nolink]
#[link_args="-framework GLUT"]
pub extern mod dummy {
}
#[cfg(target_os="linux")]
#[link_name="glut"]
pub extern mod dummy {
}
#[cfg(target_os="macos")]
#[nolink]
pub extern mod ext {
// Mac GLUT extension.
fn glutCheckLoop();
}
#[cfg(target_os="linux")]
#[nolink]
pub extern mod ext {
// freeglut extension.
fn glutMainLoopEvent();
}
#[nolink]
pub extern mod bindgen {
pub fn glutInit(argcp: *c_int, argv: **c_char);
pub fn glutInitDisplayMode(mode: c_uint);
pub fn glutInitDisplayString(string: *c_char);
pub fn glutInitWindowPosition(x: c_int, y: c_int);
pub fn glutInitWindowSize(width: c_int, height: c_int);
pub fn glutMainLoop();
pub fn glutCreateWindow(title: *c_char) -> c_int;
pub fn glutCreateSubWindow(win: c_int, x: c_int, y: c_int, width: c_int, height: c_int) -> c_int;
pub fn glutDestroyWindow(win: c_int);
pub fn glutPostRedisplay();
pub fn glutPostWindowRedisplay(win: c_int);
pub fn glutSwapBuffers();
pub fn glutGetWindow() -> c_int;
pub fn glutSetWindow(win: c_int);
pub fn glutSetWindowTitle(title: *c_char);
pub fn glutSetIconTitle(title: *c_char);
pub fn glutPositionWindow(x: c_int, y: c_int);
pub fn glutReshapeWindow(width: c_int, height: c_int);
pub fn glutPopWindow();
pub fn glutPushWindow();
pub fn glutIconifyWindow();
pub fn glutShowWindow();
pub fn glutHideWindow();
pub fn glutFullScreen();
pub fn glutSetCursor(cursor: c_int);
pub fn glutWarpPointer(x: c_int, y: c_int);
pub fn glutEstablishOverlay();
pub fn glutRemoveOverlay();
pub fn glutUseLayer(layer: GLenum);
pub fn glutPostOverlayRedisplay();
pub fn glutPostWindowOverlayRedisplay(win: c_int);
pub fn glutShowOverlay();
pub fn glutHideOverlay();
pub fn glutCreateMenu(arg1: *u8) -> c_int;
pub fn glutDestroyMenu(menu: c_int);
pub fn glutGetMenu() -> c_int;
pub fn glutSetMenu(menu: c_int);
pub fn glutAddMenuEntry(label: *c_char, value: c_int);
pub fn glutAddSubMenu(label: *c_char, submenu: c_int);
pub fn glutChangeToMenuEntry(item: c_int, label: *c_char, value: c_int);
pub fn glutChangeToSubMenu(item: c_int, label: *c_char, submenu: c_int);
pub fn glutRemoveMenuItem(item: c_int);
pub fn glutAttachMenu(button: c_int);
pub fn glutDetachMenu(button: c_int);
pub fn glutDisplayFunc(func: *u8);
pub fn glutReshapeFunc(func: *u8);
pub fn glutKeyboardFunc(func: *u8);
pub fn glutMouseFunc(func: *u8);
pub fn glutMotionFunc(func: *u8);
pub fn glutPassiveMotionFunc(func: *u8);
pub fn glutEntryFunc(func: *u8);
pub fn glutVisibilityFunc(func: *u8);
pub fn glutIdleFunc(func: *u8);
pub fn glutTimerFunc(millis: c_uint, func: *u8, value: c_int);
pub fn glutMenuStateFunc(func: *u8);
pub fn glutSpecialFunc(func: *u8);
pub fn glutSpaceballMotionFunc(func: *u8);
pub fn glutSpaceballRotateFunc(func: *u8);
pub fn glutSpaceballButtonFunc(func: *u8);
pub fn glutButtonBoxFunc(func: *u8);
pub fn glutDialsFunc(func: *u8);
pub fn glutTabletMotionFunc(func: *u8);
pub fn glutTabletButtonFunc(func: *u8);
pub fn glutMenuStatusFunc(func: *u8);
pub fn glutOverlayDisplayFunc(func: *u8);
pub fn glutWindowStatusFunc(func: *u8);
pub fn glutKeyboardUpFunc(func: *u8);
pub fn glutSpecialUpFunc(func: *u8);
pub fn glutJoystickFunc(func: *u8, pollInterval: c_int);
pub fn glutSetColor(arg1: c_int, red: GLfloat, green: GLfloat, blue: GLfloat);
pub fn glutGetColor(ndx: c_int, component: c_int) -> GLfloat;
pub fn glutCopyColormap(win: c_int);
pub fn glutGet(_type: GLenum) -> c_int;
pub fn glutDeviceGet(_type: GLenum) -> c_int;
pub fn glutExtensionSupported(name: *c_char) -> c_int;
pub fn glutGetModifiers() -> c_int;
pub fn glutLayerGet(_type: GLenum) -> c_int;
pub fn glutGetProcAddress(procName: *c_char) -> *c_void;
pub fn glutBitmapCharacter(font: *c_void, character: c_int);
pub fn glutBitmapWidth(font: *c_void, character: c_int) -> c_int;
pub fn glutStrokeCharacter(font: *c_void, character: c_int);
pub fn glutStrokeWidth(font: *c_void, character: c_int) -> c_int;
pub fn glutBitmapLength(font: *c_void, string: *c_uchar) -> c_int;
pub fn glutStrokeLength(font: *c_void, string: *c_uchar) -> c_int;
pub fn glutWireSphere(radius: GLdouble, slices: GLint, stacks: GLint);
pub fn glutSolidSphere(radius: GLdouble, slices: GLint, stacks: GLint);
pub fn glutWireCone(base: GLdouble, height: GLdouble, slices: GLint, stacks: GLint);
pub fn glutSolidCone(base: GLdouble, height: GLdouble, slices: GLint, stacks: GLint);
pub fn glutWireCube(size: GLdouble);
pub fn glutSolidCube(size: GLdouble);
pub fn glutWireTorus(innerRadius: GLdouble, outerRadius: GLdouble, sides: GLint, rings: GLint);
pub fn glutSolidTorus(innerRadius: GLdouble, outerRadius: GLdouble, sides: GLint, rings: GLint);
pub fn glutWireDodecahedron();
pub fn glutSolidDodecahedron();
pub fn glutWireTeapot(size: GLdouble);
pub fn glutSolidTeapot(size: GLdouble);
pub fn glutWireOctahedron();
pub fn glutSolidOctahedron();
pub fn glutWireTetrahedron();
pub fn glutSolidTetrahedron();
pub fn glutWireIcosahedron();
pub fn glutSolidIcosahedron();
pub fn glutVideoResizeGet(param: GLenum) -> c_int;
pub fn glutSetupVideoResizing();
pub fn glutStopVideoResizing();
pub fn glutVideoResize(x: c_int, y: c_int, width: c_int, height: c_int);
pub fn glutVideoPan(x: c_int, y: c_int, width: c_int, height: c_int);
pub fn glutReportErrors();
pub fn glutIgnoreKeyRepeat(ignore: c_int);
pub fn glutSetKeyRepeat(repeatMode: c_int);
pub fn glutForceJoystickFunc();
pub fn glutGameModeString(string: *c_char);
pub fn glutEnterGameMode() -> c_int;
pub fn glutLeaveGameMode();
pub fn glutGameModeGet(mode: GLenum) -> c_int;
}
Fix typo in the Linux version of the mouse wheel code.
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/* automatically generated by rust-bindgen */
use glut::bindgen::{glutCreateWindow, glutDestroyWindow, glutDisplayFunc, glutMouseFunc};
use glut::bindgen::{glutKeyboardFunc, glutGetModifiers, glutMotionFunc, glutPassiveMotionFunc};
use glut::bindgen::{glutGet, glutGetWindow};
use glut::bindgen::{glutInit, glutInitDisplayMode, glutPostRedisplay, glutReshapeFunc};
use glut::bindgen::{glutReshapeWindow, glutSetWindow, glutSetWindowTitle, glutSwapBuffers};
use glut::bindgen::{glutTimerFunc, glutIdleFunc};
use core::libc::*;
use core::local_data::{local_data_get, local_data_set};
use core::ptr::{null, to_unsafe_ptr};
use core::str::to_bytes;
use core::cast::transmute;
use core::vec::raw::to_ptr;
/* FIXME: global variable glutStrokeRoman */
/* FIXME: global variable glutStrokeMonoRoman */
/* FIXME: global variable glutBitmap9By15 */
/* FIXME: global variable glutBitmap8By13 */
/* FIXME: global variable glutBitmapTimesRoman10 */
/* FIXME: global variable glutBitmapTimesRoman24 */
/* FIXME: global variable glutBitmapHelvetica10 */
/* FIXME: global variable glutBitmapHelvetica12 */
/* FIXME: global variable glutBitmapHelvetica18 */
pub type GLenum = i32;
pub type GLint = i32;
pub type GLfloat = f32;
pub type GLdouble = f64;
pub struct Window(c_int);
pub static DOUBLE: c_uint = 2 as c_uint;
pub static ACTIVE_SHIFT: c_int = 1;
pub static ACTIVE_CTRL: c_int = 2;
pub static ACTIVE_ALT: c_int = 4;
// mouse buttons
pub static LEFT_BUTTON: c_int = 0;
pub static MIDDLE_BUTTON: c_int = 1;
pub static RIGHT_BUTTON: c_int = 2;
// mouse button callback state
pub static MOUSE_DOWN: c_int = 0;
pub static MOUSE_UP: c_int = 1;
static WINDOW_WIDTH: GLenum = 102;
static WINDOW_HEIGHT: GLenum = 103;
#[cfg(target_os="linux")]
pub static HAVE_PRECISE_MOUSE_WHEEL: bool = false;
#[cfg(target_os="macos")]
pub static HAVE_PRECISE_MOUSE_WHEEL: bool = true;
pub enum State {
WindowWidth,
WindowHeight
}
pub fn destroy<T>(_value: ~[T]) {
// let it drop
}
pub fn init() {
unsafe {
let argc = 0 as c_int;
let command = to_bytes(~"glut");
let argv: (*u8, *u8) = (to_ptr(command), null());
let argv_p = transmute(to_unsafe_ptr(&argv));
glutInit(to_unsafe_ptr(&argc), argv_p);
destroy(command);
}
}
pub fn create_window(name: ~str) -> Window {
unsafe {
let bytes = to_bytes(name);
return Window(glutCreateWindow(to_ptr(bytes) as *c_char));
}
}
pub fn destroy_window(window: Window) {
unsafe {
glutDestroyWindow(*window);
}
}
pub fn set_window(window: Window) {
unsafe {
glutSetWindow(*window);
}
}
pub fn set_window_title(window: Window, title: &str) {
unsafe {
let bytes = to_bytes(title);
glutSetWindowTitle(to_ptr(bytes) as *c_char);
}
}
pub fn reshape_window(window: Window, width: c_int, height: c_int) {
unsafe {
let current_window = glutGetWindow();
glutSetWindow(*window);
glutReshapeWindow(width, height);
glutSetWindow(current_window);
}
}
pub fn display_callback_tls_key(_callback: @@fn()) {
// Empty.
}
pub extern fn display_callback() {
unsafe {
let callback = local_data_get(display_callback_tls_key).get();
(*callback)();
}
}
pub fn display_func(callback: @fn()) {
unsafe {
local_data_set(display_callback_tls_key, @callback);
glutDisplayFunc(display_callback);
}
}
pub fn keyboard_callback_tls_key(_: @@fn(key: c_uchar, x: c_int, y: c_int)) {
// Empty.
}
pub extern fn keyboard_callback(key: c_uchar, x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(keyboard_callback_tls_key).get();
(*callback)(key, x, y)
}
}
pub fn keyboard_func(callback: @fn(key: c_uchar, x: c_int, y: c_int)) {
unsafe {
local_data_set(keyboard_callback_tls_key, @callback);
glutKeyboardFunc(keyboard_callback);
}
}
pub fn mouse_callback_tls_key(_callback: @@fn(button: c_int, state: c_int, x: c_int, y: c_int)) {
// Empty.
}
pub extern fn mouse_callback(button: c_int, state: c_int, x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(mouse_callback_tls_key).get();
(*callback)(button, state, x, y);
}
}
pub fn mouse_func(callback: @fn(button: c_int, state: c_int, x: c_int, y: c_int)) {
unsafe {
local_data_set(mouse_callback_tls_key, @callback);
glutMouseFunc(mouse_callback);
}
}
pub fn motion_callback_tls_key(_callback: @@fn(x: c_int, y: c_int)) {
// Empty.
}
pub extern fn motion_callback(x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(motion_callback_tls_key).get();
(*callback)(x, y);
}
}
pub fn motion_func(callback: @fn(x: c_int, y: c_int)) {
unsafe {
local_data_set(motion_callback_tls_key, @callback);
glutMotionFunc(motion_callback);
}
}
pub fn passive_motion_callback_tls_key(_callback: @@fn(x: c_int, y: c_int)) {
// Empty.
}
pub extern fn passive_motion_callback(x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(passive_motion_callback_tls_key).get();
(*callback)(x, y);
}
}
pub fn passive_motion_func(callback: @fn(x: c_int, y: c_int)) {
unsafe {
local_data_set(passive_motion_callback_tls_key, @callback);
glutPassiveMotionFunc(passive_motion_callback);
}
}
pub fn timer_callback_tls_key(_callback: @~[@fn()]) {
// Empty.
}
pub extern fn timer_callback(index: int) {
unsafe {
let callbacks = local_data_get(timer_callback_tls_key).get();
((*callbacks)[index as uint])();
}
}
pub fn timer_func(msecs: u32, callback: @fn()) {
unsafe {
let callbacks;
match local_data_get(timer_callback_tls_key) {
None => {
callbacks = @mut ~[];
local_data_set(timer_callback_tls_key, cast::transmute(callbacks));
}
Some(existing_callbacks) => {
callbacks = cast::transmute(existing_callbacks);
}
}
callbacks.push(callback);
let index = (callbacks.len() - 1) as c_int;
glutTimerFunc(msecs, timer_callback, index);
}
}
pub fn reshape_callback_tls_key(_callback: @@fn(x: c_int, y: c_int)) {
// Empty.
}
pub extern fn reshape_callback(width: c_int, height: c_int) {
unsafe {
let callback = local_data_get(reshape_callback_tls_key).get();
(*callback)(width, height);
}
}
pub fn reshape_func(_window: Window, callback: @fn(x: c_int, y: c_int)) {
unsafe {
local_data_set(reshape_callback_tls_key, @callback);
glutReshapeFunc(reshape_callback);
}
}
pub fn idle_callback_tls_key(_callback: @@fn()) {
// Empty.
}
pub extern fn idle_callback() {
unsafe {
let callback = local_data_get(idle_callback_tls_key).get();
(*callback)();
}
}
pub fn idle_func(callback: @fn()) {
unsafe {
local_data_set(idle_callback_tls_key, @callback);
glutIdleFunc(idle_callback);
}
}
// Mouse wheel handling.
//
// This is not part of the standard, but it's supported by freeglut and our Mac hack.
pub fn mouse_wheel_callback_tls_key(_callback: @@fn(wheel: c_int,
direction: c_int,
x: c_int,
y: c_int)) {
// Empty.
}
#[cfg(target_os="linux")]
pub extern fn mouse_wheel_callback(wheel: c_int, direction: c_int, x: c_int, y: c_int) {
unsafe {
let callback = local_data_get(mouse_wheel_callback_tls_key).get();
(*callback)(wheel, direction, x, y)
}
}
#[cfg(target_os="linux")]
pub fn mouse_wheel_func(callback: @fn(wheel: c_int, direction: c_int, x: c_int, y: c_int)) {
unsafe {
local_data_set(mouse_wheel_callback_tls_key, @callback);
glutMouseWheelFunc(mouse_wheel_callback);
}
}
#[cfg(target_os="macos")]
pub fn mouse_wheel_func(callback: @fn(wheel: c_int, direction: c_int, x: c_int, y: c_int)) {
unsafe {
local_data_set(mouse_wheel_callback_tls_key, @callback);
}
}
#[cfg(target_os="macos")]
pub fn check_loop() {
unsafe {
ext::glutCheckLoop();
}
}
#[cfg(target_os="linux")]
pub fn check_loop() {
unsafe {
ext::glutMainLoopEvent();
}
}
pub fn init_display_mode(mode: c_uint) {
unsafe {
glutInitDisplayMode(mode);
}
}
pub fn swap_buffers() {
unsafe {
glutSwapBuffers();
}
}
pub fn post_redisplay() {
unsafe {
glutPostRedisplay();
}
}
pub fn get(state: State) -> c_int {
unsafe {
let glut_state;
match state {
WindowWidth => glut_state = WINDOW_WIDTH,
WindowHeight => glut_state = WINDOW_HEIGHT
}
glutGet(glut_state)
}
}
pub fn get_modifiers() -> c_int {
unsafe {
glutGetModifiers()
}
}
#[cfg(target_os="macos")]
#[nolink]
#[link_args="-framework GLUT"]
pub extern mod dummy {
}
#[cfg(target_os="linux")]
#[link_name="glut"]
pub extern mod dummy {
}
#[cfg(target_os="macos")]
#[nolink]
pub extern mod ext {
// Mac GLUT extension.
fn glutCheckLoop();
}
#[cfg(target_os="linux")]
#[nolink]
pub extern mod ext {
// freeglut extension.
fn glutMainLoopEvent();
}
#[nolink]
pub extern mod bindgen {
pub fn glutInit(argcp: *c_int, argv: **c_char);
pub fn glutInitDisplayMode(mode: c_uint);
pub fn glutInitDisplayString(string: *c_char);
pub fn glutInitWindowPosition(x: c_int, y: c_int);
pub fn glutInitWindowSize(width: c_int, height: c_int);
pub fn glutMainLoop();
pub fn glutCreateWindow(title: *c_char) -> c_int;
pub fn glutCreateSubWindow(win: c_int, x: c_int, y: c_int, width: c_int, height: c_int) -> c_int;
pub fn glutDestroyWindow(win: c_int);
pub fn glutPostRedisplay();
pub fn glutPostWindowRedisplay(win: c_int);
pub fn glutSwapBuffers();
pub fn glutGetWindow() -> c_int;
pub fn glutSetWindow(win: c_int);
pub fn glutSetWindowTitle(title: *c_char);
pub fn glutSetIconTitle(title: *c_char);
pub fn glutPositionWindow(x: c_int, y: c_int);
pub fn glutReshapeWindow(width: c_int, height: c_int);
pub fn glutPopWindow();
pub fn glutPushWindow();
pub fn glutIconifyWindow();
pub fn glutShowWindow();
pub fn glutHideWindow();
pub fn glutFullScreen();
pub fn glutSetCursor(cursor: c_int);
pub fn glutWarpPointer(x: c_int, y: c_int);
pub fn glutEstablishOverlay();
pub fn glutRemoveOverlay();
pub fn glutUseLayer(layer: GLenum);
pub fn glutPostOverlayRedisplay();
pub fn glutPostWindowOverlayRedisplay(win: c_int);
pub fn glutShowOverlay();
pub fn glutHideOverlay();
pub fn glutCreateMenu(arg1: *u8) -> c_int;
pub fn glutDestroyMenu(menu: c_int);
pub fn glutGetMenu() -> c_int;
pub fn glutSetMenu(menu: c_int);
pub fn glutAddMenuEntry(label: *c_char, value: c_int);
pub fn glutAddSubMenu(label: *c_char, submenu: c_int);
pub fn glutChangeToMenuEntry(item: c_int, label: *c_char, value: c_int);
pub fn glutChangeToSubMenu(item: c_int, label: *c_char, submenu: c_int);
pub fn glutRemoveMenuItem(item: c_int);
pub fn glutAttachMenu(button: c_int);
pub fn glutDetachMenu(button: c_int);
pub fn glutDisplayFunc(func: *u8);
pub fn glutReshapeFunc(func: *u8);
pub fn glutKeyboardFunc(func: *u8);
pub fn glutMouseFunc(func: *u8);
pub fn glutMotionFunc(func: *u8);
pub fn glutPassiveMotionFunc(func: *u8);
pub fn glutEntryFunc(func: *u8);
pub fn glutVisibilityFunc(func: *u8);
pub fn glutIdleFunc(func: *u8);
pub fn glutTimerFunc(millis: c_uint, func: *u8, value: c_int);
pub fn glutMenuStateFunc(func: *u8);
pub fn glutSpecialFunc(func: *u8);
pub fn glutSpaceballMotionFunc(func: *u8);
pub fn glutSpaceballRotateFunc(func: *u8);
pub fn glutSpaceballButtonFunc(func: *u8);
pub fn glutButtonBoxFunc(func: *u8);
pub fn glutDialsFunc(func: *u8);
pub fn glutTabletMotionFunc(func: *u8);
pub fn glutTabletButtonFunc(func: *u8);
pub fn glutMenuStatusFunc(func: *u8);
pub fn glutOverlayDisplayFunc(func: *u8);
pub fn glutWindowStatusFunc(func: *u8);
pub fn glutKeyboardUpFunc(func: *u8);
pub fn glutSpecialUpFunc(func: *u8);
pub fn glutJoystickFunc(func: *u8, pollInterval: c_int);
pub fn glutSetColor(arg1: c_int, red: GLfloat, green: GLfloat, blue: GLfloat);
pub fn glutGetColor(ndx: c_int, component: c_int) -> GLfloat;
pub fn glutCopyColormap(win: c_int);
pub fn glutGet(_type: GLenum) -> c_int;
pub fn glutDeviceGet(_type: GLenum) -> c_int;
pub fn glutExtensionSupported(name: *c_char) -> c_int;
pub fn glutGetModifiers() -> c_int;
pub fn glutLayerGet(_type: GLenum) -> c_int;
pub fn glutGetProcAddress(procName: *c_char) -> *c_void;
pub fn glutBitmapCharacter(font: *c_void, character: c_int);
pub fn glutBitmapWidth(font: *c_void, character: c_int) -> c_int;
pub fn glutStrokeCharacter(font: *c_void, character: c_int);
pub fn glutStrokeWidth(font: *c_void, character: c_int) -> c_int;
pub fn glutBitmapLength(font: *c_void, string: *c_uchar) -> c_int;
pub fn glutStrokeLength(font: *c_void, string: *c_uchar) -> c_int;
pub fn glutWireSphere(radius: GLdouble, slices: GLint, stacks: GLint);
pub fn glutSolidSphere(radius: GLdouble, slices: GLint, stacks: GLint);
pub fn glutWireCone(base: GLdouble, height: GLdouble, slices: GLint, stacks: GLint);
pub fn glutSolidCone(base: GLdouble, height: GLdouble, slices: GLint, stacks: GLint);
pub fn glutWireCube(size: GLdouble);
pub fn glutSolidCube(size: GLdouble);
pub fn glutWireTorus(innerRadius: GLdouble, outerRadius: GLdouble, sides: GLint, rings: GLint);
pub fn glutSolidTorus(innerRadius: GLdouble, outerRadius: GLdouble, sides: GLint, rings: GLint);
pub fn glutWireDodecahedron();
pub fn glutSolidDodecahedron();
pub fn glutWireTeapot(size: GLdouble);
pub fn glutSolidTeapot(size: GLdouble);
pub fn glutWireOctahedron();
pub fn glutSolidOctahedron();
pub fn glutWireTetrahedron();
pub fn glutSolidTetrahedron();
pub fn glutWireIcosahedron();
pub fn glutSolidIcosahedron();
pub fn glutVideoResizeGet(param: GLenum) -> c_int;
pub fn glutSetupVideoResizing();
pub fn glutStopVideoResizing();
pub fn glutVideoResize(x: c_int, y: c_int, width: c_int, height: c_int);
pub fn glutVideoPan(x: c_int, y: c_int, width: c_int, height: c_int);
pub fn glutReportErrors();
pub fn glutIgnoreKeyRepeat(ignore: c_int);
pub fn glutSetKeyRepeat(repeatMode: c_int);
pub fn glutForceJoystickFunc();
pub fn glutGameModeString(string: *c_char);
pub fn glutEnterGameMode() -> c_int;
pub fn glutLeaveGameMode();
pub fn glutGameModeGet(mode: GLenum) -> c_int;
}
|
use std::sync::Arc;
use smithay::delegate_compositor;
use smithay::reexports::wayland_server::Display;
use smithay::wayland::compositor::{CompositorHandler, CompositorState};
use wayland_server::backend::{ClientData, ClientId, DisconnectReason};
use wayland_server::protocol::wl_surface::WlSurface;
use wayland_server::{socket::ListeningSocket, DisplayHandle};
struct App {
compositor_state: CompositorState,
}
impl CompositorHandler for App {
fn compositor_state(&mut self) -> &mut CompositorState {
&mut self.compositor_state
}
fn commit(&mut self, _dh: &DisplayHandle, surface: &WlSurface) {
dbg!("Commit", surface);
}
}
/// Entry point: creates the display, registers the compositor global, and
/// runs a minimal accept/dispatch loop on the "wayland-5" socket.
///
/// Never returns normally (the loop is infinite); the `Result` return type
/// exists so `?` can propagate dispatch/flush errors.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut display: Display<App> = Display::new()?;
    let compositor_state = CompositorState::new(&mut display, None);
    let mut state = App { compositor_state };

    let listener = ListeningSocket::bind("wayland-5").unwrap();
    // Client handles must be kept alive; dropping one disconnects the client.
    let mut clients = Vec::new();

    loop {
        // Non-blocking accept: None when no client is waiting.
        if let Some(stream) = listener.accept().unwrap() {
            println!("Got a client: {:?}", stream);
            // FIX: `insert_client` is a method of `DisplayHandle`, not of
            // `Display` itself — obtain the handle first (this previously
            // failed to compile as `display.insert_client(...)`).
            let client = display
                .handle()
                .insert_client(stream, Arc::new(ClientState))
                .unwrap();
            clients.push(client);
        }
        // Process pending client requests, then flush outgoing events.
        display.dispatch_clients(&mut state)?;
        display.flush_clients()?;
    }
}
/// Per-client data attached at `insert_client` time; carries no state here.
struct ClientState;

// FIX: the `ClientData` trait in wayland-server's backend is not generic
// over the compositor state — `impl ClientData<App>` does not compile.
impl ClientData for ClientState {
    /// Called once the client is fully registered with the display.
    fn initialized(&self, _client_id: ClientId) {
        println!("initialized");
    }
    /// Called when the client disconnects (cleanly or not).
    fn disconnected(&self, _client_id: ClientId, _reason: DisconnectReason) {
        println!("disconnected");
    }
}
impl AsMut<CompositorState> for App {
fn as_mut(&mut self) -> &mut CompositorState {
&mut self.compositor_state
}
}
delegate_compositor!(App);
compositor-example: fix errors outside of macros
use std::sync::Arc;
use smithay::delegate_compositor;
use smithay::reexports::wayland_server::Display;
use smithay::wayland::compositor::{CompositorHandler, CompositorState};
use wayland_server::backend::{ClientData, ClientId, DisconnectReason};
use wayland_server::protocol::wl_surface::WlSurface;
use wayland_server::{socket::ListeningSocket, DisplayHandle};
struct App {
compositor_state: CompositorState,
}
impl CompositorHandler for App {
fn compositor_state(&mut self) -> &mut CompositorState {
&mut self.compositor_state
}
fn commit(&mut self, _dh: &DisplayHandle, surface: &WlSurface) {
dbg!("Commit", surface);
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut display: Display<App> = Display::new()?;
let compositor_state = CompositorState::new(&mut display, None);
let mut state = App { compositor_state };
let listener = ListeningSocket::bind("wayland-5").unwrap();
let mut clients = Vec::new();
loop {
if let Some(stream) = listener.accept().unwrap() {
println!("Got a client: {:?}", stream);
let client = display
.handle()
.insert_client(stream, Arc::new(ClientState))
.unwrap();
clients.push(client);
}
display.dispatch_clients(&mut state)?;
display.flush_clients()?;
}
}
struct ClientState;
impl ClientData for ClientState {
fn initialized(&self, _client_id: ClientId) {
println!("initialized");
}
fn disconnected(&self, _client_id: ClientId, _reason: DisconnectReason) {
println!("disconnected");
}
}
impl AsMut<CompositorState> for App {
fn as_mut(&mut self) -> &mut CompositorState {
&mut self.compositor_state
}
}
delegate_compositor!(App);
|
// By compiling this file we check that all the intrinsics we care about continue to be provided by
// the `compiler_builtins` crate regardless of the changes we make to it. If we, by mistake, stop
// compiling a C implementation and forget to implement that intrinsic in Rust, this file will fail
// to link due to the missing intrinsic (symbol).
#![allow(unused_features)]
#![cfg_attr(thumb, no_main)]
#![deny(dead_code)]
#![feature(alloc_system)]
#![feature(asm)]
#![feature(compiler_builtins_lib)]
#![feature(core_float)]
#![feature(lang_items)]
#![feature(start)]
#![feature(global_allocator)]
#![feature(allocator_api)]
#![cfg_attr(windows, feature(panic_unwind))]
#![no_core]
extern crate core;
#[cfg(not(thumb))]
extern crate alloc_system;
extern crate compiler_builtins;
#[cfg(windows)]
extern crate panic_unwind;
#[cfg(not(thumb))]
#[global_allocator]
static A: alloc_system::System = alloc_system::System;
// NOTE cfg(not(thumbv6m)) means that the operation is not supported on ARMv6-M at all. Not even
// compiler-rt provides a C/assembly implementation.
// Every function in this module maps will be lowered to an intrinsic by LLVM, if the platform
// doesn't have native support for the operation used in the function. ARM has a naming convention
// convention for its intrinsics that's different from other architectures; that's why some function
// have an additional comment: the function name is the ARM name for the intrinsic and the comment
// in the non-ARM name for the intrinsic.
/// Each function here is lowered by LLVM to a libcall when the target lacks
/// native support; linking this file therefore verifies the corresponding
/// compiler-builtins symbol exists. The comment above each ARM-named
/// function gives the generic (non-ARM) intrinsic name — the operation in
/// the body must match that name, or the wrong symbol gets pulled in.
mod intrinsics {
    use core::num::Float;
    // trunccdfsf2
    pub fn aeabi_d2f(x: f64) -> f32 {
        x as f32
    }
    // fixdfsi
    pub fn aeabi_d2i(x: f64) -> i32 {
        x as i32
    }
    // fixdfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_d2l(x: f64) -> i64 {
        x as i64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_d2l(_: f64) -> i64 {
        0
    }
    // fixunsdfsi
    pub fn aeabi_d2uiz(x: f64) -> u32 {
        x as u32
    }
    // fixunsdfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_d2ulz(x: f64) -> u64 {
        x as u64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_d2ulz(_: f64) -> u64 {
        0
    }
    // adddf3
    pub fn aeabi_dadd(a: f64, b: f64) -> f64 {
        a + b
    }
    // eqdf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_dcmpeq(a: f64, b: f64) -> bool {
        a == b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_dcmpeq(_: f64, _: f64) -> bool {
        true
    }
    // gtdf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_dcmpgt(a: f64, b: f64) -> bool {
        a > b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_dcmpgt(_: f64, _: f64) -> bool {
        true
    }
    // ltdf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_dcmplt(a: f64, b: f64) -> bool {
        a < b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_dcmplt(_: f64, _: f64) -> bool {
        true
    }
    // divdf3
    pub fn aeabi_ddiv(a: f64, b: f64) -> f64 {
        a / b
    }
    // muldf3
    pub fn aeabi_dmul(a: f64, b: f64) -> f64 {
        a * b
    }
    // subdf3
    pub fn aeabi_dsub(a: f64, b: f64) -> f64 {
        a - b
    }
    // extendsfdf2
    pub fn aeabi_f2d(x: f32) -> f64 {
        x as f64
    }
    // fixsfsi
    pub fn aeabi_f2iz(x: f32) -> i32 {
        x as i32
    }
    // fixsfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_f2lz(x: f32) -> i64 {
        x as i64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_f2lz(_: f32) -> i64 {
        0
    }
    // fixunssfsi
    pub fn aeabi_f2uiz(x: f32) -> u32 {
        x as u32
    }
    // fixunssfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_f2ulz(x: f32) -> u64 {
        x as u64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_f2ulz(_: f32) -> u64 {
        0
    }
    // addsf3
    pub fn aeabi_fadd(a: f32, b: f32) -> f32 {
        a + b
    }
    // eqsf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_fcmpeq(a: f32, b: f32) -> bool {
        a == b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_fcmpeq(_: f32, _: f32) -> bool {
        true
    }
    // gtsf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_fcmpgt(a: f32, b: f32) -> bool {
        a > b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_fcmpgt(_: f32, _: f32) -> bool {
        true
    }
    // ltsf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_fcmplt(a: f32, b: f32) -> bool {
        a < b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_fcmplt(_: f32, _: f32) -> bool {
        true
    }
    // divsf3
    pub fn aeabi_fdiv(a: f32, b: f32) -> f32 {
        a / b
    }
    // mulsf3
    pub fn aeabi_fmul(a: f32, b: f32) -> f32 {
        a * b
    }
    // subsf3
    pub fn aeabi_fsub(a: f32, b: f32) -> f32 {
        a - b
    }
    // floatsidf
    pub fn aeabi_i2d(x: i32) -> f64 {
        x as f64
    }
    // floatsisf
    pub fn aeabi_i2f(x: i32) -> f32 {
        x as f32
    }
    pub fn aeabi_idiv(a: i32, b: i32) -> i32 {
        a.wrapping_div(b)
    }
    pub fn aeabi_idivmod(a: i32, b: i32) -> i32 {
        a % b
    }
    // floatdidf
    pub fn aeabi_l2d(x: i64) -> f64 {
        x as f64
    }
    // floatdisf
    pub fn aeabi_l2f(x: i64) -> f32 {
        x as f32
    }
    // divdi3
    pub fn aeabi_ldivmod(a: i64, b: i64) -> i64 {
        a / b
    }
    // muldi3
    pub fn aeabi_lmul(a: i64, b: i64) -> i64 {
        a.wrapping_mul(b)
    }
    // floatunsidf
    pub fn aeabi_ui2d(x: u32) -> f64 {
        x as f64
    }
    // floatunsisf
    pub fn aeabi_ui2f(x: u32) -> f32 {
        x as f32
    }
    pub fn aeabi_uidiv(a: u32, b: u32) -> u32 {
        a / b
    }
    pub fn aeabi_uidivmod(a: u32, b: u32) -> u32 {
        a % b
    }
    // floatundidf
    pub fn aeabi_ul2d(x: u64) -> f64 {
        x as f64
    }
    // floatundisf
    pub fn aeabi_ul2f(x: u64) -> f32 {
        x as f32
    }
    // udivdi3
    // FIX: was `a * b`, which lowers to a multiply (no libcall) instead of
    // the unsigned 64-bit division this is supposed to exercise.
    pub fn aeabi_uldivmod(a: u64, b: u64) -> u64 {
        a / b
    }
    pub fn moddi3(a: i64, b: i64) -> i64 {
        a % b
    }
    pub fn mulodi4(a: i64, b: i64) -> i64 {
        a * b
    }
    pub fn powidf2(a: f64, b: i32) -> f64 {
        a.powi(b)
    }
    pub fn powisf2(a: f32, b: i32) -> f32 {
        a.powi(b)
    }
    pub fn umoddi3(a: u64, b: u64) -> u64 {
        a % b
    }
    pub fn muloti4(a: u128, b: u128) -> Option<u128> {
        a.checked_mul(b)
    }
    pub fn multi3(a: u128, b: u128) -> u128 {
        a.wrapping_mul(b)
    }
    // FIX: ashlti3 is shift *left*; it used `>>` (and ashrti3 used `<<`),
    // so the wrong shift intrinsics were being exercised.
    pub fn ashlti3(a: u128, b: usize) -> u128 {
        a << b
    }
    pub fn ashrti3(a: u128, b: usize) -> u128 {
        a >> b
    }
    // NOTE(review): lshrti3 names the *logical* shift, but `>>` on i128 is
    // arithmetic — kept as-is to preserve the signature; confirm intent.
    pub fn lshrti3(a: i128, b: usize) -> i128 {
        a >> b
    }
    pub fn udivti3(a: u128, b: u128) -> u128 {
        a / b
    }
    pub fn umodti3(a: u128, b: u128) -> u128 {
        a % b
    }
    pub fn divti3(a: i128, b: i128) -> i128 {
        a / b
    }
    pub fn modti3(a: i128, b: i128) -> i128 {
        a % b
    }
}
fn run() {
use intrinsics::*;
// A copy of "test::black_box". Used to prevent LLVM from optimizing away the intrinsics during LTO
fn bb<T>(dummy: T) -> T {
unsafe { asm!("" : : "r"(&dummy)) }
dummy
}
bb(aeabi_d2f(bb(2.)));
bb(aeabi_d2i(bb(2.)));
bb(aeabi_d2l(bb(2.)));
bb(aeabi_d2uiz(bb(2.)));
bb(aeabi_d2ulz(bb(2.)));
bb(aeabi_dadd(bb(2.), bb(3.)));
bb(aeabi_dcmpeq(bb(2.), bb(3.)));
bb(aeabi_dcmpgt(bb(2.), bb(3.)));
bb(aeabi_dcmplt(bb(2.), bb(3.)));
bb(aeabi_ddiv(bb(2.), bb(3.)));
bb(aeabi_dmul(bb(2.), bb(3.)));
bb(aeabi_dsub(bb(2.), bb(3.)));
bb(aeabi_f2d(bb(2.)));
bb(aeabi_f2iz(bb(2.)));
bb(aeabi_f2lz(bb(2.)));
bb(aeabi_f2uiz(bb(2.)));
bb(aeabi_f2ulz(bb(2.)));
bb(aeabi_fadd(bb(2.), bb(3.)));
bb(aeabi_fcmpeq(bb(2.), bb(3.)));
bb(aeabi_fcmpgt(bb(2.), bb(3.)));
bb(aeabi_fcmplt(bb(2.), bb(3.)));
bb(aeabi_fdiv(bb(2.), bb(3.)));
bb(aeabi_fmul(bb(2.), bb(3.)));
bb(aeabi_fsub(bb(2.), bb(3.)));
bb(aeabi_i2d(bb(2)));
bb(aeabi_i2f(bb(2)));
bb(aeabi_idiv(bb(2), bb(3)));
bb(aeabi_idivmod(bb(2), bb(3)));
bb(aeabi_l2d(bb(2)));
bb(aeabi_l2f(bb(2)));
bb(aeabi_ldivmod(bb(2), bb(3)));
bb(aeabi_lmul(bb(2), bb(3)));
bb(aeabi_ui2d(bb(2)));
bb(aeabi_ui2f(bb(2)));
bb(aeabi_uidiv(bb(2), bb(3)));
bb(aeabi_uidivmod(bb(2), bb(3)));
bb(aeabi_ul2d(bb(2)));
bb(aeabi_ul2f(bb(2)));
bb(aeabi_uldivmod(bb(2), bb(3)));
bb(moddi3(bb(2), bb(3)));
bb(mulodi4(bb(2), bb(3)));
bb(powidf2(bb(2.), bb(3)));
bb(powisf2(bb(2.), bb(3)));
bb(umoddi3(bb(2), bb(3)));
bb(muloti4(bb(2), bb(2)));
bb(multi3(bb(2), bb(2)));
bb(ashlti3(bb(2), bb(2)));
bb(ashrti3(bb(2), bb(2)));
bb(lshrti3(bb(2), bb(2)));
bb(udivti3(bb(2), bb(2)));
bb(umodti3(bb(2), bb(2)));
bb(divti3(bb(2), bb(2)));
bb(modti3(bb(2), bb(2)));
something_with_a_dtor(&|| assert_eq!(bb(1), 1));
}
fn something_with_a_dtor(f: &Fn()) {
struct A<'a>(&'a (Fn() + 'a));
impl<'a> Drop for A<'a> {
fn drop(&mut self) {
(self.0)();
}
}
let _a = A(f);
f();
}
#[cfg(not(thumb))]
#[start]
fn main(_: isize, _: *const *const u8) -> isize {
run();
0
}
#[cfg(thumb)]
#[no_mangle]
pub fn _start() -> ! {
run();
loop {}
}
#[cfg(windows)]
#[link(name = "kernel32")]
#[link(name = "msvcrt")]
extern {}
// ARM targets need these symbols
#[no_mangle]
pub fn __aeabi_unwind_cpp_pr0() {}
#[no_mangle]
pub fn __aeabi_unwind_cpp_pr1() {}
#[cfg(not(windows))]
#[allow(non_snake_case)]
#[no_mangle]
pub fn _Unwind_Resume() {}
#[cfg(not(windows))]
#[lang = "eh_personality"]
#[no_mangle]
pub extern "C" fn eh_personality() {}
#[lang = "panic_fmt"]
#[no_mangle]
#[allow(private_no_mangle_fns)]
extern "C" fn panic_fmt() {}
More attempts for intrinsics example
// By compiling this file we check that all the intrinsics we care about continue to be provided by
// the `compiler_builtins` crate regardless of the changes we make to it. If we, by mistake, stop
// compiling a C implementation and forget to implement that intrinsic in Rust, this file will fail
// to link due to the missing intrinsic (symbol).
#![allow(unused_features)]
#![cfg_attr(thumb, no_main)]
#![deny(dead_code)]
#![feature(alloc_system)]
#![feature(asm)]
#![feature(compiler_builtins_lib)]
#![feature(core_float)]
#![feature(lang_items)]
#![feature(start)]
#![feature(global_allocator)]
#![feature(allocator_api)]
#![cfg_attr(windows, feature(panic_unwind))]
#![no_std]
#[cfg(not(thumb))]
#[link(name = "c")]
extern {}
// NOTE cfg(not(thumbv6m)) means that the operation is not supported on ARMv6-M at all. Not even
// compiler-rt provides a C/assembly implementation.
// Every function in this module maps will be lowered to an intrinsic by LLVM, if the platform
// doesn't have native support for the operation used in the function. ARM has a naming convention
// convention for its intrinsics that's different from other architectures; that's why some function
// have an additional comment: the function name is the ARM name for the intrinsic and the comment
// in the non-ARM name for the intrinsic.
/// Each function here is lowered by LLVM to a libcall when the target lacks
/// native support; linking this file therefore verifies the corresponding
/// compiler-builtins symbol exists. The comment above each ARM-named
/// function gives the generic (non-ARM) intrinsic name — the operation in
/// the body must match that name, or the wrong symbol gets pulled in.
mod intrinsics {
    // trunccdfsf2
    pub fn aeabi_d2f(x: f64) -> f32 {
        x as f32
    }
    // fixdfsi
    pub fn aeabi_d2i(x: f64) -> i32 {
        x as i32
    }
    // fixdfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_d2l(x: f64) -> i64 {
        x as i64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_d2l(_: f64) -> i64 {
        0
    }
    // fixunsdfsi
    pub fn aeabi_d2uiz(x: f64) -> u32 {
        x as u32
    }
    // fixunsdfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_d2ulz(x: f64) -> u64 {
        x as u64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_d2ulz(_: f64) -> u64 {
        0
    }
    // adddf3
    pub fn aeabi_dadd(a: f64, b: f64) -> f64 {
        a + b
    }
    // eqdf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_dcmpeq(a: f64, b: f64) -> bool {
        a == b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_dcmpeq(_: f64, _: f64) -> bool {
        true
    }
    // gtdf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_dcmpgt(a: f64, b: f64) -> bool {
        a > b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_dcmpgt(_: f64, _: f64) -> bool {
        true
    }
    // ltdf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_dcmplt(a: f64, b: f64) -> bool {
        a < b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_dcmplt(_: f64, _: f64) -> bool {
        true
    }
    // divdf3
    pub fn aeabi_ddiv(a: f64, b: f64) -> f64 {
        a / b
    }
    // muldf3
    pub fn aeabi_dmul(a: f64, b: f64) -> f64 {
        a * b
    }
    // subdf3
    pub fn aeabi_dsub(a: f64, b: f64) -> f64 {
        a - b
    }
    // extendsfdf2
    pub fn aeabi_f2d(x: f32) -> f64 {
        x as f64
    }
    // fixsfsi
    pub fn aeabi_f2iz(x: f32) -> i32 {
        x as i32
    }
    // fixsfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_f2lz(x: f32) -> i64 {
        x as i64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_f2lz(_: f32) -> i64 {
        0
    }
    // fixunssfsi
    pub fn aeabi_f2uiz(x: f32) -> u32 {
        x as u32
    }
    // fixunssfdi
    #[cfg(not(thumbv6m))]
    pub fn aeabi_f2ulz(x: f32) -> u64 {
        x as u64
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_f2ulz(_: f32) -> u64 {
        0
    }
    // addsf3
    pub fn aeabi_fadd(a: f32, b: f32) -> f32 {
        a + b
    }
    // eqsf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_fcmpeq(a: f32, b: f32) -> bool {
        a == b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_fcmpeq(_: f32, _: f32) -> bool {
        true
    }
    // gtsf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_fcmpgt(a: f32, b: f32) -> bool {
        a > b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_fcmpgt(_: f32, _: f32) -> bool {
        true
    }
    // ltsf2
    #[cfg(not(thumbv6m))]
    pub fn aeabi_fcmplt(a: f32, b: f32) -> bool {
        a < b
    }
    #[cfg(thumbv6m)]
    pub fn aeabi_fcmplt(_: f32, _: f32) -> bool {
        true
    }
    // divsf3
    pub fn aeabi_fdiv(a: f32, b: f32) -> f32 {
        a / b
    }
    // mulsf3
    pub fn aeabi_fmul(a: f32, b: f32) -> f32 {
        a * b
    }
    // subsf3
    pub fn aeabi_fsub(a: f32, b: f32) -> f32 {
        a - b
    }
    // floatsidf
    pub fn aeabi_i2d(x: i32) -> f64 {
        x as f64
    }
    // floatsisf
    pub fn aeabi_i2f(x: i32) -> f32 {
        x as f32
    }
    pub fn aeabi_idiv(a: i32, b: i32) -> i32 {
        a.wrapping_div(b)
    }
    pub fn aeabi_idivmod(a: i32, b: i32) -> i32 {
        a % b
    }
    // floatdidf
    pub fn aeabi_l2d(x: i64) -> f64 {
        x as f64
    }
    // floatdisf
    pub fn aeabi_l2f(x: i64) -> f32 {
        x as f32
    }
    // divdi3
    pub fn aeabi_ldivmod(a: i64, b: i64) -> i64 {
        a / b
    }
    // muldi3
    pub fn aeabi_lmul(a: i64, b: i64) -> i64 {
        a.wrapping_mul(b)
    }
    // floatunsidf
    pub fn aeabi_ui2d(x: u32) -> f64 {
        x as f64
    }
    // floatunsisf
    pub fn aeabi_ui2f(x: u32) -> f32 {
        x as f32
    }
    pub fn aeabi_uidiv(a: u32, b: u32) -> u32 {
        a / b
    }
    pub fn aeabi_uidivmod(a: u32, b: u32) -> u32 {
        a % b
    }
    // floatundidf
    pub fn aeabi_ul2d(x: u64) -> f64 {
        x as f64
    }
    // floatundisf
    pub fn aeabi_ul2f(x: u64) -> f32 {
        x as f32
    }
    // udivdi3
    // FIX: was `a * b`, which lowers to a multiply (no libcall) instead of
    // the unsigned 64-bit division this is supposed to exercise.
    pub fn aeabi_uldivmod(a: u64, b: u64) -> u64 {
        a / b
    }
    pub fn moddi3(a: i64, b: i64) -> i64 {
        a % b
    }
    pub fn mulodi4(a: i64, b: i64) -> i64 {
        a * b
    }
    pub fn umoddi3(a: u64, b: u64) -> u64 {
        a % b
    }
    pub fn muloti4(a: u128, b: u128) -> Option<u128> {
        a.checked_mul(b)
    }
    pub fn multi3(a: u128, b: u128) -> u128 {
        a.wrapping_mul(b)
    }
    // FIX: ashlti3 is shift *left*; it used `>>` (and ashrti3 used `<<`),
    // so the wrong shift intrinsics were being exercised.
    pub fn ashlti3(a: u128, b: usize) -> u128 {
        a << b
    }
    pub fn ashrti3(a: u128, b: usize) -> u128 {
        a >> b
    }
    // NOTE(review): lshrti3 names the *logical* shift, but `>>` on i128 is
    // arithmetic — kept as-is to preserve the signature; confirm intent.
    pub fn lshrti3(a: i128, b: usize) -> i128 {
        a >> b
    }
    pub fn udivti3(a: u128, b: u128) -> u128 {
        a / b
    }
    pub fn umodti3(a: u128, b: u128) -> u128 {
        a % b
    }
    pub fn divti3(a: i128, b: i128) -> i128 {
        a / b
    }
    pub fn modti3(a: i128, b: i128) -> i128 {
        a % b
    }
}
fn run() {
use intrinsics::*;
// A copy of "test::black_box". Used to prevent LLVM from optimizing away the intrinsics during LTO
fn bb<T>(dummy: T) -> T {
unsafe { asm!("" : : "r"(&dummy)) }
dummy
}
bb(aeabi_d2f(bb(2.)));
bb(aeabi_d2i(bb(2.)));
bb(aeabi_d2l(bb(2.)));
bb(aeabi_d2uiz(bb(2.)));
bb(aeabi_d2ulz(bb(2.)));
bb(aeabi_dadd(bb(2.), bb(3.)));
bb(aeabi_dcmpeq(bb(2.), bb(3.)));
bb(aeabi_dcmpgt(bb(2.), bb(3.)));
bb(aeabi_dcmplt(bb(2.), bb(3.)));
bb(aeabi_ddiv(bb(2.), bb(3.)));
bb(aeabi_dmul(bb(2.), bb(3.)));
bb(aeabi_dsub(bb(2.), bb(3.)));
bb(aeabi_f2d(bb(2.)));
bb(aeabi_f2iz(bb(2.)));
bb(aeabi_f2lz(bb(2.)));
bb(aeabi_f2uiz(bb(2.)));
bb(aeabi_f2ulz(bb(2.)));
bb(aeabi_fadd(bb(2.), bb(3.)));
bb(aeabi_fcmpeq(bb(2.), bb(3.)));
bb(aeabi_fcmpgt(bb(2.), bb(3.)));
bb(aeabi_fcmplt(bb(2.), bb(3.)));
bb(aeabi_fdiv(bb(2.), bb(3.)));
bb(aeabi_fmul(bb(2.), bb(3.)));
bb(aeabi_fsub(bb(2.), bb(3.)));
bb(aeabi_i2d(bb(2)));
bb(aeabi_i2f(bb(2)));
bb(aeabi_idiv(bb(2), bb(3)));
bb(aeabi_idivmod(bb(2), bb(3)));
bb(aeabi_l2d(bb(2)));
bb(aeabi_l2f(bb(2)));
bb(aeabi_ldivmod(bb(2), bb(3)));
bb(aeabi_lmul(bb(2), bb(3)));
bb(aeabi_ui2d(bb(2)));
bb(aeabi_ui2f(bb(2)));
bb(aeabi_uidiv(bb(2), bb(3)));
bb(aeabi_uidivmod(bb(2), bb(3)));
bb(aeabi_ul2d(bb(2)));
bb(aeabi_ul2f(bb(2)));
bb(aeabi_uldivmod(bb(2), bb(3)));
bb(moddi3(bb(2), bb(3)));
bb(mulodi4(bb(2), bb(3)));
bb(umoddi3(bb(2), bb(3)));
bb(muloti4(bb(2), bb(2)));
bb(multi3(bb(2), bb(2)));
bb(ashlti3(bb(2), bb(2)));
bb(ashrti3(bb(2), bb(2)));
bb(lshrti3(bb(2), bb(2)));
bb(udivti3(bb(2), bb(2)));
bb(umodti3(bb(2), bb(2)));
bb(divti3(bb(2), bb(2)));
bb(modti3(bb(2), bb(2)));
something_with_a_dtor(&|| assert_eq!(bb(1), 1));
}
fn something_with_a_dtor(f: &Fn()) {
struct A<'a>(&'a (Fn() + 'a));
impl<'a> Drop for A<'a> {
fn drop(&mut self) {
(self.0)();
}
}
let _a = A(f);
f();
}
#[cfg(not(thumb))]
#[start]
fn main(_: isize, _: *const *const u8) -> isize {
run();
0
}
#[cfg(thumb)]
#[no_mangle]
pub fn _start() -> ! {
run();
loop {}
}
#[cfg(windows)]
#[link(name = "kernel32")]
#[link(name = "msvcrt")]
extern {}
// ARM targets need these symbols
#[no_mangle]
pub fn __aeabi_unwind_cpp_pr0() {}
#[no_mangle]
pub fn __aeabi_unwind_cpp_pr1() {}
#[cfg(not(windows))]
#[allow(non_snake_case)]
#[no_mangle]
pub fn _Unwind_Resume() {}
#[cfg(not(windows))]
#[lang = "eh_personality"]
#[no_mangle]
pub extern "C" fn eh_personality() {}
#[lang = "panic_fmt"]
#[no_mangle]
#[allow(private_no_mangle_fns)]
extern "C" fn panic_fmt() {}
|
use syntect::easy::HighlightLines;
use syntect::parsing::SyntaxSet;
use syntect::highlighting::{ThemeSet,Style};
use syntect::util::{as_latex_escaped,LinesWithEndings};
/// Highlights a small Rust snippet and prints each line alongside its
/// LaTeX-escaped styled form.
fn main() {
    // Syntax and theme definitions are expensive to load — do it once up front.
    let syntax_set = SyntaxSet::load_defaults_newlines();
    let theme_set = ThemeSet::load_defaults();

    let rust_syntax = syntax_set.find_syntax_by_extension("rs").unwrap();
    let source = "pub struct Wow { hi: u64 }\nfn blah() -> u64 {}\n";
    let mut highlighter = HighlightLines::new(rust_syntax, &theme_set.themes["InspiredGitHub"]);

    // LinesWithEndings keeps the trailing '\n' on each line, which the
    // "newlines" parsing mode loaded above requires.
    for line in LinesWithEndings::from(source) {
        let styled: Vec<(Style, &str)> = highlighter.highlight(line, &syntax_set);
        let latex = as_latex_escaped(&styled[..]);
        println!("\n{:?}", line);
        println!("\n{}", latex);
    }
}
remove stray whitespace
use syntect::easy::HighlightLines;
use syntect::parsing::SyntaxSet;
use syntect::highlighting::{ThemeSet,Style};
use syntect::util::{as_latex_escaped,LinesWithEndings};
/// Demo: syntax-highlight a two-line Rust snippet with syntect and print
/// each line as both a debug string and a LaTeX-escaped rendering.
fn main() {
    // Load these once at the start of your program
    let ps = SyntaxSet::load_defaults_newlines();
    let ts = ThemeSet::load_defaults();
    // Look up the Rust syntax by file extension; panics if defaults lack it.
    let syntax = ps.find_syntax_by_extension("rs").unwrap();
    let s = "pub struct Wow { hi: u64 }\nfn blah() -> u64 {}\n";
    let mut h = HighlightLines::new(syntax, &ts.themes["InspiredGitHub"]);
    for line in LinesWithEndings::from(s) { // LinesWithEndings enables use of newlines mode
        // Each range pairs a resolved style with the slice of text it covers.
        let ranges: Vec<(Style, &str)> = h.highlight(line, &ps);
        let escaped = as_latex_escaped(&ranges[..]);
        println!("\n{:?}", line);
        println!("\n{}", escaped);
    }
}
|
//! JIT Compiler to generate code fragments in runtime.
extern crate libc;
extern crate llvm_sys;
#[macro_use]
mod macros;
mod block;
mod buffer;
mod builder;
mod util;
mod types;
mod value;
use std::mem;
use std::ptr;
use llvm_sys::core;
use llvm_sys::prelude::{
LLVMContextRef,
LLVMModuleRef
};
use llvm_sys::bit_reader::LLVMParseBitcodeInContext;
use llvm_sys::execution_engine::{
LLVMExecutionEngineRef,
LLVMLinkInMCJIT,
LLVMMCJITCompilerOptions,
LLVMCreateMCJITCompilerForModule
};
use llvm_sys::linker;
use llvm_sys::target::{
LLVM_InitializeNativeTarget,
LLVM_InitializeNativeAsmPrinter
};
use llvm_sys::target_machine::LLVMCodeModel;
use libc::{c_char, c_uint};
use buffer::MemoryBuffer;
use builder::Builder;
use value::{GlobalValue, Value, ValueIter};
use types::Ty;
use util::chars;
/// Optimization level handed to the MCJIT engine by default.
// NOTE(review): `LVEL` looks like a typo for `LEVEL`; kept, since renaming
// would break callers.
pub const JIT_OPT_LVEL: usize = 2;

/// Address spaces a global can be placed in (numbering matches the
/// NVPTX-style convention used by the values below).
#[repr(C)]
#[derive(Copy, Clone)]
pub enum AddressSpace
{
    Generic = 0,
    Global = 1,
    Shared = 3,
    Const = 4,
    Local = 5,
}

/// Creates an empty LLVM module called `name` inside the context `ctx`.
fn new_module(ctx: LLVMContextRef, name: &str) -> LLVMModuleRef
{
    let c_name = util::chars::from_str(name);
    unsafe {
        core::LLVMModuleCreateWithNameInContext(c_name, ctx)
    }
}

/// Parses the bitcode file at `path` into a module owned by `ctx`.
///
/// Returns the LLVM error message on failure.
fn new_module_from_bc(ctx: LLVMContextRef, path: &str) -> Result<LLVMModuleRef, String>
{
    unsafe {
        // Out-parameters filled in by the FFI call below (pre-`MaybeUninit` idiom).
        let mut out: LLVMModuleRef = mem::uninitialized();
        let mut err: *mut c_char = mem::uninitialized();
        let buf = try!(MemoryBuffer::from_file(path));
        let ret = LLVMParseBitcodeInContext(ctx,
                                            buf.as_ptr(),
                                            &mut out,
                                            &mut err);
        // `llvm_ret!` turns the status code + out/err pair into a `Result`.
        llvm_ret!(ret, out, err)
    }
}

/// Builds an MCJIT execution engine for module `m` at optimization level
/// `opt_lv`, initializing the native target and asm printer on the way.
fn new_jit_ee(m: LLVMModuleRef, opt_lv: usize) -> Result<LLVMExecutionEngineRef, String>
{
    unsafe {
        let mut ee : LLVMExecutionEngineRef = mem::uninitialized();
        let mut err: *mut c_char = mem::uninitialized();
        // Force the MCJIT implementation to be linked in before use.
        LLVMLinkInMCJIT();
        expect_noerr!(LLVM_InitializeNativeTarget(), "failed to initialize native target");
        expect_noerr!(LLVM_InitializeNativeAsmPrinter(), "failed to initialize native asm printer");
        let mut opts = new_mcjit_compiler_options(opt_lv);
        let opts_size = mem::size_of::<LLVMMCJITCompilerOptions>();
        let ret = LLVMCreateMCJITCompilerForModule(&mut ee,
                                                   m,
                                                   &mut opts,
                                                   opts_size as u64,
                                                   &mut err);
        llvm_ret!(ret, ee, err)
    }
}

/// Default MCJIT options: JIT code model, fast instruction selection on,
/// frame-pointer elimination allowed, default memory manager.
fn new_mcjit_compiler_options(opt_lv: usize) -> LLVMMCJITCompilerOptions
{
    LLVMMCJITCompilerOptions {
        OptLevel: opt_lv as c_uint,
        CodeModel: LLVMCodeModel::LLVMCodeModelJITDefault,
        NoFramePointerElim: 0,
        EnableFastISel: 1,
        MCJMM: ptr::null_mut()
    }
}
/// JIT compiler owning an LLVM context, a module, and the MCJIT execution
/// engine attached to that module.
///
/// `Drop` disposes the module first, then the context.
pub struct JitCompiler
{
    /// Owning LLVM context.
    ctx    : LLVMContextRef,
    /// Module all code is emitted into.
    module : LLVMModuleRef,
    /// MCJIT execution engine bound to `module`.
    ee     : LLVMExecutionEngineRef,
    //builder: Builder // internal use
}

impl JitCompiler {
    /// Creates a compiler with a fresh, empty module named `module_name`.
    pub fn new(module_name: &str) -> Result<JitCompiler, String>
    {
        let ctx = unsafe { core::LLVMContextCreate() };
        let module = new_module(ctx, module_name);
        JitCompiler::new_internal(ctx, module)
    }

    /// Creates a compiler whose module is parsed from the bitcode file at
    /// `bitcode_path`.
    pub fn new_from_bc(bitcode_path: &str) -> Result<JitCompiler, String>
    {
        let ctx = unsafe { core::LLVMContextCreate() };
        let module = try!(new_module_from_bc(ctx, bitcode_path));
        JitCompiler::new_internal(ctx, module)
    }

    /// Common constructor tail: attaches an MCJIT engine to `module` at the
    /// default optimization level.
    fn new_internal(ctx: LLVMContextRef, module: LLVMModuleRef) -> Result<JitCompiler, String>
    {
        let ee = try!(new_jit_ee(module, JIT_OPT_LVEL));
        //let builder = Builder(unsafe { core::LLVMCreateBuilderInContext(ctx) });
        Ok(JitCompiler {
            ctx    : ctx,
            module : module,
            ee     : ee,
            //builder: builder
        })
    }

    /// Raw LLVM context handle.
    pub fn context(&self) -> LLVMContextRef { self.ctx }
    /// Raw LLVM module handle.
    pub fn module(&self) -> LLVMModuleRef { self.module }
    /// Raw MCJIT execution engine handle.
    pub fn engine(&self) -> LLVMExecutionEngineRef { self.ee }

    /// Add an external global to the module with the given type and name.
    pub fn add_global(&self, name: &str, ty: &Ty) -> GlobalValue
    {
        let c_name = chars::from_str(name);
        GlobalValue(
            unsafe {
                core::LLVMAddGlobal(self.module, ty.0, c_name)
            }
        )
    }

    /// Add a global in the given address space to the module with the given type and name.
    pub fn add_global_in_addr_space(&self,
                                    name: &str,
                                    ty: &Ty,
                                    sp: AddressSpace) -> GlobalValue
    {
        let c_name = chars::from_str(name);
        GlobalValue(
            unsafe {
                core::LLVMAddGlobalInAddressSpace(self.module, ty.0, c_name, sp as c_uint)
            }
        )
    }

    /// Add a constant global to the module with the given type, name and value.
    pub fn add_global_constant(&self, name: &str, val: &Value) -> GlobalValue
    {
        let c_name = chars::from_str(name);
        GlobalValue(
            unsafe {
                // The global's type comes from the initializer value.
                let global = core::LLVMAddGlobal(self.module, val.ty().0, c_name);
                core::LLVMSetInitializer (global, val.0);
                global
            }
        )
    }

    /// Get the global with the name given, or `None` if no global with that name exists.
    pub fn get_global(&self, name: &str) -> Option<GlobalValue>
    {
        let c_name = chars::from_str(name);
        unsafe {
            let ptr = core::LLVMGetNamedGlobal(self.module, c_name);
            if ptr.is_null() {
                None
            } else {
                Some(GlobalValue(ptr))
            }
        }
    }

    /// Get an iterator of global values
    pub fn global_values(&self) -> ValueIter<GlobalValue>
    {
        ValueIter::new(
            unsafe { core::LLVMGetFirstGlobal(self.module) },
            core::LLVMGetNextGlobal
        )
    }

    /// Link a module into this module, returning an error string if an error occurs.
    ///
    /// This *does not* destroy the source module.
    pub fn link(&self, module: LLVMModuleRef) -> Result<(), String>
    {
        unsafe {
            let mut error = mem::uninitialized();
            let ret = linker::LLVMLinkModules(self.module, module,
                                              linker::LLVMLinkerMode::LLVMLinkerPreserveSource,
                                              &mut error);
            llvm_ret!(ret, (), error)
        }
    }

    /// Link a module into this module, returning an error string if an error occurs.
    ///
    /// This *does* destroy the source module.
    pub fn link_destroy(&self, module: LLVMModuleRef) -> Result<(), String>
    {
        unsafe {
            let mut error = mem::uninitialized();
            let ret = linker::LLVMLinkModules(self.module, module,
                                              linker::LLVMLinkerMode::LLVMLinkerDestroySource,
                                              &mut error);
            llvm_ret!(ret, (), error)
        }
    }

    /// Creates a fresh IR builder in this compiler's context.
    pub fn new_builder(&self) -> Builder
    {
        Builder(unsafe { core::LLVMCreateBuilderInContext(self.ctx) })
    }
}

/// Releases the module and then the owning context.
impl Drop for JitCompiler {
    fn drop(&mut self) {
        unsafe {
            core::LLVMDisposeModule(self.module);
            core::LLVMContextDispose(self.ctx);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The constructor should succeed for a plain module name.
    #[test]
    fn test_ctor() {
        // Fix: `new` takes a module *name*, not a file path — the old test
        // passed a `.bc` path here, which only makes sense for `new_from_bc`.
        // `expect` keeps the error message instead of `.ok().unwrap()`, and
        // `_jit` silences the unused-variable warning.
        let _jit = JitCompiler::new("test-module").expect(
            "JitCompiler::new failed"
        );
    }
}
Add optimize() to JitCompiler.
This function lets the module be run through an LLVMPassManager.
//! JIT Compiler to generate code fragments in runtime.
extern crate libc;
extern crate llvm_sys;
#[macro_use]
mod macros;
mod block;
mod buffer;
mod builder;
mod util;
mod types;
mod value;
use std::mem;
use std::ptr;
use llvm_sys::core;
use llvm_sys::prelude::{
LLVMContextRef,
LLVMModuleRef
};
use llvm_sys::bit_reader::LLVMParseBitcodeInContext;
use llvm_sys::execution_engine::{
LLVMExecutionEngineRef,
LLVMLinkInMCJIT,
LLVMMCJITCompilerOptions,
LLVMCreateMCJITCompilerForModule
};
use llvm_sys::linker;
use llvm_sys::transforms::pass_manager_builder as pass;
use llvm_sys::target::{
LLVM_InitializeNativeTarget,
LLVM_InitializeNativeAsmPrinter
};
use llvm_sys::target_machine::LLVMCodeModel;
use libc::{c_char, c_uint};
use buffer::MemoryBuffer;
use builder::Builder;
use value::{GlobalValue, Value, ValueIter};
use types::Ty;
use util::chars;
/// Optimization level handed to the MCJIT engine by default.
// NOTE(review): `LVEL` looks like a typo for `LEVEL`; kept, since renaming
// would break callers.
pub const JIT_OPT_LVEL: usize = 2;

/// Address spaces a global can be placed in (numbering matches the
/// NVPTX-style convention used by the values below).
#[repr(C)]
#[derive(Copy, Clone)]
pub enum AddressSpace
{
    Generic = 0,
    Global = 1,
    Shared = 3,
    Const = 4,
    Local = 5,
}

/// Creates an empty LLVM module called `name` inside the context `ctx`.
fn new_module(ctx: LLVMContextRef, name: &str) -> LLVMModuleRef
{
    let c_name = util::chars::from_str(name);
    unsafe {
        core::LLVMModuleCreateWithNameInContext(c_name, ctx)
    }
}

/// Parses the bitcode file at `path` into a module owned by `ctx`.
///
/// Returns the LLVM error message on failure.
fn new_module_from_bc(ctx: LLVMContextRef, path: &str) -> Result<LLVMModuleRef, String>
{
    unsafe {
        // Out-parameters filled in by the FFI call below (pre-`MaybeUninit` idiom).
        let mut out: LLVMModuleRef = mem::uninitialized();
        let mut err: *mut c_char = mem::uninitialized();
        let buf = try!(MemoryBuffer::from_file(path));
        let ret = LLVMParseBitcodeInContext(ctx,
                                            buf.as_ptr(),
                                            &mut out,
                                            &mut err);
        // `llvm_ret!` turns the status code + out/err pair into a `Result`.
        llvm_ret!(ret, out, err)
    }
}

/// Builds an MCJIT execution engine for module `m` at optimization level
/// `opt_lv`, initializing the native target and asm printer on the way.
fn new_jit_ee(m: LLVMModuleRef, opt_lv: usize) -> Result<LLVMExecutionEngineRef, String>
{
    unsafe {
        let mut ee : LLVMExecutionEngineRef = mem::uninitialized();
        let mut err: *mut c_char = mem::uninitialized();
        // Force the MCJIT implementation to be linked in before use.
        LLVMLinkInMCJIT();
        expect_noerr!(LLVM_InitializeNativeTarget(), "failed to initialize native target");
        expect_noerr!(LLVM_InitializeNativeAsmPrinter(), "failed to initialize native asm printer");
        let mut opts = new_mcjit_compiler_options(opt_lv);
        let opts_size = mem::size_of::<LLVMMCJITCompilerOptions>();
        let ret = LLVMCreateMCJITCompilerForModule(&mut ee,
                                                   m,
                                                   &mut opts,
                                                   opts_size as u64,
                                                   &mut err);
        llvm_ret!(ret, ee, err)
    }
}

/// Default MCJIT options: JIT code model, fast instruction selection on,
/// frame-pointer elimination allowed, default memory manager.
fn new_mcjit_compiler_options(opt_lv: usize) -> LLVMMCJITCompilerOptions
{
    LLVMMCJITCompilerOptions {
        OptLevel: opt_lv as c_uint,
        CodeModel: LLVMCodeModel::LLVMCodeModelJITDefault,
        NoFramePointerElim: 0,
        EnableFastISel: 1,
        MCJMM: ptr::null_mut()
    }
}
/// JIT compiler owning an LLVM context, a module, and the MCJIT execution
/// engine attached to that module.
///
/// `Drop` disposes the module first, then the context.
pub struct JitCompiler
{
    /// Owning LLVM context.
    ctx    : LLVMContextRef,
    /// Module all code is emitted into.
    module : LLVMModuleRef,
    /// MCJIT execution engine bound to `module`.
    ee     : LLVMExecutionEngineRef,
    //builder: Builder // internal use
}
impl JitCompiler {
    /// Creates a compiler with a fresh, empty module named `module_name`.
    pub fn new(module_name: &str) -> Result<JitCompiler, String>
    {
        let ctx = unsafe { core::LLVMContextCreate() };
        let module = new_module(ctx, module_name);
        JitCompiler::new_internal(ctx, module)
    }

    /// Creates a compiler whose module is parsed from the bitcode file at
    /// `bitcode_path`.
    pub fn new_from_bc(bitcode_path: &str) -> Result<JitCompiler, String>
    {
        let ctx = unsafe { core::LLVMContextCreate() };
        let module = try!(new_module_from_bc(ctx, bitcode_path));
        JitCompiler::new_internal(ctx, module)
    }

    /// Common constructor tail: attaches an MCJIT engine to `module` at the
    /// default optimization level.
    fn new_internal(ctx: LLVMContextRef, module: LLVMModuleRef) -> Result<JitCompiler, String>
    {
        let ee = try!(new_jit_ee(module, JIT_OPT_LVEL));
        //let builder = Builder(unsafe { core::LLVMCreateBuilderInContext(ctx) });
        Ok(JitCompiler {
            ctx    : ctx,
            module : module,
            ee     : ee,
            //builder: builder
        })
    }

    /// Raw LLVM context handle.
    pub fn context(&self) -> LLVMContextRef { self.ctx }
    /// Raw LLVM module handle.
    pub fn module(&self) -> LLVMModuleRef { self.module }
    /// Raw MCJIT execution engine handle.
    pub fn engine(&self) -> LLVMExecutionEngineRef { self.ee }

    /// Add an external global to the module with the given type and name.
    pub fn add_global(&self, name: &str, ty: &Ty) -> GlobalValue
    {
        let c_name = chars::from_str(name);
        GlobalValue(
            unsafe {
                core::LLVMAddGlobal(self.module, ty.0, c_name)
            }
        )
    }

    /// Add a global in the given address space to the module with the given type and name.
    pub fn add_global_in_addr_space(&self,
                                    name: &str,
                                    ty: &Ty,
                                    sp: AddressSpace) -> GlobalValue
    {
        let c_name = chars::from_str(name);
        GlobalValue(
            unsafe {
                core::LLVMAddGlobalInAddressSpace(self.module, ty.0, c_name, sp as c_uint)
            }
        )
    }

    /// Add a constant global to the module with the given type, name and value.
    pub fn add_global_constant(&self, name: &str, val: &Value) -> GlobalValue
    {
        let c_name = chars::from_str(name);
        GlobalValue(
            unsafe {
                // The global's type comes from the initializer value.
                let global = core::LLVMAddGlobal(self.module, val.ty().0, c_name);
                core::LLVMSetInitializer (global, val.0);
                global
            }
        )
    }

    /// Get the global with the name given, or `None` if no global with that name exists.
    pub fn get_global(&self, name: &str) -> Option<GlobalValue>
    {
        let c_name = chars::from_str(name);
        unsafe {
            let ptr = core::LLVMGetNamedGlobal(self.module, c_name);
            if ptr.is_null() {
                None
            } else {
                Some(GlobalValue(ptr))
            }
        }
    }

    /// Get an iterator of global values
    pub fn global_values(&self) -> ValueIter<GlobalValue>
    {
        ValueIter::new(
            unsafe { core::LLVMGetFirstGlobal(self.module) },
            core::LLVMGetNextGlobal
        )
    }

    /// Link a module into this module, returning an error string if an error occurs.
    ///
    /// This *does not* destroy the source module.
    pub fn link(&self, module: LLVMModuleRef) -> Result<(), String>
    {
        unsafe {
            let mut error = mem::uninitialized();
            let ret = linker::LLVMLinkModules(self.module, module,
                                              linker::LLVMLinkerMode::LLVMLinkerPreserveSource,
                                              &mut error);
            llvm_ret!(ret, (), error)
        }
    }

    /// Link a module into this module, returning an error string if an error occurs.
    ///
    /// This *does* destroy the source module.
    pub fn link_destroy(&self, module: LLVMModuleRef) -> Result<(), String>
    {
        unsafe {
            let mut error = mem::uninitialized();
            let ret = linker::LLVMLinkModules(self.module, module,
                                              linker::LLVMLinkerMode::LLVMLinkerDestroySource,
                                              &mut error);
            llvm_ret!(ret, (), error)
        }
    }

    /// Optimize this module with the given optimization level and size level.
    ///
    /// This runs passes depending on the levels given.
    pub fn optimize(&self, opt_level: usize, size_level: usize) {
        unsafe {
            let builder = pass::LLVMPassManagerBuilderCreate();
            pass::LLVMPassManagerBuilderSetOptLevel(builder, opt_level as c_uint);
            pass::LLVMPassManagerBuilderSetSizeLevel(builder, size_level as c_uint);
            let pass_manager = core::LLVMCreatePassManager();
            pass::LLVMPassManagerBuilderPopulateModulePassManager(builder, pass_manager);
            pass::LLVMPassManagerBuilderDispose(builder);
            core::LLVMRunPassManager(pass_manager, self.module);
            // Fix: the pass manager was created above but never released,
            // leaking it on every call to `optimize`.
            core::LLVMDisposePassManager(pass_manager);
        }
    }

    /// Creates a fresh IR builder in this compiler's context.
    pub fn new_builder(&self) -> Builder
    {
        Builder(unsafe { core::LLVMCreateBuilderInContext(self.ctx) })
    }
}
/// Releases the module and then the owning context.
impl Drop for JitCompiler {
    fn drop(&mut self) {
        unsafe {
            core::LLVMDisposeModule(self.module);
            core::LLVMContextDispose(self.ctx);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The constructor should succeed for a plain module name.
    #[test]
    fn test_ctor() {
        // Fix: `new` takes a module *name*, not a file path — the old test
        // passed a `.bc` path here, which only makes sense for `new_from_bc`.
        // `expect` keeps the error message instead of `.ok().unwrap()`, and
        // `_jit` silences the unused-variable warning.
        let _jit = JitCompiler::new("test-module").expect(
            "JitCompiler::new failed"
        );
    }
}
#![doc = "Learning data-related types."]
use std::sync::RwLock ;
use hashconsing::{ HConser, HConsed, HashConsign } ;
use common::* ;
use instance::Instance ;
use instance::info::* ;
use learning::ice::CData ;
/// Hash consed samples.
pub type HSample = HConsed< Args > ;
/// Vector of samples.
pub type HSamples = Vec<HSample> ;
/// A sample is some values for a predicate.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Sample {
  /// Predicate the sample applies to.
  pub pred: PrdIdx,
  /// Hash-consed argument values.
  pub args: HSample,
}
impl Sample {
  /// Builds a sample from a predicate index and hash-consed arguments.
  pub fn mk(pred: PrdIdx, args: HSample) -> Self {
    Sample { pred, args }
  }
  /// True iff this sample concerns `pred` and its arguments belong to
  /// `samples`.
  pub fn is_in(& self, pred: PrdIdx, samples: & HConSet<Args>) -> bool {
    if self.pred != pred {
      return false
    }
    samples.contains(& self.args)
  }
}
impl<'a> PebcakFmt<'a> for Sample {
  type Info = & 'a PrdMap<PrdInfo> ;
  fn pebcak_err(& self) -> ErrorKind {
    "during sample pebcak formatting".into()
  }
  // Renders as `(<pred name> <arg>*)`, resolving the predicate name
  // through `map`.
  fn pebcak_io_fmt<W: Write>(
    & self, w: & mut W, map: & 'a PrdMap<PrdInfo>
  ) -> IoRes<()> {
    write!(w, "({}", map[self.pred].name) ? ;
    for arg in & * self.args {
      write!(w, " {}", arg) ?
    }
    write!(w, ")")
  }
}
// Raw `Display`: prints the predicate index instead of its name.
impl_fmt!{
  Sample(self, fmt) {
    write!(fmt, "p_{} {}", self.pred, self.args)
  }
}
// Generates the constraint index type plus its set and total-map aliases.
wrap_usize!{
  #[doc = "Constraint index."]
  CstrIdx
  #[doc = "Constraint set."]
  set: CstrSet
  #[doc = "Constraint total map."]
  map: CstrMap with iter: CstrMapIter
}
/// Constraints using hashconsed samples.
///
/// A constraint is a tautology iff `lhs.is_empty()` and `rhs.is_none()`.
///
/// # Invariants
///
/// - `lhs.is_empty() => rhs.is_none()`
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Constraint {
  /// Samples whose conjunction forms the antecedent.
  pub lhs: Vec< Sample >,
  /// Consequent sample; `None` encodes `false`.
  pub rhs: Option< Sample >,
}
impl Constraint {
  /// Turns the constraint into a tautology (empty `lhs`, no `rhs`) and
  /// returns every sample it previously contained.
  pub fn tautologize(& mut self) -> Vec<Sample> {
    let mut res = ::std::mem::replace(& mut self.lhs, Vec::new()) ;
    if let Some(sample) = self.rhs.take() {
      res.push(sample)
    }
    res
  }
  /// Checks whether the lhs of the constraint is empty.
  pub fn is_tautology(& self) -> bool {
    let empty = self.lhs.is_empty() ;
    if empty {
      // Invariant: an empty lhs implies there is no rhs.
      debug_assert!( self.rhs.is_none() )
    }
    empty
  }
}
impl<'a> PebcakFmt<'a> for Constraint {
  type Info = & 'a PrdMap<PrdInfo> ;
  fn pebcak_err(& self) -> ErrorKind {
    "during constraint pebcak formatting".into()
  }
  // Renders as `<lhs>* => <rhs|false>` with predicate names resolved.
  fn pebcak_io_fmt<W: Write>(
    & self, w: & mut W, map: & 'a PrdMap<PrdInfo>
  ) -> IoRes<()> {
    for lhs in & self.lhs {
      lhs.pebcak_io_fmt(w, map) ? ;
      write!(w, " ") ?
    }
    write!(w, "=> ") ? ;
    if let Some(ref rhs) = self.rhs {
      rhs.pebcak_io_fmt(w, map)
    } else {
      // A missing rhs encodes `false`.
      write!(w, "false")
    }
  }
}
// Raw `Display` mirroring the pebcak rendering with predicate indices.
impl_fmt!{
  Constraint(self, fmt) {
    for lhs in & self.lhs {
      write!(fmt, "{} ", lhs) ?
    }
    write!(fmt, "=> ") ? ;
    if let Some(ref rhs) = self.rhs {
      write!(fmt, "{}", rhs)
    } else {
      write!(fmt, "false")
    }
  }
}
/// Structure storing unprojected learning data.
///
/// Used by the teacher to simplify constraints as it adds samples.
///
/// Also used by the ice learner to propagate the choices it makes.
pub struct NewData {
  /// Instance, only used for printing.
  instance: Arc<Instance>,
  /// Positive examples.
  pos: PrdMap< HConSet<Args> >,
  /// Negative examples.
  neg: PrdMap< HConSet<Args> >,
  /// Constraints.
  constraints: CstrMap<Constraint>,
  /// Map from samples to constraints.
  map: PrdMap< HConMap<Args, CstrSet> >,
}
impl NewData {
  /// Constructor.
  ///
  /// Pre-allocates one constraint map, positive set and negative set per
  /// predicate of `instance`.
  pub fn mk(instance: Arc<Instance>) -> Self {
    let pred_count = instance.preds().len() ;
    let (
      mut map, mut pos, mut neg
    ) = (
      PrdMap::with_capacity(pred_count),
      PrdMap::with_capacity(pred_count),
      PrdMap::with_capacity(pred_count)
    ) ;
    for _ in instance.preds() {
      map.push( HConMap::with_capacity(103) ) ;
      pos.push( HConSet::with_capacity(103) ) ;
      neg.push( HConSet::with_capacity(103) ) ;
    }
    let constraints = CstrMap::with_capacity(103) ;
    NewData { instance, pos, neg, constraints, map }
  }
/// The projected data for some predicate.
pub fn data_of(& self, pred: PrdIdx) -> CData {
let unc_set = & self.map[pred] ;
let pos_set = & self.pos[pred] ;
let neg_set = & self.neg[pred] ;
let (mut pos, mut neg, mut unc) = (
Vec::with_capacity( pos_set.len() ),
Vec::with_capacity( neg_set.len() ),
Vec::with_capacity( unc_set.len() )
) ;
for sample in pos_set.iter() {
pos.push( sample.clone() )
}
for sample in neg_set.iter() {
neg.push( sample.clone() )
}
for (sample, set) in unc_set.iter() {
if ! set.is_empty() {
unc.push( sample.clone() )
}
}
CData { pos, neg, unc }
}
  /// Tautologizes a constraint and removes the links with its samples in
  /// the map.
  ///
  /// Returns the samples the constraint used to contain.
  pub fn tautologize(& mut self, constraint: CstrIdx) -> Vec<Sample> {
    let samples = self.constraints[constraint].tautologize() ;
    for & Sample { pred, ref args } in & samples {
      // Sever the sample -> constraint link; already-missing entries are fine.
      let _ = self.map[pred].get_mut(& args).map(
        |set| set.remove(& constraint)
      ) ;
    }
    samples
  }
  /// Adds some positive examples.
  ///
  /// Simplifies constraints containing these samples.
  ///
  /// `modded_constraints` will be updated as follows: a constraint is
  ///
  /// - added to the set when it is modified (but not tautologized)
  /// - removed from the set when it is tautologized
  // NOTE(review): the `println!`s below look like debug tracing; consider a
  // log macro.
  pub fn add_pos(
    & mut self, mut samples: PrdHMap< HConSet<Args> >,
    modded_constraints: & mut CstrSet
  ) -> Res<()> {
    // Stack of things to propagate.
    let mut to_propagate = Vec::with_capacity( samples.len() ) ;
    // The stack is updated here and at the end of the `'propagate` loop below.
    // Be careful when using `continue 'propagate` as this will skip the stack
    // update.
    for (pred, set) in samples.drain() {
      to_propagate.push( (pred, set) )
    }
    'propagate: while let Some(
      (curr_pred, curr_samples)
    ) = to_propagate.pop() {
      if curr_samples.is_empty() { continue }
      println!(
        "propagating {} samples for predicate {}",
        curr_samples.len(), self.instance[curr_pred]
      ) ;
      // Get the constraints mentioning the positive samples.
      let mut constraints ;
      {
        let mut tmp = None ;
        let mut iter = curr_samples.iter() ;
        // Find the first sample that appears in some constraints.
        'find_first: while let Some(sample) = iter.next() {
          // Removing the map entry severs the sample -> constraint links.
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              println!(
                " - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              tmp = Some(cstr_set) ;
              break 'find_first
            }
          }
          println!(" - sample {} does not appear in any constraint", sample)
        }
        if let Some(set) = tmp {
          constraints = set
        } else { // None of the samples appear in any constraint.
          continue 'propagate
        }
        // Iterate over the remaining samples and add to the constraints to
        // check.
        'other_samples: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              use std::iter::Extend ;
              println!(
                " - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              constraints.extend( cstr_set ) ;
              continue 'other_samples
            }
          }
          println!(" - sample {} does not appear in any constraint", sample)
        }
      }
      println!(" working on {} constraints", constraints.len()) ;
      'update_constraints: for c_idx in constraints {
        println!(
          " looking at {}", self.constraints[c_idx].to_string_info(
            self.instance.preds()
          ) ?
        ) ;
        // Is `rhs` true?
        if self.constraints[c_idx].rhs.as_ref().map(
          | sample | sample.is_in(curr_pred, & curr_samples)
        ).unwrap_or(false) {
          println!(" -> rhs is true, tautologizing") ;
          // Tautologize and break links.
          let _ = self.tautologize(c_idx) ;
          let _ = modded_constraints.remove(& c_idx) ;
          // Move on.
          continue 'update_constraints
        }
        // `lhs` simplification.
        let mut count = 0 ;
        while count < self.constraints[c_idx].lhs.len() {
          if self.constraints[c_idx].lhs[count].is_in(
            curr_pred, & curr_samples
          ) {
            let _ = self.constraints[c_idx].lhs.swap_remove(count) ;
            // No need to break links here as we've already removed all links
            // from `curr_samples` (to get the constraints).
            // DO NOT increment `count` here as we just `swap_remove`d. `count`
            // is already the index of an unvisited element.
            ()
          } else {
            // Unknown, moving on to next sample.
            count += 1
          }
        }
        // Is `lhs` empty?
        if self.constraints[c_idx].lhs.is_empty() {
          println!(" -> lhs is empty, remembering for later") ;
          // Then `rhs` has to be true.
          let mut maybe_rhs = self.tautologize(c_idx) ;
          let _ = modded_constraints.remove(& c_idx) ;
          if let Some( Sample { pred, args } ) = maybe_rhs.pop() {
            // `maybe_rhs` can only be empty now, we've removed the whole
            // `lhs`.
            debug_assert!( maybe_rhs.is_empty() ) ;
            // Remember the sample has to be true.
            let _ = samples.entry(pred).or_insert_with(
              || HConSet::with_capacity(11)
            ).insert(args) ;
          } else {
            // No `rhs`, we have `true => false`, contradiction.
            bail!("contradiction detected, inference impossible")
          }
        } else {
          // `lhs` has changed, remember that for unit clause propagation.
          let _ = modded_constraints.insert(c_idx) ;
        }
      }
      // Done propagating `curr_args` for `curr_pred`, push new positive
      // samples.
      for (pred, set) in samples.drain() {
        to_propagate.push( (pred, set) )
      }
    }
    Ok(())
  }
  /// Adds some negative examples.
  ///
  /// Simplifies constraints containing these samples.
  ///
  /// `modded_constraints` will be updated as follows: a constraint is
  ///
  /// - added to the set when it is modified (but not tautologized)
  /// - removed from the set when it is tautologized
  pub fn add_neg(
    & mut self, mut samples: PrdHMap< HConSet<Args> >,
    modded_constraints: & mut CstrSet
  ) -> Res<()> {
    // Stack of things to propagate.
    let mut to_propagate = Vec::with_capacity( samples.len() ) ;
    // The stack is updated here and at the end of the `'propagate` loop below.
    // Be careful when using `continue 'propagate` as this will skip the stack
    // update.
    for (pred, set) in samples.drain() {
      to_propagate.push( (pred, set) )
    }
    'propagate: while let Some(
      (curr_pred, curr_samples)
    ) = to_propagate.pop() {
      if curr_samples.is_empty() { continue }
      println!(
        "propagating {} samples for predicate {}",
        curr_samples.len(), self.instance[curr_pred]
      ) ;
      // Get the constraints mentioning the negative samples.
      let mut constraints ;
      {
        let mut tmp = None ;
        let mut iter = curr_samples.iter() ;
        // Find the first sample that appears in some constraints.
        'find_first: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              println!(
                " - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              tmp = Some(cstr_set) ;
              break 'find_first
            }
          }
          println!(" - sample {} does not appear in any constraint", sample)
        }
        if let Some(set) = tmp {
          constraints = set
        } else { // None of the samples appear in any constraint.
          continue 'propagate
        }
        // Iterate over the remaining samples and add to the constraints to
        // check.
        'other_samples: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              use std::iter::Extend ;
              println!(
                " - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              constraints.extend( cstr_set ) ;
              continue 'other_samples
            }
          }
          println!(" - sample {} does not appear in any constraint", sample)
        }
      }
      println!(" working on {} constraints", constraints.len()) ;
      'update_constraints: for c_idx in constraints {
        println!(
          " looking at {}", self.constraints[c_idx].to_string_info(
            self.instance.preds()
          ) ?
        ) ;
        // Is `rhs` false?
        if self.constraints[c_idx].rhs.as_ref().map(
          | sample | sample.is_in(curr_pred, & curr_samples)
        ).unwrap_or(false) {
          println!(" -> rhs is false, constraint is negative") ;
          // Forget rhs.
          self.constraints[c_idx].rhs = None
        }
        // `lhs` inspection.
        let mut trivial = false ;
        for sample in & self.constraints[c_idx].lhs {
          if sample.is_in(curr_pred, & curr_samples) {
            // This sample is false, the constraint is trivially true.
            trivial = true ;
            break
          }
        }
        // Is constraint trivial?
        if trivial {
          println!(" -> lhs is always false, constraint is trivial") ;
          // NOTE(review): unlike `add_pos`, the constraint is not removed
          // from `modded_constraints` here — confirm this is intended.
          let _ = self.tautologize(c_idx) ;
        } else if self.constraints[c_idx].lhs.len() == 1
        && self.constraints[c_idx].rhs.is_none() {
          println!(
            " -> one sample in lhs of negative constraint, remembering"
          ) ;
          // Constraint is negative and only one sample in lhs, it has to be
          // false.
          let mut just_one = self.tautologize(c_idx) ;
          if let Some( Sample {pred, args } ) = just_one.pop() {
            debug_assert!( just_one.is_empty() ) ;
            let _ = samples.entry(pred).or_insert_with(
              || HConSet::with_capacity(11)
            ).insert(args) ;
          } else {
            unreachable!()
          }
        } else {
          // Constraint has changed, remember that for unit clause propagation.
          let _ = modded_constraints.insert(c_idx) ;
        }
      }
      // Done propagating `curr_args` for `curr_pred`, push new negative
      // samples.
      for (pred, set) in samples.drain() {
        to_propagate.push( (pred, set) )
      }
    }
    Ok(())
  }
  /// Adds a constraint. Propagates positive and negative samples.
  ///
  /// NOTE(review): this method reads `self.samples`, `self.pos[pred].read()`,
  /// `self.neg[pred].read()`, `self.constraints.read()` and
  /// `self.map[pred].write()` — i.e. the `RwLock`-wrapped fields declared on
  /// `Data`, not the plain maps `NewData` declares — and it calls
  /// two-argument `add_pos`/`add_neg` that `NewData` does not define. It
  /// looks moved from `Data`; confirm it belongs in this impl.
  pub fn add_cstr(
    & self,
    lhs: Vec<(PrdIdx, Args)>, rhs: Option< (PrdIdx, Args) >
  ) -> Res< Option< Either<Constraint, (Sample, bool)> > > {
    let mut nu_lhs = Vec::with_capacity( lhs.len() ) ;
    'smpl_iter: for (pred, args) in lhs {
      let (args, is_new) = self.samples.mk_is_new(args) ;
      if ! is_new {
        if self.pos[pred].read().map_err(corrupted_err)?.contains(& args) {
          // Sample known to be positive, ignore.
          continue 'smpl_iter
        } else if self.neg[pred].read().map_err(
          corrupted_err
        )?.contains(& args) {
          // Sample known to be negative, constraint is a tautology.
          return Ok(None)
        }
      }
      // Neither pos or neg, memorizing.
      nu_lhs.push( Sample { pred, args } )
    }
    let nu_rhs = if let Some( (pred, args) ) = rhs {
      let (args, is_new) = self.samples.mk_is_new(args) ;
      if ! is_new {
        if self.pos[pred].read().map_err(corrupted_err)?.contains(& args) {
          // Sample known to be positive, constraint's a tautology.
          return Ok(None)
        } else if self.neg[pred].read().map_err(
          corrupted_err
        )?.contains(& args) {
          // Sample known to be negative, constraint is a negative one.
          None
        } else {
          Some( Sample { pred, args } )
        }
      } else {
        Some( Sample { pred, args } )
      }
    } else { None } ;
    let cstr_index = self.constraints.read().map_err(
      corrupted_err
    )?.next_index() ;
    // Detect unit cases.
    if nu_lhs.is_empty() {
      // unit, rhs has to be true.
      if let Some( Sample { pred, args } ) = nu_rhs {
        return Ok(
          Some(Either::Rgt( (self.add_pos(pred, args.get().clone())?, true) ))
        )
      } else {
        bail!("contradiction detected, inference is impossible")
      }
    } else if nu_lhs.len() == 1 && nu_rhs.is_none() {
      // unit, the single lhs has to be false.
      let Sample { pred, args } = nu_lhs.pop().unwrap() ;
      return Ok(
        Some(Either::Rgt( (self.add_neg(pred, args.get().clone())?, false) ))
      )
    }
    // Update the map from samples to constraints. Better to do that now than
    // above, since there might be further simplifications possible.
    for & Sample { pred, ref args } in & nu_lhs {
      let mut map = self.map[pred].write().map_err(corrupted_err)? ;
      let entry = map.entry(
        args.clone()
      ) ;
      let set = entry.or_insert_with(
        || CstrSet::with_capacity(17)
      ) ;
      let _ = set.insert(cstr_index) ;
    }
    if let Some( & Sample { pred, ref args } ) = nu_rhs.as_ref() {
      let mut map = self.map[pred].write().map_err(corrupted_err)? ;
      let entry = map.entry(
        args.clone()
      ) ;
      let set = entry.or_insert_with(
        || CstrSet::with_capacity(17)
      ) ;
      let _ = set.insert(cstr_index) ;
    }
    let cstr = Constraint { lhs: nu_lhs, rhs: nu_rhs } ;
    self.constraints.write().map_err(corrupted_err)?.push(
      cstr.clone()
    ) ;
    Ok( Some( Either::Lft(cstr) ) )
  }
}
/// Structure storing the (unprojected) learning data.
///
/// Fields are individually `RwLock`ed — presumably so one `Data` can be
/// shared across threads; confirm against the callers.
///
/// # TO DO
///
/// - add stats monitoring simplifications and unit clause propagation
pub struct Data {
  /// Sample hashconsign.
  samples: RwLock< HashConsign<Args> >,
  /// Constraints.
  pub constraints: RwLock< CstrMap<Constraint> >,
  /// Map from samples to constraints.
  pub map: PrdMap< RwLock< HConMap<Args, CstrSet> > >,
  /// Positive examples.
  pub pos: PrdMap< RwLock< HConSet<Args> > >,
  /// Negative examples.
  pub neg: PrdMap< RwLock< HConSet<Args> > >,
}
impl Data {
  /// Constructor.
  ///
  /// Pre-allocates one sample-to-constraint map, positive set and negative
  /// set per predicate of `instance`.
  pub fn mk(instance: & Instance) -> Self {
    let mut map = PrdMap::with_capacity( instance.preds().len() ) ;
    let mut pos = PrdMap::with_capacity( instance.preds().len() ) ;
    let mut neg = PrdMap::with_capacity( instance.preds().len() ) ;
    for _ in instance.preds() {
      map.push(
        RwLock::new( HConMap::with_capacity(103) )
      ) ;
      pos.push(
        RwLock::new( HConSet::with_capacity(103) )
      ) ;
      neg.push(
        RwLock::new( HConSet::with_capacity(103) )
      ) ;
    }
    Data {
      samples: RwLock::new( HashConsign::with_capacity(1007) ),
      constraints: RwLock::new( CstrMap::with_capacity(703) ),
      map, pos, neg
    }
  }
  /// Performs an action on all samples.
  ///
  /// Folds `f` over every hash-consed sample, holding the consign's read
  /// lock for the duration of the fold.
  pub fn samples_fold<T, F>(& self, init: T, f: F) -> Res<T>
  where F: Fn(T, HSample) -> T {
    Ok(
      self.samples.read().map_err(corrupted_err)?.fold(f, init)
    )
  }
/// The projected data for some predicate.
pub fn data_of(& self, pred: PrdIdx) -> Res<CData> {
let unc_set = self.map[pred].read().map_err(corrupted_err) ? ;
let pos_set = self.pos[pred].read().map_err(corrupted_err) ? ;
let neg_set = self.neg[pred].read().map_err(corrupted_err) ? ;
let (mut pos, mut neg, mut unc) = (
Vec::with_capacity( pos_set.len() ),
Vec::with_capacity( neg_set.len() ),
Vec::with_capacity( unc_set.len() )
) ;
for sample in pos_set.iter() {
pos.push( sample.clone() )
}
for sample in neg_set.iter() {
neg.push( sample.clone() )
}
for (sample, set) in unc_set.iter() {
if ! set.is_empty() {
unc.push( sample.clone() )
}
}
Ok( CData { pos, neg, unc } )
}
// /// Temporary function adding learning data directly.
// pub fn add_learning_data(& self, data: & LearningData) -> Res<()> {
// for sample in & data.pos {
// self.add_pos( sample.pred, sample.args.clone() ) ?
// }
// for sample in & data.neg {
// self.add_neg( sample.pred, sample.args.clone() ) ?
// }
// for cstr in & data.cstr {
// let lhs = cstr.lhs.iter().map(
// |sample| (sample.pred, sample.args.clone())
// ).collect() ;
// let rhs = cstr.rhs.as_ref().map(
// |sample| (sample.pred, sample.args.clone())
// ) ;
// self.add_cstr(lhs, rhs) ?
// }
// Ok(())
// }
  /// Removes the links between the samples in the input constraint and the
  /// constraint. Also, tautologizes the constraint.
  ///
  /// NOTE(review): only the map links in `dead_links` are severed here; no
  /// constraint is actually modified, despite what the line above says —
  /// confirm the intent (the caller may tautologize separately).
  pub fn unlink(
    & self, dead_links: Vec<(PrdIdx, HSample, CstrIdx)>
  ) -> Res<()> {
    for (pred, args, cstr) in dead_links {
      // Entries that have already disappeared are silently skipped.
      let _ = self.map[pred].write().map_err(
        corrupted_err
      )?.get_mut(& args).map(|set| set.remove(& cstr)) ;
    }
    Ok(())
  }
  /// Propagates unit clauses recursively.
  ///
  /// Loops to a fixed point: a non-tautological constraint with an empty
  /// `lhs` forces its `rhs` positive; one with a single `lhs` sample and no
  /// `rhs` forces that sample negative.
  pub fn propagate_unit_clauses(
    & self
  ) -> Res<()> {
    let (mut pos, mut neg) = (None, None) ;
    'fixed_point: loop {
      {
        // Inner block scopes the read lock on `constraints`: `add_pos` /
        // `add_neg` below need write access to the data.
        for cstr in self.constraints.read().map_err(corrupted_err)?.iter() {
          if ! cstr.is_tautology() {
            match (cstr.lhs.len(), cstr.rhs.as_ref()) {
              (0, Some(rhs)) => {
                pos = Some( (rhs.pred, rhs.args.get().clone()) ) ;
                break
              },
              (1, None) => {
                neg = Some( (cstr.lhs[0].pred, cstr.lhs[0].args.get().clone()) ) ;
                break
              },
              _ => (),
            }
          }
        }
      }
      if let Some( (pred, args) ) = pos {
        let _ = self.add_pos(pred, args) ? ;
        pos = None
      } else if let Some( (pred, args) ) = neg {
        let _ = self.add_neg(pred, args) ? ;
        neg = None
      } else {
        // No unit clause found: fixed point reached.
        break 'fixed_point
      }
    }
    Ok(())
  }
  /// Adds a positive example. Simplifies constraints containing that sample.
  ///
  /// The arguments are hashconsed and inserted in the positive set of `pred`.
  /// If the sample was already known (not a new sample) but was not positive
  /// before, every constraint mentioning it is simplified:
  ///
  /// - sample in a `lhs`: removed from that `lhs`; if the `lhs` becomes
  ///   empty, the `rhs` has to be positive too and is propagated recursively,
  /// - sample is the `rhs`: the constraint is a tautology and is dropped.
  ///
  /// Returns the hashconsed version of the input sample.
  ///
  /// Locks are taken in the order `map[pred]`, `constraints`, `pos[pred]`.
  pub fn add_pos(
    & self, pred: PrdIdx, args: Args
  ) -> Res<Sample> {
    let (args, is_new_sample) = self.samples.mk_is_new(args) ;
    let is_new_pos = self.pos[pred].write().map_err(
      corrupted_err
    )?.insert( args.clone() ) ;
    let res = Sample { pred, args: args.clone() } ;
    // `(pred, sample, constraint)` links to erase from `map` once done.
    let mut dead_links = vec![] ;
    // New positive, but not a new sample. Might appear in some constraints.
    if is_new_pos && ! is_new_sample {
      let mut to_propagate = vec![ (pred, args) ] ;
      'propagate: while let Some(
        (curr_pred, curr_args)
      ) = to_propagate.pop() {
        let mut all_constraints = self.map[curr_pred].write().map_err(
          corrupted_err
        ) ? ;
        // Get all constraints that mention the current sample.
        if let Some(constraints) = all_constraints.remove(& curr_args) {
          let mut cstrs = self.constraints.write().map_err(corrupted_err) ? ;
          'cstr_iter: for cstr in constraints {
            // Index of the sample in the lhs of the constraint.
            // None if it's the rhs.
            let maybe_index = match cstrs[cstr].rhs.as_ref() {
              // rhs
              Some(rhs)
              if rhs.pred == curr_pred && rhs.args == curr_args => None,
              // lhs
              _ => {
                let mut cnt = 0 ;
                let mut res = None ;
                'lhs_iter: for & Sample {
                  pred, ref args
                } in & cstrs[cstr].lhs {
                  if pred == curr_pred && curr_args == * args {
                    res = Some(cnt) ;
                    break 'lhs_iter
                  } else {
                    cnt += 1
                  }
                }
                if res.is_none() {
                  // This can happen if a constraint was tautologized when
                  // adding a negative example.
                  continue 'cstr_iter
                } else {
                  res
                }
              },
            } ;
            if let Some(idx) = maybe_index {
              // Current sample appears in lhs.
              let _ = cstrs[cstr].lhs.swap_remove(idx) ;
              dead_links.push( (curr_pred, curr_args.clone(), cstr) ) ;
              // Anything left?
              if cstrs[cstr].lhs.is_empty() {
                // Nothing left, meaning the `lhs` is true. Propagating `rhs`.
                let mut rhs = None ;
                ::std::mem::swap(& mut rhs, & mut cstrs[cstr].rhs) ;
                if let Some( Sample { pred, args } ) = rhs {
                  dead_links.push( (pred, args.clone(), cstr) ) ;
                  // Constraint is unit, propagating.
                  debug_assert!( cstrs[cstr].is_tautology() ) ;
                  self.pos[pred].write().map_err(
                    corrupted_err
                  )?.insert( args.clone() ) ;
                  to_propagate.push( (pred, args) ) ;
                } else {
                  bail!("contradiction detected, inference is impossible")
                }
              } else {
                // Constraint's not unit, done.
              }
            } else {
              // Current positive sample is rhs, clause is a tautology.
              let samples = cstrs[cstr].tautologize() ;
              for Sample { pred, args } in samples {
                dead_links.push( (pred, args, cstr) )
              }
            }
          }
        }
      }
    }
    self.unlink(dead_links) ? ;
    Ok(res)
  }
  /// Adds a negative example. Simplifies constraints containing that sample.
  ///
  /// Dual of `add_pos`. The arguments are hashconsed and inserted in the
  /// negative set of `pred`. If the sample was already known (not a new
  /// sample) but was not negative before, every constraint mentioning it is
  /// simplified:
  ///
  /// - sample in a `lhs`: the constraint is trivially true (tautologized),
  /// - sample is the `rhs`: the `rhs` is erased, making the constraint a
  ///   negative one; if a single `lhs` sample remains, it has to be negative
  ///   and is propagated recursively.
  ///
  /// Returns the hashconsed version of the input sample.
  pub fn add_neg(
    & self, pred: PrdIdx, args: Args
  ) -> Res<Sample> {
    let (args, is_new_sample) = self.samples.mk_is_new(args) ;
    let is_new_neg = self.neg[pred].write().map_err(
      corrupted_err
    )?.insert( args.clone() ) ;
    let res = Sample { pred, args: args.clone() } ;
    // `(pred, sample, constraint)` links to erase from `map` once done.
    let mut dead_links = vec![] ;
    // New negative, but not a new sample. Might appear in some constraints.
    if is_new_neg && ! is_new_sample {
      let mut to_propagate = vec![ (pred, args) ] ;
      'propagate: while let Some(
        (curr_pred, curr_args)
      ) = to_propagate.pop() {
        // let (mut curr_pred, mut curr_args) = (pred, args) ;
        // 'propagate: loop {
        let mut all_constraints = self.map[curr_pred].write().map_err(
          corrupted_err
        ) ? ;
        // Get all constraints that mention the current sample.
        if let Some(constraints) = all_constraints.remove(& curr_args) {
          let mut cstrs = self.constraints.write().map_err(corrupted_err) ? ;
          'cstr_iter: for cstr in constraints {
            // Index of the sample in the lhs of the constraint.
            // None if it's the rhs.
            let maybe_index = match cstrs[cstr].rhs.as_ref() {
              // rhs
              Some(rhs)
              if rhs.pred == curr_pred && rhs.args == curr_args => None,
              // lhs
              _ => {
                let mut cnt = 0 ;
                let mut res = None ;
                'lhs_iter: for & Sample {
                  pred, ref args
                } in & cstrs[cstr].lhs {
                  if pred == curr_pred && curr_args == * args {
                    res = Some(cnt) ;
                    break 'lhs_iter
                  } else {
                    cnt += 1
                  }
                }
                if res.is_none() {
                  // This can happen if a constraint was tautologized when
                  // adding a positive or negative sample.
                  continue 'cstr_iter
                } else {
                  res
                }
              },
            } ;
            if maybe_index.is_some() {
              // Current sample appears in lhs, constraint's a tautology.
              let samples = cstrs[cstr].tautologize() ;
              for Sample { pred, args } in samples {
                dead_links.push( (pred, args, cstr) )
              }
            } else {
              // Current sample appears in rhs, constraint's negative.
              cstrs[cstr].rhs = None ;
              dead_links.push( (curr_pred, curr_args.clone(), cstr) ) ;
              if cstrs[cstr].lhs.len() == 1 {
                // Only one sample in lhs, has to be negative, propagating.
                let Sample { pred, args } = cstrs[cstr].lhs.pop().unwrap() ;
                dead_links.push( (pred, args.clone(), cstr) ) ;
                debug_assert!( cstrs[cstr].is_tautology() ) ;
                self.neg[pred].write().map_err(
                  corrupted_err
                )?.insert( args.clone() ) ;
                to_propagate.push( (pred, args) ) ;
              }
            }
          }
        } else {
          // No constraint mentions current sample.
        }
      }
    }
    self.unlink(dead_links) ? ;
    Ok(res)
  }
  /// Adds a constraint. Propagates positive and negative samples.
  ///
  /// The constraint is first simplified against the current data:
  ///
  /// - `lhs` samples known positive are dropped,
  /// - a `lhs` sample known negative (or a `rhs` known positive) makes the
  ///   constraint a tautology: nothing is added and `Ok(None)` is returned,
  /// - a `rhs` known negative is erased, making the constraint negative.
  ///
  /// Unit constraints are not stored: the sample is forwarded to
  /// `add_pos` / `add_neg` and `Some(Rgt((sample, polarity)))` is returned.
  /// Otherwise the simplified constraint is stored and returned as
  /// `Some(Lft(constraint))`.
  ///
  /// NOTE(review): `cstr_index` is obtained under a read lock that is
  /// released before the final `push` under the write lock; a concurrent
  /// `add_cstr` could invalidate the index — confirm callers serialize this.
  pub fn add_cstr(
    & self,
    lhs: Vec<(PrdIdx, Args)>, rhs: Option< (PrdIdx, Args) >
  ) -> Res< Option< Either<Constraint, (Sample, bool)> > > {
    let mut nu_lhs = Vec::with_capacity( lhs.len() ) ;
    'smpl_iter: for (pred, args) in lhs {
      let (args, is_new) = self.samples.mk_is_new(args) ;
      if ! is_new {
        if self.pos[pred].read().map_err(corrupted_err)?.contains(& args) {
          // Sample known to be positive, ignore.
          continue 'smpl_iter
        } else if self.neg[pred].read().map_err(
          corrupted_err
        )?.contains(& args) {
          // Sample known to be negative, constraint is a tautology.
          return Ok(None)
        }
      }
      // Neither pos or neg, memorizing.
      nu_lhs.push( Sample { pred, args } )
    }
    let nu_rhs = if let Some( (pred, args) ) = rhs {
      let (args, is_new) = self.samples.mk_is_new(args) ;
      if ! is_new {
        if self.pos[pred].read().map_err(corrupted_err)?.contains(& args) {
          // Sample known to be positive, constraint's a tautology.
          return Ok(None)
        } else if self.neg[pred].read().map_err(
          corrupted_err
        )?.contains(& args) {
          // Sample known to be negative, constraint is a negative one.
          None
        } else {
          Some( Sample { pred, args } )
        }
      } else {
        Some( Sample { pred, args } )
      }
    } else { None } ;
    let cstr_index = self.constraints.read().map_err(
      corrupted_err
    )?.next_index() ;
    // Detect unit cases.
    if nu_lhs.is_empty() {
      // unit, rhs has to be true.
      if let Some( Sample { pred, args } ) = nu_rhs {
        return Ok(
          Some(Either::Rgt( (self.add_pos(pred, args.get().clone())?, true) ))
        )
      } else {
        bail!("contradiction detected, inference is impossible")
      }
    } else if nu_lhs.len() == 1 && nu_rhs.is_none() {
      // unit, the single lhs has to be false.
      let Sample { pred, args } = nu_lhs.pop().unwrap() ;
      return Ok(
        Some(Either::Rgt( (self.add_neg(pred, args.get().clone())?, false) ))
      )
    }
    // Update the map from samples to constraints. Better to do that now than
    // above, since there might be further simplifications possible.
    for & Sample { pred, ref args } in & nu_lhs {
      let mut map = self.map[pred].write().map_err(corrupted_err)? ;
      let entry = map.entry(
        args.clone()
      ) ;
      let set = entry.or_insert_with(
        || CstrSet::with_capacity(17)
      ) ;
      let _ = set.insert(cstr_index) ;
    }
    if let Some( & Sample { pred, ref args } ) = nu_rhs.as_ref() {
      let mut map = self.map[pred].write().map_err(corrupted_err)? ;
      let entry = map.entry(
        args.clone()
      ) ;
      let set = entry.or_insert_with(
        || CstrSet::with_capacity(17)
      ) ;
      let _ = set.insert(cstr_index) ;
    }
    let cstr = Constraint { lhs: nu_lhs, rhs: nu_rhs } ;
    self.constraints.write().map_err(corrupted_err)?.push(
      cstr.clone()
    ) ;
    Ok( Some( Either::Lft(cstr) ) )
  }
/// Uses the classification info to classify some ICE data.
pub fn apply(
& self, pred: PrdIdx, data: & mut CData
) -> Res<()> {
let pos = self.pos[pred].read().map_err(corrupted_err)? ;
let neg = self.neg[pred].read().map_err(corrupted_err)? ;
let mut cursor = 0 ;
while cursor < data.unc.len() {
if pos.contains(& data.unc[cursor]) {
let sample = data.unc.swap_remove(cursor) ;
data.pos.push(sample)
} else if neg.contains(& data.unc[cursor]) {
let sample = data.unc.swap_remove(cursor) ;
data.neg.push(sample)
} else {
cursor += 1
}
}
Ok(())
}
}
// Human-readable dump of the whole data structure: positive samples, negative
// samples, constraints, and the sample-to-constraint map.
impl<'a> PebcakFmt<'a> for Data {
  type Info = & 'a PrdMap<PrdInfo> ;
  fn pebcak_err(& self) -> ErrorKind {
    "during data pebcak formatting".into()
  }
  fn pebcak_io_fmt<W: Write>(
    & self, w: & mut W, map: & 'a PrdMap<PrdInfo>
  ) -> IoRes<()> {
    // NOTE(review): the locks are `unwrap`ed here, unlike the `corrupted_err`
    // handling elsewhere — a poisoned lock panics during formatting.
    write!(w, "pos (") ? ;
    for (pred, set) in self.pos.index_iter() {
      for args in set.read().unwrap().iter() {
        write!(w, "\n  ({}", map[pred]) ? ;
        for arg in args.iter() {
          write!(w, " {}", arg)?
        }
        write!(w, ")") ?
      }
    }
    write!(w, "\n) neg (") ? ;
    for (pred, set) in self.neg.index_iter() {
      for args in set.read().unwrap().iter() {
        write!(w, "\n  ({}", map[pred]) ? ;
        for arg in args.iter() {
          write!(w, " {}", arg)?
        }
        write!(w, ")") ?
      }
    }
    write!(w, "\n) constraints (") ? ;
    for (index, cstr) in self.constraints.read().unwrap().index_iter() {
      write!(w, "\n  {: >3} | ", index) ? ;
      if cstr.is_tautology() {
        // Tautologized (dead) constraints are printed as `_`.
        write!(w, "_") ?
      } else {
        for & Sample { pred, ref args } in cstr.lhs.iter() {
          write!(w, "({}", map[pred]) ? ;
          for arg in args.iter() {
            write!(w, " {}", arg) ?
          }
          write!(w, ") ") ?
        }
        write!(w, "=> ") ? ;
        if let Some(& Sample { pred, ref args }) = cstr.rhs.as_ref() {
          write!(w, "({}", map[pred]) ? ;
          for arg in args.iter() {
            write!(w, " {}", arg) ?
          }
          write!(w, ")") ?
        } else {
          write!(w, "false") ?
        }
      }
    }
    write!(w, "\n) constraint map(") ? ;
    for (pred, samples) in self.map.index_iter() {
      for (sample, set) in samples.read().unwrap().iter() {
        write!(w, "\n  ({}", map[pred]) ? ;
        for arg in sample.iter() {
          write!(w, " {}", arg) ?
        }
        write!(w, ") ->") ? ;
        // Constraint indices this sample appears in.
        for pred in set.iter() {
          write!(w, " {}", pred) ?
        }
      }
    }
    writeln!(w, "\n)")
  }
}
/// New learning data sent by the teacher to the learners.
///
/// Plain (non-hashconsed, non-locked) snapshot of new data, suitable for
/// sending over a channel.
#[derive(Clone)]
pub struct LearningData {
  /// Positive learning data.
  pub pos: Vec<Sample>,
  /// Negative learning data.
  pub neg: Vec<Sample>,
  /// Constraints.
  pub cstr: Vec<Constraint>,
}
impl LearningData {
  /// Constructor.
  pub fn mk(
    pos: Vec<Sample>, neg: Vec<Sample>, cstr: Vec<Constraint>
  ) -> Self {
    LearningData { pos: pos, neg: neg, cstr: cstr }
  }
  /// Returns `true` if everything's empty.
  pub fn is_empty(& self) -> bool {
    let LearningData { ref pos, ref neg, ref cstr } = * self ;
    pos.is_empty() && neg.is_empty() && cstr.is_empty()
  }
}
// Human-readable dump: `pos ( ... ) neg ( ... ) constraints ( ... )`.
impl<'a> PebcakFmt<'a> for LearningData {
  type Info = & 'a PrdMap<PrdInfo> ;
  fn pebcak_err(& self) -> ErrorKind {
    "during constraint pebcak formatting".into()
  }
  fn pebcak_io_fmt<W: Write>(
    & self, w: & mut W, map: & 'a PrdMap<PrdInfo>
  ) -> IoRes<()> {
    write!(w, "pos (") ? ;
    if ! self.pos.is_empty() {
      write!(w, "\n ") ? ;
      for pos in & self.pos {
        write!(w, "  ") ? ;
        pos.pebcak_io_fmt(w, map) ? ;
        write!(w, "\n") ?
      }
    }
    write!(w, ") neg (") ? ;
    if ! self.neg.is_empty() {
      write!(w, "\n ") ? ;
      for neg in & self.neg {
        write!(w, "  ") ? ;
        neg.pebcak_io_fmt(w, map) ? ;
        write!(w, "\n") ?
      }
    }
    write!(w, ") constraints (") ? ;
    if ! self.cstr.is_empty() {
      write!(w, "\n ") ? ;
      for cstr in & self.cstr {
        write!(w, "  ") ? ;
        cstr.pebcak_io_fmt(w, map) ? ;
        writeln!(w, "") ?
      }
    }
    writeln!(w, ")")
  }
}
// New learning data structure: add positive and negative data
#![doc = "Learning data-related types."]
use std::sync::RwLock ;
use hashconsing::{ HConser, HConsed, HashConsign } ;
use common::* ;
use instance::Instance ;
use instance::info::* ;
use learning::ice::CData ;
/// Hash consed samples (shared, cheaply clonable predicate arguments).
pub type HSample = HConsed< Args > ;
/// Vector of samples.
pub type HSamples = Vec<HSample> ;
/// A sample is some values for a predicate.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Sample {
  // Predicate the sample is about.
  pub pred: PrdIdx,
  // Hashconsed argument values.
  pub args: HSample,
}
impl Sample {
  /// Constructor.
  pub fn mk(pred: PrdIdx, args: HSample) -> Self {
    Sample { pred: pred, args: args }
  }
  /// True iff the sample is about predicate `pred` and its arguments appear
  /// in `samples`.
  pub fn is_in(& self, pred: PrdIdx, samples: & HConSet<Args>) -> bool {
    if self.pred != pred {
      false
    } else {
      samples.contains(& self.args)
    }
  }
}
// Prints a sample as `(<pred name> <arg> ... <arg>)`.
impl<'a> PebcakFmt<'a> for Sample {
  type Info = & 'a PrdMap<PrdInfo> ;
  fn pebcak_err(& self) -> ErrorKind {
    "during sample pebcak formatting".into()
  }
  fn pebcak_io_fmt<W: Write>(
    & self, w: & mut W, map: & 'a PrdMap<PrdInfo>
  ) -> IoRes<()> {
    write!(w, "({}", map[self.pred].name) ? ;
    for arg in self.args.iter() {
      write!(w, " {}", arg) ?
    }
    write!(w, ")")
  }
}
// `Display` for `Sample`: `p_<pred index> <args>` (indices, not names).
impl_fmt!{
  Sample(self, fmt) {
    write!(fmt, "p_{} {}", self.pred, self.args)
  }
}
// Strongly-typed `usize` wrapper for constraint indices, with the associated
// set and total-map collection types.
wrap_usize!{
  #[doc = "Constraint index."]
  CstrIdx
  #[doc = "Constraint set."]
  set: CstrSet
  #[doc = "Constraint total map."]
  map: CstrMap with iter: CstrMapIter
}
/// Constraints using hashconsed samples.
///
/// A constraint is the implication `lhs /\ ... /\ lhs => rhs`, where a `rhs`
/// of `None` means `false` (a negative constraint).
///
/// A constraint is a tautology iff `lhs.is_empty()` and `rhs.is_none()`.
///
/// # Invariants
///
/// - `lhs.is_empty() => rhs.is_none()`
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Constraint {
  // Antecedent samples (conjunction).
  pub lhs: Vec< Sample >,
  // Consequent sample; `None` encodes `false`.
  pub rhs: Option< Sample >,
}
impl Constraint {
  /// Turns the constraint into a tautology (empty `lhs`, no `rhs`) and
  /// returns every sample it used to contain.
  pub fn tautologize(& mut self) -> Vec<Sample> {
    let mut samples = ::std::mem::replace(
      & mut self.lhs, Vec::with_capacity(0)
    ) ;
    if let Some(rhs) = self.rhs.take() {
      samples.push(rhs)
    }
    samples
  }
  /// True iff the constraint is a tautology (empty `lhs`).
  ///
  /// Relies on the invariant that an empty `lhs` implies no `rhs`.
  pub fn is_tautology(& self) -> bool {
    let tautology = self.lhs.is_empty() ;
    if tautology {
      debug_assert!( self.rhs.is_none() )
    }
    tautology
  }
}
// Prints a constraint as `<lhs> <lhs> ... => <rhs|false>` with named
// predicates.
impl<'a> PebcakFmt<'a> for Constraint {
  type Info = & 'a PrdMap<PrdInfo> ;
  fn pebcak_err(& self) -> ErrorKind {
    "during constraint pebcak formatting".into()
  }
  fn pebcak_io_fmt<W: Write>(
    & self, w: & mut W, map: & 'a PrdMap<PrdInfo>
  ) -> IoRes<()> {
    for lhs in & self.lhs {
      lhs.pebcak_io_fmt(w, map) ? ;
      write!(w, " ") ?
    }
    write!(w, "=> ") ? ;
    if let Some(ref rhs) = self.rhs {
      rhs.pebcak_io_fmt(w, map)
    } else {
      // `None` rhs encodes `false`.
      write!(w, "false")
    }
  }
}
// `Display` for `Constraint`: same shape as the pebcak version but with
// predicate indices instead of names.
impl_fmt!{
  Constraint(self, fmt) {
    for lhs in & self.lhs {
      write!(fmt, "{} ", lhs) ?
    }
    write!(fmt, "=> ") ? ;
    if let Some(ref rhs) = self.rhs {
      write!(fmt, "{}", rhs)
    } else {
      write!(fmt, "false")
    }
  }
}
/// Structure storing unprojected learning data.
///
/// Used by the teacher to simplify constraints as it adds samples.
///
/// Also used by the ice learner to propagate the choices it makes.
///
/// Single-threaded counterpart of `Data`: no locks, methods take `&mut self`.
pub struct NewData {
  /// Instance, only used for printing.
  instance: Arc<Instance>,
  /// Positive examples.
  pos: PrdMap< HConSet<Args> >,
  /// Negative examples.
  neg: PrdMap< HConSet<Args> >,
  /// Constraints.
  constraints: CstrMap<Constraint>,
  /// Map from samples to contstraints.
  map: PrdMap< HConMap<Args, CstrSet> >,
}
impl NewData {
/// Constructor.
pub fn mk(instance: Arc<Instance>) -> Self {
let pred_count = instance.preds().len() ;
let (
mut map, mut pos, mut neg
) = (
PrdMap::with_capacity(pred_count),
PrdMap::with_capacity(pred_count),
PrdMap::with_capacity(pred_count)
) ;
for _ in instance.preds() {
map.push( HConMap::with_capacity(103) ) ;
pos.push( HConSet::with_capacity(103) ) ;
neg.push( HConSet::with_capacity(103) ) ;
}
let constraints = CstrMap::with_capacity(103) ;
NewData { instance, pos, neg, constraints, map }
}
/// The projected data for some predicate.
pub fn data_of(& self, pred: PrdIdx) -> CData {
let unc_set = & self.map[pred] ;
let pos_set = & self.pos[pred] ;
let neg_set = & self.neg[pred] ;
let (mut pos, mut neg, mut unc) = (
Vec::with_capacity( pos_set.len() ),
Vec::with_capacity( neg_set.len() ),
Vec::with_capacity( unc_set.len() )
) ;
for sample in pos_set.iter() {
pos.push( sample.clone() )
}
for sample in neg_set.iter() {
neg.push( sample.clone() )
}
for (sample, set) in unc_set.iter() {
if ! set.is_empty() {
unc.push( sample.clone() )
}
}
CData { pos, neg, unc }
}
/// Tautologizes a constraint and removes the links with its samples in
/// the map.
pub fn tautologize(& mut self, constraint: CstrIdx) -> Vec<Sample> {
let samples = self.constraints[constraint].tautologize() ;
for & Sample { pred, ref args } in & samples {
let _ = self.map[pred].get_mut(& args).map(
|set| set.remove(& constraint)
) ;
}
samples
}
  /// Adds some positive examples.
  ///
  /// Simplifies constraints containing these samples: positive samples are
  /// removed from `lhs`s, constraints whose `rhs` becomes true are
  /// tautologized, and `lhs`s that become empty force their `rhs` positive,
  /// which is propagated recursively.
  ///
  /// `modded_constraints` will be updated as follows: a constraint is
  ///
  /// - added to the set when it is modified (but not tautologized)
  /// - removed from the set when it is tautologized
  ///
  /// NOTE(review): the `println!`s look like debug traces — consider a
  /// logging macro or feature gate before release.
  pub fn add_pos(
    & mut self, mut samples: PrdHMap< HConSet<Args> >,
    modded_constraints: & mut CstrSet
  ) -> Res<()> {
    // Stack of things to propagate.
    let mut to_propagate = Vec::with_capacity( samples.len() ) ;
    // The stack is updated here and at the end of the `'propagate` loop below.
    // Be careful when using `continue 'propagate` as this will skip the stack
    // update.
    for (pred, set) in samples.drain() {
      to_propagate.push( (pred, set) )
    }
    'propagate: while let Some(
      (curr_pred, curr_samples)
    ) = to_propagate.pop() {
      if curr_samples.is_empty() { continue }
      println!(
        "propagating {} samples for predicate {}",
        curr_samples.len(), self.instance[curr_pred]
      ) ;
      // Get the constraints mentioning the positive samples.
      let mut constraints ;
      {
        let mut tmp = None ;
        let mut iter = curr_samples.iter() ;
        // Find the first sample that appears in some constraints.
        // (Removing a sample from `map` erases its links: these constraints
        // will not be found again for this sample.)
        'find_first: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              println!(
                "  - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              tmp = Some(cstr_set) ;
              break 'find_first
            }
          }
          println!("  - sample {} does not appear in any constraint", sample)
        }
        if let Some(set) = tmp {
          constraints = set
        } else { // None of the samples appear in any constraint.
          continue 'propagate
        }
        // Iterate over the remaining samples and add to the constraints to
        // check.
        'other_samples: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              use std::iter::Extend ;
              println!(
                "  - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              constraints.extend( cstr_set ) ;
              continue 'other_samples
            }
          }
          println!("  - sample {} does not appear in any constraint", sample)
        }
      }
      println!("  working on {} constraints", constraints.len()) ;
      'update_constraints: for c_idx in constraints {
        println!(
          "    looking at {}", self.constraints[c_idx].to_string_info(
            self.instance.preds()
          ) ?
        ) ;
        // Is `rhs` true?
        if self.constraints[c_idx].rhs.as_ref().map(
          | sample | sample.is_in(curr_pred, & curr_samples)
        ).unwrap_or(false) {
          println!("    -> rhs is true, tautologizing") ;
          // Tautologize and break links.
          let _ = self.tautologize(c_idx) ;
          let _ = modded_constraints.remove(& c_idx) ;
          // Move on.
          continue 'update_constraints
        }
        // `lhs` simplification.
        let mut count = 0 ;
        while count < self.constraints[c_idx].lhs.len() {
          if self.constraints[c_idx].lhs[count].is_in(
            curr_pred, & curr_samples
          ) {
            let _ = self.constraints[c_idx].lhs.swap_remove(count) ;
            // No need to break links here as we've already removed all links
            // from `curr_samples` (to get the constraints).
            // DO NOT increment `count` here as we just `swap_remove`d. `count`
            // is already the index of an unvisited element.
            ()
          } else {
            // Unknown, moving on to next sample.
            count += 1
          }
        }
        // Is `lhs` empty?
        if self.constraints[c_idx].lhs.is_empty() {
          println!("    -> lhs is empty, remembering for later") ;
          // Then `rhs` has to be true.
          let mut maybe_rhs = self.tautologize(c_idx) ;
          let _ = modded_constraints.remove(& c_idx) ;
          if let Some( Sample { pred, args } ) = maybe_rhs.pop() {
            // `maybe_rhs` can only be empty now, we've removed the whole
            // `lhs`.
            debug_assert!( maybe_rhs.is_empty() ) ;
            // Remember the sample has to be true.
            let _ = samples.entry(pred).or_insert_with(
              || HConSet::with_capacity(11)
            ).insert(args) ;
          } else {
            // No `rhs`, we have `true => false`, contradiction.
            bail!("contradiction detected, inference impossible")
          }
        } else {
          // `lhs` has changed, remember that for unit clause propagation.
          let _ = modded_constraints.insert(c_idx) ;
        }
      }
      // Done propagating `curr_args` for `curr_pred`, push new positive
      // samples.
      for (pred, set) in samples.drain() {
        to_propagate.push( (pred, set) )
      }
    }
    Ok(())
  }
  /// Adds some negative examples.
  ///
  /// Simplifies constraints containing these samples: a negative sample in a
  /// `lhs` makes the constraint trivially true (tautologized), a negative
  /// `rhs` is erased (negative constraint), and a negative constraint left
  /// with a single `lhs` sample forces that sample negative, which is
  /// propagated recursively.
  ///
  /// `modded_constraints` will be updated as follows: a constraint is
  ///
  /// - added to the set when it is modified (but not tautologized)
  /// - removed from the set when it is tautologized
  ///
  /// NOTE(review): the `println!`s look like debug traces — consider a
  /// logging macro or feature gate before release.
  pub fn add_neg(
    & mut self, mut samples: PrdHMap< HConSet<Args> >,
    modded_constraints: & mut CstrSet
  ) -> Res<()> {
    // Stack of things to propagate.
    let mut to_propagate = Vec::with_capacity( samples.len() ) ;
    // The stack is updated here and at the end of the `'propagate` loop below.
    // Be careful when using `continue 'propagate` as this will skip the stack
    // update.
    for (pred, set) in samples.drain() {
      to_propagate.push( (pred, set) )
    }
    'propagate: while let Some(
      (curr_pred, curr_samples)
    ) = to_propagate.pop() {
      if curr_samples.is_empty() { continue }
      println!(
        "propagating {} samples for predicate {}",
        curr_samples.len(), self.instance[curr_pred]
      ) ;
      // Get the constraints mentioning the negative samples.
      let mut constraints ;
      {
        let mut tmp = None ;
        let mut iter = curr_samples.iter() ;
        // Find the first sample that appears in some constraints.
        'find_first: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              println!(
                "  - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              tmp = Some(cstr_set) ;
              break 'find_first
            }
          }
          println!("  - sample {} does not appear in any constraint", sample)
        }
        if let Some(set) = tmp {
          constraints = set
        } else { // None of the samples appear in any constraint.
          continue 'propagate
        }
        // Iterate over the remaining samples and add to the constraints to
        // check.
        'other_samples: while let Some(sample) = iter.next() {
          if let Some(cstr_set) = self.map[curr_pred].remove(sample) {
            if ! cstr_set.is_empty() {
              use std::iter::Extend ;
              println!(
                "  - sample {} appears in {} constraints",
                sample, cstr_set.len()
              ) ;
              constraints.extend( cstr_set ) ;
              continue 'other_samples
            }
          }
          println!("  - sample {} does not appear in any constraint", sample)
        }
      }
      println!("  working on {} constraints", constraints.len()) ;
      'update_constraints: for c_idx in constraints {
        println!(
          "    looking at {}", self.constraints[c_idx].to_string_info(
            self.instance.preds()
          ) ?
        ) ;
        // Is `rhs` false?
        if self.constraints[c_idx].rhs.as_ref().map(
          | sample | sample.is_in(curr_pred, & curr_samples)
        ).unwrap_or(false) {
          println!("    -> rhs is false, constraint is negative") ;
          // Forget rhs.
          self.constraints[c_idx].rhs = None
        }
        // `lhs` inspection.
        let mut trivial = false ;
        for sample in & self.constraints[c_idx].lhs {
          if sample.is_in(curr_pred, & curr_samples) {
            // This sample is false, the constraint is trivially true.
            trivial = true ;
            break
          }
        }
        // Is constraint trivial?
        if trivial {
          println!("    -> lhs is always false, constraint is trivial") ;
          let _ = self.tautologize(c_idx) ;
        } else if self.constraints[c_idx].lhs.len() == 1
        && self.constraints[c_idx].rhs.is_none() {
          println!(
            "    -> one sample in lhs of negative constraint, remembering"
          ) ;
          // Constraint is negative and only one sample in lhs, it has to be
          // false.
          let mut just_one = self.tautologize(c_idx) ;
          if let Some( Sample {pred, args } ) = just_one.pop() {
            debug_assert!( just_one.is_empty() ) ;
            let _ = samples.entry(pred).or_insert_with(
              || HConSet::with_capacity(11)
            ).insert(args) ;
          } else {
            unreachable!()
          }
        } else {
          // Constraint has changed, remember that for unit clause propagation.
          let _ = modded_constraints.insert(c_idx) ;
        }
      }
      // Done propagating `curr_args` for `curr_pred`, push new negative
      // samples.
      for (pred, set) in samples.drain() {
        to_propagate.push( (pred, set) )
      }
    }
    Ok(())
  }
/// Adds a constraint. Propagates positive and negative samples.
pub fn add_cstr(
& self, consign: & 'a mut HashConsign<Args>,
modded_constraints: & mut CstrSet,
lhs: Vec<(PrdIdx, Args)>, rhs: Option< (PrdIdx, Args) >
) -> Res<()> {
let mut nu_lhs = Vec::with_capacity( lhs.len() ) ;
'smpl_iter: for (pred, args) in lhs {
let (args, is_new) = consign.mk_is_new(args) ;
if ! is_new {
if self.pos[pred].contains(& args) {
// Sample known to be positive, ignore.
continue 'smpl_iter
} else if self.neg[pred].contains(& args) {
// Sample known to be negative, constraint is a tautology.
return Ok(())
}
}
// Neither pos or neg, memorizing.
nu_lhs.push( Sample { pred, args } )
}
let nu_rhs = if let Some( (pred, args) ) = rhs {
let (args, is_new) = consign.mk_is_new(args) ;
if ! is_new {
if self.pos[pred].contains(& args) {
// Sample known to be positive, constraint's a tautology.
return Ok(())
} else if self.neg[pred].contains(& args) {
// Sample known to be negative, constraint is a negative one.
None
} else {
Some( Sample { pred, args } )
}
} else {
Some( Sample { pred, args } )
}
} else { None } ;
// Detect unit cases.
if nu_lhs.is_empty() {
// unit, rhs has to be true.
if let Some( Sample { pred, args } ) = nu_rhs {
let mut set = HConSet::new() ;
set.insert(args) ;
let mut positive = PrdHMap::new() ;
positive.insert(pred, set) ;
self.add_pos(positive, modded_constraints) ?
return Ok(())
} else {
bail!("contradiction detected, inference is impossible")
}
} else if nu_lhs.len() == 1 && nu_rhs.is_none() {
// unit, the single lhs has to be false.
let Sample { pred, args } = nu_lhs.pop().expect(
"[bug] empty vector after checking that its length is 1..."
) ;
let mut set = HConSet::new() ;
set.insert(args) ;
let mut negative = PrdHMap::new() ;
negative.insert(pred, set) ;
self.add_neg(negative, modded_constraints) ?
return Ok(())
}
let cstr_index = self.constraints.next_index() ;
// Update the map from samples to constraints. Better to do that now than
// above, since there might be further simplifications possible.
for & Sample { pred, ref args } in & nu_lhs {
let mut map = self.map[pred].write().map_err(corrupted_err)? ;
let entry = map.entry(
args.clone()
) ;
let set = entry.or_insert_with(
|| CstrSet::with_capacity(17)
) ;
let _ = set.insert(cstr_index) ;
}
if let Some( & Sample { pred, ref args } ) = nu_rhs.as_ref() {
let mut map = self.map[pred].write().map_err(corrupted_err)? ;
let entry = map.entry(
args.clone()
) ;
let set = entry.or_insert_with(
|| CstrSet::with_capacity(17)
) ;
let _ = set.insert(cstr_index) ;
}
let cstr = Constraint { lhs: nu_lhs, rhs: nu_rhs } ;
self.constraints.write().map_err(corrupted_err)?.push(
cstr.clone()
) ;
Ok( Some( Either::Lft(cstr) ) )
}
}
/// Structure storing the (unprojected) learning data.
///
/// Thread-safe counterpart of `NewData`: every field is behind an `RwLock`
/// and methods take `& self`.
///
/// # TO DO
///
/// - add stats monitoring simplifications and unit clause propagation
pub struct Data {
  /// Sample hashconsign.
  samples: RwLock< HashConsign<Args> >,
  /// Constraints.
  pub constraints: RwLock< CstrMap<Constraint> >,
  /// Map from samples to constraints.
  pub map: PrdMap< RwLock< HConMap<Args, CstrSet> > >,
  /// Positive examples.
  pub pos: PrdMap< RwLock< HConSet<Args> > >,
  /// Negative examples.
  pub neg: PrdMap< RwLock< HConSet<Args> > >,
}
impl Data {
/// Constructor.
pub fn mk(instance: & Instance) -> Self {
let mut map = PrdMap::with_capacity( instance.preds().len() ) ;
let mut pos = PrdMap::with_capacity( instance.preds().len() ) ;
let mut neg = PrdMap::with_capacity( instance.preds().len() ) ;
for _ in instance.preds() {
map.push(
RwLock::new( HConMap::with_capacity(103) )
) ;
pos.push(
RwLock::new( HConSet::with_capacity(103) )
) ;
neg.push(
RwLock::new( HConSet::with_capacity(103) )
) ;
}
Data {
samples: RwLock::new( HashConsign::with_capacity(1007) ),
constraints: RwLock::new( CstrMap::with_capacity(703) ),
map, pos, neg
}
}
/// Performs an action on all samples.
pub fn samples_fold<T, F>(& self, init: T, f: F) -> Res<T>
where F: Fn(T, HSample) -> T {
Ok(
self.samples.read().map_err(corrupted_err)?.fold(f, init)
)
}
/// The projected data for some predicate.
pub fn data_of(& self, pred: PrdIdx) -> Res<CData> {
let unc_set = self.map[pred].read().map_err(corrupted_err) ? ;
let pos_set = self.pos[pred].read().map_err(corrupted_err) ? ;
let neg_set = self.neg[pred].read().map_err(corrupted_err) ? ;
let (mut pos, mut neg, mut unc) = (
Vec::with_capacity( pos_set.len() ),
Vec::with_capacity( neg_set.len() ),
Vec::with_capacity( unc_set.len() )
) ;
for sample in pos_set.iter() {
pos.push( sample.clone() )
}
for sample in neg_set.iter() {
neg.push( sample.clone() )
}
for (sample, set) in unc_set.iter() {
if ! set.is_empty() {
unc.push( sample.clone() )
}
}
Ok( CData { pos, neg, unc } )
}
// /// Temporary function adding learning data directly.
// pub fn add_learning_data(& self, data: & LearningData) -> Res<()> {
// for sample in & data.pos {
// self.add_pos( sample.pred, sample.args.clone() ) ?
// }
// for sample in & data.neg {
// self.add_neg( sample.pred, sample.args.clone() ) ?
// }
// for cstr in & data.cstr {
// let lhs = cstr.lhs.iter().map(
// |sample| (sample.pred, sample.args.clone())
// ).collect() ;
// let rhs = cstr.rhs.as_ref().map(
// |sample| (sample.pred, sample.args.clone())
// ) ;
// self.add_cstr(lhs, rhs) ?
// }
// Ok(())
// }
  /// Removes the links between the given samples and the given constraints
  /// in the sample-to-constraint map.
  ///
  /// Note that, despite what older comments said, this does NOT tautologize
  /// the constraints themselves: callers tautologize before calling this.
  /// Pairs with no map entry are silently skipped.
  pub fn unlink(
    & self, dead_links: Vec<(PrdIdx, HSample, CstrIdx)>
  ) -> Res<()> {
    for (pred, args, cstr) in dead_links {
      let _ = self.map[pred].write().map_err(
        corrupted_err
      )?.get_mut(& args).map(|set| set.remove(& cstr)) ;
    }
    Ok(())
  }
  /// Propagates unit clauses recursively.
  ///
  /// Scans all constraints for unit clauses:
  ///
  /// - empty `lhs` with some `rhs`: the `rhs` sample must be positive,
  /// - single-sample `lhs` with no `rhs`: that sample must be negative.
  ///
  /// Each unit clause found is forwarded to `add_pos` / `add_neg`, which
  /// simplifies the constraint set, and the scan restarts. Terminates when a
  /// full scan finds no unit clause.
  pub fn propagate_unit_clauses(
    & self
  ) -> Res<()> {
    let (mut pos, mut neg) = (None, None) ;
    'fixed_point: loop {
      // Inner scope so the read lock on `constraints` is released before the
      // calls to `add_pos` / `add_neg` below, which take the write lock.
      {
        for cstr in self.constraints.read().map_err(corrupted_err)?.iter() {
          if ! cstr.is_tautology() {
            match (cstr.lhs.len(), cstr.rhs.as_ref()) {
              (0, Some(rhs)) => {
                pos = Some( (rhs.pred, rhs.args.get().clone()) ) ;
                break
              },
              (1, None) => {
                neg = Some( (cstr.lhs[0].pred, cstr.lhs[0].args.get().clone()) ) ;
                break
              },
              _ => (),
            }
          }
        }
      }
      if let Some( (pred, args) ) = pos {
        let _ = self.add_pos(pred, args) ? ;
        pos = None
      } else if let Some( (pred, args) ) = neg {
        let _ = self.add_neg(pred, args) ? ;
        neg = None
      } else {
        // No unit clause found during the last scan: fixed point reached.
        break 'fixed_point
      }
    }
    Ok(())
  }
  /// Adds a positive example. Simplifies constraints containing that sample.
  ///
  /// The arguments are hashconsed and inserted in the positive set of `pred`.
  /// If the sample was already known (not a new sample) but was not positive
  /// before, every constraint mentioning it is simplified:
  ///
  /// - sample in a `lhs`: removed from that `lhs`; if the `lhs` becomes
  ///   empty, the `rhs` has to be positive too and is propagated recursively,
  /// - sample is the `rhs`: the constraint is a tautology and is dropped.
  ///
  /// Returns the hashconsed version of the input sample.
  ///
  /// Locks are taken in the order `map[pred]`, `constraints`, `pos[pred]`.
  pub fn add_pos(
    & self, pred: PrdIdx, args: Args
  ) -> Res<Sample> {
    let (args, is_new_sample) = self.samples.mk_is_new(args) ;
    let is_new_pos = self.pos[pred].write().map_err(
      corrupted_err
    )?.insert( args.clone() ) ;
    let res = Sample { pred, args: args.clone() } ;
    // `(pred, sample, constraint)` links to erase from `map` once done.
    let mut dead_links = vec![] ;
    // New positive, but not a new sample. Might appear in some constraints.
    if is_new_pos && ! is_new_sample {
      let mut to_propagate = vec![ (pred, args) ] ;
      'propagate: while let Some(
        (curr_pred, curr_args)
      ) = to_propagate.pop() {
        let mut all_constraints = self.map[curr_pred].write().map_err(
          corrupted_err
        ) ? ;
        // Get all constraints that mention the current sample.
        if let Some(constraints) = all_constraints.remove(& curr_args) {
          let mut cstrs = self.constraints.write().map_err(corrupted_err) ? ;
          'cstr_iter: for cstr in constraints {
            // Index of the sample in the lhs of the constraint.
            // None if it's the rhs.
            let maybe_index = match cstrs[cstr].rhs.as_ref() {
              // rhs
              Some(rhs)
              if rhs.pred == curr_pred && rhs.args == curr_args => None,
              // lhs
              _ => {
                let mut cnt = 0 ;
                let mut res = None ;
                'lhs_iter: for & Sample {
                  pred, ref args
                } in & cstrs[cstr].lhs {
                  if pred == curr_pred && curr_args == * args {
                    res = Some(cnt) ;
                    break 'lhs_iter
                  } else {
                    cnt += 1
                  }
                }
                if res.is_none() {
                  // This can happen if a constraint was tautologized when
                  // adding a negative example.
                  continue 'cstr_iter
                } else {
                  res
                }
              },
            } ;
            if let Some(idx) = maybe_index {
              // Current sample appears in lhs.
              let _ = cstrs[cstr].lhs.swap_remove(idx) ;
              dead_links.push( (curr_pred, curr_args.clone(), cstr) ) ;
              // Anything left?
              if cstrs[cstr].lhs.is_empty() {
                // Nothing left, meaning the `lhs` is true. Propagating `rhs`.
                let mut rhs = None ;
                ::std::mem::swap(& mut rhs, & mut cstrs[cstr].rhs) ;
                if let Some( Sample { pred, args } ) = rhs {
                  dead_links.push( (pred, args.clone(), cstr) ) ;
                  // Constraint is unit, propagating.
                  debug_assert!( cstrs[cstr].is_tautology() ) ;
                  self.pos[pred].write().map_err(
                    corrupted_err
                  )?.insert( args.clone() ) ;
                  to_propagate.push( (pred, args) ) ;
                } else {
                  bail!("contradiction detected, inference is impossible")
                }
              } else {
                // Constraint's not unit, done.
              }
            } else {
/// Adds a negative example. Simplifies constraints containing that sample.
///
/// The arguments are hash-consed first (`mk_is_new`). If the sample is a new
/// negative but was already known as a sample, every constraint mentioning it
/// is simplified:
///
/// - sample appears in a constraint's `lhs`: the `lhs` is false, so the
///   constraint is trivially true and is tautologized;
/// - sample is the constraint's `rhs`: the `rhs` is dropped (constraint
///   becomes a negative one), and if exactly one `lhs` sample remains, that
///   sample has to be negative too and is queued for further propagation.
///
/// Sample-to-constraint links made stale by the simplifications are severed
/// at the end via `unlink`.
pub fn add_neg(
& self, pred: PrdIdx, args: Args
) -> Res<Sample> {
// Hash-cons the arguments; `is_new_sample` is true iff never seen before.
let (args, is_new_sample) = self.samples.mk_is_new(args) ;
// Record the sample as negative for `pred`; true iff not already negative.
let is_new_neg = self.neg[pred].write().map_err(
corrupted_err
)?.insert( args.clone() ) ;
let res = Sample { pred, args: args.clone() } ;
// `(pred, args, constraint)` triples to remove from the map once done.
let mut dead_links = vec![] ;
// New negative, but not a new sample. Might appear in some constraints.
if is_new_neg && ! is_new_sample {
// Worklist of negative samples whose constraints still need simplifying.
let mut to_propagate = vec![ (pred, args) ] ;
'propagate: while let Some(
(curr_pred, curr_args)
) = to_propagate.pop() {
// let (mut curr_pred, mut curr_args) = (pred, args) ;
// 'propagate: loop {
let mut all_constraints = self.map[curr_pred].write().map_err(
corrupted_err
) ? ;
// Get all constraints that mention the current sample.
if let Some(constraints) = all_constraints.remove(& curr_args) {
let mut cstrs = self.constraints.write().map_err(corrupted_err) ? ;
'cstr_iter: for cstr in constraints {
// Index of the sample in the lhs of the constraint.
// None if it's the rhs.
let maybe_index = match cstrs[cstr].rhs.as_ref() {
// rhs
Some(rhs)
if rhs.pred == curr_pred && rhs.args == curr_args => None,
// lhs
_ => {
// Linear scan for the sample's position in the lhs.
let mut cnt = 0 ;
let mut res = None ;
'lhs_iter: for & Sample {
pred, ref args
} in & cstrs[cstr].lhs {
if pred == curr_pred && curr_args == * args {
res = Some(cnt) ;
break 'lhs_iter
} else {
cnt += 1
}
}
if res.is_none() {
// This can happen if a constraint was tautologized when
// adding a positive or negative sample.
continue 'cstr_iter
} else {
res
}
},
} ;
if maybe_index.is_some() {
// Current sample appears in lhs, constraint's a tautology.
let samples = cstrs[cstr].tautologize() ;
for Sample { pred, args } in samples {
dead_links.push( (pred, args, cstr) )
}
} else {
// Current sample appears in rhs, constraint's negative.
cstrs[cstr].rhs = None ;
dead_links.push( (curr_pred, curr_args.clone(), cstr) ) ;
if cstrs[cstr].lhs.len() == 1 {
// Only one sample in lhs, has to be negative, propagating.
let Sample { pred, args } = cstrs[cstr].lhs.pop().unwrap() ;
dead_links.push( (pred, args.clone(), cstr) ) ;
debug_assert!( cstrs[cstr].is_tautology() ) ;
self.neg[pred].write().map_err(
corrupted_err
)?.insert( args.clone() ) ;
to_propagate.push( (pred, args) ) ;
}
}
}
} else {
// No constraint mentions current sample.
}
}
}
self.unlink(dead_links) ? ;
Ok(res)
}
/// Adds a constraint. Propagates positive and negative samples.
///
/// Simplifies the constraint against the known positive/negative samples:
///
/// - returns `Ok(None)` if it is a tautology (some lhs sample is known
///   negative, or the rhs sample is known positive);
/// - returns `Either::Rgt((sample, polarity))` if simplification left a
///   *unit* constraint, which is then handled through `add_pos`
///   (`polarity == true`) or `add_neg` (`polarity == false`);
/// - returns `Either::Lft(constraint)` otherwise, after registering the
///   constraint in `self.constraints` and indexing each of its samples in
///   the sample-to-constraint map.
///
/// Fails (`bail!`) when the simplified constraint has an empty lhs and no
/// rhs, i.e. `true => false`: a contradiction.
pub fn add_cstr(
& self,
lhs: Vec<(PrdIdx, Args)>, rhs: Option< (PrdIdx, Args) >
) -> Res< Option< Either<Constraint, (Sample, bool)> > > {
// Simplified lhs: known-positive samples removed.
let mut nu_lhs = Vec::with_capacity( lhs.len() ) ;
'smpl_iter: for (pred, args) in lhs {
let (args, is_new) = self.samples.mk_is_new(args) ;
if ! is_new {
if self.pos[pred].read().map_err(corrupted_err)?.contains(& args) {
// Sample known to be positive, ignore.
continue 'smpl_iter
} else if self.neg[pred].read().map_err(
corrupted_err
)?.contains(& args) {
// Sample known to be negative, constraint is a tautology.
return Ok(None)
}
}
// Neither pos or neg, memorizing.
nu_lhs.push( Sample { pred, args } )
}
// Simplified rhs: `None` if absent or the sample is known negative.
let nu_rhs = if let Some( (pred, args) ) = rhs {
let (args, is_new) = self.samples.mk_is_new(args) ;
if ! is_new {
if self.pos[pred].read().map_err(corrupted_err)?.contains(& args) {
// Sample known to be positive, constraint's a tautology.
return Ok(None)
} else if self.neg[pred].read().map_err(
corrupted_err
)?.contains(& args) {
// Sample known to be negative, constraint is a negative one.
None
} else {
Some( Sample { pred, args } )
}
} else {
Some( Sample { pred, args } )
}
} else { None } ;
// Index the new constraint will occupy once pushed below.
let cstr_index = self.constraints.read().map_err(
corrupted_err
)?.next_index() ;
// Detect unit cases.
if nu_lhs.is_empty() {
// unit, rhs has to be true.
if let Some( Sample { pred, args } ) = nu_rhs {
return Ok(
Some(Either::Rgt( (self.add_pos(pred, args.get().clone())?, true) ))
)
} else {
bail!("contradiction detected, inference is impossible")
}
} else if nu_lhs.len() == 1 && nu_rhs.is_none() {
// unit, the single lhs has to be false.
let Sample { pred, args } = nu_lhs.pop().unwrap() ;
return Ok(
Some(Either::Rgt( (self.add_neg(pred, args.get().clone())?, false) ))
)
}
// Update the map from samples to constraints. Better to do that now than
// above, since there might be further simplifications possible.
for & Sample { pred, ref args } in & nu_lhs {
let mut map = self.map[pred].write().map_err(corrupted_err)? ;
let entry = map.entry(
args.clone()
) ;
let set = entry.or_insert_with(
|| CstrSet::with_capacity(17)
) ;
let _ = set.insert(cstr_index) ;
}
// Same indexing for the rhs sample, if any.
if let Some( & Sample { pred, ref args } ) = nu_rhs.as_ref() {
let mut map = self.map[pred].write().map_err(corrupted_err)? ;
let entry = map.entry(
args.clone()
) ;
let set = entry.or_insert_with(
|| CstrSet::with_capacity(17)
) ;
let _ = set.insert(cstr_index) ;
}
// Actually store the constraint, at `cstr_index`.
let cstr = Constraint { lhs: nu_lhs, rhs: nu_rhs } ;
self.constraints.write().map_err(corrupted_err)?.push(
cstr.clone()
) ;
Ok( Some( Either::Lft(cstr) ) )
}
/// Uses the classification info to classify some ICE data.
///
/// Scans `data.unc` (the unclassified samples) and moves every sample known
/// positive (resp. negative) for `pred` into `data.pos` (resp. `data.neg`).
/// Uses `swap_remove`, so the order of the remaining unclassified samples is
/// not preserved.
pub fn apply(
& self, pred: PrdIdx, data: & mut CData
) -> Res<()> {
let pos = self.pos[pred].read().map_err(corrupted_err) ? ;
let neg = self.neg[pred].read().map_err(corrupted_err) ? ;
let mut idx = 0 ;
while idx < data.unc.len() {
if pos.contains(& data.unc[idx]) {
// Known positive: move it over.
data.pos.push( data.unc.swap_remove(idx) )
} else if neg.contains(& data.unc[idx]) {
// Known negative: move it over.
data.neg.push( data.unc.swap_remove(idx) )
} else {
// Still unknown. Only advance here: `swap_remove` above puts the
// last element at `idx`, which still needs to be inspected.
idx += 1
}
}
Ok(())
}
}
impl<'a> PebcakFmt<'a> for Data {
type Info = & 'a PrdMap<PrdInfo> ;
fn pebcak_err(& self) -> ErrorKind {
"during data pebcak formatting".into()
}
/// Pretty-prints the whole data set as four s-expression-style sections:
/// positive samples, negative samples, constraints, and the
/// sample-to-constraint map. `map` translates predicate indices to names.
///
/// NOTE(review): lock reads here use `.unwrap()` instead of the
/// `map_err(corrupted_err)?` pattern used elsewhere in this file, so a
/// poisoned lock panics during formatting — confirm that is intentional.
fn pebcak_io_fmt<W: Write>(
& self, w: & mut W, map: & 'a PrdMap<PrdInfo>
) -> IoRes<()> {
write!(w, "pos (") ? ;
// One `(pred arg...)` entry per positive sample.
for (pred, set) in self.pos.index_iter() {
for args in set.read().unwrap().iter() {
write!(w, "\n ({}", map[pred]) ? ;
for arg in args.iter() {
write!(w, " {}", arg)?
}
write!(w, ")") ?
}
}
write!(w, "\n) neg (") ? ;
// Same layout for negative samples.
for (pred, set) in self.neg.index_iter() {
for args in set.read().unwrap().iter() {
write!(w, "\n ({}", map[pred]) ? ;
for arg in args.iter() {
write!(w, " {}", arg)?
}
write!(w, ")") ?
}
}
write!(w, "\n) constraints (") ? ;
// `index | lhs... => rhs` per constraint; tautologies print as `_`.
for (index, cstr) in self.constraints.read().unwrap().index_iter() {
write!(w, "\n {: >3} | ", index) ? ;
if cstr.is_tautology() {
write!(w, "_") ?
} else {
for & Sample { pred, ref args } in cstr.lhs.iter() {
write!(w, "({}", map[pred]) ? ;
for arg in args.iter() {
write!(w, " {}", arg) ?
}
write!(w, ") ") ?
}
write!(w, "=> ") ? ;
// An absent rhs means the constraint is negative (`... => false`).
if let Some(& Sample { pred, ref args }) = cstr.rhs.as_ref() {
write!(w, "({}", map[pred]) ? ;
for arg in args.iter() {
write!(w, " {}", arg) ?
}
write!(w, ")") ?
} else {
write!(w, "false") ?
}
}
}
write!(w, "\n) constraint map(") ? ;
// `(pred arg...) -> cstr-indices` for each sample in the map.
for (pred, samples) in self.map.index_iter() {
for (sample, set) in samples.read().unwrap().iter() {
write!(w, "\n ({}", map[pred]) ? ;
for arg in sample.iter() {
write!(w, " {}", arg) ?
}
write!(w, ") ->") ? ;
for pred in set.iter() {
write!(w, " {}", pred) ?
}
}
}
writeln!(w, "\n)")
}
}
/// New learning data sent by the teacher to the learners.
#[derive(Clone)]
pub struct LearningData {
/// Positive learning data.
pub pos: Vec<Sample>,
/// Negative learning data.
pub neg: Vec<Sample>,
/// Constraints (each one relates a conjunction of lhs samples to an
/// optional rhs sample).
pub cstr: Vec<Constraint>,
}
impl LearningData {
/// Constructor.
pub fn mk(
pos: Vec<Sample>, neg: Vec<Sample>, cstr: Vec<Constraint>
) -> Self {
LearningData { cstr, neg, pos }
}
/// Returns `true` if everything's empty.
pub fn is_empty(& self) -> bool {
if ! self.pos.is_empty() { return false }
if ! self.neg.is_empty() { return false }
self.cstr.is_empty()
}
}
impl<'a> PebcakFmt<'a> for LearningData {
type Info = & 'a PrdMap<PrdInfo> ;
fn pebcak_err(& self) -> ErrorKind {
"during constraint pebcak formatting".into()
}
/// Pretty-prints the learning data in three sections (`pos`, `neg`,
/// `constraints`), delegating each element's formatting to its own
/// `pebcak_io_fmt`. Empty sections print as `(` immediately closed by `)`.
fn pebcak_io_fmt<W: Write>(
& self, w: & mut W, map: & 'a PrdMap<PrdInfo>
) -> IoRes<()> {
write!(w, "pos (") ? ;
if ! self.pos.is_empty() {
write!(w, "\n ") ? ;
for pos in & self.pos {
write!(w, " ") ? ;
pos.pebcak_io_fmt(w, map) ? ;
write!(w, "\n") ?
}
}
write!(w, ") neg (") ? ;
if ! self.neg.is_empty() {
write!(w, "\n ") ? ;
for neg in & self.neg {
write!(w, " ") ? ;
neg.pebcak_io_fmt(w, map) ? ;
write!(w, "\n") ?
}
}
write!(w, ") constraints (") ? ;
if ! self.cstr.is_empty() {
write!(w, "\n ") ? ;
for cstr in & self.cstr {
write!(w, " ") ? ;
cstr.pebcak_io_fmt(w, map) ? ;
writeln!(w, "") ?
}
}
writeln!(w, ")")
}
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::InheritTypes::{NodeBase, NodeCast, TextCast, ElementCast};
use dom::bindings::codegen::InheritTypes::HTMLIFrameElementCast;
use dom::bindings::js::JS;
use dom::bindings::utils::Reflectable;
use dom::document::Document;
use dom::element::{AttributeHandlers, HTMLLinkElementTypeId, HTMLIFrameElementTypeId};
use dom::htmlelement::HTMLElement;
use dom::htmlheadingelement::{Heading1, Heading2, Heading3, Heading4, Heading5, Heading6};
use dom::htmliframeelement::IFrameSize;
use dom::htmlformelement::HTMLFormElement;
use dom::node::{ElementNodeTypeId, INode, NodeHelpers};
use dom::types::*;
use html::cssparse::{StylesheetProvenance, UrlProvenance, spawn_css_parser};
use script_task::Page;
use hubbub::hubbub;
use servo_msg::constellation_msg::SubpageId;
use servo_net::resource_task::{Load, Payload, Done, ResourceTask, load_whole_resource};
use servo_util::namespace::Null;
use servo_util::str::{DOMString, HTML_SPACE_CHARACTERS};
use servo_util::task::spawn_named;
use servo_util::url::parse_url;
use std::ascii::StrAsciiExt;
use std::cast;
use std::cell::RefCell;
use std::comm::{channel, Sender, Receiver};
use std::str;
use style::Stylesheet;
use url::Url;
// Expands to an early `return` from the enclosing function when `$string`
// equals `$localName`: constructs the node via `$ctor::new` (forwarding any
// extra constructor arguments) and upcasts it to `JS<Element>`.
// Pre-1.0 `macro_rules!` syntax (parenthesized body, no trailing `;`).
macro_rules! handle_element(
($document: expr,
$localName: expr,
$string: expr,
$ctor: ident
$(, $arg:expr )*) => (
if $string == $localName {
return ElementCast::from(&$ctor::new($localName, $document $(, $arg)*));
}
)
)
/// A JavaScript source discovered during parsing.
pub struct JSFile {
// Raw script source text.
data: ~str,
// For external scripts this is the final URL after the load (see
// `js_script_listener`); for inline scripts it is the page URL.
url: Url
}
/// All scripts collected for a page, in the order they were received.
pub type JSResult = ~[JSFile];
/// Messages consumed by the CSS listener task (`css_link_listener`).
enum CSSMessage {
/// A newly discovered stylesheet source to parse.
CSSTaskNewFile(StylesheetProvenance),
/// Parsing is over; flush collected sheets and exit.
CSSTaskExit
}
/// Messages consumed by the JS listener task (`js_script_listener`).
enum JSMessage {
/// An external script to fetch, by URL.
JSTaskNewFile(Url),
/// An inline script: its source text and the page URL.
JSTaskNewInlineScript(~str, Url),
/// Parsing is over; send collected scripts and exit.
JSTaskExit
}
/// Messages generated by the HTML parser upon discovery of additional resources
pub enum HtmlDiscoveryMessage {
/// A parsed stylesheet from a `<link rel="stylesheet">`.
HtmlDiscoveredStyle(Stylesheet),
/// An iframe: its URL, assigned subpage id, and whether it is sandboxed.
HtmlDiscoveredIFrame((Url, SubpageId, bool)),
/// All scripts collected for the page.
HtmlDiscoveredScript(JSResult)
}
/// Handle returned by `parse_html`: the receiving end for resources
/// discovered while parsing.
pub struct HtmlParserResult {
discovery_port: Receiver<HtmlDiscoveryMessage>,
}
/// Round-trips a typed DOM node reference through hubbub's untyped node
/// pointer, so nodes can cross the FFI boundary.
trait NodeWrapping {
unsafe fn to_hubbub_node(&self) -> hubbub::NodeDataPtr;
unsafe fn from_hubbub_node(n: hubbub::NodeDataPtr) -> Self;
}
impl<T: NodeBase+Reflectable> NodeWrapping for JS<T> {
unsafe fn to_hubbub_node(&self) -> hubbub::NodeDataPtr {
// SAFETY(review): transmutes the raw node pointer into hubbub's opaque
// pointer type. Sound only as long as `from_hubbub_node` below is the
// sole consumer and the node outlives the parse — confirm.
cast::transmute(self.get())
}
unsafe fn from_hubbub_node(n: hubbub::NodeDataPtr) -> JS<T> {
// SAFETY(review): inverse of `to_hubbub_node`; assumes `n` originated
// from a `JS<T>` of the same `T` — confirm callers uphold this.
JS::from_raw(cast::transmute(n))
}
}
/**
Runs a task that coordinates parsing links to css stylesheets.
This function should be spawned in a separate task and spins waiting
for the html builder to find links to css stylesheets and sends off
tasks to parse each link. When the html process finishes, it notifies
the listener, who then collects the css rules from each task it
spawned, collates them, and sends them to the given result channel.
# Arguments
* `to_parent` - A channel on which to send back the full set of rules.
* `from_parent` - A port on which to receive new links.
*/
fn css_link_listener(to_parent: Sender<HtmlDiscoveryMessage>,
from_parent: Receiver<CSSMessage>,
resource_task: ResourceTask) {
// NOTE(review): `resource_task` is never used in this body —
// `spawn_css_parser` only takes the provenance. Candidate for removal
// (would require updating the caller).
// Each new file spawns a parser task; we keep its result port.
let mut result_vec = ~[];
loop {
match from_parent.recv_opt() {
Some(CSSTaskNewFile(provenance)) => {
result_vec.push(spawn_css_parser(provenance));
}
// Exit on explicit request or when the sender hangs up.
Some(CSSTaskExit) | None => {
break;
}
}
}
// Send the sheets back in order
// FIXME: Shouldn't wait until after we've recieved CSSTaskExit to start sending these
for port in result_vec.iter() {
to_parent.try_send(HtmlDiscoveredStyle(port.recv()));
}
}
/// Collects the page's scripts. External scripts are fetched synchronously
/// through `resource_task` (load failures are logged and skipped); inline
/// scripts arrive with their source in hand. On exit (explicit message or
/// closed channel), the accumulated `JSResult` is sent back in one
/// `HtmlDiscoveredScript` message.
fn js_script_listener(to_parent: Sender<HtmlDiscoveryMessage>,
from_parent: Receiver<JSMessage>,
resource_task: ResourceTask) {
let mut result_vec = ~[];
loop {
match from_parent.recv_opt() {
Some(JSTaskNewFile(url)) => {
match load_whole_resource(&resource_task, url.clone()) {
Err(_) => {
error!("error loading script {:s}", url.to_str());
}
Ok((metadata, bytes)) => {
// Store the final URL (post-redirect) with the decoded source.
result_vec.push(JSFile {
data: str::from_utf8(bytes).unwrap().to_owned(),
url: metadata.final_url,
});
}
}
}
Some(JSTaskNewInlineScript(data, url)) => {
result_vec.push(JSFile { data: data, url: url });
}
Some(JSTaskExit) | None => {
break;
}
}
}
to_parent.try_send(HtmlDiscoveredScript(result_vec));
}
// Silly macros to handle constructing DOM nodes. This produces bad code and should be optimized
// via atomization (issue #85).
/// Builds the DOM element matching `tag`, upcast to `JS<Element>`.
///
/// Each `handle_element!` line early-returns when `tag` matches its string.
/// Headings forward the extra `Heading1`..`Heading6` constructor argument.
/// Unrecognized tags fall through to `HTMLUnknownElement` at the end.
pub fn build_element_from_tag(tag: DOMString, document: &JS<Document>) -> JS<Element> {
// TODO (Issue #85): use atoms
handle_element!(document, tag, "a", HTMLAnchorElement);
handle_element!(document, tag, "applet", HTMLAppletElement);
handle_element!(document, tag, "area", HTMLAreaElement);
handle_element!(document, tag, "aside", HTMLElement);
handle_element!(document, tag, "audio", HTMLAudioElement);
handle_element!(document, tag, "b", HTMLElement);
handle_element!(document, tag, "base", HTMLBaseElement);
handle_element!(document, tag, "body", HTMLBodyElement);
handle_element!(document, tag, "br", HTMLBRElement);
handle_element!(document, tag, "button", HTMLButtonElement);
handle_element!(document, tag, "canvas", HTMLCanvasElement);
handle_element!(document, tag, "caption", HTMLTableCaptionElement);
handle_element!(document, tag, "col", HTMLTableColElement);
handle_element!(document, tag, "colgroup", HTMLTableColElement);
handle_element!(document, tag, "data", HTMLDataElement);
handle_element!(document, tag, "datalist", HTMLDataListElement);
handle_element!(document, tag, "del", HTMLModElement);
handle_element!(document, tag, "dir", HTMLDirectoryElement);
handle_element!(document, tag, "div", HTMLDivElement);
handle_element!(document, tag, "dl", HTMLDListElement);
handle_element!(document, tag, "em", HTMLElement);
handle_element!(document, tag, "embed", HTMLEmbedElement);
handle_element!(document, tag, "fieldset", HTMLFieldSetElement);
handle_element!(document, tag, "font", HTMLFontElement);
handle_element!(document, tag, "form", HTMLFormElement);
handle_element!(document, tag, "frame", HTMLFrameElement);
handle_element!(document, tag, "frameset", HTMLFrameSetElement);
handle_element!(document, tag, "h1", HTMLHeadingElement, Heading1);
handle_element!(document, tag, "h2", HTMLHeadingElement, Heading2);
handle_element!(document, tag, "h3", HTMLHeadingElement, Heading3);
handle_element!(document, tag, "h4", HTMLHeadingElement, Heading4);
handle_element!(document, tag, "h5", HTMLHeadingElement, Heading5);
handle_element!(document, tag, "h6", HTMLHeadingElement, Heading6);
handle_element!(document, tag, "head", HTMLHeadElement);
handle_element!(document, tag, "hr", HTMLHRElement);
handle_element!(document, tag, "html", HTMLHtmlElement);
handle_element!(document, tag, "i", HTMLElement);
handle_element!(document, tag, "iframe", HTMLIFrameElement);
handle_element!(document, tag, "img", HTMLImageElement);
handle_element!(document, tag, "input", HTMLInputElement);
handle_element!(document, tag, "ins", HTMLModElement);
handle_element!(document, tag, "label", HTMLLabelElement);
handle_element!(document, tag, "legend", HTMLLegendElement);
handle_element!(document, tag, "li", HTMLLIElement);
handle_element!(document, tag, "link", HTMLLinkElement);
handle_element!(document, tag, "main", HTMLMainElement);
handle_element!(document, tag, "map", HTMLMapElement);
handle_element!(document, tag, "meta", HTMLMetaElement);
handle_element!(document, tag, "meter", HTMLMeterElement);
handle_element!(document, tag, "object", HTMLObjectElement);
handle_element!(document, tag, "ol", HTMLOListElement);
handle_element!(document, tag, "optgroup", HTMLOptGroupElement);
handle_element!(document, tag, "option", HTMLOptionElement);
handle_element!(document, tag, "output", HTMLOutputElement);
handle_element!(document, tag, "p", HTMLParagraphElement);
handle_element!(document, tag, "param", HTMLParamElement);
handle_element!(document, tag, "pre", HTMLPreElement);
handle_element!(document, tag, "progress", HTMLProgressElement);
handle_element!(document, tag, "q", HTMLQuoteElement);
handle_element!(document, tag, "script", HTMLScriptElement);
handle_element!(document, tag, "section", HTMLElement);
handle_element!(document, tag, "select", HTMLSelectElement);
handle_element!(document, tag, "small", HTMLElement);
handle_element!(document, tag, "source", HTMLSourceElement);
handle_element!(document, tag, "span", HTMLSpanElement);
handle_element!(document, tag, "strong", HTMLElement);
handle_element!(document, tag, "style", HTMLStyleElement);
handle_element!(document, tag, "table", HTMLTableElement);
handle_element!(document, tag, "tbody", HTMLTableSectionElement);
handle_element!(document, tag, "td", HTMLTableDataCellElement);
handle_element!(document, tag, "template", HTMLTemplateElement);
handle_element!(document, tag, "textarea", HTMLTextAreaElement);
handle_element!(document, tag, "th", HTMLTableHeaderCellElement);
handle_element!(document, tag, "time", HTMLTimeElement);
handle_element!(document, tag, "title", HTMLTitleElement);
handle_element!(document, tag, "tr", HTMLTableRowElement);
handle_element!(document, tag, "track", HTMLTrackElement);
handle_element!(document, tag, "ul", HTMLUListElement);
handle_element!(document, tag, "video", HTMLVideoElement);
// Fallback for anything not matched above.
return ElementCast::from(&HTMLUnknownElement::new(tag, document));
}
/// Drives a full hubbub parse of the page at `url`.
///
/// Spawns two helper tasks (CSS and JS listeners), fetches the page through
/// `resource_task`, builds DOM nodes via the `TreeHandler` callbacks below,
/// and feeds the response chunks to the parser. Returns the port on which
/// discovered stylesheets, iframes and scripts arrive.
pub fn parse_html(page: &Page,
document: &mut JS<Document>,
url: Url,
resource_task: ResourceTask)
-> HtmlParserResult {
debug!("Hubbub: parsing {:?}", url);
let next_subpage_id: SubpageId = *page.next_subpage_id.deref().borrow();
// Spawn a CSS parser to receive links to CSS style sheets.
let resource_task2 = resource_task.clone();
let (discovery_chan, discovery_port) = channel();
let stylesheet_chan = discovery_chan.clone();
let (css_chan, css_msg_port) = channel();
spawn_named("parse_html:css", proc() {
css_link_listener(stylesheet_chan, css_msg_port, resource_task2.clone());
});
// Spawn a JS parser to receive JavaScript.
let resource_task2 = resource_task.clone();
let js_result_chan = discovery_chan.clone();
let (js_chan, js_msg_port) = channel();
spawn_named("parse_html:js", proc() {
js_script_listener(js_result_chan, js_msg_port, resource_task2.clone());
});
// Wait for the LoadResponse so that the parser knows the final URL.
let (input_chan, input_port) = channel();
resource_task.send(Load(url.clone(), input_chan));
let load_response = input_port.recv();
debug!("Fetched page; metadata is {:?}", load_response.metadata);
// Relative URLs in the document resolve against the post-redirect URL.
let base_url = load_response.metadata.final_url.clone();
let url2 = base_url.clone();
let url3 = url2.clone();
{
// Store the final URL before we start parsing, so that DOM routines
// (e.g. HTMLImageElement::update_image) can resolve relative URLs
// correctly.
*page.mut_url() = Some((url2.clone(), true));
}
let pipeline_id = page.id;
let mut parser = hubbub::Parser("UTF-8", false);
debug!("created parser");
parser.set_document_node(unsafe { document.to_hubbub_node() });
parser.enable_scripting(true);
parser.enable_styling(true);
// Clones moved into the tree-handler closures below.
let (css_chan2, js_chan2) = (css_chan.clone(), js_chan.clone());
let next_subpage_id = RefCell::new(next_subpage_id);
let doc_cell = RefCell::new(document);
let tree_handler = hubbub::TreeHandler {
create_comment: |data: ~str| {
debug!("create comment");
// NOTE: tmp vars are workaround for lifetime issues. Both required.
let tmp_borrow = doc_cell.borrow();
let tmp = &*tmp_borrow;
let comment: JS<Node> = NodeCast::from(&Comment::new(data, *tmp));
unsafe { comment.to_hubbub_node() }
},
create_doctype: |doctype: ~hubbub::Doctype| {
debug!("create doctype");
let ~hubbub::Doctype {name: name,
public_id: public_id,
system_id: system_id,
force_quirks: _ } = doctype;
// NOTE: tmp vars are workaround for lifetime issues. Both required.
let tmp_borrow = doc_cell.borrow();
let tmp = &*tmp_borrow;
let doctype_node = DocumentType::new(name, public_id, system_id, *tmp);
unsafe {
doctype_node.to_hubbub_node()
}
},
create_element: |tag: ~hubbub::Tag| {
debug!("create element");
// NOTE: tmp vars are workaround for lifetime issues. Both required.
let tmp_borrow = doc_cell.borrow();
let tmp = &*tmp_borrow;
let mut element = build_element_from_tag(tag.name.clone(), *tmp);
debug!("-- attach attrs");
for attr in tag.attributes.iter() {
//FIXME: this should have proper error handling or explicitly drop
// exceptions on the ground
assert!(element.set_attr(attr.name.clone(),
attr.value.clone()).is_ok());
}
// Spawn additional parsing, network loads, etc. from tag and attrs
match element.get().node.type_id {
// Handle CSS style sheets from <link> elements
ElementNodeTypeId(HTMLLinkElementTypeId) => {
// Only `rel` values containing the token "stylesheet"
// (case-insensitive, space-separated) count.
match (element.get_attribute(Null, "rel"),
element.get_attribute(Null, "href")) {
(Some(ref rel), Some(ref href)) if rel.get()
.value_ref()
.split(HTML_SPACE_CHARACTERS.
as_slice())
.any(|s| {
s.eq_ignore_ascii_case("stylesheet")
}) => {
debug!("found CSS stylesheet: {:s}", href.get().value_ref());
let url = parse_url(href.get().value_ref(), Some(url2.clone()));
css_chan2.send(CSSTaskNewFile(UrlProvenance(url, resource_task.clone())));
}
_ => {}
}
}
ElementNodeTypeId(HTMLIFrameElementTypeId) => {
let iframe_chan = discovery_chan.clone();
let mut iframe_element: JS<HTMLIFrameElement> =
HTMLIFrameElementCast::to(&element).unwrap();
let sandboxed = iframe_element.get().is_sandboxed();
let elem: JS<Element> = ElementCast::from(&iframe_element);
let src_opt = elem.get_attribute(Null, "src").map(|x| x.get().Value());
// `for ... in src_opt.iter()` runs the body only when `src` exists.
for src in src_opt.iter() {
let iframe_url = parse_url(*src, Some(url2.clone()));
iframe_element.get_mut().set_frame(iframe_url.clone());
// Subpage Id
let subpage_id = *next_subpage_id.borrow();
let SubpageId(id_num) = subpage_id;
next_subpage_id.set(SubpageId(id_num + 1));
iframe_element.get_mut().size = Some(IFrameSize {
pipeline_id: pipeline_id,
subpage_id: subpage_id,
});
iframe_chan.send(HtmlDiscoveredIFrame((iframe_url,
subpage_id,
sandboxed)));
}
}
_ => {}
}
unsafe { element.to_hubbub_node() }
},
create_text: |data: ~str| {
debug!("create text");
// NOTE: tmp vars are workaround for lifetime issues. Both required.
let tmp_borrow = doc_cell.borrow();
let tmp = &*tmp_borrow;
let text = Text::new(data, *tmp);
unsafe { text.to_hubbub_node() }
},
ref_node: |_| {},
unref_node: |_| {},
append_child: |parent: hubbub::NodeDataPtr, child: hubbub::NodeDataPtr| {
unsafe {
debug!("append child {:x} {:x}", parent, child);
let mut parent: JS<Node> = NodeWrapping::from_hubbub_node(parent);
let mut child: JS<Node> = NodeWrapping::from_hubbub_node(child);
assert!(parent.AppendChild(&mut child).is_ok());
}
child
},
// The callbacks below are unimplemented stubs that just log and return.
insert_before: |_parent, _child| {
debug!("insert before");
0u
},
remove_child: |_parent, _child| {
debug!("remove child");
0u
},
clone_node: |_node, deep| {
debug!("clone node");
if deep { error!("-- deep clone unimplemented"); }
fail!(~"clone node unimplemented")
},
reparent_children: |_node, _new_parent| {
debug!("reparent children");
0u
},
get_parent: |_node, _element_only| {
debug!("get parent");
0u
},
has_children: |_node| {
debug!("has children");
false
},
form_associate: |_form, _node| {
debug!("form associate");
},
add_attributes: |_node, _attributes| {
debug!("add attributes");
},
set_quirks_mode: |mode| {
debug!("set quirks mode");
// NOTE: tmp vars are workaround for lifetime issues. Both required.
let mut tmp_borrow = doc_cell.borrow_mut();
let tmp = &mut *tmp_borrow;
tmp.get_mut().set_quirks_mode(mode);
},
encoding_change: |encname| {
debug!("encoding change");
// NOTE: tmp vars are workaround for lifetime issues. Both required.
let mut tmp_borrow = doc_cell.borrow_mut();
let tmp = &mut *tmp_borrow;
tmp.get_mut().set_encoding_name(encname);
},
complete_script: |script| {
// External scripts are forwarded by URL; inline scripts by
// concatenating the text children of the <script> node.
unsafe {
let script: JS<Element> = NodeWrapping::from_hubbub_node(script);
match script.get_attribute(Null, "src") {
Some(src) => {
debug!("found script: {:s}", src.get().Value());
let new_url = parse_url(src.get().value_ref(), Some(url3.clone()));
js_chan2.send(JSTaskNewFile(new_url));
}
None => {
let mut data = ~[];
let scriptnode: JS<Node> = NodeCast::from(&script);
debug!("iterating over children {:?}", scriptnode.first_child());
for child in scriptnode.children() {
debug!("child = {:?}", child);
let text: JS<Text> = TextCast::to(&child).unwrap();
data.push(text.get().characterdata.data.to_str()); // FIXME: Bad copy.
}
debug!("script data = {:?}", data);
js_chan2.send(JSTaskNewInlineScript(data.concat(), url3.clone()));
}
}
}
debug!("complete script");
},
complete_style: |_| {
// style parsing is handled in element::notify_child_list_changed.
},
};
parser.set_tree_handler(&tree_handler);
debug!("set tree handler");
debug!("loaded page");
// Feed response chunks to the parser until the load completes.
loop {
match load_response.progress_port.recv() {
Payload(data) => {
debug!("received data");
parser.parse_chunk(data);
}
Done(Err(..)) => {
fail!("Failed to load page URL {:s}", url.to_str());
}
Done(..) => {
break;
}
}
}
debug!("finished parsing");
// Tell the helper tasks to flush their results and shut down.
css_chan.send(CSSTaskExit);
js_chan.send(JSTaskExit);
HtmlParserResult {
discovery_port: discovery_port,
}
}
Auto merge of #2157 (branch `lpy/servo/issue2153`, reviewed by Ms2ger):
remove the unused `resource_task` argument from `css_link_listener`. See issue #2153.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::InheritTypes::{NodeBase, NodeCast, TextCast, ElementCast};
use dom::bindings::codegen::InheritTypes::HTMLIFrameElementCast;
use dom::bindings::js::JS;
use dom::bindings::utils::Reflectable;
use dom::document::Document;
use dom::element::{AttributeHandlers, HTMLLinkElementTypeId, HTMLIFrameElementTypeId};
use dom::htmlelement::HTMLElement;
use dom::htmlheadingelement::{Heading1, Heading2, Heading3, Heading4, Heading5, Heading6};
use dom::htmliframeelement::IFrameSize;
use dom::htmlformelement::HTMLFormElement;
use dom::node::{ElementNodeTypeId, INode, NodeHelpers};
use dom::types::*;
use html::cssparse::{StylesheetProvenance, UrlProvenance, spawn_css_parser};
use script_task::Page;
use hubbub::hubbub;
use servo_msg::constellation_msg::SubpageId;
use servo_net::resource_task::{Load, Payload, Done, ResourceTask, load_whole_resource};
use servo_util::namespace::Null;
use servo_util::str::{DOMString, HTML_SPACE_CHARACTERS};
use servo_util::task::spawn_named;
use servo_util::url::parse_url;
use std::ascii::StrAsciiExt;
use std::cast;
use std::cell::RefCell;
use std::comm::{channel, Sender, Receiver};
use std::str;
use style::Stylesheet;
use url::Url;
// Expands to an early `return` from the enclosing function when `$string`
// equals `$localName`: constructs the node via `$ctor::new` (forwarding any
// extra constructor arguments) and upcasts it to `JS<Element>`.
// Pre-1.0 `macro_rules!` syntax (parenthesized body, no trailing `;`).
macro_rules! handle_element(
($document: expr,
$localName: expr,
$string: expr,
$ctor: ident
$(, $arg:expr )*) => (
if $string == $localName {
return ElementCast::from(&$ctor::new($localName, $document $(, $arg)*));
}
)
)
/// A JavaScript source discovered during parsing.
pub struct JSFile {
// Raw script source text.
data: ~str,
// For external scripts this is the final URL after the load (see
// `js_script_listener`); for inline scripts it is the page URL.
url: Url
}
/// All scripts collected for a page, in the order they were received.
pub type JSResult = ~[JSFile];
/// Messages consumed by the CSS listener task (`css_link_listener`).
enum CSSMessage {
/// A newly discovered stylesheet source to parse.
CSSTaskNewFile(StylesheetProvenance),
/// Parsing is over; flush collected sheets and exit.
CSSTaskExit
}
/// Messages consumed by the JS listener task (`js_script_listener`).
enum JSMessage {
/// An external script to fetch, by URL.
JSTaskNewFile(Url),
/// An inline script: its source text and the page URL.
JSTaskNewInlineScript(~str, Url),
/// Parsing is over; send collected scripts and exit.
JSTaskExit
}
/// Messages generated by the HTML parser upon discovery of additional resources
pub enum HtmlDiscoveryMessage {
/// A parsed stylesheet from a `<link rel="stylesheet">`.
HtmlDiscoveredStyle(Stylesheet),
/// An iframe: its URL, assigned subpage id, and whether it is sandboxed.
HtmlDiscoveredIFrame((Url, SubpageId, bool)),
/// All scripts collected for the page.
HtmlDiscoveredScript(JSResult)
}
/// Handle returned by `parse_html`: the receiving end for resources
/// discovered while parsing.
pub struct HtmlParserResult {
discovery_port: Receiver<HtmlDiscoveryMessage>,
}
/// Round-trips a typed DOM node reference through hubbub's untyped node
/// pointer, so nodes can cross the FFI boundary.
trait NodeWrapping {
unsafe fn to_hubbub_node(&self) -> hubbub::NodeDataPtr;
unsafe fn from_hubbub_node(n: hubbub::NodeDataPtr) -> Self;
}
impl<T: NodeBase+Reflectable> NodeWrapping for JS<T> {
unsafe fn to_hubbub_node(&self) -> hubbub::NodeDataPtr {
// SAFETY(review): transmutes the raw node pointer into hubbub's opaque
// pointer type. Sound only as long as `from_hubbub_node` below is the
// sole consumer and the node outlives the parse — confirm.
cast::transmute(self.get())
}
unsafe fn from_hubbub_node(n: hubbub::NodeDataPtr) -> JS<T> {
// SAFETY(review): inverse of `to_hubbub_node`; assumes `n` originated
// from a `JS<T>` of the same `T` — confirm callers uphold this.
JS::from_raw(cast::transmute(n))
}
}
/**
Runs a task that coordinates parsing links to css stylesheets.
This function should be spawned in a separate task and spins waiting
for the html builder to find links to css stylesheets and sends off
tasks to parse each link. When the html process finishes, it notifies
the listener, who then collects the css rules from each task it
spawned, collates them, and sends them to the given result channel.
# Arguments
* `to_parent` - A channel on which to send back the full set of rules.
* `from_parent` - A port on which to receive new links.
*/
fn css_link_listener(to_parent: Sender<HtmlDiscoveryMessage>,
from_parent: Receiver<CSSMessage>) {
// Each new file spawns a parser task; we keep its result port.
let mut result_vec = ~[];
loop {
match from_parent.recv_opt() {
Some(CSSTaskNewFile(provenance)) => {
result_vec.push(spawn_css_parser(provenance));
}
// Exit on explicit request or when the sender hangs up.
Some(CSSTaskExit) | None => {
break;
}
}
}
// Send the sheets back in order
// FIXME: Shouldn't wait until after we've recieved CSSTaskExit to start sending these
for port in result_vec.iter() {
to_parent.try_send(HtmlDiscoveredStyle(port.recv()));
}
}
/// Collects the page's scripts. External scripts are fetched synchronously
/// through `resource_task` (load failures are logged and skipped); inline
/// scripts arrive with their source in hand. On exit (explicit message or
/// closed channel), the accumulated `JSResult` is sent back in one
/// `HtmlDiscoveredScript` message.
fn js_script_listener(to_parent: Sender<HtmlDiscoveryMessage>,
from_parent: Receiver<JSMessage>,
resource_task: ResourceTask) {
let mut result_vec = ~[];
loop {
match from_parent.recv_opt() {
Some(JSTaskNewFile(url)) => {
match load_whole_resource(&resource_task, url.clone()) {
Err(_) => {
error!("error loading script {:s}", url.to_str());
}
Ok((metadata, bytes)) => {
// Store the final URL (post-redirect) with the decoded source.
result_vec.push(JSFile {
data: str::from_utf8(bytes).unwrap().to_owned(),
url: metadata.final_url,
});
}
}
}
Some(JSTaskNewInlineScript(data, url)) => {
result_vec.push(JSFile { data: data, url: url });
}
Some(JSTaskExit) | None => {
break;
}
}
}
to_parent.try_send(HtmlDiscoveredScript(result_vec));
}
// Silly macros to handle constructing DOM nodes. This produces bad code and should be optimized
// via atomization (issue #85).
/// Instantiates the concrete DOM element type for a tag name.
///
/// Each `handle_element!` line expands to an early `return` when `tag`
/// matches its string literal; unrecognized tag names fall through to
/// `HTMLUnknownElement` at the bottom.
pub fn build_element_from_tag(tag: DOMString, document: &JS<Document>) -> JS<Element> {
    // TODO (Issue #85): use atoms
    handle_element!(document, tag, "a", HTMLAnchorElement);
    handle_element!(document, tag, "applet", HTMLAppletElement);
    handle_element!(document, tag, "area", HTMLAreaElement);
    handle_element!(document, tag, "aside", HTMLElement);
    handle_element!(document, tag, "audio", HTMLAudioElement);
    handle_element!(document, tag, "b", HTMLElement);
    handle_element!(document, tag, "base", HTMLBaseElement);
    handle_element!(document, tag, "body", HTMLBodyElement);
    handle_element!(document, tag, "br", HTMLBRElement);
    handle_element!(document, tag, "button", HTMLButtonElement);
    handle_element!(document, tag, "canvas", HTMLCanvasElement);
    handle_element!(document, tag, "caption", HTMLTableCaptionElement);
    handle_element!(document, tag, "col", HTMLTableColElement);
    handle_element!(document, tag, "colgroup", HTMLTableColElement);
    handle_element!(document, tag, "data", HTMLDataElement);
    handle_element!(document, tag, "datalist", HTMLDataListElement);
    handle_element!(document, tag, "del", HTMLModElement);
    handle_element!(document, tag, "dir", HTMLDirectoryElement);
    handle_element!(document, tag, "div", HTMLDivElement);
    handle_element!(document, tag, "dl", HTMLDListElement);
    handle_element!(document, tag, "em", HTMLElement);
    handle_element!(document, tag, "embed", HTMLEmbedElement);
    handle_element!(document, tag, "fieldset", HTMLFieldSetElement);
    handle_element!(document, tag, "font", HTMLFontElement);
    handle_element!(document, tag, "form", HTMLFormElement);
    handle_element!(document, tag, "frame", HTMLFrameElement);
    handle_element!(document, tag, "frameset", HTMLFrameSetElement);
    // Headings additionally carry their level as an extra macro argument.
    handle_element!(document, tag, "h1", HTMLHeadingElement, Heading1);
    handle_element!(document, tag, "h2", HTMLHeadingElement, Heading2);
    handle_element!(document, tag, "h3", HTMLHeadingElement, Heading3);
    handle_element!(document, tag, "h4", HTMLHeadingElement, Heading4);
    handle_element!(document, tag, "h5", HTMLHeadingElement, Heading5);
    handle_element!(document, tag, "h6", HTMLHeadingElement, Heading6);
    handle_element!(document, tag, "head", HTMLHeadElement);
    handle_element!(document, tag, "hr", HTMLHRElement);
    handle_element!(document, tag, "html", HTMLHtmlElement);
    handle_element!(document, tag, "i", HTMLElement);
    handle_element!(document, tag, "iframe", HTMLIFrameElement);
    handle_element!(document, tag, "img", HTMLImageElement);
    handle_element!(document, tag, "input", HTMLInputElement);
    handle_element!(document, tag, "ins", HTMLModElement);
    handle_element!(document, tag, "label", HTMLLabelElement);
    handle_element!(document, tag, "legend", HTMLLegendElement);
    handle_element!(document, tag, "li", HTMLLIElement);
    handle_element!(document, tag, "link", HTMLLinkElement);
    handle_element!(document, tag, "main", HTMLMainElement);
    handle_element!(document, tag, "map", HTMLMapElement);
    handle_element!(document, tag, "meta", HTMLMetaElement);
    handle_element!(document, tag, "meter", HTMLMeterElement);
    handle_element!(document, tag, "object", HTMLObjectElement);
    handle_element!(document, tag, "ol", HTMLOListElement);
    handle_element!(document, tag, "optgroup", HTMLOptGroupElement);
    handle_element!(document, tag, "option", HTMLOptionElement);
    handle_element!(document, tag, "output", HTMLOutputElement);
    handle_element!(document, tag, "p", HTMLParagraphElement);
    handle_element!(document, tag, "param", HTMLParamElement);
    handle_element!(document, tag, "pre", HTMLPreElement);
    handle_element!(document, tag, "progress", HTMLProgressElement);
    handle_element!(document, tag, "q", HTMLQuoteElement);
    handle_element!(document, tag, "script", HTMLScriptElement);
    handle_element!(document, tag, "section", HTMLElement);
    handle_element!(document, tag, "select", HTMLSelectElement);
    handle_element!(document, tag, "small", HTMLElement);
    handle_element!(document, tag, "source", HTMLSourceElement);
    handle_element!(document, tag, "span", HTMLSpanElement);
    handle_element!(document, tag, "strong", HTMLElement);
    handle_element!(document, tag, "style", HTMLStyleElement);
    handle_element!(document, tag, "table", HTMLTableElement);
    handle_element!(document, tag, "tbody", HTMLTableSectionElement);
    handle_element!(document, tag, "td", HTMLTableDataCellElement);
    handle_element!(document, tag, "template", HTMLTemplateElement);
    handle_element!(document, tag, "textarea", HTMLTextAreaElement);
    handle_element!(document, tag, "th", HTMLTableHeaderCellElement);
    handle_element!(document, tag, "time", HTMLTimeElement);
    handle_element!(document, tag, "title", HTMLTitleElement);
    handle_element!(document, tag, "tr", HTMLTableRowElement);
    handle_element!(document, tag, "track", HTMLTrackElement);
    handle_element!(document, tag, "ul", HTMLUListElement);
    handle_element!(document, tag, "video", HTMLVideoElement);
    // Fallback for tags we do not recognize.
    return ElementCast::from(&HTMLUnknownElement::new(tag, document));
}
/// Drives a hubbub parse of `url` into `document`.
///
/// Spawns two helper tasks that accumulate discovered CSS stylesheets and
/// scripts, streams the page body from `resource_task` into the hubbub
/// parser chunk by chunk, then signals the helpers to finish. The returned
/// `HtmlParserResult` carries the discovery port on which stylesheets,
/// scripts, and iframes arrive.
pub fn parse_html(page: &Page,
                  document: &mut JS<Document>,
                  url: Url,
                  resource_task: ResourceTask)
                  -> HtmlParserResult {
    debug!("Hubbub: parsing {:?}", url);
    let next_subpage_id: SubpageId = *page.next_subpage_id.deref().borrow();
    // Spawn a CSS parser to receive links to CSS style sheets.
    let (discovery_chan, discovery_port) = channel();
    let stylesheet_chan = discovery_chan.clone();
    let (css_chan, css_msg_port) = channel();
    spawn_named("parse_html:css", proc() {
        css_link_listener(stylesheet_chan, css_msg_port);
    });
    // Spawn a JS parser to receive JavaScript.
    let resource_task2 = resource_task.clone();
    let js_result_chan = discovery_chan.clone();
    let (js_chan, js_msg_port) = channel();
    spawn_named("parse_html:js", proc() {
        js_script_listener(js_result_chan, js_msg_port, resource_task2.clone());
    });
    // Wait for the LoadResponse so that the parser knows the final URL.
    let (input_chan, input_port) = channel();
    resource_task.send(Load(url.clone(), input_chan));
    let load_response = input_port.recv();
    debug!("Fetched page; metadata is {:?}", load_response.metadata);
    let base_url = load_response.metadata.final_url.clone();
    let url2 = base_url.clone();
    let url3 = url2.clone();
    {
        // Store the final URL before we start parsing, so that DOM routines
        // (e.g. HTMLImageElement::update_image) can resolve relative URLs
        // correctly.
        *page.mut_url() = Some((url2.clone(), true));
    }
    let pipeline_id = page.id;
    let mut parser = hubbub::Parser("UTF-8", false);
    debug!("created parser");
    parser.set_document_node(unsafe { document.to_hubbub_node() });
    parser.enable_scripting(true);
    parser.enable_styling(true);
    let (css_chan2, js_chan2) = (css_chan.clone(), js_chan.clone());
    let next_subpage_id = RefCell::new(next_subpage_id);
    let doc_cell = RefCell::new(document);
    // Callback table hubbub invokes as it builds the tree. Node handles
    // cross the FFI boundary as raw `hubbub::NodeDataPtr`s.
    let tree_handler = hubbub::TreeHandler {
        create_comment: |data: ~str| {
            debug!("create comment");
            // NOTE: tmp vars are workaround for lifetime issues. Both required.
            let tmp_borrow = doc_cell.borrow();
            let tmp = &*tmp_borrow;
            let comment: JS<Node> = NodeCast::from(&Comment::new(data, *tmp));
            unsafe { comment.to_hubbub_node() }
        },
        create_doctype: |doctype: ~hubbub::Doctype| {
            debug!("create doctype");
            let ~hubbub::Doctype {name: name,
                                  public_id: public_id,
                                  system_id: system_id,
                                  force_quirks: _ } = doctype;
            // NOTE: tmp vars are workaround for lifetime issues. Both required.
            let tmp_borrow = doc_cell.borrow();
            let tmp = &*tmp_borrow;
            let doctype_node = DocumentType::new(name, public_id, system_id, *tmp);
            unsafe {
                doctype_node.to_hubbub_node()
            }
        },
        create_element: |tag: ~hubbub::Tag| {
            debug!("create element");
            // NOTE: tmp vars are workaround for lifetime issues. Both required.
            let tmp_borrow = doc_cell.borrow();
            let tmp = &*tmp_borrow;
            let mut element = build_element_from_tag(tag.name.clone(), *tmp);
            debug!("-- attach attrs");
            for attr in tag.attributes.iter() {
                //FIXME: this should have proper error handling or explicitly drop
                //       exceptions on the ground
                assert!(element.set_attr(attr.name.clone(),
                                         attr.value.clone()).is_ok());
            }
            // Spawn additional parsing, network loads, etc. from tag and attrs
            match element.get().node.type_id {
                // Handle CSS style sheets from <link> elements
                ElementNodeTypeId(HTMLLinkElementTypeId) => {
                    match (element.get_attribute(Null, "rel"),
                           element.get_attribute(Null, "href")) {
                        // Only rel values containing the (case-insensitive)
                        // token "stylesheet" trigger a CSS fetch.
                        (Some(ref rel), Some(ref href)) if rel.get()
                                                              .value_ref()
                                                              .split(HTML_SPACE_CHARACTERS.
                                                                     as_slice())
                                                              .any(|s| {
                                                                  s.eq_ignore_ascii_case("stylesheet")
                                                              }) => {
                            debug!("found CSS stylesheet: {:s}", href.get().value_ref());
                            let url = parse_url(href.get().value_ref(), Some(url2.clone()));
                            css_chan2.send(CSSTaskNewFile(UrlProvenance(url, resource_task.clone())));
                        }
                        _ => {}
                    }
                }
                ElementNodeTypeId(HTMLIFrameElementTypeId) => {
                    let iframe_chan = discovery_chan.clone();
                    let mut iframe_element: JS<HTMLIFrameElement> =
                        HTMLIFrameElementCast::to(&element).unwrap();
                    let sandboxed = iframe_element.get().is_sandboxed();
                    let elem: JS<Element> = ElementCast::from(&iframe_element);
                    let src_opt = elem.get_attribute(Null, "src").map(|x| x.get().Value());
                    for src in src_opt.iter() {
                        let iframe_url = parse_url(*src, Some(url2.clone()));
                        iframe_element.get_mut().set_frame(iframe_url.clone());
                        // Subpage Id: allocate the next id for this nested page.
                        let subpage_id = *next_subpage_id.borrow();
                        let SubpageId(id_num) = subpage_id;
                        next_subpage_id.set(SubpageId(id_num + 1));
                        iframe_element.get_mut().size = Some(IFrameSize {
                            pipeline_id: pipeline_id,
                            subpage_id: subpage_id,
                        });
                        iframe_chan.send(HtmlDiscoveredIFrame((iframe_url,
                                                               subpage_id,
                                                               sandboxed)));
                    }
                }
                _ => {}
            }
            unsafe { element.to_hubbub_node() }
        },
        create_text: |data: ~str| {
            debug!("create text");
            // NOTE: tmp vars are workaround for lifetime issues. Both required.
            let tmp_borrow = doc_cell.borrow();
            let tmp = &*tmp_borrow;
            let text = Text::new(data, *tmp);
            unsafe { text.to_hubbub_node() }
        },
        ref_node: |_| {},
        unref_node: |_| {},
        append_child: |parent: hubbub::NodeDataPtr, child: hubbub::NodeDataPtr| {
            unsafe {
                debug!("append child {:x} {:x}", parent, child);
                let mut parent: JS<Node> = NodeWrapping::from_hubbub_node(parent);
                let mut child: JS<Node> = NodeWrapping::from_hubbub_node(child);
                assert!(parent.AppendChild(&mut child).is_ok());
            }
            // Returns the raw child pointer (the closure parameter; the typed
            // `child` above is scoped to the unsafe block).
            child
        },
        insert_before: |_parent, _child| {
            debug!("insert before");
            0u
        },
        remove_child: |_parent, _child| {
            debug!("remove child");
            0u
        },
        clone_node: |_node, deep| {
            debug!("clone node");
            if deep { error!("-- deep clone unimplemented"); }
            fail!(~"clone node unimplemented")
        },
        reparent_children: |_node, _new_parent| {
            debug!("reparent children");
            0u
        },
        get_parent: |_node, _element_only| {
            debug!("get parent");
            0u
        },
        has_children: |_node| {
            debug!("has children");
            false
        },
        form_associate: |_form, _node| {
            debug!("form associate");
        },
        add_attributes: |_node, _attributes| {
            debug!("add attributes");
        },
        set_quirks_mode: |mode| {
            debug!("set quirks mode");
            // NOTE: tmp vars are workaround for lifetime issues. Both required.
            let mut tmp_borrow = doc_cell.borrow_mut();
            let tmp = &mut *tmp_borrow;
            tmp.get_mut().set_quirks_mode(mode);
        },
        encoding_change: |encname| {
            debug!("encoding change");
            // NOTE: tmp vars are workaround for lifetime issues. Both required.
            let mut tmp_borrow = doc_cell.borrow_mut();
            let tmp = &mut *tmp_borrow;
            tmp.get_mut().set_encoding_name(encname);
        },
        complete_script: |script| {
            unsafe {
                let script: JS<Element> = NodeWrapping::from_hubbub_node(script);
                match script.get_attribute(Null, "src") {
                    Some(src) => {
                        debug!("found script: {:s}", src.get().Value());
                        let new_url = parse_url(src.get().value_ref(), Some(url3.clone()));
                        js_chan2.send(JSTaskNewFile(new_url));
                    }
                    None => {
                        // Inline script: concatenate the text children.
                        let mut data = ~[];
                        let scriptnode: JS<Node> = NodeCast::from(&script);
                        debug!("iterating over children {:?}", scriptnode.first_child());
                        for child in scriptnode.children() {
                            debug!("child = {:?}", child);
                            let text: JS<Text> = TextCast::to(&child).unwrap();
                            data.push(text.get().characterdata.data.to_str());  // FIXME: Bad copy.
                        }
                        debug!("script data = {:?}", data);
                        js_chan2.send(JSTaskNewInlineScript(data.concat(), url3.clone()));
                    }
                }
            }
            debug!("complete script");
        },
        complete_style: |_| {
            // style parsing is handled in element::notify_child_list_changed.
        },
    };
    parser.set_tree_handler(&tree_handler);
    debug!("set tree handler");
    debug!("loaded page");
    // Feed the response body into the parser as it streams in.
    loop {
        match load_response.progress_port.recv() {
            Payload(data) => {
                debug!("received data");
                parser.parse_chunk(data);
            }
            Done(Err(..)) => {
                fail!("Failed to load page URL {:s}", url.to_str());
            }
            Done(..) => {
                break;
            }
        }
    }
    debug!("finished parsing");
    // Tell the helper tasks to flush their results to the discovery port.
    css_chan.send(CSSTaskExit);
    js_chan.send(JSTaskExit);
    HtmlParserResult {
        discovery_port: discovery_port,
    }
}
|
use cursive::traits::*;
use cursive::views;
use std::io::Read as _;
use std::io::Write as _;
use std::sync::{Arc, Mutex};
// This example builds a simple TCP server with some parameters and some output.
// It then builds a TUI to control the parameters and display the output.
/// Entry point: wires together the shared model, the background TCP
/// server, and the cursive TUI, then runs the event loop.
fn main() {
    let mut siv = cursive::Cursive::default();
    // Build a shared model
    let model = Arc::new(Mutex::new(Model {
        offset: 0,
        logs: Vec::new(),
        cb_sink: siv.cb_sink().clone(),
    }));
    // Start the TCP server in a thread
    start_server(Arc::clone(&model));
    // Build the UI from the model
    siv.add_layer(
        views::Dialog::around(build_ui(Arc::clone(&model)))
            .button("Quit", |s| s.quit()),
    );
    siv.run();
}
/// State shared between the TCP server thread and the UI.
struct Model {
    // Value added (wrapping) to every byte the server echoes back; set from the UI.
    offset: u8,
    // (input, response) byte pairs recorded by the server for display.
    logs: Vec<(u8, u8)>,
    // Lets the server thread schedule callbacks on the cursive event loop.
    cb_sink: cursive::CbSink,
}
/// Runs `serve` on a background thread; on failure, shows the error in a
/// dialog via the model's callback sink and offers to quit.
fn start_server(model: Arc<Mutex<Model>>) {
    std::thread::spawn(move || {
        if let Err(err) = serve(Arc::clone(&model)) {
            model
                .lock()
                .unwrap()
                .cb_sink
                .send(Box::new(move |s: &mut cursive::Cursive| {
                    s.add_layer(
                        views::Dialog::text(format!("{:?}", err))
                            .title("Error in TCP server")
                            .button("Quit", |s| s.quit()),
                    );
                }))
                .unwrap();
        }
    });
}
/// Single-threaded echo-with-offset server on localhost:1234.
///
/// For every byte received it replies with `byte + offset` (wrapping),
/// records the pair in the shared log, and wakes the UI so the log
/// view refreshes. Connections are handled sequentially.
fn serve(model: Arc<Mutex<Model>>) -> std::io::Result<()> {
    let listener = std::net::TcpListener::bind("localhost:1234")?;
    for stream in listener.incoming() {
        let stream = stream?;
        for byte in (&stream).bytes() {
            let byte = byte?;
            let mut model = model.lock().unwrap();
            let response = byte.wrapping_add(model.offset);
            model.logs.push((byte, response));
            (&stream).write_all(&[response])?;
            // A noop callback just forces cursive to redraw.
            model
                .cb_sink
                .send(Box::new(cursive::Cursive::noop))
                .unwrap();
        }
    }
    Ok(())
}
/// Render a byte for display: ASCII control codes become U+FFFD
/// (the replacement character), every other byte maps straight to
/// its `char` value.
fn readable_char(byte: u8) -> char {
    if !byte.is_ascii_control() {
        byte as char
    } else {
        '\u{FFFD}'
    }
}
/// Build a read-only canvas that renders one formatted line per
/// processed byte pair from the model's log.
fn build_log_viewer(model: Arc<Mutex<Model>>) -> impl cursive::view::View {
    views::Canvas::new(model)
        .with_draw(|model, printer| {
            let model = model.lock().unwrap();
            for (i, &(byte, answer)) in model.logs.iter().enumerate() {
                printer.print(
                    (0, i),
                    &format!(
                        "{:3} '{}' -> {:3} '{}'",
                        byte,
                        readable_char(byte),
                        answer,
                        readable_char(answer),
                    ),
                );
            }
        })
        .with_required_size(|model, _req| {
            let model = model.lock().unwrap();
            // Each rendered line is ~18 columns ("255 'x' -> 255 'x'");
            // request 20 so entries are not clipped (was 10, which
            // truncated every line).
            cursive::Vec2::new(20, model.logs.len())
        })
}
/// Build the row that lets the user edit the server's offset:
/// an edit box, an "Update" button that parses and applies it,
/// and a "Test" button that runs a test connection.
fn build_selector(model: Arc<Mutex<Model>>) -> impl cursive::view::View {
    views::LinearLayout::horizontal()
        .child(
            views::EditView::new()
                .content("0")
                .with_id("edit")
                .min_width(5),
        )
        .child(views::DummyView.fixed_width(1))
        .child(views::Button::new("Update", move |s| {
            // Read the edit view's content back by id and parse it as u8.
            if let Some(n) = s
                .call_on_id("edit", |edit: &mut views::EditView| {
                    edit.get_content()
                })
                .and_then(|content| content.parse().ok())
            {
                model.lock().unwrap().offset = n;
            } else {
                s.add_layer(views::Dialog::info(
                    "Could not parse offset as u8",
                ));
            }
        }))
        .child(views::DummyView.fixed_width(1))
        .child(views::Button::new("Test", |s| {
            if let Err(err) = test_server() {
                s.add_layer(
                    views::Dialog::info(format!("{:?}", err))
                        .title("Error running test."),
                );
            }
        }))
}
/// Connect to the local server and send a handful of bytes, reading back
/// one response byte per send (responses are discarded; the exchange
/// shows up in the shared log via the server).
fn test_server() -> std::io::Result<()> {
    let mut stream = std::net::TcpStream::connect("localhost:1234")?;
    for &byte in &[1, 2, 3, b'a', b'c', b'd'] {
        let mut buf = [0];
        stream.write_all(&[byte])?;
        stream.read_exact(&mut buf)?;
    }
    Ok(())
}
/// Assemble the full UI: the offset selector row stacked above the
/// log viewer.
fn build_ui(model: Arc<Mutex<Model>>) -> impl cursive::view::View {
    let selector = build_selector(Arc::clone(&model));
    let log_viewer = build_log_viewer(model);
    views::LinearLayout::vertical()
        .child(selector)
        .child(log_viewer)
}
Improve the tcp_server example: document the model, show the current offset in a tester row, and make the log view scrollable
use cursive::traits::*;
use cursive::views;
use std::io::{Read as _, Write as _};
use std::sync::{Arc, Mutex};
// This example builds a simple TCP server with some parameters and some output.
// It then builds a TUI to control the parameters and display the output.
/// Entry point: wires together the shared model, the background TCP
/// server, and the cursive TUI, then runs the event loop.
fn main() {
    let mut siv = cursive::Cursive::default();
    // Build a shared model
    let model = Arc::new(Mutex::new(ModelData {
        offset: 10,
        logs: Vec::new(),
        cb_sink: siv.cb_sink().clone(),
    }));
    // Start the TCP server in a thread
    start_server(Arc::clone(&model));
    // Build the UI from the model
    siv.add_layer(
        views::Dialog::around(build_ui(Arc::clone(&model)))
            .button("Quit", |s| s.quit()),
    );
    siv.run();
}
/// State shared between the TCP server thread and the UI.
struct ModelData {
    /// The offset will be controlled by the UI and used in the server
    offset: u8,
    /// Logs will be filled by the server and displayed on the UI
    logs: Vec<LogEntry>,
    /// A callback sink is used to control the UI from the server
    /// (eg. force refresh, error popups)
    cb_sink: cursive::CbSink,
}
// Here we use a single mutex, but bigger models might
// prefer individual mutexes for different variables.
type Model = Arc<Mutex<ModelData>>;
/// One processed byte: what the client sent and what the server replied.
#[derive(Clone, Copy)]
struct LogEntry {
    input: u8,
    output: u8,
}
/// Starts serving on a separate thread, and show a popup on error.
fn start_server(model: Model) {
std::thread::spawn(move || {
if let Err(err) = serve(Arc::clone(&model)) {
let model = model.lock().unwrap();
model
.cb_sink
.send(Box::new(move |s: &mut cursive::Cursive| {
s.add_layer(
views::Dialog::text(format!("{:?}", err))
.title("Error in TCP server")
.button("Quit", |s| s.quit()),
);
}))
.unwrap();
}
});
}
/// Starts a simple, single-threaded TCP server.
/// Adds a configurable offset to each byte received and sent it back.
fn serve(model: Model) -> std::io::Result<()> {
// Bind on some local address
let listener = std::net::TcpListener::bind("localhost:1234")?;
// Handle each connection sequentially
for stream in listener.incoming() {
let stream = stream?;
// Process each byte according to the current model.
for byte in (&stream).bytes() {
let byte = byte?;
let mut model = model.lock().unwrap();
let response = byte.wrapping_add(model.offset);
(&stream).write_all(&[response])?;
// Save processed jobs
model.logs.push(LogEntry {
input: byte,
output: response,
});
// Send a noop to refresh the display
model
.cb_sink
.send(Box::new(cursive::Cursive::noop))
.unwrap();
}
}
Ok(())
}
/// Build the UI for the given model.
fn build_ui(model: Model) -> impl cursive::view::View {
// Build the UI in 3 parts, stacked together in a LinearLayout.
views::LinearLayout::vertical()
.child(build_selector(Arc::clone(&model)))
.child(build_tester(Arc::clone(&model)))
.child(views::DummyView.fixed_height(1))
.child(build_log_viewer(Arc::clone(&model)))
}
/// Build a view that shows processed jobs from the model.
fn build_log_viewer(model: Model) -> impl cursive::view::View {
views::Canvas::new(model)
.with_draw(|model, printer| {
let model = model.lock().unwrap();
for (i, &log) in model.logs.iter().enumerate() {
printer.print(
(0, i),
&format!(
"{:3} '{}' -> {:3} '{}'",
log.input,
readable_char(log.input),
log.output,
readable_char(log.output),
),
);
}
})
.with_required_size(|model, _req| {
let model = model.lock().unwrap();
cursive::Vec2::new(20, model.logs.len())
})
.scrollable()
}
/// Pretty print an ascii u8 if possible.
fn readable_char(byte: u8) -> char {
if byte.is_ascii_control() {
'�'
} else {
byte as char
}
}
/// Build a view that can update the model.
fn build_selector(model: Model) -> impl cursive::view::View {
let offset = model.lock().unwrap().offset;
views::LinearLayout::horizontal()
.child(
views::EditView::new()
.content(format!("{}", offset))
.with_id("edit")
.min_width(5),
)
.child(views::DummyView.fixed_width(1))
.child(views::Button::new("Update", move |s| {
if let Some(n) = s
.call_on_id("edit", |edit: &mut views::EditView| {
edit.get_content()
})
.and_then(|content| content.parse().ok())
{
model.lock().unwrap().offset = n;
} else {
s.add_layer(views::Dialog::info(
"Could not parse offset as u8",
));
}
}))
}
/// Build a view that can run test connections.
fn build_tester(model: Model) -> impl cursive::view::View {
views::LinearLayout::horizontal()
.child(views::TextView::new("Current value:"))
.child(views::DummyView.fixed_width(1))
.child(
views::Canvas::new(model)
.with_draw(|model, printer| {
printer.print(
(0, 0),
&format!("{}", model.lock().unwrap().offset),
)
})
.with_required_size(|_, _| cursive::Vec2::new(3, 1)),
)
.child(views::DummyView.fixed_width(1))
.child(views::Button::new("Test", |s| {
if let Err(err) = test_server() {
s.add_layer(
views::Dialog::info(format!("{:?}", err))
.title("Error running test."),
);
}
}))
}
/// Run a test connection.
fn test_server() -> std::io::Result<()> {
let mut stream = std::net::TcpStream::connect("localhost:1234")?;
for &byte in b"cursive123" {
let mut buf = [0];
stream.write_all(&[byte])?;
stream.read_exact(&mut buf)?;
}
Ok(())
}
|
//! # OpenTelemetry OTLP Exporter
//!
//! The OpenTelemetry OTLP Exporter supports exporting of trace and metric data in the OTLP format.
mod proto;
mod span;
mod transform;
pub use crate::span::Exporter;
Export OTLP types (#175)
Export OTLP configuration types.
//! # OpenTelemetry OTLP Exporter
//!
//! The OpenTelemetry OTLP Exporter supports exporting of trace and metric data in the OTLP format.
mod proto;
mod span;
mod transform;
pub use crate::span::{Compression, Credentials, Exporter, ExporterConfig, Protocol};
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Conversions of Rust values to and from `JSVal`.
//!
//! | IDL type | Type |
//! |-------------------------|----------------------------------|
//! | any | `JSVal` |
//! | boolean | `bool` |
//! | byte | `i8` |
//! | octet | `u8` |
//! | short | `i16` |
//! | unsigned short | `u16` |
//! | long | `i32` |
//! | unsigned long | `u32` |
//! | long long | `i64` |
//! | unsigned long long | `u64` |
//! | unrestricted float | `f32` |
//! | float | `Finite<f32>` |
//! | unrestricted double | `f64` |
//! | double | `Finite<f64>` |
//! | USVString | `String` |
//! | object | `*mut JSObject` |
//! | nullable types | `Option<T>` |
//! | sequences | `Vec<T>` |
#![deny(missing_docs)]
use core::nonzero::NonZero;
use error::throw_type_error;
use glue::RUST_JS_NumberValue;
use jsapi::{ForOfIterator, ForOfIterator_NonIterableBehavior, HandleValue};
use jsapi::{Heap, JS_DefineElement, JS_GetLatin1StringCharsAndLength};
use jsapi::{JS_GetTwoByteStringCharsAndLength, JS_NewArrayObject1};
use jsapi::{JS_NewUCStringCopyN, JSPROP_ENUMERATE, JS_StringHasLatin1Chars};
use jsapi::{JSContext, JSObject, JSString, MutableHandleValue, RootedObject};
use jsval::{BooleanValue, Int32Value, NullValue, UInt32Value, UndefinedValue};
use jsval::{JSVal, ObjectValue, ObjectOrNullValue, StringValue};
use rust::{ToBoolean, ToInt32, ToInt64, ToNumber, ToUint16, ToUint32, ToUint64};
use rust::{ToString, maybe_wrap_object_or_null_value};
use rust::{maybe_wrap_object_value, maybe_wrap_value};
use libc;
use num_traits::{Bounded, Zero};
use std::borrow::Cow;
use std::rc::Rc;
use std::{ptr, slice};
/// Internal helper: an infallible numeric cast (the `as` operator)
/// between the source and destination types used by the integer
/// conversions below.
trait As<O>: Copy {
    fn cast(self) -> O;
}
// Expands to an `As<$O>` impl for `$I` that simply performs `self as $O`.
macro_rules! impl_as {
    ($I:ty, $O:ty) => (
        impl As<$O> for $I {
            fn cast(self) -> $O {
                self as $O
            }
        }
    )
}
// f64 -> integral casts used by enforce_range/clamp_to results.
impl_as!(f64, u8);
impl_as!(f64, u16);
impl_as!(f64, u32);
impl_as!(f64, u64);
impl_as!(f64, i8);
impl_as!(f64, i16);
impl_as!(f64, i32);
impl_as!(f64, i64);
// integral -> f64 casts, for comparing values against Bounded limits.
impl_as!(u8, f64);
impl_as!(u16, f64);
impl_as!(u32, f64);
impl_as!(u64, f64);
impl_as!(i8, f64);
impl_as!(i16, f64);
impl_as!(i32, f64);
impl_as!(i64, f64);
// Casts from each raw conversion result type (ToInt32/ToUint16/...) to
// its WebIDL destination type; see convert_int_from_jsval's `M: As<T>`
// bound. The identity impls (u16->u16, i32->i32, ...) exist for the
// cases where source and destination coincide.
impl_as!(i32, i8);
impl_as!(i32, u8);
impl_as!(i32, i16);
impl_as!(u16, u16);
impl_as!(i32, i32);
impl_as!(u32, u32);
impl_as!(i64, i64);
impl_as!(u64, u64);
/// A trait to convert Rust types to `JSVal`s.
pub trait ToJSValConvertible {
/// Convert `self` to a `JSVal`. JSAPI failure causes a panic.
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue);
}
/// An enum to better support enums through FromJSValConvertible::from_jsval.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum ConversionResult<T> {
    /// Everything went fine.
    Success(T),
    /// Pending exception.
    Failure(Cow<'static, str>),
}
impl<T> ConversionResult<T> {
    /// Returns Some(value) if it is `ConversionResult::Success`.
    pub fn get_success_value(&self) -> Option<&T> {
        match *self {
            ConversionResult::Success(ref v) => Some(v),
            // Failure carries no value to borrow.
            _ => None,
        }
    }
}
/// A trait to convert `JSVal`s to Rust types.
pub trait FromJSValConvertible: Sized {
    /// Optional configurable behaviour switch; use () for no configuration.
    type Config;
    /// Convert `val` to type `Self`.
    /// Optional configuration of type `T` can be passed as the `option`
    /// argument.
    /// If it returns `Err(())`, a JSAPI exception is pending.
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: Self::Config)
                         -> Result<ConversionResult<Self>, ()>;
}
/// Behavior for converting out-of-range integers.
/// These correspond to plain, `[EnforceRange]`, and `[Clamp]` WebIDL
/// integer conversions (see convert_int_from_jsval).
#[derive(PartialEq, Eq, Clone)]
pub enum ConversionBehavior {
    /// Wrap into the integer's range.
    Default,
    /// Throw an exception.
    EnforceRange,
    /// Clamp into the integer's range.
    Clamp,
}
/// Try to cast the number to a smaller type, but
/// if it doesn't fit, it will return an error.
unsafe fn enforce_range<D>(cx: *mut JSContext, d: f64) -> Result<ConversionResult<D>, ()>
    where D: Bounded + As<f64>,
          f64: As<D>
{
    if d.is_infinite() {
        throw_type_error(cx, "value out of range in an EnforceRange argument");
        return Err(());
    }
    let rounded = d.round();
    // NaN also ends up here: both comparisons below are false for NaN,
    // so it is rejected with the same type error.
    if D::min_value().cast() <= rounded && rounded <= D::max_value().cast() {
        Ok(ConversionResult::Success(rounded.cast()))
    } else {
        throw_type_error(cx, "value out of range in an EnforceRange argument");
        Err(())
    }
}
/// Cast `d` to the destination type `D`, saturating out-of-range values
/// at `D`'s MIN/MAX bounds and mapping NaN to zero (WebIDL `[Clamp]`
/// semantics).
fn clamp_to<D>(d: f64) -> D
    where D: Bounded + As<f64> + Zero,
          f64: As<D>
{
    if d.is_nan() {
        return D::zero();
    }
    let hi: f64 = D::max_value().cast();
    let lo: f64 = D::min_value().cast();
    if d > hi {
        D::max_value()
    } else if d < lo {
        D::min_value()
    } else {
        d.cast()
    }
}
// https://heycam.github.io/webidl/#es-void
impl ToJSValConvertible for () {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        // WebIDL "void" maps to the JS `undefined` value.
        rval.set(UndefinedValue());
    }
}
// `any` round-trips as a raw JSVal with no conversion.
impl FromJSValConvertible for JSVal {
    type Config = ();
    unsafe fn from_jsval(_cx: *mut JSContext,
                         value: HandleValue,
                         _option: ())
                         -> Result<ConversionResult<JSVal>, ()> {
        Ok(ConversionResult::Success(value.get()))
    }
}
impl FromJSValConvertible for Heap<JSVal> {
    type Config = ();
    unsafe fn from_jsval(_cx: *mut JSContext,
                         value: HandleValue,
                         _option: ())
                         -> Result<ConversionResult<Self>, ()> {
        Ok(ConversionResult::Success(Heap::new(value.get())))
    }
}
// The To impls copy the value and then let maybe_wrap_value handle
// cross-compartment wrapping.
impl ToJSValConvertible for JSVal {
    #[inline]
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(*self);
        maybe_wrap_value(cx, rval);
    }
}
impl ToJSValConvertible for HandleValue {
    #[inline]
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(self.get());
        maybe_wrap_value(cx, rval);
    }
}
impl ToJSValConvertible for Heap<JSVal> {
    #[inline]
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(self.get());
        maybe_wrap_value(cx, rval);
    }
}
/// Shared driver for all WebIDL integer conversions: applies `convert_fn`
/// (one of the JSAPI ToInt32/ToUint16/... helpers, whose raw result type
/// is `M`) and then post-processes per `option` — wrap (Default),
/// throw on out-of-range (EnforceRange), or saturate (Clamp).
#[inline]
unsafe fn convert_int_from_jsval<T, M>(cx: *mut JSContext, value: HandleValue,
                                       option: ConversionBehavior,
                                       convert_fn: unsafe fn(*mut JSContext, HandleValue) -> Result<M, ()>)
                                       -> Result<ConversionResult<T>, ()>
    where T: Bounded + Zero + As<f64>,
          M: Zero + As<T>,
          f64: As<T>
{
    match option {
        ConversionBehavior::Default => Ok(ConversionResult::Success(try!(convert_fn(cx, value)).cast())),
        ConversionBehavior::EnforceRange => enforce_range(cx, try!(ToNumber(cx, value))),
        ConversionBehavior::Clamp => Ok(ConversionResult::Success(clamp_to(try!(ToNumber(cx, value))))),
    }
}
// https://heycam.github.io/webidl/#es-boolean
impl ToJSValConvertible for bool {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(BooleanValue(*self));
    }
}
// https://heycam.github.io/webidl/#es-boolean
impl FromJSValConvertible for bool {
    type Config = ();
    unsafe fn from_jsval(_cx: *mut JSContext, val: HandleValue, _option: ()) -> Result<ConversionResult<bool>, ()> {
        // ToBoolean cannot fail, so this is always Ok(Success(..)).
        Ok(ToBoolean(val)).map(ConversionResult::Success)
    }
}
// Small integer types (byte/octet/short/unsigned short) are represented
// as int32 JS values on the way out, and converted back through
// convert_int_from_jsval with the matching JSAPI helper.
// https://heycam.github.io/webidl/#es-byte
impl ToJSValConvertible for i8 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(Int32Value(*self as i32));
    }
}
// https://heycam.github.io/webidl/#es-byte
impl FromJSValConvertible for i8 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<i8>, ()> {
        convert_int_from_jsval(cx, val, option, ToInt32)
    }
}
// https://heycam.github.io/webidl/#es-octet
impl ToJSValConvertible for u8 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(Int32Value(*self as i32));
    }
}
// https://heycam.github.io/webidl/#es-octet
impl FromJSValConvertible for u8 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<u8>, ()> {
        convert_int_from_jsval(cx, val, option, ToInt32)
    }
}
// https://heycam.github.io/webidl/#es-short
impl ToJSValConvertible for i16 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(Int32Value(*self as i32));
    }
}
// https://heycam.github.io/webidl/#es-short
impl FromJSValConvertible for i16 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<i16>, ()> {
        convert_int_from_jsval(cx, val, option, ToInt32)
    }
}
// https://heycam.github.io/webidl/#es-unsigned-short
impl ToJSValConvertible for u16 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(Int32Value(*self as i32));
    }
}
// https://heycam.github.io/webidl/#es-unsigned-short
impl FromJSValConvertible for u16 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<u16>, ()> {
        convert_int_from_jsval(cx, val, option, ToUint16)
    }
}
// 32-bit types fit JS int32/uint32 values directly; the 64-bit types are
// represented as JS doubles (losing precision beyond 2^53, per WebIDL).
// https://heycam.github.io/webidl/#es-long
impl ToJSValConvertible for i32 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(Int32Value(*self));
    }
}
// https://heycam.github.io/webidl/#es-long
impl FromJSValConvertible for i32 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<i32>, ()> {
        convert_int_from_jsval(cx, val, option, ToInt32)
    }
}
// https://heycam.github.io/webidl/#es-unsigned-long
impl ToJSValConvertible for u32 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(UInt32Value(*self));
    }
}
// https://heycam.github.io/webidl/#es-unsigned-long
impl FromJSValConvertible for u32 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<u32>, ()> {
        convert_int_from_jsval(cx, val, option, ToUint32)
    }
}
// https://heycam.github.io/webidl/#es-long-long
impl ToJSValConvertible for i64 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(RUST_JS_NumberValue(*self as f64));
    }
}
// https://heycam.github.io/webidl/#es-long-long
impl FromJSValConvertible for i64 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<i64>, ()> {
        convert_int_from_jsval(cx, val, option, ToInt64)
    }
}
// https://heycam.github.io/webidl/#es-unsigned-long-long
impl ToJSValConvertible for u64 {
    #[inline]
    unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
        rval.set(RUST_JS_NumberValue(*self as f64));
    }
}
// https://heycam.github.io/webidl/#es-unsigned-long-long
impl FromJSValConvertible for u64 {
    type Config = ConversionBehavior;
    unsafe fn from_jsval(cx: *mut JSContext,
                         val: HandleValue,
                         option: ConversionBehavior)
                         -> Result<ConversionResult<u64>, ()> {
        convert_int_from_jsval(cx, val, option, ToUint64)
    }
}
// https://heycam.github.io/webidl/#es-float
impl ToJSValConvertible for f32 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(RUST_JS_NumberValue(*self as f64));
}
}
// https://heycam.github.io/webidl/#es-float
impl FromJSValConvertible for f32 {
    type Config = ();
    /// Convert a `JSVal` to an `f32` by first applying the ES ToNumber
    /// operation (yielding an `f64`) and then narrowing to single precision.
    /// Returns `Err(())` with a pending JSAPI exception if ToNumber fails.
    unsafe fn from_jsval(cx: *mut JSContext, val: HandleValue, _option: ()) -> Result<ConversionResult<f32>, ()> {
        // Fold the narrowing cast and the Success wrapping into a single
        // `map` instead of chaining two maps over the same Result.
        ToNumber(cx, val).map(|f| ConversionResult::Success(f as f32))
    }
}
// https://heycam.github.io/webidl/#es-double
impl ToJSValConvertible for f64 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(RUST_JS_NumberValue(*self));
}
}
// https://heycam.github.io/webidl/#es-double
impl FromJSValConvertible for f64 {
type Config = ();
unsafe fn from_jsval(cx: *mut JSContext, val: HandleValue, _option: ()) -> Result<ConversionResult<f64>, ()> {
ToNumber(cx, val).map(ConversionResult::Success)
}
}
/// Converts a `JSString`, encoded in "Latin1" (i.e. U+0000-U+00FF encoded as 0x00-0xFF) into a
/// `String`.
pub unsafe fn latin1_to_string(cx: *mut JSContext, s: *mut JSString) -> String {
assert!(JS_StringHasLatin1Chars(s));
let mut length = 0;
let chars = JS_GetLatin1StringCharsAndLength(cx, ptr::null(), s, &mut length);
assert!(!chars.is_null());
let chars = slice::from_raw_parts(chars, length as usize);
let mut s = String::with_capacity(length as usize);
s.extend(chars.iter().map(|&c| c as char));
s
}
// https://heycam.github.io/webidl/#es-USVString
impl ToJSValConvertible for str {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
let mut string_utf16: Vec<u16> = Vec::with_capacity(self.len());
string_utf16.extend(self.encode_utf16());
let jsstr = JS_NewUCStringCopyN(cx,
string_utf16.as_ptr(),
string_utf16.len() as libc::size_t);
if jsstr.is_null() {
panic!("JS_NewUCStringCopyN failed");
}
rval.set(StringValue(&*jsstr));
}
}
// https://heycam.github.io/webidl/#es-USVString
impl ToJSValConvertible for String {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
(**self).to_jsval(cx, rval);
}
}
// https://heycam.github.io/webidl/#es-USVString
impl FromJSValConvertible for String {
    type Config = ();
    /// Convert a `JSVal` to a `String` via the ES ToString operation.
    ///
    /// Returns `Err(())` (with a pending JSAPI exception) when `ToString`
    /// fails. For two-byte strings, lone surrogates are replaced with
    /// U+FFFD by `String::from_utf16_lossy`.
    unsafe fn from_jsval(cx: *mut JSContext, value: HandleValue, _: ()) -> Result<ConversionResult<String>, ()> {
        let jsstr = ToString(cx, value);
        if jsstr.is_null() {
            debug!("ToString failed");
            return Err(());
        }
        if JS_StringHasLatin1Chars(jsstr) {
            // Latin1 fast path; build the Success value directly instead of
            // the redundant `Ok(..).map(ConversionResult::Success)`.
            return Ok(ConversionResult::Success(latin1_to_string(cx, jsstr)));
        }
        let mut length = 0;
        let chars = JS_GetTwoByteStringCharsAndLength(cx, ptr::null(), jsstr, &mut length);
        assert!(!chars.is_null());
        let char_vec = slice::from_raw_parts(chars, length as usize);
        Ok(ConversionResult::Success(String::from_utf16_lossy(char_vec)))
    }
}
impl<T: ToJSValConvertible> ToJSValConvertible for Option<T> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
match self {
&Some(ref value) => value.to_jsval(cx, rval),
&None => rval.set(NullValue()),
}
}
}
impl<T: ToJSValConvertible> ToJSValConvertible for Rc<T> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
(**self).to_jsval(cx, rval)
}
}
impl<T: FromJSValConvertible> FromJSValConvertible for Option<T> {
type Config = T::Config;
unsafe fn from_jsval(cx: *mut JSContext,
value: HandleValue,
option: T::Config)
-> Result<ConversionResult<Option<T>>, ()> {
if value.get().is_null_or_undefined() {
Ok(ConversionResult::Success(None))
} else {
Ok(match try!(FromJSValConvertible::from_jsval(cx, value, option)) {
ConversionResult::Success(v) => ConversionResult::Success(Some(v)),
ConversionResult::Failure(v) => ConversionResult::Failure(v),
})
}
}
}
// https://heycam.github.io/webidl/#es-sequence
impl<T: ToJSValConvertible> ToJSValConvertible for Vec<T> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rooted!(in(cx) let js_array = JS_NewArrayObject1(cx, self.len() as libc::size_t));
assert!(!js_array.handle().is_null());
rooted!(in(cx) let mut val = UndefinedValue());
for (index, obj) in self.iter().enumerate() {
obj.to_jsval(cx, val.handle_mut());
assert!(JS_DefineElement(cx, js_array.handle(),
index as u32, val.handle(), JSPROP_ENUMERATE, None, None));
}
rval.set(ObjectValue(js_array.handle().get()));
}
}
/// Rooting guard for the iterator field of ForOfIterator.
/// Behaves like RootedGuard (roots on creation, unroots on drop),
/// but borrows and allows access to the whole ForOfIterator, so
/// that methods on ForOfIterator can still be used through it.
struct ForOfIteratorGuard<'a> {
root: &'a mut ForOfIterator
}
impl<'a> ForOfIteratorGuard<'a> {
fn new(cx: *mut JSContext, root: &'a mut ForOfIterator) -> Self {
unsafe {
root.iterator.add_to_root_stack(cx);
}
ForOfIteratorGuard {
root: root
}
}
}
impl<'a> Drop for ForOfIteratorGuard<'a> {
fn drop(&mut self) {
unsafe {
self.root.iterator.remove_from_root_stack();
}
}
}
impl<C: Clone, T: FromJSValConvertible<Config=C>> FromJSValConvertible for Vec<T> {
    type Config = C;
    /// Convert a `JSVal` to a `Vec<T>` by driving the value's iteration
    /// protocol (WebIDL sequence conversion).
    ///
    /// Returns `Ok(Failure(..))` when the value is not iterable or an
    /// element fails to convert, and `Err(())` when JSAPI iteration itself
    /// fails (an exception is then pending).
    unsafe fn from_jsval(cx: *mut JSContext,
                         value: HandleValue,
                         option: C)
                         -> Result<ConversionResult<Vec<T>>, ()> {
        let mut iterator = ForOfIterator {
            cx_: cx,
            iterator: RootedObject::new_unrooted(),
            index: ::std::u32::MAX, // NOT_ARRAY
        };
        // Root the iterator object for the duration of the conversion.
        let mut iterator = ForOfIteratorGuard::new(cx, &mut iterator);
        let iterator = &mut *iterator.root;
        if !iterator.init(value, ForOfIterator_NonIterableBehavior::AllowNonIterable) {
            return Err(())
        }
        // With AllowNonIterable, init() succeeds but leaves the iterator
        // null when the value has no iteration protocol; report that as a
        // conversion Failure rather than a JSAPI error.
        if iterator.iterator.ptr.is_null() {
            return Ok(ConversionResult::Failure("Value is not iterable".into()));
        }
        let mut ret = vec![];
        loop {
            let mut done = false;
            rooted!(in(cx) let mut val = UndefinedValue());
            if !iterator.next(val.handle_mut(), &mut done) {
                return Err(())
            }
            if done {
                break;
            }
            ret.push(match try!(T::from_jsval(cx, val.handle(), option.clone())) {
                ConversionResult::Success(v) => v,
                ConversionResult::Failure(e) => return Ok(ConversionResult::Failure(e)),
            });
        }
        // Construct the result directly instead of `Ok(ret).map(..)`.
        Ok(ConversionResult::Success(ret))
    }
}
// https://heycam.github.io/webidl/#es-object
impl ToJSValConvertible for *mut JSObject {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(ObjectOrNullValue(*self));
maybe_wrap_object_or_null_value(cx, rval);
}
}
// https://heycam.github.io/webidl/#es-object
impl ToJSValConvertible for NonZero<*mut JSObject> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(ObjectValue(**self));
maybe_wrap_object_value(cx, rval);
}
}
// https://heycam.github.io/webidl/#es-object
impl ToJSValConvertible for Heap<*mut JSObject> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(ObjectOrNullValue(self.get()));
maybe_wrap_object_or_null_value(cx, rval);
}
}
Implement FromJSValConvertible for HandleValue
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Conversions of Rust values to and from `JSVal`.
//!
//! | IDL type | Type |
//! |-------------------------|----------------------------------|
//! | any | `JSVal` |
//! | boolean | `bool` |
//! | byte | `i8` |
//! | octet | `u8` |
//! | short | `i16` |
//! | unsigned short | `u16` |
//! | long | `i32` |
//! | unsigned long | `u32` |
//! | long long | `i64` |
//! | unsigned long long | `u64` |
//! | unrestricted float | `f32` |
//! | float | `Finite<f32>` |
//! | unrestricted double | `f64` |
//! | double | `Finite<f64>` |
//! | USVString | `String` |
//! | object | `*mut JSObject` |
//! | nullable types | `Option<T>` |
//! | sequences | `Vec<T>` |
#![deny(missing_docs)]
use core::nonzero::NonZero;
use error::throw_type_error;
use glue::RUST_JS_NumberValue;
use jsapi::AssertSameCompartment;
use jsapi::{ForOfIterator, ForOfIterator_NonIterableBehavior, HandleValue};
use jsapi::{Heap, JS_DefineElement, JS_GetLatin1StringCharsAndLength};
use jsapi::{JS_GetTwoByteStringCharsAndLength, JS_NewArrayObject1};
use jsapi::{JS_NewUCStringCopyN, JSPROP_ENUMERATE, JS_StringHasLatin1Chars};
use jsapi::{JSContext, JSObject, JSString, MutableHandleValue, RootedObject};
use jsval::{BooleanValue, Int32Value, NullValue, UInt32Value, UndefinedValue};
use jsval::{JSVal, ObjectValue, ObjectOrNullValue, StringValue};
use rust::{ToBoolean, ToInt32, ToInt64, ToNumber, ToUint16, ToUint32, ToUint64};
use rust::{ToString, maybe_wrap_object_or_null_value};
use rust::{maybe_wrap_object_value, maybe_wrap_value};
use libc;
use num_traits::{Bounded, Zero};
use std::borrow::Cow;
use std::rc::Rc;
use std::{ptr, slice};
trait As<O>: Copy {
fn cast(self) -> O;
}
macro_rules! impl_as {
($I:ty, $O:ty) => (
impl As<$O> for $I {
fn cast(self) -> $O {
self as $O
}
}
)
}
impl_as!(f64, u8);
impl_as!(f64, u16);
impl_as!(f64, u32);
impl_as!(f64, u64);
impl_as!(f64, i8);
impl_as!(f64, i16);
impl_as!(f64, i32);
impl_as!(f64, i64);
impl_as!(u8, f64);
impl_as!(u16, f64);
impl_as!(u32, f64);
impl_as!(u64, f64);
impl_as!(i8, f64);
impl_as!(i16, f64);
impl_as!(i32, f64);
impl_as!(i64, f64);
impl_as!(i32, i8);
impl_as!(i32, u8);
impl_as!(i32, i16);
impl_as!(u16, u16);
impl_as!(i32, i32);
impl_as!(u32, u32);
impl_as!(i64, i64);
impl_as!(u64, u64);
/// A trait to convert Rust types to `JSVal`s.
pub trait ToJSValConvertible {
/// Convert `self` to a `JSVal`. JSAPI failure causes a panic.
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue);
}
/// An enum to better support enums through FromJSValConvertible::from_jsval.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum ConversionResult<T> {
/// Everything went fine.
Success(T),
/// Pending exception.
Failure(Cow<'static, str>),
}
impl<T> ConversionResult<T> {
/// Returns Some(value) if it is `ConversionResult::Success`.
pub fn get_success_value(&self) -> Option<&T> {
match *self {
ConversionResult::Success(ref v) => Some(v),
_ => None,
}
}
}
/// A trait to convert `JSVal`s to Rust types.
pub trait FromJSValConvertible: Sized {
/// Optional configurable behaviour switch; use () for no configuration.
type Config;
/// Convert `val` to type `Self`.
/// Optional configuration of type `T` can be passed as the `option`
/// argument.
/// If it returns `Err(())`, a JSAPI exception is pending.
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: Self::Config)
-> Result<ConversionResult<Self>, ()>;
}
/// Behavior for converting out-of-range integers.
#[derive(PartialEq, Eq, Clone)]
pub enum ConversionBehavior {
/// Wrap into the integer's range.
Default,
/// Throw an exception.
EnforceRange,
/// Clamp into the integer's range.
Clamp,
}
/// Try to cast the number to a smaller type, but
/// if it doesn't fit, it will return an error.
unsafe fn enforce_range<D>(cx: *mut JSContext, d: f64) -> Result<ConversionResult<D>, ()>
    where D: Bounded + As<f64>,
          f64: As<D>
{
    // Infinities can never be in range, so reject them up front
    // (this also avoids rounding an infinite value below).
    if d.is_infinite() {
        throw_type_error(cx, "value out of range in an EnforceRange argument");
        return Err(());
    }
    // Round to an integral value before the range check; NaN stays NaN and
    // therefore fails both comparisons below, taking the error path.
    let rounded = d.round();
    if D::min_value().cast() <= rounded && rounded <= D::max_value().cast() {
        // In range: the cast back to the integer type is now exact.
        Ok(ConversionResult::Success(rounded.cast()))
    } else {
        // Out of range: throw a TypeError and signal a pending exception.
        throw_type_error(cx, "value out of range in an EnforceRange argument");
        Err(())
    }
}
/// Try to cast the number to a smaller type, but if it doesn't fit,
/// round it to the MAX or MIN of the source type before casting it to
/// the destination type.
/// Saturating cast from `f64` to `D`: values beyond `D`'s bounds are
/// pinned to `D::max_value()`/`D::min_value()`, and NaN maps to zero.
fn clamp_to<D>(d: f64) -> D
    where D: Bounded + As<f64> + Zero,
          f64: As<D>
{
    // NaN compares false against everything, so handle it first.
    if d.is_nan() {
        return D::zero();
    }
    let hi = D::max_value();
    let lo = D::min_value();
    if d > hi.cast() {
        hi
    } else if d < lo.cast() {
        lo
    } else {
        d.cast()
    }
}
// https://heycam.github.io/webidl/#es-void
impl ToJSValConvertible for () {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(UndefinedValue());
}
}
impl FromJSValConvertible for HandleValue {
    type Config = ();
    /// "Convert" a `JSVal` by passing the handle straight through.
    ///
    /// For object values this calls `AssertSameCompartment` as a sanity
    /// check on the object's compartment; no wrapping or copying occurs.
    #[inline]
    unsafe fn from_jsval(cx: *mut JSContext,
                         value: HandleValue,
                         _option: ())
                         -> Result<ConversionResult<HandleValue>, ()> {
        if value.is_object() {
            AssertSameCompartment(cx, value.to_object());
        }
        Ok(ConversionResult::Success(value))
    }
}
impl FromJSValConvertible for JSVal {
type Config = ();
unsafe fn from_jsval(_cx: *mut JSContext,
value: HandleValue,
_option: ())
-> Result<ConversionResult<JSVal>, ()> {
Ok(ConversionResult::Success(value.get()))
}
}
impl FromJSValConvertible for Heap<JSVal> {
type Config = ();
unsafe fn from_jsval(_cx: *mut JSContext,
value: HandleValue,
_option: ())
-> Result<ConversionResult<Self>, ()> {
Ok(ConversionResult::Success(Heap::new(value.get())))
}
}
impl ToJSValConvertible for JSVal {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(*self);
maybe_wrap_value(cx, rval);
}
}
impl ToJSValConvertible for HandleValue {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(self.get());
maybe_wrap_value(cx, rval);
}
}
impl ToJSValConvertible for Heap<JSVal> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(self.get());
maybe_wrap_value(cx, rval);
}
}
// Shared implementation for all integer FromJSValConvertible impls:
// dispatches on the requested WebIDL conversion behavior and delegates the
// actual numeric conversion to the supplied JSAPI routine.
#[inline]
unsafe fn convert_int_from_jsval<T, M>(cx: *mut JSContext, value: HandleValue,
                                       option: ConversionBehavior,
                                       convert_fn: unsafe fn(*mut JSContext, HandleValue) -> Result<M, ()>)
                                       -> Result<ConversionResult<T>, ()>
    where T: Bounded + Zero + As<f64>,
          M: Zero + As<T>,
          f64: As<T>
{
    match option {
        // Plain conversion: run the JSAPI routine (e.g. ToInt32/ToUint64)
        // and cast the result to the target width.
        ConversionBehavior::Default => Ok(ConversionResult::Success(try!(convert_fn(cx, value)).cast())),
        // [EnforceRange]: convert to a number, then throw a TypeError for
        // any out-of-range value (see enforce_range).
        ConversionBehavior::EnforceRange => enforce_range(cx, try!(ToNumber(cx, value))),
        // [Clamp]: convert to a number, then saturate to T's bounds
        // (NaN becomes zero; see clamp_to).
        ConversionBehavior::Clamp => Ok(ConversionResult::Success(clamp_to(try!(ToNumber(cx, value))))),
    }
}
// https://heycam.github.io/webidl/#es-boolean
impl ToJSValConvertible for bool {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(BooleanValue(*self));
}
}
// https://heycam.github.io/webidl/#es-boolean
impl FromJSValConvertible for bool {
    type Config = ();
    /// Convert a `JSVal` to a `bool` via the ES ToBoolean operation.
    /// This conversion is infallible, so the result is always `Success`.
    unsafe fn from_jsval(_cx: *mut JSContext, val: HandleValue, _option: ()) -> Result<ConversionResult<bool>, ()> {
        // `Ok(x).map(f)` is just `Ok(f(x))`; construct the result directly.
        Ok(ConversionResult::Success(ToBoolean(val)))
    }
}
// https://heycam.github.io/webidl/#es-byte
impl ToJSValConvertible for i8 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(Int32Value(*self as i32));
}
}
// https://heycam.github.io/webidl/#es-byte
impl FromJSValConvertible for i8 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<i8>, ()> {
convert_int_from_jsval(cx, val, option, ToInt32)
}
}
// https://heycam.github.io/webidl/#es-octet
impl ToJSValConvertible for u8 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(Int32Value(*self as i32));
}
}
// https://heycam.github.io/webidl/#es-octet
impl FromJSValConvertible for u8 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<u8>, ()> {
convert_int_from_jsval(cx, val, option, ToInt32)
}
}
// https://heycam.github.io/webidl/#es-short
impl ToJSValConvertible for i16 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(Int32Value(*self as i32));
}
}
// https://heycam.github.io/webidl/#es-short
impl FromJSValConvertible for i16 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<i16>, ()> {
convert_int_from_jsval(cx, val, option, ToInt32)
}
}
// https://heycam.github.io/webidl/#es-unsigned-short
impl ToJSValConvertible for u16 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(Int32Value(*self as i32));
}
}
// https://heycam.github.io/webidl/#es-unsigned-short
impl FromJSValConvertible for u16 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<u16>, ()> {
convert_int_from_jsval(cx, val, option, ToUint16)
}
}
// https://heycam.github.io/webidl/#es-long
impl ToJSValConvertible for i32 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(Int32Value(*self));
}
}
// https://heycam.github.io/webidl/#es-long
impl FromJSValConvertible for i32 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<i32>, ()> {
convert_int_from_jsval(cx, val, option, ToInt32)
}
}
// https://heycam.github.io/webidl/#es-unsigned-long
impl ToJSValConvertible for u32 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(UInt32Value(*self));
}
}
// https://heycam.github.io/webidl/#es-unsigned-long
impl FromJSValConvertible for u32 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<u32>, ()> {
convert_int_from_jsval(cx, val, option, ToUint32)
}
}
// https://heycam.github.io/webidl/#es-long-long
impl ToJSValConvertible for i64 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(RUST_JS_NumberValue(*self as f64));
}
}
// https://heycam.github.io/webidl/#es-long-long
impl FromJSValConvertible for i64 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<i64>, ()> {
convert_int_from_jsval(cx, val, option, ToInt64)
}
}
// https://heycam.github.io/webidl/#es-unsigned-long-long
impl ToJSValConvertible for u64 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(RUST_JS_NumberValue(*self as f64));
}
}
// https://heycam.github.io/webidl/#es-unsigned-long-long
impl FromJSValConvertible for u64 {
type Config = ConversionBehavior;
unsafe fn from_jsval(cx: *mut JSContext,
val: HandleValue,
option: ConversionBehavior)
-> Result<ConversionResult<u64>, ()> {
convert_int_from_jsval(cx, val, option, ToUint64)
}
}
// https://heycam.github.io/webidl/#es-float
impl ToJSValConvertible for f32 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(RUST_JS_NumberValue(*self as f64));
}
}
// https://heycam.github.io/webidl/#es-float
impl FromJSValConvertible for f32 {
    type Config = ();
    /// Convert a `JSVal` to an `f32` by first applying the ES ToNumber
    /// operation (yielding an `f64`) and then narrowing to single precision.
    /// Returns `Err(())` with a pending JSAPI exception if ToNumber fails.
    unsafe fn from_jsval(cx: *mut JSContext, val: HandleValue, _option: ()) -> Result<ConversionResult<f32>, ()> {
        // Fold the narrowing cast and the Success wrapping into a single
        // `map` instead of chaining two maps over the same Result.
        ToNumber(cx, val).map(|f| ConversionResult::Success(f as f32))
    }
}
// https://heycam.github.io/webidl/#es-double
impl ToJSValConvertible for f64 {
#[inline]
unsafe fn to_jsval(&self, _cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(RUST_JS_NumberValue(*self));
}
}
// https://heycam.github.io/webidl/#es-double
impl FromJSValConvertible for f64 {
type Config = ();
unsafe fn from_jsval(cx: *mut JSContext, val: HandleValue, _option: ()) -> Result<ConversionResult<f64>, ()> {
ToNumber(cx, val).map(ConversionResult::Success)
}
}
/// Converts a `JSString`, encoded in "Latin1" (i.e. U+0000-U+00FF encoded as 0x00-0xFF) into a
/// `String`.
pub unsafe fn latin1_to_string(cx: *mut JSContext, s: *mut JSString) -> String {
    // Precondition: the string really is Latin1-encoded.
    assert!(JS_StringHasLatin1Chars(s));
    let mut length = 0;
    let chars = JS_GetLatin1StringCharsAndLength(cx, ptr::null(), s, &mut length);
    assert!(!chars.is_null());
    // The pointer/length pair comes straight from the JSAPI call above and
    // is only used within this function while `s` is alive.
    let chars = slice::from_raw_parts(chars, length as usize);
    // Each Latin1 byte's value is exactly the Unicode code point it
    // represents, so `c as char` is lossless. Note the capacity is a lower
    // bound: bytes >= 0x80 encode to two UTF-8 bytes, so the String may
    // still grow once.
    let mut s = String::with_capacity(length as usize);
    s.extend(chars.iter().map(|&c| c as char));
    s
}
// https://heycam.github.io/webidl/#es-USVString
impl ToJSValConvertible for str {
    /// Convert a Rust string slice to a JS string value by re-encoding it
    /// as UTF-16 and copying it into a new `JSString`.
    #[inline]
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        // A string's UTF-8 byte length is always >= its UTF-16 code-unit
        // count, so this capacity is sufficient and avoids reallocation.
        let mut string_utf16: Vec<u16> = Vec::with_capacity(self.len());
        string_utf16.extend(self.encode_utf16());
        let jsstr = JS_NewUCStringCopyN(cx,
                                        string_utf16.as_ptr(),
                                        string_utf16.len() as libc::size_t);
        if jsstr.is_null() {
            // Per ToJSValConvertible's contract, JSAPI failure is a panic.
            panic!("JS_NewUCStringCopyN failed");
        }
        rval.set(StringValue(&*jsstr));
    }
}
// https://heycam.github.io/webidl/#es-USVString
impl ToJSValConvertible for String {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
(**self).to_jsval(cx, rval);
}
}
// https://heycam.github.io/webidl/#es-USVString
impl FromJSValConvertible for String {
    type Config = ();
    /// Convert a `JSVal` to a `String` via the ES ToString operation.
    ///
    /// Returns `Err(())` (with a pending JSAPI exception) when `ToString`
    /// fails. For two-byte strings, lone surrogates are replaced with
    /// U+FFFD by `String::from_utf16_lossy`.
    unsafe fn from_jsval(cx: *mut JSContext, value: HandleValue, _: ()) -> Result<ConversionResult<String>, ()> {
        let jsstr = ToString(cx, value);
        if jsstr.is_null() {
            debug!("ToString failed");
            return Err(());
        }
        if JS_StringHasLatin1Chars(jsstr) {
            // Latin1 fast path; build the Success value directly instead of
            // the redundant `Ok(..).map(ConversionResult::Success)`.
            return Ok(ConversionResult::Success(latin1_to_string(cx, jsstr)));
        }
        let mut length = 0;
        let chars = JS_GetTwoByteStringCharsAndLength(cx, ptr::null(), jsstr, &mut length);
        assert!(!chars.is_null());
        let char_vec = slice::from_raw_parts(chars, length as usize);
        Ok(ConversionResult::Success(String::from_utf16_lossy(char_vec)))
    }
}
impl<T: ToJSValConvertible> ToJSValConvertible for Option<T> {
    /// Nullable conversion: `None` becomes JS `null`; `Some(v)` converts
    /// the inner value.
    #[inline]
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        match *self {
            Some(ref inner) => inner.to_jsval(cx, rval),
            None => rval.set(NullValue()),
        }
    }
}
impl<T: ToJSValConvertible> ToJSValConvertible for Rc<T> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
(**self).to_jsval(cx, rval)
}
}
impl<T: FromJSValConvertible> FromJSValConvertible for Option<T> {
    type Config = T::Config;
    /// Nullable conversion: `null`/`undefined` map to `Success(None)`;
    /// any other value is converted as `T` and wrapped in `Some`.
    unsafe fn from_jsval(cx: *mut JSContext,
                         value: HandleValue,
                         option: T::Config)
                         -> Result<ConversionResult<Option<T>>, ()> {
        if value.get().is_null_or_undefined() {
            Ok(ConversionResult::Success(None))
        } else {
            // Inner Failure is propagated unchanged; only Success is
            // re-wrapped in Some.
            Ok(match try!(FromJSValConvertible::from_jsval(cx, value, option)) {
                ConversionResult::Success(v) => ConversionResult::Success(Some(v)),
                ConversionResult::Failure(v) => ConversionResult::Failure(v),
            })
        }
    }
}
// https://heycam.github.io/webidl/#es-sequence
impl<T: ToJSValConvertible> ToJSValConvertible for Vec<T> {
    /// Convert to a newly allocated JS Array with one element per item,
    /// panicking on JSAPI allocation/definition failure.
    #[inline]
    unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
        rooted!(in(cx) let js_array = JS_NewArrayObject1(cx, self.len() as libc::size_t));
        assert!(!js_array.handle().is_null());
        // A single rooted slot, reused for every element conversion.
        rooted!(in(cx) let mut val = UndefinedValue());
        for (index, obj) in self.iter().enumerate() {
            obj.to_jsval(cx, val.handle_mut());
            assert!(JS_DefineElement(cx, js_array.handle(),
                                     index as u32, val.handle(), JSPROP_ENUMERATE, None, None));
        }
        rval.set(ObjectValue(js_array.handle().get()));
    }
}
/// Rooting guard for the iterator field of ForOfIterator.
/// Behaves like RootedGuard (roots on creation, unroots on drop),
/// but borrows and allows access to the whole ForOfIterator, so
/// that methods on ForOfIterator can still be used through it.
struct ForOfIteratorGuard<'a> {
root: &'a mut ForOfIterator
}
impl<'a> ForOfIteratorGuard<'a> {
    /// Root `root.iterator` on the JS root stack for the guard's lifetime;
    /// the matching `Drop` impl unroots it again.
    fn new(cx: *mut JSContext, root: &'a mut ForOfIterator) -> Self {
        unsafe {
            root.iterator.add_to_root_stack(cx);
        }
        ForOfIteratorGuard {
            root: root
        }
    }
}
impl<'a> Drop for ForOfIteratorGuard<'a> {
fn drop(&mut self) {
unsafe {
self.root.iterator.remove_from_root_stack();
}
}
}
impl<C: Clone, T: FromJSValConvertible<Config=C>> FromJSValConvertible for Vec<T> {
    type Config = C;
    /// Convert a `JSVal` to a `Vec<T>` by driving the value's iteration
    /// protocol (WebIDL sequence conversion).
    ///
    /// Returns `Ok(Failure(..))` when the value is not iterable or an
    /// element fails to convert, and `Err(())` when JSAPI iteration itself
    /// fails (an exception is then pending).
    unsafe fn from_jsval(cx: *mut JSContext,
                         value: HandleValue,
                         option: C)
                         -> Result<ConversionResult<Vec<T>>, ()> {
        let mut iterator = ForOfIterator {
            cx_: cx,
            iterator: RootedObject::new_unrooted(),
            index: ::std::u32::MAX, // NOT_ARRAY
        };
        // Root the iterator object for the duration of the conversion.
        let mut iterator = ForOfIteratorGuard::new(cx, &mut iterator);
        let iterator = &mut *iterator.root;
        if !iterator.init(value, ForOfIterator_NonIterableBehavior::AllowNonIterable) {
            return Err(())
        }
        // With AllowNonIterable, init() succeeds but leaves the iterator
        // null when the value has no iteration protocol; report that as a
        // conversion Failure rather than a JSAPI error.
        if iterator.iterator.ptr.is_null() {
            return Ok(ConversionResult::Failure("Value is not iterable".into()));
        }
        let mut ret = vec![];
        loop {
            let mut done = false;
            rooted!(in(cx) let mut val = UndefinedValue());
            if !iterator.next(val.handle_mut(), &mut done) {
                return Err(())
            }
            if done {
                break;
            }
            ret.push(match try!(T::from_jsval(cx, val.handle(), option.clone())) {
                ConversionResult::Success(v) => v,
                ConversionResult::Failure(e) => return Ok(ConversionResult::Failure(e)),
            });
        }
        // Construct the result directly instead of `Ok(ret).map(..)`.
        Ok(ConversionResult::Success(ret))
    }
}
// https://heycam.github.io/webidl/#es-object
impl ToJSValConvertible for *mut JSObject {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(ObjectOrNullValue(*self));
maybe_wrap_object_or_null_value(cx, rval);
}
}
// https://heycam.github.io/webidl/#es-object
impl ToJSValConvertible for NonZero<*mut JSObject> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(ObjectValue(**self));
maybe_wrap_object_value(cx, rval);
}
}
// https://heycam.github.io/webidl/#es-object
impl ToJSValConvertible for Heap<*mut JSObject> {
#[inline]
unsafe fn to_jsval(&self, cx: *mut JSContext, rval: MutableHandleValue) {
rval.set(ObjectOrNullValue(self.get()));
maybe_wrap_object_or_null_value(cx, rval);
}
}
|
//! Implements a command for uploading proguard mapping files.
use std::fs;
use std::io;
use std::path::PathBuf;
use clap::{App, Arg, ArgMatches};
use console::style;
use failure::{bail, Error, SyncFailure};
use symbolic::common::byteview::ByteView;
use symbolic::proguard::ProguardMappingView;
use uuid::Uuid;
use crate::api::{Api, AssociateDsyms};
use crate::config::Config;
use crate::utils::android::{dump_proguard_uuids_as_properties, AndroidManifest};
use crate::utils::args::{validate_uuid, ArgExt};
use crate::utils::fs::{get_sha1_checksum, TempFile};
use crate::utils::system::QuietExit;
use crate::utils::ui::{copy_with_progress, make_byte_progress_bar};
/// A proguard mapping file that passed validation and is queued for upload.
#[derive(Debug)]
struct MappingRef {
    // Filesystem location of the mapping file.
    pub path: PathBuf,
    // File size in bytes; drives the per-file progress bar during zipping.
    pub uuid: Uuid,  // NOTE(review): field order is size-then-uuid below
    pub size: u64,
}
/// Define the CLI for the `upload-proguard` command: positional mapping
/// paths plus options for app/version association, UUID overriding and
/// upload/reprocessing control.
pub fn make_app<'a, 'b: 'a>(app: App<'a, 'b>) -> App<'a, 'b> {
    app.about("Upload ProGuard mapping files to a project.")
        .org_project_args()
        .arg(
            Arg::with_name("paths")
                .value_name("PATH")
                .help("The path to the mapping files.")
                .multiple(true)
                .number_of_values(1)
                .index(1),
        )
        .arg(
            Arg::with_name("version")
                .long("version")
                .value_name("VERSION")
                .requires("app_id")
                .help(
                    "Optionally associate the mapping files with a human \
                     readable version.{n}This helps you understand which \
                     ProGuard files go with which version of your app.",
                ),
        )
        .arg(
            Arg::with_name("version_code")
                .long("version-code")
                .value_name("VERSION_CODE")
                .requires("app_id")
                .requires("version")
                .help(
                    "Optionally associate the mapping files with a version \
                     code.{n}This helps you understand which ProGuard files \
                     go with which version of your app.",
                ),
        )
        .arg(
            Arg::with_name("app_id")
                .long("app-id")
                .value_name("APP_ID")
                .requires("version")
                .help(
                    // Typo fix: "easlier" -> "easier".
                    "Optionally associate the mapping files with an application \
                     ID.{n}If you have multiple apps in one sentry project you can \
                     then easier tell them apart.",
                ),
        )
        .arg(
            Arg::with_name("platform")
                .long("platform")
                .value_name("PLATFORM")
                .requires("app_id")
                .help(
                    "Optionally defines the platform for the app association. \
                     [defaults to 'android']",
                ),
        )
        .arg(
            Arg::with_name("no_reprocessing")
                .long("no-reprocessing")
                .help("Do not trigger reprocessing after upload."),
        )
        .arg(Arg::with_name("no_upload").long("no-upload").help(
            // Fixed the previously unbalanced parenthesis in this text.
            "Disable the actual upload.{n}This runs all steps for the \
             processing but does not trigger the upload (this also \
             automatically disables reprocessing). This is useful if you \
             just want to verify the mapping files and write the \
             proguard UUIDs into a properties file.",
        ))
        .arg(
            Arg::with_name("android_manifest")
                .long("android-manifest")
                .value_name("PATH")
                .conflicts_with("app_id")
                .help("Read version and version code from an Android manifest file."),
        )
        .arg(
            Arg::with_name("write_properties")
                .long("write-properties")
                .value_name("PATH")
                .help(
                    "Write the UUIDs for the processed mapping files into \
                     the given properties file.",
                ),
        )
        .arg(
            Arg::with_name("require_one")
                .long("require-one")
                .help("Requires at least one file to upload or the command will error."),
        )
        .arg(
            Arg::with_name("uuid")
                .long("uuid")
                .short("u")
                .value_name("UUID")
                .validator(validate_uuid)
                .help(
                    "Explicitly override the UUID of the mapping file with another one.{n}\
                     This should be used with caution as it means that you can upload \
                     multiple mapping files if you don't take care. This however can \
                     be useful if you have a build process in which you need to know \
                     the UUID of the proguard file before it was created. If you upload \
                     a file with a forced UUID you can only upload a single proguard file.",
                ),
        )
}
/// Run the `upload-proguard` command: validate the given mapping files,
/// zip them into a temporary archive, upload it, optionally write UUIDs
/// to a properties file, associate them with an app/version, and trigger
/// server-side reprocessing.
pub fn execute<'a>(matches: &ArgMatches<'a>) -> Result<(), Error> {
    let api = Api::get_current();
    // No paths at all is treated as "nothing to do", not an error.
    let paths: Vec<_> = match matches.values_of("paths") {
        Some(paths) => paths.collect(),
        None => {
            return Ok(());
        }
    };
    let mut mappings = vec![];
    let mut all_checksums = vec![];
    let android_manifest = if let Some(path) = matches.value_of("android_manifest") {
        Some(AndroidManifest::from_path(path)?)
    } else {
        None
    };
    // --uuid is validated by clap (validate_uuid), so unwrap cannot fail here.
    let forced_uuid = matches.value_of("uuid").map(|x| x.parse::<Uuid>().unwrap());
    if forced_uuid.is_some() && paths.len() != 1 {
        bail!(
            "When forcing a UUID a single proguard file needs to be \
             provided, got {}",
            paths.len()
        );
    }
    // since the mappings are quite small we don't bother doing a second http
    // request to figure out if any of the checksums are missing. We just ship
    // them all up.
    for path in &paths {
        match fs::metadata(path) {
            Ok(md) => {
                let byteview = ByteView::from_path(path).map_err(SyncFailure::new)?;
                let mapping = ProguardMappingView::parse(byteview).map_err(SyncFailure::new)?;
                // Mappings without line info are useless for symbolication,
                // so they are skipped with a warning rather than uploaded.
                if !mapping.has_line_info() {
                    eprintln!(
                        "warning: proguard mapping '{}' was ignored because it \
                         does not contain any line information.",
                        path
                    );
                } else {
                    let mut f = fs::File::open(path)?;
                    all_checksums.push(get_sha1_checksum(&mut f)?.to_string());
                    mappings.push(MappingRef {
                        path: PathBuf::from(path),
                        size: md.len(),
                        // A forced --uuid takes precedence over the UUID
                        // derived from the mapping file itself.
                        uuid: forced_uuid.unwrap_or_else(|| mapping.uuid()),
                    });
                }
            }
            // A missing file is only a warning: some builds legitimately
            // produce no mapping (e.g. -dontobfuscate).
            Err(ref err) if err.kind() == io::ErrorKind::NotFound => {
                eprintln!(
                    "warning: proguard mapping '{}' does not exist. This \
                     might be because the build process did not generate \
                     one (for instance because -dontobfuscate is used)",
                    path
                );
            }
            // Any other I/O error aborts the command with context.
            Err(err) => {
                return Err(Error::from(err)
                    .context(format!("failed to open proguard mapping '{}'", path))
                    .into());
            }
        }
    }
    if mappings.is_empty() && matches.is_present("require_one") {
        println!();
        eprintln!("{}", style("error: found no mapping files to upload").red());
        return Err(QuietExit(1).into());
    }
    println!("{} compressing mappings", style(">").dim());
    let tf = TempFile::create()?;
    // add a scope here so we will flush before uploading
    {
        let mut zip = zip::ZipWriter::new(tf.open());
        for mapping in &mappings {
            let pb = make_byte_progress_bar(mapping.size);
            // Archive layout expected by the server: proguard/<uuid>.txt.
            zip.start_file(
                format!("proguard/{}.txt", mapping.uuid),
                zip::write::FileOptions::default(),
            )?;
            copy_with_progress(&pb, &mut fs::File::open(&mapping.path)?, &mut zip)?;
            pb.finish_and_clear();
        }
    }
    // write UUIDs into the mapping file.
    if let Some(p) = matches.value_of("write_properties") {
        let uuids: Vec<_> = mappings.iter().map(|x| x.uuid).collect();
        dump_proguard_uuids_as_properties(p, &uuids)?;
    }
    if matches.is_present("no_upload") {
        println!("{} skipping upload.", style(">").dim());
        return Ok(());
    }
    println!("{} uploading mappings", style(">").dim());
    let config = Config::get_current();
    let (org, project) = config.get_org_and_project(matches)?;
    let rv = api.upload_dif_archive(&org, &project, tf.path())?;
    println!(
        "{} Uploaded a total of {} new mapping files",
        style(">").dim(),
        style(rv.len()).yellow()
    );
    if !rv.is_empty() {
        println!("Newly uploaded debug symbols:");
        for df in rv {
            println!("  {}", style(&df.id()).dim());
        }
    }
    // update the uuids
    if let Some(android_manifest) = android_manifest {
        api.associate_android_proguard_mappings(&org, &project, &android_manifest, all_checksums)?;
    // if values are given associate
    } else if let Some(app_id) = matches.value_of("app_id") {
        api.associate_dsyms(
            &org,
            &project,
            &AssociateDsyms {
                platform: matches
                    .value_of("platform")
                    .unwrap_or("android")
                    .to_string(),
                checksums: all_checksums,
                name: app_id.to_string(),
                app_id: app_id.to_string(),
                // clap guarantees --version is present whenever --app-id is
                // (via .requires), so this unwrap is safe.
                version: matches.value_of("version").unwrap().to_string(),
                build: matches.value_of("version_code").map(|x| x.to_string()),
            },
        )?;
    }
    // If wanted trigger reprocessing
    if !matches.is_present("no_reprocessing") && !matches.is_present("no_upload") {
        if !api.trigger_reprocessing(&org, &project)? {
            println!(
                "{} Server does not support reprocessing. Not triggering.",
                style(">").dim()
            );
        }
    } else {
        println!("{} skipped reprocessing", style(">").dim());
    }
    Ok(())
}
feat: log the path and SHA1 hash of each ProGuard mapping file that is uploaded
//! Implements a command for uploading proguard mapping files.
use std::fs;
use std::io;
use std::path::PathBuf;
use clap::{App, Arg, ArgMatches};
use console::style;
use failure::{bail, Error, SyncFailure};
use log::debug;
use symbolic::common::byteview::ByteView;
use symbolic::proguard::ProguardMappingView;
use uuid::Uuid;
use crate::api::{Api, AssociateDsyms};
use crate::config::Config;
use crate::utils::android::{dump_proguard_uuids_as_properties, AndroidManifest};
use crate::utils::args::{validate_uuid, ArgExt};
use crate::utils::fs::{get_sha1_checksum, TempFile};
use crate::utils::system::QuietExit;
use crate::utils::ui::{copy_with_progress, make_byte_progress_bar};
/// A ProGuard mapping file that passed validation and is queued for upload.
#[derive(Debug)]
struct MappingRef {
    /// Filesystem location of the mapping file.
    pub path: PathBuf,
    /// File size in bytes; used to size the upload progress bar.
    pub size: u64,
    /// Proguard UUID — derived from the mapping itself, or overridden via `--uuid`.
    pub uuid: Uuid,
}
/// Builds the CLI definition for the `upload-proguard` command.
///
/// Declares the positional mapping-file paths plus the association flags
/// (`--version`, `--version-code`, `--app-id`, `--platform`), the
/// `--android-manifest` alternative, and the upload-control switches.
/// Fixes a typo in the `--app-id` help text ("easlier" -> "more easily").
pub fn make_app<'a, 'b: 'a>(app: App<'a, 'b>) -> App<'a, 'b> {
    app.about("Upload ProGuard mapping files to a project.")
        .org_project_args()
        .arg(
            Arg::with_name("paths")
                .value_name("PATH")
                .help("The path to the mapping files.")
                .multiple(true)
                .number_of_values(1)
                .index(1),
        )
        .arg(
            Arg::with_name("version")
                .long("version")
                .value_name("VERSION")
                .requires("app_id")
                .help(
                    "Optionally associate the mapping files with a human \
                     readable version.{n}This helps you understand which \
                     ProGuard files go with which version of your app.",
                ),
        )
        .arg(
            Arg::with_name("version_code")
                .long("version-code")
                .value_name("VERSION_CODE")
                .requires("app_id")
                .requires("version")
                .help(
                    "Optionally associate the mapping files with a version \
                     code.{n}This helps you understand which ProGuard files \
                     go with which version of your app.",
                ),
        )
        .arg(
            Arg::with_name("app_id")
                .long("app-id")
                .value_name("APP_ID")
                .requires("version")
                .help(
                    "Optionally associate the mapping files with an application \
                     ID.{n}If you have multiple apps in one sentry project you can \
                     then more easily tell them apart.",
                ),
        )
        .arg(
            Arg::with_name("platform")
                .long("platform")
                .value_name("PLATFORM")
                .requires("app_id")
                .help(
                    "Optionally defines the platform for the app association. \
                     [defaults to 'android']",
                ),
        )
        .arg(
            Arg::with_name("no_reprocessing")
                .long("no-reprocessing")
                .help("Do not trigger reprocessing after upload."),
        )
        .arg(Arg::with_name("no_upload").long("no-upload").help(
            "Disable the actual upload.{n}This runs all steps for the \
             processing but does not trigger the upload (this also \
             automatically disables reprocessing.  This is useful if you \
             just want to verify the mapping files and write the \
             proguard UUIDs into a properties file.",
        ))
        .arg(
            Arg::with_name("android_manifest")
                .long("android-manifest")
                .value_name("PATH")
                .conflicts_with("app_id")
                .help("Read version and version code from an Android manifest file."),
        )
        .arg(
            Arg::with_name("write_properties")
                .long("write-properties")
                .value_name("PATH")
                .help(
                    "Write the UUIDs for the processed mapping files into \
                     the given properties file.",
                ),
        )
        .arg(
            Arg::with_name("require_one")
                .long("require-one")
                .help("Requires at least one file to upload or the command will error."),
        )
        .arg(
            Arg::with_name("uuid")
                .long("uuid")
                .short("u")
                .value_name("UUID")
                .validator(validate_uuid)
                .help(
                    "Explicitly override the UUID of the mapping file with another one.{n}\
                     This should be used with caution as it means that you can upload \
                     multiple mapping files if you don't take care.  This however can \
                     be useful if you have a build process in which you need to know \
                     the UUID of the proguard file before it was created.  If you upload \
                     a file with a forced UUID you can only upload a single proguard file.",
                ),
        )
}
/// Executes the `upload-proguard` command.
///
/// Flow: collect the mapping paths -> validate each mapping (must contain
/// line info) and record its SHA1 checksum -> compress all mappings into a
/// temporary zip archive -> optionally dump the UUIDs to a properties file ->
/// upload the archive -> associate the checksums with an app or Android
/// manifest -> optionally trigger server-side reprocessing.
pub fn execute<'a>(matches: &ArgMatches<'a>) -> Result<(), Error> {
    let api = Api::get_current();
    // No paths given means there is nothing to do; succeed silently.
    let paths: Vec<_> = match matches.values_of("paths") {
        Some(paths) => paths.collect(),
        None => {
            return Ok(());
        }
    };
    let mut mappings = vec![];
    let mut all_checksums = vec![];
    // Version information may come from an Android manifest instead of flags.
    let android_manifest = if let Some(path) = matches.value_of("android_manifest") {
        Some(AndroidManifest::from_path(path)?)
    } else {
        None
    };
    // `--uuid` was validated by clap (validate_uuid), so this parse can't fail.
    let forced_uuid = matches.value_of("uuid").map(|x| x.parse::<Uuid>().unwrap());
    // A forced UUID can only meaningfully apply to exactly one mapping file.
    if forced_uuid.is_some() && paths.len() != 1 {
        bail!(
            "When forcing a UUID a single proguard file needs to be \
             provided, got {}",
            paths.len()
        );
    }
    // since the mappings are quite small we don't bother doing a second http
    // request to figure out if any of the checksums are missing. We just ship
    // them all up.
    for path in &paths {
        match fs::metadata(path) {
            Ok(md) => {
                let byteview = ByteView::from_path(path).map_err(SyncFailure::new)?;
                let mapping = ProguardMappingView::parse(byteview).map_err(SyncFailure::new)?;
                // A mapping without line info is not useful; warn and skip it.
                if !mapping.has_line_info() {
                    eprintln!(
                        "warning: proguard mapping '{}' was ignored because it \
                         does not contain any line information.",
                        path
                    );
                } else {
                    let mut f = fs::File::open(path)?;
                    let sha = get_sha1_checksum(&mut f)?.to_string();
                    debug!("SHA1 for mapping file '{}': '{}'", path, sha);
                    all_checksums.push(sha);
                    mappings.push(MappingRef {
                        path: PathBuf::from(path),
                        size: md.len(),
                        uuid: forced_uuid.unwrap_or_else(|| mapping.uuid()),
                    });
                }
            }
            // A missing mapping file is only a warning, not a hard error.
            Err(ref err) if err.kind() == io::ErrorKind::NotFound => {
                eprintln!(
                    "warning: proguard mapping '{}' does not exist. This \
                     might be because the build process did not generate \
                     one (for instance because -dontobfuscate is used)",
                    path
                );
            }
            Err(err) => {
                return Err(Error::from(err)
                    .context(format!("failed to open proguard mapping '{}'", path))
                    .into());
            }
        }
    }
    if mappings.is_empty() && matches.is_present("require_one") {
        println!();
        eprintln!("{}", style("error: found no mapping files to upload").red());
        return Err(QuietExit(1).into());
    }
    println!("{} compressing mappings", style(">").dim());
    let tf = TempFile::create()?;
    // add a scope here so we will flush before uploading
    {
        let mut zip = zip::ZipWriter::new(tf.open());
        for mapping in &mappings {
            let pb = make_byte_progress_bar(mapping.size);
            // Each mapping is stored in the archive under its UUID.
            zip.start_file(
                format!("proguard/{}.txt", mapping.uuid),
                zip::write::FileOptions::default(),
            )?;
            copy_with_progress(&pb, &mut fs::File::open(&mapping.path)?, &mut zip)?;
            pb.finish_and_clear();
        }
    }
    // write UUIDs of the processed mappings into the given properties file.
    if let Some(p) = matches.value_of("write_properties") {
        let uuids: Vec<_> = mappings.iter().map(|x| x.uuid).collect();
        dump_proguard_uuids_as_properties(p, &uuids)?;
    }
    if matches.is_present("no_upload") {
        println!("{} skipping upload.", style(">").dim());
        return Ok(());
    }
    println!("{} uploading mappings", style(">").dim());
    let config = Config::get_current();
    let (org, project) = config.get_org_and_project(matches)?;
    let rv = api.upload_dif_archive(&org, &project, tf.path())?;
    println!(
        "{} Uploaded a total of {} new mapping files",
        style(">").dim(),
        style(rv.len()).yellow()
    );
    if !rv.is_empty() {
        println!("Newly uploaded debug symbols:");
        for df in rv {
            println!(" {}", style(&df.id()).dim());
        }
    }
    // update the uuids
    if let Some(android_manifest) = android_manifest {
        api.associate_android_proguard_mappings(&org, &project, &android_manifest, all_checksums)?;
    // if values are given associate
    } else if let Some(app_id) = matches.value_of("app_id") {
        api.associate_dsyms(
            &org,
            &project,
            &AssociateDsyms {
                platform: matches
                    .value_of("platform")
                    .unwrap_or("android")
                    .to_string(),
                checksums: all_checksums,
                name: app_id.to_string(),
                app_id: app_id.to_string(),
                // clap: `app_id` requires `version`, so this unwrap is guarded.
                version: matches.value_of("version").unwrap().to_string(),
                build: matches.value_of("version_code").map(|x| x.to_string()),
            },
        )?;
    }
    // If wanted trigger reprocessing
    if !matches.is_present("no_reprocessing") && !matches.is_present("no_upload") {
        if !api.trigger_reprocessing(&org, &project)? {
            println!(
                "{} Server does not support reprocessing. Not triggering.",
                style(">").dim()
            );
        }
    } else {
        println!("{} skipped reprocessing", style(">").dim());
    }
    Ok(())
}
|
import std::{io, vec, str, option, either, result, fs};
import std::option::{some, none};
import std::either::{left, right};
import std::map::{hashmap, new_str_hash};
import token::can_begin_expr;
import codemap::span;
import util::interner;
import ast::{node_id, spanned};
import front::attr;
// Parser restriction state: RESTRICT_NO_CALL_EXPRS suppresses call
// expressions (used e.g. while parsing the target of `bind`).
tag restriction { UNRESTRICTED; RESTRICT_NO_CALL_EXPRS; }

// Whether the file being parsed is the crate root or an ordinary source file.
tag file_type { CRATE_FILE; SOURCE_FILE; }

// Shared per-parse state: the codemap plus a mutable node-id counter.
type parse_sess = @{cm: codemap::codemap, mutable next_id: node_id};
// Hands out the next fresh AST node id from the session counter.
fn next_node_id(sess: parse_sess) -> node_id {
    let rv = sess.next_id;
    sess.next_id += 1;
    ret rv;
}
// The parser object interface.  Method groups: token access
// (peek/bump/swap/look_ahead), diagnostics (fatal/span_fatal/warn),
// expression restriction state, and accessors for spans, positions,
// the precedence table, interned strings, reader/filemap and session.
type parser =
    obj {
        fn peek() -> token::token;
        fn bump();
        fn swap(token::token, uint, uint);
        fn look_ahead(uint) -> token::token;
        fn fatal(str) -> ! ;
        fn span_fatal(span, str) -> ! ;
        fn warn(str);
        fn restrict(restriction);
        fn get_restriction() -> restriction;
        fn get_file_type() -> file_type;
        fn get_cfg() -> ast::crate_cfg;
        fn get_span() -> span;
        fn get_lo_pos() -> uint;
        fn get_hi_pos() -> uint;
        fn get_last_lo_pos() -> uint;
        fn get_last_hi_pos() -> uint;
        fn get_prec_table() -> @[op_spec];
        fn get_str(token::str_num) -> str;
        fn get_reader() -> lexer::reader;
        fn get_filemap() -> codemap::filemap;
        fn get_bad_expr_words() -> hashmap<str, ()>;
        fn get_chpos() -> uint;
        fn get_byte_pos() -> uint;
        fn get_id() -> node_id;
        fn get_sess() -> parse_sess;
    };
// Reads `path`, registers a new filemap with the codemap, and builds a
// parser over the file's contents.  Emits a codemap error and fails if the
// file cannot be read.
fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, path: str,
                        chpos: uint, byte_pos: uint, ftype: file_type) ->
   parser {
    let src = alt io::read_whole_file_str(path) {
      result::ok(src) {
        // FIXME: This copy is unfortunate
        src
      }
      result::err(e) {
        codemap::emit_error(none, e, sess.cm);
        fail;
      }
    };
    let filemap = codemap::new_filemap(path, chpos, byte_pos);
    sess.cm.files += [filemap];
    let itr = @interner::mk(str::hash, str::eq);
    let rdr = lexer::new_reader(sess.cm, src, filemap, itr);
    ret new_parser(sess, cfg, rdr, ftype);
}
// Constructs the concrete parser object over an existing reader and primes
// it with the first token.
fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: lexer::reader,
              ftype: file_type) -> parser {
    obj stdio_parser(sess: parse_sess,
                     cfg: ast::crate_cfg,
                     ftype: file_type,
                     mutable tok: token::token,
                     mutable tok_span: span,
                     mutable last_tok_span: span,
                     // lookahead buffer; look_ahead prepends new tokens
                     mutable buffer: [{tok: token::token, span: span}],
                     mutable restr: restriction,
                     rdr: lexer::reader,
                     precs: @[op_spec],
                     bad_words: hashmap<str, ()>) {
        fn peek() -> token::token { ret tok; }
        // Advance one token, draining the lookahead buffer before lexing.
        fn bump() {
            last_tok_span = tok_span;
            if vec::len(buffer) == 0u {
                let next = lexer::next_token(rdr);
                tok = next.tok;
                tok_span = ast_util::mk_sp(next.chpos, rdr.get_chpos());
            } else {
                // look_ahead prepends, so the oldest buffered token sits at
                // the end; vec::pop retrieves it from there.
                let next = vec::pop(buffer);
                tok = next.tok;
                tok_span = next.span;
            }
        }
        // Forcibly replace the current token and its span (used by
        // expect_gt to split `>>` / `>>>` tokens).
        fn swap(next: token::token, lo: uint, hi: uint) {
            tok = next;
            tok_span = ast_util::mk_sp(lo, hi);
        }
        // Lex forward until `distance` tokens are buffered, then return the
        // token that far ahead without consuming anything.
        fn look_ahead(distance: uint) -> token::token {
            while vec::len(buffer) < distance {
                let next = lexer::next_token(rdr);
                let sp = ast_util::mk_sp(next.chpos, rdr.get_chpos());
                buffer = [{tok: next.tok, span: sp}] + buffer;
            }
            ret buffer[distance - 1u].tok;
        }
        fn fatal(m: str) -> ! {
            self.span_fatal(self.get_span(), m);
        }
        fn span_fatal(sp: span, m: str) -> ! {
            codemap::emit_error(some(sp), m, sess.cm);
            fail;
        }
        fn warn(m: str) {
            codemap::emit_warning(some(self.get_span()), m, sess.cm);
        }
        fn restrict(r: restriction) { restr = r; }
        fn get_restriction() -> restriction { ret restr; }
        fn get_span() -> span { ret tok_span; }
        fn get_lo_pos() -> uint { ret tok_span.lo; }
        fn get_hi_pos() -> uint { ret tok_span.hi; }
        fn get_last_lo_pos() -> uint { ret last_tok_span.lo; }
        fn get_last_hi_pos() -> uint { ret last_tok_span.hi; }
        fn get_file_type() -> file_type { ret ftype; }
        fn get_cfg() -> ast::crate_cfg { ret cfg; }
        fn get_prec_table() -> @[op_spec] { ret precs; }
        fn get_str(i: token::str_num) -> str {
            ret interner::get(*rdr.get_interner(), i);
        }
        fn get_reader() -> lexer::reader { ret rdr; }
        fn get_filemap() -> codemap::filemap { ret rdr.get_filemap(); }
        fn get_bad_expr_words() -> hashmap<str, ()> { ret bad_words; }
        fn get_chpos() -> uint { ret rdr.get_chpos(); }
        fn get_byte_pos() -> uint { ret rdr.get_byte_pos(); }
        fn get_id() -> node_id { ret next_node_id(sess); }
        fn get_sess() -> parse_sess { ret sess; }
    }
    // Prime the parser with the first token of the stream.
    let tok0 = lexer::next_token(rdr);
    let span0 = ast_util::mk_sp(tok0.chpos, rdr.get_chpos());
    ret stdio_parser(sess, cfg, ftype, tok0.tok, span0, span0, [],
                     UNRESTRICTED, rdr, prec_table(), bad_expr_word_table());
}
// These are the words that shouldn't be allowed as value identifiers,
// because, if used at the start of a line, they will cause the line to be
// interpreted as a specific kind of statement, which would be confusing.
fn bad_expr_word_table() -> hashmap<str, ()> {
    let words = new_str_hash();
    words.insert("mod", ());
    words.insert("if", ());
    words.insert("else", ());
    words.insert("while", ());
    words.insert("do", ());
    words.insert("alt", ());
    words.insert("for", ());
    words.insert("break", ());
    words.insert("cont", ());
    words.insert("ret", ());
    words.insert("be", ());
    words.insert("fail", ());
    words.insert("type", ());
    words.insert("resource", ());
    words.insert("check", ());
    words.insert("assert", ());
    words.insert("claim", ());
    words.insert("native", ());
    words.insert("fn", ());
    words.insert("lambda", ());
    words.insert("pure", ());
    words.insert("unsafe", ());
    words.insert("block", ());
    words.insert("import", ());
    words.insert("export", ());
    words.insert("let", ());
    words.insert("const", ());
    words.insert("log", ());
    words.insert("log_err", ());
    words.insert("tag", ());
    words.insert("obj", ());
    words.insert("copy", ());
    ret words;
}
// Reports an unexpected token and aborts parsing.
fn unexpected(p: parser, t: token::token) -> ! {
    let s: str = "unexpected token: ";
    s += token::to_str(p.get_reader(), t);
    p.fatal(s);
}

// Consumes the expected token `t` or fails with a diagnostic naming both
// the expected and the actual token.
fn expect(p: parser, t: token::token) {
    if p.peek() == t {
        p.bump();
    } else {
        let s: str = "expecting ";
        s += token::to_str(p.get_reader(), t);
        s += ", found ";
        s += token::to_str(p.get_reader(), p.peek());
        p.fatal(s);
    }
}

// Consumes a single `>`.  When the lexer produced a `>>` (LSR) or `>>>`
// (ASR) shift token instead (as happens with nested generics), swap in the
// remainder of the token with the span's lo advanced by one character.
fn expect_gt(p: parser) {
    if p.peek() == token::GT {
        p.bump();
    } else if p.peek() == token::BINOP(token::LSR) {
        p.swap(token::GT, p.get_lo_pos() + 1u, p.get_hi_pos());
    } else if p.peek() == token::BINOP(token::ASR) {
        p.swap(token::BINOP(token::LSR), p.get_lo_pos() + 1u, p.get_hi_pos());
    } else {
        let s: str = "expecting ";
        s += token::to_str(p.get_reader(), token::GT);
        s += ", found ";
        s += token::to_str(p.get_reader(), p.peek());
        p.fatal(s);
    }
}
// Wraps `node` with a span covering [lo, hi).
fn spanned<copy T>(lo: uint, hi: uint, node: T) -> spanned<T> {
    ret {node: node, span: ast_util::mk_sp(lo, hi)};
}

// Consumes and returns an identifier, or fails.
fn parse_ident(p: parser) -> ast::ident {
    alt p.peek() {
      token::IDENT(i, _) { p.bump(); ret p.get_str(i); }
      _ { p.fatal("expecting ident"); }
    }
}

// Like parse_ident, but rejects reserved expression words first.
fn parse_value_ident(p: parser) -> ast::ident {
    check_bad_word(p);
    ret parse_ident(p);
}

// Consumes `tok` if it is next; returns whether it was consumed.
fn eat(p: parser, tok: token::token) -> bool {
    ret if p.peek() == tok { p.bump(); true } else { false };
}

// True if the next token is the plain (non-quoted) identifier `word`.
fn is_word(p: parser, word: str) -> bool {
    ret alt p.peek() {
          token::IDENT(sid, false) { str::eq(word, p.get_str(sid)) }
          _ { false }
        };
}

// Consumes the identifier `word` if it is next; returns whether it was.
fn eat_word(p: parser, word: str) -> bool {
    alt p.peek() {
      token::IDENT(sid, false) {
        if str::eq(word, p.get_str(sid)) {
            p.bump();
            ret true;
        } else { ret false; }
      }
      _ { ret false; }
    }
}

// Requires the identifier `word` next, or fails with a diagnostic.
fn expect_word(p: parser, word: str) {
    if !eat_word(p, word) {
        p.fatal("expecting " + word + ", found " +
                    token::to_str(p.get_reader(), p.peek()));
    }
}

// Fails if the next token is a reserved word from bad_expr_word_table.
fn check_bad_word(p: parser) {
    alt p.peek() {
      token::IDENT(sid, false) {
        let w = p.get_str(sid);
        if p.get_bad_expr_words().contains_key(w) {
            p.fatal("found " + w + " in expression position");
        }
      }
      _ { }
    }
}
// Parses a fn type: a parenthesized argument list followed by an optional
// return type, producing ast::ty_fn with the given proto.
fn parse_ty_fn(proto: ast::proto, p: parser) -> ast::ty_ {
    fn parse_fn_input_ty(p: parser) -> ast::ty_arg {
        let lo = p.get_lo_pos();
        let mode = parse_arg_mode(p);
        // Ignore arg name, if present
        if is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
            p.bump();
            p.bump();
        }
        let t = parse_ty(p, false);
        ret spanned(lo, t.span.hi, {mode: mode, ty: t});
    }
    let inputs =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_fn_input_ty, p);
    // FIXME: there's no syntax for this right now anyway
    //  auto constrs = parse_constrs(~[], p);
    let constrs: [@ast::constr] = [];
    let (ret_style, ret_ty) = parse_ret_ty(p);
    ret ast::ty_fn(proto, inputs.node, ret_ty, ret_style, constrs);
}
// Parses an obj type: a braced list of method signatures, each a proto,
// name and fn type terminated by `;`.
fn parse_ty_obj(p: parser) -> ast::ty_ {
    fn parse_method_sig(p: parser) -> ast::ty_method {
        let flo = p.get_lo_pos();
        let proto: ast::proto = parse_method_proto(p);
        let ident = parse_value_ident(p);
        let f = parse_ty_fn(proto, p);
        expect(p, token::SEMI);
        // parse_ty_fn always yields ty_fn, so this match is exhaustive here.
        alt f {
          ast::ty_fn(proto, inputs, output, cf, constrs) {
            ret spanned(flo, output.span.hi,
                        {proto: proto,
                         ident: ident,
                         inputs: inputs,
                         output: output,
                         cf: cf,
                         constrs: constrs});
          }
        }
    }
    let meths =
        parse_seq(token::LBRACE, token::RBRACE, seq_sep_none(),
                  parse_method_sig, p);
    ret ast::ty_obj(meths.node);
}
// Parses an optional mutability qualifier followed by a type.
fn parse_mt(p: parser) -> ast::mt {
    let mut = parse_mutability(p);
    let t = parse_ty(p, false);
    ret {ty: t, mut: mut};
}

// Parses a record-type field: `[mutability] ident : type`.
fn parse_ty_field(p: parser) -> ast::ty_field {
    let lo = p.get_lo_pos();
    let mut = parse_mutability(p);
    let id = parse_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    ret spanned(lo, ty.span.hi, {ident: id, mt: {ty: ty, mut: mut}});
}
// if i is the jth ident in args, return j
// otherwise, fail
fn ident_index(p: parser, args: [ast::arg], i: ast::ident) -> uint {
    let j = 0u;
    for a: ast::arg in args { if a.ident == i { ret j; } j += 1u; }
    p.fatal("Unbound variable " + i + " in constraint arg");
}

// Parses one argument of a type constraint: `*` alone is carg_base, while
// `*.path` names a record field.
fn parse_type_constr_arg(p: parser) -> @ast::ty_constr_arg {
    let sp = p.get_span();
    let carg = ast::carg_base;
    expect(p, token::BINOP(token::STAR));
    if p.peek() == token::DOT {
        // "*..." notation for record fields
        p.bump();
        let pth: ast::path = parse_path(p);
        carg = ast::carg_ident(pth);
    }
    // No literals yet, I guess?
    ret @{node: carg, span: sp};
}

// Parses one argument of a fn constraint: `*` is carg_base, otherwise an
// identifier resolved to its index in the fn's argument list.
fn parse_constr_arg(args: [ast::arg], p: parser) -> @ast::constr_arg {
    let sp = p.get_span();
    let carg = ast::carg_base;
    if p.peek() == token::BINOP(token::STAR) {
        p.bump();
    } else {
        let i: ast::ident = parse_value_ident(p);
        carg = ast::carg_ident(ident_index(p, args, i));
    }
    ret @{node: carg, span: sp};
}
// Parses a fn constraint: a predicate path applied to a parenthesized list
// of constraint args resolved against `fn_args`.
fn parse_ty_constr(fn_args: [ast::arg], p: parser) -> @ast::constr {
    let lo = p.get_lo_pos();
    let path = parse_path(p);
    let args: {node: [@ast::constr_arg], span: span} =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  {|p| parse_constr_arg(fn_args, p)}, p);
    ret @spanned(lo, args.span.hi,
                 {path: path, args: args.node, id: p.get_id()});
}

// Parses a constraint appearing inside a type.
fn parse_constr_in_type(p: parser) -> @ast::ty_constr {
    let lo = p.get_lo_pos();
    let path = parse_path(p);
    let args: [@ast::ty_constr_arg] =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_type_constr_arg, p).node;
    let hi = p.get_lo_pos();
    let tc: ast::ty_constr_ = {path: path, args: args, id: p.get_id()};
    ret @spanned(lo, hi, tc);
}

// Parses one or more constraints separated by commas, using `pser` for
// each element.
fn parse_constrs<copy T>(pser: block(parser) -> @ast::constr_general<T>,
                         p: parser) ->
   [@ast::constr_general<T>] {
    let constrs: [@ast::constr_general<T>] = [];
    while true {
        let constr = pser(p);
        constrs += [constr];
        if p.peek() == token::COMMA { p.bump(); } else { break; }
    }
    constrs
}

// Parses the constraint list attached to a type.
fn parse_type_constraints(p: parser) -> [@ast::ty_constr] {
    ret parse_constrs(parse_constr_in_type, p);
}
// After a base type has been parsed, optionally parses explicit type
// parameter instantiation (`::<T, ...>` or `<T, ...>` depending on
// colons_before_params).  Only path types may take parameters.
fn parse_ty_postfix(orig_t: ast::ty_, p: parser, colons_before_params: bool)
   -> @ast::ty {
    let lo = p.get_lo_pos();
    if colons_before_params && p.peek() == token::MOD_SEP {
        p.bump();
        expect(p, token::LT);
    } else if !colons_before_params && p.peek() == token::LT {
        p.bump();
    } else { ret @spanned(lo, p.get_lo_pos(), orig_t); }
    // If we're here, we have explicit type parameter instantiation.
    let seq = parse_seq_to_gt(some(token::COMMA), {|p| parse_ty(p, false)},
                              p);
    alt orig_t {
      ast::ty_path(pth, ann) {
        let hi = p.get_hi_pos();
        ret @spanned(lo, hi,
                     ast::ty_path(spanned(lo, hi,
                                          {global: pth.node.global,
                                           idents: pth.node.idents,
                                           types: seq}), ann));
      }
      _ { p.fatal("type parameter instantiation only allowed for paths"); }
    }
}
// Parses an optional return type: `-> !` is noreturn/ty_bot, `-> T` is a
// normal return, and no arrow yields ty_nil at the current position.
fn parse_ret_ty(p: parser) -> (ast::ret_style, @ast::ty) {
    ret if eat(p, token::RARROW) {
        let lo = p.get_lo_pos();
        if eat(p, token::NOT) {
            (ast::noreturn, @spanned(lo, p.get_last_hi_pos(), ast::ty_bot))
        } else { (ast::return_val, parse_ty(p, false)) }
    } else {
        let pos = p.get_lo_pos();
        (ast::return_val, @spanned(pos, pos, ast::ty_nil))
    }
}
// Parses a type.  Dispatches on keywords for primitive/machine types, then
// on the leading token for tuples `(..)`, boxes `@`, uniques `~`, raw
// pointers `*`, records `{..}` (with optional `: constraints`), vectors
// `[..]`, fn/block/lambda/obj types, and finally paths.  Ends by parsing
// an optional type-parameter instantiation via parse_ty_postfix.
fn parse_ty(p: parser, colons_before_params: bool) -> @ast::ty {
    let lo = p.get_lo_pos();
    let t: ast::ty_;
    // FIXME: do something with this

    if eat_word(p, "bool") {
        t = ast::ty_bool;
    } else if eat_word(p, "int") {
        t = ast::ty_int;
    } else if eat_word(p, "uint") {
        t = ast::ty_uint;
    } else if eat_word(p, "float") {
        t = ast::ty_float;
    } else if eat_word(p, "str") {
        t = ast::ty_str;
    } else if eat_word(p, "char") {
        t = ast::ty_char;
        /*
            } else if (eat_word(p, "task")) {
                t = ast::ty_task;
        */
    } else if eat_word(p, "i8") {
        t = ast::ty_machine(ast::ty_i8);
    } else if eat_word(p, "i16") {
        t = ast::ty_machine(ast::ty_i16);
    } else if eat_word(p, "i32") {
        t = ast::ty_machine(ast::ty_i32);
    } else if eat_word(p, "i64") {
        t = ast::ty_machine(ast::ty_i64);
    } else if eat_word(p, "u8") {
        t = ast::ty_machine(ast::ty_u8);
    } else if eat_word(p, "u16") {
        t = ast::ty_machine(ast::ty_u16);
    } else if eat_word(p, "u32") {
        t = ast::ty_machine(ast::ty_u32);
    } else if eat_word(p, "u64") {
        t = ast::ty_machine(ast::ty_u64);
    } else if eat_word(p, "f32") {
        t = ast::ty_machine(ast::ty_f32);
    } else if eat_word(p, "f64") {
        t = ast::ty_machine(ast::ty_f64);
    } else if p.peek() == token::LPAREN {
        // `()` is nil; one parenthesized type is just that type; two or
        // more comma-separated types form a tuple.
        p.bump();
        if p.peek() == token::RPAREN {
            p.bump();
            t = ast::ty_nil;
        } else {
            let ts = [parse_ty(p, false)];
            while p.peek() == token::COMMA {
                p.bump();
                ts += [parse_ty(p, false)];
            }
            if vec::len(ts) == 1u {
                t = ts[0].node;
            } else { t = ast::ty_tup(ts); }
            expect(p, token::RPAREN);
        }
    } else if p.peek() == token::AT {
        p.bump();
        t = ast::ty_box(parse_mt(p));
    } else if p.peek() == token::TILDE {
        p.bump();
        t = ast::ty_uniq(parse_mt(p));
    } else if p.peek() == token::BINOP(token::STAR) {
        p.bump();
        t = ast::ty_ptr(parse_mt(p));
    } else if p.peek() == token::LBRACE {
        let elems =
            parse_seq(token::LBRACE, token::RBRACE, seq_sep_opt(token::COMMA),
                      parse_ty_field, p);
        let hi = elems.span.hi;
        t = ast::ty_rec(elems.node);
        // A trailing `: constr, ...` wraps the record in a constrained type.
        if p.peek() == token::COLON {
            p.bump();
            t = ast::ty_constr(@spanned(lo, hi, t),
                               parse_type_constraints(p));
        }
    } else if p.peek() == token::LBRACKET {
        expect(p, token::LBRACKET);
        t = ast::ty_vec(parse_mt(p));
        expect(p, token::RBRACKET);
    } else if eat_word(p, "fn") {
        let proto = parse_fn_ty_proto(p);
        t = parse_ty_fn(proto, p);
    } else if eat_word(p, "block") {
        t = parse_ty_fn(ast::proto_block, p);
    } else if eat_word(p, "lambda") {
        t = parse_ty_fn(ast::proto_shared(ast::sugar_sexy), p);
    } else if eat_word(p, "obj") {
        t = parse_ty_obj(p);
    } else if p.peek() == token::MOD_SEP || is_ident(p.peek()) {
        let path = parse_path(p);
        t = ast::ty_path(path, p.get_id());
    } else { p.fatal("expecting type"); }
    ret parse_ty_postfix(t, p, colons_before_params);
}
// Parses an argument-passing mode sigil: `&` by-mut-ref, `-` by-move,
// `&&` by-ref, `++` by-val, `+` by-copy, nothing means infer.
fn parse_arg_mode(p: parser) -> ast::mode {
    if eat(p, token::BINOP(token::AND)) { ast::by_mut_ref }
    else if eat(p, token::BINOP(token::MINUS)) { ast::by_move }
    else if eat(p, token::ANDAND) { ast::by_ref }
    else if eat(p, token::BINOP(token::PLUS)) {
        if eat(p, token::BINOP(token::PLUS)) { ast::by_val }
        else { ast::by_copy }
    }
    else { ast::mode_infer }
}

// Parses a fn argument: `[mode] ident : type`.
fn parse_arg(p: parser) -> ast::arg {
    let m = parse_arg_mode(p);
    let i = parse_value_ident(p);
    expect(p, token::COLON);
    let t = parse_ty(p, false);
    ret {mode: m, ty: t, ident: i, id: p.get_id()};
}

// Parses a fn-block argument; the type is left as ty_infer.
fn parse_fn_block_arg(p: parser) -> ast::arg {
    let m = parse_arg_mode(p);
    let i = parse_value_ident(p);
    let t = @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer);
    ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parses elements with `f` until a closing `>` (or a `>>`/`>>>` shift
// token that contains one) is next, without consuming the terminator.
fn parse_seq_to_before_gt<copy T>(sep: option::t<token::token>,
                                  f: block(parser) -> T,
                                  p: parser) -> [T] {
    let first = true;
    let v = [];
    while p.peek() != token::GT && p.peek() != token::BINOP(token::LSR) &&
          p.peek() != token::BINOP(token::ASR) {
        alt sep {
          some(t) { if first { first = false; } else { expect(p, t); } }
          _ { }
        }
        v += [f(p)];
    }
    ret v;
}

// As above, but also consumes the closing `>` (splitting shift tokens).
fn parse_seq_to_gt<copy T>(sep: option::t<token::token>,
                           f: block(parser) -> T, p: parser) -> [T] {
    let v = parse_seq_to_before_gt(sep, f, p);
    expect_gt(p);
    ret v;
}

// Parses a full `< ... >` delimited sequence, returning it with its span.
fn parse_seq_lt_gt<copy T>(sep: option::t<token::token>,
                           f: block(parser) -> T,
                           p: parser) -> spanned<[T]> {
    let lo = p.get_lo_pos();
    expect(p, token::LT);
    let result = parse_seq_to_before_gt::<T>(sep, f, p);
    let hi = p.get_hi_pos();
    expect_gt(p);
    ret spanned(lo, hi, result);
}

// Parses elements up to the closing token `ket`, then consumes it.
fn parse_seq_to_end<copy T>(ket: token::token, sep: seq_sep,
                            f: block(parser) -> T, p: parser) -> [T] {
    let val = parse_seq_to_before_end(ket, sep, f, p);
    p.bump();
    ret val;
}
// Separator policy for delimited sequences.
type seq_sep = {
    sep: option::t<token::token>,
    trailing_opt: bool // is trailing separator optional?
};

// Separator `t` required between elements, no trailing separator allowed.
fn seq_sep(t: token::token) -> seq_sep {
    ret {sep: option::some(t), trailing_opt: false};
}

// Separator `t` between elements, trailing separator permitted.
fn seq_sep_opt(t: token::token) -> seq_sep {
    ret {sep: option::some(t), trailing_opt: true};
}

// No separator between elements at all.
fn seq_sep_none() -> seq_sep {
    ret {sep: option::none, trailing_opt: false};
}
// Parses elements with `f`, enforcing the separator policy, until `ket`
// is next; the closing token itself is not consumed.
fn parse_seq_to_before_end<copy T>(ket: token::token,
                                   sep: seq_sep,
                                   f: block(parser) -> T, p: parser) -> [T] {
    let first: bool = true;
    let v: [T] = [];
    while p.peek() != ket {
        alt sep.sep {
          some(t) { if first { first = false; } else { expect(p, t); } }
          _ { }
        }
        // Allow `sep ket` when the policy says trailing separators are ok.
        if sep.trailing_opt && p.peek() == ket { break; }
        v += [f(p)];
    }
    ret v;
}

// Parses a full `bra ... ket` delimited sequence, returning it with the
// span from the opening to the closing token.
fn parse_seq<copy T>(bra: token::token, ket: token::token,
                     sep: seq_sep, f: block(parser) -> T,
                     p: parser) -> spanned<[T]> {
    let lo = p.get_lo_pos();
    expect(p, bra);
    let result = parse_seq_to_before_end::<T>(ket, sep, f, p);
    let hi = p.get_hi_pos();
    p.bump();
    ret spanned(lo, hi, result);
}
// Maps a literal token to its AST literal; `(` expects `)` and yields nil.
fn lit_from_token(p: parser, tok: token::token) -> ast::lit_ {
    alt tok {
      token::LIT_INT(i) { ast::lit_int(i) }
      token::LIT_UINT(u) { ast::lit_uint(u) }
      token::LIT_FLOAT(s) { ast::lit_float(p.get_str(s)) }
      token::LIT_MACH_INT(tm, i) { ast::lit_mach_int(tm, i) }
      token::LIT_MACH_FLOAT(tm, s) { ast::lit_mach_float(tm, p.get_str(s)) }
      token::LIT_CHAR(c) { ast::lit_char(c) }
      token::LIT_STR(s) { ast::lit_str(p.get_str(s)) }
      token::LPAREN. { expect(p, token::RPAREN); ast::lit_nil }
      _ { unexpected(p, tok); }
    }
}

// Parses a literal expression; `true`/`false` are handled as words, all
// other literals come directly from the token stream.
fn parse_lit(p: parser) -> ast::lit {
    let sp = p.get_span();
    let lit = if eat_word(p, "true") {
        ast::lit_bool(true)
    } else if eat_word(p, "false") {
        ast::lit_bool(false)
    } else {
        let tok = p.peek();
        p.bump();
        lit_from_token(p, tok)
    };
    ret {node: lit, span: sp};
}
// True if the token is any identifier.
fn is_ident(t: token::token) -> bool {
    alt t { token::IDENT(_, _) { ret true; } _ { } }
    ret false;
}

// True if the next token is a plain (non-quoted) identifier.
fn is_plain_ident(p: parser) -> bool {
    ret alt p.peek() { token::IDENT(_, false) { true } _ { false } };
}
// Parses a (possibly `::`-prefixed global) path of identifiers separated
// by `::`.  Stops before `::<` so the caller can parse type parameters.
fn parse_path(p: parser) -> ast::path {
    let lo = p.get_lo_pos();
    let hi = lo;

    let global;
    if p.peek() == token::MOD_SEP {
        global = true;
        p.bump();
    } else { global = false; }

    let ids: [ast::ident] = [];
    while true {
        alt p.peek() {
          token::IDENT(i, _) {
            hi = p.get_hi_pos();
            ids += [p.get_str(i)];
            // (hi is assigned twice here; the second write is redundant
            // but harmless)
            hi = p.get_hi_pos();
            p.bump();
            if p.peek() == token::MOD_SEP && p.look_ahead(1u) != token::LT {
                p.bump();
            } else { break; }
          }
          _ { break; }
        }
    }
    ret spanned(lo, hi, {global: global, idents: ids, types: []});
}

// Parses a path followed by an optional `::<T, ...>` type-parameter
// substitution, folding the parameters into the path node.
fn parse_path_and_ty_param_substs(p: parser) -> ast::path {
    let lo = p.get_lo_pos();
    let path = parse_path(p);
    if p.peek() == token::MOD_SEP {
        p.bump();
        let seq =
            parse_seq_lt_gt(some(token::COMMA), {|p| parse_ty(p, false)}, p);
        let hi = seq.span.hi;
        path =
            spanned(lo, hi,
                    {global: path.node.global,
                     idents: path.node.idents,
                     types: seq.node});
    }
    ret path;
}
// Parses an optional mutability keyword: `mutable`, `const` (maybe-mut),
// or nothing (immutable).
fn parse_mutability(p: parser) -> ast::mutability {
    if eat_word(p, "mutable") {
        ast::mut
    } else if eat_word(p, "const") {
        ast::maybe_mut
    } else {
        ast::imm
    }
}

// Parses a record-expression field: `[mutability] ident sep expr`.
fn parse_field(p: parser, sep: token::token) -> ast::field {
    let lo = p.get_lo_pos();
    let m = parse_mutability(p);
    let i = parse_ident(p);
    expect(p, sep);
    let e = parse_expr(p);
    ret spanned(lo, e.span.hi, {mut: m, ident: i, expr: e});
}

// Builds an expression node with a fresh id and the given span.
fn mk_expr(p: parser, lo: uint, hi: uint, node: ast::expr_) -> @ast::expr {
    ret @{id: p.get_id(), node: node, span: ast_util::mk_sp(lo, hi)};
}

// Builds a macro-invocation expression node with a fresh id.
fn mk_mac_expr(p: parser, lo: uint, hi: uint, m: ast::mac_) -> @ast::expr {
    ret @{id: p.get_id(),
          node: ast::expr_mac({node: m, span: ast_util::mk_sp(lo, hi)}),
          span: ast_util::mk_sp(lo, hi)};
}

// True for `|` or `||`, which begin a fn-block expression.
fn is_bar(t: token::token) -> bool {
    alt t { token::BINOP(token::OR.) | token::OROR. { true } _ { false } }
}
fn parse_bottom_expr(p: parser) -> @ast::expr {
let lo = p.get_lo_pos();
let hi = p.get_hi_pos();
let ex: ast::expr_;
if p.peek() == token::LPAREN {
p.bump();
if p.peek() == token::RPAREN {
hi = p.get_hi_pos();
p.bump();
let lit = @spanned(lo, hi, ast::lit_nil);
ret mk_expr(p, lo, hi, ast::expr_lit(lit));
}
let es = [parse_expr(p)];
while p.peek() == token::COMMA { p.bump(); es += [parse_expr(p)]; }
hi = p.get_hi_pos();
expect(p, token::RPAREN);
if vec::len(es) == 1u {
ret mk_expr(p, lo, hi, es[0].node);
} else { ret mk_expr(p, lo, hi, ast::expr_tup(es)); }
} else if p.peek() == token::LBRACE {
p.bump();
if is_word(p, "mutable") ||
is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
let fields = [parse_field(p, token::COLON)];
let base = none;
while p.peek() != token::RBRACE {
if eat_word(p, "with") { base = some(parse_expr(p)); break; }
expect(p, token::COMMA);
if p.peek() == token::RBRACE {
// record ends by an optional trailing comma
break;
}
fields += [parse_field(p, token::COLON)];
}
hi = p.get_hi_pos();
expect(p, token::RBRACE);
ex = ast::expr_rec(fields, base);
} else if is_bar(p.peek()) {
ret parse_fn_block_expr(p);
} else {
let blk = parse_block_tail(p, lo, ast::default_blk);
ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
}
} else if eat_word(p, "if") {
ret parse_if_expr(p);
} else if eat_word(p, "for") {
ret parse_for_expr(p);
} else if eat_word(p, "while") {
ret parse_while_expr(p);
} else if eat_word(p, "do") {
ret parse_do_while_expr(p);
} else if eat_word(p, "alt") {
ret parse_alt_expr(p);
/*
} else if (eat_word(p, "spawn")) {
ret parse_spawn_expr(p);
*/
} else if eat_word(p, "fn") {
let proto = parse_fn_anon_proto(p);
ret parse_fn_expr(p, proto);
} else if eat_word(p, "block") {
ret parse_fn_expr(p, ast::proto_block);
} else if eat_word(p, "lambda") {
ret parse_fn_expr(p, ast::proto_shared(ast::sugar_sexy));
} else if eat_word(p, "unchecked") {
ret parse_block_expr(p, lo, ast::unchecked_blk);
} else if eat_word(p, "unsafe") {
ret parse_block_expr(p, lo, ast::unsafe_blk);
} else if p.peek() == token::LBRACKET {
p.bump();
let mut = parse_mutability(p);
let es =
parse_seq_to_end(token::RBRACKET, seq_sep(token::COMMA),
parse_expr, p);
ex = ast::expr_vec(es, mut);
} else if p.peek() == token::POUND_LT {
p.bump();
let ty = parse_ty(p, false);
expect(p, token::GT);
/* hack: early return to take advantage of specialized function */
ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_embed_type(ty));
} else if p.peek() == token::POUND_LBRACE {
p.bump();
let blk = ast::mac_embed_block(
parse_block_tail(p, lo, ast::default_blk));
ret mk_mac_expr(p, lo, p.get_hi_pos(), blk);
} else if p.peek() == token::ELLIPSIS {
p.bump();
ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_ellipsis);
} else if eat_word(p, "obj") {
// Anonymous object
// Only make people type () if they're actually adding new fields
let fields: option::t<[ast::anon_obj_field]> = none;
if p.peek() == token::LPAREN {
p.bump();
fields =
some(parse_seq_to_end(token::RPAREN, seq_sep(token::COMMA),
parse_anon_obj_field, p));
}
let meths: [@ast::method] = [];
let inner_obj: option::t<@ast::expr> = none;
expect(p, token::LBRACE);
while p.peek() != token::RBRACE {
if eat_word(p, "with") {
inner_obj = some(parse_expr(p));
} else { meths += [parse_method(p)]; }
}
hi = p.get_hi_pos();
expect(p, token::RBRACE);
// fields and methods may be *additional* or *overriding* fields
// and methods if there's a inner_obj, or they may be the *only*
// fields and methods if there's no inner_obj.
// We don't need to pull ".node" out of fields because it's not a
// "spanned".
let ob = {fields: fields, methods: meths, inner_obj: inner_obj};
ex = ast::expr_anon_obj(ob);
} else if eat_word(p, "bind") {
let e = parse_expr_res(p, RESTRICT_NO_CALL_EXPRS);
fn parse_expr_opt(p: parser) -> option::t<@ast::expr> {
alt p.peek() {
token::UNDERSCORE. { p.bump(); ret none; }
_ { ret some(parse_expr(p)); }
}
}
let es =
parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
parse_expr_opt, p);
hi = es.span.hi;
ex = ast::expr_bind(e, es.node);
} else if p.peek() == token::POUND {
let ex_ext = parse_syntax_ext(p);
hi = ex_ext.span.hi;
ex = ex_ext.node;
} else if eat_word(p, "fail") {
if can_begin_expr(p.peek()) {
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_fail(some(e));
} else { ex = ast::expr_fail(none); }
} else if eat_word(p, "log") {
let e = parse_expr(p);
ex = ast::expr_log(1, e);
hi = e.span.hi;
} else if eat_word(p, "log_err") {
let e = parse_expr(p);
ex = ast::expr_log(0, e);
hi = e.span.hi;
} else if eat_word(p, "assert") {
let e = parse_expr(p);
ex = ast::expr_assert(e);
hi = e.span.hi;
} else if eat_word(p, "check") {
/* Should be a predicate (pure boolean function) applied to
arguments that are all either slot variables or literals.
but the typechecker enforces that. */
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_check(ast::checked_expr, e);
} else if eat_word(p, "claim") {
/* Same rules as check, except that if check-claims
is enabled (a command-line flag), then the parser turns
claims into check */
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_check(ast::claimed_expr, e);
} else if eat_word(p, "ret") {
if can_begin_expr(p.peek()) {
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_ret(some(e));
} else { ex = ast::expr_ret(none); }
} else if eat_word(p, "break") {
ex = ast::expr_break;
hi = p.get_hi_pos();
} else if eat_word(p, "cont") {
ex = ast::expr_cont;
hi = p.get_hi_pos();
} else if eat_word(p, "be") {
let e = parse_expr(p);
// FIXME: Is this the right place for this check?
if /*check*/ast_util::is_call_expr(e) {
hi = e.span.hi;
ex = ast::expr_be(e);
} else { p.fatal("Non-call expression in tail call"); }
} else if eat_word(p, "copy") {
let e = parse_expr(p);
ex = ast::expr_copy(e);
hi = e.span.hi;
} else if eat_word(p, "self") {
expect(p, token::DOT);
// The rest is a call expression.
let f: @ast::expr = parse_self_method(p);
let es =
parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
parse_expr, p);
hi = es.span.hi;
ex = ast::expr_call(f, es.node, false);
} else if p.peek() == token::MOD_SEP ||
is_ident(p.peek()) && !is_word(p, "true") &&
!is_word(p, "false") {
check_bad_word(p);
let pth = parse_path_and_ty_param_substs(p);
hi = pth.span.hi;
ex = ast::expr_path(pth);
} else {
let lit = parse_lit(p);
hi = lit.span.hi;
ex = ast::expr_lit(@lit);
}
ret mk_expr(p, lo, hi, ex);
}
// Parse a brace-delimited block (the '{' has NOT been consumed yet) and
// wrap it as an expr_block expression. `blk_mode` selects the checkedness
// (default / unchecked / unsafe) recorded on the block.
fn parse_block_expr(p: parser,
                    lo: uint,
                    blk_mode: ast::blk_check_mode) -> @ast::expr {
    expect(p, token::LBRACE);
    let blk = parse_block_tail(p, lo, blk_mode);
    ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
}
// Parse a syntax extension invocation starting at the '#' token; consumes
// the POUND and delegates to parse_syntax_ext_naked for the rest.
fn parse_syntax_ext(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    expect(p, token::POUND);
    ret parse_syntax_ext_naked(p, lo);
}
// Parse a syntax extension invocation after the leading '#' has already
// been consumed: a path naming the expander, then a parenthesized or
// bracketed comma-separated argument list, packaged as a mac_invoc whose
// argument is a single immutable-vec expression.
fn parse_syntax_ext_naked(p: parser, lo: uint) -> @ast::expr {
    let pth = parse_path(p);
    if vec::len(pth.node.idents) == 0u {
        p.fatal("expected a syntax expander name");
    }
    //temporary for a backwards-compatible cycle:
    let sep = seq_sep(token::COMMA);
    // Accept either (...) or [...] argument lists.
    let es =
        if p.peek() == token::LPAREN {
            parse_seq(token::LPAREN, token::RPAREN, sep, parse_expr, p)
        } else {
            parse_seq(token::LBRACKET, token::RBRACKET, sep, parse_expr, p)
        };
    let hi = es.span.hi;
    // The arguments are handed to the expander as one vector expression.
    let e = mk_expr(p, es.span.lo, hi, ast::expr_vec(es.node, ast::imm));
    ret mk_mac_expr(p, lo, hi, ast::mac_invoc(pth, e, none));
}
// Parse the method name following `self.` and build an expr_self_method
// node spanning just that identifier.
fn parse_self_method(p: parser) -> @ast::expr {
    let sp = p.get_span();
    let f_name: ast::ident = parse_ident(p);
    ret mk_expr(p, sp.lo, sp.hi, ast::expr_self_method(f_name));
}
// Parse a bottom (primary) expression and, if it produces a value, any
// trailing `.field`, `[index]`, or `(args)` suffixes attached to it.
fn parse_dot_or_call_expr(p: parser) -> @ast::expr {
    let b = parse_bottom_expr(p);
    if expr_has_value(b) { parse_dot_or_call_expr_with(p, b) }
    else { b }
}
// Given an already-parsed base expression `e`, repeatedly absorb postfix
// suffixes — call `(...)`, index `[...]`, field `.ident` — folding each
// one into a new expression whose span starts at the base's lo. Stops at
// the first token that is not a suffix (or at '(' when call expressions
// are restricted, e.g. inside `bind`).
fn parse_dot_or_call_expr_with(p: parser, e: @ast::expr) -> @ast::expr {
    let lo = e.span.lo;
    let hi = e.span.hi;
    let e = e;
    while true {
        alt p.peek() {
          token::LPAREN. {
            if p.get_restriction() == RESTRICT_NO_CALL_EXPRS {
                // Caller forbade call suffixes; hand back what we have.
                ret e;
            } else {
                // Call expr.
                let es = parse_seq(token::LPAREN, token::RPAREN,
                                   seq_sep(token::COMMA), parse_expr, p);
                hi = es.span.hi;
                let nd = ast::expr_call(e, es.node, false);
                e = mk_expr(p, lo, hi, nd);
            }
          }
          token::LBRACKET. {
            p.bump();
            let ix = parse_expr(p);
            hi = ix.span.hi;
            expect(p, token::RBRACKET);
            e = mk_expr(p, lo, hi, ast::expr_index(e, ix));
          }
          token::DOT. {
            p.bump();
            alt p.peek() {
              token::IDENT(i, _) {
                // Record hi before bumping past the field name.
                hi = p.get_hi_pos();
                p.bump();
                e = mk_expr(p, lo, hi, ast::expr_field(e, p.get_str(i)));
              }
              t { unexpected(p, t); }
            }
          }
          _ { ret e; }
        }
    }
    ret e;
}
// Parse a prefix (unary) expression: `!e`, `-e`, `*e`, `@[mut] e`,
// `~[mut] e`; anything else falls through to dot/call parsing. Prefix
// operators recurse into parse_prefix_expr so they right-associate.
fn parse_prefix_expr(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let ex;
    alt p.peek() {
      token::NOT. {
        p.bump();
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::not, e);
      }
      token::BINOP(b) {
        // Only '-' and '*' double as prefix operators among the binops.
        alt b {
          token::MINUS. {
            p.bump();
            let e = parse_prefix_expr(p);
            hi = e.span.hi;
            ex = ast::expr_unary(ast::neg, e);
          }
          token::STAR. {
            p.bump();
            let e = parse_prefix_expr(p);
            hi = e.span.hi;
            ex = ast::expr_unary(ast::deref, e);
          }
          _ { ret parse_dot_or_call_expr(p); }
        }
      }
      token::AT. {
        // Shared box allocation, with optional mutability qualifier.
        p.bump();
        let m = parse_mutability(p);
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::box(m), e);
      }
      token::TILDE. {
        // Unique box allocation, with optional mutability qualifier.
        p.bump();
        let m = parse_mutability(p);
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::uniq(m), e);
      }
      _ { ret parse_dot_or_call_expr(p); }
    }
    ret mk_expr(p, lo, hi, ex);
}
// Parse a binop expression optionally followed by `? then : else`,
// producing an expr_ternary when the '?' is present.
fn parse_ternary(p: parser) -> @ast::expr {
    let cond_expr = parse_binops(p);
    if p.peek() == token::QUES {
        p.bump();
        let then_expr = parse_expr(p);
        expect(p, token::COLON);
        let else_expr = parse_expr(p);
        ret mk_expr(p, cond_expr.span.lo, else_expr.span.hi,
                    ast::expr_ternary(cond_expr, then_expr, else_expr));
    } else { ret cond_expr; }
}
type op_spec = {tok: token::token, op: ast::binop, prec: int};
// FIXME make this a const, don't store it in parser state
// Builds the binary-operator precedence table consumed by
// parse_more_binops; ordered roughly from tightest (mul/div, 11) to
// loosest (||, 1). `as` is handled separately at precedence 5.
fn prec_table() -> @[op_spec] {
    ret @[{tok: token::BINOP(token::STAR), op: ast::mul, prec: 11},
          {tok: token::BINOP(token::SLASH), op: ast::div, prec: 11},
          {tok: token::BINOP(token::PERCENT), op: ast::rem, prec: 11},
          {tok: token::BINOP(token::PLUS), op: ast::add, prec: 10},
          {tok: token::BINOP(token::MINUS), op: ast::sub, prec: 10},
          {tok: token::BINOP(token::LSL), op: ast::lsl, prec: 9},
          {tok: token::BINOP(token::LSR), op: ast::lsr, prec: 9},
          {tok: token::BINOP(token::ASR), op: ast::asr, prec: 9},
          {tok: token::BINOP(token::AND), op: ast::bitand, prec: 8},
          {tok: token::BINOP(token::CARET), op: ast::bitxor, prec: 6},
          {tok: token::BINOP(token::OR), op: ast::bitor, prec: 6},
          // 'as' sits between here with 5
          {tok: token::LT, op: ast::lt, prec: 4},
          {tok: token::LE, op: ast::le, prec: 4},
          {tok: token::GE, op: ast::ge, prec: 4},
          {tok: token::GT, op: ast::gt, prec: 4},
          {tok: token::EQEQ, op: ast::eq, prec: 3},
          {tok: token::NE, op: ast::ne, prec: 3},
          {tok: token::ANDAND, op: ast::and, prec: 2},
          {tok: token::OROR, op: ast::or, prec: 1}];
}
// Entry point for binary-operator expression parsing: parse one prefix
// expression, then extend it with operators of any precedence (min 0).
fn parse_binops(p: parser) -> @ast::expr {
    ret parse_more_binops(p, parse_prefix_expr(p), 0);
}
// Precedence of unary operators (binds tighter than every binop).
const unop_prec: int = 100;
// Precedence of the `as` cast operator (between bitor and comparisons).
const as_prec: int = 5;
// Precedence floor used for the `?:` ternary.
const ternary_prec: int = 0;
// Precedence-climbing loop: given a parsed `lhs`, consume any binary
// operator whose precedence exceeds `min_prec`, parse its right operand
// at that operator's precedence, and recurse to fold further operators.
// Also handles a lexer quirk where `-<lit>` arrives fused as one token
// (maybe_untangle_minus_from_lit splits it back into MINUS + literal),
// and the `as` cast at its fixed precedence.
fn parse_more_binops(p: parser, lhs: @ast::expr, min_prec: int) ->
   @ast::expr {
    // Expressions with no value (e.g. diverging calls) end the chain.
    if !expr_has_value(lhs) { ret lhs; }
    let peeked = p.peek();
    let lit_after = alt lexer::maybe_untangle_minus_from_lit(p.get_reader(),
                                                             peeked) {
      some(tok) {
        // The token was a negative literal glued to '-': treat the '-'
        // as the operator and keep the literal as the ready-made rhs.
        peeked = token::BINOP(token::MINUS);
        let lit = @{node: lit_from_token(p, tok), span: p.get_span()};
        some(mk_expr(p, p.get_lo_pos(), p.get_hi_pos(), ast::expr_lit(lit)))
      }
      none. { none }
    };
    for cur: op_spec in *p.get_prec_table() {
        if cur.prec > min_prec && cur.tok == peeked {
            p.bump();
            let expr = alt lit_after {
              some(ex) { ex }
              _ { parse_prefix_expr(p) }
            };
            let rhs = parse_more_binops(p, expr, cur.prec);
            let bin = mk_expr(p, lhs.span.lo, rhs.span.hi,
                              ast::expr_binary(cur.op, lhs, rhs));
            // The folded binary becomes the new lhs at the same floor.
            ret parse_more_binops(p, bin, min_prec);
        }
    }
    if as_prec > min_prec && eat_word(p, "as") {
        let rhs = parse_ty(p, true);
        let _as =
            mk_expr(p, lhs.span.lo, rhs.span.hi, ast::expr_cast(lhs, rhs));
        ret parse_more_binops(p, _as, min_prec);
    }
    ret lhs;
}
// Parse an assignment-level expression: a ternary optionally followed by
// `=`, a compound `op=`, move `<-`, or swap `<->`, each of which parses a
// full expression on the right (so assignment right-associates).
fn parse_assign_expr(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let lhs = parse_ternary(p);
    alt p.peek() {
      token::EQ. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign(lhs, rhs));
      }
      token::BINOPEQ(op) {
        p.bump();
        let rhs = parse_expr(p);
        // Map the lexer's binop token to the AST operator; `add` is just
        // a placeholder initializer overwritten by the alt below.
        let aop = ast::add;
        alt op {
          token::PLUS. { aop = ast::add; }
          token::MINUS. { aop = ast::sub; }
          token::STAR. { aop = ast::mul; }
          token::SLASH. { aop = ast::div; }
          token::PERCENT. { aop = ast::rem; }
          token::CARET. { aop = ast::bitxor; }
          token::AND. { aop = ast::bitand; }
          token::OR. { aop = ast::bitor; }
          token::LSL. { aop = ast::lsl; }
          token::LSR. { aop = ast::lsr; }
          token::ASR. { aop = ast::asr; }
        }
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign_op(aop, lhs, rhs));
      }
      token::LARROW. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_move(lhs, rhs));
      }
      token::DARROW. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_swap(lhs, rhs));
      }
      _ {/* fall through */ }
    }
    ret lhs;
}
// Shared worker for `if` and `if check`: parses the condition, the then
// block, and an optional `else`, returning the pieces plus the overall
// span. The `if` keyword has already been consumed by the caller.
fn parse_if_expr_1(p: parser) ->
   {cond: @ast::expr,
    then: ast::blk,
    els: option::t<@ast::expr>,
    lo: uint,
    hi: uint} {
    let lo = p.get_last_lo_pos();
    let cond = parse_expr(p);
    let thn = parse_block(p);
    let els: option::t<@ast::expr> = none;
    let hi = thn.span.hi;
    if eat_word(p, "else") {
        let elexpr = parse_else_expr(p);
        els = some(elexpr);
        hi = elexpr.span.hi;
    } else if !option::is_none(thn.node.expr) {
        // An else-less `if` whose then-block ends in a tail expression
        // would be a value with no alternative arm — reject it here.
        let sp = option::get(thn.node.expr).span;
        p.span_fatal(sp, "`if` without `else` can not produce a result");
        //TODO: If a suggestion mechanism appears, suggest that the
        //user may have forgotten a ';'
    }
    ret {cond: cond, then: thn, els: els, lo: lo, hi: hi};
}
// Parse an `if` expression; `if check <expr>` builds an expr_if_check
// node, a plain `if` builds expr_if. The `if` keyword itself was
// consumed by the caller.
fn parse_if_expr(p: parser) -> @ast::expr {
    let chk = eat_word(p, "check");
    let q = parse_if_expr_1(p);
    if chk {
        ret mk_expr(p, q.lo, q.hi, ast::expr_if_check(q.cond, q.then, q.els));
    }
    ret mk_expr(p, q.lo, q.hi, ast::expr_if(q.cond, q.then, q.els));
}
// Parse an anonymous fn expression (after the `fn`/`block`/`lambda`
// keyword): an impure declaration followed by a block body, tagged with
// the caller-supplied calling protocol.
fn parse_fn_expr(p: parser, proto: ast::proto) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_fn_decl(p, ast::impure_fn, ast::il_normal);
    let body = parse_block(p);
    let _fn = {decl: decl, proto: proto, body: body};
    ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parse a `{|args| ...}` block-closure expression: the |...| argument
// list, then the remainder of the surrounding block as the body. Always
// uses proto_block.
fn parse_fn_block_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_fn_block_decl(p);
    let body = parse_block_tail(p, lo, ast::default_blk);
    let _fn = {decl: decl, proto: ast::proto_block, body: body};
    ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parse the expression after `else`: either a chained `else if` or a
// plain block wrapped as expr_block.
fn parse_else_expr(p: parser) -> @ast::expr {
    if eat_word(p, "if") {
        ret parse_if_expr(p);
    } else {
        let blk = parse_block(p);
        ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
    }
}
// Parse `for <local> in <seq> <block>`; the loop body must not produce
// a value (parse_block_no_value enforces that).
fn parse_for_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_local(p, false);
    expect_word(p, "in");
    let seq = parse_expr(p);
    let body = parse_block_no_value(p);
    let hi = body.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_for(decl, seq, body));
}
// Parse `while <cond> <block>`; the body must not produce a value.
fn parse_while_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let cond = parse_expr(p);
    let body = parse_block_no_value(p);
    let hi = body.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_while(cond, body));
}
// Parse `do <block> while <cond>`; the `do` keyword was consumed by the
// caller, and the body must not produce a value.
fn parse_do_while_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let body = parse_block_no_value(p);
    expect_word(p, "while");
    let cond = parse_expr(p);
    let hi = cond.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_do_while(body, cond));
}
// Parse an `alt` (match) expression: discriminant, then `{` arm* `}`.
// Each arm is one or more |-separated patterns, an optional `when` guard,
// and a block body.
fn parse_alt_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let discriminant = parse_expr(p);
    expect(p, token::LBRACE);
    let arms: [ast::arm] = [];
    while p.peek() != token::RBRACE {
        let pats = parse_pats(p);
        let guard = none;
        if eat_word(p, "when") { guard = some(parse_expr(p)); }
        let blk = parse_block(p);
        arms += [{pats: pats, guard: guard, body: blk}];
    }
    // Record hi at the '}' before bumping past it.
    let hi = p.get_hi_pos();
    p.bump();
    ret mk_expr(p, lo, hi, ast::expr_alt(discriminant, arms));
}
// Parse a full expression with no parsing restrictions in effect.
fn parse_expr(p: parser) -> @ast::expr {
    ret parse_expr_res(p, UNRESTRICTED);
}
// Parse an expression under restriction `r` (e.g. forbidding call
// suffixes inside `bind`), restoring the previous restriction afterward.
fn parse_expr_res(p: parser, r: restriction) -> @ast::expr {
    let old = p.get_restriction();
    p.restrict(r);
    let e = parse_assign_expr(p);
    p.restrict(old);
    ret e;
}
// Parse an optional local-variable initializer: `= expr` (assign) or
// `<- expr` (move); returns none when neither token is present.
fn parse_initializer(p: parser) -> option::t<ast::initializer> {
    alt p.peek() {
      token::EQ. {
        p.bump();
        ret some({op: ast::init_assign, expr: parse_expr(p)});
      }
      token::LARROW. {
        p.bump();
        ret some({op: ast::init_move, expr: parse_expr(p)});
      }
      // Now that the the channel is the first argument to receive,
      // combining it with an initializer doesn't really make sense.
      // case (token::RECV) {
      //     p.bump();
      //     ret some(rec(op = ast::init_recv,
      //                  expr = parse_expr(p)));
      // }
      _ {
        ret none;
      }
    }
}
// Parse one or more |-separated patterns (an alt arm's pattern list).
fn parse_pats(p: parser) -> [@ast::pat] {
    let pats = [];
    while true {
        pats += [parse_pat(p)];
        if p.peek() == token::BINOP(token::OR) { p.bump(); } else { break; }
    }
    ret pats;
}
// Parse a single pattern. Handles: wildcard `_`, box `@pat`, unique
// `~pat`, record `{field: pat, ..., _}`, nil `()` and tuple `(p, ...)`
// patterns, literals and `lit to lit` ranges, plain identifier bindings,
// and tag (enum-variant) patterns with optional argument lists.
fn parse_pat(p: parser) -> @ast::pat {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let pat;
    alt p.peek() {
      token::UNDERSCORE. { p.bump(); pat = ast::pat_wild; }
      token::AT. {
        p.bump();
        let sub = parse_pat(p);
        pat = ast::pat_box(sub);
        hi = sub.span.hi;
      }
      token::TILDE. {
        p.bump();
        let sub = parse_pat(p);
        pat = ast::pat_uniq(sub);
        hi = sub.span.hi;
      }
      token::LBRACE. {
        // Record pattern: comma-separated fields, optionally ending in
        // `_` to mark the pattern as non-exhaustive (etc = true).
        p.bump();
        let fields = [];
        let etc = false;
        let first = true;
        while p.peek() != token::RBRACE {
            if first { first = false; } else { expect(p, token::COMMA); }
            if p.peek() == token::UNDERSCORE {
                p.bump();
                // `_` must be the last entry in the record pattern.
                if p.peek() != token::RBRACE {
                    p.fatal("expecting }, found " +
                                token::to_str(p.get_reader(), p.peek()));
                }
                etc = true;
                break;
            }
            let fieldname = parse_ident(p);
            let subpat;
            if p.peek() == token::COLON {
                p.bump();
                subpat = parse_pat(p);
            } else {
                // Shorthand `{x}` binds field x to a variable x; reject
                // keywords in binding position.
                if p.get_bad_expr_words().contains_key(fieldname) {
                    p.fatal("found " + fieldname + " in binding position");
                }
                subpat =
                    @{id: p.get_id(),
                      node: ast::pat_bind(fieldname),
                      span: ast_util::mk_sp(lo, hi)};
            }
            fields += [{ident: fieldname, pat: subpat}];
        }
        hi = p.get_hi_pos();
        p.bump();
        pat = ast::pat_rec(fields, etc);
      }
      token::LPAREN. {
        p.bump();
        if p.peek() == token::RPAREN {
            // `()` is the nil literal pattern.
            hi = p.get_hi_pos();
            p.bump();
            pat =
                ast::pat_lit(@{node: ast::lit_nil,
                               span: ast_util::mk_sp(lo, hi)});
        } else {
            let fields = [parse_pat(p)];
            while p.peek() == token::COMMA {
                p.bump();
                fields += [parse_pat(p)];
            }
            // A one-element tuple needs a trailing comma to distinguish
            // it from a parenthesized pattern.
            if vec::len(fields) == 1u { expect(p, token::COMMA); }
            hi = p.get_hi_pos();
            expect(p, token::RPAREN);
            pat = ast::pat_tup(fields);
        }
      }
      tok {
        if !is_ident(tok) || is_word(p, "true") || is_word(p, "false") {
            // Literal pattern, optionally extended to a `to` range.
            let lit = parse_lit(p);
            if eat_word(p, "to") {
                let end = parse_lit(p);
                hi = end.span.hi;
                pat = ast::pat_range(@lit, @end);
            } else {
                hi = lit.span.hi;
                pat = ast::pat_lit(@lit);
            }
        } else if is_plain_ident(p) &&
                      // Lone identifier not followed by `.`/`(`/`[` is a
                      // binding; otherwise it starts a path.
                      alt p.look_ahead(1u) {
                        token::DOT. | token::LPAREN. | token::LBRACKET. {
                          false
                        }
                        _ { true }
                      } {
            hi = p.get_hi_pos();
            pat = ast::pat_bind(parse_value_ident(p));
        } else {
            // Tag (variant) pattern: a path, then either `(args)`, a
            // trailing `.` (nullary), or nothing.
            let tag_path = parse_path_and_ty_param_substs(p);
            hi = tag_path.span.hi;
            let args: [@ast::pat];
            alt p.peek() {
              token::LPAREN. {
                let a =
                    parse_seq(token::LPAREN, token::RPAREN,
                              seq_sep(token::COMMA), parse_pat, p);
                args = a.node;
                hi = a.span.hi;
              }
              token::DOT. { args = []; p.bump(); }
              _ { expect(p, token::LPAREN); fail; }
            }
            pat = ast::pat_tag(tag_path, args);
        }
      }
    }
    ret @{id: p.get_id(), node: pat, span: ast_util::mk_sp(lo, hi)};
}
// Parse a local declaration: pattern, optional `: type` (inferred when
// absent), and — when `allow_init` — an optional initializer.
fn parse_local(p: parser, allow_init: bool) -> @ast::local {
    let lo = p.get_lo_pos();
    let pat = parse_pat(p);
    // Default to ty_infer with a zero-width span at `lo`.
    let ty = @spanned(lo, lo, ast::ty_infer);
    if eat(p, token::COLON) { ty = parse_ty(p, false); }
    let init = if allow_init { parse_initializer(p) } else { none };
    ret @spanned(lo, p.get_last_hi_pos(),
                 {ty: ty, pat: pat, init: init, id: p.get_id()});
}
// Parse a `let` declaration: one or more comma-separated locals, each
// optionally prefixed with `&` to select by-reference (let_ref) instead
// of the default copy style.
fn parse_let(p: parser) -> @ast::decl {
    fn parse_let_style(p: parser) -> ast::let_style {
        eat(p, token::BINOP(token::AND)) ? ast::let_ref : ast::let_copy
    }
    let lo = p.get_lo_pos();
    let locals = [(parse_let_style(p), parse_local(p, true))];
    while eat(p, token::COMMA) {
        locals += [(parse_let_style(p), parse_local(p, true))];
    }
    ret @spanned(lo, p.get_last_hi_pos(), ast::decl_local(locals));
}
// Parse one statement, dispatching on whether we are parsing an ordinary
// source file or a crate (directive) file.
fn parse_stmt(p: parser) -> @ast::stmt {
    if p.get_file_type() == SOURCE_FILE {
        ret parse_source_stmt(p);
    } else { ret parse_crate_stmt(p); }
}
// Parse one statement of a crate file: a single crate directive wrapped
// as a stmt_crate_directive node with the directive's own span.
fn parse_crate_stmt(p: parser) -> @ast::stmt {
    let cdir = parse_crate_directive(p, []);
    let lo = cdir.span.lo;
    let hi = cdir.span.hi;
    ret @spanned(lo, hi, ast::stmt_crate_directive(@cdir));
}
// Parse one source-file statement: a `let` declaration, a syntax
// extension, an item declaration, or an expression statement. An
// expression immediately followed by `{|...|` is rewritten into a
// block-call (the closure becomes the call's final argument).
fn parse_source_stmt(p: parser) -> @ast::stmt {
    let lo = p.get_lo_pos();
    if eat_word(p, "let") {
        let decl = parse_let(p);
        ret @spanned(lo, decl.span.hi, ast::stmt_decl(decl, p.get_id()));
    } else {
        let item_attrs;
        // '#' may begin either outer attributes or a syntax extension.
        alt parse_outer_attrs_or_ext(p) {
          none. { item_attrs = []; }
          some(left(attrs)) { item_attrs = attrs; }
          some(right(ext)) {
            ret @spanned(lo, ext.span.hi, ast::stmt_expr(ext, p.get_id()));
          }
        }
        let maybe_item = parse_item(p, item_attrs);
        // If we have attributes then we should have an item
        if vec::len(item_attrs) > 0u {
            alt maybe_item {
              some(_) {/* fallthrough */ }
              _ { ret p.fatal("expected item"); }
            }
        }
        alt maybe_item {
          some(i) {
            let hi = i.span.hi;
            let decl = @spanned(lo, hi, ast::decl_item(i));
            ret @spanned(lo, hi, ast::stmt_decl(decl, p.get_id()));
          }
          none. {
            // Remainder are line-expr stmts.
            let e = parse_expr(p);
            // See if it is a block call
            if expr_has_value(e) && p.peek() == token::LBRACE &&
               is_bar(p.look_ahead(1u)) {
                p.bump();
                let blk = parse_fn_block_expr(p);
                alt e.node {
                  ast::expr_call(f, args, false) {
                    // Append the closure to the existing argument list
                    // and mark the call as a block call.
                    e = @{node: ast::expr_call(f, args + [blk], true)
                             with *e};
                  }
                  _ {
                    // Any other expression becomes the callee of a new
                    // block call taking only the closure.
                    e = mk_expr(p, lo, p.get_last_hi_pos(),
                                ast::expr_call(e, [blk], true));
                  }
                }
            }
            ret @spanned(lo, e.span.hi, ast::stmt_expr(e, p.get_id()));
          }
          _ { p.fatal("expected statement"); }
        }
    }
}
// Decide whether an expression can produce a value: if/alt need some arm
// with a tail expression (and if needs an else); loops and blocks need a
// tail expression; block calls never yield a value; everything else does.
fn expr_has_value(e: @ast::expr) -> bool {
    alt e.node {
      ast::expr_if(_, th, els) | ast::expr_if_check(_, th, els) {
        if option::is_none(els) { false }
        else { !option::is_none(th.node.expr) ||
                  expr_has_value(option::get(els)) }
      }
      ast::expr_alt(_, arms) {
        // Valued if any arm's body ends in a tail expression.
        let found_expr = false;
        for arm in arms {
            if !option::is_none(arm.body.node.expr) { found_expr = true; }
        }
        found_expr
      }
      ast::expr_block(blk) | ast::expr_while(_, blk) |
      ast::expr_for(_, _, blk) | ast::expr_do_while(blk, _) {
        !option::is_none(blk.node.expr)
      }
      // Block calls (trailing-closure form) are statements, not values.
      ast::expr_call(_, _, true) { false }
      _ { true }
    }
}
fn stmt_is_expr(stmt: @ast::stmt) -> bool {
ret alt stmt.node {
ast::stmt_expr(e, _) { expr_has_value(e) }
_ { false }
};
}
// Extract the expression from a valued expression statement, or none.
// The inner alt is deliberately non-exhaustive: stmt_is_expr already
// guarantees the stmt_expr shape on this path.
fn stmt_to_expr(stmt: @ast::stmt) -> option::t<@ast::expr> {
    ret if stmt_is_expr(stmt) {
            alt stmt.node {
              ast::stmt_expr(e, _) { some(e) }
            }
        } else { none };
}
// Whether a statement must be terminated with ';': locals yes, item
// declarations no, expression statements only when the expression
// yields a value.
fn stmt_ends_with_semi(stmt: ast::stmt) -> bool {
    alt stmt.node {
      ast::stmt_decl(d, _) {
        ret alt d.node {
              ast::decl_local(_) { true }
              ast::decl_item(_) { false }
            }
      }
      ast::stmt_expr(e, _) {
        ret expr_has_value(e);
      }
      // We should not be calling this on a cdir.
      ast::stmt_crate_directive(cdir) {
        fail;
      }
    }
}
// Parse a block, honoring an optional leading `unchecked` or `unsafe`
// qualifier before the '{'. Uses tail calls (`be`) into parse_block_tail.
fn parse_block(p: parser) -> ast::blk {
    let lo = p.get_lo_pos();
    if eat_word(p, "unchecked") {
        expect(p, token::LBRACE);
        be parse_block_tail(p, lo, ast::unchecked_blk);
    } else if eat_word(p, "unsafe") {
        expect(p, token::LBRACE);
        be parse_block_tail(p, lo, ast::unsafe_blk);
    } else {
        expect(p, token::LBRACE);
        be parse_block_tail(p, lo, ast::default_blk);
    }
}
// Parse a block and reject it if it ends in a tail expression — used for
// loop bodies, which must not produce a value.
fn parse_block_no_value(p: parser) -> ast::blk {
    let blk = parse_block(p);
    if !option::is_none(blk.node.expr) {
        let sp = option::get(blk.node.expr).span;
        p.span_fatal(sp, "this block must not have a result");
        //TODO: If a suggestion mechanism appears, suggest that the
        //user may have forgotten a ';'
    }
    ret blk;
}
// Precondition: already parsed the '{' or '#{'
// I guess that also means "already parsed the 'impure'" if
// necessary, and this should take a qualifier.
// some blocks start with "#{"...
// Parses the interior of a block up to and including the closing '}':
// leading imports, then statements; a trailing valued expression (with
// no ';') becomes the block's tail expression.
fn parse_block_tail(p: parser, lo: uint, s: ast::blk_check_mode) -> ast::blk {
    let view_items = [], stmts = [], expr = none;
    // Imports are only permitted at the head of the block.
    while is_word(p, "import") { view_items += [parse_view_item(p)]; }
    while p.peek() != token::RBRACE {
        alt p.peek() {
          token::SEMI. {
            p.bump(); // empty
          }
          _ {
            let stmt = parse_stmt(p);
            alt stmt_to_expr(stmt) {
              some(e) {
                // A valued expression statement: ';' keeps it as a
                // statement, '}' makes it the tail expression, anything
                // else is an error unless the statement form tolerates
                // a missing ';'.
                alt p.peek() {
                  token::SEMI. { p.bump(); stmts += [stmt]; }
                  token::RBRACE. { expr = some(e); }
                  t {
                    if stmt_ends_with_semi(*stmt) {
                        p.fatal("expected ';' or '}' after " +
                                    "expression but found " +
                                    token::to_str(p.get_reader(), t));
                    }
                    stmts += [stmt];
                  }
                }
              }
              none. {
                // Not an expression statement.
                stmts += [stmt];
                if p.get_file_type() == SOURCE_FILE &&
                   stmt_ends_with_semi(*stmt) {
                    expect(p, token::SEMI);
                }
              }
            }
          }
        }
    }
    let hi = p.get_hi_pos();
    p.bump();
    let bloc = {view_items: view_items, stmts: stmts, expr: expr,
                id: p.get_id(), rules: s};
    ret spanned(lo, hi, bloc);
}
// Parse one type parameter: an optional kind bound (`send` or `copy`,
// defaulting to noncopyable) followed by the parameter's identifier.
fn parse_ty_param(p: parser) -> ast::ty_param {
    let k = if eat_word(p, "send") { ast::kind_sendable }
            else if eat_word(p, "copy") { ast::kind_copyable }
            else { ast::kind_noncopyable };
    ret {ident: parse_ident(p), kind: k};
}
// Parse an optional `<T, U, ...>` type-parameter list; empty when the
// next token is not '<'.
fn parse_ty_params(p: parser) -> [ast::ty_param] {
    let ty_params: [ast::ty_param] = [];
    if p.peek() == token::LT {
        p.bump();
        ty_params = parse_seq_to_gt(some(token::COMMA), parse_ty_param, p);
    }
    ret ty_params;
}
// Parse a function declaration (everything but the body): parenthesized
// argument list, optional `: constraint` list, and the return type.
// purity/inlineness are supplied by the caller from surrounding keywords.
fn parse_fn_decl(p: parser, purity: ast::purity, il: ast::inlineness) ->
   ast::fn_decl {
    let inputs: ast::spanned<[ast::arg]> =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_arg, p);
    // Use the args list to translate each bound variable
    // mentioned in a constraint to an arg index.
    // Seems weird to do this in the parser, but I'm not sure how else to.
    let constrs = [];
    if p.peek() == token::COLON {
        p.bump();
        constrs = parse_constrs({|x| parse_ty_constr(inputs.node, x) }, p);
    }
    let (ret_style, ret_ty) = parse_ret_ty(p);
    ret {inputs: inputs.node,
         output: ret_ty,
         purity: purity,
         il: il,
         cf: ret_style,
         constraints: constrs};
}
// Parse the `|args|` header of a block closure. `||` (an OROR token) is
// the empty argument list. The return type is always inferred and the
// declaration is always an impure, normal-inline, value-returning fn
// with no constraints.
fn parse_fn_block_decl(p: parser) -> ast::fn_decl {
    let inputs =
        if p.peek() == token::OROR {
            p.bump();
            []
        } else {
            parse_seq(token::BINOP(token::OR), token::BINOP(token::OR),
                      seq_sep(token::COMMA), parse_fn_block_arg, p).node
        };
    ret {inputs: inputs,
         output: @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer),
         purity: ast::impure_fn,
         il: ast::il_normal,
         cf: ast::return_val,
         constraints: []};
}
// Parse a complete function: declaration then block body.
fn parse_fn(p: parser, proto: ast::proto, purity: ast::purity,
            il: ast::inlineness) -> ast::_fn {
    let decl = parse_fn_decl(p, purity, il);
    let body = parse_block(p);
    ret {decl: decl, proto: proto, body: body};
}
// Parse a function item's header: its name and type parameters.
fn parse_fn_header(p: parser) -> {ident: ast::ident, tps: [ast::ty_param]} {
    let id = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    ret {ident: id, tps: ty_params};
}
// Build an item node with a fresh node id and the given span endpoints.
fn mk_item(p: parser, lo: uint, hi: uint, ident: ast::ident, node: ast::item_,
           attrs: [ast::attribute]) -> @ast::item {
    ret @{ident: ident,
          attrs: attrs,
          id: p.get_id(),
          node: node,
          span: ast_util::mk_sp(lo, hi)};
}
// Parse a `fn` item (keyword already consumed): header, then body; the
// item's span runs from the keyword to the end of the body.
fn parse_item_fn(p: parser, purity: ast::purity, proto: ast::proto,
                 attrs: [ast::attribute], il: ast::inlineness) ->
   @ast::item {
    let lo = p.get_last_lo_pos();
    let t = parse_fn_header(p);
    let f = parse_fn(p, proto, purity, il);
    ret mk_item(p, lo, f.body.span.hi, t.ident, ast::item_fn(f, t.tps),
                attrs);
}
// Parse one field of an obj item: `[mutability] name: type`.
fn parse_obj_field(p: parser) -> ast::obj_field {
    let mut = parse_mutability(p);
    let ident = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    ret {mut: mut, ty: ty, ident: ident, id: p.get_id()};
}
// Parse one field of an anonymous obj expression:
// `[mutability] name: type = initializer`.
fn parse_anon_obj_field(p: parser) -> ast::anon_obj_field {
    let mut = parse_mutability(p);
    let ident = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    expect(p, token::EQ);
    let expr = parse_expr(p);
    ret {mut: mut, ty: ty, expr: expr, ident: ident, id: p.get_id()};
}
// Parse one method of an obj: proto keyword, name, then a full impure fn.
fn parse_method(p: parser) -> @ast::method {
    let lo = p.get_lo_pos();
    let proto = parse_method_proto(p);
    let ident = parse_value_ident(p);
    let f = parse_fn(p, proto, ast::impure_fn, ast::il_normal);
    let meth = {ident: ident, meth: f, id: p.get_id()};
    ret @spanned(lo, f.body.span.hi, meth);
}
// Parse an `obj` item (keyword already consumed): name, type params,
// parenthesized field list, then `{ method* }`.
fn parse_item_obj(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let ident = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    let fields: ast::spanned<[ast::obj_field]> =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_obj_field, p);
    let meths: [@ast::method] = [];
    expect(p, token::LBRACE);
    while p.peek() != token::RBRACE { meths += [parse_method(p)]; }
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    let ob: ast::_obj = {fields: fields.node, methods: meths};
    ret mk_item(p, lo, hi, ident, ast::item_obj(ob, ty_params, p.get_id()),
                attrs);
}
// Parse a `resource` item (keyword already consumed): name, type params,
// a single typed argument, and a destructor block. The destructor is
// packaged as a nil-returning, by-ref, impure fn declaration.
fn parse_item_res(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let ident = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    expect(p, token::LPAREN);
    let arg_ident = parse_value_ident(p);
    expect(p, token::COLON);
    let t = parse_ty(p, false);
    expect(p, token::RPAREN);
    // Destructor bodies must not produce a value.
    let dtor = parse_block_no_value(p);
    let decl =
        {inputs:
             [{mode: ast::by_ref, ty: t, ident: arg_ident,
               id: p.get_id()}],
         output: @spanned(lo, lo, ast::ty_nil),
         purity: ast::impure_fn,
         il: ast::il_normal,
         cf: ast::return_val,
         constraints: []};
    let f = {decl: decl, proto: ast::proto_shared(ast::sugar_normal),
             body: dtor};
    // item_res carries two fresh ids: one for the dtor, one for the ctor.
    ret mk_item(p, lo, dtor.span.hi, ident,
                ast::item_res(f, p.get_id(), ty_params, p.get_id()), attrs);
}
// Parse the interior of a module up to (not including) `term`: optional
// view items, then a sequence of attributed items. `first_item_attrs`
// are outer attributes already consumed while scanning for inner attrs;
// they belong to the first item.
fn parse_mod_items(p: parser, term: token::token,
                   first_item_attrs: [ast::attribute]) -> ast::_mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let view_items =
        if vec::len(first_item_attrs) == 0u { parse_view(p) } else { [] };
    let items: [@ast::item] = [];
    let initial_attrs = first_item_attrs;
    while p.peek() != term {
        let attrs = initial_attrs + parse_outer_attributes(p);
        // The pre-consumed attributes only apply to the first item.
        initial_attrs = [];
        alt parse_item(p, attrs) {
          some(i) { items += [i]; }
          _ {
            p.fatal("expected item but found " +
                        token::to_str(p.get_reader(), p.peek()));
          }
        }
    }
    ret {view_items: view_items, items: items};
}
// Parse a `const` item (keyword already consumed): `name: type = expr;`.
fn parse_item_const(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let id = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    expect(p, token::EQ);
    let e = parse_expr(p);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret mk_item(p, lo, hi, id, ast::item_const(ty, e), attrs);
}
// Parse a `mod` item (keyword already consumed): name, then a braced
// body whose inner attributes are merged into the item's attributes.
fn parse_item_mod(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let id = parse_ident(p);
    expect(p, token::LBRACE);
    let inner_attrs = parse_inner_attrs_and_next(p);
    // Outer attrs read past the inner set belong to the first item.
    let first_item_outer_attrs = inner_attrs.next;
    let m = parse_mod_items(p, token::RBRACE, first_item_outer_attrs);
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    ret mk_item(p, lo, hi, id, ast::item_mod(m), attrs + inner_attrs.inner);
}
// Parse a `type` declaration inside a native mod: `type name;` — no
// definition, just an opaque native type.
fn parse_item_native_type(p: parser, attrs: [ast::attribute]) ->
   @ast::native_item {
    let t = parse_type_decl(p);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret @{ident: t.ident,
          attrs: attrs,
          node: ast::native_item_ty,
          id: p.get_id(),
          span: ast_util::mk_sp(t.lo, hi)};
}
// Parse a `fn` declaration inside a native mod: header and declaration
// only, terminated by ';' — native fns have no body.
fn parse_item_native_fn(p: parser, attrs: [ast::attribute],
                        purity: ast::purity) -> @ast::native_item {
    let lo = p.get_last_lo_pos();
    let t = parse_fn_header(p);
    let decl = parse_fn_decl(p, purity, ast::il_normal);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret @{ident: t.ident,
          attrs: attrs,
          node: ast::native_item_fn(decl, t.tps),
          id: p.get_id(),
          span: ast_util::mk_sp(lo, hi)};
}
// Parse one item inside a native mod: `type`, `fn`, `pure fn`, or
// `unsafe fn`; anything else is an error.
fn parse_native_item(p: parser, attrs: [ast::attribute]) ->
   @ast::native_item {
    if eat_word(p, "type") {
        ret parse_item_native_type(p, attrs);
    } else if eat_word(p, "fn") {
        ret parse_item_native_fn(p, attrs, ast::impure_fn);
    } else if eat_word(p, "pure") {
        expect_word(p, "fn");
        ret parse_item_native_fn(p, attrs, ast::pure_fn);
    } else if eat_word(p, "unsafe") {
        expect_word(p, "fn");
        ret parse_item_native_fn(p, attrs, ast::unsafe_fn);
    } else { unexpected(p, p.peek()); }
}
// Parse the interior of a native mod up to '}': optional view items,
// then a sequence of attributed native items. `first_item_attrs` were
// pre-consumed by the caller and attach to the first item.
fn parse_native_mod_items(p: parser, first_item_attrs: [ast::attribute]) ->
   ast::native_mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let view_items =
        if vec::len(first_item_attrs) == 0u {
            parse_native_view(p)
        } else { [] };
    let items: [@ast::native_item] = [];
    let initial_attrs = first_item_attrs;
    while p.peek() != token::RBRACE {
        let attrs = initial_attrs + parse_outer_attributes(p);
        initial_attrs = [];
        items += [parse_native_item(p, attrs)];
    }
    ret {view_items: view_items,
         items: items};
}
// Parse `native mod name { ... }` (the `native` keyword already
// consumed): inner attributes are merged into the item's attributes.
fn parse_item_native_mod(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    expect_word(p, "mod");
    let id = parse_ident(p);
    expect(p, token::LBRACE);
    let more_attrs = parse_inner_attrs_and_next(p);
    let inner_attrs = more_attrs.inner;
    let first_item_outer_attrs = more_attrs.next;
    let m = parse_native_mod_items(p, first_item_outer_attrs);
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    ret mk_item(p, lo, hi, id, ast::item_native_mod(m), attrs + inner_attrs);
}
// Parse the name of a type declaration, returning it with the position
// of the already-consumed `type` keyword.
fn parse_type_decl(p: parser) -> {lo: uint, ident: ast::ident} {
    let lo = p.get_last_lo_pos();
    let id = parse_ident(p);
    ret {lo: lo, ident: id};
}
// Parse a `type` item (keyword already consumed):
// `name<params> = type;`.
fn parse_item_type(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let t = parse_type_decl(p);
    let tps = parse_ty_params(p);
    expect(p, token::EQ);
    let ty = parse_ty(p, false);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret mk_item(p, t.lo, hi, t.ident, ast::item_ty(ty, tps), attrs);
}
// Parse a `tag` (enum) item (keyword already consumed). Two forms:
// newtype syntax `tag name = type;`, which creates a single variant
// named after the tag; and the braced form with ';'-terminated variants,
// each optionally carrying a parenthesized list of argument types.
fn parse_item_tag(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let id = parse_ident(p);
    let ty_params = parse_ty_params(p);
    let variants: [ast::variant] = [];
    // Newtype syntax
    if p.peek() == token::EQ {
        // The tag name doubles as the constructor name, so it must not
        // be a reserved word.
        if p.get_bad_expr_words().contains_key(id) {
            p.fatal("found " + id + " in tag constructor position");
        }
        p.bump();
        let ty = parse_ty(p, false);
        expect(p, token::SEMI);
        let variant =
            spanned(ty.span.lo, ty.span.hi,
                    {name: id,
                     args: [{ty: ty, id: p.get_id()}],
                     id: p.get_id()});
        ret mk_item(p, lo, ty.span.hi, id,
                    ast::item_tag([variant], ty_params), attrs);
    }
    expect(p, token::LBRACE);
    while p.peek() != token::RBRACE {
        let tok = p.peek();
        alt tok {
          token::IDENT(name, _) {
            check_bad_word(p);
            let vlo = p.get_lo_pos();
            p.bump();
            let args: [ast::variant_arg] = [];
            let vhi = p.get_hi_pos();
            alt p.peek() {
              token::LPAREN. {
                let arg_tys = parse_seq(token::LPAREN, token::RPAREN,
                                        seq_sep(token::COMMA),
                                        {|p| parse_ty(p, false)}, p);
                for ty: @ast::ty in arg_tys.node {
                    args += [{ty: ty, id: p.get_id()}];
                }
                vhi = arg_tys.span.hi;
              }
              _ {/* empty */ }
            }
            expect(p, token::SEMI);
            p.get_id();
            let vr = {name: p.get_str(name), args: args, id: p.get_id()};
            variants += [spanned(vlo, vhi, vr)];
          }
          token::RBRACE. {/* empty */ }
          _ {
            p.fatal("expected name of variant or '}' but found " +
                        token::to_str(p.get_reader(), tok));
          }
        }
    }
    let hi = p.get_hi_pos();
    p.bump();
    ret mk_item(p, lo, hi, id, ast::item_tag(variants, ty_params), attrs);
}
// fn items always use the bare protocol; nothing to parse.
fn parse_fn_item_proto(_p: parser) -> ast::proto {
    ast::proto_bare
}
fn parse_fn_ty_proto(p: parser) -> ast::proto {
if p.peek() == token::AT {
p.bump();
ast::proto_shared(ast::sugar_normal)
} else {
ast::proto_bare
}
}
fn parse_fn_anon_proto(p: parser) -> ast::proto {
if p.peek() == token::AT {
p.bump();
ast::proto_shared(ast::sugar_normal)
} else {
ast::proto_bare
}
}
// Methods must be introduced with `fn` and always use the bare protocol.
fn parse_method_proto(p: parser) -> ast::proto {
    if eat_word(p, "fn") {
        ret ast::proto_bare;
    } else { unexpected(p, p.peek()); }
}
// Dispatches on the leading keyword to parse one item (const, fn, mod,
// native, type, tag, obj, resource). Returns none if the next token does
// not start an item. The look_ahead checks let `fn(`/`obj(`/`unsafe {`
// still parse as expressions rather than items.
fn parse_item(p: parser, attrs: [ast::attribute]) -> option::t<@ast::item> {
if eat_word(p, "const") {
ret some(parse_item_const(p, attrs));
} else if eat_word(p, "inline") {
expect_word(p, "fn");
let proto = parse_fn_item_proto(p);
ret some(parse_item_fn(p, ast::impure_fn, proto,
attrs, ast::il_inline));
} else if is_word(p, "fn") && p.look_ahead(1u) != token::LPAREN {
p.bump();
let proto = parse_fn_item_proto(p);
ret some(parse_item_fn(p, ast::impure_fn, proto,
attrs, ast::il_normal));
} else if eat_word(p, "pure") {
expect_word(p, "fn");
let proto = parse_fn_item_proto(p);
ret some(parse_item_fn(p, ast::pure_fn, proto, attrs,
ast::il_normal));
} else if is_word(p, "unsafe") && p.look_ahead(1u) != token::LBRACE {
p.bump();
expect_word(p, "fn");
let proto = parse_fn_item_proto(p);
ret some(parse_item_fn(p, ast::unsafe_fn, proto,
attrs, ast::il_normal));
} else if eat_word(p, "mod") {
ret some(parse_item_mod(p, attrs));
} else if eat_word(p, "native") {
ret some(parse_item_native_mod(p, attrs));
}
if eat_word(p, "type") {
ret some(parse_item_type(p, attrs));
} else if eat_word(p, "tag") {
ret some(parse_item_tag(p, attrs));
} else if is_word(p, "obj") && p.look_ahead(1u) != token::LPAREN {
p.bump();
ret some(parse_item_obj(p, attrs));
} else if eat_word(p, "resource") {
ret some(parse_item_res(p, attrs));
} else { ret none; }
}
// A type to distinguish between the parsing of item attributes or syntax
// extensions, which both begin with token.POUND
type attr_or_ext = option::t<either::t<[ast::attribute], @ast::expr>>;
// After seeing `#`, decide whether it starts an attribute list (`#[...]`)
// or a syntax extension invocation; returns none when neither applies.
fn parse_outer_attrs_or_ext(p: parser) -> attr_or_ext {
if p.peek() == token::POUND {
let lo = p.get_lo_pos();
p.bump();
if p.peek() == token::LBRACKET {
let first_attr = parse_attribute_naked(p, ast::attr_outer, lo);
ret some(left([first_attr] + parse_outer_attributes(p)));
} else if !(p.peek() == token::LT || p.peek() == token::LBRACKET) {
ret some(right(parse_syntax_ext_naked(p, lo)));
} else { ret none; }
} else { ret none; }
}
// Parse attributes that appear before an item
fn parse_outer_attributes(p: parser) -> [ast::attribute] {
let attrs: [ast::attribute] = [];
while p.peek() == token::POUND {
attrs += [parse_attribute(p, ast::attr_outer)];
}
ret attrs;
}
// Parses one `#[...]` attribute starting at the `#`.
fn parse_attribute(p: parser, style: ast::attr_style) -> ast::attribute {
let lo = p.get_lo_pos();
expect(p, token::POUND);
ret parse_attribute_naked(p, style, lo);
}
// Parses the `[meta]` part of an attribute; the caller has already
// consumed the `#` and supplies its lo position.
fn parse_attribute_naked(p: parser, style: ast::attr_style, lo: uint) ->
ast::attribute {
expect(p, token::LBRACKET);
let meta_item = parse_meta_item(p);
expect(p, token::RBRACKET);
let hi = p.get_hi_pos();
ret spanned(lo, hi, {style: style, value: *meta_item});
}
// Parse attributes that appear after the opening of an item, each terminated
// by a semicolon. In addition to a vector of inner attributes, this function
// also returns a vector that may contain the first outer attribute of the
// next item (since we can't know whether the attribute is an inner attribute
// of the containing item or an outer attribute of the first contained item
// until we see the semi).
fn parse_inner_attrs_and_next(p: parser) ->
{inner: [ast::attribute], next: [ast::attribute]} {
let inner_attrs: [ast::attribute] = [];
let next_outer_attrs: [ast::attribute] = [];
while p.peek() == token::POUND {
let attr = parse_attribute(p, ast::attr_inner);
if p.peek() == token::SEMI {
p.bump();
inner_attrs += [attr];
} else {
// It's not really an inner attribute
let outer_attr =
spanned(attr.span.lo, attr.span.hi,
{style: ast::attr_outer, value: attr.node.value});
next_outer_attrs += [outer_attr];
break;
}
}
ret {inner: inner_attrs, next: next_outer_attrs};
}
// Parses one meta item: `ident`, `ident = lit`, or `ident(meta, ...)`.
fn parse_meta_item(p: parser) -> @ast::meta_item {
let lo = p.get_lo_pos();
let ident = parse_ident(p);
alt p.peek() {
token::EQ. {
p.bump();
let lit = parse_lit(p);
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_name_value(ident, lit));
}
token::LPAREN. {
let inner_items = parse_meta_seq(p);
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_list(ident, inner_items));
}
_ {
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_word(ident));
}
}
}
// Parses a parenthesized, comma-separated list of meta items.
fn parse_meta_seq(p: parser) -> [@ast::meta_item] {
ret parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
parse_meta_item, p).node;
}
// Parses a meta list if one follows; otherwise returns the empty vector.
fn parse_optional_meta(p: parser) -> [@ast::meta_item] {
alt p.peek() { token::LPAREN. { ret parse_meta_seq(p); } _ { ret []; } }
}
// Parses the body of a `use` view item: crate name plus optional metadata.
fn parse_use(p: parser) -> ast::view_item_ {
let ident = parse_ident(p);
let metadata = parse_optional_meta(p);
ret ast::view_item_use(ident, metadata, p.get_id());
}
// Parses the remainder of an import path after its first segment: further
// `::ident` segments, a trailing glob `::*`, or an import list `::{a, b}`.
// `def_ident` is present when the import was written `import name = path`.
fn parse_rest_import_name(p: parser, first: ast::ident,
def_ident: option::t<ast::ident>) ->
ast::view_item_ {
let identifiers: [ast::ident] = [first];
let glob: bool = false;
let from_idents = option::none::<[ast::import_ident]>;
while true {
alt p.peek() {
token::SEMI. { break; }
token::MOD_SEP. {
// Globs and import lists must be the final path component.
if glob { p.fatal("cannot path into a glob"); }
if option::is_some(from_idents) {
p.fatal("cannot path into import list");
}
p.bump();
}
_ { p.fatal("expecting '::' or ';'"); }
}
alt p.peek() {
token::IDENT(_, _) { identifiers += [parse_ident(p)]; }
// The lexer can't tell the different kinds of stars apart, so `*`
// arrives as the binary operator token.
token::BINOP(token::STAR.) {
glob = true;
p.bump();
}
token::LBRACE. {
// One entry of an import list `{a, b, c}`.
fn parse_import_ident(p: parser) -> ast::import_ident {
let lo = p.get_lo_pos();
let ident = parse_ident(p);
let hi = p.get_hi_pos();
ret spanned(lo, hi, {name: ident, id: p.get_id()});
}
let from_idents_ =
parse_seq(token::LBRACE, token::RBRACE, seq_sep(token::COMMA),
parse_import_ident, p).node;
if vec::is_empty(from_idents_) {
p.fatal("at least one import is required");
}
from_idents = some(from_idents_);
}
_ {
p.fatal("expecting an identifier, or '*'");
}
}
}
alt def_ident {
some(i) {
// Renamed imports are incompatible with globs and import lists.
if glob { p.fatal("globbed imports can't be renamed"); }
if option::is_some(from_idents) {
p.fatal("can't rename import list");
}
ret ast::view_item_import(i, identifiers, p.get_id());
}
_ {
if glob {
ret ast::view_item_import_glob(identifiers, p.get_id());
} else if option::is_some(from_idents) {
ret ast::view_item_import_from(identifiers,
option::get(from_idents),
p.get_id());
} else {
// Plain import: bind under the last path segment's name.
let len = vec::len(identifiers);
ret ast::view_item_import(identifiers[len - 1u], identifiers,
p.get_id());
}
}
}
}
// Parses the path of a renamed import (`import name = first::rest...`).
fn parse_full_import_name(p: parser, def_ident: ast::ident) ->
ast::view_item_ {
alt p.peek() {
token::IDENT(i, _) {
p.bump();
ret parse_rest_import_name(p, p.get_str(i), some(def_ident));
}
_ { p.fatal("expecting an identifier"); }
}
}
// Parses an import body, distinguishing `import a::b` from
// `import name = a::b` by the `=` after the first identifier.
fn parse_import(p: parser) -> ast::view_item_ {
alt p.peek() {
token::IDENT(i, _) {
p.bump();
alt p.peek() {
token::EQ. {
p.bump();
ret parse_full_import_name(p, p.get_str(i));
}
_ { ret parse_rest_import_name(p, p.get_str(i), none); }
}
}
_ { p.fatal("expecting an identifier"); }
}
}
// Parses an export body: a comma-separated identifier list up to `;`.
fn parse_export(p: parser) -> ast::view_item_ {
let ids =
parse_seq_to_before_end(token::SEMI, seq_sep(token::COMMA),
parse_ident, p);
ret ast::view_item_export(ids, p.get_id());
}
// Parses one view item (`use`, `import`, or `export`) including its
// trailing semicolon. `fail`s if called on anything else, so callers must
// check is_view_item first.
fn parse_view_item(p: parser) -> @ast::view_item {
let lo = p.get_lo_pos();
let the_item =
if eat_word(p, "use") {
parse_use(p)
} else if eat_word(p, "import") {
parse_import(p)
} else if eat_word(p, "export") { parse_export(p) } else { fail };
// NOTE(review): hi is the lo of the `;` token, so the span excludes the
// semicolon — looks intentional, but confirm against span consumers.
let hi = p.get_lo_pos();
expect(p, token::SEMI);
ret @spanned(lo, hi, the_item);
}
// True when the next token is one of the view-item keywords.
fn is_view_item(p: parser) -> bool {
alt p.peek() {
token::IDENT(sid, false) {
let st = p.get_str(sid);
ret str::eq(st, "use") || str::eq(st, "import") ||
str::eq(st, "export");
}
_ { ret false; }
}
}
// Parses a run of consecutive view items.
fn parse_view(p: parser) -> [@ast::view_item] {
let items: [@ast::view_item] = [];
while is_view_item(p) { items += [parse_view_item(p)]; }
ret items;
}
// Same as parse_view; kept separate for native (foreign) modules.
fn parse_native_view(p: parser) -> [@ast::view_item] {
let items: [@ast::view_item] = [];
while is_view_item(p) { items += [parse_view_item(p)]; }
ret items;
}
// Parses a `.rs` file from disk as a whole crate.
fn parse_crate_from_source_file(input: str, cfg: ast::crate_cfg,
sess: parse_sess) -> @ast::crate {
let p = new_parser_from_file(sess, cfg, input, 0u, 0u, SOURCE_FILE);
ret parse_crate_mod(p, cfg);
}
// Parses an in-memory string as a whole crate, registering a fresh
// filemap under `name` so diagnostics have positions.
fn parse_crate_from_source_str(name: str, source: str, cfg: ast::crate_cfg,
sess: parse_sess) -> @ast::crate {
let ftype = SOURCE_FILE;
let filemap = codemap::new_filemap(name, 0u, 0u);
sess.cm.files += [filemap];
let itr = @interner::mk(str::hash, str::eq);
let rdr = lexer::new_reader(sess.cm, source, filemap, itr);
let p = new_parser(sess, cfg, rdr, ftype);
ret parse_crate_mod(p, cfg);
}
// Parses a source module as a crate
fn parse_crate_mod(p: parser, _cfg: ast::crate_cfg) -> @ast::crate {
let lo = p.get_lo_pos();
let crate_attrs = parse_inner_attrs_and_next(p);
let first_item_outer_attrs = crate_attrs.next;
let m = parse_mod_items(p, token::EOF, first_item_outer_attrs);
ret @spanned(lo, p.get_lo_pos(),
{directives: [],
module: m,
attrs: crate_attrs.inner,
config: p.get_cfg()});
}
// Expects and consumes a string literal, returning its interned text.
fn parse_str(p: parser) -> str {
alt p.peek() {
token::LIT_STR(s) { p.bump(); p.get_str(s) }
_ {
p.fatal("expected string literal")
}
}
}
// Logic for parsing crate files (.rc)
//
// Each crate file is a sequence of directives.
//
// Each directive imperatively extends its environment with 0 or more items.
// Parses one crate-file directive: a source-file mod (`mod x = "f.rs";`),
// a directory mod with nested directives, or a view item. Any pending
// outer attributes force the directive to be a mod.
fn parse_crate_directive(p: parser, first_outer_attr: [ast::attribute]) ->
ast::crate_directive {
// Collect the next attributes
let outer_attrs = first_outer_attr + parse_outer_attributes(p);
// In a crate file outer attributes are only going to apply to mods
let expect_mod = vec::len(outer_attrs) > 0u;
let lo = p.get_lo_pos();
if expect_mod || is_word(p, "mod") {
expect_word(p, "mod");
let id = parse_ident(p);
// Optional explicit file/directory name after `=`.
let file_opt =
alt p.peek() {
token::EQ. { p.bump(); some(parse_str(p)) }
_ { none }
};
alt p.peek() {
// mod x = "foo.rs";
token::SEMI. {
let hi = p.get_hi_pos();
p.bump();
ret spanned(lo, hi, ast::cdir_src_mod(id, file_opt, outer_attrs));
}
// mod x = "foo_dir" { ...directives... }
token::LBRACE. {
p.bump();
let inner_attrs = parse_inner_attrs_and_next(p);
let mod_attrs = outer_attrs + inner_attrs.inner;
let next_outer_attr = inner_attrs.next;
let cdirs =
parse_crate_directives(p, token::RBRACE, next_outer_attr);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret spanned(lo, hi,
ast::cdir_dir_mod(id, file_opt, cdirs, mod_attrs));
}
t { unexpected(p, t); }
}
} else if is_view_item(p) {
let vi = parse_view_item(p);
ret spanned(lo, vi.span.hi, ast::cdir_view_item(vi));
} else { ret p.fatal("expected crate directive"); }
}
// Parses directives until `term`, threading a possible leftover outer
// attribute from the caller into the first directive.
fn parse_crate_directives(p: parser, term: token::token,
first_outer_attr: [ast::attribute]) ->
[@ast::crate_directive] {
// This is pretty ugly. If we have an outer attribute then we can't accept
// seeing the terminator next, so if we do see it then fail the same way
// parse_crate_directive would
if vec::len(first_outer_attr) > 0u && p.peek() == term {
expect_word(p, "mod");
}
let cdirs: [@ast::crate_directive] = [];
while p.peek() != term {
let cdir = @parse_crate_directive(p, first_outer_attr);
cdirs += [cdir];
}
ret cdirs;
}
// Parses a `.rc` crate file: reads its directives, then evaluates them
// (loading the referenced source files) into a crate module.
fn parse_crate_from_crate_file(input: str, cfg: ast::crate_cfg,
sess: parse_sess) -> @ast::crate {
let p = new_parser_from_file(sess, cfg, input, 0u, 0u, CRATE_FILE);
let lo = p.get_lo_pos();
// Module files are resolved relative to the crate file's directory.
let prefix = std::fs::dirname(p.get_filemap().name);
let leading_attrs = parse_inner_attrs_and_next(p);
let crate_attrs = leading_attrs.inner;
let first_cdir_attr = leading_attrs.next;
let cdirs = parse_crate_directives(p, token::EOF, first_cdir_attr);
let cx =
@{p: p,
sess: sess,
mutable chpos: p.get_chpos(),
mutable byte_pos: p.get_byte_pos(),
cfg: p.get_cfg()};
// The companion module is the crate file's basename without extension.
let (companionmod, _) = fs::splitext(fs::basename(input));
let (m, attrs) = eval::eval_crate_directives_to_mod(
cx, cdirs, prefix, option::some(companionmod));
let hi = p.get_hi_pos();
expect(p, token::EOF);
ret @spanned(lo, hi,
{directives: cdirs,
module: m,
attrs: crate_attrs + attrs,
config: p.get_cfg()});
}
// Entry point: dispatches on file extension (.rc crate file vs .rs source).
fn parse_crate_from_file(input: str, cfg: ast::crate_cfg, sess: parse_sess) ->
@ast::crate {
if str::ends_with(input, ".rc") {
parse_crate_from_crate_file(input, cfg, sess)
} else if str::ends_with(input, ".rs") {
parse_crate_from_source_file(input, cfg, sess)
} else {
codemap::emit_error(none, "unknown input file type: " + input,
sess.cm);
fail
}
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
rustc: Accept `path` attributes for crate directives
Temporarily allow the module path to be specified either as an attribute or in the AST, e.g.:
#[path = "mymod.rs"]
mod mymod = "mymod.rs";
This is a transitional commit to avoid creating a stage1 snapshot.
import std::{io, vec, str, option, either, result, fs};
import std::option::{some, none};
import std::either::{left, right};
import std::map::{hashmap, new_str_hash};
import token::can_begin_expr;
import codemap::span;
import util::interner;
import ast::{node_id, spanned};
import front::attr;
// Parser restriction: RESTRICT_NO_CALL_EXPRS suppresses call expressions
// (used to disambiguate certain contexts).
tag restriction { UNRESTRICTED; RESTRICT_NO_CALL_EXPRS; }
// Whether the input is a `.rc` crate file or a `.rs` source file.
tag file_type { CRATE_FILE; SOURCE_FILE; }
// Shared parse session: the codemap plus the next fresh AST node id.
type parse_sess = @{cm: codemap::codemap, mutable next_id: node_id};
// Hands out monotonically increasing node ids from the session.
fn next_node_id(sess: parse_sess) -> node_id {
let rv = sess.next_id;
sess.next_id += 1;
ret rv;
}
// The parser interface: token stream access (peek/bump/look_ahead),
// diagnostics, position queries, and session/config accessors.
type parser =
obj {
fn peek() -> token::token;
fn bump();
fn swap(token::token, uint, uint);
fn look_ahead(uint) -> token::token;
fn fatal(str) -> ! ;
fn span_fatal(span, str) -> ! ;
fn warn(str);
fn restrict(restriction);
fn get_restriction() -> restriction;
fn get_file_type() -> file_type;
fn get_cfg() -> ast::crate_cfg;
fn get_span() -> span;
fn get_lo_pos() -> uint;
fn get_hi_pos() -> uint;
fn get_last_lo_pos() -> uint;
fn get_last_hi_pos() -> uint;
fn get_prec_table() -> @[op_spec];
fn get_str(token::str_num) -> str;
fn get_reader() -> lexer::reader;
fn get_filemap() -> codemap::filemap;
fn get_bad_expr_words() -> hashmap<str, ()>;
fn get_chpos() -> uint;
fn get_byte_pos() -> uint;
fn get_id() -> node_id;
fn get_sess() -> parse_sess;
};
// Builds a parser over the contents of `path`, registering a new filemap
// starting at the given char/byte offsets. Emits a fatal error on I/O
// failure.
fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, path: str,
chpos: uint, byte_pos: uint, ftype: file_type) ->
parser {
let src = alt io::read_whole_file_str(path) {
result::ok(src) {
// FIXME: This copy is unfortunate
src
}
result::err(e) {
codemap::emit_error(none, e, sess.cm);
fail;
}
};
let filemap = codemap::new_filemap(path, chpos, byte_pos);
sess.cm.files += [filemap];
let itr = @interner::mk(str::hash, str::eq);
let rdr = lexer::new_reader(sess.cm, src, filemap, itr);
ret new_parser(sess, cfg, rdr, ftype);
}
// Constructs the concrete parser object over a lexer reader. The obj keeps
// one token of state (tok/tok_span), the previous token's span, and a
// lookahead buffer filled lazily by look_ahead.
fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: lexer::reader,
ftype: file_type) -> parser {
obj stdio_parser(sess: parse_sess,
cfg: ast::crate_cfg,
ftype: file_type,
mutable tok: token::token,
mutable tok_span: span,
mutable last_tok_span: span,
mutable buffer: [{tok: token::token, span: span}],
mutable restr: restriction,
rdr: lexer::reader,
precs: @[op_spec],
bad_words: hashmap<str, ()>) {
fn peek() -> token::token { ret tok; }
// Advances to the next token, preferring buffered lookahead tokens.
fn bump() {
last_tok_span = tok_span;
if vec::len(buffer) == 0u {
let next = lexer::next_token(rdr);
tok = next.tok;
tok_span = ast_util::mk_sp(next.chpos, rdr.get_chpos());
} else {
// Buffer is kept newest-first, so pop yields the oldest token.
let next = vec::pop(buffer);
tok = next.tok;
tok_span = next.span;
}
}
// Replaces the current token in place (used to split `>>` etc.).
fn swap(next: token::token, lo: uint, hi: uint) {
tok = next;
tok_span = ast_util::mk_sp(lo, hi);
}
// Returns the token `distance` ahead, lexing into the buffer as needed.
fn look_ahead(distance: uint) -> token::token {
while vec::len(buffer) < distance {
let next = lexer::next_token(rdr);
let sp = ast_util::mk_sp(next.chpos, rdr.get_chpos());
buffer = [{tok: next.tok, span: sp}] + buffer;
}
ret buffer[distance - 1u].tok;
}
fn fatal(m: str) -> ! {
self.span_fatal(self.get_span(), m);
}
fn span_fatal(sp: span, m: str) -> ! {
codemap::emit_error(some(sp), m, sess.cm);
fail;
}
fn warn(m: str) {
codemap::emit_warning(some(self.get_span()), m, sess.cm);
}
fn restrict(r: restriction) { restr = r; }
fn get_restriction() -> restriction { ret restr; }
fn get_span() -> span { ret tok_span; }
fn get_lo_pos() -> uint { ret tok_span.lo; }
fn get_hi_pos() -> uint { ret tok_span.hi; }
fn get_last_lo_pos() -> uint { ret last_tok_span.lo; }
fn get_last_hi_pos() -> uint { ret last_tok_span.hi; }
fn get_file_type() -> file_type { ret ftype; }
fn get_cfg() -> ast::crate_cfg { ret cfg; }
fn get_prec_table() -> @[op_spec] { ret precs; }
// Resolves an interned string number back to its text.
fn get_str(i: token::str_num) -> str {
ret interner::get(*rdr.get_interner(), i);
}
fn get_reader() -> lexer::reader { ret rdr; }
fn get_filemap() -> codemap::filemap { ret rdr.get_filemap(); }
fn get_bad_expr_words() -> hashmap<str, ()> { ret bad_words; }
fn get_chpos() -> uint { ret rdr.get_chpos(); }
fn get_byte_pos() -> uint { ret rdr.get_byte_pos(); }
fn get_id() -> node_id { ret next_node_id(sess); }
fn get_sess() -> parse_sess { ret sess; }
}
// Prime the parser with the first token.
let tok0 = lexer::next_token(rdr);
let span0 = ast_util::mk_sp(tok0.chpos, rdr.get_chpos());
ret stdio_parser(sess, cfg, ftype, tok0.tok, span0, span0, [],
UNRESTRICTED, rdr, prec_table(), bad_expr_word_table());
}
// These are the words that shouldn't be allowed as value identifiers,
// because, if used at the start of a line, they will cause the line to be
// interpreted as a specific kind of statement, which would be confusing.
// Builds the set of keywords that may not be used as value identifiers
// (see the comment above for why).
fn bad_expr_word_table() -> hashmap<str, ()> {
let words = new_str_hash();
words.insert("mod", ());
words.insert("if", ());
words.insert("else", ());
words.insert("while", ());
words.insert("do", ());
words.insert("alt", ());
words.insert("for", ());
words.insert("break", ());
words.insert("cont", ());
words.insert("ret", ());
words.insert("be", ());
words.insert("fail", ());
words.insert("type", ());
words.insert("resource", ());
words.insert("check", ());
words.insert("assert", ());
words.insert("claim", ());
words.insert("native", ());
words.insert("fn", ());
words.insert("lambda", ());
words.insert("pure", ());
words.insert("unsafe", ());
words.insert("block", ());
words.insert("import", ());
words.insert("export", ());
words.insert("let", ());
words.insert("const", ());
words.insert("log", ());
words.insert("log_err", ());
words.insert("tag", ());
words.insert("obj", ());
words.insert("copy", ());
ret words;
}
// Reports an unexpected token and aborts parsing.
fn unexpected(p: parser, t: token::token) -> ! {
let s: str = "unexpected token: ";
s += token::to_str(p.get_reader(), t);
p.fatal(s);
}
// Consumes the expected token or dies with a "expecting X, found Y" error.
fn expect(p: parser, t: token::token) {
if p.peek() == t {
p.bump();
} else {
let s: str = "expecting ";
s += token::to_str(p.get_reader(), t);
s += ", found ";
s += token::to_str(p.get_reader(), p.peek());
p.fatal(s);
}
}
// Consumes a `>`, splitting `>>`/`>>>` tokens in place so nested generic
// closers like `T<U<V>>` parse: the remaining shift operator is swapped
// back in with a span starting one char later.
fn expect_gt(p: parser) {
if p.peek() == token::GT {
p.bump();
} else if p.peek() == token::BINOP(token::LSR) {
p.swap(token::GT, p.get_lo_pos() + 1u, p.get_hi_pos());
} else if p.peek() == token::BINOP(token::ASR) {
p.swap(token::BINOP(token::LSR), p.get_lo_pos() + 1u, p.get_hi_pos());
} else {
let s: str = "expecting ";
s += token::to_str(p.get_reader(), token::GT);
s += ", found ";
s += token::to_str(p.get_reader(), p.peek());
p.fatal(s);
}
}
// Wraps a node with a span covering [lo, hi).
fn spanned<copy T>(lo: uint, hi: uint, node: T) -> spanned<T> {
ret {node: node, span: ast_util::mk_sp(lo, hi)};
}
// Consumes and returns the next identifier; fatal otherwise.
fn parse_ident(p: parser) -> ast::ident {
alt p.peek() {
token::IDENT(i, _) { p.bump(); ret p.get_str(i); }
_ { p.fatal("expecting ident"); }
}
}
// Like parse_ident, but first rejects reserved keywords.
fn parse_value_ident(p: parser) -> ast::ident {
check_bad_word(p);
ret parse_ident(p);
}
// Consumes `tok` if it is next; returns whether it did.
fn eat(p: parser, tok: token::token) -> bool {
ret if p.peek() == tok { p.bump(); true } else { false };
}
// True if the next token is the given bare (non-ctor) identifier word.
fn is_word(p: parser, word: str) -> bool {
ret alt p.peek() {
token::IDENT(sid, false) { str::eq(word, p.get_str(sid)) }
_ { false }
};
}
// Consumes the given keyword-like identifier if present.
fn eat_word(p: parser, word: str) -> bool {
alt p.peek() {
token::IDENT(sid, false) {
if str::eq(word, p.get_str(sid)) {
p.bump();
ret true;
} else { ret false; }
}
_ { ret false; }
}
}
// Requires the given word next; fatal otherwise.
fn expect_word(p: parser, word: str) {
if !eat_word(p, word) {
p.fatal("expecting " + word + ", found " +
token::to_str(p.get_reader(), p.peek()));
}
}
// Fatal error if the next identifier is a reserved expression keyword.
fn check_bad_word(p: parser) {
alt p.peek() {
token::IDENT(sid, false) {
let w = p.get_str(sid);
if p.get_bad_expr_words().contains_key(w) {
p.fatal("found " + w + " in expression position");
}
}
_ { }
}
}
// Parses the argument list and return type of a fn type, producing
// ast::ty_fn for the given proto. Argument names, if written, are ignored.
fn parse_ty_fn(proto: ast::proto, p: parser) -> ast::ty_ {
fn parse_fn_input_ty(p: parser) -> ast::ty_arg {
let lo = p.get_lo_pos();
let mode = parse_arg_mode(p);
// Ignore arg name, if present
if is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
p.bump();
p.bump();
}
let t = parse_ty(p, false);
ret spanned(lo, t.span.hi, {mode: mode, ty: t});
}
let inputs =
parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
parse_fn_input_ty, p);
// FIXME: there's no syntax for this right now anyway
// auto constrs = parse_constrs(~[], p);
let constrs: [@ast::constr] = [];
let (ret_style, ret_ty) = parse_ret_ty(p);
ret ast::ty_fn(proto, inputs.node, ret_ty, ret_style, constrs);
}
// Parses an obj type: a braced list of `;`-terminated method signatures.
fn parse_ty_obj(p: parser) -> ast::ty_ {
fn parse_method_sig(p: parser) -> ast::ty_method {
let flo = p.get_lo_pos();
let proto: ast::proto = parse_method_proto(p);
let ident = parse_value_ident(p);
let f = parse_ty_fn(proto, p);
expect(p, token::SEMI);
// parse_ty_fn always yields ty_fn, so this alt is exhaustive here.
alt f {
ast::ty_fn(proto, inputs, output, cf, constrs) {
ret spanned(flo, output.span.hi,
{proto: proto,
ident: ident,
inputs: inputs,
output: output,
cf: cf,
constrs: constrs});
}
}
}
let meths =
parse_seq(token::LBRACE, token::RBRACE, seq_sep_none(),
parse_method_sig, p);
ret ast::ty_obj(meths.node);
}
// Parses an optional mutability qualifier followed by a type.
fn parse_mt(p: parser) -> ast::mt {
let mut = parse_mutability(p);
let t = parse_ty(p, false);
ret {ty: t, mut: mut};
}
// Parses one record-type field: `[mutability] ident: ty`.
fn parse_ty_field(p: parser) -> ast::ty_field {
let lo = p.get_lo_pos();
let mut = parse_mutability(p);
let id = parse_ident(p);
expect(p, token::COLON);
let ty = parse_ty(p, false);
ret spanned(lo, ty.span.hi, {ident: id, mt: {ty: ty, mut: mut}});
}
// if i is the jth ident in args, return j
// otherwise, fail
fn ident_index(p: parser, args: [ast::arg], i: ast::ident) -> uint {
let j = 0u;
for a: ast::arg in args { if a.ident == i { ret j; } j += 1u; }
p.fatal("Unbound variable " + i + " in constraint arg");
}
// Parses one type-constraint argument: `*` for the base value or `*.path`
// for a record field.
fn parse_type_constr_arg(p: parser) -> @ast::ty_constr_arg {
let sp = p.get_span();
let carg = ast::carg_base;
expect(p, token::BINOP(token::STAR));
if p.peek() == token::DOT {
// "*..." notation for record fields
p.bump();
let pth: ast::path = parse_path(p);
carg = ast::carg_ident(pth);
}
// No literals yet, I guess?
ret @{node: carg, span: sp};
}
// Parses one fn-constraint argument: `*` (the constrained value itself)
// or a named fn argument, resolved to its positional index.
fn parse_constr_arg(args: [ast::arg], p: parser) -> @ast::constr_arg {
let sp = p.get_span();
let carg = ast::carg_base;
if p.peek() == token::BINOP(token::STAR) {
p.bump();
} else {
let i: ast::ident = parse_value_ident(p);
carg = ast::carg_ident(ident_index(p, args, i));
}
ret @{node: carg, span: sp};
}
// Parses a fn-level constraint: `path(arg, ...)` over the fn's arguments.
fn parse_ty_constr(fn_args: [ast::arg], p: parser) -> @ast::constr {
let lo = p.get_lo_pos();
let path = parse_path(p);
let args: {node: [@ast::constr_arg], span: span} =
parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
{|p| parse_constr_arg(fn_args, p)}, p);
ret @spanned(lo, args.span.hi,
{path: path, args: args.node, id: p.get_id()});
}
// Parses a constraint in type position: `path(*, *.field, ...)`.
fn parse_constr_in_type(p: parser) -> @ast::ty_constr {
let lo = p.get_lo_pos();
let path = parse_path(p);
let args: [@ast::ty_constr_arg] =
parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
parse_type_constr_arg, p).node;
let hi = p.get_lo_pos();
let tc: ast::ty_constr_ = {path: path, args: args, id: p.get_id()};
ret @spanned(lo, hi, tc);
}
// Parses one or more comma-separated constraints using the given parser.
fn parse_constrs<copy T>(pser: block(parser) -> @ast::constr_general<T>,
p: parser) ->
[@ast::constr_general<T>] {
let constrs: [@ast::constr_general<T>] = [];
while true {
let constr = pser(p);
constrs += [constr];
if p.peek() == token::COMMA { p.bump(); } else { break; }
}
constrs
}
// Convenience wrapper: constraints in type position.
fn parse_type_constraints(p: parser) -> [@ast::ty_constr] {
ret parse_constrs(parse_constr_in_type, p);
}
// After a base type, parses an optional explicit type-parameter list
// (`::<T, U>` or `<T, U>` depending on context). Only path types may take
// type parameters.
fn parse_ty_postfix(orig_t: ast::ty_, p: parser, colons_before_params: bool)
-> @ast::ty {
let lo = p.get_lo_pos();
if colons_before_params && p.peek() == token::MOD_SEP {
p.bump();
expect(p, token::LT);
} else if !colons_before_params && p.peek() == token::LT {
p.bump();
} else { ret @spanned(lo, p.get_lo_pos(), orig_t); }
// If we're here, we have explicit type parameter instantiation.
let seq = parse_seq_to_gt(some(token::COMMA), {|p| parse_ty(p, false)},
p);
alt orig_t {
ast::ty_path(pth, ann) {
let hi = p.get_hi_pos();
ret @spanned(lo, hi,
ast::ty_path(spanned(lo, hi,
{global: pth.node.global,
idents: pth.node.idents,
types: seq}), ann));
}
_ { p.fatal("type parameter instantiation only allowed for paths"); }
}
}
// Parses an optional return type: `-> !` (diverging), `-> ty`, or nothing
// (implicitly nil).
fn parse_ret_ty(p: parser) -> (ast::ret_style, @ast::ty) {
ret if eat(p, token::RARROW) {
let lo = p.get_lo_pos();
if eat(p, token::NOT) {
(ast::noreturn, @spanned(lo, p.get_last_hi_pos(), ast::ty_bot))
} else { (ast::return_val, parse_ty(p, false)) }
} else {
let pos = p.get_lo_pos();
(ast::return_val, @spanned(pos, pos, ast::ty_nil))
}
}
// The main type parser: primitive words, machine-sized ints/floats,
// tuples/nil `(...)`, boxes `@`/`~`/`*`, records `{...}` (optionally
// constrained), vectors `[...]`, fn/block/lambda/obj types, and paths.
// `colons_before_params` selects `::<T>` vs `<T>` for type parameters.
fn parse_ty(p: parser, colons_before_params: bool) -> @ast::ty {
let lo = p.get_lo_pos();
let t: ast::ty_;
// FIXME: do something with this
if eat_word(p, "bool") {
t = ast::ty_bool;
} else if eat_word(p, "int") {
t = ast::ty_int;
} else if eat_word(p, "uint") {
t = ast::ty_uint;
} else if eat_word(p, "float") {
t = ast::ty_float;
} else if eat_word(p, "str") {
t = ast::ty_str;
} else if eat_word(p, "char") {
t = ast::ty_char;
/*
} else if (eat_word(p, "task")) {
t = ast::ty_task;
*/
} else if eat_word(p, "i8") {
t = ast::ty_machine(ast::ty_i8);
} else if eat_word(p, "i16") {
t = ast::ty_machine(ast::ty_i16);
} else if eat_word(p, "i32") {
t = ast::ty_machine(ast::ty_i32);
} else if eat_word(p, "i64") {
t = ast::ty_machine(ast::ty_i64);
} else if eat_word(p, "u8") {
t = ast::ty_machine(ast::ty_u8);
} else if eat_word(p, "u16") {
t = ast::ty_machine(ast::ty_u16);
} else if eat_word(p, "u32") {
t = ast::ty_machine(ast::ty_u32);
} else if eat_word(p, "u64") {
t = ast::ty_machine(ast::ty_u64);
} else if eat_word(p, "f32") {
t = ast::ty_machine(ast::ty_f32);
} else if eat_word(p, "f64") {
t = ast::ty_machine(ast::ty_f64);
} else if p.peek() == token::LPAREN {
p.bump();
if p.peek() == token::RPAREN {
// `()` is the nil type.
p.bump();
t = ast::ty_nil;
} else {
// One type in parens is just that type; more than one is a tuple.
let ts = [parse_ty(p, false)];
while p.peek() == token::COMMA {
p.bump();
ts += [parse_ty(p, false)];
}
if vec::len(ts) == 1u {
t = ts[0].node;
} else { t = ast::ty_tup(ts); }
expect(p, token::RPAREN);
}
} else if p.peek() == token::AT {
p.bump();
t = ast::ty_box(parse_mt(p));
} else if p.peek() == token::TILDE {
p.bump();
t = ast::ty_uniq(parse_mt(p));
} else if p.peek() == token::BINOP(token::STAR) {
p.bump();
t = ast::ty_ptr(parse_mt(p));
} else if p.peek() == token::LBRACE {
let elems =
parse_seq(token::LBRACE, token::RBRACE, seq_sep_opt(token::COMMA),
parse_ty_field, p);
let hi = elems.span.hi;
t = ast::ty_rec(elems.node);
// A record type may carry constraints after a colon.
if p.peek() == token::COLON {
p.bump();
t = ast::ty_constr(@spanned(lo, hi, t),
parse_type_constraints(p));
}
} else if p.peek() == token::LBRACKET {
expect(p, token::LBRACKET);
t = ast::ty_vec(parse_mt(p));
expect(p, token::RBRACKET);
} else if eat_word(p, "fn") {
let proto = parse_fn_ty_proto(p);
t = parse_ty_fn(proto, p);
} else if eat_word(p, "block") {
t = parse_ty_fn(ast::proto_block, p);
} else if eat_word(p, "lambda") {
t = parse_ty_fn(ast::proto_shared(ast::sugar_sexy), p);
} else if eat_word(p, "obj") {
t = parse_ty_obj(p);
} else if p.peek() == token::MOD_SEP || is_ident(p.peek()) {
let path = parse_path(p);
t = ast::ty_path(path, p.get_id());
} else { p.fatal("expecting type"); }
ret parse_ty_postfix(t, p, colons_before_params);
}
// Parses an argument-passing mode sigil: `&` by-mut-ref, `-` by-move,
// `&&` by-ref, `++` by-val, `+` by-copy; no sigil means infer.
fn parse_arg_mode(p: parser) -> ast::mode {
if eat(p, token::BINOP(token::AND)) { ast::by_mut_ref }
else if eat(p, token::BINOP(token::MINUS)) { ast::by_move }
else if eat(p, token::ANDAND) { ast::by_ref }
else if eat(p, token::BINOP(token::PLUS)) {
if eat(p, token::BINOP(token::PLUS)) { ast::by_val }
else { ast::by_copy }
}
else { ast::mode_infer }
}
// Parses one typed fn argument: `[mode] ident: ty`.
fn parse_arg(p: parser) -> ast::arg {
let m = parse_arg_mode(p);
let i = parse_value_ident(p);
expect(p, token::COLON);
let t = parse_ty(p, false);
ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parses a block-closure argument, whose type is always inferred.
fn parse_fn_block_arg(p: parser) -> ast::arg {
let m = parse_arg_mode(p);
let i = parse_value_ident(p);
let t = @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer);
ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parses `sep`-separated items until (but not consuming) a closing `>`;
// also stops at `>>`/`>>>` so expect_gt can split them.
fn parse_seq_to_before_gt<copy T>(sep: option::t<token::token>,
f: block(parser) -> T,
p: parser) -> [T] {
let first = true;
let v = [];
while p.peek() != token::GT && p.peek() != token::BINOP(token::LSR) &&
p.peek() != token::BINOP(token::ASR) {
alt sep {
some(t) { if first { first = false; } else { expect(p, t); } }
_ { }
}
v += [f(p)];
}
ret v;
}
// As above, but also consumes the closing `>`.
fn parse_seq_to_gt<copy T>(sep: option::t<token::token>,
f: block(parser) -> T, p: parser) -> [T] {
let v = parse_seq_to_before_gt(sep, f, p);
expect_gt(p);
ret v;
}
// Parses a full `<...>` sequence, returning the items with their span.
fn parse_seq_lt_gt<copy T>(sep: option::t<token::token>,
f: block(parser) -> T,
p: parser) -> spanned<[T]> {
let lo = p.get_lo_pos();
expect(p, token::LT);
let result = parse_seq_to_before_gt::<T>(sep, f, p);
let hi = p.get_hi_pos();
expect_gt(p);
ret spanned(lo, hi, result);
}
// Parses items up to and including the closing `ket` token.
fn parse_seq_to_end<copy T>(ket: token::token, sep: seq_sep,
f: block(parser) -> T, p: parser) -> [T] {
let val = parse_seq_to_before_end(ket, sep, f, p);
p.bump();
ret val;
}
// Separator policy for sequence parsing.
type seq_sep = {
sep: option::t<token::token>,
trailing_opt: bool // is trailing separator optional?
};
fn seq_sep(t: token::token) -> seq_sep {
ret {sep: option::some(t), trailing_opt: false};
}
fn seq_sep_opt(t: token::token) -> seq_sep {
ret {sep: option::some(t), trailing_opt: true};
}
fn seq_sep_none() -> seq_sep {
ret {sep: option::none, trailing_opt: false};
}
// Core sequence loop: items separated per `sep`, stopping before `ket`;
// honors an optional trailing separator when `trailing_opt` is set.
fn parse_seq_to_before_end<copy T>(ket: token::token,
sep: seq_sep,
f: block(parser) -> T, p: parser) -> [T] {
let first: bool = true;
let v: [T] = [];
while p.peek() != ket {
alt sep.sep {
some(t) { if first { first = false; } else { expect(p, t); } }
_ { }
}
if sep.trailing_opt && p.peek() == ket { break; }
v += [f(p)];
}
ret v;
}
// Parses a bracketed sequence `bra ... ket`, returning items plus span.
fn parse_seq<copy T>(bra: token::token, ket: token::token,
sep: seq_sep, f: block(parser) -> T,
p: parser) -> spanned<[T]> {
let lo = p.get_lo_pos();
expect(p, bra);
let result = parse_seq_to_before_end::<T>(ket, sep, f, p);
let hi = p.get_hi_pos();
p.bump();
ret spanned(lo, hi, result);
}
// Converts a literal token into its AST literal; `(` expects `)` to form
// the nil literal. Fatal on non-literal tokens.
fn lit_from_token(p: parser, tok: token::token) -> ast::lit_ {
alt tok {
token::LIT_INT(i) { ast::lit_int(i) }
token::LIT_UINT(u) { ast::lit_uint(u) }
token::LIT_FLOAT(s) { ast::lit_float(p.get_str(s)) }
token::LIT_MACH_INT(tm, i) { ast::lit_mach_int(tm, i) }
token::LIT_MACH_FLOAT(tm, s) { ast::lit_mach_float(tm, p.get_str(s)) }
token::LIT_CHAR(c) { ast::lit_char(c) }
token::LIT_STR(s) { ast::lit_str(p.get_str(s)) }
token::LPAREN. { expect(p, token::RPAREN); ast::lit_nil }
_ { unexpected(p, tok); }
}
}
// Parses a literal expression, handling the `true`/`false` keywords
// specially since they lex as identifiers.
fn parse_lit(p: parser) -> ast::lit {
let sp = p.get_span();
let lit = if eat_word(p, "true") {
ast::lit_bool(true)
} else if eat_word(p, "false") {
ast::lit_bool(false)
} else {
let tok = p.peek();
p.bump();
lit_from_token(p, tok)
};
ret {node: lit, span: sp};
}
// True if the token is any identifier.
fn is_ident(t: token::token) -> bool {
alt t { token::IDENT(_, _) { ret true; } _ { } }
ret false;
}
// True if the next token is a plain (non-constructor) identifier.
fn is_plain_ident(p: parser) -> bool {
ret alt p.peek() { token::IDENT(_, false) { true } _ { false } };
}
// Parses a (possibly `::`-prefixed) module path like `a::b::c`, stopping
// before a `::<` type-parameter list. Returns the path with an empty
// `types` vector; callers attach type arguments via
// parse_path_and_ty_param_substs or parse_ty_postfix.
//
// Fix: the original assigned `hi = p.get_hi_pos();` twice in a row for
// the same token (before any bump), so the second assignment was dead
// code; it is removed here with no behavior change.
fn parse_path(p: parser) -> ast::path {
let lo = p.get_lo_pos();
let hi = lo;
let global;
if p.peek() == token::MOD_SEP {
global = true;
p.bump();
} else { global = false; }
let ids: [ast::ident] = [];
while true {
alt p.peek() {
token::IDENT(i, _) {
// Extend the span to cover this segment.
hi = p.get_hi_pos();
ids += [p.get_str(i)];
p.bump();
// Keep going across `::` unless it introduces type params.
if p.peek() == token::MOD_SEP && p.look_ahead(1u) != token::LT {
p.bump();
} else { break; }
}
_ { break; }
}
}
ret spanned(lo, hi, {global: global, idents: ids, types: []});
}
fn parse_path_and_ty_param_substs(p: parser) -> ast::path {
let lo = p.get_lo_pos();
let path = parse_path(p);
if p.peek() == token::MOD_SEP {
p.bump();
let seq =
parse_seq_lt_gt(some(token::COMMA), {|p| parse_ty(p, false)}, p);
let hi = seq.span.hi;
path =
spanned(lo, hi,
{global: path.node.global,
idents: path.node.idents,
types: seq.node});
}
ret path;
}
fn parse_mutability(p: parser) -> ast::mutability {
if eat_word(p, "mutable") {
ast::mut
} else if eat_word(p, "const") {
ast::maybe_mut
} else {
ast::imm
}
}
// Parse one record field: `[mutability] ident <sep> expr`, where `sep`
// is supplied by the caller (e.g. `:` for record literals).
fn parse_field(p: parser, sep: token::token) -> ast::field {
    let lo = p.get_lo_pos();
    let m = parse_mutability(p);
    let i = parse_ident(p);
    expect(p, sep);
    let e = parse_expr(p);
    ret spanned(lo, e.span.hi, {mut: m, ident: i, expr: e});
}
// Wrap an expr_ node into a boxed expression with a fresh node id and
// the given span.
fn mk_expr(p: parser, lo: uint, hi: uint, node: ast::expr_) -> @ast::expr {
    ret @{id: p.get_id(), node: node, span: ast_util::mk_sp(lo, hi)};
}
// Wrap a macro node into an expr_mac expression; the macro and the
// expression share the same span.
fn mk_mac_expr(p: parser, lo: uint, hi: uint, m: ast::mac_) -> @ast::expr {
    ret @{id: p.get_id(),
          node: ast::expr_mac({node: m, span: ast_util::mk_sp(lo, hi)}),
          span: ast_util::mk_sp(lo, hi)};
}
// True for `|` or `||` — the tokens that can open a block/lambda
// argument list.
fn is_bar(t: token::token) -> bool {
    alt t { token::BINOP(token::OR.) | token::OROR. { true } _ { false } }
}
// Parse a "bottom" (primary) expression: the highest-precedence
// expression forms — literals, parenthesized/tuple exprs, record and
// block exprs, control-flow exprs, vectors, macros, anonymous objects,
// `bind`, keyword statements-as-exprs (fail/log/ret/...), paths, and
// plain literals as the final fallback. The branch ORDER is
// significant: keyword checks must precede the generic path/literal
// cases.
fn parse_bottom_expr(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let ex: ast::expr_;
    if p.peek() == token::LPAREN {
        p.bump();
        if p.peek() == token::RPAREN {
            // `()` — the nil literal.
            hi = p.get_hi_pos();
            p.bump();
            let lit = @spanned(lo, hi, ast::lit_nil);
            ret mk_expr(p, lo, hi, ast::expr_lit(lit));
        }
        let es = [parse_expr(p)];
        while p.peek() == token::COMMA { p.bump(); es += [parse_expr(p)]; }
        hi = p.get_hi_pos();
        expect(p, token::RPAREN);
        if vec::len(es) == 1u {
            // `(e)` is just grouping, not a 1-tuple.
            ret mk_expr(p, lo, hi, es[0].node);
        } else { ret mk_expr(p, lo, hi, ast::expr_tup(es)); }
    } else if p.peek() == token::LBRACE {
        p.bump();
        // `{ mutable ...` or `{ ident: ...` means a record literal;
        // `{ |...` means a fn-block; anything else is a plain block.
        if is_word(p, "mutable") ||
               is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
            let fields = [parse_field(p, token::COLON)];
            let base = none;
            while p.peek() != token::RBRACE {
                if eat_word(p, "with") { base = some(parse_expr(p)); break; }
                expect(p, token::COMMA);
                if p.peek() == token::RBRACE {
                    // record ends by an optional trailing comma
                    break;
                }
                fields += [parse_field(p, token::COLON)];
            }
            hi = p.get_hi_pos();
            expect(p, token::RBRACE);
            ex = ast::expr_rec(fields, base);
        } else if is_bar(p.peek()) {
            ret parse_fn_block_expr(p);
        } else {
            let blk = parse_block_tail(p, lo, ast::default_blk);
            ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
        }
    } else if eat_word(p, "if") {
        ret parse_if_expr(p);
    } else if eat_word(p, "for") {
        ret parse_for_expr(p);
    } else if eat_word(p, "while") {
        ret parse_while_expr(p);
    } else if eat_word(p, "do") {
        ret parse_do_while_expr(p);
    } else if eat_word(p, "alt") {
        ret parse_alt_expr(p);
        /*
    } else if (eat_word(p, "spawn")) {
        ret parse_spawn_expr(p);
        */
    } else if eat_word(p, "fn") {
        let proto = parse_fn_anon_proto(p);
        ret parse_fn_expr(p, proto);
    } else if eat_word(p, "block") {
        ret parse_fn_expr(p, ast::proto_block);
    } else if eat_word(p, "lambda") {
        ret parse_fn_expr(p, ast::proto_shared(ast::sugar_sexy));
    } else if eat_word(p, "unchecked") {
        ret parse_block_expr(p, lo, ast::unchecked_blk);
    } else if eat_word(p, "unsafe") {
        ret parse_block_expr(p, lo, ast::unsafe_blk);
    } else if p.peek() == token::LBRACKET {
        // Vector literal, with optional element mutability.
        p.bump();
        let mut = parse_mutability(p);
        let es =
            parse_seq_to_end(token::RBRACKET, seq_sep(token::COMMA),
                             parse_expr, p);
        ex = ast::expr_vec(es, mut);
    } else if p.peek() == token::POUND_LT {
        // `#<ty>` — embedded-type macro form.
        p.bump();
        let ty = parse_ty(p, false);
        expect(p, token::GT);
        /* hack: early return to take advantage of specialized function */
        ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_embed_type(ty));
    } else if p.peek() == token::POUND_LBRACE {
        // `#{ ... }` — embedded-block macro form.
        p.bump();
        let blk = ast::mac_embed_block(
            parse_block_tail(p, lo, ast::default_blk));
        ret mk_mac_expr(p, lo, p.get_hi_pos(), blk);
    } else if p.peek() == token::ELLIPSIS {
        p.bump();
        ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_ellipsis);
    } else if eat_word(p, "obj") {
        // Anonymous object
        // Only make people type () if they're actually adding new fields
        let fields: option::t<[ast::anon_obj_field]> = none;
        if p.peek() == token::LPAREN {
            p.bump();
            fields =
                some(parse_seq_to_end(token::RPAREN, seq_sep(token::COMMA),
                                      parse_anon_obj_field, p));
        }
        let meths: [@ast::method] = [];
        let inner_obj: option::t<@ast::expr> = none;
        expect(p, token::LBRACE);
        while p.peek() != token::RBRACE {
            if eat_word(p, "with") {
                inner_obj = some(parse_expr(p));
            } else { meths += [parse_method(p)]; }
        }
        hi = p.get_hi_pos();
        expect(p, token::RBRACE);
        // fields and methods may be *additional* or *overriding* fields
        // and methods if there's a inner_obj, or they may be the *only*
        // fields and methods if there's no inner_obj.
        // We don't need to pull ".node" out of fields because it's not a
        // "spanned".
        let ob = {fields: fields, methods: meths, inner_obj: inner_obj};
        ex = ast::expr_anon_obj(ob);
    } else if eat_word(p, "bind") {
        // `bind f(a, _, c)` — partial application; `_` marks holes.
        // The callee is parsed with calls restricted so that the
        // argument list binds to `bind` itself.
        let e = parse_expr_res(p, RESTRICT_NO_CALL_EXPRS);
        fn parse_expr_opt(p: parser) -> option::t<@ast::expr> {
            alt p.peek() {
              token::UNDERSCORE. { p.bump(); ret none; }
              _ { ret some(parse_expr(p)); }
            }
        }
        let es =
            parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                      parse_expr_opt, p);
        hi = es.span.hi;
        ex = ast::expr_bind(e, es.node);
    } else if p.peek() == token::POUND {
        let ex_ext = parse_syntax_ext(p);
        hi = ex_ext.span.hi;
        ex = ex_ext.node;
    } else if eat_word(p, "fail") {
        if can_begin_expr(p.peek()) {
            let e = parse_expr(p);
            hi = e.span.hi;
            ex = ast::expr_fail(some(e));
        } else { ex = ast::expr_fail(none); }
    } else if eat_word(p, "log") {
        let e = parse_expr(p);
        ex = ast::expr_log(1, e);
        hi = e.span.hi;
    } else if eat_word(p, "log_err") {
        let e = parse_expr(p);
        ex = ast::expr_log(0, e);
        hi = e.span.hi;
    } else if eat_word(p, "assert") {
        let e = parse_expr(p);
        ex = ast::expr_assert(e);
        hi = e.span.hi;
    } else if eat_word(p, "check") {
        /* Should be a predicate (pure boolean function) applied to
           arguments that are all either slot variables or literals.
           but the typechecker enforces that. */
        let e = parse_expr(p);
        hi = e.span.hi;
        ex = ast::expr_check(ast::checked_expr, e);
    } else if eat_word(p, "claim") {
        /* Same rules as check, except that if check-claims
           is enabled (a command-line flag), then the parser turns
           claims into check */
        let e = parse_expr(p);
        hi = e.span.hi;
        ex = ast::expr_check(ast::claimed_expr, e);
    } else if eat_word(p, "ret") {
        if can_begin_expr(p.peek()) {
            let e = parse_expr(p);
            hi = e.span.hi;
            ex = ast::expr_ret(some(e));
        } else { ex = ast::expr_ret(none); }
    } else if eat_word(p, "break") {
        ex = ast::expr_break;
        hi = p.get_hi_pos();
    } else if eat_word(p, "cont") {
        ex = ast::expr_cont;
        hi = p.get_hi_pos();
    } else if eat_word(p, "be") {
        let e = parse_expr(p);
        // FIXME: Is this the right place for this check?
        if /*check*/ast_util::is_call_expr(e) {
            hi = e.span.hi;
            ex = ast::expr_be(e);
        } else { p.fatal("Non-call expression in tail call"); }
    } else if eat_word(p, "copy") {
        let e = parse_expr(p);
        ex = ast::expr_copy(e);
        hi = e.span.hi;
    } else if eat_word(p, "self") {
        expect(p, token::DOT);
        // The rest is a call expression.
        let f: @ast::expr = parse_self_method(p);
        let es =
            parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                      parse_expr, p);
        hi = es.span.hi;
        ex = ast::expr_call(f, es.node, false);
    } else if p.peek() == token::MOD_SEP ||
                  is_ident(p.peek()) && !is_word(p, "true") &&
                      !is_word(p, "false") {
        // A path expression; reserved words were all handled above.
        check_bad_word(p);
        let pth = parse_path_and_ty_param_substs(p);
        hi = pth.span.hi;
        ex = ast::expr_path(pth);
    } else {
        // Fallback: must be a literal.
        let lit = parse_lit(p);
        hi = lit.span.hi;
        ex = ast::expr_lit(@lit);
    }
    ret mk_expr(p, lo, hi, ex);
}
// Parse a `{ ... }` block as an expression with the given check mode
// (default/unchecked/unsafe); the caller has already eaten the keyword.
fn parse_block_expr(p: parser,
                    lo: uint,
                    blk_mode: ast::blk_check_mode) -> @ast::expr {
    expect(p, token::LBRACE);
    let blk = parse_block_tail(p, lo, blk_mode);
    ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
}
// Parse a `#name(...)` syntax-extension (macro) invocation, including
// the leading `#`.
fn parse_syntax_ext(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    expect(p, token::POUND);
    ret parse_syntax_ext_naked(p, lo);
}
// Parse a syntax-extension invocation after the `#` has been consumed:
// an expander path followed by a parenthesized or bracketed argument
// list, packaged as a mac_invoc whose arguments are an immutable vec.
fn parse_syntax_ext_naked(p: parser, lo: uint) -> @ast::expr {
    let pth = parse_path(p);
    if vec::len(pth.node.idents) == 0u {
        p.fatal("expected a syntax expander name");
    }
    //temporary for a backwards-compatible cycle:
    let sep = seq_sep(token::COMMA);
    let es =
        if p.peek() == token::LPAREN {
            parse_seq(token::LPAREN, token::RPAREN, sep, parse_expr, p)
        } else {
            parse_seq(token::LBRACKET, token::RBRACKET, sep, parse_expr, p)
        };
    let hi = es.span.hi;
    let e = mk_expr(p, es.span.lo, hi, ast::expr_vec(es.node, ast::imm));
    ret mk_mac_expr(p, lo, hi, ast::mac_invoc(pth, e, none));
}
// Parse the method name in a `self.method(...)` call and wrap it as an
// expr_self_method node.
fn parse_self_method(p: parser) -> @ast::expr {
    let sp = p.get_span();
    let f_name: ast::ident = parse_ident(p);
    ret mk_expr(p, sp.lo, sp.hi, ast::expr_self_method(f_name));
}
// Parse a primary expression and then any trailing `.field`, `[index]`,
// or `(args)` suffixes. Skips the suffix pass when the base expression
// cannot yield a value (e.g. a diverging form).
fn parse_dot_or_call_expr(p: parser) -> @ast::expr {
    let b = parse_bottom_expr(p);
    if expr_has_value(b) { parse_dot_or_call_expr_with(p, b) }
    else { b }
}
// Repeatedly extend `e` with call, index, or field-access suffixes
// until no more apply. Calls are suppressed under
// RESTRICT_NO_CALL_EXPRS (used e.g. by `bind` and `alt` heads).
fn parse_dot_or_call_expr_with(p: parser, e: @ast::expr) -> @ast::expr {
    let lo = e.span.lo;
    let hi = e.span.hi;
    let e = e;
    while true {
        alt p.peek() {
          token::LPAREN. {
            if p.get_restriction() == RESTRICT_NO_CALL_EXPRS {
                ret e;
            } else {
                // Call expr.
                let es = parse_seq(token::LPAREN, token::RPAREN,
                                   seq_sep(token::COMMA), parse_expr, p);
                hi = es.span.hi;
                let nd = ast::expr_call(e, es.node, false);
                e = mk_expr(p, lo, hi, nd);
            }
          }
          token::LBRACKET. {
            // Index expr: e[ix].
            p.bump();
            let ix = parse_expr(p);
            hi = ix.span.hi;
            expect(p, token::RBRACKET);
            e = mk_expr(p, lo, hi, ast::expr_index(e, ix));
          }
          token::DOT. {
            // Field access: e.ident.
            p.bump();
            alt p.peek() {
              token::IDENT(i, _) {
                hi = p.get_hi_pos();
                p.bump();
                e = mk_expr(p, lo, hi, ast::expr_field(e, p.get_str(i)));
              }
              t { unexpected(p, t); }
            }
          }
          _ { ret e; }
        }
    }
    ret e;
}
// Parse prefix-operator expressions: `!`, unary `-`, `*` (deref),
// `@` (box) and `~` (uniq) with optional mutability. Anything else
// falls through to dot/call parsing.
fn parse_prefix_expr(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let ex;
    alt p.peek() {
      token::NOT. {
        p.bump();
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::not, e);
      }
      token::BINOP(b) {
        alt b {
          token::MINUS. {
            p.bump();
            let e = parse_prefix_expr(p);
            hi = e.span.hi;
            ex = ast::expr_unary(ast::neg, e);
          }
          token::STAR. {
            p.bump();
            let e = parse_prefix_expr(p);
            hi = e.span.hi;
            ex = ast::expr_unary(ast::deref, e);
          }
          // Other BINOP tokens are not prefix operators.
          _ { ret parse_dot_or_call_expr(p); }
        }
      }
      token::AT. {
        p.bump();
        let m = parse_mutability(p);
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::box(m), e);
      }
      token::TILDE. {
        p.bump();
        let m = parse_mutability(p);
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::uniq(m), e);
      }
      _ { ret parse_dot_or_call_expr(p); }
    }
    ret mk_expr(p, lo, hi, ex);
}
// Parse a ternary `cond ? then : else` expression, or just a binop
// expression when no `?` follows.
fn parse_ternary(p: parser) -> @ast::expr {
    let cond_expr = parse_binops(p);
    if p.peek() == token::QUES {
        p.bump();
        let then_expr = parse_expr(p);
        expect(p, token::COLON);
        let else_expr = parse_expr(p);
        ret mk_expr(p, cond_expr.span.lo, else_expr.span.hi,
                    ast::expr_ternary(cond_expr, then_expr, else_expr));
    } else { ret cond_expr; }
}
// One row of the binary-operator precedence table: the token, the AST
// operator it maps to, and its precedence (higher binds tighter).
type op_spec = {tok: token::token, op: ast::binop, prec: int};

// Build the binary-operator precedence table consumed by
// parse_more_binops. `as` (prec 5) and the ternary/unary operators are
// handled separately (see the consts below the table).
// FIXME make this a const, don't store it in parser state
fn prec_table() -> @[op_spec] {
    ret @[{tok: token::BINOP(token::STAR), op: ast::mul, prec: 11},
          {tok: token::BINOP(token::SLASH), op: ast::div, prec: 11},
          {tok: token::BINOP(token::PERCENT), op: ast::rem, prec: 11},
          {tok: token::BINOP(token::PLUS), op: ast::add, prec: 10},
          {tok: token::BINOP(token::MINUS), op: ast::sub, prec: 10},
          {tok: token::BINOP(token::LSL), op: ast::lsl, prec: 9},
          {tok: token::BINOP(token::LSR), op: ast::lsr, prec: 9},
          {tok: token::BINOP(token::ASR), op: ast::asr, prec: 9},
          {tok: token::BINOP(token::AND), op: ast::bitand, prec: 8},
          {tok: token::BINOP(token::CARET), op: ast::bitxor, prec: 6},
          {tok: token::BINOP(token::OR), op: ast::bitor, prec: 6},
          // 'as' sits between here with 5
          {tok: token::LT, op: ast::lt, prec: 4},
          {tok: token::LE, op: ast::le, prec: 4},
          {tok: token::GE, op: ast::ge, prec: 4},
          {tok: token::GT, op: ast::gt, prec: 4},
          {tok: token::EQEQ, op: ast::eq, prec: 3},
          {tok: token::NE, op: ast::ne, prec: 3},
          {tok: token::ANDAND, op: ast::and, prec: 2},
          {tok: token::OROR, op: ast::or, prec: 1}];
}
// Entry point for binary-operator parsing: parse a prefix expression
// then fold in operators via precedence climbing (min precedence 0).
fn parse_binops(p: parser) -> @ast::expr {
    ret parse_more_binops(p, parse_prefix_expr(p), 0);
}
// Precedence levels for the operators not listed in prec_table:
// unary operators bind tightest; `as` sits between bitwise and
// comparison operators; `?:` binds loosest.
const unop_prec: int = 100;
const as_prec: int = 5;
const ternary_prec: int = 0;
// Precedence-climbing loop: given the parsed left operand, consume any
// operator with precedence above `min_prec`, parse its right operand,
// and recurse. Also handles `as` casts at as_prec, and a lexer hack
// that splits a negative numeric literal into `-` plus a literal.
fn parse_more_binops(p: parser, lhs: @ast::expr, min_prec: int) ->
   @ast::expr {
    // A valueless lhs (e.g. diverging expr) cannot be an operand.
    if !expr_has_value(lhs) { ret lhs; }
    let peeked = p.peek();
    // If the lexer glued a minus sign onto a literal, untangle it:
    // treat the token as `-` and pre-build the literal operand.
    let lit_after = alt lexer::maybe_untangle_minus_from_lit(p.get_reader(),
                                                            peeked) {
      some(tok) {
        peeked = token::BINOP(token::MINUS);
        let lit = @{node: lit_from_token(p, tok), span: p.get_span()};
        some(mk_expr(p, p.get_lo_pos(), p.get_hi_pos(), ast::expr_lit(lit)))
      }
      none. { none }
    };
    for cur: op_spec in *p.get_prec_table() {
        if cur.prec > min_prec && cur.tok == peeked {
            p.bump();
            let expr = alt lit_after {
              some(ex) { ex }
              _ { parse_prefix_expr(p) }
            };
            let rhs = parse_more_binops(p, expr, cur.prec);
            let bin = mk_expr(p, lhs.span.lo, rhs.span.hi,
                              ast::expr_binary(cur.op, lhs, rhs));
            ret parse_more_binops(p, bin, min_prec);
        }
    }
    if as_prec > min_prec && eat_word(p, "as") {
        let rhs = parse_ty(p, true);
        let _as =
            mk_expr(p, lhs.span.lo, rhs.span.hi, ast::expr_cast(lhs, rhs));
        ret parse_more_binops(p, _as, min_prec);
    }
    ret lhs;
}
// Parse an assignment-level expression: a ternary optionally followed
// by `=`, an op-assignment (`+=` etc.), move (`<-`), or swap (`<->`).
fn parse_assign_expr(p: parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let lhs = parse_ternary(p);
    alt p.peek() {
      token::EQ. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign(lhs, rhs));
      }
      token::BINOPEQ(op) {
        p.bump();
        let rhs = parse_expr(p);
        // Map the compound-assignment token to its AST operator.
        let aop = ast::add;
        alt op {
          token::PLUS. { aop = ast::add; }
          token::MINUS. { aop = ast::sub; }
          token::STAR. { aop = ast::mul; }
          token::SLASH. { aop = ast::div; }
          token::PERCENT. { aop = ast::rem; }
          token::CARET. { aop = ast::bitxor; }
          token::AND. { aop = ast::bitand; }
          token::OR. { aop = ast::bitor; }
          token::LSL. { aop = ast::lsl; }
          token::LSR. { aop = ast::lsr; }
          token::ASR. { aop = ast::asr; }
        }
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign_op(aop, lhs, rhs));
      }
      token::LARROW. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_move(lhs, rhs));
      }
      token::DARROW. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_swap(lhs, rhs));
      }
      _ {/* fall through */ }
    }
    ret lhs;
}
// Shared worker for `if` and `if check`: parses condition, then-block,
// and optional `else`. Rejects an else-less `if` whose then-block
// produces a value, since such an expression would have no value on
// the false branch.
fn parse_if_expr_1(p: parser) ->
   {cond: @ast::expr,
    then: ast::blk,
    els: option::t<@ast::expr>,
    lo: uint,
    hi: uint} {
    let lo = p.get_last_lo_pos();
    let cond = parse_expr(p);
    let thn = parse_block(p);
    let els: option::t<@ast::expr> = none;
    let hi = thn.span.hi;
    if eat_word(p, "else") {
        let elexpr = parse_else_expr(p);
        els = some(elexpr);
        hi = elexpr.span.hi;
    } else if !option::is_none(thn.node.expr) {
        let sp = option::get(thn.node.expr).span;
        p.span_fatal(sp, "`if` without `else` can not produce a result");
        //TODO: If a suggestion mechanism appears, suggest that the
        //user may have forgotten a ';'
    }
    ret {cond: cond, then: thn, els: els, lo: lo, hi: hi};
}
// Parse an `if` expression; `if check <pred>` becomes expr_if_check,
// plain `if` becomes expr_if. The `if` keyword was already eaten.
fn parse_if_expr(p: parser) -> @ast::expr {
    if eat_word(p, "check") {
        let q = parse_if_expr_1(p);
        ret mk_expr(p, q.lo, q.hi, ast::expr_if_check(q.cond, q.then, q.els));
    } else {
        let q = parse_if_expr_1(p);
        ret mk_expr(p, q.lo, q.hi, ast::expr_if(q.cond, q.then, q.els));
    }
}
// Parse an anonymous fn expression (`fn`/`block`/`lambda` keyword
// already eaten; the proto is supplied by the caller).
fn parse_fn_expr(p: parser, proto: ast::proto) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_fn_decl(p, ast::impure_fn, ast::il_normal);
    let body = parse_block(p);
    let _fn = {decl: decl, proto: proto, body: body};
    ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parse a `{|args| ...}` block-lambda; the opening `{` was already
// consumed by the caller. Always uses proto_block.
fn parse_fn_block_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_fn_block_decl(p);
    let body = parse_block_tail(p, lo, ast::default_blk);
    let _fn = {decl: decl, proto: ast::proto_block, body: body};
    ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parse what follows `else`: either a chained `if` or a plain block.
fn parse_else_expr(p: parser) -> @ast::expr {
    if eat_word(p, "if") {
        ret parse_if_expr(p);
    } else {
        let blk = parse_block(p);
        ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
    }
}
// Parse `for <local> in <seq> <block>`; the loop body must not
// produce a value. The `for` keyword was already eaten.
fn parse_for_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_local(p, false);
    expect_word(p, "in");
    let seq = parse_expr(p);
    let body = parse_block_no_value(p);
    let hi = body.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_for(decl, seq, body));
}
// Parse `while <cond> <block>`; the body must not produce a value.
fn parse_while_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let cond = parse_expr(p);
    let body = parse_block_no_value(p);
    let hi = body.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_while(cond, body));
}
// Parse `do <block> while <cond>`; the `do` keyword was already eaten.
fn parse_do_while_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let body = parse_block_no_value(p);
    expect_word(p, "while");
    let cond = parse_expr(p);
    let hi = cond.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_do_while(body, cond));
}
// Parse an `alt` (match) expression: discriminant, then `{` followed
// by arms of `pats [when guard] block` until the closing `}`.
fn parse_alt_expr(p: parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let discriminant = parse_expr(p);
    expect(p, token::LBRACE);
    let arms: [ast::arm] = [];
    while p.peek() != token::RBRACE {
        let pats = parse_pats(p);
        let guard = none;
        if eat_word(p, "when") { guard = some(parse_expr(p)); }
        let blk = parse_block(p);
        arms += [{pats: pats, guard: guard, body: blk}];
    }
    let hi = p.get_hi_pos();
    // Consume the closing brace.
    p.bump();
    ret mk_expr(p, lo, hi, ast::expr_alt(discriminant, arms));
}
// Parse a full expression with no parsing restrictions in effect.
fn parse_expr(p: parser) -> @ast::expr {
    ret parse_expr_res(p, UNRESTRICTED);
}
// Parse an expression under a temporary restriction (e.g. no call
// suffixes), restoring the previous restriction afterwards.
fn parse_expr_res(p: parser, r: restriction) -> @ast::expr {
    let old = p.get_restriction();
    p.restrict(r);
    let e = parse_assign_expr(p);
    p.restrict(old);
    ret e;
}
// Parse an optional local-variable initializer: `= expr` (assign) or
// `<- expr` (move); returns none when no initializer follows.
fn parse_initializer(p: parser) -> option::t<ast::initializer> {
    alt p.peek() {
      token::EQ. {
        p.bump();
        ret some({op: ast::init_assign, expr: parse_expr(p)});
      }
      token::LARROW. {
        p.bump();
        ret some({op: ast::init_move, expr: parse_expr(p)});
      }
      // Now that the channel is the first argument to receive,
      // combining it with an initializer doesn't really make sense.
      // case (token::RECV) {
      //     p.bump();
      //     ret some(rec(op = ast::init_recv,
      //                  expr = parse_expr(p)));
      // }
      _ {
        ret none;
      }
    }
}
// Parse one or more `|`-separated patterns (an alt-arm pattern list).
fn parse_pats(p: parser) -> [@ast::pat] {
    let pats = [];
    while true {
        pats += [parse_pat(p)];
        if p.peek() == token::BINOP(token::OR) { p.bump(); } else { break; }
    }
    ret pats;
}
// Parse a single pattern: wildcard `_`, box `@pat`, uniq `~pat`,
// record `{field: pat, ..., _}`, tuple `(pat, ...)`, literal /
// literal range, plain binding, or tag (variant) pattern with
// optional argument list.
fn parse_pat(p: parser) -> @ast::pat {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let pat;
    alt p.peek() {
      token::UNDERSCORE. { p.bump(); pat = ast::pat_wild; }
      token::AT. {
        p.bump();
        let sub = parse_pat(p);
        pat = ast::pat_box(sub);
        hi = sub.span.hi;
      }
      token::TILDE. {
        p.bump();
        let sub = parse_pat(p);
        pat = ast::pat_uniq(sub);
        hi = sub.span.hi;
      }
      token::LBRACE. {
        // Record pattern. A trailing `_` (the "etc" marker) must be
        // the last element before `}`.
        p.bump();
        let fields = [];
        let etc = false;
        let first = true;
        while p.peek() != token::RBRACE {
            if first { first = false; } else { expect(p, token::COMMA); }
            if p.peek() == token::UNDERSCORE {
                p.bump();
                if p.peek() != token::RBRACE {
                    p.fatal("expecting }, found " +
                                token::to_str(p.get_reader(), p.peek()));
                }
                etc = true;
                break;
            }
            let fieldname = parse_ident(p);
            let subpat;
            if p.peek() == token::COLON {
                p.bump();
                subpat = parse_pat(p);
            } else {
                // Shorthand `{x}` binds field x to a variable x;
                // reserved words cannot be used as bindings.
                if p.get_bad_expr_words().contains_key(fieldname) {
                    p.fatal("found " + fieldname + " in binding position");
                }
                subpat =
                    @{id: p.get_id(),
                      node: ast::pat_bind(fieldname),
                      span: ast_util::mk_sp(lo, hi)};
            }
            fields += [{ident: fieldname, pat: subpat}];
        }
        hi = p.get_hi_pos();
        p.bump();
        pat = ast::pat_rec(fields, etc);
      }
      token::LPAREN. {
        p.bump();
        if p.peek() == token::RPAREN {
            // `()` — the nil literal pattern.
            hi = p.get_hi_pos();
            p.bump();
            pat =
                ast::pat_lit(@{node: ast::lit_nil,
                               span: ast_util::mk_sp(lo, hi)});
        } else {
            let fields = [parse_pat(p)];
            while p.peek() == token::COMMA {
                p.bump();
                fields += [parse_pat(p)];
            }
            // A 1-tuple pattern requires a trailing comma.
            if vec::len(fields) == 1u { expect(p, token::COMMA); }
            hi = p.get_hi_pos();
            expect(p, token::RPAREN);
            pat = ast::pat_tup(fields);
        }
      }
      tok {
        if !is_ident(tok) || is_word(p, "true") || is_word(p, "false") {
            // Literal or `lit to lit` range pattern.
            let lit = parse_lit(p);
            if eat_word(p, "to") {
                let end = parse_lit(p);
                hi = end.span.hi;
                pat = ast::pat_range(@lit, @end);
            } else {
                hi = lit.span.hi;
                pat = ast::pat_lit(@lit);
            }
        } else if is_plain_ident(p) &&
                      alt p.look_ahead(1u) {
                        // `.`/`(`/`[` after the ident means it starts a
                        // path or tag application, not a plain binding.
                        token::DOT. | token::LPAREN. | token::LBRACKET. {
                          false
                        }
                        _ { true }
                      } {
            hi = p.get_hi_pos();
            pat = ast::pat_bind(parse_value_ident(p));
        } else {
            // Tag (variant) pattern: path plus optional `(args)` or a
            // trailing `.` marking an explicit nullary application.
            let tag_path = parse_path_and_ty_param_substs(p);
            hi = tag_path.span.hi;
            let args: [@ast::pat];
            alt p.peek() {
              token::LPAREN. {
                let a =
                    parse_seq(token::LPAREN, token::RPAREN,
                              seq_sep(token::COMMA), parse_pat, p);
                args = a.node;
                hi = a.span.hi;
              }
              token::DOT. { args = []; p.bump(); }
              _ { expect(p, token::LPAREN); fail; }
            }
            pat = ast::pat_tag(tag_path, args);
        }
      }
    }
    ret @{id: p.get_id(), node: pat, span: ast_util::mk_sp(lo, hi)};
}
// Parse a local declaration: pattern, optional `: ty` (otherwise the
// type is inferred), and — when allowed — an initializer.
fn parse_local(p: parser, allow_init: bool) -> @ast::local {
    let lo = p.get_lo_pos();
    let pat = parse_pat(p);
    let ty = @spanned(lo, lo, ast::ty_infer);
    if eat(p, token::COLON) { ty = parse_ty(p, false); }
    let init = if allow_init { parse_initializer(p) } else { none };
    ret @spanned(lo, p.get_last_hi_pos(),
                 {ty: ty, pat: pat, init: init, id: p.get_id()});
}
// Parse a `let` declaration: a comma-separated list of locals, each
// optionally prefixed with `&` for by-reference binding style.
fn parse_let(p: parser) -> @ast::decl {
    // `&` selects let_ref; otherwise the binding is by copy.
    fn parse_let_style(p: parser) -> ast::let_style {
        eat(p, token::BINOP(token::AND)) ? ast::let_ref : ast::let_copy
    }
    let lo = p.get_lo_pos();
    let locals = [(parse_let_style(p), parse_local(p, true))];
    while eat(p, token::COMMA) {
        locals += [(parse_let_style(p), parse_local(p, true))];
    }
    ret @spanned(lo, p.get_last_hi_pos(), ast::decl_local(locals));
}
// Parse a statement, dispatching on whether we are parsing a source
// file or a crate (directive) file.
fn parse_stmt(p: parser) -> @ast::stmt {
    if p.get_file_type() == SOURCE_FILE {
        ret parse_source_stmt(p);
    } else { ret parse_crate_stmt(p); }
}
// Parse a crate-file statement: a single crate directive.
fn parse_crate_stmt(p: parser) -> @ast::stmt {
    let cdir = parse_crate_directive(p, []);
    ret @spanned(cdir.span.lo, cdir.span.hi,
                 ast::stmt_crate_directive(@cdir));
}
// Parse a source-file statement: a `let` declaration, an item
// (possibly with outer attributes), or an expression statement.
// Expression statements get special handling for trailing `{|...|}`
// block-call sugar.
fn parse_source_stmt(p: parser) -> @ast::stmt {
    let lo = p.get_lo_pos();
    if eat_word(p, "let") {
        let decl = parse_let(p);
        ret @spanned(lo, decl.span.hi, ast::stmt_decl(decl, p.get_id()));
    } else {
        let item_attrs;
        alt parse_outer_attrs_or_ext(p) {
          none. { item_attrs = []; }
          some(left(attrs)) { item_attrs = attrs; }
          some(right(ext)) {
            // A syntax extension used in statement position.
            ret @spanned(lo, ext.span.hi, ast::stmt_expr(ext, p.get_id()));
          }
        }
        let maybe_item = parse_item(p, item_attrs);
        // If we have attributes then we should have an item
        if vec::len(item_attrs) > 0u {
            alt maybe_item {
              some(_) {/* fallthrough */ }
              _ { ret p.fatal("expected item"); }
            }
        }
        alt maybe_item {
          some(i) {
            let hi = i.span.hi;
            let decl = @spanned(lo, hi, ast::decl_item(i));
            ret @spanned(lo, hi, ast::stmt_decl(decl, p.get_id()));
          }
          none. {
            // Remainder are line-expr stmts.
            let e = parse_expr(p);
            // See if it is a block call
            if expr_has_value(e) && p.peek() == token::LBRACE &&
               is_bar(p.look_ahead(1u)) {
                p.bump();
                let blk = parse_fn_block_expr(p);
                alt e.node {
                  ast::expr_call(f, args, false) {
                    // Append the block as a final argument of the
                    // existing call, marking it as a block call.
                    e = @{node: ast::expr_call(f, args + [blk], true)
                          with *e};
                  }
                  _ {
                    // Otherwise treat the expr itself as the callee.
                    e = mk_expr(p, lo, p.get_last_hi_pos(),
                                ast::expr_call(e, [blk], true));
                  }
                }
            }
            ret @spanned(lo, e.span.hi, ast::stmt_expr(e, p.get_id()));
          }
          _ { p.fatal("expected statement"); }
        }
    }
}
// Whether an expression can produce a value: used to decide if a
// statement needs a terminating ';' and whether an expr may be an
// operand. Block-like forms produce a value only when their tail
// expression does; a block-call never does.
fn expr_has_value(e: @ast::expr) -> bool {
    alt e.node {
      ast::expr_if(_, th, els) | ast::expr_if_check(_, th, els) {
        if option::is_none(els) { false }
        else { !option::is_none(th.node.expr) ||
                  expr_has_value(option::get(els)) }
      }
      ast::expr_alt(_, arms) {
        // Any arm with a tail expression makes the alt valued.
        let found_expr = false;
        for arm in arms {
            if !option::is_none(arm.body.node.expr) { found_expr = true; }
        }
        found_expr
      }
      ast::expr_block(blk) | ast::expr_while(_, blk) |
      ast::expr_for(_, _, blk) | ast::expr_do_while(blk, _) {
        !option::is_none(blk.node.expr)
      }
      ast::expr_call(_, _, true) { false }
      _ { true }
    }
}
// True iff the statement is an expression statement whose expression
// produces a value.
fn stmt_is_expr(stmt: @ast::stmt) -> bool {
    ret alt stmt.node {
      ast::stmt_expr(e, _) { expr_has_value(e) }
      _ { false }
    };
}
// Extract the expression from a valued expression statement; none for
// any other statement kind.
fn stmt_to_expr(stmt: @ast::stmt) -> option::t<@ast::expr> {
    ret if stmt_is_expr(stmt) {
        // stmt_is_expr guarantees this is a stmt_expr.
        alt stmt.node {
          ast::stmt_expr(e, _) { some(e) }
        }
    } else { none };
}
// Whether this statement must be terminated by a ';' in source form:
// local declarations and valued expression statements do; item
// declarations do not.
fn stmt_ends_with_semi(stmt: ast::stmt) -> bool {
    alt stmt.node {
      ast::stmt_decl(d, _) {
        ret alt d.node {
          ast::decl_local(_) { true }
          ast::decl_item(_) { false }
        }
      }
      ast::stmt_expr(e, _) {
        ret expr_has_value(e);
      }
      // We should not be calling this on a cdir.
      ast::stmt_crate_directive(cdir) {
        fail;
      }
    }
}
// Parse a block, honoring an optional leading `unchecked`/`unsafe`
// qualifier before the `{`. Uses tail calls (`be`) into
// parse_block_tail.
fn parse_block(p: parser) -> ast::blk {
    let lo = p.get_lo_pos();
    if eat_word(p, "unchecked") {
        expect(p, token::LBRACE);
        be parse_block_tail(p, lo, ast::unchecked_blk);
    } else if eat_word(p, "unsafe") {
        expect(p, token::LBRACE);
        be parse_block_tail(p, lo, ast::unsafe_blk);
    } else {
        expect(p, token::LBRACE);
        be parse_block_tail(p, lo, ast::default_blk);
    }
}
// Parse a block that must not produce a value (loop bodies, resource
// destructors); a tail expression is a fatal error.
fn parse_block_no_value(p: parser) -> ast::blk {
    let blk = parse_block(p);
    if !option::is_none(blk.node.expr) {
        let sp = option::get(blk.node.expr).span;
        p.span_fatal(sp, "this block must not have a result");
        //TODO: If a suggestion mechanism appears, suggest that the
        //user may have forgotten a ';'
    }
    ret blk;
}
// Precondition: already parsed the '{' or '#{'
// I guess that also means "already parsed the 'impure'" if
// necessary, and this should take a qualifier.
// some blocks start with "#{"...
//
// Parse the interior of a block up to and including its closing `}`:
// leading `import` view items, then statements, tracking whether the
// final expression (if any) is the block's value.
fn parse_block_tail(p: parser, lo: uint, s: ast::blk_check_mode) -> ast::blk {
    let view_items = [], stmts = [], expr = none;
    while is_word(p, "import") { view_items += [parse_view_item(p)]; }
    while p.peek() != token::RBRACE {
        alt p.peek() {
          token::SEMI. {
            p.bump(); // empty
          }
          _ {
            let stmt = parse_stmt(p);
            alt stmt_to_expr(stmt) {
              some(e) {
                // A valued expression statement: a following ';' keeps
                // it a statement; a following '}' makes it the block's
                // tail expression; anything else is only allowed for
                // forms that don't require a semicolon.
                alt p.peek() {
                  token::SEMI. { p.bump(); stmts += [stmt]; }
                  token::RBRACE. { expr = some(e); }
                  t {
                    if stmt_ends_with_semi(*stmt) {
                        p.fatal("expected ';' or '}' after " +
                                    "expression but found " +
                                    token::to_str(p.get_reader(), t));
                    }
                    stmts += [stmt];
                  }
                }
              }
              none. {
                // Not an expression statement.
                stmts += [stmt];
                if p.get_file_type() == SOURCE_FILE &&
                       stmt_ends_with_semi(*stmt) {
                    expect(p, token::SEMI);
                }
              }
            }
          }
        }
    }
    let hi = p.get_hi_pos();
    // Consume the closing brace.
    p.bump();
    let bloc = {view_items: view_items, stmts: stmts, expr: expr,
                id: p.get_id(), rules: s};
    ret spanned(lo, hi, bloc);
}
// Parse one type parameter with an optional kind bound (`send`,
// `copy`, or neither — noncopyable).
fn parse_ty_param(p: parser) -> ast::ty_param {
    let k = if eat_word(p, "send") { ast::kind_sendable }
            else if eat_word(p, "copy") { ast::kind_copyable }
            else { ast::kind_noncopyable };
    ret {ident: parse_ident(p), kind: k};
}
// Parse an optional `<T, U, ...>` type-parameter list; empty when the
// next token is not `<`.
fn parse_ty_params(p: parser) -> [ast::ty_param] {
    let ty_params: [ast::ty_param] = [];
    if p.peek() == token::LT {
        p.bump();
        ty_params = parse_seq_to_gt(some(token::COMMA), parse_ty_param, p);
    }
    ret ty_params;
}
// Parse a function declaration: argument list, optional `:`-prefixed
// constraint list, and return type; purity and inlineness are decided
// by the caller.
fn parse_fn_decl(p: parser, purity: ast::purity, il: ast::inlineness) ->
   ast::fn_decl {
    let inputs: ast::spanned<[ast::arg]> =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_arg, p);
    // Use the args list to translate each bound variable
    // mentioned in a constraint to an arg index.
    // Seems weird to do this in the parser, but I'm not sure how else to.
    let constrs = [];
    if p.peek() == token::COLON {
        p.bump();
        constrs = parse_constrs({|x| parse_ty_constr(inputs.node, x) }, p);
    }
    let (ret_style, ret_ty) = parse_ret_ty(p);
    ret {inputs: inputs.node,
         output: ret_ty,
         purity: purity,
         il: il,
         cf: ret_style,
         constraints: constrs};
}
// Parse a block-lambda declaration: `||` for no arguments, otherwise a
// `|`-delimited argument list. Types are always inferred.
fn parse_fn_block_decl(p: parser) -> ast::fn_decl {
    let inputs =
        // `||` lexes as a single OROR token, not two bars.
        if p.peek() == token::OROR {
            p.bump();
            []
        } else {
            parse_seq(token::BINOP(token::OR), token::BINOP(token::OR),
                      seq_sep(token::COMMA), parse_fn_block_arg, p).node
        };
    ret {inputs: inputs,
         output: @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer),
         purity: ast::impure_fn,
         il: ast::il_normal,
         cf: ast::return_val,
         constraints: []};
}
// Parse a full function: declaration followed by its body block.
fn parse_fn(p: parser, proto: ast::proto, purity: ast::purity,
            il: ast::inlineness) -> ast::_fn {
    let decl = parse_fn_decl(p, purity, il);
    let body = parse_block(p);
    ret {decl: decl, proto: proto, body: body};
}
// Parse a function header: its name and type-parameter list.
fn parse_fn_header(p: parser) -> {ident: ast::ident, tps: [ast::ty_param]} {
    let id = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    ret {ident: id, tps: ty_params};
}
// Build a boxed item node with a fresh node id and the given span,
// name, payload, and attributes.
fn mk_item(p: parser, lo: uint, hi: uint, ident: ast::ident, node: ast::item_,
           attrs: [ast::attribute]) -> @ast::item {
    ret @{ident: ident,
          attrs: attrs,
          id: p.get_id(),
          node: node,
          span: ast_util::mk_sp(lo, hi)};
}
// Parse a `fn` item (the keyword was already eaten): header then the
// function proper.
fn parse_item_fn(p: parser, purity: ast::purity, proto: ast::proto,
                 attrs: [ast::attribute], il: ast::inlineness) ->
   @ast::item {
    let lo = p.get_last_lo_pos();
    let t = parse_fn_header(p);
    let f = parse_fn(p, proto, purity, il);
    ret mk_item(p, lo, f.body.span.hi, t.ident, ast::item_fn(f, t.tps),
                attrs);
}
// Parse one field of an obj item: `[mutability] ident: ty`.
fn parse_obj_field(p: parser) -> ast::obj_field {
    let mut = parse_mutability(p);
    let ident = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    ret {mut: mut, ty: ty, ident: ident, id: p.get_id()};
}
// Parse one field of an anonymous obj: `[mutability] ident: ty = expr`
// — unlike named-obj fields, these carry an initializing expression.
fn parse_anon_obj_field(p: parser) -> ast::anon_obj_field {
    let mut = parse_mutability(p);
    let ident = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    expect(p, token::EQ);
    let expr = parse_expr(p);
    ret {mut: mut, ty: ty, expr: expr, ident: ident, id: p.get_id()};
}
// Parse one obj method: proto, name, then an impure normal fn.
fn parse_method(p: parser) -> @ast::method {
    let lo = p.get_lo_pos();
    let proto = parse_method_proto(p);
    let ident = parse_value_ident(p);
    let f = parse_fn(p, proto, ast::impure_fn, ast::il_normal);
    let meth = {ident: ident, meth: f, id: p.get_id()};
    ret @spanned(lo, f.body.span.hi, meth);
}
// Parse an `obj` item: name, type params, parenthesized field list,
// then a braced method list.
fn parse_item_obj(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let ident = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    let fields: ast::spanned<[ast::obj_field]> =
        parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_obj_field, p);
    let meths: [@ast::method] = [];
    expect(p, token::LBRACE);
    while p.peek() != token::RBRACE { meths += [parse_method(p)]; }
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    let ob: ast::_obj = {fields: fields.node, methods: meths};
    ret mk_item(p, lo, hi, ident, ast::item_obj(ob, ty_params, p.get_id()),
                attrs);
}
// Parse a `resource` item: name, type params, a single by-reference
// argument, and a destructor block that must not produce a value. The
// destructor is represented as a fn with a nil return type.
fn parse_item_res(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let ident = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    expect(p, token::LPAREN);
    let arg_ident = parse_value_ident(p);
    expect(p, token::COLON);
    let t = parse_ty(p, false);
    expect(p, token::RPAREN);
    let dtor = parse_block_no_value(p);
    // Synthesize the destructor's fn_decl by hand.
    let decl =
        {inputs:
             [{mode: ast::by_ref, ty: t, ident: arg_ident,
               id: p.get_id()}],
         output: @spanned(lo, lo, ast::ty_nil),
         purity: ast::impure_fn,
         il: ast::il_normal,
         cf: ast::return_val,
         constraints: []};
    let f = {decl: decl, proto: ast::proto_shared(ast::sugar_normal),
             body: dtor};
    ret mk_item(p, lo, dtor.span.hi, ident,
                ast::item_res(f, p.get_id(), ty_params, p.get_id()), attrs);
}
// Parse module contents up to the terminator token: view items first
// (unless attributes were already parsed for the first item), then
// attributed items.
fn parse_mod_items(p: parser, term: token::token,
                   first_item_attrs: [ast::attribute]) -> ast::_mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let view_items =
        if vec::len(first_item_attrs) == 0u { parse_view(p) } else { [] };
    let items: [@ast::item] = [];
    // The pre-parsed attributes belong to the first item only.
    let initial_attrs = first_item_attrs;
    while p.peek() != term {
        let attrs = initial_attrs + parse_outer_attributes(p);
        initial_attrs = [];
        alt parse_item(p, attrs) {
          some(i) { items += [i]; }
          _ {
            p.fatal("expected item but found " +
                        token::to_str(p.get_reader(), p.peek()));
          }
        }
    }
    ret {view_items: view_items, items: items};
}
// Parse a `const` item: `ident: ty = expr;`.
fn parse_item_const(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let id = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    expect(p, token::EQ);
    let e = parse_expr(p);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret mk_item(p, lo, hi, id, ast::item_const(ty, e), attrs);
}
// Parse a `mod` item: name and braced contents. Inner attributes of
// the module are merged with the item's outer attributes.
fn parse_item_mod(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let id = parse_ident(p);
    expect(p, token::LBRACE);
    let inner_attrs = parse_inner_attrs_and_next(p);
    let first_item_outer_attrs = inner_attrs.next;
    let m = parse_mod_items(p, token::RBRACE, first_item_outer_attrs);
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    ret mk_item(p, lo, hi, id, ast::item_mod(m), attrs + inner_attrs.inner);
}
// Parse a native-module `type` item: just a name and a semicolon (no
// definition — natives are opaque).
fn parse_item_native_type(p: parser, attrs: [ast::attribute]) ->
   @ast::native_item {
    let t = parse_type_decl(p);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret @{ident: t.ident,
          attrs: attrs,
          node: ast::native_item_ty,
          id: p.get_id(),
          span: ast_util::mk_sp(t.lo, hi)};
}
// Parse a native-module `fn` item: header and declaration only,
// terminated by `;` — no body.
fn parse_item_native_fn(p: parser, attrs: [ast::attribute],
                        purity: ast::purity) -> @ast::native_item {
    let lo = p.get_last_lo_pos();
    let t = parse_fn_header(p);
    let decl = parse_fn_decl(p, purity, ast::il_normal);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret @{ident: t.ident,
          attrs: attrs,
          node: ast::native_item_fn(decl, t.tps),
          id: p.get_id(),
          span: ast_util::mk_sp(lo, hi)};
}
// Dispatch on the leading keyword of a native-module item: `type`,
// `fn`, `pure fn`, or `unsafe fn`.
fn parse_native_item(p: parser, attrs: [ast::attribute]) ->
   @ast::native_item {
    if eat_word(p, "type") {
        ret parse_item_native_type(p, attrs);
    } else if eat_word(p, "fn") {
        ret parse_item_native_fn(p, attrs, ast::impure_fn);
    } else if eat_word(p, "pure") {
        expect_word(p, "fn");
        ret parse_item_native_fn(p, attrs, ast::pure_fn);
    } else if eat_word(p, "unsafe") {
        expect_word(p, "fn");
        ret parse_item_native_fn(p, attrs, ast::unsafe_fn);
    } else { unexpected(p, p.peek()); }
}
// Parses the contents of a `native` mod up to the closing brace.
// `first_item_attrs` carries an outer attribute that was consumed while
// reading the mod's inner attributes; if present we must already be at an
// item, so no view items are allowed.
fn parse_native_mod_items(p: parser, first_item_attrs: [ast::attribute]) ->
   ast::native_mod {
    // Shouldn't be any view items since we've already parsed an item attr
    let view_items =
        if vec::len(first_item_attrs) == 0u {
            parse_native_view(p)
        } else { [] };
    let items: [@ast::native_item] = [];
    let initial_attrs = first_item_attrs;
    while p.peek() != token::RBRACE {
        // The pre-consumed attribute applies only to the first item.
        let attrs = initial_attrs + parse_outer_attributes(p);
        initial_attrs = [];
        items += [parse_native_item(p, attrs)];
    }
    ret {view_items: view_items,
         items: items};
}
// Parses `native mod <ident> { ... }` (the `native` keyword is already
// consumed; we still expect the `mod` word here).
fn parse_item_native_mod(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    expect_word(p, "mod");
    let id = parse_ident(p);
    expect(p, token::LBRACE);
    let more_attrs = parse_inner_attrs_and_next(p);
    let inner_attrs = more_attrs.inner;
    let first_item_outer_attrs = more_attrs.next;
    let m = parse_native_mod_items(p, first_item_outer_attrs);
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    ret mk_item(p, lo, hi, id, ast::item_native_mod(m), attrs + inner_attrs);
}
// Reads the name part of a `type` declaration (keyword already eaten)
// and reports where it started.
fn parse_type_decl(p: parser) -> {lo: uint, ident: ast::ident} {
    let lo = p.get_last_lo_pos();
    let id = parse_ident(p);
    ret {lo: lo, ident: id};
}

// Parses a type alias item: `type <ident><ty-params> = <ty>;`.
fn parse_item_type(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let t = parse_type_decl(p);
    let tps = parse_ty_params(p);
    expect(p, token::EQ);
    let ty = parse_ty(p, false);
    let hi = p.get_hi_pos();
    expect(p, token::SEMI);
    ret mk_item(p, t.lo, hi, t.ident, ast::item_ty(ty, tps), attrs);
}
// Parses a `tag` (enum) item.  Two surface forms:
//   tag T<...> = <ty>;            -- "newtype" shorthand, one variant
//   tag T<...> { V1; V2(ty, ...); ... }
fn parse_item_tag(p: parser, attrs: [ast::attribute]) -> @ast::item {
    let lo = p.get_last_lo_pos();
    let id = parse_ident(p);
    let ty_params = parse_ty_params(p);
    let variants: [ast::variant] = [];
    // Newtype syntax
    if p.peek() == token::EQ {
        // The tag name doubles as the constructor name, so it must be a
        // legal expression identifier.
        if p.get_bad_expr_words().contains_key(id) {
            p.fatal("found " + id + " in tag constructor position");
        }
        p.bump();
        let ty = parse_ty(p, false);
        expect(p, token::SEMI);
        let variant =
            spanned(ty.span.lo, ty.span.hi,
                    {name: id,
                     args: [{ty: ty, id: p.get_id()}],
                     id: p.get_id()});
        ret mk_item(p, lo, ty.span.hi, id,
                    ast::item_tag([variant], ty_params), attrs);
    }
    expect(p, token::LBRACE);
    while p.peek() != token::RBRACE {
        let tok = p.peek();
        alt tok {
          token::IDENT(name, _) {
            check_bad_word(p);
            let vlo = p.get_lo_pos();
            p.bump();
            let args: [ast::variant_arg] = [];
            let vhi = p.get_hi_pos();
            alt p.peek() {
              token::LPAREN. {
                // Variant with arguments: `V(ty, ty, ...)`.
                let arg_tys = parse_seq(token::LPAREN, token::RPAREN,
                                        seq_sep(token::COMMA),
                                        {|p| parse_ty(p, false)}, p);
                for ty: @ast::ty in arg_tys.node {
                    args += [{ty: ty, id: p.get_id()}];
                }
                vhi = arg_tys.span.hi;
              }
              _ {/* empty */ }
            }
            expect(p, token::SEMI);
            p.get_id();
            let vr = {name: p.get_str(name), args: args, id: p.get_id()};
            variants += [spanned(vlo, vhi, vr)];
          }
          token::RBRACE. {/* empty */ }
          _ {
            p.fatal("expected name of variant or '}' but found " +
                        token::to_str(p.get_reader(), tok));
          }
        }
    }
    let hi = p.get_hi_pos();
    p.bump();
    ret mk_item(p, lo, hi, id, ast::item_tag(variants, ty_params), attrs);
}
// Proto (calling-convention) parsers for the various fn positions.

// Item-position fns are always bare; nothing to parse.
fn parse_fn_item_proto(_p: parser) -> ast::proto {
    ast::proto_bare
}

// In type position, a leading `@` marks a shared (boxed) closure.
fn parse_fn_ty_proto(p: parser) -> ast::proto {
    if p.peek() == token::AT {
        p.bump();
        ast::proto_shared(ast::sugar_normal)
    } else {
        ast::proto_bare
    }
}

// Anonymous fns use the same `@` convention as fn types.
fn parse_fn_anon_proto(p: parser) -> ast::proto {
    if p.peek() == token::AT {
        p.bump();
        ast::proto_shared(ast::sugar_normal)
    } else {
        ast::proto_bare
    }
}

// Methods must start with the `fn` keyword and are always bare.
fn parse_method_proto(p: parser) -> ast::proto {
    if eat_word(p, "fn") {
        ret ast::proto_bare;
    } else { unexpected(p, p.peek()); }
}
// Top-level item dispatcher: looks at the leading keyword and hands off
// to the specific item parser.  Returns `none` when the current token
// does not begin an item (caller decides whether that is an error).
// The look_ahead guards let `fn(` / `unsafe {` / `obj(` still parse as
// expressions rather than items.
fn parse_item(p: parser, attrs: [ast::attribute]) -> option::t<@ast::item> {
    if eat_word(p, "const") {
        ret some(parse_item_const(p, attrs));
    } else if eat_word(p, "inline") {
        expect_word(p, "fn");
        let proto = parse_fn_item_proto(p);
        ret some(parse_item_fn(p, ast::impure_fn, proto,
                               attrs, ast::il_inline));
    } else if is_word(p, "fn") && p.look_ahead(1u) != token::LPAREN {
        p.bump();
        let proto = parse_fn_item_proto(p);
        ret some(parse_item_fn(p, ast::impure_fn, proto,
                               attrs, ast::il_normal));
    } else if eat_word(p, "pure") {
        expect_word(p, "fn");
        let proto = parse_fn_item_proto(p);
        ret some(parse_item_fn(p, ast::pure_fn, proto, attrs,
                               ast::il_normal));
    } else if is_word(p, "unsafe") && p.look_ahead(1u) != token::LBRACE {
        p.bump();
        expect_word(p, "fn");
        let proto = parse_fn_item_proto(p);
        ret some(parse_item_fn(p, ast::unsafe_fn, proto,
                               attrs, ast::il_normal));
    } else if eat_word(p, "mod") {
        ret some(parse_item_mod(p, attrs));
    } else if eat_word(p, "native") {
        ret some(parse_item_native_mod(p, attrs));
    }
    // NOTE: the chain restarts here instead of continuing with `else if`;
    // harmless since every branch above returns, but easy to misread.
    if eat_word(p, "type") {
        ret some(parse_item_type(p, attrs));
    } else if eat_word(p, "tag") {
        ret some(parse_item_tag(p, attrs));
    } else if is_word(p, "obj") && p.look_ahead(1u) != token::LPAREN {
        p.bump();
        ret some(parse_item_obj(p, attrs));
    } else if eat_word(p, "resource") {
        ret some(parse_item_res(p, attrs));
    } else { ret none; }
}
// A type to distingush between the parsing of item attributes or syntax
// extensions, which both begin with token.POUND
type attr_or_ext = option::t<either::t<[ast::attribute], @ast::expr>>;

// Disambiguates `#[...]` (attribute) from `#name(...)` (syntax extension)
// after seeing `#`.  Returns `none` if the `#` begins neither (e.g. `#<`),
// leaving the caller to handle it.
fn parse_outer_attrs_or_ext(p: parser) -> attr_or_ext {
    if p.peek() == token::POUND {
        let lo = p.get_lo_pos();
        p.bump();
        if p.peek() == token::LBRACKET {
            let first_attr = parse_attribute_naked(p, ast::attr_outer, lo);
            ret some(left([first_attr] + parse_outer_attributes(p)));
        } else if !(p.peek() == token::LT || p.peek() == token::LBRACKET) {
            ret some(right(parse_syntax_ext_naked(p, lo)));
        } else { ret none; }
    } else { ret none; }
}
// Parse attributes that appear before an item
fn parse_outer_attributes(p: parser) -> [ast::attribute] {
    let attrs: [ast::attribute] = [];
    while p.peek() == token::POUND {
        attrs += [parse_attribute(p, ast::attr_outer)];
    }
    ret attrs;
}

// Parses one `#[...]` attribute, including the leading `#`.
fn parse_attribute(p: parser, style: ast::attr_style) -> ast::attribute {
    let lo = p.get_lo_pos();
    expect(p, token::POUND);
    ret parse_attribute_naked(p, style, lo);
}

// Parses the `[meta]` part of an attribute; `lo` is the position of the
// `#` the caller already consumed.
fn parse_attribute_naked(p: parser, style: ast::attr_style, lo: uint) ->
   ast::attribute {
    expect(p, token::LBRACKET);
    let meta_item = parse_meta_item(p);
    expect(p, token::RBRACKET);
    let hi = p.get_hi_pos();
    ret spanned(lo, hi, {style: style, value: *meta_item});
}
// Parse attributes that appear after the opening of an item, each terminated
// by a semicolon. In addition to a vector of inner attributes, this function
// also returns a vector that may contain the first outer attribute of the
// next item (since we can't know whether the attribute is an inner attribute
// of the containing item or an outer attribute of the first contained item
// until we see the semi).
fn parse_inner_attrs_and_next(p: parser) ->
   {inner: [ast::attribute], next: [ast::attribute]} {
    let inner_attrs: [ast::attribute] = [];
    let next_outer_attrs: [ast::attribute] = [];
    while p.peek() == token::POUND {
        let attr = parse_attribute(p, ast::attr_inner);
        if p.peek() == token::SEMI {
            // Trailing `;` confirms it is an inner attribute.
            p.bump();
            inner_attrs += [attr];
        } else {
            // It's not really an inner attribute
            let outer_attr =
                spanned(attr.span.lo, attr.span.hi,
                        {style: ast::attr_outer, value: attr.node.value});
            next_outer_attrs += [outer_attr];
            break;
        }
    }
    ret {inner: inner_attrs, next: next_outer_attrs};
}
// Parses one attribute meta item, which is one of:
//   `name = <lit>`   -> meta_name_value
//   `name(...)`      -> meta_list (recursive)
//   `name`           -> meta_word
fn parse_meta_item(p: parser) -> @ast::meta_item {
    let lo = p.get_lo_pos();
    let ident = parse_ident(p);
    alt p.peek() {
      token::EQ. {
        p.bump();
        let lit = parse_lit(p);
        let hi = p.get_hi_pos();
        ret @spanned(lo, hi, ast::meta_name_value(ident, lit));
      }
      token::LPAREN. {
        let inner_items = parse_meta_seq(p);
        let hi = p.get_hi_pos();
        ret @spanned(lo, hi, ast::meta_list(ident, inner_items));
      }
      _ {
        let hi = p.get_hi_pos();
        ret @spanned(lo, hi, ast::meta_word(ident));
      }
    }
}

// Parses a parenthesized, comma-separated list of meta items.
fn parse_meta_seq(p: parser) -> [@ast::meta_item] {
    ret parse_seq(token::LPAREN, token::RPAREN, seq_sep(token::COMMA),
                  parse_meta_item, p).node;
}

// Parses a meta-item list if one is present, otherwise returns empty.
fn parse_optional_meta(p: parser) -> [@ast::meta_item] {
    alt p.peek() { token::LPAREN. { ret parse_meta_seq(p); } _ { ret []; } }
}

// Parses the body of a `use` view item: `use <ident><optional meta>;`
// (keyword already consumed by the caller).
fn parse_use(p: parser) -> ast::view_item_ {
    let ident = parse_ident(p);
    let metadata = parse_optional_meta(p);
    ret ast::view_item_use(ident, metadata, p.get_id());
}
// Parses the remainder of an import path after its first identifier.
// Handles `a::b::c`, glob imports `a::b::*`, and import lists
// `a::b::{c, d}`.  `def_ident` is `some` when the import was renamed
// (`import x = a::b`), in which case globs and lists are rejected.
fn parse_rest_import_name(p: parser, first: ast::ident,
                          def_ident: option::t<ast::ident>) ->
   ast::view_item_ {
    let identifiers: [ast::ident] = [first];
    let glob: bool = false;
    let from_idents = option::none::<[ast::import_ident]>;
    while true {
        // Expect either a path separator (continue) or `;` (done).
        alt p.peek() {
          token::SEMI. { break; }
          token::MOD_SEP. {
            // `*` and `{...}` can only terminate a path.
            if glob { p.fatal("cannot path into a glob"); }
            if option::is_some(from_idents) {
                p.fatal("cannot path into import list");
            }
            p.bump();
          }
          _ { p.fatal("expecting '::' or ';'"); }
        }
        alt p.peek() {
          token::IDENT(_, _) { identifiers += [parse_ident(p)]; }

          //the lexer can't tell the different kinds of stars apart ) :
          token::BINOP(token::STAR.) {
            glob = true;
            p.bump();
          }

          token::LBRACE. {
            // Parses one `name` entry of an import list.
            fn parse_import_ident(p: parser) -> ast::import_ident {
                let lo = p.get_lo_pos();
                let ident = parse_ident(p);
                let hi = p.get_hi_pos();
                ret spanned(lo, hi, {name: ident, id: p.get_id()});
            }
            let from_idents_ =
                parse_seq(token::LBRACE, token::RBRACE, seq_sep(token::COMMA),
                          parse_import_ident, p).node;
            if vec::is_empty(from_idents_) {
                p.fatal("at least one import is required");
            }
            from_idents = some(from_idents_);
          }

          _ {
            p.fatal("expecting an identifier, or '*'");
          }
        }
    }
    alt def_ident {
      some(i) {
        // Renamed import: must be a plain path.
        if glob { p.fatal("globbed imports can't be renamed"); }
        if option::is_some(from_idents) {
            p.fatal("can't rename import list");
        }
        ret ast::view_item_import(i, identifiers, p.get_id());
      }
      _ {
        if glob {
            ret ast::view_item_import_glob(identifiers, p.get_id());
        } else if option::is_some(from_idents) {
            ret ast::view_item_import_from(identifiers,
                                           option::get(from_idents),
                                           p.get_id());
        } else {
            // Plain import binds under its last path segment.
            let len = vec::len(identifiers);
            ret ast::view_item_import(identifiers[len - 1u], identifiers,
                                      p.get_id());
        }
      }
    }
}
// Parses the path of a renamed import after the `=`:
// `import <def_ident> = <path>`.
fn parse_full_import_name(p: parser, def_ident: ast::ident) ->
   ast::view_item_ {
    alt p.peek() {
      token::IDENT(i, _) {
        p.bump();
        ret parse_rest_import_name(p, p.get_str(i), some(def_ident));
      }
      _ { p.fatal("expecting an identifier"); }
    }
}

// Parses an import body (keyword already eaten).  A following `=` means a
// renamed import; otherwise the first identifier starts the path itself.
fn parse_import(p: parser) -> ast::view_item_ {
    alt p.peek() {
      token::IDENT(i, _) {
        p.bump();
        alt p.peek() {
          token::EQ. {
            p.bump();
            ret parse_full_import_name(p, p.get_str(i));
          }
          _ { ret parse_rest_import_name(p, p.get_str(i), none); }
        }
      }
      _ { p.fatal("expecting an identifier"); }
    }
}

// Parses an export body: a comma-separated identifier list up to `;`
// (the `;` itself is left for the caller).
fn parse_export(p: parser) -> ast::view_item_ {
    let ids =
        parse_seq_to_before_end(token::SEMI, seq_sep(token::COMMA),
                                parse_ident, p);
    ret ast::view_item_export(ids, p.get_id());
}
// Parses one view item: `use ...;`, `import ...;` or `export ...;`.
// Callers are expected to have checked is_view_item first, hence the
// bare `fail` fallback.
fn parse_view_item(p: parser) -> @ast::view_item {
    let lo = p.get_lo_pos();
    let the_item =
        if eat_word(p, "use") {
            parse_use(p)
        } else if eat_word(p, "import") {
            parse_import(p)
        } else if eat_word(p, "export") { parse_export(p) } else { fail };
    // NOTE(review): `hi` is taken from get_lo_pos() (the start of the
    // `;` token) rather than get_hi_pos() -- the span therefore stops
    // before the semicolon.  Looks deliberate but worth confirming.
    let hi = p.get_lo_pos();
    expect(p, token::SEMI);
    ret @spanned(lo, hi, the_item);
}
// True when the current token begins a view item (`use`/`import`/`export`).
// The `false` in the IDENT pattern excludes keyword-escaped identifiers.
fn is_view_item(p: parser) -> bool {
    alt p.peek() {
      token::IDENT(sid, false) {
        let st = p.get_str(sid);
        ret str::eq(st, "use") || str::eq(st, "import") ||
                str::eq(st, "export");
      }
      _ { ret false; }
    }
}

// Parses a run of consecutive view items at the top of a module body.
fn parse_view(p: parser) -> [@ast::view_item] {
    let items: [@ast::view_item] = [];
    while is_view_item(p) { items += [parse_view_item(p)]; }
    ret items;
}
// Parses the view items at the top of a `native` mod body.  Native mods
// accept exactly the same view items as regular mods, so this delegates
// to parse_view rather than duplicating its loop.
fn parse_native_view(p: parser) -> [@ast::view_item] {
    ret parse_view(p);
}
// Parses a whole `.rs` file on disk into a crate.
fn parse_crate_from_source_file(input: str, cfg: ast::crate_cfg,
                                sess: parse_sess) -> @ast::crate {
    let p = new_parser_from_file(sess, cfg, input, 0u, 0u, SOURCE_FILE);
    ret parse_crate_mod(p, cfg);
}

// Parses in-memory source text into a crate, registering a fresh filemap
// under `name` so spans resolve somewhere sensible.
fn parse_crate_from_source_str(name: str, source: str, cfg: ast::crate_cfg,
                               sess: parse_sess) -> @ast::crate {
    let ftype = SOURCE_FILE;
    let filemap = codemap::new_filemap(name, 0u, 0u);
    sess.cm.files += [filemap];
    let itr = @interner::mk(str::hash, str::eq);
    let rdr = lexer::new_reader(sess.cm, source, filemap, itr);
    let p = new_parser(sess, cfg, rdr, ftype);
    ret parse_crate_mod(p, cfg);
}
// Parses a source module as a crate
fn parse_crate_mod(p: parser, _cfg: ast::crate_cfg) -> @ast::crate {
    let lo = p.get_lo_pos();
    let crate_attrs = parse_inner_attrs_and_next(p);
    let first_item_outer_attrs = crate_attrs.next;
    let m = parse_mod_items(p, token::EOF, first_item_outer_attrs);
    // Source crates carry no directives; those only exist in .rc files.
    ret @spanned(lo, p.get_lo_pos(),
                 {directives: [],
                  module: m,
                  attrs: crate_attrs.inner,
                  config: p.get_cfg()});
}

// Expects and returns a string literal (used for `mod x = "file";`).
fn parse_str(p: parser) -> str {
    alt p.peek() {
      token::LIT_STR(s) { p.bump(); p.get_str(s) }
      _ {
        p.fatal("expected string literal")
      }
    }
}
// Logic for parsing crate files (.rc)
//
// Each crate file is a sequence of directives.
//
// Each directive imperatively extends its environment with 0 or more items.
fn parse_crate_directive(p: parser, first_outer_attr: [ast::attribute]) ->
   ast::crate_directive {

    // Collect the next attributes
    let outer_attrs = first_outer_attr + parse_outer_attributes(p);

    // In a crate file outer attributes are only going to apply to mods
    let expect_mod = vec::len(outer_attrs) > 0u;

    let lo = p.get_lo_pos();
    if expect_mod || is_word(p, "mod") {
        expect_word(p, "mod");
        let id = parse_ident(p);
        // Explicit `= "path"` wins; otherwise fall back to a
        // `#[path = "..."]` attribute if present.
        let file_opt =
            alt p.peek() {
              token::EQ. { p.bump(); some(parse_str(p)) }
              _ {
                attr::get_meta_item_value_str_by_name(outer_attrs, "path")
              }
            };
        alt p.peek() {

          // mod x = "foo.rs";
          token::SEMI. {
            let hi = p.get_hi_pos();
            p.bump();
            ret spanned(lo, hi, ast::cdir_src_mod(id, file_opt, outer_attrs));
          }

          // mod x = "foo_dir" { ...directives... }
          token::LBRACE. {
            p.bump();
            let inner_attrs = parse_inner_attrs_and_next(p);
            let mod_attrs = outer_attrs + inner_attrs.inner;
            let next_outer_attr = inner_attrs.next;
            let cdirs =
                parse_crate_directives(p, token::RBRACE, next_outer_attr);
            let hi = p.get_hi_pos();
            expect(p, token::RBRACE);
            ret spanned(lo, hi,
                        ast::cdir_dir_mod(id, file_opt, cdirs, mod_attrs));
          }
          t { unexpected(p, t); }
        }
    } else if is_view_item(p) {
        let vi = parse_view_item(p);
        ret spanned(lo, vi.span.hi, ast::cdir_view_item(vi));
    } else { ret p.fatal("expected crate directive"); }
}
// Parses a sequence of crate directives up to `term` (RBRACE or EOF).
fn parse_crate_directives(p: parser, term: token::token,
                          first_outer_attr: [ast::attribute]) ->
   [@ast::crate_directive] {

    // This is pretty ugly. If we have an outer attribute then we can't accept
    // seeing the terminator next, so if we do see it then fail the same way
    // parse_crate_directive would
    if vec::len(first_outer_attr) > 0u && p.peek() == term {
        expect_word(p, "mod");
    }

    let cdirs: [@ast::crate_directive] = [];
    while p.peek() != term {
        let cdir = @parse_crate_directive(p, first_outer_attr);
        cdirs += [cdir];
    }
    ret cdirs;
}
// Parses a `.rc` crate file: reads its directives, then evaluates them
// (loading the referenced source files) into a module via eval.
fn parse_crate_from_crate_file(input: str, cfg: ast::crate_cfg,
                               sess: parse_sess) -> @ast::crate {
    let p = new_parser_from_file(sess, cfg, input, 0u, 0u, CRATE_FILE);
    let lo = p.get_lo_pos();
    // Paths in directives are resolved relative to the .rc file's dir.
    let prefix = std::fs::dirname(p.get_filemap().name);
    let leading_attrs = parse_inner_attrs_and_next(p);
    let crate_attrs = leading_attrs.inner;
    let first_cdir_attr = leading_attrs.next;
    let cdirs = parse_crate_directives(p, token::EOF, first_cdir_attr);
    let cx =
        @{p: p,
          sess: sess,
          mutable chpos: p.get_chpos(),
          mutable byte_pos: p.get_byte_pos(),
          cfg: p.get_cfg()};
    // A companion `foo.rs` next to `foo.rc` contributes to the crate mod.
    let (companionmod, _) = fs::splitext(fs::basename(input));
    let (m, attrs) = eval::eval_crate_directives_to_mod(
        cx, cdirs, prefix, option::some(companionmod));
    let hi = p.get_hi_pos();
    expect(p, token::EOF);
    ret @spanned(lo, hi,
                 {directives: cdirs,
                  module: m,
                  attrs: crate_attrs + attrs,
                  config: p.get_cfg()});
}
// Entry point: chooses the crate-file (.rc) or source-file (.rs) parser
// based on the input extension; any other extension is a hard error.
fn parse_crate_from_file(input: str, cfg: ast::crate_cfg, sess: parse_sess) ->
   @ast::crate {
    if str::ends_with(input, ".rc") {
        parse_crate_from_crate_file(input, cfg, sess)
    } else if str::ends_with(input, ".rs") {
        parse_crate_from_source_file(input, cfg, sess)
    } else {
        codemap::emit_error(none, "unknown input file type: " + input,
                            sess.cm);
        fail
    }
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
|
import std::io;
import std::ivec;
import std::str;
import std::option;
import std::option::some;
import std::option::none;
import std::either;
import std::either::left;
import std::either::right;
import std::map::hashmap;
import token::can_begin_expr;
import ex = ext::base;
import codemap::span;
import std::map::new_str_hash;
import util::interner;
import ast::node_id;
import ast::spanned;
// Expression restriction: RESTRICT_NO_CALL_EXPRS disables call syntax
// where it would be ambiguous.
tag restriction { UNRESTRICTED; RESTRICT_NO_CALL_EXPRS; }

// Whether we are parsing a .rc crate file or a .rs source file.
tag file_type { CRATE_FILE; SOURCE_FILE; }

// Result of parsing something that is either a type or `!` (no-return).
tag ty_or_bang { a_ty(@ast::ty); a_bang; }

// Shared parse-session state: the codemap plus the node-id counter.
type parse_sess = @{cm: codemap::codemap, mutable next_id: node_id};

// Hands out fresh, monotonically increasing AST node ids.
fn next_node_id(sess: &parse_sess) -> node_id {
    let rv = sess.next_id;
    sess.next_id += 1;
    ret rv;
}
// The parser interface: a token cursor with one-token peek, arbitrary
// lookahead, span/position accessors, error reporting, and access to the
// session (node ids, config, reader, interner strings).
type parser =
    obj {
        fn peek() -> token::token ;
        fn bump() ;
        fn look_ahead(uint) -> token::token ;
        fn fatal(str) -> ! ;
        fn warn(str) ;
        fn restrict(restriction) ;
        fn get_restriction() -> restriction ;
        fn get_file_type() -> file_type ;
        fn get_cfg() -> ast::crate_cfg ;
        fn get_span() -> span ;
        fn get_lo_pos() -> uint ;
        fn get_hi_pos() -> uint ;
        fn get_last_lo_pos() -> uint ;
        fn get_last_hi_pos() -> uint ;
        fn get_prec_table() -> @[op_spec] ;
        fn get_str(token::str_num) -> str ;
        fn get_reader() -> lexer::reader ;
        fn get_filemap() -> codemap::filemap ;
        fn get_bad_expr_words() -> hashmap[str, ()] ;
        fn get_chpos() -> uint ;
        fn get_byte_pos() -> uint ;
        fn get_id() -> node_id ;
        fn get_sess() -> parse_sess ;
    };
// Builds a parser over the contents of `path`, registering a filemap at
// the given char/byte offsets (nonzero when re-entering mid-crate).
fn new_parser_from_file(sess: parse_sess, cfg:
                            ast::crate_cfg, path: str,
                        chpos: uint, byte_pos: uint,
                        ftype: file_type) -> parser {
    let src = io::read_whole_file_str(path);
    let filemap = codemap::new_filemap(path, chpos, byte_pos);
    sess.cm.files += ~[filemap];
    let itr = @interner::mk(str::hash, str::eq);
    let rdr = lexer::new_reader(sess.cm, src, filemap, itr);
    ret new_parser(sess, cfg, rdr, ftype);
}
// Constructs the concrete parser object over a lexer.  State: the current
// token + span, the previous token's span, and a lookahead buffer kept in
// reverse order (newest tokens are prepended, bump() pops from the end).
fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: lexer::reader,
              ftype: file_type) -> parser {
    obj stdio_parser(sess: parse_sess,
                     cfg: ast::crate_cfg,
                     ftype: file_type,
                     mutable tok: token::token,
                     mutable tok_span: span,
                     mutable last_tok_span: span,
                     mutable buffer: [{tok: token::token, span: span}],
                     mutable restr: restriction,
                     rdr: lexer::reader,
                     precs: @[op_spec],
                     bad_words: hashmap[str, ()]) {
        fn peek() -> token::token { ret tok; }
        // Advances to the next token, draining the lookahead buffer first.
        fn bump() {
            last_tok_span = tok_span;
            if ivec::len(buffer) == 0u {
                let next = lexer::next_token(rdr);
                tok = next.tok;
                tok_span = ast::mk_sp(next.chpos, rdr.get_chpos());
            } else {
                let next = ivec::pop(buffer);
                tok = next.tok;
                tok_span = next.span;
            }
        }
        // Returns the token `distance` ahead (1 = the next token after
        // the current one), lexing into the buffer as needed.
        fn look_ahead(distance: uint) -> token::token {
            while ivec::len(buffer) < distance {
                let next = lexer::next_token(rdr);
                let sp = ast::mk_sp(next.chpos, rdr.get_chpos());
                // Prepend: buffer is stored newest-first so pop() in
                // bump() yields tokens in source order.
                buffer = ~[{tok: next.tok, span: sp}] + buffer;
            }
            ret buffer.(distance - 1u).tok;
        }
        fn fatal(m: str) -> ! {
            codemap::emit_error(some(self.get_span()), m, sess.cm);
            fail;
        }
        fn warn(m: str) {
            codemap::emit_warning(some(self.get_span()), m, sess.cm);
        }
        fn restrict(r: restriction) { restr = r; }
        fn get_restriction() -> restriction { ret restr; }
        fn get_span() -> span { ret tok_span; }
        fn get_lo_pos() -> uint { ret tok_span.lo; }
        fn get_hi_pos() -> uint { ret tok_span.hi; }
        fn get_last_lo_pos() -> uint { ret last_tok_span.lo; }
        fn get_last_hi_pos() -> uint { ret last_tok_span.hi; }
        fn get_file_type() -> file_type { ret ftype; }
        fn get_cfg() -> ast::crate_cfg { ret cfg; }
        fn get_prec_table() -> @[op_spec] { ret precs; }
        // Resolves an interned string number back to its text.
        fn get_str(i: token::str_num) -> str {
            ret interner::get(*rdr.get_interner(), i);
        }
        fn get_reader() -> lexer::reader { ret rdr; }
        fn get_filemap() -> codemap::filemap { ret rdr.get_filemap(); }
        fn get_bad_expr_words() -> hashmap[str, ()] { ret bad_words; }
        fn get_chpos() -> uint { ret rdr.get_chpos(); }
        fn get_byte_pos() -> uint { ret rdr.get_byte_pos(); }
        fn get_id() -> node_id { ret next_node_id(sess); }
        fn get_sess() -> parse_sess { ret sess; }
    }
    // Prime the parser with the first token.
    let tok0 = lexer::next_token(rdr);
    let span0 = ast::mk_sp(tok0.chpos, rdr.get_chpos());
    ret stdio_parser(sess, cfg, ftype, tok0.tok, span0, span0, ~[],
                     UNRESTRICTED, rdr, prec_table(), bad_expr_word_table());
}
// These are the words that shouldn't be allowed as value identifiers,
// because, if used at the start of a line, they will cause the line to be
// interpreted as a specific kind of statement, which would be confusing.
// (Fixed: "block" was previously inserted twice; the duplicate insert was
// a no-op and has been removed.)
fn bad_expr_word_table() -> hashmap[str, ()] {
    let words = new_str_hash();
    words.insert("mod", ());
    words.insert("if", ());
    words.insert("else", ());
    words.insert("while", ());
    words.insert("do", ());
    words.insert("alt", ());
    words.insert("for", ());
    words.insert("each", ());
    words.insert("break", ());
    words.insert("cont", ());
    words.insert("put", ());
    words.insert("ret", ());
    words.insert("be", ());
    words.insert("fail", ());
    words.insert("type", ());
    words.insert("resource", ());
    words.insert("check", ());
    words.insert("assert", ());
    words.insert("claim", ());
    words.insert("prove", ());
    words.insert("native", ());
    words.insert("fn", ());
    words.insert("block", ());
    words.insert("lambda", ());
    words.insert("pred", ());
    words.insert("iter", ());
    words.insert("import", ());
    words.insert("export", ());
    words.insert("let", ());
    words.insert("const", ());
    words.insert("log", ());
    words.insert("log_err", ());
    words.insert("tag", ());
    words.insert("obj", ());
    words.insert("copy", ());
    ret words;
}
// Aborts with an "unexpected token" diagnostic for token `t`.
fn unexpected(p: &parser, t: token::token) -> ! {
    let s: str = "unexpected token: ";
    s += token::to_str(p.get_reader(), t);
    p.fatal(s);
}

// Consumes token `t` or aborts with an "expecting X, found Y" diagnostic.
fn expect(p: &parser, t: token::token) {
    if p.peek() == t {
        p.bump();
    } else {
        let s: str = "expecting ";
        s += token::to_str(p.get_reader(), t);
        s += ", found ";
        s += token::to_str(p.get_reader(), p.peek());
        p.fatal(s);
    }
}
// Wraps a node with a span running from `lo` to `hi`.
fn spanned[T](lo: uint, hi: uint, node: &T) -> spanned[T] {
    ret {node: node, span: ast::mk_sp(lo, hi)};
}

// Consumes and returns the current token as an identifier, or errors.
fn parse_ident(p: &parser) -> ast::ident {
    alt p.peek() {
      token::IDENT(i, _) { p.bump(); ret p.get_str(i); }
      _ { p.fatal("expecting ident"); }
    }
}

// Like parse_ident, but first rejects reserved "bad expression words".
fn parse_value_ident(p: &parser) -> ast::ident {
    check_bad_word(p);
    ret parse_ident(p);
}
// Consumes `tok` if it is next; returns whether it did.
fn eat(p: &parser, tok: &token::token) -> bool {
    ret if p.peek() == tok { p.bump(); true } else { false };
}

// True when the current token is the bare identifier `word`
// (the `false` excludes keyword-escaped identifiers).
fn is_word(p: &parser, word: &str) -> bool {
    ret alt p.peek() {
          token::IDENT(sid, false) { str::eq(word, p.get_str(sid)) }
          _ { false }
        };
}

// Consumes the identifier `word` if it is next; returns whether it did.
fn eat_word(p: &parser, word: &str) -> bool {
    alt p.peek() {
      token::IDENT(sid, false) {
        if str::eq(word, p.get_str(sid)) {
            p.bump();
            ret true;
        } else { ret false; }
      }
      _ { ret false; }
    }
}

// Requires the identifier `word` next; fatal error otherwise.
fn expect_word(p: &parser, word: &str) {
    if !eat_word(p, word) {
        p.fatal("expecting " + word + ", found " +
                    token::to_str(p.get_reader(), p.peek()));
    }
}
// Errors if the current token is one of the reserved statement-starting
// words (see bad_expr_word_table); no-op otherwise.
fn check_bad_word(p: &parser) {
    alt p.peek() {
      token::IDENT(sid, false) {
        let w = p.get_str(sid);
        if p.get_bad_expr_words().contains_key(w) {
            p.fatal("found " + w + " in expression position");
        }
      }
      _ { }
    }
}
// Parses a fn type's argument list and optional `-> ret` part, producing
// an ast::ty_fn.  `lo` is where the `fn`/`iter`/`block` keyword began.
fn parse_ty_fn(proto: ast::proto, p: &parser, lo: uint) -> ast::ty_ {
    fn parse_fn_input_ty(p: &parser) -> ast::ty_arg {
        let lo = p.get_lo_pos();
        // Ignore arg name, if present
        if is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
            p.bump();
            p.bump();
        }
        let mode = ast::val;
        // A leading `&` makes this a by-alias (reference) argument.
        if p.peek() == token::BINOP(token::AND) {
            p.bump();
            mode = ast::alias(eat_word(p, "mutable"));
        }
        let t = parse_ty(p, false);
        ret spanned(lo, t.span.hi, {mode: mode, ty: t});
    }
    let lo = p.get_lo_pos();
    let inputs =
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                  parse_fn_input_ty, p);
    // FIXME: there's no syntax for this right now anyway
    //  auto constrs = parse_constrs(~[], p);
    let constrs: [@ast::constr] = ~[];
    let output: @ast::ty;
    let cf = ast::return;
    if p.peek() == token::RARROW {
        p.bump();
        let tmp = parse_ty_or_bang(p);
        alt tmp {
          a_ty(t) { output = t; }
          a_bang. {
            // `-> !` means the fn never returns; result type is bottom.
            output = @spanned(lo, inputs.span.hi, ast::ty_bot);
            cf = ast::noreturn;
          }
        }
    } else { output = @spanned(lo, inputs.span.hi, ast::ty_nil); }
    ret ast::ty_fn(proto, inputs.node, output, cf, constrs);
}
// Parses a fn-family keyword into its proto.  Note `pred` maps to
// proto_fn, same as `fn` -- predicates differ only in purity elsewhere.
fn parse_proto(p: &parser) -> ast::proto {
    if eat_word(p, "iter") {
        ret ast::proto_iter;
    } else if (eat_word(p, "fn")) {
        ret ast::proto_fn;
    } else if (eat_word(p, "block")) {
        ret ast::proto_block;
    } else if (eat_word(p, "pred")) {
        ret ast::proto_fn;
    } else { unexpected(p, p.peek()); }
}
// Parses an `obj { fn m(...) -> T; ... }` type.  `hi` is an out-param
// (mutable reference) updated to the closing brace position.
fn parse_ty_obj(p: &parser, hi: &mutable uint) -> ast::ty_ {
    fn parse_method_sig(p: &parser) -> ast::ty_method {
        let flo = p.get_lo_pos();
        let proto: ast::proto = parse_proto(p);
        let ident = parse_value_ident(p);
        let f = parse_ty_fn(proto, p, flo);
        expect(p, token::SEMI);
        // parse_ty_fn always yields ty_fn, so this alt is exhaustive
        // in practice; unpack it into a ty_method record.
        alt f {
          ast::ty_fn(proto, inputs, output, cf, constrs) {
            ret spanned(flo, output.span.hi,
                        {proto: proto,
                         ident: ident,
                         inputs: inputs,
                         output: output,
                         cf: cf,
                         constrs: constrs});
          }
        }
    }
    let meths =
        parse_seq(token::LBRACE, token::RBRACE, none, parse_method_sig, p);
    hi = meths.span.hi;
    ret ast::ty_obj(meths.node);
}
// Parses an optional mutability qualifier followed by a type.
fn parse_mt(p: &parser) -> ast::mt {
    let mut = parse_mutability(p);
    let t = parse_ty(p, false);
    ret {ty: t, mut: mut};
}

// Parses one record-type field: `[mutability] <ident>: <ty>`.
fn parse_ty_field(p: &parser) -> ast::ty_field {
    let lo = p.get_lo_pos();
    let mut = parse_mutability(p);
    let id = parse_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    ret spanned(lo, ty.span.hi, {ident: id, mt: {ty: ty, mut: mut}});
}
// if i is the jth ident in args, return j
// otherwise, fail
fn ident_index(p: &parser, args: &[ast::arg], i: &ast::ident) -> uint {
    let j = 0u;
    for a: ast::arg in args { if a.ident == i { ret j; } j += 1u; }
    p.fatal("Unbound variable " + i + " in constraint arg");
}

// Parses one argument of a type-level constraint: `*` (the value itself)
// or `*.path` (a record field of it).
fn parse_type_constr_arg(p: &parser) -> @ast::ty_constr_arg {
    let sp = p.get_span();
    let carg = ast::carg_base;
    expect(p, token::BINOP(token::STAR));
    if p.peek() == token::DOT {
        // "*..." notation for record fields
        p.bump();
        let pth: ast::path = parse_path(p);
        carg = ast::carg_ident(pth);
    }
    // No literals yet, I guess?
    ret @{node: carg, span: sp};
}

// Parses one argument of a fn constraint: `*` or a fn-argument name,
// which is resolved to its index in `args`.
fn parse_constr_arg(args: &[ast::arg], p: &parser) -> @ast::constr_arg {
    let sp = p.get_span();
    let carg = ast::carg_base;
    if p.peek() == token::BINOP(token::STAR) {
        p.bump();
    } else {
        let i: ast::ident = parse_value_ident(p);
        carg = ast::carg_ident(ident_index(p, args, i));
    }
    ret @{node: carg, span: sp};
}

// Parses a fn constraint: `path(arg, ...)` over the fn's arguments.
fn parse_ty_constr(fn_args: &[ast::arg], p: &parser) -> @ast::constr {
    let lo = p.get_lo_pos();
    let path = parse_path(p);
    let pf = bind parse_constr_arg(fn_args, _);
    let args: {node: [@ast::constr_arg], span: span} =
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA), pf, p);
    ret @spanned(lo, args.span.hi,
                 {path: path, args: args.node, id: p.get_id()});
}

// Parses a constraint appearing in type position: `path(*, *.f, ...)`.
fn parse_constr_in_type(p: &parser) -> @ast::ty_constr {
    let lo = p.get_lo_pos();
    let path = parse_path(p);
    let args: [@ast::ty_constr_arg] =
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                  parse_type_constr_arg, p).node;
    let hi = p.get_lo_pos();
    let tc: ast::ty_constr_ = {path: path, args: args, id: p.get_id()};
    ret @spanned(lo, hi, tc);
}

// Parses a comma-separated list of constraints using parser `pser`
// (generic over fn-constraints vs type-constraints).
fn parse_constrs[T](pser: fn(&parser) -> @ast::constr_general[T] , p: &parser)
   -> [@ast::constr_general[T]] {
    let constrs: [@ast::constr_general[T]] = ~[];
    while true {
        let constr = pser(p);
        constrs += ~[constr];
        if p.peek() == token::COMMA { p.bump(); } else { break; }
    }
    constrs
}

// Parses the `: constr, constr` suffix on a record type.
fn parse_type_constraints(p: &parser) -> [@ast::ty_constr] {
    ret parse_constrs(parse_constr_in_type, p);
}
// Wraps a just-parsed type with its explicit type-parameter instantiation
// suffix, if any: `T[...]`, `T::<...>` (when colons_before_params) or
// `T<...>`.  Only path types may take parameters.
fn parse_ty_postfix(orig_t: ast::ty_, p: &parser, colons_before_params: bool)
   -> @ast::ty {
    let lo = p.get_lo_pos();
    let end;
    if p.peek() == token::LBRACKET {
        p.bump();
        end = token::RBRACKET;
    } else if colons_before_params && p.peek() == token::MOD_SEP {
        p.bump();
        expect(p, token::LT);
        end = token::GT;
    } else if !colons_before_params && p.peek() == token::LT {
        p.bump();
        end = token::GT;
    } else {
        // No parameter list: return the type as-is.
        ret @spanned(lo, p.get_lo_pos(), orig_t);
    }

    // If we're here, we have explicit type parameter instantiation.
    let seq = parse_seq_to_end(end, some(token::COMMA),
                               bind parse_ty(_, false), p);

    alt orig_t {
      ast::ty_path(pth, ann) {
        let hi = p.get_hi_pos();
        ret @spanned(lo, hi,
                     ast::ty_path(spanned(lo, hi,
                                          {global: pth.node.global,
                                           idents: pth.node.idents,
                                           types: seq}), ann));
      }
      _ {
        p.fatal("type parameter instantiation only allowed for paths");
      }
    }
}

// Parses either a type or `!` (for no-return fn results).
fn parse_ty_or_bang(p: &parser) -> ty_or_bang {
    alt p.peek() {
      token::NOT. { p.bump(); ret a_bang; }
      _ { ret a_ty(parse_ty(p, false)); }
    }
}
// The main type parser: dispatches on the leading keyword/token to every
// type form (builtins, machine types, tuples, boxes, pointers, records,
// vecs, fn/iter/block/obj types, paths), then applies any explicit
// type-parameter suffix via parse_ty_postfix.
fn parse_ty(p: &parser, colons_before_params: bool) -> @ast::ty {
    let lo = p.get_lo_pos();
    let hi = lo;
    let t: ast::ty_;
    // FIXME: do something with this

    if eat_word(p, "bool") {
        t = ast::ty_bool;
    } else if (eat_word(p, "int")) {
        t = ast::ty_int;
    } else if (eat_word(p, "uint")) {
        t = ast::ty_uint;
    } else if (eat_word(p, "float")) {
        t = ast::ty_float;
    } else if (eat_word(p, "str")) {
        t = ast::ty_str;
    } else if (eat_word(p, "istr")) {
        t = ast::ty_istr;
    } else if (eat_word(p, "char")) {
        t = ast::ty_char;
        /*
            } else if (eat_word(p, "task")) {
                t = ast::ty_task;
        */
    } else if (eat_word(p, "i8")) {
        t = ast::ty_machine(ast::ty_i8);
    } else if (eat_word(p, "i16")) {
        t = ast::ty_machine(ast::ty_i16);
    } else if (eat_word(p, "i32")) {
        t = ast::ty_machine(ast::ty_i32);
    } else if (eat_word(p, "i64")) {
        t = ast::ty_machine(ast::ty_i64);
    } else if (eat_word(p, "u8")) {
        t = ast::ty_machine(ast::ty_u8);
    } else if (eat_word(p, "u16")) {
        t = ast::ty_machine(ast::ty_u16);
    } else if (eat_word(p, "u32")) {
        t = ast::ty_machine(ast::ty_u32);
    } else if (eat_word(p, "u64")) {
        t = ast::ty_machine(ast::ty_u64);
    } else if (eat_word(p, "f32")) {
        t = ast::ty_machine(ast::ty_f32);
    } else if (eat_word(p, "f64")) {
        t = ast::ty_machine(ast::ty_f64);
    } else if (p.peek() == token::LPAREN) {
        // `()` is nil; `(T)` is just T; `(T, U, ...)` is a tuple.
        p.bump();
        if p.peek() == token::RPAREN {
            hi = p.get_hi_pos();
            p.bump();
            t = ast::ty_nil;
        } else {
            let ts = ~[parse_ty(p, false)];
            while p.peek() == token::COMMA {
                p.bump();
                ts += ~[parse_ty(p, false)];
            }
            if ivec::len(ts) == 1u {
                t = ts.(0).node;
            } else {
                t = ast::ty_tup(ts);
            }
            hi = p.get_hi_pos();
            expect(p, token::RPAREN);
        }
    } else if (p.peek() == token::AT) {
        // `@T` boxed type.
        p.bump();
        let mt = parse_mt(p);
        hi = mt.ty.span.hi;
        t = ast::ty_box(mt);
    } else if (p.peek() == token::BINOP(token::STAR)) {
        // `*T` raw pointer type.
        p.bump();
        let mt = parse_mt(p);
        hi = mt.ty.span.hi;
        t = ast::ty_ptr(mt);
    } else if (p.peek() == token::LBRACE) {
        // `{f1: T1, ...}` record type, optionally `: constr, ...` after.
        let elems =
            parse_seq(token::LBRACE, token::RBRACE, some(token::COMMA),
                      parse_ty_field, p);
        hi = elems.span.hi;
        t = ast::ty_rec(elems.node);
        if p.peek() == token::COLON {
            p.bump();
            t =
                ast::ty_constr(@spanned(lo, hi, t),
                               parse_type_constraints(p));
        }
    } else if (eat_word(p, "vec")) {
        // `vec[T]` heap vector.
        expect(p, token::LBRACKET);
        t = ast::ty_vec(parse_mt(p));
        hi = p.get_hi_pos();
        expect(p, token::RBRACKET);
    } else if (p.peek() == token::LBRACKET) {
        // Bare `[T]` interior vector.
        expect(p, token::LBRACKET);
        t = ast::ty_ivec(parse_mt(p));
        hi = p.get_hi_pos();
        expect(p, token::RBRACKET);
    } else if (eat_word(p, "fn")) {
        let flo = p.get_last_lo_pos();
        t = parse_ty_fn(ast::proto_fn, p, flo);
        alt t { ast::ty_fn(_, _, out, _, _) { hi = out.span.hi; } }
    } else if (eat_word(p, "block")) {
        let flo = p.get_last_lo_pos();
        t = parse_ty_fn(ast::proto_block, p, flo);
        alt t { ast::ty_fn(_, _, out, _, _) { hi = out.span.hi; } }
    } else if (eat_word(p, "iter")) {
        let flo = p.get_last_lo_pos();
        t = parse_ty_fn(ast::proto_iter, p, flo);
        alt t { ast::ty_fn(_, _, out, _, _) { hi = out.span.hi; } }
    } else if (eat_word(p, "obj")) {
        t = parse_ty_obj(p, hi);
    } else if (eat_word(p, "mutable")) {
        p.warn("ignoring deprecated 'mutable' type constructor");
        let typ = parse_ty(p, false);
        t = typ.node;
        hi = typ.span.hi;
    } else if (p.peek() == token::MOD_SEP || is_ident(p.peek())) {
        // Anything else identifier-like is a path type.
        let path = parse_path(p);
        t = ast::ty_path(path, p.get_id());
        hi = path.span.hi;
    } else { p.fatal("expecting type"); }
    ret parse_ty_postfix(t, p, colons_before_params);
}
fn parse_arg_mode(p: &parser) -> ast::mode {
if eat(p, token::BINOP(token::AND)) {
ast::alias(eat_word(p, "mutable"))
} else if eat(p, token::BINOP(token::MINUS)) {
ast::move
} else {
ast::val
}
}
// Parses one function argument of the form `ident: [mode] type`.
// Note the mode prefix comes after the colon, attached to the type.
fn parse_arg(p: &parser) -> ast::arg {
    let i = parse_value_ident(p);
    expect(p, token::COLON);
    let m = parse_arg_mode(p);
    let t = parse_ty(p, false);
    ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parses one argument of a fn-block (`|a, b| ...`) parameter list:
// `[mode] ident` with no type annotation — the type is left as
// ty_infer for the typechecker to fill in.
fn parse_fn_block_arg(p: &parser) -> ast::arg {
    let m = parse_arg_mode(p);
    let i = parse_value_ident(p);
    let t = @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer);
    ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parses a `sep`-separated sequence of `f` items up to the closing
// token `ket`, then consumes `ket` itself before returning.
fn parse_seq_to_end[T](ket: token::token, sep: option::t[token::token],
                       f: fn(&parser) -> T , p: &parser) -> [T] {
    let val = parse_seq_to_before_end(ket, sep, f, p);
    p.bump();
    ret val;
}
// Parses a `sep`-separated sequence of `f` items, stopping when the
// lookahead is `ket` but NOT consuming `ket`. When `sep` is none the
// items are simply juxtaposed. The `first` flag suppresses the
// separator check before the first item.
fn parse_seq_to_before_end[T](ket: token::token, sep: option::t[token::token],
                              f: fn(&parser) -> T , p: &parser) -> [T] {
    let first: bool = true;
    let v: [T] = ~[];
    while p.peek() != ket {
        alt sep {
          some(t) { if first { first = false; } else { expect(p, t); } }
          _ { }
        }
        v += ~[f(p)];
    }
    ret v;
}
// Parses a bracketed, separated sequence: `bra` item (sep item)* `ket`,
// consuming both delimiters. The returned span covers opening to
// closing delimiter.
fn parse_seq[T](bra: token::token, ket: token::token,
                sep: option::t[token::token], f: fn(&parser) -> T ,
                p: &parser) -> spanned[[T]] {
    let lo = p.get_lo_pos();
    expect(p, bra);
    let result = parse_seq_to_before_end[T](ket, sep, f, p);
    let hi = p.get_hi_pos();
    p.bump();
    ret spanned(lo, hi, result);
}
// Parses a literal: bool, int, uint, float, machine-sized int/float,
// char, string, or the nil literal `()`. Any other token is a parse
// error (reported via unexpected).
fn parse_lit(p: &parser) -> ast::lit {
    let sp = p.get_span();
    let lit: ast::lit_ = ast::lit_nil;
    if eat_word(p, "true") {
        lit = ast::lit_bool(true);
    } else if (eat_word(p, "false")) {
        lit = ast::lit_bool(false);
    } else {
        alt p.peek() {
          token::LIT_INT(i) { p.bump(); lit = ast::lit_int(i); }
          token::LIT_UINT(u) { p.bump(); lit = ast::lit_uint(u); }
          token::LIT_FLOAT(s) {
            p.bump();
            // Float payloads are interned strings; look up the text.
            lit = ast::lit_float(p.get_str(s));
          }
          token::LIT_MACH_INT(tm, i) {
            p.bump();
            lit = ast::lit_mach_int(tm, i);
          }
          token::LIT_MACH_FLOAT(tm, s) {
            p.bump();
            lit = ast::lit_mach_float(tm, p.get_str(s));
          }
          token::LIT_CHAR(c) { p.bump(); lit = ast::lit_char(c); }
          token::LIT_STR(s) {
            p.bump();
            lit = ast::lit_str(p.get_str(s), ast::sk_rc);
          }
          token::LPAREN. {
            // `()` — the nil literal.
            p.bump();
            expect(p, token::RPAREN);
            lit = ast::lit_nil;
          }
          t { unexpected(p, t); }
        }
    }
    ret {node: lit, span: sp};
}
fn is_ident(t: token::token) -> bool {
alt t { token::IDENT(_, _) { ret true; } _ { } }
ret false;
}
// Answers whether the lookahead token is a plain (non-keyword-like)
// identifier; the second IDENT payload flags reserved-word status.
fn is_plain_ident(p: &parser) -> bool {
    ret alt p.peek() { token::IDENT(_, false) { true } _ { false } };
}
// Parses a (possibly leading-'::') path of '::'-separated identifiers,
// with no type-parameter substitutions (those are layered on by
// parse_path_and_ty_param_substs). Stops before a '::<' so the caller
// can take over at the type-parameter list.
fn parse_path(p: &parser) -> ast::path {
    let lo = p.get_lo_pos();
    let hi = lo;
    let global;
    if p.peek() == token::MOD_SEP {
        global = true;
        p.bump();
    } else { global = false; }
    let ids: [ast::ident] = ~[];
    while true {
        alt p.peek() {
          token::IDENT(i, _) {
            // Extend the span to cover this segment. (The original had
            // this assignment duplicated on both sides of the ids
            // append; nothing between the two copies moved the cursor,
            // so the second one was dead and has been removed.)
            hi = p.get_hi_pos();
            ids += ~[p.get_str(i)];
            p.bump();
            if p.peek() == token::MOD_SEP && p.look_ahead(1u) != token::LT {
                p.bump();
            } else {
                break;
            }
          }
          _ { break; }
        }
    }
    ret spanned(lo, hi, {global: global, idents: ids, types: ~[]});
}
// Parses a path optionally followed by explicit type-parameter
// substitutions in square brackets, e.g. `foo::bar[int, str]`, and
// folds the parsed types into the path node.
fn parse_path_and_ty_param_substs(p: &parser) -> ast::path {
    let lo = p.get_lo_pos();
    let path = parse_path(p);
    if p.peek() == token::LBRACKET {
        let seq =
            parse_seq(token::LBRACKET, token::RBRACKET, some(token::COMMA),
                      bind parse_ty(_, false), p);
        let hi = seq.span.hi;
        // Rebuild the path with the same idents but filled-in types and
        // a span widened to include the bracket list.
        path =
            spanned(lo, hi,
                    {global: path.node.global,
                     idents: path.node.idents,
                     types: seq.node});
    }
    ret path;
}
// Parses an optional mutability qualifier: `mutable` -> mut,
// `mutable?` -> maybe_mut, nothing -> imm.
fn parse_mutability(p: &parser) -> ast::mutability {
    if eat_word(p, "mutable") {
        if p.peek() == token::QUES { p.bump(); ret ast::maybe_mut; }
        ret ast::mut;
    }
    ret ast::imm;
}
// Parses one record-expression field: `[mutable] ident <sep> expr`,
// where the caller chooses the separator token (e.g. COLON).
fn parse_field(p: &parser, sep: &token::token) -> ast::field {
    let lo = p.get_lo_pos();
    let m = parse_mutability(p);
    let i = parse_ident(p);
    expect(p, sep);
    let e = parse_expr(p);
    ret spanned(lo, e.span.hi, {mut: m, ident: i, expr: e});
}
// Allocates a boxed expression node with a fresh node id and the given
// source span.
fn mk_expr(p: &parser, lo: uint, hi: uint, node: &ast::expr_) -> @ast::expr {
    ret @{id: p.get_id(), node: node, span: ast::mk_sp(lo, hi)};
}
// Allocates a macro-invocation expression; the same span is used for
// both the inner mac node and the wrapping expression.
fn mk_mac_expr(p: &parser, lo: uint, hi: uint, m: &ast::mac_) -> @ast::expr {
    ret @{id: p.get_id(),
          node: ast::expr_mac({node: m, span: ast::mk_sp(lo, hi)}),
          span: ast::mk_sp(lo, hi)};
}
// Parses a "bottom" (primary) expression: parenthesized/tuple exprs,
// record and block exprs, all the keyword-introduced expression forms
// (if/for/while/do/alt/fn/block/lambda/obj/bind/fail/log/ret/...),
// vector literals, unique (~) forms, syntax extensions, paths, and
// literals. Several arms return early; the rest fall through to the
// single mk_expr at the bottom using the accumulated `ex`/`hi`.
fn parse_bottom_expr(p: &parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let ex: ast::expr_;
    if p.peek() == token::LPAREN {
        p.bump();
        if p.peek() == token::RPAREN {
            // `()` — nil literal expression.
            hi = p.get_hi_pos();
            p.bump();
            let lit = @spanned(lo, hi, ast::lit_nil);
            ret mk_expr(p, lo, hi, ast::expr_lit(lit));
        }
        let es = ~[parse_expr(p)];
        while p.peek() == token::COMMA {
            p.bump();
            es += ~[parse_expr(p)];
        }
        hi = p.get_hi_pos();
        expect(p, token::RPAREN);
        if ivec::len(es) == 1u {
            // Plain parenthesization: re-span the inner expression.
            ret mk_expr(p, lo, hi, es.(0).node);
        } else {
            ret mk_expr(p, lo, hi, ast::expr_tup(es));
        }
    } else if (p.peek() == token::LBRACE) {
        p.bump();
        // Disambiguate `{...}`: a record needs `mutable` or `ident:`
        // at the front; `|` starts a fn-block; otherwise a plain block.
        if is_word(p, "mutable") ||
               is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
            let fields = ~[parse_field(p, token::COLON)];
            let base = none;
            while p.peek() != token::RBRACE {
                // Functional-record-update: `{a: 1 with base}`.
                if eat_word(p, "with") { base = some(parse_expr(p)); break; }
                expect(p, token::COMMA);
                fields += ~[parse_field(p, token::COLON)];
            }
            hi = p.get_hi_pos();
            expect(p, token::RBRACE);
            ex = ast::expr_rec(fields, base);
        } else if p.peek() == token::BINOP(token::OR) {
            ret parse_fn_block_expr(p);
        } else {
            let blk = parse_block_tail(p, lo);
            ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
        }
    } else if (eat_word(p, "if")) {
        ret parse_if_expr(p);
    } else if (eat_word(p, "for")) {
        ret parse_for_expr(p);
    } else if (eat_word(p, "while")) {
        ret parse_while_expr(p);
    } else if (eat_word(p, "do")) {
        ret parse_do_while_expr(p);
    } else if (eat_word(p, "alt")) {
        ret parse_alt_expr(p);
        /*
    } else if (eat_word(p, "spawn")) {
        ret parse_spawn_expr(p);
        */
    } else if (eat_word(p, "fn")) {
        ret parse_fn_expr(p, ast::proto_fn);
    } else if (eat_word(p, "block")) {
        ret parse_fn_expr(p, ast::proto_block);
    } else if (eat_word(p, "lambda")) {
        ret parse_fn_expr(p, ast::proto_closure);
    } else if (p.peek() == token::LBRACKET) {
        // Vector literal, optionally `[mutable ...]`.
        p.bump();
        let mut = parse_mutability(p);
        let es =
            parse_seq_to_end(token::RBRACKET, some(token::COMMA), parse_expr,
                             p);
        ex = ast::expr_vec(es, mut, ast::sk_rc);
    } else if (p.peek() == token::POUND_LT) {
        // `#<ty>` — embedded-type macro form.
        p.bump();
        let ty = parse_ty(p, false);
        expect(p, token::GT);

        /* hack: early return to take advantage of specialized function */
        ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_embed_type(ty))
    } else if (p.peek() == token::POUND_LBRACE) {
        // `#{ ... }` — embedded-block macro form.
        p.bump();
        let blk = ast::mac_embed_block(parse_block_tail(p, lo));
        ret mk_mac_expr(p, lo, p.get_hi_pos(), blk);
    } else if (p.peek() == token::ELLIPSIS) {
        p.bump();
        ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_ellipsis)
    } else if (p.peek() == token::TILDE) {
        p.bump();
        alt p.peek() {
          token::LBRACKET. { // unique array (temporary)
            p.bump();
            let mut = parse_mutability(p);
            let es =
                parse_seq_to_end(token::RBRACKET, some(token::COMMA),
                                 parse_expr, p);
            ex = ast::expr_vec(es, mut, ast::sk_unique);
          }
          token::LIT_STR(s) {
            // `~"..."` — unique string literal.
            p.bump();
            let lit =
                @{node: ast::lit_str(p.get_str(s), ast::sk_unique),
                  span: p.get_span()};
            ex = ast::expr_lit(lit);
          }
          _ { ex = ast::expr_uniq(parse_expr(p)); }
        }
    } else if (eat_word(p, "obj")) {
        // Anonymous object
        // Only make people type () if they're actually adding new fields
        let fields: option::t[[ast::anon_obj_field]] = none;
        if p.peek() == token::LPAREN {
            p.bump();
            fields =
                some(parse_seq_to_end(token::RPAREN, some(token::COMMA),
                                      parse_anon_obj_field, p));
        }
        let meths: [@ast::method] = ~[];
        let inner_obj: option::t[@ast::expr] = none;
        expect(p, token::LBRACE);
        while p.peek() != token::RBRACE {
            if eat_word(p, "with") {
                inner_obj = some(parse_expr(p));
            } else { meths += ~[parse_method(p)]; }
        }
        hi = p.get_hi_pos();
        expect(p, token::RBRACE);
        // fields and methods may be *additional* or *overriding* fields
        // and methods if there's a inner_obj, or they may be the *only*
        // fields and methods if there's no inner_obj.

        // We don't need to pull ".node" out of fields because it's not a
        // "spanned".
        let ob = {fields: fields, methods: meths, inner_obj: inner_obj};
        ex = ast::expr_anon_obj(ob);
    } else if (eat_word(p, "bind")) {
        // Partial application; call exprs are restricted so the bound
        // callee itself isn't parsed as a call.
        let e = parse_expr_res(p, RESTRICT_NO_CALL_EXPRS);
        fn parse_expr_opt(p: &parser) -> option::t[@ast::expr] {
            alt p.peek() {
              token::UNDERSCORE. { p.bump(); ret none; }
              _ { ret some(parse_expr(p)); }
            }
        }
        let es =
            parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                      parse_expr_opt, p);
        hi = es.span.hi;
        ex = ast::expr_bind(e, es.node);
    } else if (p.peek() == token::POUND) {
        let ex_ext = parse_syntax_ext(p);
        hi = ex_ext.span.hi;
        ex = ex_ext.node;
    } else if (eat_word(p, "fail")) {
        // `fail` takes an optional message expression.
        if can_begin_expr(p.peek()) {
            let e = parse_expr(p);
            hi = e.span.hi;
            ex = ast::expr_fail(some(e));
        } else { ex = ast::expr_fail(none); }
    } else if (eat_word(p, "log")) {
        let e = parse_expr(p);
        ex = ast::expr_log(1, e);
        hi = e.span.hi;
    } else if (eat_word(p, "log_err")) {
        let e = parse_expr(p);
        ex = ast::expr_log(0, e);
        hi = e.span.hi;
    } else if (eat_word(p, "assert")) {
        let e = parse_expr(p);
        ex = ast::expr_assert(e);
        hi = e.span.hi;
    } else if (eat_word(p, "check")) {
        /* Should be a predicate (pure boolean function) applied to
           arguments that are all either slot variables or literals.
           but the typechecker enforces that. */
        let e = parse_expr(p);
        hi = e.span.hi;
        ex = ast::expr_check(ast::checked, e);
    } else if (eat_word(p, "claim")) {
        /* Same rules as check, except that if check-claims
           is enabled (a command-line flag), then the parser turns
           claims into check */
        let e = parse_expr(p);
        hi = e.span.hi;
        ex = ast::expr_check(ast::unchecked, e);
    } else if (eat_word(p, "ret")) {
        // `ret` takes an optional value expression.
        if can_begin_expr(p.peek()) {
            let e = parse_expr(p);
            hi = e.span.hi;
            ex = ast::expr_ret(some(e));
        } else { ex = ast::expr_ret(none); }
    } else if (eat_word(p, "break")) {
        ex = ast::expr_break;
        hi = p.get_hi_pos();
    } else if (eat_word(p, "cont")) {
        ex = ast::expr_cont;
        hi = p.get_hi_pos();
    } else if (eat_word(p, "put")) {
        // `put` (iterator yield) takes an optional value; bare `put;`
        // is detected by peeking at the semicolon.
        alt p.peek() {
          token::SEMI. { ex = ast::expr_put(none); }
          _ {
            let e = parse_expr(p);
            hi = e.span.hi;
            ex = ast::expr_put(some(e));
          }
        }
    } else if (eat_word(p, "be")) {
        // `be` — tail call; operand must be a call expression.
        let e = parse_expr(p);

        // FIXME: Is this the right place for this check?
        if /*check*/ast::is_call_expr(e) {
            hi = e.span.hi;
            ex = ast::expr_be(e);
        } else { p.fatal("Non-call expression in tail call"); }
    } else if (eat_word(p, "copy")) {
        let e = parse_expr(p);
        ex = ast::expr_copy(e);
        hi = e.span.hi;
    } else if (eat_word(p, "self")) {
        log "parsing a self-call...";

        expect(p, token::DOT);
        // The rest is a call expression.
        let f: @ast::expr = parse_self_method(p);
        let es =
            parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                      parse_expr, p);
        hi = es.span.hi;
        ex = ast::expr_call(f, es.node);
    } else if (p.peek() == token::MOD_SEP ||
                   is_ident(p.peek()) && !is_word(p, "true") &&
                       !is_word(p, "false")) {
        // A path expression; `true`/`false` are excluded so they parse
        // as literals below.
        check_bad_word(p);
        let pth = parse_path_and_ty_param_substs(p);
        hi = pth.span.hi;
        ex = ast::expr_path(pth);
    } else {
        let lit = parse_lit(p);
        hi = lit.span.hi;
        ex = ast::expr_lit(@lit);
    }
    ret mk_expr(p, lo, hi, ex);
}
// Parses a syntax extension invocation starting at the '#' token.
fn parse_syntax_ext(p: &parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    expect(p, token::POUND);
    ret parse_syntax_ext_naked(p, lo);
}
// Parses a syntax extension after the '#' has been consumed: an
// expander path followed by an argument sequence in parens or (for
// backwards compatibility) brackets. The args are packaged as an
// immutable vector expression handed to the expander.
fn parse_syntax_ext_naked(p: &parser, lo: uint) -> @ast::expr {
    let pth = parse_path(p);
    if ivec::len(pth.node.idents) == 0u {
        p.fatal("expected a syntax expander name");
    }
    //temporary for a backwards-compatible cycle:
    let es = if p.peek() == token::LPAREN {
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                  parse_expr, p)
    } else {
        parse_seq(token::LBRACKET, token::RBRACKET, some(token::COMMA),
                  parse_expr, p)
    };
    let hi = es.span.hi;
    let e = mk_expr(p, es.span.lo, hi,
                    ast::expr_vec(es.node, ast::imm, ast::sk_rc));
    ret mk_mac_expr(p, lo, hi, ast::mac_invoc(pth, e, none));
}
// Parses the method name of a `self.method(...)` call into a
// self-method expression node.
fn parse_self_method(p: &parser) -> @ast::expr {
    let sp = p.get_span();
    let f_name: ast::ident = parse_ident(p);
    ret mk_expr(p, sp.lo, sp.hi, ast::expr_self_method(f_name));
}
// Parses a primary expression followed by any number of field access,
// index, or call suffixes.
fn parse_dot_or_call_expr(p: &parser) -> @ast::expr {
    ret parse_dot_or_call_expr_with(p, parse_bottom_expr(p));
}
// Given an already-parsed base expression, repeatedly layers on call
// suffixes `(...)`, field accesses `.ident`, and index accesses
// `.(expr)` until no suffix follows. Call suffixes are skipped under
// RESTRICT_NO_CALL_EXPRS (used by `bind`).
fn parse_dot_or_call_expr_with(p: &parser, e: @ast::expr) -> @ast::expr {
    let lo = e.span.lo;
    let hi = e.span.hi;
    while true {
        alt p.peek() {
          token::LPAREN. {
            if p.get_restriction() == RESTRICT_NO_CALL_EXPRS {
                ret e;
            } else {
                // Call expr.
                let es =
                    parse_seq(token::LPAREN, token::RPAREN,
                              some(token::COMMA), parse_expr, p);
                hi = es.span.hi;
                e = mk_expr(p, lo, hi, ast::expr_call(e, es.node));
            }
          }
          token::DOT. {
            p.bump();
            alt p.peek() {
              token::IDENT(i, _) {
                hi = p.get_hi_pos();
                p.bump();
                e = mk_expr(p, lo, hi, ast::expr_field(e, p.get_str(i)));
              }
              token::LPAREN. {
                // `.(expr)` — index access.
                p.bump();
                let ix = parse_expr(p);
                hi = ix.span.hi;
                expect(p, token::RPAREN);
                e = mk_expr(p, lo, hi, ast::expr_index(e, ix));
              }
              t { unexpected(p, t); }
            }
          }
          _ { ret e; }
        }
    }
    ret e;
}
// Parses prefix operators (!, unary -, *, @[mutability]) recursively;
// anything else falls through to dot/call parsing. A leading
// deprecated `mutable` keyword is consumed with a warning.
fn parse_prefix_expr(p: &parser) -> @ast::expr {
    if eat_word(p, "mutable") {
        p.warn("ignoring deprecated 'mutable' prefix operator");
    }
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    // FIXME: can only remove this sort of thing when both typestate and
    // alt-exhaustive-match checking are co-operating.
    let lit = @spanned(lo, lo, ast::lit_nil);
    let ex: ast::expr_ = ast::expr_lit(lit);
    alt p.peek() {
      token::NOT. {
        p.bump();
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::not, e);
      }
      token::BINOP(b) {
        alt b {
          token::MINUS. {
            p.bump();
            let e = parse_prefix_expr(p);
            hi = e.span.hi;
            ex = ast::expr_unary(ast::neg, e);
          }
          token::STAR. {
            p.bump();
            let e = parse_prefix_expr(p);
            hi = e.span.hi;
            ex = ast::expr_unary(ast::deref, e);
          }
          _ { ret parse_dot_or_call_expr(p); }
        }
      }
      token::AT. {
        // `@[mutable] expr` — boxing with optional mutability.
        p.bump();
        let m = parse_mutability(p);
        let e = parse_prefix_expr(p);
        hi = e.span.hi;
        ex = ast::expr_unary(ast::box(m), e);
      }
      _ { ret parse_dot_or_call_expr(p); }
    }
    ret mk_expr(p, lo, hi, ex);
}
// Parses a binop expression optionally followed by `? then : else`
// (the ternary conditional); ternary binds looser than all binops.
fn parse_ternary(p: &parser) -> @ast::expr {
    let cond_expr = parse_binops(p);
    if p.peek() == token::QUES {
        p.bump();
        let then_expr = parse_expr(p);
        expect(p, token::COLON);
        let else_expr = parse_expr(p);
        ret mk_expr(p, cond_expr.span.lo, else_expr.span.hi,
                    ast::expr_ternary(cond_expr, then_expr, else_expr));
    } else { ret cond_expr; }
}
// One row of the binary-operator precedence table: token, AST op,
// and numeric precedence (higher binds tighter).
type op_spec = {tok: token::token, op: ast::binop, prec: int};
// FIXME make this a const, don't store it in parser state
// Binary-operator precedence table; higher prec binds tighter. The
// ordering follows the C convention: mul-class > add-class > shifts >
// bitand > bitxor > bitor > comparisons > && > ||.
fn prec_table() -> @[op_spec] {
    ret @~[{tok: token::BINOP(token::STAR), op: ast::mul, prec: 11},
           {tok: token::BINOP(token::SLASH), op: ast::div, prec: 11},
           {tok: token::BINOP(token::PERCENT), op: ast::rem, prec: 11},
           {tok: token::BINOP(token::PLUS), op: ast::add, prec: 10},
           {tok: token::BINOP(token::MINUS), op: ast::sub, prec: 10},
           {tok: token::BINOP(token::LSL), op: ast::lsl, prec: 9},
           {tok: token::BINOP(token::LSR), op: ast::lsr, prec: 9},
           {tok: token::BINOP(token::ASR), op: ast::asr, prec: 9},
           {tok: token::BINOP(token::AND), op: ast::bitand, prec: 8},
           // bitxor sits strictly between bitand (8) and bitor (6);
           // it previously shared prec 6 with bitor, which made
           // `a ^ b | c` group purely left-to-right instead of
           // binding the xor tighter.
           {tok: token::BINOP(token::CARET), op: ast::bitxor, prec: 7},
           {tok: token::BINOP(token::OR), op: ast::bitor, prec: 6},
           // 'as' sits between here with 5
           {tok: token::LT, op: ast::lt, prec: 4},
           {tok: token::LE, op: ast::le, prec: 4},
           {tok: token::GE, op: ast::ge, prec: 4},
           {tok: token::GT, op: ast::gt, prec: 4},
           {tok: token::EQEQ, op: ast::eq, prec: 3},
           {tok: token::NE, op: ast::ne, prec: 3},
           {tok: token::ANDAND, op: ast::and, prec: 2},
           {tok: token::OROR, op: ast::or, prec: 1}];
}
// Parses a full binary-operator expression via precedence climbing,
// starting from minimum precedence 0.
fn parse_binops(p: &parser) -> @ast::expr {
    ret parse_more_binops(p, parse_prefix_expr(p), 0);
}
// Precedence anchors outside the binop table: unary ops bind tightest,
// `as` casts sit between bitor and the comparisons, and ternary `?:`
// is the loosest.
const unop_prec: int = 100;
const as_prec: int = 5;
const ternary_prec: int = 0;
// Precedence-climbing step: while the lookahead is a binop with
// precedence above min_prec, parse its right operand at that
// precedence and fold it into lhs; `as` casts are handled separately
// at as_prec since their rhs is a type, not an expression.
fn parse_more_binops(p: &parser, lhs: @ast::expr, min_prec: int) ->
   @ast::expr {
    let peeked = p.peek();
    for cur: op_spec in *p.get_prec_table() {
        if cur.prec > min_prec && cur.tok == peeked {
            p.bump();
            let rhs = parse_more_binops(p, parse_prefix_expr(p), cur.prec);
            let bin =
                mk_expr(p, lhs.span.lo, rhs.span.hi,
                        ast::expr_binary(cur.op, lhs, rhs));
            ret parse_more_binops(p, bin, min_prec);
        }
    }
    if as_prec > min_prec && eat_word(p, "as") {
        let rhs = parse_ty(p, true);
        let _as =
            mk_expr(p, lhs.span.lo, rhs.span.hi, ast::expr_cast(lhs, rhs));
        ret parse_more_binops(p, _as, min_prec);
    }
    ret lhs;
}
// Parses an assignment-level expression: a ternary optionally followed
// by `=`, a compound `op=`, `<-` (move), or `<->` (swap). The rhs is
// parsed as a full expression, so assignment is right-associative.
fn parse_assign_expr(p: &parser) -> @ast::expr {
    let lo = p.get_lo_pos();
    let lhs = parse_ternary(p);
    alt p.peek() {
      token::EQ. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign(lhs, rhs));
      }
      token::BINOPEQ(op) {
        p.bump();
        let rhs = parse_expr(p);
        // Map the compound-assignment token to its AST binop; the
        // initial ast::add is just a dummy for definite-assignment.
        let aop = ast::add;
        alt op {
          token::PLUS. { aop = ast::add; }
          token::MINUS. { aop = ast::sub; }
          token::STAR. { aop = ast::mul; }
          token::SLASH. { aop = ast::div; }
          token::PERCENT. { aop = ast::rem; }
          token::CARET. { aop = ast::bitxor; }
          token::AND. { aop = ast::bitand; }
          token::OR. { aop = ast::bitor; }
          token::LSL. { aop = ast::lsl; }
          token::LSR. { aop = ast::lsr; }
          token::ASR. { aop = ast::asr; }
        }
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign_op(aop, lhs, rhs));
      }
      token::LARROW. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_move(lhs, rhs));
      }
      token::DARROW. {
        p.bump();
        let rhs = parse_expr(p);
        ret mk_expr(p, lo, rhs.span.hi, ast::expr_swap(lhs, rhs));
      }
      _ {/* fall through */ }
    }
    ret lhs;
}
// Shared worker for `if` and `if check`: parses condition, then-block,
// and an optional else expression, returning the pieces plus the
// overall span so the caller can build the right expr variant.
fn parse_if_expr_1(p: &parser) ->
   {cond: @ast::expr,
    then: ast::blk,
    els: option::t[@ast::expr],
    lo: uint,
    hi: uint} {
    let lo = p.get_last_lo_pos();
    let cond = parse_expr(p);
    let thn = parse_block(p);
    let els: option::t[@ast::expr] = none;
    let hi = thn.span.hi;
    if eat_word(p, "else") {
        let elexpr = parse_else_expr(p);
        els = some(elexpr);
        hi = elexpr.span.hi;
    }
    ret {cond: cond, then: thn, els: els, lo: lo, hi: hi};
}
// Parses an `if` expression; `if check <pred>` becomes the typestate
// expr_if_check variant, plain `if` becomes expr_if.
fn parse_if_expr(p: &parser) -> @ast::expr {
    if eat_word(p, "check") {
        let q = parse_if_expr_1(p);
        ret mk_expr(p, q.lo, q.hi, ast::expr_if_check(q.cond, q.then, q.els));
    } else {
        let q = parse_if_expr_1(p);
        ret mk_expr(p, q.lo, q.hi, ast::expr_if(q.cond, q.then, q.els));
    }
}
// Parses an anonymous fn expression (fn/block/lambda keyword already
// consumed) with the given calling-convention proto.
fn parse_fn_expr(p: &parser, proto: ast::proto) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_fn_decl(p, ast::impure_fn, ast::il_normal);
    let body = parse_block(p);
    let _fn = {decl: decl, proto: proto, body: body};
    ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parses a fn-block expression `{|args| body}`; the opening brace has
// already been consumed by the caller, so only the `|...|` decl and
// the block tail remain.
fn parse_fn_block_expr(p: &parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let decl = parse_fn_block_decl(p);
    let body = parse_block_tail(p, lo);
    let _fn = {decl: decl, proto: ast::proto_block, body: body};
    ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parses what follows `else`: either a chained `if` or a plain block
// wrapped as a block expression.
fn parse_else_expr(p: &parser) -> @ast::expr {
    if eat_word(p, "if") {
        ret parse_if_expr(p);
    } else {
        let blk = parse_block(p);
        ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
    }
}
// Parses `for [each] <local> in <seq> <block>`; the `each` variant
// (iterator loop) produces expr_for_each, otherwise expr_for.
fn parse_for_expr(p: &parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let is_each = eat_word(p, "each");
    let decl = parse_local(p, false);
    expect_word(p, "in");
    let seq = parse_expr(p);
    let body = parse_block(p);
    let hi = body.span.hi;
    if is_each {
        ret mk_expr(p, lo, hi, ast::expr_for_each(decl, seq, body));
    } else { ret mk_expr(p, lo, hi, ast::expr_for(decl, seq, body)); }
}
// Parses `while <cond> <block>` (the `while` keyword was consumed by
// the caller).
fn parse_while_expr(p: &parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let cond = parse_expr(p);
    let body = parse_block(p);
    let hi = body.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_while(cond, body));
}
// Parses `do <block> while <cond>` (the `do` keyword was consumed by
// the caller).
fn parse_do_while_expr(p: &parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let body = parse_block(p);
    expect_word(p, "while");
    let cond = parse_expr(p);
    let hi = cond.span.hi;
    ret mk_expr(p, lo, hi, ast::expr_do_while(body, cond));
}
// Parses an `alt` (match) expression: discriminant, then `{` arm* `}`
// where each arm is one or more '|'-separated patterns and a block.
fn parse_alt_expr(p: &parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    let discriminant = parse_expr(p);
    expect(p, token::LBRACE);
    let arms: [ast::arm] = ~[];
    while p.peek() != token::RBRACE {
        let pats = parse_pats(p);
        let blk = parse_block(p);
        arms += ~[{pats: pats, body: blk}];
    }
    let hi = p.get_hi_pos();
    p.bump();
    ret mk_expr(p, lo, hi, ast::expr_alt(discriminant, arms));
}
// Parses a `spawn` expression: a callee (bottom expr only) plus an
// argument list. NOTE(review): the only call site in parse_bottom_expr
// is commented out, so this path appears currently unreachable.
fn parse_spawn_expr(p: &parser) -> @ast::expr {
    let lo = p.get_last_lo_pos();
    // FIXME: Parse domain and name
    // FIXME: why no full expr?
    let fn_expr = parse_bottom_expr(p);
    let es =
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                  parse_expr, p);
    let hi = es.span.hi;
    ret mk_expr(p, lo, hi,
                ast::expr_spawn(ast::dom_implicit, option::none, fn_expr,
                                es.node));
}
// Parses a full expression with no parsing restrictions in force.
fn parse_expr(p: &parser) -> @ast::expr {
    ret parse_expr_res(p, UNRESTRICTED);
}
// Parses an expression under a temporary restriction (e.g. forbidding
// call exprs inside `bind`), restoring the previous restriction after.
fn parse_expr_res(p: &parser, r: restriction) -> @ast::expr {
    let old = p.get_restriction();
    p.restrict(r);
    let e = parse_assign_expr(p);
    p.restrict(old);
    ret e;
}
// Parses an optional local-variable initializer: `= expr` (assign) or
// `<- expr` (move); returns none when no initializer follows.
fn parse_initializer(p: &parser) -> option::t[ast::initializer] {
    alt p.peek() {
      token::EQ. {
        p.bump();
        ret some({op: ast::init_assign, expr: parse_expr(p)});
      }
      token::LARROW. {
        p.bump();
        ret some({op: ast::init_move, expr: parse_expr(p)});
      }
      // Now that the channel is the first argument to receive,
      // combining it with an initializer doesn't really make sense.
      // case (token::RECV) {
      //     p.bump();
      //     ret some(rec(op = ast::init_recv,
      //                  expr = parse_expr(p)));
      // }
      _ {
        ret none;
      }
    }
}
// Parses one or more '|'-separated patterns (the pattern list of a
// single alt arm).
fn parse_pats(p: &parser) -> [@ast::pat] {
    let pats = ~[];
    while true {
        pats += ~[parse_pat(p)];
        if p.peek() == token::BINOP(token::OR) { p.bump(); } else { break; }
    }
    ret pats;
}
// Parses a single pattern: wildcard `_`, box `@pat`, record
// `{field: pat, ..., _}`, tuple `(pat, ...)`, literal, plain binding,
// or tag (variant) pattern `path(pat, ...)` / `path.`.
fn parse_pat(p: &parser) -> @ast::pat {
    let lo = p.get_lo_pos();
    let hi = p.get_hi_pos();
    let pat;
    alt p.peek() {
      token::UNDERSCORE. { p.bump(); pat = ast::pat_wild; }
      token::AT. {
        p.bump();
        let sub = parse_pat(p);
        pat = ast::pat_box(sub);
        hi = sub.span.hi;
      }
      token::LBRACE. {
        p.bump();
        let fields = ~[];
        let etc = false;
        let first = true;
        while p.peek() != token::RBRACE {
            if first { first = false; } else { expect(p, token::COMMA); }
            // A trailing `_` means "ignore remaining fields" and must
            // be the last entry before the closing brace.
            if p.peek() == token::UNDERSCORE {
                p.bump();
                if p.peek() != token::RBRACE {
                    p.fatal("expecting }, found " +
                                token::to_str(p.get_reader(), p.peek()));
                }
                etc = true;
                break;
            }
            let fieldname = parse_ident(p);
            let subpat;
            if p.peek() == token::COLON {
                p.bump();
                subpat = parse_pat(p);
            } else {
                // Shorthand `{name}` binds the field to `name`; reject
                // reserved words in binding position.
                if p.get_bad_expr_words().contains_key(fieldname) {
                    p.fatal("found " + fieldname + " in binding position");
                }
                subpat =
                    @{id: p.get_id(),
                      node: ast::pat_bind(fieldname),
                      span: ast::mk_sp(lo, hi)};
            }
            fields += ~[{ident: fieldname, pat: subpat}];
        }
        hi = p.get_hi_pos();
        p.bump();
        pat = ast::pat_rec(fields, etc);
      }
      token::LPAREN. {
        p.bump();
        if p.peek() == token::RPAREN {
            // `()` — nil-literal pattern.
            hi = p.get_hi_pos();
            p.bump();
            pat = ast::pat_lit(@{node: ast::lit_nil,
                                 span: ast::mk_sp(lo,hi)});
        } else {
            let fields = ~[parse_pat(p)];
            while p.peek() == token::COMMA {
                p.bump();
                fields += ~[parse_pat(p)];
            }
            // A one-element tuple pattern needs a trailing comma to
            // distinguish it from parenthesization.
            if ivec::len(fields) == 1u { expect(p, token::COMMA); }
            hi = p.get_hi_pos();
            expect(p, token::RPAREN);
            pat = ast::pat_tup(fields);
        }
      }
      tok {
        if !is_ident(tok) || is_word(p, "true") || is_word(p, "false") {
            let lit = parse_lit(p);
            hi = lit.span.hi;
            pat = ast::pat_lit(@lit);
        } else if (is_plain_ident(p) &&
                       alt p.look_ahead(1u) {
                         token::DOT. | token::LPAREN. | token::LBRACKET. {
                           false
                         }
                         _ { true }
                       }) {
            // Bare identifier not followed by ./(/[ — a binding.
            hi = p.get_hi_pos();
            pat = ast::pat_bind(parse_value_ident(p));
        } else {
            // Otherwise a tag pattern: `path(args)` or nullary `path.`.
            let tag_path = parse_path_and_ty_param_substs(p);
            hi = tag_path.span.hi;
            let args: [@ast::pat];
            alt p.peek() {
              token::LPAREN. {
                let a =
                    parse_seq(token::LPAREN, token::RPAREN,
                              some(token::COMMA), parse_pat, p);
                args = a.node;
                hi = a.span.hi;
              }
              token::DOT. { args = ~[]; p.bump(); }
              _ { expect(p, token::LPAREN); fail; }
            }
            pat = ast::pat_tag(tag_path, args);
        }
      }
    }
    ret @{id: p.get_id(), node: pat, span: ast::mk_sp(lo, hi)};
}
// Parses one local declaration: pattern, optional `: type` (otherwise
// ty_infer), and — when allow_init — an optional initializer.
fn parse_local(p: &parser, allow_init: bool) -> @ast::local {
    let lo = p.get_lo_pos();
    let pat = parse_pat(p);
    let ty = @spanned(lo, lo, ast::ty_infer);
    if eat(p, token::COLON) { ty = parse_ty(p, false); }
    let init = if allow_init { parse_initializer(p) } else { none };
    ret @spanned(lo, p.get_last_hi_pos(),
                 {ty: ty,
                  pat: pat,
                  init: init,
                  id: p.get_id()});
}
// Parses a `let` declaration: one or more comma-separated locals
// (the `let` keyword was consumed by the caller).
fn parse_let(p: &parser) -> @ast::decl {
    let lo = p.get_lo_pos();
    let locals = ~[parse_local(p, true)];
    while p.peek() == token::COMMA {
        p.bump();
        locals += ~[parse_local(p, true)];
    }
    ret @spanned(lo, p.get_last_hi_pos(), ast::decl_local(locals));
}
// Dispatches statement parsing on file type: ordinary source files get
// source statements, crate files get crate directives.
fn parse_stmt(p: &parser) -> @ast::stmt {
    if p.get_file_type() == SOURCE_FILE {
        ret parse_source_stmt(p);
    } else { ret parse_crate_stmt(p); }
}
// Parses a crate directive and wraps it as a statement node.
fn parse_crate_stmt(p: &parser) -> @ast::stmt {
    let cdir = parse_crate_directive(p, ~[]);
    ret @spanned(cdir.span.lo, cdir.span.hi,
                 ast::stmt_crate_directive(@cdir));
}
// Parses a statement in an ordinary source file: a `let` declaration,
// an item (possibly with outer attributes), or an expression
// statement. A '#'-extension found while looking for attributes is
// returned directly as an expression statement.
fn parse_source_stmt(p: &parser) -> @ast::stmt {
    let lo = p.get_lo_pos();
    if eat_word(p, "let") {
        let decl = parse_let(p);
        ret @spanned(lo, decl.span.hi, ast::stmt_decl(decl, p.get_id()));
    } else {
        let item_attrs;
        alt parse_outer_attrs_or_ext(p) {
          none. { item_attrs = ~[]; }
          some(left(attrs)) { item_attrs = attrs; }
          some(right(ext)) {
            ret @spanned(lo, ext.span.hi, ast::stmt_expr(ext, p.get_id()));
          }
        }
        let maybe_item = parse_item(p, item_attrs);

        // If we have attributes then we should have an item
        if ivec::len(item_attrs) > 0u {
            alt maybe_item {
              some(_) {/* fallthrough */ }
              _ { ret p.fatal("expected item"); }
            }
        }
        alt maybe_item {
          some(i) {
            let hi = i.span.hi;
            let decl = @spanned(lo, hi, ast::decl_item(i));
            ret @spanned(lo, hi, ast::stmt_decl(decl, p.get_id()));
          }
          none. {
            // Remainder are line-expr stmts.
            let e = parse_expr(p);
            ret @spanned(lo, e.span.hi, ast::stmt_expr(e, p.get_id()));
          }
          // Defensive arm: some/none above already cover option::t.
          _ { p.fatal("expected statement"); }
        }
    }
}
// Extracts the expression from an expression statement, or none for
// any other statement kind.
fn stmt_to_expr(stmt: @ast::stmt) -> option::t[@ast::expr] {
    ret alt stmt.node { ast::stmt_expr(e, _) { some(e) } _ { none } };
}
// Answers whether a statement must be terminated by ';' when it is not
// the tail expression of a block: locals and most expression forms do,
// block-like forms (if/for/while/alt/fn/block/anon-obj) do not.
fn stmt_ends_with_semi(stmt: &ast::stmt) -> bool {
    alt stmt.node {
      ast::stmt_decl(d, _) {
        ret alt d.node {
              ast::decl_local(_) { true }
              ast::decl_item(_) { false }
            }
      }
      ast::stmt_expr(e, _) {
        // Deliberately exhaustive over expr_ so new variants force a
        // decision here.
        ret alt e.node {
              ast::expr_vec(_, _, _) { true }
              ast::expr_rec(_, _) { true }
              ast::expr_tup(_) { true }
              ast::expr_call(_, _) { true }
              ast::expr_self_method(_) { false }
              ast::expr_bind(_, _) { true }
              ast::expr_spawn(_, _, _, _) { true }
              ast::expr_binary(_, _, _) { true }
              ast::expr_unary(_, _) { true }
              ast::expr_lit(_) { true }
              ast::expr_cast(_, _) { true }
              ast::expr_if(_, _, _) { false }
              ast::expr_ternary(_, _, _) { true }
              ast::expr_for(_, _, _) { false }
              ast::expr_for_each(_, _, _) { false }
              ast::expr_while(_, _) { false }
              ast::expr_do_while(_, _) { false }
              ast::expr_alt(_, _) { false }
              ast::expr_fn(_) { false }
              ast::expr_block(_) { false }
              ast::expr_move(_, _) { true }
              ast::expr_assign(_, _) { true }
              ast::expr_swap(_, _) { true }
              ast::expr_assign_op(_, _, _) { true }
              ast::expr_send(_, _) { true }
              ast::expr_recv(_, _) { true }
              ast::expr_field(_, _) { true }
              ast::expr_index(_, _) { true }
              ast::expr_path(_) { true }
              ast::expr_mac(_) { true }
              ast::expr_fail(_) { true }
              ast::expr_break. { true }
              ast::expr_cont. { true }
              ast::expr_ret(_) { true }
              ast::expr_put(_) { true }
              ast::expr_be(_) { true }
              ast::expr_log(_, _) { true }
              ast::expr_check(_, _) { true }
              ast::expr_if_check(_, _, _) { false }
              ast::expr_port(_) { true }
              ast::expr_chan(_) { true }
              ast::expr_anon_obj(_) { false }
              ast::expr_assert(_) { true }
            }
      }

      // We should not be calling this on a cdir.
      ast::stmt_crate_directive(cdir) {
        fail;
      }
    }
}
// Parses a braced block, consuming the opening '{' and tail-calling
// (`be`) into parse_block_tail for the contents.
fn parse_block(p: &parser) -> ast::blk {
    let lo = p.get_lo_pos();
    expect(p, token::LBRACE);
    be parse_block_tail(p, lo);
}
// some blocks start with "#{"...
// Parses block contents after the opening delimiter: a sequence of
// statements, with a trailing expression (no ';' before '}') becoming
// the block's value. Stray ';' tokens are skipped.
fn parse_block_tail(p: &parser, lo: uint) -> ast::blk {
    let stmts: [@ast::stmt] = ~[];
    let expr: option::t[@ast::expr] = none;
    while p.peek() != token::RBRACE {
        alt p.peek() {
          token::SEMI. {
            p.bump(); // empty
          }
          _ {
            let stmt = parse_stmt(p);
            alt stmt_to_expr(stmt) {
              some(e) {
                // Expression statement: ';' makes it a statement,
                // '}' makes it the block's tail value; any other
                // token is allowed only after block-like exprs.
                alt p.peek() {
                  token::SEMI. { p.bump(); stmts += ~[stmt]; }
                  token::RBRACE. { expr = some(e); }
                  t {
                    if stmt_ends_with_semi(*stmt) {
                        p.fatal("expected ';' or '}' after " +
                                    "expression but found " +
                                    token::to_str(p.get_reader(), t));
                    }
                    stmts += ~[stmt];
                  }
                }
              }
              none. {
                // Not an expression statement.
                stmts += ~[stmt];
                if p.get_file_type() == SOURCE_FILE &&
                       stmt_ends_with_semi(*stmt) {
                    expect(p, token::SEMI);
                }
              }
            }
          }
        }
    }
    let hi = p.get_hi_pos();
    p.bump();
    let bloc = {stmts: stmts, expr: expr, id: p.get_id()};
    ret spanned(lo, hi, bloc);
}
// Parses one type parameter with an optional kind prefix: '~' marks a
// unique kind, '@' a shared kind; the default is pinned.
fn parse_ty_param(p: &parser) -> ast::ty_param {
    let k = alt p.peek() {
              token::TILDE. { p.bump(); ast::kind_unique }
              token::AT. { p.bump(); ast::kind_shared }
              _ { ast::kind_pinned }
            };
    ret {ident: parse_ident(p), kind: k};
}
// Parses an optional type-parameter list, accepting both the older
// `[T, U]` bracket syntax and the newer `<T, U>` angle syntax.
fn parse_ty_params(p: &parser) -> [ast::ty_param] {
    let ty_params: [ast::ty_param] = ~[];
    if p.peek() == token::LBRACKET {
        ty_params =
            parse_seq(token::LBRACKET, token::RBRACKET, some(token::COMMA),
                      parse_ty_param, p).node;
    }
    if p.peek() == token::LT {
        ty_params =
            parse_seq(token::LT, token::GT, some(token::COMMA),
                      parse_ty_param, p).node;
    }
    ret ty_params;
}
// Parses a function declaration: parenthesized args, an optional
// `: constr...` typestate constraint list, and an optional `-> ty`
// return type (which may be `!`, yielding a noreturn decl with ty_bot).
// With no arrow the return type defaults to nil spanned over the args.
fn parse_fn_decl(p: &parser, purity: ast::purity, il: ast::inlineness)
    -> ast::fn_decl {
    let inputs: ast::spanned[[ast::arg]] =
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA), parse_arg,
                  p);
    let rslt: ty_or_bang;
    // Use the args list to translate each bound variable
    // mentioned in a constraint to an arg index.
    // Seems weird to do this in the parser, but I'm not sure how else to.
    let constrs = ~[];
    if p.peek() == token::COLON {
        p.bump();
        constrs = parse_constrs(bind parse_ty_constr(inputs.node, _), p);
    }
    if p.peek() == token::RARROW {
        p.bump();
        rslt = parse_ty_or_bang(p);
    } else {
        rslt = a_ty(@spanned(inputs.span.lo, inputs.span.hi, ast::ty_nil));
    }
    alt rslt {
      a_ty(t) {
        ret {inputs: inputs.node,
             output: t,
             purity: purity,
             il: il,
             cf: ast::return,
             constraints: constrs};
      }
      a_bang. {
        // `-> !` — diverging function: bottom type, noreturn.
        ret {inputs: inputs.node,
             output: @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_bot),
             purity: purity,
             il: il,
             cf: ast::noreturn,
             constraints: constrs};
      }
    }
}
// Parses a fn-block parameter list `|a, b|` (the delimiters are the
// '|' token on both sides); return type is always inferred and the
// decl carries no constraints.
fn parse_fn_block_decl(p: &parser) -> ast::fn_decl {
    let inputs: ast::spanned[[ast::arg]] =
        parse_seq(token::BINOP(token::OR), token::BINOP(token::OR),
                  some(token::COMMA), parse_fn_block_arg, p);
    ret {inputs: inputs.node,
         output: @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer),
         purity: ast::impure_fn,
         il: ast::il_normal,
         cf: ast::return,
         constraints: ~[]};
}
// Parses a function: declaration followed by a block body.
fn parse_fn(p: &parser, proto: ast::proto, purity: ast::purity,
            il: ast::inlineness) -> ast::_fn {
    let decl = parse_fn_decl(p, purity, il);
    let body = parse_block(p);
    ret {decl: decl, proto: proto, body: body};
}
// Parses a function header: name plus optional type parameters.
fn parse_fn_header(p: &parser) -> {ident: ast::ident, tps: [ast::ty_param]} {
    let id = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    ret {ident: id, tps: ty_params};
}
// Allocates a boxed item node with a fresh node id and the given span.
fn mk_item(p: &parser, lo: uint, hi: uint, ident: &ast::ident,
           node: &ast::item_, attrs: &[ast::attribute]) -> @ast::item {
    ret @{ident: ident,
          attrs: attrs,
          id: p.get_id(),
          node: node,
          span: ast::mk_sp(lo, hi)};
}
// Parses a fn/iter item (keyword already consumed): header then body,
// wrapped as an item_fn with the given purity/proto/inlineness.
fn parse_item_fn_or_iter(p: &parser, purity: ast::purity, proto: ast::proto,
                         attrs: &[ast::attribute], il: ast::inlineness)
    -> @ast::item {
    let lo = p.get_last_lo_pos();
    let t = parse_fn_header(p);
    let f = parse_fn(p, proto, purity, il);
    ret mk_item(p, lo, f.body.span.hi, t.ident, ast::item_fn(f, t.tps),
                attrs);
}
// Parses one obj-item field: `[mutable] ident: type`.
fn parse_obj_field(p: &parser) -> ast::obj_field {
    let mut = parse_mutability(p);
    let ident = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    ret {mut: mut, ty: ty, ident: ident, id: p.get_id()};
}
// Parses one anonymous-obj field: `[mutable] ident: type = expr`
// (unlike obj-item fields, these carry an initializing expression).
fn parse_anon_obj_field(p: &parser) -> ast::anon_obj_field {
    let mut = parse_mutability(p);
    let ident = parse_value_ident(p);
    expect(p, token::COLON);
    let ty = parse_ty(p, false);
    expect(p, token::EQ);
    let expr = parse_expr(p);
    ret {mut: mut, ty: ty, expr: expr, ident: ident, id: p.get_id()};
}
// Parses one object method: proto keyword, name, then an impure
// normal-inlineness function.
fn parse_method(p: &parser) -> @ast::method {
    let lo = p.get_lo_pos();
    let proto = parse_proto(p);
    let ident = parse_value_ident(p);
    let f = parse_fn(p, proto, ast::impure_fn, ast::il_normal);
    let meth = {ident: ident, meth: f, id: p.get_id()};
    ret @spanned(lo, f.body.span.hi, meth);
}
// Parses an obj item (keyword already consumed): name, type params,
// parenthesized field list, then `{ method* }`.
fn parse_item_obj(p: &parser, attrs: &[ast::attribute]) ->
   @ast::item {
    let lo = p.get_last_lo_pos();
    let ident = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    let fields: ast::spanned[[ast::obj_field]] =
        parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
                  parse_obj_field, p);
    let meths: [@ast::method] = ~[];
    expect(p, token::LBRACE);
    while p.peek() != token::RBRACE {
        meths += ~[parse_method(p)];
    }
    let hi = p.get_hi_pos();
    expect(p, token::RBRACE);
    let ob: ast::_obj = {fields: fields.node, methods: meths};
    ret mk_item(p, lo, hi, ident, ast::item_obj(ob, ty_params, p.get_id()),
                attrs);
}
// Parses a resource item (keyword already consumed): name, type
// params, a single `(arg: type)` destructor parameter, and the
// destructor block. The destructor decl is synthesized here: one
// by-reference (alias) arg, nil return.
fn parse_item_res(p: &parser, attrs: &[ast::attribute]) ->
   @ast::item {
    let lo = p.get_last_lo_pos();
    let ident = parse_value_ident(p);
    let ty_params = parse_ty_params(p);
    expect(p, token::LPAREN);
    let arg_ident = parse_value_ident(p);
    expect(p, token::COLON);
    let t = parse_ty(p, false);
    expect(p, token::RPAREN);
    let dtor = parse_block(p);
    let decl =
        {inputs:
             ~[{mode: ast::alias(false),
                ty: t,
                ident: arg_ident,
                id: p.get_id()}],
         output: @spanned(lo, lo, ast::ty_nil),
         purity: ast::impure_fn,
         il: ast::il_normal,
         cf: ast::return,
         constraints: ~[]};
    let f = {decl: decl, proto: ast::proto_fn, body: dtor};
    ret mk_item(p, lo, dtor.span.hi, ident,
                ast::item_res(f, p.get_id(), ty_params, p.get_id()), attrs);
}
fn parse_mod_items(p: &parser, term: token::token,
first_item_attrs: &[ast::attribute]) -> ast::_mod {
// Shouldn't be any view items since we've already parsed an item attr
let view_items =
if ivec::len(first_item_attrs) == 0u { parse_view(p) } else { ~[] };
let items: [@ast::item] = ~[];
let initial_attrs = first_item_attrs;
while p.peek() != term {
let attrs = initial_attrs + parse_outer_attributes(p);
initial_attrs = ~[];
alt parse_item(p, attrs) {
some(i) { items += ~[i]; }
_ {
p.fatal("expected item but found " +
token::to_str(p.get_reader(), p.peek()));
}
}
}
ret {view_items: view_items, items: items};
}
fn parse_item_const(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let id = parse_value_ident(p);
expect(p, token::COLON);
let ty = parse_ty(p, false);
expect(p, token::EQ);
let e = parse_expr(p);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret mk_item(p, lo, hi, id, ast::item_const(ty, e), attrs);
}
fn parse_item_mod(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let id = parse_ident(p);
expect(p, token::LBRACE);
let inner_attrs = parse_inner_attrs_and_next(p);
let first_item_outer_attrs = inner_attrs.next;
let m = parse_mod_items(p, token::RBRACE, first_item_outer_attrs);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret mk_item(p, lo, hi, id, ast::item_mod(m), attrs + inner_attrs.inner);
}
fn parse_item_native_type(p: &parser, attrs: &[ast::attribute]) ->
@ast::native_item {
let t = parse_type_decl(p);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret @{ident: t.ident,
attrs: attrs,
node: ast::native_item_ty,
id: p.get_id(),
span: ast::mk_sp(t.lo, hi)};
}
fn parse_item_native_fn(p: &parser, attrs: &[ast::attribute]) ->
@ast::native_item {
let lo = p.get_last_lo_pos();
let t = parse_fn_header(p);
let decl = parse_fn_decl(p, ast::impure_fn, ast::il_normal);
let link_name = none;
if p.peek() == token::EQ { p.bump(); link_name = some(parse_str(p)); }
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret @{ident: t.ident,
attrs: attrs,
node: ast::native_item_fn(link_name, decl, t.tps),
id: p.get_id(),
span: ast::mk_sp(lo, hi)};
}
fn parse_native_item(p: &parser, attrs: &[ast::attribute]) ->
@ast::native_item {
if eat_word(p, "type") {
ret parse_item_native_type(p, attrs);
} else if (eat_word(p, "fn")) {
ret parse_item_native_fn(p, attrs);
} else { unexpected(p, p.peek()); }
}
fn parse_native_mod_items(p: &parser, native_name: &str, abi: ast::native_abi,
first_item_attrs: &[ast::attribute])
-> ast::native_mod {
// Shouldn't be any view items since we've already parsed an item attr
let view_items =
if ivec::len(first_item_attrs) == 0u {
parse_native_view(p)
} else { ~[] };
let items: [@ast::native_item] = ~[];
let initial_attrs = first_item_attrs;
while p.peek() != token::RBRACE {
let attrs = initial_attrs + parse_outer_attributes(p);
initial_attrs = ~[];
items += ~[parse_native_item(p, attrs)];
}
ret {native_name: native_name,
abi: abi,
view_items: view_items,
items: items};
}
fn parse_item_native_mod(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let abi = ast::native_abi_cdecl;
if !is_word(p, "mod") {
let t = parse_str(p);
if str::eq(t, "cdecl") {
} else if (str::eq(t, "rust")) {
abi = ast::native_abi_rust;
} else if (str::eq(t, "llvm")) {
abi = ast::native_abi_llvm;
} else if (str::eq(t, "rust-intrinsic")) {
abi = ast::native_abi_rust_intrinsic;
} else if (str::eq(t, "x86stdcall")) {
abi = ast::native_abi_x86stdcall;
} else { p.fatal("unsupported abi: " + t); }
}
expect_word(p, "mod");
let id = parse_ident(p);
let native_name;
if p.peek() == token::EQ {
expect(p, token::EQ);
native_name = parse_str(p);
} else { native_name = id; }
expect(p, token::LBRACE);
let more_attrs = parse_inner_attrs_and_next(p);
let inner_attrs = more_attrs.inner;
let first_item_outer_attrs = more_attrs.next;
let m =
parse_native_mod_items(p, native_name, abi, first_item_outer_attrs);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret mk_item(p, lo, hi, id, ast::item_native_mod(m), attrs + inner_attrs);
}
fn parse_type_decl(p: &parser) -> {lo: uint, ident: ast::ident} {
let lo = p.get_last_lo_pos();
let id = parse_ident(p);
ret {lo: lo, ident: id};
}
fn parse_item_type(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let t = parse_type_decl(p);
let tps = parse_ty_params(p);
expect(p, token::EQ);
let ty = parse_ty(p, false);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret mk_item(p, t.lo, hi, t.ident, ast::item_ty(ty, tps), attrs);
}
fn parse_item_tag(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let id = parse_ident(p);
let ty_params = parse_ty_params(p);
let variants: [ast::variant] = ~[];
// Newtype syntax
if p.peek() == token::EQ {
if p.get_bad_expr_words().contains_key(id) {
p.fatal("found " + id + " in tag constructor position");
}
p.bump();
let ty = parse_ty(p, false);
expect(p, token::SEMI);
let variant =
spanned(ty.span.lo, ty.span.hi,
{name: id,
args: ~[{ty: ty, id: p.get_id()}],
id: p.get_id()});
ret mk_item(p, lo, ty.span.hi, id,
ast::item_tag(~[variant], ty_params), attrs);
}
expect(p, token::LBRACE);
while p.peek() != token::RBRACE {
let tok = p.peek();
alt tok {
token::IDENT(name, _) {
check_bad_word(p);
let vlo = p.get_lo_pos();
p.bump();
let args: [ast::variant_arg] = ~[];
let vhi = p.get_hi_pos();
alt p.peek() {
token::LPAREN. {
let arg_tys =
parse_seq(token::LPAREN, token::RPAREN,
some(token::COMMA), bind parse_ty(_, false), p);
for ty: @ast::ty in arg_tys.node {
args += ~[{ty: ty, id: p.get_id()}];
}
vhi = arg_tys.span.hi;
}
_ {/* empty */ }
}
expect(p, token::SEMI);
p.get_id();
let vr = {name: p.get_str(name), args: args, id: p.get_id()};
variants += ~[spanned(vlo, vhi, vr)];
}
token::RBRACE. {/* empty */ }
_ {
p.fatal("expected name of variant or '}' but found " +
token::to_str(p.get_reader(), tok));
}
}
}
let hi = p.get_hi_pos();
p.bump();
ret mk_item(p, lo, hi, id, ast::item_tag(variants, ty_params), attrs);
}
fn parse_auth(p: &parser) -> ast::_auth {
if eat_word(p, "unsafe") {
ret ast::auth_unsafe;
} else { unexpected(p, p.peek()); }
}
fn parse_item(p: &parser, attrs: &[ast::attribute]) -> option::t[@ast::item] {
if eat_word(p, "const") {
ret some(parse_item_const(p, attrs));
} else if (eat_word(p, "inline")) {
expect_word(p, "fn");
ret some(parse_item_fn_or_iter(p, ast::impure_fn, ast::proto_fn,
attrs, ast::il_inline));
} else if (is_word(p, "fn") && p.look_ahead(1u) != token::LPAREN) {
p.bump();
ret some(parse_item_fn_or_iter(p, ast::impure_fn, ast::proto_fn,
attrs, ast::il_normal));
} else if (eat_word(p, "pred")) {
ret some(parse_item_fn_or_iter(p, ast::pure_fn, ast::proto_fn,
attrs, ast::il_normal));
} else if (eat_word(p, "iter")) {
ret some(parse_item_fn_or_iter(p, ast::impure_fn, ast::proto_iter,
attrs, ast::il_normal));
} else if (eat_word(p, "mod")) {
ret some(parse_item_mod(p, attrs));
} else if (eat_word(p, "native")) {
ret some(parse_item_native_mod(p, attrs));
}
if eat_word(p, "type") {
ret some(parse_item_type(p, attrs));
} else if (eat_word(p, "tag")) {
ret some(parse_item_tag(p, attrs));
} else if (is_word(p, "obj") && p.look_ahead(1u) != token::LPAREN) {
p.bump();
ret some(parse_item_obj(p, attrs));
} else if (eat_word(p, "resource")) {
ret some(parse_item_res(p, attrs));
} else { ret none; }
}
// A type to distingush between the parsing of item attributes or syntax
// extensions, which both begin with token.POUND
type attr_or_ext = option::t[either::t[[ast::attribute], @ast::expr]];
fn parse_outer_attrs_or_ext(p: &parser) -> attr_or_ext {
if p.peek() == token::POUND {
let lo = p.get_lo_pos();
p.bump();
if p.peek() == token::LBRACKET {
let first_attr = parse_attribute_naked(p, ast::attr_outer, lo);
ret some(left(~[first_attr] + parse_outer_attributes(p)));
} else if (!(p.peek() == token::LT || p.peek() == token::LBRACKET)) {
ret some(right(parse_syntax_ext_naked(p, lo)));
} else { ret none; }
} else { ret none; }
}
// Parse attributes that appear before an item
fn parse_outer_attributes(p: &parser) -> [ast::attribute] {
let attrs: [ast::attribute] = ~[];
while p.peek() == token::POUND {
attrs += ~[parse_attribute(p, ast::attr_outer)];
}
ret attrs;
}
fn parse_attribute(p: &parser, style: ast::attr_style) -> ast::attribute {
let lo = p.get_lo_pos();
expect(p, token::POUND);
ret parse_attribute_naked(p, style, lo);
}
fn parse_attribute_naked(p: &parser, style: ast::attr_style, lo: uint) ->
ast::attribute {
expect(p, token::LBRACKET);
let meta_item = parse_meta_item(p);
expect(p, token::RBRACKET);
let hi = p.get_hi_pos();
ret spanned(lo, hi, {style: style, value: *meta_item});
}
// Parse attributes that appear after the opening of an item, each terminated
// by a semicolon. In addition to a vector of inner attributes, this function
// also returns a vector that may contain the first outer attribute of the
// next item (since we can't know whether the attribute is an inner attribute
// of the containing item or an outer attribute of the first contained item
// until we see the semi).
fn parse_inner_attrs_and_next(p: &parser) ->
{inner: [ast::attribute], next: [ast::attribute]} {
let inner_attrs: [ast::attribute] = ~[];
let next_outer_attrs: [ast::attribute] = ~[];
while p.peek() == token::POUND {
let attr = parse_attribute(p, ast::attr_inner);
if p.peek() == token::SEMI {
p.bump();
inner_attrs += ~[attr];
} else {
// It's not really an inner attribute
let outer_attr =
spanned(attr.span.lo, attr.span.hi,
{style: ast::attr_outer, value: attr.node.value});
next_outer_attrs += ~[outer_attr];
break;
}
}
ret {inner: inner_attrs, next: next_outer_attrs};
}
fn parse_meta_item(p: &parser) -> @ast::meta_item {
let lo = p.get_lo_pos();
let ident = parse_ident(p);
alt p.peek() {
token::EQ. {
p.bump();
let lit = parse_lit(p);
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_name_value(ident, lit));
}
token::LPAREN. {
let inner_items = parse_meta_seq(p);
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_list(ident, inner_items));
}
_ {
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_word(ident));
}
}
}
fn parse_meta_seq(p: &parser) -> [@ast::meta_item] {
ret parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_meta_item, p).node;
}
fn parse_optional_meta(p: &parser) -> [@ast::meta_item] {
alt p.peek() { token::LPAREN. { ret parse_meta_seq(p); } _ { ret ~[]; } }
}
fn parse_use(p: &parser) -> ast::view_item_ {
let ident = parse_ident(p);
let metadata = parse_optional_meta(p);
ret ast::view_item_use(ident, metadata, p.get_id());
}
fn parse_rest_import_name(p: &parser, first: ast::ident,
def_ident: option::t[ast::ident]) ->
ast::view_item_ {
let identifiers: [ast::ident] = ~[first];
let glob: bool = false;
while true {
alt p.peek() {
token::SEMI. { break; }
token::MOD_SEP. {
if glob { p.fatal("cannot path into a glob"); }
p.bump();
}
_ { p.fatal("expecting '::' or ';'"); }
}
alt p.peek() {
token::IDENT(_, _) { identifiers += ~[parse_ident(p)]; }
//the lexer can't tell the different kinds of stars apart ) :
token::BINOP(token::STAR.) {
glob = true;
p.bump();
}
_ { p.fatal("expecting an identifier, or '*'"); }
}
}
alt def_ident {
some(i) {
if glob { p.fatal("globbed imports can't be renamed"); }
ret ast::view_item_import(i, identifiers, p.get_id());
}
_ {
if glob {
ret ast::view_item_import_glob(identifiers, p.get_id());
} else {
let len = ivec::len(identifiers);
ret ast::view_item_import(identifiers.(len - 1u), identifiers,
p.get_id());
}
}
}
}
fn parse_full_import_name(p: &parser, def_ident: ast::ident) ->
ast::view_item_ {
alt p.peek() {
token::IDENT(i, _) {
p.bump();
ret parse_rest_import_name(p, p.get_str(i), some(def_ident));
}
_ { p.fatal("expecting an identifier"); }
}
}
fn parse_import(p: &parser) -> ast::view_item_ {
alt p.peek() {
token::IDENT(i, _) {
p.bump();
alt p.peek() {
token::EQ. {
p.bump();
ret parse_full_import_name(p, p.get_str(i));
}
_ { ret parse_rest_import_name(p, p.get_str(i), none); }
}
}
_ { p.fatal("expecting an identifier"); }
}
}
fn parse_export(p: &parser) -> ast::view_item_ {
let id = parse_ident(p);
ret ast::view_item_export(id, p.get_id());
}
fn parse_view_item(p: &parser) -> @ast::view_item {
let lo = p.get_lo_pos();
let the_item =
if eat_word(p, "use") {
parse_use(p)
} else if (eat_word(p, "import")) {
parse_import(p)
} else if (eat_word(p, "export")) { parse_export(p) } else { fail };
let hi = p.get_lo_pos();
expect(p, token::SEMI);
ret @spanned(lo, hi, the_item);
}
fn is_view_item(p: &parser) -> bool {
alt p.peek() {
token::IDENT(sid, false) {
let st = p.get_str(sid);
ret str::eq(st, "use") || str::eq(st, "import") ||
str::eq(st, "export");
}
_ { ret false; }
}
}
fn parse_view(p: &parser) -> [@ast::view_item] {
let items: [@ast::view_item] = ~[];
while is_view_item(p) { items += ~[parse_view_item(p)]; }
ret items;
}
fn parse_native_view(p: &parser) -> [@ast::view_item] {
let items: [@ast::view_item] = ~[];
while is_view_item(p) { items += ~[parse_view_item(p)]; }
ret items;
}
fn parse_crate_from_source_file(input: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
let p = new_parser_from_file(sess, cfg, input, 0u, 0u, SOURCE_FILE);
ret parse_crate_mod(p, cfg, sess);
}
fn parse_crate_from_source_str(name: &str, source: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
let ftype = SOURCE_FILE;
let filemap = codemap::new_filemap(name, 0u, 0u);
sess.cm.files += ~[filemap];
let itr = @interner::mk(str::hash, str::eq);
let rdr = lexer::new_reader(sess.cm, source, filemap, itr);
let p = new_parser(sess, cfg, rdr, ftype);
ret parse_crate_mod(p, cfg, sess);
}
// Parses a source module as a crate
fn parse_crate_mod(p: &parser, cfg: &ast::crate_cfg, sess: parse_sess) ->
@ast::crate {
let lo = p.get_lo_pos();
let crate_attrs = parse_inner_attrs_and_next(p);
let first_item_outer_attrs = crate_attrs.next;
let m = parse_mod_items(p, token::EOF, first_item_outer_attrs);
ret @spanned(lo, p.get_lo_pos(),
{directives: ~[],
module: m,
attrs: crate_attrs.inner,
config: p.get_cfg()});
}
fn parse_str(p: &parser) -> ast::ident {
alt p.peek() {
token::LIT_STR(s) { p.bump(); ret p.get_str(s); }
_ { fail; }
}
}
// Logic for parsing crate files (.rc)
//
// Each crate file is a sequence of directives.
//
// Each directive imperatively extends its environment with 0 or more items.
fn parse_crate_directive(p: &parser, first_outer_attr: &[ast::attribute]) ->
ast::crate_directive {
// Collect the next attributes
let outer_attrs = first_outer_attr + parse_outer_attributes(p);
// In a crate file outer attributes are only going to apply to mods
let expect_mod = ivec::len(outer_attrs) > 0u;
let lo = p.get_lo_pos();
if expect_mod || is_word(p, "mod") {
expect_word(p, "mod");
let id = parse_ident(p);
let file_opt =
alt p.peek() {
token::EQ. { p.bump(); some(parse_str(p)) }
_ { none }
};
alt p.peek() {
// mod x = "foo.rs";
token::SEMI. {
let hi = p.get_hi_pos();
p.bump();
ret spanned(lo, hi, ast::cdir_src_mod(id, file_opt, outer_attrs));
}
// mod x = "foo_dir" { ...directives... }
token::LBRACE. {
p.bump();
let inner_attrs = parse_inner_attrs_and_next(p);
let mod_attrs = outer_attrs + inner_attrs.inner;
let next_outer_attr = inner_attrs.next;
let cdirs =
parse_crate_directives(p, token::RBRACE, next_outer_attr);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret spanned(lo, hi,
ast::cdir_dir_mod(id, file_opt, cdirs, mod_attrs));
}
t { unexpected(p, t); }
}
} else if (eat_word(p, "auth")) {
let n = parse_path(p);
expect(p, token::EQ);
let a = parse_auth(p);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret spanned(lo, hi, ast::cdir_auth(n, a));
} else if (is_view_item(p)) {
let vi = parse_view_item(p);
ret spanned(lo, vi.span.hi, ast::cdir_view_item(vi));
} else { ret p.fatal("expected crate directive"); }
}
fn parse_crate_directives(p: &parser, term: token::token,
first_outer_attr: &[ast::attribute]) ->
[@ast::crate_directive] {
// This is pretty ugly. If we have an outer attribute then we can't accept
// seeing the terminator next, so if we do see it then fail the same way
// parse_crate_directive would
if ivec::len(first_outer_attr) > 0u && p.peek() == term {
expect_word(p, "mod");
}
let cdirs: [@ast::crate_directive] = ~[];
while p.peek() != term {
let cdir = @parse_crate_directive(p, first_outer_attr);
cdirs += ~[cdir];
}
ret cdirs;
}
fn parse_crate_from_crate_file(input: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
let p = new_parser_from_file(sess, cfg, input, 0u, 0u, CRATE_FILE);
let lo = p.get_lo_pos();
let prefix = std::fs::dirname(p.get_filemap().name);
let leading_attrs = parse_inner_attrs_and_next(p);
let crate_attrs = leading_attrs.inner;
let first_cdir_attr = leading_attrs.next;
let cdirs = parse_crate_directives(p, token::EOF, first_cdir_attr);
let deps: [str] = ~[];
let cx =
@{p: p,
mode: eval::mode_parse,
mutable deps: deps,
sess: sess,
mutable chpos: p.get_chpos(),
mutable byte_pos: p.get_byte_pos(),
cfg: p.get_cfg()};
let m = eval::eval_crate_directives_to_mod(cx, cdirs, prefix);
let hi = p.get_hi_pos();
expect(p, token::EOF);
ret @spanned(lo, hi,
{directives: cdirs,
module: m,
attrs: crate_attrs,
config: p.get_cfg()});
}
fn parse_crate_from_file(input: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
if str::ends_with(input, ".rc") {
parse_crate_from_crate_file(input, cfg, sess)
} else if str::ends_with(input, ".rs") {
parse_crate_from_source_file(input, cfg, sess)
} else {
codemap::emit_error(none,
"unknown input file type: " + input,
sess.cm);
fail
}
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//
Revert "Revert "Make [] and ~[] both construct ivecs""
This reverts commit 28bf19021981bd41a365aaa684c71afd2cf079d7.
Should work now that tests are all using ivecs
import std::io;
import std::ivec;
import std::str;
import std::option;
import std::option::some;
import std::option::none;
import std::either;
import std::either::left;
import std::either::right;
import std::map::hashmap;
import token::can_begin_expr;
import ex = ext::base;
import codemap::span;
import std::map::new_str_hash;
import util::interner;
import ast::node_id;
import ast::spanned;
tag restriction { UNRESTRICTED; RESTRICT_NO_CALL_EXPRS; }
tag file_type { CRATE_FILE; SOURCE_FILE; }
tag ty_or_bang { a_ty(@ast::ty); a_bang; }
type parse_sess = @{cm: codemap::codemap, mutable next_id: node_id};
fn next_node_id(sess: &parse_sess) -> node_id {
let rv = sess.next_id;
sess.next_id += 1;
ret rv;
}
type parser =
obj {
fn peek() -> token::token ;
fn bump() ;
fn look_ahead(uint) -> token::token ;
fn fatal(str) -> ! ;
fn warn(str) ;
fn restrict(restriction) ;
fn get_restriction() -> restriction ;
fn get_file_type() -> file_type ;
fn get_cfg() -> ast::crate_cfg ;
fn get_span() -> span ;
fn get_lo_pos() -> uint ;
fn get_hi_pos() -> uint ;
fn get_last_lo_pos() -> uint ;
fn get_last_hi_pos() -> uint ;
fn get_prec_table() -> @[op_spec] ;
fn get_str(token::str_num) -> str ;
fn get_reader() -> lexer::reader ;
fn get_filemap() -> codemap::filemap ;
fn get_bad_expr_words() -> hashmap[str, ()] ;
fn get_chpos() -> uint ;
fn get_byte_pos() -> uint ;
fn get_id() -> node_id ;
fn get_sess() -> parse_sess ;
};
fn new_parser_from_file(sess: parse_sess, cfg:
ast::crate_cfg, path: str,
chpos: uint, byte_pos: uint,
ftype: file_type) -> parser {
let src = io::read_whole_file_str(path);
let filemap = codemap::new_filemap(path, chpos, byte_pos);
sess.cm.files += ~[filemap];
let itr = @interner::mk(str::hash, str::eq);
let rdr = lexer::new_reader(sess.cm, src, filemap, itr);
ret new_parser(sess, cfg, rdr, ftype);
}
fn new_parser(sess: parse_sess, cfg: ast::crate_cfg, rdr: lexer::reader,
ftype: file_type) -> parser {
obj stdio_parser(sess: parse_sess,
cfg: ast::crate_cfg,
ftype: file_type,
mutable tok: token::token,
mutable tok_span: span,
mutable last_tok_span: span,
mutable buffer: [{tok: token::token, span: span}],
mutable restr: restriction,
rdr: lexer::reader,
precs: @[op_spec],
bad_words: hashmap[str, ()]) {
fn peek() -> token::token { ret tok; }
fn bump() {
last_tok_span = tok_span;
if ivec::len(buffer) == 0u {
let next = lexer::next_token(rdr);
tok = next.tok;
tok_span = ast::mk_sp(next.chpos, rdr.get_chpos());
} else {
let next = ivec::pop(buffer);
tok = next.tok;
tok_span = next.span;
}
}
fn look_ahead(distance: uint) -> token::token {
while ivec::len(buffer) < distance {
let next = lexer::next_token(rdr);
let sp = ast::mk_sp(next.chpos, rdr.get_chpos());
buffer = ~[{tok: next.tok, span: sp}] + buffer;
}
ret buffer.(distance - 1u).tok;
}
fn fatal(m: str) -> ! {
codemap::emit_error(some(self.get_span()), m, sess.cm);
fail;
}
fn warn(m: str) {
codemap::emit_warning(some(self.get_span()), m, sess.cm);
}
fn restrict(r: restriction) { restr = r; }
fn get_restriction() -> restriction { ret restr; }
fn get_span() -> span { ret tok_span; }
fn get_lo_pos() -> uint { ret tok_span.lo; }
fn get_hi_pos() -> uint { ret tok_span.hi; }
fn get_last_lo_pos() -> uint { ret last_tok_span.lo; }
fn get_last_hi_pos() -> uint { ret last_tok_span.hi; }
fn get_file_type() -> file_type { ret ftype; }
fn get_cfg() -> ast::crate_cfg { ret cfg; }
fn get_prec_table() -> @[op_spec] { ret precs; }
fn get_str(i: token::str_num) -> str {
ret interner::get(*rdr.get_interner(), i);
}
fn get_reader() -> lexer::reader { ret rdr; }
fn get_filemap() -> codemap::filemap { ret rdr.get_filemap(); }
fn get_bad_expr_words() -> hashmap[str, ()] { ret bad_words; }
fn get_chpos() -> uint { ret rdr.get_chpos(); }
fn get_byte_pos() -> uint { ret rdr.get_byte_pos(); }
fn get_id() -> node_id { ret next_node_id(sess); }
fn get_sess() -> parse_sess { ret sess; }
}
let tok0 = lexer::next_token(rdr);
let span0 = ast::mk_sp(tok0.chpos, rdr.get_chpos());
ret stdio_parser(sess, cfg, ftype, tok0.tok, span0, span0, ~[],
UNRESTRICTED, rdr, prec_table(), bad_expr_word_table());
}
// These are the words that shouldn't be allowed as value identifiers,
// because, if used at the start of a line, they will cause the line to be
// interpreted as a specific kind of statement, which would be confusing.
fn bad_expr_word_table() -> hashmap[str, ()] {
let words = new_str_hash();
words.insert("mod", ());
words.insert("if", ());
words.insert("else", ());
words.insert("while", ());
words.insert("do", ());
words.insert("alt", ());
words.insert("for", ());
words.insert("each", ());
words.insert("break", ());
words.insert("cont", ());
words.insert("put", ());
words.insert("ret", ());
words.insert("be", ());
words.insert("fail", ());
words.insert("type", ());
words.insert("resource", ());
words.insert("check", ());
words.insert("assert", ());
words.insert("claim", ());
words.insert("prove", ());
words.insert("native", ());
words.insert("fn", ());
words.insert("block", ());
words.insert("lambda", ());
words.insert("pred", ());
words.insert("iter", ());
words.insert("block", ());
words.insert("import", ());
words.insert("export", ());
words.insert("let", ());
words.insert("const", ());
words.insert("log", ());
words.insert("log_err", ());
words.insert("tag", ());
words.insert("obj", ());
words.insert("copy", ());
ret words;
}
fn unexpected(p: &parser, t: token::token) -> ! {
let s: str = "unexpected token: ";
s += token::to_str(p.get_reader(), t);
p.fatal(s);
}
fn expect(p: &parser, t: token::token) {
if p.peek() == t {
p.bump();
} else {
let s: str = "expecting ";
s += token::to_str(p.get_reader(), t);
s += ", found ";
s += token::to_str(p.get_reader(), p.peek());
p.fatal(s);
}
}
fn spanned[T](lo: uint, hi: uint, node: &T) -> spanned[T] {
ret {node: node, span: ast::mk_sp(lo, hi)};
}
fn parse_ident(p: &parser) -> ast::ident {
alt p.peek() {
token::IDENT(i, _) { p.bump(); ret p.get_str(i); }
_ { p.fatal("expecting ident"); }
}
}
fn parse_value_ident(p: &parser) -> ast::ident {
check_bad_word(p);
ret parse_ident(p);
}
fn eat(p: &parser, tok: &token::token) -> bool {
ret if p.peek() == tok { p.bump(); true } else { false };
}
fn is_word(p: &parser, word: &str) -> bool {
ret alt p.peek() {
token::IDENT(sid, false) { str::eq(word, p.get_str(sid)) }
_ { false }
};
}
fn eat_word(p: &parser, word: &str) -> bool {
alt p.peek() {
token::IDENT(sid, false) {
if str::eq(word, p.get_str(sid)) {
p.bump();
ret true;
} else { ret false; }
}
_ { ret false; }
}
}
fn expect_word(p: &parser, word: &str) {
if !eat_word(p, word) {
p.fatal("expecting " + word + ", found " +
token::to_str(p.get_reader(), p.peek()));
}
}
fn check_bad_word(p: &parser) {
alt p.peek() {
token::IDENT(sid, false) {
let w = p.get_str(sid);
if p.get_bad_expr_words().contains_key(w) {
p.fatal("found " + w + " in expression position");
}
}
_ { }
}
}
fn parse_ty_fn(proto: ast::proto, p: &parser, lo: uint) -> ast::ty_ {
fn parse_fn_input_ty(p: &parser) -> ast::ty_arg {
let lo = p.get_lo_pos();
// Ignore arg name, if present
if is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
p.bump();
p.bump();
}
let mode = ast::val;
if p.peek() == token::BINOP(token::AND) {
p.bump();
mode = ast::alias(eat_word(p, "mutable"));
}
let t = parse_ty(p, false);
ret spanned(lo, t.span.hi, {mode: mode, ty: t});
}
let lo = p.get_lo_pos();
let inputs =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_fn_input_ty, p);
// FIXME: there's no syntax for this right now anyway
// auto constrs = parse_constrs(~[], p);
let constrs: [@ast::constr] = ~[];
let output: @ast::ty;
let cf = ast::return;
if p.peek() == token::RARROW {
p.bump();
let tmp = parse_ty_or_bang(p);
alt tmp {
a_ty(t) { output = t; }
a_bang. {
output = @spanned(lo, inputs.span.hi, ast::ty_bot);
cf = ast::noreturn;
}
}
} else { output = @spanned(lo, inputs.span.hi, ast::ty_nil); }
ret ast::ty_fn(proto, inputs.node, output, cf, constrs);
}
fn parse_proto(p: &parser) -> ast::proto {
if eat_word(p, "iter") {
ret ast::proto_iter;
} else if (eat_word(p, "fn")) {
ret ast::proto_fn;
} else if (eat_word(p, "block")) {
ret ast::proto_block;
} else if (eat_word(p, "pred")) {
ret ast::proto_fn;
} else { unexpected(p, p.peek()); }
}
fn parse_ty_obj(p: &parser, hi: &mutable uint) -> ast::ty_ {
fn parse_method_sig(p: &parser) -> ast::ty_method {
let flo = p.get_lo_pos();
let proto: ast::proto = parse_proto(p);
let ident = parse_value_ident(p);
let f = parse_ty_fn(proto, p, flo);
expect(p, token::SEMI);
alt f {
ast::ty_fn(proto, inputs, output, cf, constrs) {
ret spanned(flo, output.span.hi,
{proto: proto,
ident: ident,
inputs: inputs,
output: output,
cf: cf,
constrs: constrs});
}
}
}
let meths =
parse_seq(token::LBRACE, token::RBRACE, none, parse_method_sig, p);
hi = meths.span.hi;
ret ast::ty_obj(meths.node);
}
fn parse_mt(p: &parser) -> ast::mt {
let mut = parse_mutability(p);
let t = parse_ty(p, false);
ret {ty: t, mut: mut};
}
fn parse_ty_field(p: &parser) -> ast::ty_field {
let lo = p.get_lo_pos();
let mut = parse_mutability(p);
let id = parse_ident(p);
expect(p, token::COLON);
let ty = parse_ty(p, false);
ret spanned(lo, ty.span.hi, {ident: id, mt: {ty: ty, mut: mut}});
}
// if i is the jth ident in args, return j
// otherwise, fail
fn ident_index(p: &parser, args: &[ast::arg], i: &ast::ident) -> uint {
let j = 0u;
for a: ast::arg in args { if a.ident == i { ret j; } j += 1u; }
p.fatal("Unbound variable " + i + " in constraint arg");
}
fn parse_type_constr_arg(p: &parser) -> @ast::ty_constr_arg {
let sp = p.get_span();
let carg = ast::carg_base;
expect(p, token::BINOP(token::STAR));
if p.peek() == token::DOT {
// "*..." notation for record fields
p.bump();
let pth: ast::path = parse_path(p);
carg = ast::carg_ident(pth);
}
// No literals yet, I guess?
ret @{node: carg, span: sp};
}
fn parse_constr_arg(args: &[ast::arg], p: &parser) -> @ast::constr_arg {
let sp = p.get_span();
let carg = ast::carg_base;
if p.peek() == token::BINOP(token::STAR) {
p.bump();
} else {
let i: ast::ident = parse_value_ident(p);
carg = ast::carg_ident(ident_index(p, args, i));
}
ret @{node: carg, span: sp};
}
fn parse_ty_constr(fn_args: &[ast::arg], p: &parser) -> @ast::constr {
let lo = p.get_lo_pos();
let path = parse_path(p);
let pf = bind parse_constr_arg(fn_args, _);
let args: {node: [@ast::constr_arg], span: span} =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA), pf, p);
ret @spanned(lo, args.span.hi,
{path: path, args: args.node, id: p.get_id()});
}
fn parse_constr_in_type(p: &parser) -> @ast::ty_constr {
let lo = p.get_lo_pos();
let path = parse_path(p);
let args: [@ast::ty_constr_arg] =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_type_constr_arg, p).node;
let hi = p.get_lo_pos();
let tc: ast::ty_constr_ = {path: path, args: args, id: p.get_id()};
ret @spanned(lo, hi, tc);
}
fn parse_constrs[T](pser: fn(&parser) -> @ast::constr_general[T] , p: &parser)
-> [@ast::constr_general[T]] {
let constrs: [@ast::constr_general[T]] = ~[];
while true {
let constr = pser(p);
constrs += ~[constr];
if p.peek() == token::COMMA { p.bump(); } else { break; }
}
constrs
}
fn parse_type_constraints(p: &parser) -> [@ast::ty_constr] {
ret parse_constrs(parse_constr_in_type, p);
}
fn parse_ty_postfix(orig_t: ast::ty_, p: &parser, colons_before_params: bool)
-> @ast::ty {
let lo = p.get_lo_pos();
let end;
if p.peek() == token::LBRACKET {
p.bump();
end = token::RBRACKET;
} else if colons_before_params && p.peek() == token::MOD_SEP {
p.bump();
expect(p, token::LT);
end = token::GT;
} else if !colons_before_params && p.peek() == token::LT {
p.bump();
end = token::GT;
} else {
ret @spanned(lo, p.get_lo_pos(), orig_t);
}
// If we're here, we have explicit type parameter instantiation.
let seq = parse_seq_to_end(end, some(token::COMMA),
bind parse_ty(_, false), p);
alt orig_t {
ast::ty_path(pth, ann) {
let hi = p.get_hi_pos();
ret @spanned(lo, hi,
ast::ty_path(spanned(lo, hi,
{global: pth.node.global,
idents: pth.node.idents,
types: seq}), ann));
}
_ {
p.fatal("type parameter instantiation only allowed for paths");
}
}
}
fn parse_ty_or_bang(p: &parser) -> ty_or_bang {
alt p.peek() {
token::NOT. { p.bump(); ret a_bang; }
_ { ret a_ty(parse_ty(p, false)); }
}
}
// Parse one type expression. `colons_before_params` is threaded through to
// parse_ty_postfix, which handles trailing type-parameter lists.
// NOTE(review): `hi` is updated by many branches below but never passed to
// parse_ty_postfix at the end -- the final span is presumably rebuilt there;
// confirm before relying on `hi` here.
fn parse_ty(p: &parser, colons_before_params: bool) -> @ast::ty {
let lo = p.get_lo_pos();
let hi = lo;
let t: ast::ty_;
// FIXME: do something with this
// Fixed-name primitive types are recognized as keywords first.
if eat_word(p, "bool") {
t = ast::ty_bool;
} else if (eat_word(p, "int")) {
t = ast::ty_int;
} else if (eat_word(p, "uint")) {
t = ast::ty_uint;
} else if (eat_word(p, "float")) {
t = ast::ty_float;
} else if (eat_word(p, "str")) {
t = ast::ty_str;
} else if (eat_word(p, "istr")) {
t = ast::ty_istr;
} else if (eat_word(p, "char")) {
t = ast::ty_char;
/*
} else if (eat_word(p, "task")) {
t = ast::ty_task;
*/
} else if (eat_word(p, "i8")) {
t = ast::ty_machine(ast::ty_i8);
} else if (eat_word(p, "i16")) {
t = ast::ty_machine(ast::ty_i16);
} else if (eat_word(p, "i32")) {
t = ast::ty_machine(ast::ty_i32);
} else if (eat_word(p, "i64")) {
t = ast::ty_machine(ast::ty_i64);
} else if (eat_word(p, "u8")) {
t = ast::ty_machine(ast::ty_u8);
} else if (eat_word(p, "u16")) {
t = ast::ty_machine(ast::ty_u16);
} else if (eat_word(p, "u32")) {
t = ast::ty_machine(ast::ty_u32);
} else if (eat_word(p, "u64")) {
t = ast::ty_machine(ast::ty_u64);
} else if (eat_word(p, "f32")) {
t = ast::ty_machine(ast::ty_f32);
} else if (eat_word(p, "f64")) {
t = ast::ty_machine(ast::ty_f64);
} else if (p.peek() == token::LPAREN) {
// `()` is nil; `(T)` is just T; `(T, U, ...)` is a tuple type.
p.bump();
if p.peek() == token::RPAREN {
hi = p.get_hi_pos();
p.bump();
t = ast::ty_nil;
} else {
let ts = ~[parse_ty(p, false)];
while p.peek() == token::COMMA {
p.bump();
ts += ~[parse_ty(p, false)];
}
if ivec::len(ts) == 1u {
t = ts.(0).node;
} else {
t = ast::ty_tup(ts);
}
hi = p.get_hi_pos();
expect(p, token::RPAREN);
}
} else if (p.peek() == token::AT) {
// `@T` -- boxed type (with optional mutability from parse_mt).
p.bump();
let mt = parse_mt(p);
hi = mt.ty.span.hi;
t = ast::ty_box(mt);
} else if (p.peek() == token::BINOP(token::STAR)) {
// `*T` -- raw pointer type.
p.bump();
let mt = parse_mt(p);
hi = mt.ty.span.hi;
t = ast::ty_ptr(mt);
} else if (p.peek() == token::LBRACE) {
// `{f1: T1, ...}` record type, optionally followed by `:` constraints.
let elems =
parse_seq(token::LBRACE, token::RBRACE, some(token::COMMA),
parse_ty_field, p);
hi = elems.span.hi;
t = ast::ty_rec(elems.node);
if p.peek() == token::COLON {
p.bump();
t =
ast::ty_constr(@spanned(lo, hi, t),
parse_type_constraints(p));
}
} else if (eat_word(p, "vec")) {
// `vec[T]` -- old-style (boxed) vector.
expect(p, token::LBRACKET);
t = ast::ty_vec(parse_mt(p));
hi = p.get_hi_pos();
expect(p, token::RBRACKET);
} else if (p.peek() == token::LBRACKET) {
// `[T]` -- interior vector.
expect(p, token::LBRACKET);
t = ast::ty_ivec(parse_mt(p));
hi = p.get_hi_pos();
expect(p, token::RBRACKET);
} else if (eat_word(p, "fn")) {
let flo = p.get_last_lo_pos();
t = parse_ty_fn(ast::proto_fn, p, flo);
alt t { ast::ty_fn(_, _, out, _, _) { hi = out.span.hi; } }
} else if (eat_word(p, "block")) {
let flo = p.get_last_lo_pos();
t = parse_ty_fn(ast::proto_block, p, flo);
alt t { ast::ty_fn(_, _, out, _, _) { hi = out.span.hi; } }
} else if (eat_word(p, "iter")) {
let flo = p.get_last_lo_pos();
t = parse_ty_fn(ast::proto_iter, p, flo);
alt t { ast::ty_fn(_, _, out, _, _) { hi = out.span.hi; } }
} else if (eat_word(p, "obj")) {
t = parse_ty_obj(p, hi);
} else if (eat_word(p, "mutable")) {
// Deprecated prefix form: warn and parse the underlying type unchanged.
p.warn("ignoring deprecated 'mutable' type constructor");
let typ = parse_ty(p, false);
t = typ.node;
hi = typ.span.hi;
} else if (p.peek() == token::MOD_SEP || is_ident(p.peek())) {
// Anything else that starts like a path is a named type.
let path = parse_path(p);
t = ast::ty_path(path, p.get_id());
hi = path.span.hi;
} else { p.fatal("expecting type"); }
ret parse_ty_postfix(t, p, colons_before_params);
}
// Parse an argument-passing mode prefix: `&` for alias (optionally
// `&mutable`), `-` for move, or nothing for by-value.
fn parse_arg_mode(p: &parser) -> ast::mode {
if eat(p, token::BINOP(token::AND)) {
ast::alias(eat_word(p, "mutable"))
} else if eat(p, token::BINOP(token::MINUS)) {
ast::move
} else {
ast::val
}
}
// Parse one fn-declaration argument: `ident: MODE TYPE`. Note the mode
// comes after the colon in this grammar.
fn parse_arg(p: &parser) -> ast::arg {
let i = parse_value_ident(p);
expect(p, token::COLON);
let m = parse_arg_mode(p);
let t = parse_ty(p, false);
ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parse a `|...|` block argument: mode then ident, with the type always
// left as ty_infer (block args are never annotated).
fn parse_fn_block_arg(p: &parser) -> ast::arg {
let m = parse_arg_mode(p);
let i = parse_value_ident(p);
let t = @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer);
ret {mode: m, ty: t, ident: i, id: p.get_id()};
}
// Parse a `sep`-separated sequence of `f` items up to AND INCLUDING the
// closing token `ket` (which is consumed by the final bump).
fn parse_seq_to_end[T](ket: token::token, sep: option::t[token::token],
f: fn(&parser) -> T , p: &parser) -> [T] {
let val = parse_seq_to_before_end(ket, sep, f, p);
p.bump();
ret val;
}
// Parse a `sep`-separated sequence of `f` items, stopping just BEFORE the
// closing token `ket` (caller consumes it). Separators are required
// between items but not before the first one.
fn parse_seq_to_before_end[T](ket: token::token, sep: option::t[token::token],
f: fn(&parser) -> T , p: &parser) -> [T] {
let first: bool = true;
let v: [T] = ~[];
while p.peek() != ket {
alt sep {
some(t) { if first { first = false; } else { expect(p, t); } }
_ { }
}
v += ~[f(p)];
}
ret v;
}
// Parse a bracketed, separated sequence `bra item sep item ... ket` and
// return the items together with the span covering the whole sequence.
fn parse_seq[T](bra: token::token, ket: token::token,
sep: option::t[token::token], f: fn(&parser) -> T ,
p: &parser) -> spanned[[T]] {
let lo = p.get_lo_pos();
expect(p, bra);
let result = parse_seq_to_before_end[T](ket, sep, f, p);
let hi = p.get_hi_pos();
p.bump();
ret spanned(lo, hi, result);
}
// Parse a literal: true/false keywords, numeric/char/string literal
// tokens, or `()` as the nil literal. Anything else is a parse error.
fn parse_lit(p: &parser) -> ast::lit {
let sp = p.get_span();
let lit: ast::lit_ = ast::lit_nil;
if eat_word(p, "true") {
lit = ast::lit_bool(true);
} else if (eat_word(p, "false")) {
lit = ast::lit_bool(false);
} else {
alt p.peek() {
token::LIT_INT(i) { p.bump(); lit = ast::lit_int(i); }
token::LIT_UINT(u) { p.bump(); lit = ast::lit_uint(u); }
token::LIT_FLOAT(s) {
p.bump();
// Float payloads are interned strings; resolve via the parser.
lit = ast::lit_float(p.get_str(s));
}
token::LIT_MACH_INT(tm, i) {
p.bump();
lit = ast::lit_mach_int(tm, i);
}
token::LIT_MACH_FLOAT(tm, s) {
p.bump();
lit = ast::lit_mach_float(tm, p.get_str(s));
}
token::LIT_CHAR(c) { p.bump(); lit = ast::lit_char(c); }
token::LIT_STR(s) {
p.bump();
lit = ast::lit_str(p.get_str(s), ast::sk_rc);
}
token::LPAREN. {
// `()` parses as the nil literal.
p.bump();
expect(p, token::RPAREN);
lit = ast::lit_nil;
}
t { unexpected(p, t); }
}
}
ret {node: lit, span: sp};
}
fn is_ident(t: token::token) -> bool {
alt t { token::IDENT(_, _) { ret true; } _ { } }
ret false;
}
// True when the next token is a plain (non-keyword-embedded) identifier.
fn is_plain_ident(p: &parser) -> bool {
ret alt p.peek() { token::IDENT(_, false) { true } _ { false } };
}
// Parse a (possibly ::-rooted) path: `[::] ident (:: ident)*`, stopping
// before a `::<` so that type-parameter substitutions can be handled by
// the caller (see parse_path_and_ty_param_substs). The returned path
// always has an empty `types` list.
// Fix: the original assigned `hi = p.get_hi_pos();` twice in a row with no
// token consumed in between; the redundant first assignment is removed.
fn parse_path(p: &parser) -> ast::path {
let lo = p.get_lo_pos();
let hi = lo;
let global;
if p.peek() == token::MOD_SEP {
// Leading `::` marks a crate-root (global) path.
global = true;
p.bump();
} else { global = false; }
let ids: [ast::ident] = ~[];
while true {
alt p.peek() {
token::IDENT(i, _) {
ids += ~[p.get_str(i)];
// Span ends at this segment until we find another `::` segment.
hi = p.get_hi_pos();
p.bump();
// `::` followed by `<` belongs to a ty-param list, so stop here.
if p.peek() == token::MOD_SEP && p.look_ahead(1u) != token::LT {
p.bump();
} else {
break;
}
}
_ { break; }
}
}
ret spanned(lo, hi, {global: global, idents: ids, types: ~[]});
}
// Parse a path optionally followed by `[T1, T2, ...]` type-parameter
// substitutions; the substituted types replace the path's empty `types`.
fn parse_path_and_ty_param_substs(p: &parser) -> ast::path {
let lo = p.get_lo_pos();
let path = parse_path(p);
if p.peek() == token::LBRACKET {
let seq =
parse_seq(token::LBRACKET, token::RBRACKET, some(token::COMMA),
bind parse_ty(_, false), p);
let hi = seq.span.hi;
// Rebuild the spanned path with the parsed type arguments attached.
path =
spanned(lo, hi,
{global: path.node.global,
idents: path.node.idents,
types: seq.node});
}
ret path;
}
// Parse an optional mutability prefix: `mutable?` -> maybe_mut,
// `mutable` -> mut, nothing -> imm.
fn parse_mutability(p: &parser) -> ast::mutability {
if eat_word(p, "mutable") {
if p.peek() == token::QUES { p.bump(); ret ast::maybe_mut; }
ret ast::mut;
}
ret ast::imm;
}
// Parse one record-expression field: `[mutable] ident SEP expr`, where the
// separator token is supplied by the caller (e.g. COLON).
fn parse_field(p: &parser, sep: &token::token) -> ast::field {
let lo = p.get_lo_pos();
let m = parse_mutability(p);
let i = parse_ident(p);
expect(p, sep);
let e = parse_expr(p);
ret spanned(lo, e.span.hi, {mut: m, ident: i, expr: e});
}
// Allocate an expression node with a fresh node id and the given span.
fn mk_expr(p: &parser, lo: uint, hi: uint, node: &ast::expr_) -> @ast::expr {
ret @{id: p.get_id(), node: node, span: ast::mk_sp(lo, hi)};
}
// Allocate a macro-invocation expression; the same span is used for both
// the mac node and the wrapping expression.
fn mk_mac_expr(p: &parser, lo: uint, hi: uint, m: &ast::mac_) -> @ast::expr {
ret @{id: p.get_id(),
node: ast::expr_mac({node: m, span: ast::mk_sp(lo, hi)}),
span: ast::mk_sp(lo, hi)};
}
// Parse a "bottom" (primary) expression: literals, parenthesized/tuple
// exprs, records/blocks, control-flow keywords, vectors, macro forms,
// unique (~) forms, anonymous objects, bind, and paths. Several branches
// return early via specialized constructors; the rest set `ex`/`hi` and
// fall through to the final mk_expr.
fn parse_bottom_expr(p: &parser) -> @ast::expr {
let lo = p.get_lo_pos();
let hi = p.get_hi_pos();
let ex: ast::expr_;
if p.peek() == token::LPAREN {
// `()` nil literal, `(e)` grouping, `(e, ...)` tuple.
p.bump();
if p.peek() == token::RPAREN {
hi = p.get_hi_pos();
p.bump();
let lit = @spanned(lo, hi, ast::lit_nil);
ret mk_expr(p, lo, hi, ast::expr_lit(lit));
}
let es = ~[parse_expr(p)];
while p.peek() == token::COMMA {
p.bump();
es += ~[parse_expr(p)];
}
hi = p.get_hi_pos();
expect(p, token::RPAREN);
if ivec::len(es) == 1u {
ret mk_expr(p, lo, hi, es.(0).node);
} else {
ret mk_expr(p, lo, hi, ast::expr_tup(es));
}
} else if (p.peek() == token::LBRACE) {
p.bump();
// Disambiguate record literal vs. fn-block vs. plain block by lookahead.
if is_word(p, "mutable") ||
is_plain_ident(p) && p.look_ahead(1u) == token::COLON {
let fields = ~[parse_field(p, token::COLON)];
let base = none;
while p.peek() != token::RBRACE {
// `with base` functional-record-update terminates the field list.
if eat_word(p, "with") { base = some(parse_expr(p)); break; }
expect(p, token::COMMA);
fields += ~[parse_field(p, token::COLON)];
}
hi = p.get_hi_pos();
expect(p, token::RBRACE);
ex = ast::expr_rec(fields, base);
} else if p.peek() == token::BINOP(token::OR) {
ret parse_fn_block_expr(p);
} else {
let blk = parse_block_tail(p, lo);
ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
}
} else if (eat_word(p, "if")) {
ret parse_if_expr(p);
} else if (eat_word(p, "for")) {
ret parse_for_expr(p);
} else if (eat_word(p, "while")) {
ret parse_while_expr(p);
} else if (eat_word(p, "do")) {
ret parse_do_while_expr(p);
} else if (eat_word(p, "alt")) {
ret parse_alt_expr(p);
/*
} else if (eat_word(p, "spawn")) {
ret parse_spawn_expr(p);
*/
} else if (eat_word(p, "fn")) {
ret parse_fn_expr(p, ast::proto_fn);
} else if (eat_word(p, "block")) {
ret parse_fn_expr(p, ast::proto_block);
} else if (eat_word(p, "lambda")) {
ret parse_fn_expr(p, ast::proto_closure);
} else if (p.peek() == token::LBRACKET) {
// `[mutability? e, ...]` vector literal.
p.bump();
let mut = parse_mutability(p);
let es =
parse_seq_to_end(token::RBRACKET, some(token::COMMA), parse_expr,
p);
ex = ast::expr_vec(es, mut, ast::sk_unique);
} else if (p.peek() == token::POUND_LT) {
// `#<ty>` embedded-type macro form.
p.bump();
let ty = parse_ty(p, false);
expect(p, token::GT);
/* hack: early return to take advantage of specialized function */
ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_embed_type(ty))
} else if (p.peek() == token::POUND_LBRACE) {
// `#{ ... }` embedded-block macro form.
p.bump();
let blk = ast::mac_embed_block(parse_block_tail(p, lo));
ret mk_mac_expr(p, lo, p.get_hi_pos(), blk);
} else if (p.peek() == token::ELLIPSIS) {
p.bump();
ret mk_mac_expr(p, lo, p.get_hi_pos(), ast::mac_ellipsis)
} else if (p.peek() == token::TILDE) {
// `~` unique forms: vector, string, or generic unique expression.
p.bump();
alt p.peek() {
token::LBRACKET. { // unique array (temporary)
p.bump();
let mut = parse_mutability(p);
let es =
parse_seq_to_end(token::RBRACKET, some(token::COMMA),
parse_expr, p);
ex = ast::expr_vec(es, mut, ast::sk_unique);
}
token::LIT_STR(s) {
p.bump();
let lit =
@{node: ast::lit_str(p.get_str(s), ast::sk_unique),
span: p.get_span()};
ex = ast::expr_lit(lit);
}
_ { ex = ast::expr_uniq(parse_expr(p)); }
}
} else if (eat_word(p, "obj")) {
// Anonymous object
// Only make people type () if they're actually adding new fields
let fields: option::t[[ast::anon_obj_field]] = none;
if p.peek() == token::LPAREN {
p.bump();
fields =
some(parse_seq_to_end(token::RPAREN, some(token::COMMA),
parse_anon_obj_field, p));
}
let meths: [@ast::method] = ~[];
let inner_obj: option::t[@ast::expr] = none;
expect(p, token::LBRACE);
while p.peek() != token::RBRACE {
if eat_word(p, "with") {
inner_obj = some(parse_expr(p));
} else { meths += ~[parse_method(p)]; }
}
hi = p.get_hi_pos();
expect(p, token::RBRACE);
// fields and methods may be *additional* or *overriding* fields
// and methods if there's a inner_obj, or they may be the *only*
// fields and methods if there's no inner_obj.
// We don't need to pull ".node" out of fields because it's not a
// "spanned".
let ob = {fields: fields, methods: meths, inner_obj: inner_obj};
ex = ast::expr_anon_obj(ob);
} else if (eat_word(p, "bind")) {
// `bind f(args)` with `_` placeholders; call exprs are restricted so
// the callee itself is not parsed as a call.
let e = parse_expr_res(p, RESTRICT_NO_CALL_EXPRS);
fn parse_expr_opt(p: &parser) -> option::t[@ast::expr] {
alt p.peek() {
token::UNDERSCORE. { p.bump(); ret none; }
_ { ret some(parse_expr(p)); }
}
}
let es =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_expr_opt, p);
hi = es.span.hi;
ex = ast::expr_bind(e, es.node);
} else if (p.peek() == token::POUND) {
let ex_ext = parse_syntax_ext(p);
hi = ex_ext.span.hi;
ex = ex_ext.node;
} else if (eat_word(p, "fail")) {
if can_begin_expr(p.peek()) {
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_fail(some(e));
} else { ex = ast::expr_fail(none); }
} else if (eat_word(p, "log")) {
let e = parse_expr(p);
ex = ast::expr_log(1, e);
hi = e.span.hi;
} else if (eat_word(p, "log_err")) {
let e = parse_expr(p);
ex = ast::expr_log(0, e);
hi = e.span.hi;
} else if (eat_word(p, "assert")) {
let e = parse_expr(p);
ex = ast::expr_assert(e);
hi = e.span.hi;
} else if (eat_word(p, "check")) {
/* Should be a predicate (pure boolean function) applied to
arguments that are all either slot variables or literals.
but the typechecker enforces that. */
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_check(ast::checked, e);
} else if (eat_word(p, "claim")) {
/* Same rules as check, except that if check-claims
is enabled (a command-line flag), then the parser turns
claims into check */
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_check(ast::unchecked, e);
} else if (eat_word(p, "ret")) {
if can_begin_expr(p.peek()) {
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_ret(some(e));
} else { ex = ast::expr_ret(none); }
} else if (eat_word(p, "break")) {
ex = ast::expr_break;
hi = p.get_hi_pos();
} else if (eat_word(p, "cont")) {
ex = ast::expr_cont;
hi = p.get_hi_pos();
} else if (eat_word(p, "put")) {
alt p.peek() {
token::SEMI. { ex = ast::expr_put(none); }
_ {
let e = parse_expr(p);
hi = e.span.hi;
ex = ast::expr_put(some(e));
}
}
} else if (eat_word(p, "be")) {
// Tail call: the operand must itself be a call expression.
let e = parse_expr(p);
// FIXME: Is this the right place for this check?
if /*check*/ast::is_call_expr(e) {
hi = e.span.hi;
ex = ast::expr_be(e);
} else { p.fatal("Non-call expression in tail call"); }
} else if (eat_word(p, "copy")) {
let e = parse_expr(p);
ex = ast::expr_copy(e);
hi = e.span.hi;
} else if (eat_word(p, "self")) {
log "parsing a self-call...";
expect(p, token::DOT);
// The rest is a call expression.
let f: @ast::expr = parse_self_method(p);
let es =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_expr, p);
hi = es.span.hi;
ex = ast::expr_call(f, es.node);
} else if (p.peek() == token::MOD_SEP ||
is_ident(p.peek()) && !is_word(p, "true") &&
!is_word(p, "false")) {
// Path expression (true/false are handled as literals below).
check_bad_word(p);
let pth = parse_path_and_ty_param_substs(p);
hi = pth.span.hi;
ex = ast::expr_path(pth);
} else {
// Fallback: a literal token.
let lit = parse_lit(p);
hi = lit.span.hi;
ex = ast::expr_lit(@lit);
}
ret mk_expr(p, lo, hi, ex);
}
// Parse a `#name(...)` syntax-extension invocation, consuming the `#`.
fn parse_syntax_ext(p: &parser) -> @ast::expr {
let lo = p.get_lo_pos();
expect(p, token::POUND);
ret parse_syntax_ext_naked(p, lo);
}
// Parse a syntax-extension invocation after the `#` has been consumed:
// an expander path followed by parenthesized or bracketed arguments. The
// arguments are packaged as an immutable vec expression for the expander.
fn parse_syntax_ext_naked(p: &parser, lo: uint) -> @ast::expr {
let pth = parse_path(p);
if ivec::len(pth.node.idents) == 0u {
p.fatal("expected a syntax expander name");
}
//temporary for a backwards-compatible cycle:
let es = if p.peek() == token::LPAREN {
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_expr, p)
} else {
parse_seq(token::LBRACKET, token::RBRACKET, some(token::COMMA),
parse_expr, p)
};
let hi = es.span.hi;
let e = mk_expr(p, es.span.lo, hi,
ast::expr_vec(es.node, ast::imm, ast::sk_rc));
ret mk_mac_expr(p, lo, hi, ast::mac_invoc(pth, e, none));
}
// Parse the method name of a `self.method(...)` call as a self-method expr.
fn parse_self_method(p: &parser) -> @ast::expr {
let sp = p.get_span();
let f_name: ast::ident = parse_ident(p);
ret mk_expr(p, sp.lo, sp.hi, ast::expr_self_method(f_name));
}
// Parse a primary expression and then any trailing `.field`, `.(index)`,
// or call suffixes.
fn parse_dot_or_call_expr(p: &parser) -> @ast::expr {
ret parse_dot_or_call_expr_with(p, parse_bottom_expr(p));
}
// Given an already-parsed base expression `e`, repeatedly absorb call
// suffixes `(args)`, field accesses `.ident`, and index accesses `.(expr)`
// until no suffix follows. Honors the no-call restriction used by `bind`.
fn parse_dot_or_call_expr_with(p: &parser, e: @ast::expr) -> @ast::expr {
let lo = e.span.lo;
let hi = e.span.hi;
while true {
alt p.peek() {
token::LPAREN. {
if p.get_restriction() == RESTRICT_NO_CALL_EXPRS {
ret e;
} else {
// Call expr.
let es =
parse_seq(token::LPAREN, token::RPAREN,
some(token::COMMA), parse_expr, p);
hi = es.span.hi;
e = mk_expr(p, lo, hi, ast::expr_call(e, es.node));
}
}
token::DOT. {
p.bump();
alt p.peek() {
token::IDENT(i, _) {
hi = p.get_hi_pos();
p.bump();
e = mk_expr(p, lo, hi, ast::expr_field(e, p.get_str(i)));
}
token::LPAREN. {
// `.(expr)` -- index access in this grammar.
p.bump();
let ix = parse_expr(p);
hi = ix.span.hi;
expect(p, token::RPAREN);
e = mk_expr(p, lo, hi, ast::expr_index(e, ix));
}
t { unexpected(p, t); }
}
}
_ { ret e; }
}
}
ret e;
}
// Parse prefix operators `!`, `-`, `*` (deref), and `@` (box, with
// optional mutability); anything else falls through to dot/call parsing.
// A deprecated leading `mutable` is consumed with a warning.
fn parse_prefix_expr(p: &parser) -> @ast::expr {
if eat_word(p, "mutable") {
p.warn("ignoring deprecated 'mutable' prefix operator");
}
let lo = p.get_lo_pos();
let hi = p.get_hi_pos();
// FIXME: can only remove this sort of thing when both typestate and
// alt-exhaustive-match checking are co-operating.
// Dummy initializer; every taken branch overwrites `ex` or returns.
let lit = @spanned(lo, lo, ast::lit_nil);
let ex: ast::expr_ = ast::expr_lit(lit);
alt p.peek() {
token::NOT. {
p.bump();
let e = parse_prefix_expr(p);
hi = e.span.hi;
ex = ast::expr_unary(ast::not, e);
}
token::BINOP(b) {
alt b {
token::MINUS. {
p.bump();
let e = parse_prefix_expr(p);
hi = e.span.hi;
ex = ast::expr_unary(ast::neg, e);
}
token::STAR. {
p.bump();
let e = parse_prefix_expr(p);
hi = e.span.hi;
ex = ast::expr_unary(ast::deref, e);
}
_ { ret parse_dot_or_call_expr(p); }
}
}
token::AT. {
p.bump();
let m = parse_mutability(p);
let e = parse_prefix_expr(p);
hi = e.span.hi;
ex = ast::expr_unary(ast::box(m), e);
}
_ { ret parse_dot_or_call_expr(p); }
}
ret mk_expr(p, lo, hi, ex);
}
// Parse a binop expression optionally followed by `? then : else`
// (the C-style ternary conditional).
fn parse_ternary(p: &parser) -> @ast::expr {
let cond_expr = parse_binops(p);
if p.peek() == token::QUES {
p.bump();
let then_expr = parse_expr(p);
expect(p, token::COLON);
let else_expr = parse_expr(p);
ret mk_expr(p, cond_expr.span.lo, else_expr.span.hi,
ast::expr_ternary(cond_expr, then_expr, else_expr));
} else { ret cond_expr; }
}
// One binary-operator table entry: token, AST operator, and precedence.
type op_spec = {tok: token::token, op: ast::binop, prec: int};
// FIXME make this a const, don't store it in parser state
// Binary-operator precedence table, highest precedence first. Consulted
// linearly by parse_more_binops.
fn prec_table() -> @[op_spec] {
ret @~[{tok: token::BINOP(token::STAR), op: ast::mul, prec: 11},
{tok: token::BINOP(token::SLASH), op: ast::div, prec: 11},
{tok: token::BINOP(token::PERCENT), op: ast::rem, prec: 11},
{tok: token::BINOP(token::PLUS), op: ast::add, prec: 10},
{tok: token::BINOP(token::MINUS), op: ast::sub, prec: 10},
{tok: token::BINOP(token::LSL), op: ast::lsl, prec: 9},
{tok: token::BINOP(token::LSR), op: ast::lsr, prec: 9},
{tok: token::BINOP(token::ASR), op: ast::asr, prec: 9},
{tok: token::BINOP(token::AND), op: ast::bitand, prec: 8},
{tok: token::BINOP(token::CARET), op: ast::bitxor, prec: 6},
{tok: token::BINOP(token::OR), op: ast::bitor, prec: 6},
// 'as' sits between here with 5
{tok: token::LT, op: ast::lt, prec: 4},
{tok: token::LE, op: ast::le, prec: 4},
{tok: token::GE, op: ast::ge, prec: 4},
{tok: token::GT, op: ast::gt, prec: 4},
{tok: token::EQEQ, op: ast::eq, prec: 3},
{tok: token::NE, op: ast::ne, prec: 3},
{tok: token::ANDAND, op: ast::and, prec: 2},
{tok: token::OROR, op: ast::or, prec: 1}];
}
// Entry point for binary-operator parsing: a prefix expression followed by
// any operators with precedence above 0.
fn parse_binops(p: &parser) -> @ast::expr {
ret parse_more_binops(p, parse_prefix_expr(p), 0);
}
// Precedence anchors outside the table: unary ops bind tightest, `as`
// casts sit between the bitwise and comparison operators, and `?:` is
// the loosest.
const unop_prec: int = 100;
const as_prec: int = 5;
const ternary_prec: int = 0;
// Precedence-climbing loop: while the next token is a binary operator with
// precedence above `min_prec`, parse its right operand (at the operator's
// own precedence, giving left associativity) and fold. `as` casts are
// handled separately at as_prec.
fn parse_more_binops(p: &parser, lhs: @ast::expr, min_prec: int) ->
@ast::expr {
let peeked = p.peek();
for cur: op_spec in *p.get_prec_table() {
if cur.prec > min_prec && cur.tok == peeked {
p.bump();
let rhs = parse_more_binops(p, parse_prefix_expr(p), cur.prec);
let bin =
mk_expr(p, lhs.span.lo, rhs.span.hi,
ast::expr_binary(cur.op, lhs, rhs));
ret parse_more_binops(p, bin, min_prec);
}
}
if as_prec > min_prec && eat_word(p, "as") {
let rhs = parse_ty(p, true);
let _as =
mk_expr(p, lhs.span.lo, rhs.span.hi, ast::expr_cast(lhs, rhs));
ret parse_more_binops(p, _as, min_prec);
}
ret lhs;
}
// Parse an assignment-level expression: a ternary optionally followed by
// `=`, a compound `op=`, `<-` (move), or `<->` (swap). Assignment is
// right-associative since the RHS is a full parse_expr.
fn parse_assign_expr(p: &parser) -> @ast::expr {
let lo = p.get_lo_pos();
let lhs = parse_ternary(p);
alt p.peek() {
token::EQ. {
p.bump();
let rhs = parse_expr(p);
ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign(lhs, rhs));
}
token::BINOPEQ(op) {
p.bump();
let rhs = parse_expr(p);
// Map the compound-assignment token to its AST binop; `add` is just
// a dummy initial value overwritten by the alt below.
let aop = ast::add;
alt op {
token::PLUS. { aop = ast::add; }
token::MINUS. { aop = ast::sub; }
token::STAR. { aop = ast::mul; }
token::SLASH. { aop = ast::div; }
token::PERCENT. { aop = ast::rem; }
token::CARET. { aop = ast::bitxor; }
token::AND. { aop = ast::bitand; }
token::OR. { aop = ast::bitor; }
token::LSL. { aop = ast::lsl; }
token::LSR. { aop = ast::lsr; }
token::ASR. { aop = ast::asr; }
}
ret mk_expr(p, lo, rhs.span.hi, ast::expr_assign_op(aop, lhs, rhs));
}
token::LARROW. {
p.bump();
let rhs = parse_expr(p);
ret mk_expr(p, lo, rhs.span.hi, ast::expr_move(lhs, rhs));
}
token::DARROW. {
p.bump();
let rhs = parse_expr(p);
ret mk_expr(p, lo, rhs.span.hi, ast::expr_swap(lhs, rhs));
}
_ {/* fall through */ }
}
ret lhs;
}
// Parse the shared body of `if` / `if check`: condition, then-block, and
// an optional `else` expression, returning the pieces plus the span.
// Assumes the `if` keyword has already been consumed (uses last_lo_pos).
fn parse_if_expr_1(p: &parser) ->
{cond: @ast::expr,
then: ast::blk,
els: option::t[@ast::expr],
lo: uint,
hi: uint} {
let lo = p.get_last_lo_pos();
let cond = parse_expr(p);
let thn = parse_block(p);
let els: option::t[@ast::expr] = none;
let hi = thn.span.hi;
if eat_word(p, "else") {
let elexpr = parse_else_expr(p);
els = some(elexpr);
hi = elexpr.span.hi;
}
ret {cond: cond, then: thn, els: els, lo: lo, hi: hi};
}
// Parse `if ...` or the typestate form `if check ...`; both share
// parse_if_expr_1 and differ only in the AST node constructed.
fn parse_if_expr(p: &parser) -> @ast::expr {
if eat_word(p, "check") {
let q = parse_if_expr_1(p);
ret mk_expr(p, q.lo, q.hi, ast::expr_if_check(q.cond, q.then, q.els));
} else {
let q = parse_if_expr_1(p);
ret mk_expr(p, q.lo, q.hi, ast::expr_if(q.cond, q.then, q.els));
}
}
// Parse an anonymous fn/block/lambda expression (keyword already eaten;
// the caller supplies the corresponding proto).
fn parse_fn_expr(p: &parser, proto: ast::proto) -> @ast::expr {
let lo = p.get_last_lo_pos();
let decl = parse_fn_decl(p, ast::impure_fn, ast::il_normal);
let body = parse_block(p);
let _fn = {decl: decl, proto: proto, body: body};
ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parse a `{|args| body}` block expression; the opening `{` has already
// been consumed, so the body is parsed as a block tail.
fn parse_fn_block_expr(p: &parser) -> @ast::expr {
let lo = p.get_last_lo_pos();
let decl = parse_fn_block_decl(p);
let body = parse_block_tail(p, lo);
let _fn = {decl: decl, proto: ast::proto_block, body: body};
ret mk_expr(p, lo, body.span.hi, ast::expr_fn(_fn));
}
// Parse what follows `else`: either a chained `if` or a plain block.
fn parse_else_expr(p: &parser) -> @ast::expr {
if eat_word(p, "if") {
ret parse_if_expr(p);
} else {
let blk = parse_block(p);
ret mk_expr(p, blk.span.lo, blk.span.hi, ast::expr_block(blk));
}
}
// Parse `for [each] local in seq { ... }`; `for each` produces the
// iterator-driven expr_for_each form.
fn parse_for_expr(p: &parser) -> @ast::expr {
let lo = p.get_last_lo_pos();
let is_each = eat_word(p, "each");
let decl = parse_local(p, false);
expect_word(p, "in");
let seq = parse_expr(p);
let body = parse_block(p);
let hi = body.span.hi;
if is_each {
ret mk_expr(p, lo, hi, ast::expr_for_each(decl, seq, body));
} else { ret mk_expr(p, lo, hi, ast::expr_for(decl, seq, body)); }
}
// Parse `while cond { ... }` (keyword already consumed).
fn parse_while_expr(p: &parser) -> @ast::expr {
let lo = p.get_last_lo_pos();
let cond = parse_expr(p);
let body = parse_block(p);
let hi = body.span.hi;
ret mk_expr(p, lo, hi, ast::expr_while(cond, body));
}
// Parse `do { ... } while cond` (the `do` keyword already consumed).
fn parse_do_while_expr(p: &parser) -> @ast::expr {
let lo = p.get_last_lo_pos();
let body = parse_block(p);
expect_word(p, "while");
let cond = parse_expr(p);
let hi = cond.span.hi;
ret mk_expr(p, lo, hi, ast::expr_do_while(body, cond));
}
// Parse `alt discriminant { pats { blk } ... }` -- the match expression of
// this dialect. Each arm is one or more `|`-joined patterns and a block.
fn parse_alt_expr(p: &parser) -> @ast::expr {
let lo = p.get_last_lo_pos();
let discriminant = parse_expr(p);
expect(p, token::LBRACE);
let arms: [ast::arm] = ~[];
while p.peek() != token::RBRACE {
let pats = parse_pats(p);
let blk = parse_block(p);
arms += ~[{pats: pats, body: blk}];
}
let hi = p.get_hi_pos();
p.bump();
ret mk_expr(p, lo, hi, ast::expr_alt(discriminant, arms));
}
// Parse `spawn f(args)`; currently only reachable from a commented-out
// branch in parse_bottom_expr. Domain/name are not parsed yet (FIXMEs).
fn parse_spawn_expr(p: &parser) -> @ast::expr {
let lo = p.get_last_lo_pos();
// FIXME: Parse domain and name
// FIXME: why no full expr?
let fn_expr = parse_bottom_expr(p);
let es =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_expr, p);
let hi = es.span.hi;
ret mk_expr(p, lo, hi,
ast::expr_spawn(ast::dom_implicit, option::none, fn_expr,
es.node));
}
// Parse a full expression with no parsing restrictions in effect.
fn parse_expr(p: &parser) -> @ast::expr {
ret parse_expr_res(p, UNRESTRICTED);
}
// Parse an expression under a temporary restriction (e.g. no-call-exprs
// for `bind`), restoring the previous restriction afterwards.
fn parse_expr_res(p: &parser, r: restriction) -> @ast::expr {
let old = p.get_restriction();
p.restrict(r);
let e = parse_assign_expr(p);
p.restrict(old);
ret e;
}
// Parse an optional local-variable initializer: `= expr` (assign) or
// `<- expr` (move); returns none when no initializer follows.
fn parse_initializer(p: &parser) -> option::t[ast::initializer] {
alt p.peek() {
token::EQ. {
p.bump();
ret some({op: ast::init_assign, expr: parse_expr(p)});
}
token::LARROW. {
p.bump();
ret some({op: ast::init_move, expr: parse_expr(p)});
}
// Now that the the channel is the first argument to receive,
// combining it with an initializer doesn't really make sense.
// case (token::RECV) {
// p.bump();
// ret some(rec(op = ast::init_recv,
// expr = parse_expr(p)));
// }
_ {
ret none;
}
}
}
// Parse one or more `|`-separated patterns (one alt-arm's pattern list).
fn parse_pats(p: &parser) -> [@ast::pat] {
let pats = ~[];
while true {
pats += ~[parse_pat(p)];
if p.peek() == token::BINOP(token::OR) { p.bump(); } else { break; }
}
ret pats;
}
// Parse a single pattern: wildcard `_`, box `@pat`, record `{f: pat, _}`,
// tuple `(pat, ...)`, literal, plain binding, or tag pattern with
// optional argument list.
fn parse_pat(p: &parser) -> @ast::pat {
let lo = p.get_lo_pos();
let hi = p.get_hi_pos();
let pat;
alt p.peek() {
token::UNDERSCORE. { p.bump(); pat = ast::pat_wild; }
token::AT. {
p.bump();
let sub = parse_pat(p);
pat = ast::pat_box(sub);
hi = sub.span.hi;
}
token::LBRACE. {
// Record pattern; a trailing `_` sets the "etc" flag and must be the
// last element before `}`.
p.bump();
let fields = ~[];
let etc = false;
let first = true;
while p.peek() != token::RBRACE {
if first { first = false; } else { expect(p, token::COMMA); }
if p.peek() == token::UNDERSCORE {
p.bump();
if p.peek() != token::RBRACE {
p.fatal("expecting }, found " +
token::to_str(p.get_reader(), p.peek()));
}
etc = true;
break;
}
let fieldname = parse_ident(p);
let subpat;
if p.peek() == token::COLON {
p.bump();
subpat = parse_pat(p);
} else {
// Shorthand `{field}` binds the field to a variable of the same
// name; reserved words may not be used as bindings.
if p.get_bad_expr_words().contains_key(fieldname) {
p.fatal("found " + fieldname + " in binding position");
}
subpat =
@{id: p.get_id(),
node: ast::pat_bind(fieldname),
span: ast::mk_sp(lo, hi)};
}
fields += ~[{ident: fieldname, pat: subpat}];
}
hi = p.get_hi_pos();
p.bump();
pat = ast::pat_rec(fields, etc);
}
token::LPAREN. {
p.bump();
if p.peek() == token::RPAREN {
// `()` matches the nil literal.
hi = p.get_hi_pos();
p.bump();
pat = ast::pat_lit(@{node: ast::lit_nil,
span: ast::mk_sp(lo,hi)});
} else {
let fields = ~[parse_pat(p)];
while p.peek() == token::COMMA {
p.bump();
fields += ~[parse_pat(p)];
}
// NOTE(review): for a single element this demands a COMMA after the
// comma-loop has already stopped, which appears to reject both
// `(pat)` and `(pat,)` -- confirm intended behavior.
if ivec::len(fields) == 1u { expect(p, token::COMMA); }
hi = p.get_hi_pos();
expect(p, token::RPAREN);
pat = ast::pat_tup(fields);
}
}
tok {
// Remaining cases: literal, identifier binding, or tag pattern,
// disambiguated by one-token lookahead.
if !is_ident(tok) || is_word(p, "true") || is_word(p, "false") {
let lit = parse_lit(p);
hi = lit.span.hi;
pat = ast::pat_lit(@lit);
} else if (is_plain_ident(p) &&
alt p.look_ahead(1u) {
token::DOT. | token::LPAREN. | token::LBRACKET. {
false
}
_ { true }
}) {
hi = p.get_hi_pos();
pat = ast::pat_bind(parse_value_ident(p));
} else {
// Tag pattern: path plus `(args)`, or a trailing `.` for nullary.
let tag_path = parse_path_and_ty_param_substs(p);
hi = tag_path.span.hi;
let args: [@ast::pat];
alt p.peek() {
token::LPAREN. {
let a =
parse_seq(token::LPAREN, token::RPAREN,
some(token::COMMA), parse_pat, p);
args = a.node;
hi = a.span.hi;
}
token::DOT. { args = ~[]; p.bump(); }
_ { expect(p, token::LPAREN); fail; }
}
pat = ast::pat_tag(tag_path, args);
}
}
}
ret @{id: p.get_id(), node: pat, span: ast::mk_sp(lo, hi)};
}
// Parse a local declaration: pattern, optional `: type` (defaults to
// ty_infer), and -- when `allow_init` -- an optional initializer.
fn parse_local(p: &parser, allow_init: bool) -> @ast::local {
let lo = p.get_lo_pos();
let pat = parse_pat(p);
let ty = @spanned(lo, lo, ast::ty_infer);
if eat(p, token::COLON) { ty = parse_ty(p, false); }
let init = if allow_init { parse_initializer(p) } else { none };
ret @spanned(lo, p.get_last_hi_pos(),
{ty: ty,
pat: pat,
init: init,
id: p.get_id()});
}
// Parse the comma-separated locals of a `let` declaration (keyword
// already consumed by the caller).
fn parse_let(p: &parser) -> @ast::decl {
let lo = p.get_lo_pos();
let locals = ~[parse_local(p, true)];
while p.peek() == token::COMMA {
p.bump();
locals += ~[parse_local(p, true)];
}
ret @spanned(lo, p.get_last_hi_pos(), ast::decl_local(locals));
}
// Dispatch statement parsing on file type: real source files get source
// statements, crate files get crate directives.
fn parse_stmt(p: &parser) -> @ast::stmt {
if p.get_file_type() == SOURCE_FILE {
ret parse_source_stmt(p);
} else { ret parse_crate_stmt(p); }
}
// Parse one crate-file statement: a crate directive wrapped as a stmt.
fn parse_crate_stmt(p: &parser) -> @ast::stmt {
let cdir = parse_crate_directive(p, ~[]);
ret @spanned(cdir.span.lo, cdir.span.hi,
ast::stmt_crate_directive(@cdir));
}
// Parse one source-file statement: a `let` declaration, an item
// (possibly attribute-decorated), a statement-position syntax extension,
// or -- as the fallback -- an expression statement.
fn parse_source_stmt(p: &parser) -> @ast::stmt {
let lo = p.get_lo_pos();
if eat_word(p, "let") {
let decl = parse_let(p);
ret @spanned(lo, decl.span.hi, ast::stmt_decl(decl, p.get_id()));
} else {
let item_attrs;
alt parse_outer_attrs_or_ext(p) {
none. { item_attrs = ~[]; }
some(left(attrs)) { item_attrs = attrs; }
some(right(ext)) {
// A statement-position macro invocation: treat as an expr stmt.
ret @spanned(lo, ext.span.hi, ast::stmt_expr(ext, p.get_id()));
}
}
let maybe_item = parse_item(p, item_attrs);
// If we have attributes then we should have an item
if ivec::len(item_attrs) > 0u {
alt maybe_item {
some(_) {/* fallthrough */ }
_ { ret p.fatal("expected item"); }
}
}
alt maybe_item {
some(i) {
let hi = i.span.hi;
let decl = @spanned(lo, hi, ast::decl_item(i));
ret @spanned(lo, hi, ast::stmt_decl(decl, p.get_id()));
}
none. {
// Remainder are line-expr stmts.
let e = parse_expr(p);
ret @spanned(lo, e.span.hi, ast::stmt_expr(e, p.get_id()));
}
_ { p.fatal("expected statement"); }
}
}
}
// Extract the expression from an expression statement, or none for
// declarations and directives.
fn stmt_to_expr(stmt: @ast::stmt) -> option::t[@ast::expr] {
ret alt stmt.node { ast::stmt_expr(e, _) { some(e) } _ { none } };
}
// Decide whether a statement must be terminated by `;`. Block-like
// expressions (if, alt, loops, fn, block) and items do not take one;
// everything else does. Exhaustive over expr_ so new variants force an
// explicit decision here.
fn stmt_ends_with_semi(stmt: &ast::stmt) -> bool {
alt stmt.node {
ast::stmt_decl(d, _) {
ret alt d.node {
ast::decl_local(_) { true }
ast::decl_item(_) { false }
}
}
ast::stmt_expr(e, _) {
ret alt e.node {
ast::expr_vec(_, _, _) { true }
ast::expr_rec(_, _) { true }
ast::expr_tup(_) { true }
ast::expr_call(_, _) { true }
ast::expr_self_method(_) { false }
ast::expr_bind(_, _) { true }
ast::expr_spawn(_, _, _, _) { true }
ast::expr_binary(_, _, _) { true }
ast::expr_unary(_, _) { true }
ast::expr_lit(_) { true }
ast::expr_cast(_, _) { true }
ast::expr_if(_, _, _) { false }
ast::expr_ternary(_, _, _) { true }
ast::expr_for(_, _, _) { false }
ast::expr_for_each(_, _, _) { false }
ast::expr_while(_, _) { false }
ast::expr_do_while(_, _) { false }
ast::expr_alt(_, _) { false }
ast::expr_fn(_) { false }
ast::expr_block(_) { false }
ast::expr_move(_, _) { true }
ast::expr_assign(_, _) { true }
ast::expr_swap(_, _) { true }
ast::expr_assign_op(_, _, _) { true }
ast::expr_send(_, _) { true }
ast::expr_recv(_, _) { true }
ast::expr_field(_, _) { true }
ast::expr_index(_, _) { true }
ast::expr_path(_) { true }
ast::expr_mac(_) { true }
ast::expr_fail(_) { true }
ast::expr_break. { true }
ast::expr_cont. { true }
ast::expr_ret(_) { true }
ast::expr_put(_) { true }
ast::expr_be(_) { true }
ast::expr_log(_, _) { true }
ast::expr_check(_, _) { true }
ast::expr_if_check(_, _, _) { false }
ast::expr_port(_) { true }
ast::expr_chan(_) { true }
ast::expr_anon_obj(_) { false }
ast::expr_assert(_) { true }
}
}
// We should not be calling this on a cdir.
ast::stmt_crate_directive(cdir) {
fail;
}
}
}
// Parse a `{ ... }` block, consuming the opening brace and tail-calling
// into parse_block_tail (`be` is the tail-call form of `ret`).
fn parse_block(p: &parser) -> ast::blk {
let lo = p.get_lo_pos();
expect(p, token::LBRACE);
be parse_block_tail(p, lo);
}
// some blocks start with "#{"...
// Parse statements after an already-consumed `{` until the matching `}`.
// A final semicolon-less expression statement becomes the block's tail
// expression; semicolon rules are enforced via stmt_ends_with_semi.
fn parse_block_tail(p: &parser, lo: uint) -> ast::blk {
let stmts: [@ast::stmt] = ~[];
let expr: option::t[@ast::expr] = none;
while p.peek() != token::RBRACE {
alt p.peek() {
token::SEMI. {
p.bump(); // empty
}
_ {
let stmt = parse_stmt(p);
alt stmt_to_expr(stmt) {
some(e) {
alt p.peek() {
token::SEMI. { p.bump(); stmts += ~[stmt]; }
token::RBRACE. { expr = some(e); }
t {
if stmt_ends_with_semi(*stmt) {
p.fatal("expected ';' or '}' after " +
"expression but found " +
token::to_str(p.get_reader(), t));
}
stmts += ~[stmt];
}
}
}
none. {
// Not an expression statement.
stmts += ~[stmt];
if p.get_file_type() == SOURCE_FILE &&
stmt_ends_with_semi(*stmt) {
expect(p, token::SEMI);
}
}
}
}
}
}
let hi = p.get_hi_pos();
p.bump();
let bloc = {stmts: stmts, expr: expr, id: p.get_id()};
ret spanned(lo, hi, bloc);
}
// Parse one type parameter: an optional kind sigil (`~` unique, `@`
// shared, default pinned) followed by the parameter name.
fn parse_ty_param(p: &parser) -> ast::ty_param {
let k = alt p.peek() {
token::TILDE. { p.bump(); ast::kind_unique }
token::AT. { p.bump(); ast::kind_shared }
_ { ast::kind_pinned }
};
ret {ident: parse_ident(p), kind: k};
}
// Parse an optional type-parameter list in either the old `[T, ...]` or
// the newer `<T, ...>` bracketing; empty when neither follows.
fn parse_ty_params(p: &parser) -> [ast::ty_param] {
let ty_params: [ast::ty_param] = ~[];
if p.peek() == token::LBRACKET {
ty_params =
parse_seq(token::LBRACKET, token::RBRACKET, some(token::COMMA),
parse_ty_param, p).node;
}
if p.peek() == token::LT {
ty_params =
parse_seq(token::LT, token::GT, some(token::COMMA),
parse_ty_param, p).node;
}
ret ty_params;
}
// Parse a fn declaration: `(args) [: constrs] [-> ty | -> !]`. With no
// `->`, the return type defaults to nil spanning the arg list. A `!`
// return marks the fn as non-returning (cf = noreturn, output = ty_bot).
fn parse_fn_decl(p: &parser, purity: ast::purity, il: ast::inlineness)
-> ast::fn_decl {
let inputs: ast::spanned[[ast::arg]] =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA), parse_arg,
p);
let rslt: ty_or_bang;
// Use the args list to translate each bound variable
// mentioned in a constraint to an arg index.
// Seems weird to do this in the parser, but I'm not sure how else to.
let constrs = ~[];
if p.peek() == token::COLON {
p.bump();
constrs = parse_constrs(bind parse_ty_constr(inputs.node, _), p);
}
if p.peek() == token::RARROW {
p.bump();
rslt = parse_ty_or_bang(p);
} else {
rslt = a_ty(@spanned(inputs.span.lo, inputs.span.hi, ast::ty_nil));
}
alt rslt {
a_ty(t) {
ret {inputs: inputs.node,
output: t,
purity: purity,
il: il,
cf: ast::return,
constraints: constrs};
}
a_bang. {
ret {inputs: inputs.node,
output: @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_bot),
purity: purity,
il: il,
cf: ast::noreturn,
constraints: constrs};
}
}
}
// Parse a `|args|` block declaration; the return type is always inferred
// and the decl is an impure, normal-inline, returning fn with no
// constraints.
fn parse_fn_block_decl(p: &parser) -> ast::fn_decl {
let inputs: ast::spanned[[ast::arg]] =
parse_seq(token::BINOP(token::OR), token::BINOP(token::OR),
some(token::COMMA), parse_fn_block_arg, p);
ret {inputs: inputs.node,
output: @spanned(p.get_lo_pos(), p.get_hi_pos(), ast::ty_infer),
purity: ast::impure_fn,
il: ast::il_normal,
cf: ast::return,
constraints: ~[]};
}
// Parse a complete fn: declaration followed by its body block.
fn parse_fn(p: &parser, proto: ast::proto, purity: ast::purity,
il: ast::inlineness) -> ast::_fn {
let decl = parse_fn_decl(p, purity, il);
let body = parse_block(p);
ret {decl: decl, proto: proto, body: body};
}
// Parse a fn header: its name and optional type-parameter list.
fn parse_fn_header(p: &parser) -> {ident: ast::ident, tps: [ast::ty_param]} {
let id = parse_value_ident(p);
let ty_params = parse_ty_params(p);
ret {ident: id, tps: ty_params};
}
// Allocate an item node with a fresh node id, its attributes, and span.
fn mk_item(p: &parser, lo: uint, hi: uint, ident: &ast::ident,
node: &ast::item_, attrs: &[ast::attribute]) -> @ast::item {
ret @{ident: ident,
attrs: attrs,
id: p.get_id(),
node: node,
span: ast::mk_sp(lo, hi)};
}
fn parse_item_fn_or_iter(p: &parser, purity: ast::purity, proto: ast::proto,
attrs: &[ast::attribute], il: ast::inlineness)
-> @ast::item {
let lo = p.get_last_lo_pos();
let t = parse_fn_header(p);
let f = parse_fn(p, proto, purity, il);
ret mk_item(p, lo, f.body.span.hi, t.ident, ast::item_fn(f, t.tps),
attrs);
}
// Parses one field of an obj declaration: mutability, ident, ':', type.
fn parse_obj_field(p: &parser) -> ast::obj_field {
let mut = parse_mutability(p);
let ident = parse_value_ident(p);
expect(p, token::COLON);
let ty = parse_ty(p, false);
ret {mut: mut, ty: ty, ident: ident, id: p.get_id()};
}
// Parses a field of an anonymous obj; unlike parse_obj_field this also
// requires '=' followed by an initializing expression.
fn parse_anon_obj_field(p: &parser) -> ast::anon_obj_field {
let mut = parse_mutability(p);
let ident = parse_value_ident(p);
expect(p, token::COLON);
let ty = parse_ty(p, false);
expect(p, token::EQ);
let expr = parse_expr(p);
ret {mut: mut, ty: ty, expr: expr, ident: ident, id: p.get_id()};
}
// Parses an obj method: proto, name, then an impure fn of normal inlineness.
fn parse_method(p: &parser) -> @ast::method {
let lo = p.get_lo_pos();
let proto = parse_proto(p);
let ident = parse_value_ident(p);
let f = parse_fn(p, proto, ast::impure_fn, ast::il_normal);
let meth = {ident: ident, meth: f, id: p.get_id()};
ret @spanned(lo, f.body.span.hi, meth);
}
// Parses an 'obj' item: name, type params, a parenthesized field list, and
// a braced list of methods.
fn parse_item_obj(p: &parser, attrs: &[ast::attribute]) ->
@ast::item {
let lo = p.get_last_lo_pos();
let ident = parse_value_ident(p);
let ty_params = parse_ty_params(p);
let fields: ast::spanned[[ast::obj_field]] =
parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_obj_field, p);
let meths: [@ast::method] = ~[];
expect(p, token::LBRACE);
while p.peek() != token::RBRACE {
meths += ~[parse_method(p)];
}
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
let ob: ast::_obj = {fields: fields.node, methods: meths};
ret mk_item(p, lo, hi, ident, ast::item_obj(ob, ty_params, p.get_id()),
attrs);
}
// Parses a 'resource' item: name, type params, one typed argument in
// parens, and a destructor block. The destructor is encoded as a fn that
// takes its single argument by alias and returns nil.
fn parse_item_res(p: &parser, attrs: &[ast::attribute]) ->
@ast::item {
let lo = p.get_last_lo_pos();
let ident = parse_value_ident(p);
let ty_params = parse_ty_params(p);
expect(p, token::LPAREN);
let arg_ident = parse_value_ident(p);
expect(p, token::COLON);
let t = parse_ty(p, false);
expect(p, token::RPAREN);
let dtor = parse_block(p);
let decl =
{inputs:
~[{mode: ast::alias(false),
ty: t,
ident: arg_ident,
id: p.get_id()}],
output: @spanned(lo, lo, ast::ty_nil),
purity: ast::impure_fn,
il: ast::il_normal,
cf: ast::return,
constraints: ~[]};
let f = {decl: decl, proto: ast::proto_fn, body: dtor};
ret mk_item(p, lo, dtor.span.hi, ident,
ast::item_res(f, p.get_id(), ty_params, p.get_id()), attrs);
}
// Parses the items of a module body up to the terminator token. Attributes
// handed in by the caller attach only to the first item parsed here.
fn parse_mod_items(p: &parser, term: token::token,
first_item_attrs: &[ast::attribute]) -> ast::_mod {
// Shouldn't be any view items since we've already parsed an item attr
let view_items =
if ivec::len(first_item_attrs) == 0u { parse_view(p) } else { ~[] };
let items: [@ast::item] = ~[];
let initial_attrs = first_item_attrs;
while p.peek() != term {
// initial_attrs is drained after the first iteration so later items
// only get their own outer attributes.
let attrs = initial_attrs + parse_outer_attributes(p);
initial_attrs = ~[];
alt parse_item(p, attrs) {
some(i) { items += ~[i]; }
_ {
p.fatal("expected item but found " +
token::to_str(p.get_reader(), p.peek()));
}
}
}
ret {view_items: view_items, items: items};
}
// Parses a 'const' item: name ':' type '=' initializer ';'.
fn parse_item_const(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let id = parse_value_ident(p);
expect(p, token::COLON);
let ty = parse_ty(p, false);
expect(p, token::EQ);
let e = parse_expr(p);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret mk_item(p, lo, hi, id, ast::item_const(ty, e), attrs);
}
// Parses a 'mod' item: name and a braced body of items. Inner attributes
// parsed at the top of the body are appended to the mod item's attrs.
fn parse_item_mod(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let id = parse_ident(p);
expect(p, token::LBRACE);
let inner_attrs = parse_inner_attrs_and_next(p);
let first_item_outer_attrs = inner_attrs.next;
let m = parse_mod_items(p, token::RBRACE, first_item_outer_attrs);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret mk_item(p, lo, hi, id, ast::item_mod(m), attrs + inner_attrs.inner);
}
// Parses a type declaration inside a native mod: 'type' name ';'.
fn parse_item_native_type(p: &parser, attrs: &[ast::attribute]) ->
@ast::native_item {
let t = parse_type_decl(p);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret @{ident: t.ident,
attrs: attrs,
node: ast::native_item_ty,
id: p.get_id(),
span: ast::mk_sp(t.lo, hi)};
}
// Parses a fn declaration inside a native mod: header, decl, and an
// optional '=' "link_name" override, terminated by ';' (no body).
fn parse_item_native_fn(p: &parser, attrs: &[ast::attribute]) ->
@ast::native_item {
let lo = p.get_last_lo_pos();
let t = parse_fn_header(p);
let decl = parse_fn_decl(p, ast::impure_fn, ast::il_normal);
let link_name = none;
if p.peek() == token::EQ { p.bump(); link_name = some(parse_str(p)); }
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret @{ident: t.ident,
attrs: attrs,
node: ast::native_item_fn(link_name, decl, t.tps),
id: p.get_id(),
span: ast::mk_sp(lo, hi)};
}
// Dispatches on the leading keyword of a native-mod item: 'type' or 'fn'.
fn parse_native_item(p: &parser, attrs: &[ast::attribute]) ->
@ast::native_item {
if eat_word(p, "type") {
ret parse_item_native_type(p, attrs);
} else if (eat_word(p, "fn")) {
ret parse_item_native_fn(p, attrs);
} else { unexpected(p, p.peek()); }
}
// Parses the items of a native mod body up to the closing brace; mirrors
// parse_mod_items but yields native items.
fn parse_native_mod_items(p: &parser, native_name: &str, abi: ast::native_abi,
first_item_attrs: &[ast::attribute])
-> ast::native_mod {
// Shouldn't be any view items since we've already parsed an item attr
let view_items =
if ivec::len(first_item_attrs) == 0u {
parse_native_view(p)
} else { ~[] };
let items: [@ast::native_item] = ~[];
let initial_attrs = first_item_attrs;
while p.peek() != token::RBRACE {
// The attributes passed in attach only to the first item.
let attrs = initial_attrs + parse_outer_attributes(p);
initial_attrs = ~[];
items += ~[parse_native_item(p, attrs)];
}
ret {native_name: native_name,
abi: abi,
view_items: view_items,
items: items};
}
// Parses a 'native' item. An optional string literal before 'mod' selects
// the ABI (defaulting to cdecl), and an optional '=' "name" after the
// identifier overrides the external name of the module.
fn parse_item_native_mod(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let abi = ast::native_abi_cdecl;
if !is_word(p, "mod") {
let t = parse_str(p);
if str::eq(t, "cdecl") {
// cdecl is the default; nothing to change.
} else if (str::eq(t, "rust")) {
abi = ast::native_abi_rust;
} else if (str::eq(t, "llvm")) {
abi = ast::native_abi_llvm;
} else if (str::eq(t, "rust-intrinsic")) {
abi = ast::native_abi_rust_intrinsic;
} else if (str::eq(t, "x86stdcall")) {
abi = ast::native_abi_x86stdcall;
} else { p.fatal("unsupported abi: " + t); }
}
expect_word(p, "mod");
let id = parse_ident(p);
let native_name;
if p.peek() == token::EQ {
expect(p, token::EQ);
native_name = parse_str(p);
} else { native_name = id; }
expect(p, token::LBRACE);
let more_attrs = parse_inner_attrs_and_next(p);
let inner_attrs = more_attrs.inner;
let first_item_outer_attrs = more_attrs.next;
let m =
parse_native_mod_items(p, native_name, abi, first_item_outer_attrs);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret mk_item(p, lo, hi, id, ast::item_native_mod(m), attrs + inner_attrs);
}
// Parses the common prefix of a type item: just the identifier, with the
// low position of the already-consumed introducing keyword.
fn parse_type_decl(p: &parser) -> {lo: uint, ident: ast::ident} {
let lo = p.get_last_lo_pos();
let id = parse_ident(p);
ret {lo: lo, ident: id};
}
// Parses a 'type' item: name, type params, '=' aliased type ';'.
fn parse_item_type(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let t = parse_type_decl(p);
let tps = parse_ty_params(p);
expect(p, token::EQ);
let ty = parse_ty(p, false);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret mk_item(p, t.lo, hi, t.ident, ast::item_ty(ty, tps), attrs);
}
// Parses a 'tag' (variant/enum) item: either the newtype shorthand
// 'tag name = type;' or a braced list of ';'-terminated variants.
fn parse_item_tag(p: &parser, attrs: &[ast::attribute]) -> @ast::item {
let lo = p.get_last_lo_pos();
let id = parse_ident(p);
let ty_params = parse_ty_params(p);
let variants: [ast::variant] = ~[];
// Newtype syntax
if p.peek() == token::EQ {
if p.get_bad_expr_words().contains_key(id) {
p.fatal("found " + id + " in tag constructor position");
}
p.bump();
let ty = parse_ty(p, false);
expect(p, token::SEMI);
// The single synthesized variant shares the tag's name and carries
// the aliased type as its one argument.
let variant =
spanned(ty.span.lo, ty.span.hi,
{name: id,
args: ~[{ty: ty, id: p.get_id()}],
id: p.get_id()});
ret mk_item(p, lo, ty.span.hi, id,
ast::item_tag(~[variant], ty_params), attrs);
}
expect(p, token::LBRACE);
while p.peek() != token::RBRACE {
let tok = p.peek();
alt tok {
token::IDENT(name, _) {
check_bad_word(p);
let vlo = p.get_lo_pos();
p.bump();
let args: [ast::variant_arg] = ~[];
let vhi = p.get_hi_pos();
alt p.peek() {
token::LPAREN. {
// Variant with arguments: a parenthesized list of types.
let arg_tys =
parse_seq(token::LPAREN, token::RPAREN,
some(token::COMMA), bind parse_ty(_, false), p);
for ty: @ast::ty in arg_tys.node {
args += ~[{ty: ty, id: p.get_id()}];
}
vhi = arg_tys.span.hi;
}
_ {/* empty */ }
}
expect(p, token::SEMI);
// NOTE(review): this id is allocated and discarded — presumably to
// keep node-id assignment in step with other code paths; confirm.
p.get_id();
let vr = {name: p.get_str(name), args: args, id: p.get_id()};
variants += ~[spanned(vlo, vhi, vr)];
}
token::RBRACE. {/* empty */ }
_ {
p.fatal("expected name of variant or '}' but found " +
token::to_str(p.get_reader(), tok));
}
}
}
let hi = p.get_hi_pos();
p.bump();
ret mk_item(p, lo, hi, id, ast::item_tag(variants, ty_params), attrs);
}
// Parses the right-hand side of an 'auth' directive; only 'unsafe' is
// accepted.
fn parse_auth(p: &parser) -> ast::_auth {
if eat_word(p, "unsafe") {
ret ast::auth_unsafe;
} else { unexpected(p, p.peek()); }
}
// Keyword dispatch for items. Returns none when the next token does not
// begin an item. 'fn' and 'obj' are only treated as items when not followed
// by '(' — presumably so fn types/expressions are left for other parsers;
// confirm against the callers.
fn parse_item(p: &parser, attrs: &[ast::attribute]) -> option::t[@ast::item] {
if eat_word(p, "const") {
ret some(parse_item_const(p, attrs));
} else if (eat_word(p, "inline")) {
expect_word(p, "fn");
ret some(parse_item_fn_or_iter(p, ast::impure_fn, ast::proto_fn,
attrs, ast::il_inline));
} else if (is_word(p, "fn") && p.look_ahead(1u) != token::LPAREN) {
p.bump();
ret some(parse_item_fn_or_iter(p, ast::impure_fn, ast::proto_fn,
attrs, ast::il_normal));
} else if (eat_word(p, "pred")) {
ret some(parse_item_fn_or_iter(p, ast::pure_fn, ast::proto_fn,
attrs, ast::il_normal));
} else if (eat_word(p, "iter")) {
ret some(parse_item_fn_or_iter(p, ast::impure_fn, ast::proto_iter,
attrs, ast::il_normal));
} else if (eat_word(p, "mod")) {
ret some(parse_item_mod(p, attrs));
} else if (eat_word(p, "native")) {
ret some(parse_item_native_mod(p, attrs));
}
if eat_word(p, "type") {
ret some(parse_item_type(p, attrs));
} else if (eat_word(p, "tag")) {
ret some(parse_item_tag(p, attrs));
} else if (is_word(p, "obj") && p.look_ahead(1u) != token::LPAREN) {
p.bump();
ret some(parse_item_obj(p, attrs));
} else if (eat_word(p, "resource")) {
ret some(parse_item_res(p, attrs));
} else { ret none; }
}
// A type to distinguish between the parsing of item attributes or syntax
// extensions, which both begin with token::POUND
type attr_or_ext = option::t[either::t[[ast::attribute], @ast::expr]];
// Disambiguates '#[' (an outer attribute) from '#ident(...)' (a syntax
// extension) after seeing a '#'. Returns none when it is neither.
fn parse_outer_attrs_or_ext(p: &parser) -> attr_or_ext {
if p.peek() == token::POUND {
let lo = p.get_lo_pos();
p.bump();
if p.peek() == token::LBRACKET {
let first_attr = parse_attribute_naked(p, ast::attr_outer, lo);
ret some(left(~[first_attr] + parse_outer_attributes(p)));
} else if (!(p.peek() == token::LT || p.peek() == token::LBRACKET)) {
ret some(right(parse_syntax_ext_naked(p, lo)));
} else { ret none; }
} else { ret none; }
}
// Parse attributes that appear before an item
fn parse_outer_attributes(p: &parser) -> [ast::attribute] {
let attrs: [ast::attribute] = ~[];
while p.peek() == token::POUND {
attrs += ~[parse_attribute(p, ast::attr_outer)];
}
ret attrs;
}
// Parses one attribute of the given style, starting at the '#'.
fn parse_attribute(p: &parser, style: ast::attr_style) -> ast::attribute {
let lo = p.get_lo_pos();
expect(p, token::POUND);
ret parse_attribute_naked(p, style, lo);
}
// Parses the '[meta_item]' part of an attribute, the '#' having already
// been consumed (hence "naked"); lo is the position of that '#'.
fn parse_attribute_naked(p: &parser, style: ast::attr_style, lo: uint) ->
ast::attribute {
expect(p, token::LBRACKET);
let meta_item = parse_meta_item(p);
expect(p, token::RBRACKET);
let hi = p.get_hi_pos();
ret spanned(lo, hi, {style: style, value: *meta_item});
}
// Parse attributes that appear after the opening of an item, each terminated
// by a semicolon. In addition to a vector of inner attributes, this function
// also returns a vector that may contain the first outer attribute of the
// next item (since we can't know whether the attribute is an inner attribute
// of the containing item or an outer attribute of the first contained item
// until we see the semi).
fn parse_inner_attrs_and_next(p: &parser) ->
{inner: [ast::attribute], next: [ast::attribute]} {
let inner_attrs: [ast::attribute] = ~[];
let next_outer_attrs: [ast::attribute] = ~[];
while p.peek() == token::POUND {
let attr = parse_attribute(p, ast::attr_inner);
if p.peek() == token::SEMI {
p.bump();
inner_attrs += ~[attr];
} else {
// It's not really an inner attribute
let outer_attr =
spanned(attr.span.lo, attr.span.hi,
{style: ast::attr_outer, value: attr.node.value});
next_outer_attrs += ~[outer_attr];
break;
}
}
ret {inner: inner_attrs, next: next_outer_attrs};
}
// Parses one meta item: 'name = lit', 'name(list)', or a bare word.
fn parse_meta_item(p: &parser) -> @ast::meta_item {
let lo = p.get_lo_pos();
let ident = parse_ident(p);
alt p.peek() {
token::EQ. {
p.bump();
let lit = parse_lit(p);
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_name_value(ident, lit));
}
token::LPAREN. {
let inner_items = parse_meta_seq(p);
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_list(ident, inner_items));
}
_ {
let hi = p.get_hi_pos();
ret @spanned(lo, hi, ast::meta_word(ident));
}
}
}
// Parses a parenthesized, comma-separated list of meta items.
fn parse_meta_seq(p: &parser) -> [@ast::meta_item] {
ret parse_seq(token::LPAREN, token::RPAREN, some(token::COMMA),
parse_meta_item, p).node;
}
// Parses a meta-item list if one is present; otherwise yields an empty vec.
fn parse_optional_meta(p: &parser) -> [@ast::meta_item] {
alt p.peek() { token::LPAREN. { ret parse_meta_seq(p); } _ { ret ~[]; } }
}
// Parses a 'use' view item: crate name plus optional meta-item list.
fn parse_use(p: &parser) -> ast::view_item_ {
let ident = parse_ident(p);
let metadata = parse_optional_meta(p);
ret ast::view_item_use(ident, metadata, p.get_id());
}
// Parses the '::'-separated tail of an import path after its first segment.
// A trailing '*' makes it a glob import; def_ident, when present, is the
// local name the import was explicitly renamed to ('import x = a::b').
fn parse_rest_import_name(p: &parser, first: ast::ident,
def_ident: option::t[ast::ident]) ->
ast::view_item_ {
let identifiers: [ast::ident] = ~[first];
let glob: bool = false;
while true {
alt p.peek() {
token::SEMI. { break; }
token::MOD_SEP. {
if glob { p.fatal("cannot path into a glob"); }
p.bump();
}
_ { p.fatal("expecting '::' or ';'"); }
}
alt p.peek() {
token::IDENT(_, _) { identifiers += ~[parse_ident(p)]; }
// the lexer can't tell the different kinds of stars apart, so '*'
// arrives as the binary operator token
token::BINOP(token::STAR.) {
glob = true;
p.bump();
}
_ { p.fatal("expecting an identifier, or '*'"); }
}
}
alt def_ident {
some(i) {
if glob { p.fatal("globbed imports can't be renamed"); }
ret ast::view_item_import(i, identifiers, p.get_id());
}
_ {
if glob {
ret ast::view_item_import_glob(identifiers, p.get_id());
} else {
// No rename: the import binds the last path segment's name.
let len = ivec::len(identifiers);
ret ast::view_item_import(identifiers.(len - 1u), identifiers,
p.get_id());
}
}
}
}
// Parses the right-hand side of 'import name = path'.
fn parse_full_import_name(p: &parser, def_ident: ast::ident) ->
ast::view_item_ {
alt p.peek() {
token::IDENT(i, _) {
p.bump();
ret parse_rest_import_name(p, p.get_str(i), some(def_ident));
}
_ { p.fatal("expecting an identifier"); }
}
}
// Parses an 'import' view item, in either the renamed ('import x = a::b')
// or plain ('import a::b') form.
fn parse_import(p: &parser) -> ast::view_item_ {
alt p.peek() {
token::IDENT(i, _) {
p.bump();
alt p.peek() {
token::EQ. {
p.bump();
ret parse_full_import_name(p, p.get_str(i));
}
_ { ret parse_rest_import_name(p, p.get_str(i), none); }
}
}
_ { p.fatal("expecting an identifier"); }
}
}
// Parses an 'export' view item: just the exported identifier.
fn parse_export(p: &parser) -> ast::view_item_ {
let id = parse_ident(p);
ret ast::view_item_export(id, p.get_id());
}
// Parses one ';'-terminated view item: use, import, or export. The caller
// guarantees one of those keywords is next (see is_view_item), hence the
// bare 'fail' arm.
fn parse_view_item(p: &parser) -> @ast::view_item {
let lo = p.get_lo_pos();
let the_item =
if eat_word(p, "use") {
parse_use(p)
} else if (eat_word(p, "import")) {
parse_import(p)
} else if (eat_word(p, "export")) { parse_export(p) } else { fail };
// NOTE(review): hi is taken with get_lo_pos before the ';' is consumed,
// so the span ends where the semicolon begins — confirm this is intended.
let hi = p.get_lo_pos();
expect(p, token::SEMI);
ret @spanned(lo, hi, the_item);
}
// True when the next token is one of the view-item keywords.
fn is_view_item(p: &parser) -> bool {
alt p.peek() {
token::IDENT(sid, false) {
let st = p.get_str(sid);
ret str::eq(st, "use") || str::eq(st, "import") ||
str::eq(st, "export");
}
_ { ret false; }
}
}
// Parses a run of consecutive view items.
fn parse_view(p: &parser) -> [@ast::view_item] {
let items: [@ast::view_item] = ~[];
while is_view_item(p) { items += ~[parse_view_item(p)]; }
ret items;
}
// Same as parse_view, for native-mod bodies.
fn parse_native_view(p: &parser) -> [@ast::view_item] {
let items: [@ast::view_item] = ~[];
while is_view_item(p) { items += ~[parse_view_item(p)]; }
ret items;
}
// Parses a whole .rs source file into a crate.
fn parse_crate_from_source_file(input: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
let p = new_parser_from_file(sess, cfg, input, 0u, 0u, SOURCE_FILE);
ret parse_crate_mod(p, cfg, sess);
}
// Parses crate source held in a string, registering a fresh filemap for it
// under the given name.
fn parse_crate_from_source_str(name: &str, source: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
let ftype = SOURCE_FILE;
let filemap = codemap::new_filemap(name, 0u, 0u);
sess.cm.files += ~[filemap];
let itr = @interner::mk(str::hash, str::eq);
let rdr = lexer::new_reader(sess.cm, source, filemap, itr);
let p = new_parser(sess, cfg, rdr, ftype);
ret parse_crate_mod(p, cfg, sess);
}
// Parses a source module as a crate
fn parse_crate_mod(p: &parser, cfg: &ast::crate_cfg, sess: parse_sess) ->
@ast::crate {
let lo = p.get_lo_pos();
let crate_attrs = parse_inner_attrs_and_next(p);
let first_item_outer_attrs = crate_attrs.next;
let m = parse_mod_items(p, token::EOF, first_item_outer_attrs);
ret @spanned(lo, p.get_lo_pos(),
{directives: ~[],
module: m,
attrs: crate_attrs.inner,
config: p.get_cfg()});
}
// Consumes a string-literal token and returns its interned contents; any
// other token is a hard failure.
fn parse_str(p: &parser) -> ast::ident {
alt p.peek() {
token::LIT_STR(s) { p.bump(); ret p.get_str(s); }
_ { fail; }
}
}
// Logic for parsing crate files (.rc)
//
// Each crate file is a sequence of directives.
//
// Each directive imperatively extends its environment with 0 or more items.
// Parses a single crate directive: a 'mod' (source-file or directory form),
// an 'auth' declaration, or a view item.
fn parse_crate_directive(p: &parser, first_outer_attr: &[ast::attribute]) ->
ast::crate_directive {
// Collect the next attributes
let outer_attrs = first_outer_attr + parse_outer_attributes(p);
// In a crate file outer attributes are only going to apply to mods
let expect_mod = ivec::len(outer_attrs) > 0u;
let lo = p.get_lo_pos();
if expect_mod || is_word(p, "mod") {
expect_word(p, "mod");
let id = parse_ident(p);
// Optional '= "path"' naming the backing file or directory.
let file_opt =
alt p.peek() {
token::EQ. { p.bump(); some(parse_str(p)) }
_ { none }
};
alt p.peek() {
// mod x = "foo.rs";
token::SEMI. {
let hi = p.get_hi_pos();
p.bump();
ret spanned(lo, hi, ast::cdir_src_mod(id, file_opt, outer_attrs));
}
// mod x = "foo_dir" { ...directives... }
token::LBRACE. {
p.bump();
let inner_attrs = parse_inner_attrs_and_next(p);
let mod_attrs = outer_attrs + inner_attrs.inner;
let next_outer_attr = inner_attrs.next;
let cdirs =
parse_crate_directives(p, token::RBRACE, next_outer_attr);
let hi = p.get_hi_pos();
expect(p, token::RBRACE);
ret spanned(lo, hi,
ast::cdir_dir_mod(id, file_opt, cdirs, mod_attrs));
}
t { unexpected(p, t); }
}
} else if (eat_word(p, "auth")) {
let n = parse_path(p);
expect(p, token::EQ);
let a = parse_auth(p);
let hi = p.get_hi_pos();
expect(p, token::SEMI);
ret spanned(lo, hi, ast::cdir_auth(n, a));
} else if (is_view_item(p)) {
let vi = parse_view_item(p);
ret spanned(lo, vi.span.hi, ast::cdir_view_item(vi));
} else { ret p.fatal("expected crate directive"); }
}
// Parses directives up to the terminator token, threading a possible
// leading outer attribute into the first directive.
fn parse_crate_directives(p: &parser, term: token::token,
first_outer_attr: &[ast::attribute]) ->
[@ast::crate_directive] {
// This is pretty ugly. If we have an outer attribute then we can't accept
// seeing the terminator next, so if we do see it then fail the same way
// parse_crate_directive would
if ivec::len(first_outer_attr) > 0u && p.peek() == term {
expect_word(p, "mod");
}
let cdirs: [@ast::crate_directive] = ~[];
while p.peek() != term {
let cdir = @parse_crate_directive(p, first_outer_attr);
cdirs += ~[cdir];
}
ret cdirs;
}
// Parses a .rc crate file: reads its directives, then evaluates them
// (pulling in the referenced source files) to build the crate module.
fn parse_crate_from_crate_file(input: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
let p = new_parser_from_file(sess, cfg, input, 0u, 0u, CRATE_FILE);
let lo = p.get_lo_pos();
// Referenced source-module paths are resolved relative to the crate file.
let prefix = std::fs::dirname(p.get_filemap().name);
let leading_attrs = parse_inner_attrs_and_next(p);
let crate_attrs = leading_attrs.inner;
let first_cdir_attr = leading_attrs.next;
let cdirs = parse_crate_directives(p, token::EOF, first_cdir_attr);
let deps: [str] = ~[];
let cx =
@{p: p,
mode: eval::mode_parse,
mutable deps: deps,
sess: sess,
mutable chpos: p.get_chpos(),
mutable byte_pos: p.get_byte_pos(),
cfg: p.get_cfg()};
let m = eval::eval_crate_directives_to_mod(cx, cdirs, prefix);
let hi = p.get_hi_pos();
expect(p, token::EOF);
ret @spanned(lo, hi,
{directives: cdirs,
module: m,
attrs: crate_attrs,
config: p.get_cfg()});
}
// Dispatches on the file extension: .rc crate files vs .rs source files.
fn parse_crate_from_file(input: &str, cfg: &ast::crate_cfg,
sess: &parse_sess) -> @ast::crate {
if str::ends_with(input, ".rc") {
parse_crate_from_crate_file(input, cfg, sess)
} else if str::ends_with(input, ".rs") {
parse_crate_from_source_file(input, cfg, sess)
} else {
codemap::emit_error(none,
"unknown input file type: " + input,
sess.cm);
fail
}
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//
|
//
// SOS: the Stupid Operating System
// by Eliza Weisman (eliza@elizas.website)
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! Parsing and loading Executable and Linkable Format (ELF) 32- and 64-bit
//! binaries.
//!
//! For more information on the ELF format, refer to:
//!
//! + [Wikipedia](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format)
//! + The [OS Dev Wiki](http://wiki.osdev.org/ELF)
//! + The [ELF Format Specification](elfspec)
//!
//! [elfspec]: http://www.skyfree.org/linux/references/ELF_Format.pdf
#![feature(core_intrinsics)]
#![feature(try_from)]
#![no_std]
#[macro_use] extern crate bitflags;
#[macro_use] extern crate macro_attr;
extern crate memory;
use core::{ intrinsics, ops, mem, slice, convert };
use core::convert::TryFrom;
use memory::{ FrameRange, PhysicalPage };
// Generates trivial getter methods of the form
// `fn $name(&self) -> $ty { self.$name as $ty }`.
// Arms come in pub/non-pub pairs; the first two arms peel one getter off
// the input and recurse on `$rest`, the next two handle a final getter
// without trailing tokens, and the empty arm terminates the recursion.
// NOTE: arm ordering and the exact token shapes are load-bearing for
// macro_rules matching — do not reorder.
macro_rules! impl_getters {
($(#[$attr:meta])* pub fn $name:ident(&self) -> $ty:ty; $($rest:tt)*) => {
$(#[$attr])* #[inline] pub fn $name(&self) -> $ty { self.$name as $ty }
impl_getters!{ $( $rest )* }
};
($(#[$attr:meta])* fn $name:ident(&self) -> $ty:ty; $($rest:tt)*) => {
$(#[$attr])* #[inline] fn $name(&self) -> $ty { self.$name as $ty }
impl_getters!{ $( $rest )* }
};
( $(#[$attr:meta])* pub fn $name: ident (&self)-> $ty:ty; ) => {
$(#[$attr])* #[inline] pub fn $name(&self) -> $ty { self.$name as $ty }
};
( $(#[$attr:meta])* fn $name: ident (&self)-> $ty:ty; ) => {
$(#[$attr])* #[inline] fn $name(&self) -> $ty { self.$name as $ty }
};
() => {};
}
pub mod section;
pub mod file;
pub mod program;
/// An ELF section header.
// NOTE(review): this aliases a *trait* path with an associated-type binding,
// which relies on the pinned 2017 nightly's behavior — confirm it still
// compiles if the toolchain is bumped.
pub type Section<W> = section::Header<Word = W>;
/// An ELF program header with word type `W`.
pub type ProgramHeader<W> = program::Header<Word = W>;
/// An ELF header file.
pub type FileHeader<W> = file::HeaderRepr<W>;
/// TODO: should ELF have its own error type?
pub type ElfResult<T> = Result<T, &'static str>;
/// Marker trait for the integer word size of an ELF binary: implemented
/// for `u32` (ELF32) and `u64` (ELF64) only.
pub trait ElfWord: Sized + Copy + Clone
+ ops::Add<Self> + ops::Sub<Self>
+ ops::Mul<Self> + ops::Div<Self>
+ ops::Shl<Self> + ops::Shr<Self> { }
impl ElfWord for u64 { }
impl ElfWord for u32 { }
// The default word size matches the compilation target's pointer width.
#[cfg(target_pointer_width = "32")]
type DefaultWord = u32;
#[cfg(target_pointer_width = "64")]
type DefaultWord = u64;
/// Hack to make the type-system let me do what I want
trait ValidatesWord<Word: ElfWord> {
fn check(&self) -> ElfResult<()>;
}
/// A handle on a parsed ELF binary
/// TODO: do we want this to own a HashMap of section names to section headers,
/// to speed up section lookup?
// - eliza, 03/08/2017
// All header fields borrow from the single `binary` slice, so an `Image`
// is only valid for the lifetime of the bytes it was parsed from.
#[derive(Debug)]
pub struct Image< 'bytes // lifetime of the byte slice
, Word = DefaultWord // default to machine's pointer size
, ProgHeader = ProgramHeader<Word> // same word type
, SectHeader = Section<Word>
, Header = FileHeader<Word> // must have same word type
> // jesus christ
where Word: ElfWord + 'bytes
, ProgHeader: program::Header<Word = Word> + Sized + 'bytes
, SectHeader: section::Header<Word = Word> + Sized + 'bytes
, Header: file::Header<Word = Word> + 'bytes
{
/// the binary's [file header](file/trait.Header.html)
pub header: &'bytes Header
, /// references to each [section header](section/struct.Header.html)
pub sections: &'bytes [SectHeader]
, /// references to each [program header](program/trait.Header.html)
pub program_headers: &'bytes [ProgHeader]
, /// the raw binary contents of the ELF binary.
/// note that this includes the _entire_ binary contents of the file,
/// so the file header and each section header is included in this slice.
binary: &'bytes [u8]
}
impl <'a, Word, ProgHeader, SectHeader, Header>
    Image<'a, Word, ProgHeader, SectHeader, Header>
where Word: ElfWord + 'a
    , ProgHeader: program::Header<Word = Word> + Sized + 'a
    , SectHeader: section::Header<Word = Word> + Sized + 'a
    , Header: file::Header<Word = Word> + 'a
{
    /// Returns the section header [string table].
    ///
    /// The table is read from the tail of the raw binary, starting at the
    /// offset recorded in the file header's `sh_str_idx` field.
    ///
    /// [string table]: section/struct.StrTable.html
    pub fn sh_str_table(&'a self) -> section::StrTable<'a> {
        // TODO: do we want to validate that the string table index is
        //       reasonable (e.g. it's not longer than the binary)?
        //          - eliza, 03/08/2017
        // TODO: do we want to cache a ref to the string table?
        //          - eliza, 03/08/2017
        let table_start = self.header.sh_str_idx();
        let table_bytes = &self.binary[table_start..];
        section::StrTable::from(table_bytes)
    }
}
impl<'a, Word, PH, SH, FH> TryFrom<&'a [u8]> for Image<'a, Word, PH, SH, FH>
where Word: ElfWord + 'a
    , PH: program::Header<Word = Word> + 'a
    , SH: section::Header<Word = Word> + 'a
    , FH: file::Header<Word = Word> + 'a
    , &'a FH: convert::TryFrom<&'a [u8], Error = &'static str>
{
    type Error = &'static str;

    /// Parses an `Image` from a raw byte slice.
    ///
    /// The file header is decoded first; the section- and program-header
    /// table locations and counts are then read out of it. The section
    /// table is extracted before the program table, so if both are bad the
    /// section error is the one reported.
    fn try_from(bytes: &'a [u8]) -> ElfResult<Self> {
        let file_header: &'a FH = <&'a FH>::try_from(bytes)?;
        // The extractions are unsafe because raw bytes are reinterpreted as
        // header structs; the ranges and counts come from the file header.
        let section_table = unsafe {
            extract_from_slice::<SH>( &bytes[file_header.sh_range()]
                                    , 0
                                    , file_header.sh_count())?
        };
        let program_table = unsafe {
            extract_from_slice::<PH>( &bytes[file_header.ph_range()]
                                    , 0
                                    , file_header.ph_count())?
        };
        Ok(Image { header: file_header
                 , sections: section_table
                 , program_headers: program_table
                 , binary: bytes
                 })
    }
}
/// Extract `n` instances of type `T` from a byte slice.
///
/// This is essentially just a _slightly_ safer wrapper around
/// [`slice::from_raw_parts`]. Unlike `from_raw_parts`, this function takes
/// a valid byte slice, rather than a pointer. Therefore, some of the safety
/// issues with `from_raw_parts` are avoided:
///
/// + the lifetime (`'slice`) of the returned slice should be the same as the
/// lifetime of the input slice (`data`), rather than inferred arbitrarily.
/// + this function will panic rather than reading past the end of the slice.
///
/// # Arguments
///
/// + `data`: the byte slice to extract a slice of `&[T]`s from
/// + `offset`: a start offset into `data`
/// + `n`: the number of instances of `T` which should be contained
/// in `data[offset..]`
///
/// # Safety
///
/// While this function is safer than [`slice::from_raw_parts`],
/// it is still unsafe for the following reasons:
///
/// + The contents of `data` may not be able to be interpreted as instances of
/// type `T`.
///
/// # Caveats
///
/// + If `n` == 0, this will give you an `&[]`. Just a warning.
// thanks to Max for making me figure this out.
/// + `offset` must be aligned on a `T`-sized boundary.
///
/// # Panics
///
/// + If the index `offset` is longer than `T`
///
/// TODO: rewrite this as a `TryFrom` implementation (see issue #85)
// - eliza, 03/09/2017
/// wait, possibly we should NOT do that. actually we should
/// almost certainly not do that. since this function is unsafe,
/// but `TryFrom` is not, and because this would be WAY generic.
// - eliza, 03/09/2017
/// TODO: is this general enough to move into util?
// - eliza, 03/09/2017
/// TODO: refactor this to take a `RangeArgument`?
// - eliza, 03/13/2017
/// or, we could just remove the offset and expect the caller to
/// offset the slice?
// - eliza, 03/14/2017
///
/// [`slice::from_raw_parts`]: https://doc.rust-lang.org/stable/std/slice/fn.from_raw_parts.html
unsafe fn extract_from_slice<'slice, T: Sized>( data: &'slice [u8]
                                              , offset: usize
                                              , n: usize)
                                              -> ElfResult<&'slice [T]> {
    // Number of bytes the caller is asking us to reinterpret. `checked_mul`
    // guards against `size_of::<T>() * n` wrapping around: previously an
    // enormous `n` could overflow the multiplication, slip past the length
    // check, and let `from_raw_parts` fabricate an out-of-bounds slice.
    let needed = match mem::size_of::<T>().checked_mul(n) {
        Some(bytes) => bytes,
        None =>
            return Err("extract_from_slice: Slice too short to contain \
                        n instances of T!"),
    };
    if offset % mem::align_of::<T>() != 0 {
        // TODO: these error messages don't contain as much information as they
        //       used to, since the return type is `&'static str` that can't be
        //       dynamically formatted as the panic was. refactor this?
        //       (e.g. should ELF get its own error type?)
        //          - eliza, 03/15/2017
        Err("extract_from_slice: Offset not aligned on type T sized boundary!")
    } else if data.len().checked_sub(offset)
                        .map_or(true, |available| available < needed) {
        // `checked_sub` also rejects `offset > data.len()`, which previously
        // underflowed `data.len() - offset` (a panic in debug builds) instead
        // of returning an error.
        Err("extract_from_slice: Slice too short to contain n instances of T!")
    } else {
        // Both checks above passed, so `data[offset..]` is in bounds, holds
        // at least `size_of::<T>() * n` bytes, and is suitably aligned.
        Ok(slice::from_raw_parts(data[offset..].as_ptr() as *const T, n))
    }
}
impl<'a, W: ElfWord> convert::Into<FrameRange> for &'a Section<W> {
    /// Converts a section header into the range of physical frames that
    /// holds the section's data.
    #[inline]
    fn into(self) -> FrameRange {
        use memory::PAddr;
        // Widen both endpoints to u64 before wrapping them in `PAddr`, so
        // the conversion behaves identically on 32- and 64-bit targets.
        let first_addr = PAddr::from(self.address() as u64);
        let last_addr = PAddr::from(self.end_address() as u64);
        PhysicalPage::from(first_addr) .. PhysicalPage::from(last_addr)
    }
}
fix(elf): make converting elf Sections into FrameRanges cross-platform
(but horrendously ugly; I should fix that eventually)
//
// SOS: the Stupid Operating System
// by Eliza Weisman (eliza@elizas.website)
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! Parsing and loading Executable and Linkable Format (ELF) 32- and 64-bit
//! binaries.
//!
//! For more information on the ELF format, refer to:
//!
//! + [Wikipedia](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format)
//! + The [OS Dev Wiki](http://wiki.osdev.org/ELF)
//! + The [ELF Format Specification](elfspec)
//!
//! [elfspec]: http://www.skyfree.org/linux/references/ELF_Format.pdf
#![feature(core_intrinsics)]
#![feature(try_from)]
#![no_std]
#[macro_use] extern crate bitflags;
#[macro_use] extern crate macro_attr;
extern crate memory;
use core::{ intrinsics, ops, mem, slice, convert };
use core::convert::TryFrom;
use memory::{ FrameRange, PhysicalPage };
// Generates trivial getter methods of the form
// `fn $name(&self) -> $ty { self.$name as $ty }`.
// Arms come in pub/non-pub pairs; the first two arms peel one getter off
// the input and recurse on `$rest`, the next two handle a final getter
// without trailing tokens, and the empty arm terminates the recursion.
// NOTE: arm ordering and the exact token shapes are load-bearing for
// macro_rules matching — do not reorder.
macro_rules! impl_getters {
($(#[$attr:meta])* pub fn $name:ident(&self) -> $ty:ty; $($rest:tt)*) => {
$(#[$attr])* #[inline] pub fn $name(&self) -> $ty { self.$name as $ty }
impl_getters!{ $( $rest )* }
};
($(#[$attr:meta])* fn $name:ident(&self) -> $ty:ty; $($rest:tt)*) => {
$(#[$attr])* #[inline] fn $name(&self) -> $ty { self.$name as $ty }
impl_getters!{ $( $rest )* }
};
( $(#[$attr:meta])* pub fn $name: ident (&self)-> $ty:ty; ) => {
$(#[$attr])* #[inline] pub fn $name(&self) -> $ty { self.$name as $ty }
};
( $(#[$attr:meta])* fn $name: ident (&self)-> $ty:ty; ) => {
$(#[$attr])* #[inline] fn $name(&self) -> $ty { self.$name as $ty }
};
() => {};
}
pub mod section;
pub mod file;
pub mod program;
/// An ELF section header.
// NOTE(review): this aliases a *trait* path with an associated-type binding,
// which relies on the pinned 2017 nightly's behavior — confirm it still
// compiles if the toolchain is bumped.
pub type Section<W> = section::Header<Word = W>;
/// An ELF program header with word type `W`.
pub type ProgramHeader<W> = program::Header<Word = W>;
/// An ELF header file.
pub type FileHeader<W> = file::HeaderRepr<W>;
/// TODO: should ELF have its own error type?
pub type ElfResult<T> = Result<T, &'static str>;
/// Marker trait for the integer word size of an ELF binary: implemented
/// for `u32` (ELF32) and `u64` (ELF64) only.
pub trait ElfWord: Sized + Copy + Clone
+ ops::Add<Self> + ops::Sub<Self>
+ ops::Mul<Self> + ops::Div<Self>
+ ops::Shl<Self> + ops::Shr<Self> { }
impl ElfWord for u64 { }
impl ElfWord for u32 { }
// The default word size matches the compilation target's pointer width.
#[cfg(target_pointer_width = "32")]
type DefaultWord = u32;
#[cfg(target_pointer_width = "64")]
type DefaultWord = u64;
/// Hack to make the type-system let me do what I want
trait ValidatesWord<Word: ElfWord> {
fn check(&self) -> ElfResult<()>;
}
/// A handle on a parsed ELF binary
/// TODO: do we want this to own a HashMap of section names to section headers,
/// to speed up section lookup?
// - eliza, 03/08/2017
// All header fields borrow from the single `binary` slice, so an `Image`
// is only valid for the lifetime of the bytes it was parsed from.
#[derive(Debug)]
pub struct Image< 'bytes // lifetime of the byte slice
, Word = DefaultWord // default to machine's pointer size
, ProgHeader = ProgramHeader<Word> // same word type
, SectHeader = Section<Word>
, Header = FileHeader<Word> // must have same word type
> // jesus christ
where Word: ElfWord + 'bytes
, ProgHeader: program::Header<Word = Word> + Sized + 'bytes
, SectHeader: section::Header<Word = Word> + Sized + 'bytes
, Header: file::Header<Word = Word> + 'bytes
{
/// the binary's [file header](file/trait.Header.html)
pub header: &'bytes Header
, /// references to each [section header](section/struct.Header.html)
pub sections: &'bytes [SectHeader]
, /// references to each [program header](program/trait.Header.html)
pub program_headers: &'bytes [ProgHeader]
, /// the raw binary contents of the ELF binary.
/// note that this includes the _entire_ binary contents of the file,
/// so the file header and each section header is included in this slice.
binary: &'bytes [u8]
}
impl <'a, Word, ProgHeader, SectHeader, Header>
Image<'a, Word, ProgHeader, SectHeader, Header>
where Word: ElfWord + 'a
, ProgHeader: program::Header<Word = Word> + Sized + 'a
, SectHeader: section::Header<Word = Word> + Sized + 'a
, Header: file::Header<Word = Word> + 'a
{
/// Returns the section header [string table].
///
/// [string table]: section/struct.StrTable.html
pub fn sh_str_table(&'a self) -> section::StrTable<'a> {
// TODO: do we want to validate that the string table index is
// reasonable (e.g. it's not longer than the binary)?
// - eliza, 03/08/2017
// TODO: do we want to cache a ref to the string table?
// - eliza, 03/08/2017
// NOTE(review): `sh_str_idx()` is used here as a *byte offset* into
// the binary; confirm it is an offset rather than a section-header
// index. Also note the slice below panics if it is out of range.
section::StrTable::from(&self.binary[self.header.sh_str_idx()..])
}
}
impl<'a, Word, PH, SH, FH> TryFrom<&'a [u8]> for Image<'a, Word, PH, SH, FH>
where Word: ElfWord + 'a
, PH: program::Header<Word = Word> + 'a
, SH: section::Header<Word = Word> + 'a
, FH: file::Header<Word = Word> + 'a
, &'a FH: convert::TryFrom<&'a [u8], Error = &'static str>
{
type Error = &'static str;
/// Parses an `Image` from raw bytes: the file header is parsed first,
/// then the section- and program-header tables are reinterpreted in
/// place from the byte ranges the file header describes.
fn try_from(bytes: &'a [u8]) -> ElfResult<Self> {
// Parse the ELF file header from the front of the binary.
let header: &'a FH = <&'a FH>::try_from(bytes)?;
// Reinterpret the section-header table bytes as `SH` values.
// NOTE(review): `extract_from_slice` only checks alignment of the
// *offset* (0 here), not of the slice's base pointer — confirm the
// header ranges guarantee suitable alignment for `SH`/`PH`.
let sections = unsafe { extract_from_slice::<SH>(
&bytes[header.sh_range()]
, 0
, header.sh_count()
)? };
// Likewise for the program-header table.
let prog_headers = unsafe { extract_from_slice::<PH>(
&bytes[header.ph_range()]
, 0
, header.ph_count()
)? };
Ok(Image { header: header
, sections: sections
, program_headers: prog_headers
, binary: bytes
})
}
}
/// Extract `n` instances of type `T` from a byte slice.
///
/// This is essentially just a _slightly_ safer wrapper around
/// [`slice::from_raw_parts`]. Unlike `from_raw_parts`, this function takes
/// a valid byte slice, rather than a pointer. Therefore, some of the safety
/// issues with `from_raw_parts` are avoided:
///
/// + the lifetime (`'slice`) of the returned slice should be the same as the
///   lifetime of the input slice (`data`), rather than inferred arbitrarily.
/// + this function returns an `Err` rather than reading past the end of the
///   slice.
///
/// # Arguments
///
/// + `data`: the byte slice to extract a slice of `&[T]`s from
/// + `offset`: a start offset into `data`
/// + `n`: the number of instances of `T` which should be contained
///   in `data[offset..]`
///
/// # Errors
///
/// + If `offset` is not aligned on a `T`-sized boundary.
/// + If `offset` is past the end of `data` (previously this case underflowed
///   the bounds arithmetic and panicked instead of returning `Err`).
/// + If `data[offset..]` is too short to hold `n` instances of `T`,
///   including the case where `size_of::<T>() * n` overflows `usize`.
///
/// # Safety
///
/// While this function is safer than [`slice::from_raw_parts`],
/// it is still unsafe for the following reasons:
///
/// + The contents of `data` may not be able to be interpreted as instances of
///   type `T`.
///
/// # Caveats
///
/// + If `n` == 0, this will give you an `&[]`. Just a warning.
// thanks to Max for making me figure this out.
///
/// [`slice::from_raw_parts`]: https://doc.rust-lang.org/stable/std/slice/fn.from_raw_parts.html
unsafe fn extract_from_slice<'slice, T: Sized>( data: &'slice [u8]
, offset: usize
, n: usize)
-> ElfResult<&'slice [T]> {
if offset % mem::align_of::<T>() != 0 {
// TODO: these error messages don't contain as much information as they
// used to, since the return type is `&'static str` that can't be
// dynamically formatted as the panic was. refactor this?
// (e.g. should ELF get its own error type?)
// - eliza, 03/15/2017
Err("extract_from_slice: Offset not aligned on type T sized boundary!")
} else if offset > data.len() {
// Check this *before* subtracting below, so that `data.len() - offset`
// cannot underflow (which would panic in debug builds and wrap around
// in release builds).
Err("extract_from_slice: Offset out of bounds of the slice!")
} else if mem::size_of::<T>().checked_mul(n)
.map_or(true, |needed| data.len() - offset < needed) {
// `checked_mul` also guards against `size_of::<T>() * n` overflowing
// `usize`; on overflow the slice certainly can't hold `n` elements.
Err("extract_from_slice: Slice too short to contain n instances of T!")
} else {
// SAFETY: we have verified above that `data[offset..]` is long enough
// to hold `n` instances of `T` and that `offset` respects `T`'s
// alignment (note: as in the original code, this assumes `data`'s
// base pointer itself is suitably aligned). The returned slice
// borrows `data`, so the `'slice` lifetime is correct. The caller
// remains responsible for the bytes being valid `T` values.
Ok(slice::from_raw_parts(data[offset..].as_ptr() as *const T, n))
}
}
/// Converts a borrowed section header into the range of physical frames it
/// spans, for use by the memory subsystem.
impl<'a, W: ElfWord> convert::Into<FrameRange> for &'a Section<W> {
#[inline]
fn into(self) -> FrameRange {
use memory::{Addr, PAddr};
// TODO: refactor this disgusting type cast monstrosity hell
// NOTE(review): the `as` casts assume the section's start/end addresses
// fit in `PAddr`'s representation — confirm for 32-bit targets.
let start = PhysicalPage::from(self.address() as <PAddr as Addr>::Repr);
let end = PhysicalPage::from(self.end_address() as <PAddr as Addr>::Repr);
start .. end
}
}
|
//! Stream-based consumer implementation.
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};
use std::time::Duration;
use futures::{ready, Stream};
use slab::Slab;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{ClientContext, NativeClient};
use crate::config::{ClientConfig, FromClientConfig, FromClientConfigAndContext, RDKafkaLogLevel};
use crate::consumer::base_consumer::BaseConsumer;
use crate::consumer::{Consumer, ConsumerContext, DefaultConsumerContext, Rebalance};
use crate::error::{KafkaError, KafkaResult};
use crate::message::BorrowedMessage;
use crate::statistics::Statistics;
use crate::topic_partition_list::TopicPartitionList;
#[cfg(feature = "tokio")]
use crate::util::TokioRuntime;
use crate::util::{AsyncRuntime, NativePtr, Timeout};
/// The [`ConsumerContext`] used by the [`StreamConsumer`]. This context will
/// automatically wake up the message stream when new data is available.
///
/// This type is not intended to be used directly. It will be automatically
/// created by the `StreamConsumer` when necessary.
pub struct StreamConsumerContext<C: ConsumerContext + 'static> {
// The user-provided context; every callback below is forwarded to it.
inner: C,
// One entry per live `MessageStream`, keyed by the stream's slab slot.
// Shared with the streams so the queue-nonempty callback can wake them.
wakers: Arc<Mutex<Slab<Option<Waker>>>>,
}
impl<C: ConsumerContext + 'static> StreamConsumerContext<C> {
/// Wraps `inner`, starting with an empty waker table.
fn new(inner: C) -> StreamConsumerContext<C> {
StreamConsumerContext {
inner,
wakers: Arc::new(Mutex::new(Slab::new())),
}
}
}
// `ClientContext`: pure delegation to the wrapped user context.
impl<C: ConsumerContext + 'static> ClientContext for StreamConsumerContext<C> {
fn log(&self, level: RDKafkaLogLevel, fac: &str, log_message: &str) {
self.inner.log(level, fac, log_message)
}
fn stats(&self, statistics: Statistics) {
self.inner.stats(statistics)
}
fn error(&self, error: KafkaError, reason: &str) {
self.inner.error(error, reason)
}
}
// `ConsumerContext`: delegation as above, except that
// `message_queue_nonempty_callback` additionally wakes every stream that is
// currently parked waiting for messages.
impl<C: ConsumerContext + 'static> ConsumerContext for StreamConsumerContext<C> {
fn rebalance(
&self,
native_client: &NativeClient,
err: RDKafkaRespErr,
tpl: &mut TopicPartitionList,
) {
self.inner.rebalance(native_client, err, tpl)
}
fn pre_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {
self.inner.pre_rebalance(rebalance)
}
fn post_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {
self.inner.post_rebalance(rebalance)
}
fn commit_callback(&self, result: KafkaResult<()>, offsets: &TopicPartitionList) {
self.inner.commit_callback(result, offsets)
}
fn main_queue_min_poll_interval(&self) -> Timeout {
self.inner.main_queue_min_poll_interval()
}
fn message_queue_nonempty_callback(&self) {
// Wake and clear every registered waker; each stream re-registers
// its waker on its next call to `poll_next`.
let mut wakers = self.wakers.lock().unwrap();
for (_, waker) in wakers.iter_mut() {
if let Some(waker) = waker.take() {
waker.wake();
}
}
self.inner.message_queue_nonempty_callback()
}
}
/// A Kafka consumer implementing [`futures::Stream`].
///
/// Yields `KafkaResult<BorrowedMessage>` items. When no message is ready the
/// stream parks itself and is woken either by the context's queue-nonempty
/// callback or by the poll-interval timer.
pub struct MessageStream<
'a,
C,
// Ugly, but this provides backwards compatibility when the `tokio` feature
// is enabled, as it is by default.
#[cfg(feature = "tokio")] R = TokioRuntime,
#[cfg(not(feature = "tokio"))] R,
> where
C: ConsumerContext + 'static,
R: AsyncRuntime,
{
// The consumer being polled; borrowed for the life of the stream.
consumer: &'a StreamConsumer<C>,
// How long to sleep between polls while the queue stays empty.
interval: Duration,
// In-flight sleep future, if any. Boxed and pinned — see the SAFETY
// comment on `ensure_delay`/`clear_delay` below.
delay: Pin<Box<Option<R::Delay>>>,
// This stream's slot in the shared waker slab; released on drop.
slot: usize,
}
impl<'a, C, R> MessageStream<'a, C, R>
where
C: ConsumerContext + 'static,
R: AsyncRuntime,
{
/// Creates a stream over `consumer`, reserving a waker slot in the
/// consumer context's slab.
fn new(consumer: &'a StreamConsumer<C>, interval: Duration) -> MessageStream<'a, C, R> {
let slot = {
let context = consumer.get_base_consumer().context();
let mut wakers = context.wakers.lock().expect("lock poisoned");
wakers.insert(None)
};
MessageStream {
consumer,
interval,
delay: Box::pin(None),
slot,
}
}
// Shorthand for the consumer's stream context.
fn context(&self) -> &StreamConsumerContext<C> {
self.consumer.get_base_consumer().context()
}
// Raw librdkafka client handle, used for the FFI poll call below.
fn client_ptr(&self) -> *mut RDKafka {
self.consumer.client().native_ptr()
}
// Installs `waker` in this stream's slot so the queue-nonempty callback
// can wake us.
fn set_waker(&self, waker: Waker) {
let mut wakers = self.context().wakers.lock().expect("lock poisoned");
wakers[self.slot].replace(waker);
}
// Non-blocking poll (timeout 0); `None` means no message was ready.
fn poll(&self) -> Option<KafkaResult<BorrowedMessage<'a>>> {
unsafe {
NativePtr::from_ptr(rdsys::rd_kafka_consumer_poll(self.client_ptr(), 0))
.map(|p| BorrowedMessage::from_consumer(p, self.consumer))
}
}
// SAFETY: All access to `self.delay` occurs via the following two
// functions. These functions are careful to never move out of `self.delay`.
// (They can *drop* the future stored in `self.delay`, but that is
// permitted.) They never return a non-pinned pointer to the contents of
// `self.delay`.
fn ensure_delay(&mut self, delay: R::Delay) -> Pin<&mut R::Delay> {
unsafe { Pin::new_unchecked(self.delay.as_mut().get_unchecked_mut().get_or_insert(delay)) }
}
fn clear_delay(&mut self) {
unsafe { *self.delay.as_mut().get_unchecked_mut() = None }
}
}
impl<'a, C, R> Stream for MessageStream<'a, C, R>
where
C: ConsumerContext + 'a,
R: AsyncRuntime,
{
type Item = KafkaResult<BorrowedMessage<'a>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// Unconditionally store the waker so that we are woken up if the queue
// flips from empty to non-empty. We have to store the waker on every
// call to poll in case this future migrates between tasks. We also need
// to store the waker *before* calling poll to avoid a race where `poll`
// returns None to indicate that the queue is empty, but the queue
// becomes non-empty before we've installed the waker.
self.set_waker(cx.waker().clone());
match self.poll() {
// Queue empty: arm the poll-interval timer; when it (or the
// queue-nonempty wakeup) fires, `poll_next` runs again from the top.
None => loop {
let delay = R::delay_for(self.interval);
ready!(self.ensure_delay(delay).poll(cx));
self.clear_delay();
},
Some(message) => {
self.clear_delay();
Poll::Ready(Some(message))
}
}
}
}
impl<'a, C, R> Drop for MessageStream<'a, C, R>
where
C: ConsumerContext + 'static,
R: AsyncRuntime,
{
/// Releases this stream's entry in the shared waker slab so the slot can
/// be reused by a later stream.
fn drop(&mut self) {
self.context()
.wakers
.lock()
.expect("lock poisoned")
.remove(self.slot);
}
}
/// A Kafka consumer providing a [`futures::Stream`] interface.
///
/// This consumer doesn't need to be polled explicitly since `await`ing the
/// stream returned by [`StreamConsumer::start`] will implicitly poll the
/// consumer.
#[must_use = "Consumer polling thread will stop immediately if unused"]
pub struct StreamConsumer<C: ConsumerContext + 'static = DefaultConsumerContext> {
// NOTE(review): the `Arc` looks unnecessary — `MessageStream` only
// *borrows* this consumer and nothing here clones the `Arc`. Confirm no
// external code depends on the shared ownership before removing it.
consumer: Arc<BaseConsumer<StreamConsumerContext<C>>>,
}
impl<C: ConsumerContext> Consumer<StreamConsumerContext<C>> for StreamConsumer<C> {
/// Borrows the wrapped `BaseConsumer` out of the `Arc`.
fn get_base_consumer(&self) -> &BaseConsumer<StreamConsumerContext<C>> {
&*self.consumer
}
}
/// Creates a `StreamConsumer` from a [`ClientConfig`], using the
/// [`DefaultConsumerContext`].
impl FromClientConfig for StreamConsumer {
fn from_config(config: &ClientConfig) -> KafkaResult<StreamConsumer> {
StreamConsumer::from_config_and_context(config, DefaultConsumerContext)
}
}
/// Creates a new `StreamConsumer` starting from a [`ClientConfig`] and a
/// user-supplied consumer context.
impl<C: ConsumerContext> FromClientConfigAndContext<C> for StreamConsumer<C> {
fn from_config_and_context(
config: &ClientConfig,
context: C,
) -> KafkaResult<StreamConsumer<C>> {
// Wrap the user's context so message streams can hook the
// queue-nonempty callback.
let context = StreamConsumerContext::new(context);
let stream_consumer = StreamConsumer {
consumer: Arc::new(BaseConsumer::from_config_and_context(config, context)?),
};
// NOTE(review): presumably this redirects the main event queue to the
// consumer queue so a single poll serves both — confirm against
// librdkafka's `rd_kafka_poll_set_consumer` documentation.
unsafe {
rdsys::rd_kafka_poll_set_consumer(stream_consumer.consumer.client().native_ptr())
};
Ok(stream_consumer)
}
}
impl<C: ConsumerContext> StreamConsumer<C> {
/// Starts the stream consumer with default configuration (100ms polling
/// interval and no `NoMessageReceived` notifications).
///
/// **Note:** this method must be called from within the context of a Tokio
/// runtime.
#[cfg(feature = "tokio")]
#[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
pub fn start(&self) -> MessageStream<'_, C, TokioRuntime> {
self.start_with(Duration::from_millis(100))
}
/// Starts the stream consumer with the specified poll interval.
#[cfg(feature = "tokio")]
#[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
pub fn start_with(&self, poll_interval: Duration) -> MessageStream<'_, C, TokioRuntime> {
// TODO: verify called once
self.start_with_runtime(poll_interval)
}
/// Like [`StreamConsumer::start_with`], but with a customizable
/// asynchronous runtime.
///
/// See the [`AsyncRuntime`] trait for the details on the interface the
/// runtime must satisfy.
pub fn start_with_runtime<R>(&self, poll_interval: Duration) -> MessageStream<'_, C, R>
where
R: AsyncRuntime,
{
MessageStream::new(self, poll_interval)
}
}
Remove the unnecessary `Arc` around the `BaseConsumer` in `StreamConsumer`: `MessageStream` only borrows the consumer, so shared ownership was never needed.
//! Stream-based consumer implementation.
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};
use std::time::Duration;
use futures::{ready, Stream};
use slab::Slab;
use rdkafka_sys as rdsys;
use rdkafka_sys::types::*;
use crate::client::{ClientContext, NativeClient};
use crate::config::{ClientConfig, FromClientConfig, FromClientConfigAndContext, RDKafkaLogLevel};
use crate::consumer::base_consumer::BaseConsumer;
use crate::consumer::{Consumer, ConsumerContext, DefaultConsumerContext, Rebalance};
use crate::error::{KafkaError, KafkaResult};
use crate::message::BorrowedMessage;
use crate::statistics::Statistics;
use crate::topic_partition_list::TopicPartitionList;
#[cfg(feature = "tokio")]
use crate::util::TokioRuntime;
use crate::util::{AsyncRuntime, NativePtr, Timeout};
/// The [`ConsumerContext`] used by the [`StreamConsumer`]. This context will
/// automatically wake up the message stream when new data is available.
///
/// This type is not intended to be used directly. It will be automatically
/// created by the `StreamConsumer` when necessary.
pub struct StreamConsumerContext<C: ConsumerContext + 'static> {
// The user-provided context; every callback below is forwarded to it.
inner: C,
// One entry per live `MessageStream`, keyed by the stream's slab slot.
// Shared with the streams so the queue-nonempty callback can wake them.
wakers: Arc<Mutex<Slab<Option<Waker>>>>,
}
impl<C: ConsumerContext + 'static> StreamConsumerContext<C> {
/// Wraps `inner`, starting with an empty waker table.
fn new(inner: C) -> StreamConsumerContext<C> {
StreamConsumerContext {
inner,
wakers: Arc::new(Mutex::new(Slab::new())),
}
}
}
// `ClientContext`: pure delegation to the wrapped user context.
impl<C: ConsumerContext + 'static> ClientContext for StreamConsumerContext<C> {
fn log(&self, level: RDKafkaLogLevel, fac: &str, log_message: &str) {
self.inner.log(level, fac, log_message)
}
fn stats(&self, statistics: Statistics) {
self.inner.stats(statistics)
}
fn error(&self, error: KafkaError, reason: &str) {
self.inner.error(error, reason)
}
}
// `ConsumerContext`: delegation as above, except that
// `message_queue_nonempty_callback` additionally wakes every stream that is
// currently parked waiting for messages.
impl<C: ConsumerContext + 'static> ConsumerContext for StreamConsumerContext<C> {
fn rebalance(
&self,
native_client: &NativeClient,
err: RDKafkaRespErr,
tpl: &mut TopicPartitionList,
) {
self.inner.rebalance(native_client, err, tpl)
}
fn pre_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {
self.inner.pre_rebalance(rebalance)
}
fn post_rebalance<'a>(&self, rebalance: &Rebalance<'a>) {
self.inner.post_rebalance(rebalance)
}
fn commit_callback(&self, result: KafkaResult<()>, offsets: &TopicPartitionList) {
self.inner.commit_callback(result, offsets)
}
fn main_queue_min_poll_interval(&self) -> Timeout {
self.inner.main_queue_min_poll_interval()
}
fn message_queue_nonempty_callback(&self) {
// Wake and clear every registered waker; each stream re-registers
// its waker on its next call to `poll_next`.
let mut wakers = self.wakers.lock().unwrap();
for (_, waker) in wakers.iter_mut() {
if let Some(waker) = waker.take() {
waker.wake();
}
}
self.inner.message_queue_nonempty_callback()
}
}
/// A Kafka consumer implementing [`futures::Stream`].
///
/// Yields `KafkaResult<BorrowedMessage>` items. When no message is ready the
/// stream parks itself and is woken either by the context's queue-nonempty
/// callback or by the poll-interval timer.
pub struct MessageStream<
'a,
C,
// Ugly, but this provides backwards compatibility when the `tokio` feature
// is enabled, as it is by default.
#[cfg(feature = "tokio")] R = TokioRuntime,
#[cfg(not(feature = "tokio"))] R,
> where
C: ConsumerContext + 'static,
R: AsyncRuntime,
{
// The consumer being polled; borrowed for the life of the stream.
consumer: &'a StreamConsumer<C>,
// How long to sleep between polls while the queue stays empty.
interval: Duration,
// In-flight sleep future, if any. Boxed and pinned — see the SAFETY
// comment on `ensure_delay`/`clear_delay` below.
delay: Pin<Box<Option<R::Delay>>>,
// This stream's slot in the shared waker slab; released on drop.
slot: usize,
}
impl<'a, C, R> MessageStream<'a, C, R>
where
C: ConsumerContext + 'static,
R: AsyncRuntime,
{
/// Creates a stream over `consumer`, reserving a waker slot in the
/// consumer context's slab.
fn new(consumer: &'a StreamConsumer<C>, interval: Duration) -> MessageStream<'a, C, R> {
let slot = {
let context = consumer.get_base_consumer().context();
let mut wakers = context.wakers.lock().expect("lock poisoned");
wakers.insert(None)
};
MessageStream {
consumer,
interval,
delay: Box::pin(None),
slot,
}
}
// Shorthand for the consumer's stream context.
fn context(&self) -> &StreamConsumerContext<C> {
self.consumer.get_base_consumer().context()
}
// Raw librdkafka client handle, used for the FFI poll call below.
fn client_ptr(&self) -> *mut RDKafka {
self.consumer.client().native_ptr()
}
// Installs `waker` in this stream's slot so the queue-nonempty callback
// can wake us.
fn set_waker(&self, waker: Waker) {
let mut wakers = self.context().wakers.lock().expect("lock poisoned");
wakers[self.slot].replace(waker);
}
// Non-blocking poll (timeout 0); `None` means no message was ready.
fn poll(&self) -> Option<KafkaResult<BorrowedMessage<'a>>> {
unsafe {
NativePtr::from_ptr(rdsys::rd_kafka_consumer_poll(self.client_ptr(), 0))
.map(|p| BorrowedMessage::from_consumer(p, self.consumer))
}
}
// SAFETY: All access to `self.delay` occurs via the following two
// functions. These functions are careful to never move out of `self.delay`.
// (They can *drop* the future stored in `self.delay`, but that is
// permitted.) They never return a non-pinned pointer to the contents of
// `self.delay`.
fn ensure_delay(&mut self, delay: R::Delay) -> Pin<&mut R::Delay> {
unsafe { Pin::new_unchecked(self.delay.as_mut().get_unchecked_mut().get_or_insert(delay)) }
}
fn clear_delay(&mut self) {
unsafe { *self.delay.as_mut().get_unchecked_mut() = None }
}
}
impl<'a, C, R> Stream for MessageStream<'a, C, R>
where
C: ConsumerContext + 'a,
R: AsyncRuntime,
{
type Item = KafkaResult<BorrowedMessage<'a>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// Unconditionally store the waker so that we are woken up if the queue
// flips from empty to non-empty. We have to store the waker on every
// call to poll in case this future migrates between tasks. We also need
// to store the waker *before* calling poll to avoid a race where `poll`
// returns None to indicate that the queue is empty, but the queue
// becomes non-empty before we've installed the waker.
self.set_waker(cx.waker().clone());
match self.poll() {
// Queue empty: arm the poll-interval timer; when it (or the
// queue-nonempty wakeup) fires, `poll_next` runs again from the top.
None => loop {
let delay = R::delay_for(self.interval);
ready!(self.ensure_delay(delay).poll(cx));
self.clear_delay();
},
Some(message) => {
self.clear_delay();
Poll::Ready(Some(message))
}
}
}
}
impl<'a, C, R> Drop for MessageStream<'a, C, R>
where
C: ConsumerContext + 'static,
R: AsyncRuntime,
{
/// Releases this stream's entry in the shared waker slab so the slot can
/// be reused by a later stream.
fn drop(&mut self) {
self.context()
.wakers
.lock()
.expect("lock poisoned")
.remove(self.slot);
}
}
/// A Kafka consumer providing a [`futures::Stream`] interface.
///
/// This consumer doesn't need to be polled explicitly since `await`ing the
/// stream returned by [`StreamConsumer::start`] will implicitly poll the
/// consumer.
#[must_use = "Consumer polling thread will stop immediately if unused"]
pub struct StreamConsumer<C: ConsumerContext + 'static = DefaultConsumerContext> {
// The wrapped synchronous consumer, owned directly (no `Arc`):
// `MessageStream` only ever borrows it.
consumer: BaseConsumer<StreamConsumerContext<C>>,
}
impl<C: ConsumerContext> Consumer<StreamConsumerContext<C>> for StreamConsumer<C> {
/// Borrows the wrapped `BaseConsumer`.
fn get_base_consumer(&self) -> &BaseConsumer<StreamConsumerContext<C>> {
&self.consumer
}
}
/// Creates a `StreamConsumer` from a [`ClientConfig`], using the
/// [`DefaultConsumerContext`].
impl FromClientConfig for StreamConsumer {
fn from_config(config: &ClientConfig) -> KafkaResult<StreamConsumer> {
StreamConsumer::from_config_and_context(config, DefaultConsumerContext)
}
}
/// Creates a new `StreamConsumer` starting from a [`ClientConfig`] and a
/// user-supplied consumer context.
impl<C: ConsumerContext> FromClientConfigAndContext<C> for StreamConsumer<C> {
fn from_config_and_context(
config: &ClientConfig,
context: C,
) -> KafkaResult<StreamConsumer<C>> {
// Wrap the user's context so message streams can hook the
// queue-nonempty callback.
let context = StreamConsumerContext::new(context);
let stream_consumer = StreamConsumer {
consumer: BaseConsumer::from_config_and_context(config, context)?,
};
// NOTE(review): presumably this redirects the main event queue to the
// consumer queue so a single poll serves both — confirm against
// librdkafka's `rd_kafka_poll_set_consumer` documentation.
unsafe {
rdsys::rd_kafka_poll_set_consumer(stream_consumer.consumer.client().native_ptr())
};
Ok(stream_consumer)
}
}
impl<C: ConsumerContext> StreamConsumer<C> {
/// Starts the stream consumer with default configuration (100ms polling
/// interval and no `NoMessageReceived` notifications).
///
/// **Note:** this method must be called from within the context of a Tokio
/// runtime.
#[cfg(feature = "tokio")]
#[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
pub fn start(&self) -> MessageStream<'_, C, TokioRuntime> {
self.start_with(Duration::from_millis(100))
}
/// Starts the stream consumer with the specified poll interval.
#[cfg(feature = "tokio")]
#[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
pub fn start_with(&self, poll_interval: Duration) -> MessageStream<'_, C, TokioRuntime> {
// TODO: verify called once
self.start_with_runtime(poll_interval)
}
/// Like [`StreamConsumer::start_with`], but with a customizable
/// asynchronous runtime.
///
/// See the [`AsyncRuntime`] trait for the details on the interface the
/// runtime must satisfy.
pub fn start_with_runtime<R>(&self, poll_interval: Duration) -> MessageStream<'_, C, R>
where
R: AsyncRuntime,
{
MessageStream::new(self, poll_interval)
}
}
|
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
mod protocol;
mod virtio_2d_backend;
mod virtio_3d_backend;
mod virtio_backend;
mod virtio_gfxstream_backend;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::i64;
use std::io::Read;
use std::mem::{self, size_of};
use std::num::NonZeroU8;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use data_model::*;
use base::{
debug, error, warn, AsRawDescriptor, Event, ExternalMapping, PollToken, RawDescriptor,
WaitContext,
};
use sync::Mutex;
use vm_memory::{GuestAddress, GuestMemory};
use gpu_buffer::Format;
pub use gpu_display::EventDevice;
use gpu_display::*;
use gpu_renderer::RendererFlags;
use msg_socket::{MsgReceiver, MsgSender};
use resources::Alloc;
use super::{
copy_config, resource_bridge::*, DescriptorChain, Interrupt, Queue, Reader, VirtioDevice,
Writer, TYPE_GPU,
};
use super::{PciCapabilityType, VirtioPciShmCap};
use self::protocol::*;
use self::virtio_2d_backend::Virtio2DBackend;
use self::virtio_3d_backend::Virtio3DBackend;
#[cfg(feature = "gfxstream")]
use self::virtio_gfxstream_backend::VirtioGfxStreamBackend;
use crate::pci::{
PciAddress, PciBarConfiguration, PciBarPrefetchable, PciBarRegionType, PciCapability,
};
use vm_control::VmMemoryControlRequestSocket;
/// Default width, in pixels, of the virtio-gpu display.
pub const DEFAULT_DISPLAY_WIDTH: u32 = 1280;
/// Default height, in pixels, of the virtio-gpu display.
pub const DEFAULT_DISPLAY_HEIGHT: u32 = 1024;
/// Which backend implementation the virtio-gpu device should use.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum GpuMode {
Mode2D,
Mode3D,
#[cfg(feature = "gfxstream")]
ModeGfxStream,
}
/// Configuration for the virtio-gpu device: display geometry, renderer
/// feature toggles, backend mode, and optional shader-cache settings.
#[derive(Debug)]
pub struct GpuParameters {
pub display_width: u32,
pub display_height: u32,
pub renderer_use_egl: bool,
pub renderer_use_gles: bool,
pub renderer_use_glx: bool,
pub renderer_use_surfaceless: bool,
#[cfg(feature = "gfxstream")]
pub gfxstream_use_guest_angle: bool,
#[cfg(feature = "gfxstream")]
pub gfxstream_use_syncfd: bool,
#[cfg(feature = "gfxstream")]
pub gfxstream_support_vulkan: bool,
pub mode: GpuMode,
// NOTE(review): `cache_path`/`cache_size` are kept as raw strings here;
// presumably they configure the renderer's shader cache — confirm where
// they are consumed.
pub cache_path: Option<String>,
pub cache_size: Option<String>,
}
// First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
// there to be fewer of.
const QUEUE_SIZES: &[u16] = &[256, 16];
// Interval, in milliseconds, between checks for completed fences
// (presumably polled via `Backend::fence_poll` — the consumer of this
// constant is outside this view).
const FENCE_POLL_MS: u64 = 1;
// PCI BAR number, offset, and size (1 << 33 = 8 GiB) used for the device's
// shared-memory region.
const GPU_BAR_NUM: u8 = 4;
const GPU_BAR_OFFSET: u64 = 0;
const GPU_BAR_SIZE: u64 = 1 << 33;
impl Default for GpuParameters {
/// The stock configuration: 1280x1024 display, 3D mode, EGL + GLES +
/// surfaceless rendering enabled (GLX disabled), and no shader cache.
fn default() -> Self {
Self {
display_width: DEFAULT_DISPLAY_WIDTH,
display_height: DEFAULT_DISPLAY_HEIGHT,
mode: GpuMode::Mode3D,
renderer_use_egl: true,
renderer_use_gles: true,
renderer_use_glx: false,
renderer_use_surfaceless: true,
#[cfg(feature = "gfxstream")]
gfxstream_use_guest_angle: false,
#[cfg(feature = "gfxstream")]
gfxstream_use_syncfd: true,
#[cfg(feature = "gfxstream")]
gfxstream_support_vulkan: true,
cache_path: None,
cache_size: None,
}
}
}
/// Layout metadata for a blob resource used as a scanout: dimensions, DRM
/// pixel format, and per-plane strides/offsets (up to four planes).
#[derive(Copy, Clone, Debug)]
pub struct VirtioScanoutBlobData {
pub width: u32,
pub height: u32,
pub drm_format: Format,
pub strides: [u32; 4],
pub offsets: [u32; 4],
}
/// A virtio-gpu backend state tracker which supports display and potentially accelerated rendering.
///
/// Commands from the virtio-gpu protocol can be submitted here using the methods, and they will be
/// realized on the hardware. Most methods return a `VirtioGpuResult` that indicates the success,
/// failure, or requested data for the given command.
///
/// Methods with a default body returning `Err(GpuResponse::ErrUnspec)` (or
/// `ResourceResponse::Invalid`) are optional capabilities a backend may
/// choose not to implement.
trait Backend {
/// Returns the number of capsets provided by the Backend.
fn capsets() -> u32
where
Self: Sized;
/// Returns the bitset of virtio features provided by the Backend in addition to the base set
/// of device features.
fn features() -> u64
where
Self: Sized;
/// Constructs a backend.
fn build(
display: GpuDisplay,
display_width: u32,
display_height: u32,
renderer_flags: RendererFlags,
event_devices: Vec<EventDevice>,
gpu_device_socket: VmMemoryControlRequestSocket,
pci_bar: Alloc,
map_request: Arc<Mutex<Option<ExternalMapping>>>,
external_blob: bool,
) -> Option<Box<dyn Backend>>
where
Self: Sized;
/// Returns the display backing this backend.
fn display(&self) -> &Rc<RefCell<GpuDisplay>>;
/// Processes the internal `display` events and returns `true` if the main display was closed.
fn process_display(&mut self) -> bool;
/// Creates a fence with the given id that can be used to determine when the previous command
/// completed.
fn create_fence(&mut self, ctx_id: u32, fence_id: u32) -> VirtioGpuResult;
/// If supported, exports the fence with the given id over the resource
/// bridge; unsupported by default.
fn export_fence(&mut self, _fence_id: u32) -> ResourceResponse {
ResourceResponse::Invalid
}
/// Returns the id of the latest fence to complete.
fn fence_poll(&mut self) -> u32;
/// For accelerated rendering capable backends, switch to the default rendering context.
fn force_ctx_0(&mut self) {}
/// Attaches the given input device to the given surface of the display (to allow for input
/// from a X11 window for example).
fn import_event_device(&mut self, event_device: EventDevice, scanout: u32) -> VirtioGpuResult;
/// If supported, export the resource with the given id to a file.
fn export_resource(&mut self, id: u32) -> ResourceResponse;
/// Gets the list of supported display resolutions as a slice of `(width, height)` tuples.
fn display_info(&self) -> [(u32, u32); 1];
/// Creates a 2D resource with the given properties and associates it with the given id.
fn create_resource_2d(
&mut self,
id: u32,
width: u32,
height: u32,
format: u32,
) -> VirtioGpuResult;
/// Removes the guest's reference count for the given resource id.
fn unref_resource(&mut self, id: u32) -> VirtioGpuResult;
/// Sets the given resource id as the source of scanout to the display, with optional blob data.
fn set_scanout(
&mut self,
_scanout_id: u32,
resource_id: u32,
scanout_data: Option<VirtioScanoutBlobData>,
) -> VirtioGpuResult;
/// Flushes the given rectangle of pixels of the given resource to the display.
fn flush_resource(
&mut self,
id: u32,
x: u32,
y: u32,
width: u32,
height: u32,
) -> VirtioGpuResult;
/// Copies the given rectangle of pixels of the given resource's backing memory to the host side
/// resource.
fn transfer_to_resource_2d(
&mut self,
id: u32,
x: u32,
y: u32,
width: u32,
height: u32,
src_offset: u64,
mem: &GuestMemory,
) -> VirtioGpuResult;
/// Attaches backing memory to the given resource, represented by a `Vec` of `(address, size)`
/// tuples in the guest's physical address space.
fn attach_backing(
&mut self,
id: u32,
mem: &GuestMemory,
vecs: Vec<(GuestAddress, usize)>,
) -> VirtioGpuResult;
/// Detaches any backing memory from the given resource, if there is any.
fn detach_backing(&mut self, id: u32) -> VirtioGpuResult;
/// Assigns a UUID to the given resource; unsupported by default.
fn resource_assign_uuid(&mut self, _id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Updates the cursor's memory to the given id, and sets its position to the given coordinates.
fn update_cursor(&mut self, id: u32, x: u32, y: u32) -> VirtioGpuResult;
/// Moves the cursor's position to the given coordinates.
fn move_cursor(&mut self, x: u32, y: u32) -> VirtioGpuResult;
/// Gets the renderer's capset information associated with `index`.
fn get_capset_info(&self, index: u32) -> VirtioGpuResult;
/// Gets the capset of `version` associated with `id`.
fn get_capset(&self, id: u32, version: u32) -> VirtioGpuResult;
/// Creates a fresh renderer context with the given `id`; unsupported by default.
fn create_renderer_context(&mut self, _id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Destroys the renderer context associated with `id`; unsupported by default.
fn destroy_renderer_context(&mut self, _id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Attaches the indicated resource to the given context; unsupported by default.
fn context_attach_resource(&mut self, _ctx_id: u32, _res_id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Detaches the indicated resource from the given context; unsupported by default.
fn context_detach_resource(&mut self, _ctx_id: u32, _res_id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Creates a 3D resource with the given properties and associates it with the given id;
/// unsupported by default.
fn resource_create_3d(
&mut self,
_id: u32,
_target: u32,
_format: u32,
_bind: u32,
_width: u32,
_height: u32,
_depth: u32,
_array_size: u32,
_last_level: u32,
_nr_samples: u32,
_flags: u32,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Copies the given 3D rectangle of pixels of the given resource's backing memory to the host
/// side resource; unsupported by default.
fn transfer_to_resource_3d(
&mut self,
_ctx_id: u32,
_res_id: u32,
_x: u32,
_y: u32,
_z: u32,
_width: u32,
_height: u32,
_depth: u32,
_level: u32,
_stride: u32,
_layer_stride: u32,
_offset: u64,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Copies the given rectangle of pixels from the resource to the given resource's backing
/// memory; unsupported by default.
fn transfer_from_resource_3d(
&mut self,
_ctx_id: u32,
_res_id: u32,
_x: u32,
_y: u32,
_z: u32,
_width: u32,
_height: u32,
_depth: u32,
_level: u32,
_stride: u32,
_layer_stride: u32,
_offset: u64,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Submits a command buffer to the given rendering context; unsupported by default.
fn submit_command(&mut self, _ctx_id: u32, _commands: &mut [u8]) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Creates a blob resource from the given memory descriptions; unsupported by default.
fn resource_create_blob(
&mut self,
_resource_id: u32,
_ctx_id: u32,
_blob_mem: u32,
_blob_flags: u32,
_blob_id: u64,
_size: u64,
_vecs: Vec<(GuestAddress, usize)>,
_mem: &GuestMemory,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Maps the given blob resource at `offset`; unsupported by default.
fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Unmaps the given blob resource; unsupported by default.
fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
}
/// Selects which concrete `Backend` implementation to construct
/// (see `BackendKind::build` below).
#[derive(Clone)]
enum BackendKind {
// Backed by `Virtio2DBackend`.
Virtio2D,
// Backed by `Virtio3DBackend`.
Virtio3D,
// Backed by `VirtioGfxStreamBackend`.
#[cfg(feature = "gfxstream")]
VirtioGfxStream,
}
impl BackendKind {
/// Returns the number of capsets provided by the Backend.
fn capsets(&self) -> u32 {
match self {
BackendKind::Virtio2D => Virtio2DBackend::capsets(),
BackendKind::Virtio3D => Virtio3DBackend::capsets(),
#[cfg(feature = "gfxstream")]
BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::capsets(),
}
}
/// Returns the bitset of virtio features provided by the Backend in addition to the base set
/// of device features.
fn features(&self) -> u64 {
match self {
BackendKind::Virtio2D => Virtio2DBackend::features(),
BackendKind::Virtio3D => Virtio3DBackend::features(),
#[cfg(feature = "gfxstream")]
BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::features(),
}
}
/// Initializes the backend.
fn build(
&self,
possible_displays: &[DisplayBackend],
display_width: u32,
display_height: u32,
renderer_flags: RendererFlags,
event_devices: Vec<EventDevice>,
gpu_device_socket: VmMemoryControlRequestSocket,
pci_bar: Alloc,
map_request: Arc<Mutex<Option<ExternalMapping>>>,
external_blob: bool,
) -> Option<Box<dyn Backend>> {
let mut display_opt = None;
for display in possible_displays {
match display.build() {
Ok(c) => {
display_opt = Some(c);
break;
}
Err(e) => error!("failed to open display: {}", e),
};
}
let display = match display_opt {
Some(d) => d,
None => {
error!("failed to open any displays");
return None;
}
};
match self {
BackendKind::Virtio2D => Virtio2DBackend::build(
display,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
),
BackendKind::Virtio3D => Virtio3DBackend::build(
display,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
),
#[cfg(feature = "gfxstream")]
BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::build(
display,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
),
}
}
}
/// A finished descriptor chain ready to be returned to the guest.
struct ReturnDescriptor {
    // Index of the descriptor chain in the queue.
    index: u16,
    // Number of bytes written into the chain's writable buffers.
    len: u32,
}
/// A descriptor chain whose response is held back until its fence completes.
struct FenceDescriptor {
    // Fence id the response waits on.
    fence_id: u32,
    // Index of the descriptor chain in the control queue.
    index: u16,
    // Number of bytes of the already-encoded response.
    len: u32,
}
/// Decodes virtio-gpu protocol commands from the queues and dispatches them
/// to the chosen `Backend`.
struct Frontend {
    // Completed control-queue descriptors waiting to be returned.
    return_ctrl_descriptors: VecDeque<ReturnDescriptor>,
    // Completed cursor-queue descriptors waiting to be returned.
    return_cursor_descriptors: VecDeque<ReturnDescriptor>,
    // Responses parked until their fences complete; drained by fence_poll().
    fence_descriptors: Vec<FenceDescriptor>,
    backend: Box<dyn Backend>,
}
impl Frontend {
fn new(backend: Box<dyn Backend>) -> Frontend {
Frontend {
return_ctrl_descriptors: Default::default(),
return_cursor_descriptors: Default::default(),
fence_descriptors: Default::default(),
backend,
}
}
/// Returns the backend's display handle (used to poll display events).
fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
    self.backend.display()
}
/// Processes pending display events; returns `true` if the main display was
/// closed (i.e. the VM should exit).
fn process_display(&mut self) -> bool {
    self.backend.process_display()
}
/// Services one request from a resource bridge socket, replying with the
/// exported resource/fence or `Invalid`.
fn process_resource_bridge(&mut self, resource_bridge: &ResourceResponseSocket) {
    let response = match resource_bridge.recv() {
        Ok(ResourceRequest::GetBuffer { id }) => self.backend.export_resource(id),
        Ok(ResourceRequest::GetFence { seqno }) => {
            // The seqno originated from self.backend, so
            // it should fit in a u32.
            match u32::try_from(seqno) {
                Ok(fence_id) => self.backend.export_fence(fence_id),
                Err(_) => ResourceResponse::Invalid,
            }
        }
        Err(e) => {
            error!("error receiving resource bridge request: {}", e);
            return;
        }
    };
    // A failed send is logged but not fatal; the peer may have gone away.
    if let Err(e) = resource_bridge.send(&response) {
        error!("error sending resource bridge request: {}", e);
    }
}
/// Executes a single decoded virtio-gpu command against the backend.
///
/// `reader` is positioned after the command header and still holds any
/// trailing payload (backing-store entries, 3D command buffers). Returns the
/// response to encode back to the guest, or an error response.
fn process_gpu_command(
    &mut self,
    mem: &GuestMemory,
    cmd: GpuCommand,
    reader: &mut Reader,
) -> VirtioGpuResult {
    self.backend.force_ctx_0();
    match cmd {
        GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
            self.backend.display_info().to_vec(),
        )),
        GpuCommand::ResourceCreate2d(info) => self.backend.create_resource_2d(
            info.resource_id.to_native(),
            info.width.to_native(),
            info.height.to_native(),
            info.format.to_native(),
        ),
        GpuCommand::ResourceUnref(info) => {
            self.backend.unref_resource(info.resource_id.to_native())
        }
        GpuCommand::SetScanout(info) => self.backend.set_scanout(
            info.scanout_id.to_native(),
            info.resource_id.to_native(),
            None,
        ),
        GpuCommand::ResourceFlush(info) => self.backend.flush_resource(
            info.resource_id.to_native(),
            info.r.x.to_native(),
            info.r.y.to_native(),
            info.r.width.to_native(),
            info.r.height.to_native(),
        ),
        GpuCommand::TransferToHost2d(info) => self.backend.transfer_to_resource_2d(
            info.resource_id.to_native(),
            info.r.x.to_native(),
            info.r.y.to_native(),
            info.r.width.to_native(),
            info.r.height.to_native(),
            info.offset.to_native(),
            mem,
        ),
        GpuCommand::ResourceAttachBacking(info) => {
            // The sg-list of backing pages follows the request header in the
            // descriptor chain.
            let available_bytes = reader.available_bytes();
            if available_bytes != 0 {
                let entry_count = info.nr_entries.to_native() as usize;
                let mut vecs = Vec::with_capacity(entry_count);
                for _ in 0..entry_count {
                    match reader.read_obj::<virtio_gpu_mem_entry>() {
                        Ok(entry) => {
                            let addr = GuestAddress(entry.addr.to_native());
                            let len = entry.length.to_native() as usize;
                            vecs.push((addr, len))
                        }
                        Err(_) => return Err(GpuResponse::ErrUnspec),
                    }
                }
                self.backend
                    .attach_backing(info.resource_id.to_native(), mem, vecs)
            } else {
                error!("missing data for command {:?}", cmd);
                Err(GpuResponse::ErrUnspec)
            }
        }
        GpuCommand::ResourceDetachBacking(info) => {
            self.backend.detach_backing(info.resource_id.to_native())
        }
        GpuCommand::UpdateCursor(info) => self.backend.update_cursor(
            info.resource_id.to_native(),
            info.pos.x.into(),
            info.pos.y.into(),
        ),
        GpuCommand::MoveCursor(info) => self
            .backend
            .move_cursor(info.pos.x.into(), info.pos.y.into()),
        GpuCommand::ResourceAssignUuid(info) => {
            let resource_id = info.resource_id.to_native();
            self.backend.resource_assign_uuid(resource_id)
        }
        GpuCommand::GetCapsetInfo(info) => {
            self.backend.get_capset_info(info.capset_index.to_native())
        }
        GpuCommand::GetCapset(info) => self
            .backend
            .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
        GpuCommand::CtxCreate(info) => self
            .backend
            .create_renderer_context(info.hdr.ctx_id.to_native()),
        GpuCommand::CtxDestroy(info) => self
            .backend
            .destroy_renderer_context(info.hdr.ctx_id.to_native()),
        GpuCommand::CtxAttachResource(info) => self
            .backend
            .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
        GpuCommand::CtxDetachResource(info) => self
            .backend
            .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
        GpuCommand::ResourceCreate3d(info) => {
            let id = info.resource_id.to_native();
            let target = info.target.to_native();
            let format = info.format.to_native();
            let bind = info.bind.to_native();
            let width = info.width.to_native();
            let height = info.height.to_native();
            let depth = info.depth.to_native();
            let array_size = info.array_size.to_native();
            let last_level = info.last_level.to_native();
            let nr_samples = info.nr_samples.to_native();
            let flags = info.flags.to_native();
            self.backend.resource_create_3d(
                id, target, format, bind, width, height, depth, array_size, last_level,
                nr_samples, flags,
            )
        }
        GpuCommand::TransferToHost3d(info) => {
            let ctx_id = info.hdr.ctx_id.to_native();
            let res_id = info.resource_id.to_native();
            let x = info.box_.x.to_native();
            let y = info.box_.y.to_native();
            let z = info.box_.z.to_native();
            let width = info.box_.w.to_native();
            let height = info.box_.h.to_native();
            let depth = info.box_.d.to_native();
            let level = info.level.to_native();
            let stride = info.stride.to_native();
            let layer_stride = info.layer_stride.to_native();
            let offset = info.offset.to_native();
            self.backend.transfer_to_resource_3d(
                ctx_id,
                res_id,
                x,
                y,
                z,
                width,
                height,
                depth,
                level,
                stride,
                layer_stride,
                offset,
            )
        }
        GpuCommand::TransferFromHost3d(info) => {
            let ctx_id = info.hdr.ctx_id.to_native();
            let res_id = info.resource_id.to_native();
            let x = info.box_.x.to_native();
            let y = info.box_.y.to_native();
            let z = info.box_.z.to_native();
            let width = info.box_.w.to_native();
            let height = info.box_.h.to_native();
            let depth = info.box_.d.to_native();
            let level = info.level.to_native();
            let stride = info.stride.to_native();
            let layer_stride = info.layer_stride.to_native();
            let offset = info.offset.to_native();
            self.backend.transfer_from_resource_3d(
                ctx_id,
                res_id,
                x,
                y,
                z,
                width,
                height,
                depth,
                level,
                stride,
                layer_stride,
                offset,
            )
        }
        GpuCommand::CmdSubmit3d(info) => {
            if reader.available_bytes() != 0 {
                let cmd_size = info.size.to_native() as usize;
                let mut cmd_buf = vec![0; cmd_size];
                if reader.read_exact(&mut cmd_buf[..]).is_ok() {
                    self.backend
                        .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
                } else {
                    Err(GpuResponse::ErrInvalidParameter)
                }
            } else {
                // Silently accept empty command buffers to allow for
                // benchmarking.
                Ok(GpuResponse::OkNoData)
            }
        }
        GpuCommand::ResourceCreateBlob(info) => {
            let resource_id = info.resource_id.to_native();
            let ctx_id = info.hdr.ctx_id.to_native();
            let blob_mem = info.blob_mem.to_native();
            let blob_flags = info.blob_flags.to_native();
            let blob_id = info.blob_id.to_native();
            let size = info.size.to_native();
            let entry_count = info.nr_entries.to_native();
            // Reject oversized sg-lists and headers that promise entries the
            // descriptor chain doesn't actually carry.
            if entry_count > VIRTIO_GPU_MAX_IOVEC_ENTRIES
                || (reader.available_bytes() == 0 && entry_count > 0)
            {
                return Err(GpuResponse::ErrUnspec);
            }
            let mut vecs = Vec::with_capacity(entry_count as usize);
            for _ in 0..entry_count {
                match reader.read_obj::<virtio_gpu_mem_entry>() {
                    Ok(entry) => {
                        let addr = GuestAddress(entry.addr.to_native());
                        let len = entry.length.to_native() as usize;
                        vecs.push((addr, len))
                    }
                    Err(_) => return Err(GpuResponse::ErrUnspec),
                }
            }
            self.backend.resource_create_blob(
                resource_id,
                ctx_id,
                blob_mem,
                blob_flags,
                blob_id,
                size,
                vecs,
                mem,
            )
        }
        GpuCommand::SetScanoutBlob(info) => {
            let scanout_id = info.scanout_id.to_native();
            let resource_id = info.resource_id.to_native();
            let virtio_gpu_format = info.format.to_native();
            let width = info.width.to_native();
            // Fix: height was previously read from `info.width`, which gave
            // every blob scanout a squared (width x width) geometry.
            let height = info.height.to_native();
            let mut strides: [u32; 4] = [0; 4];
            let mut offsets: [u32; 4] = [0; 4];
            // As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
            // the following may have to change too.
            let drm_format = match virtio_gpu_format {
                VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => Format::new(b'X', b'R', b'2', b'4'),
                VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => Format::new(b'A', b'R', b'2', b'4'),
                _ => {
                    error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
                    return Err(GpuResponse::ErrUnspec);
                }
            };
            for plane_index in 0..PLANE_INFO_MAX_COUNT {
                offsets[plane_index] = info.offsets[plane_index].to_native();
                strides[plane_index] = info.strides[plane_index].to_native();
            }
            let scanout = VirtioScanoutBlobData {
                width,
                height,
                drm_format,
                strides,
                offsets,
            };
            self.backend
                .set_scanout(scanout_id, resource_id, Some(scanout))
        }
        GpuCommand::ResourceMapBlob(info) => {
            let resource_id = info.resource_id.to_native();
            let offset = info.offset.to_native();
            self.backend.resource_map_blob(resource_id, offset)
        }
        GpuCommand::ResourceUnmapBlob(info) => {
            let resource_id = info.resource_id.to_native();
            self.backend.resource_unmap_blob(resource_id)
        }
    }
}
/// A usable request descriptor must be at least a control header long and be
/// readable by the device.
fn validate_desc(desc: &DescriptorChain) -> bool {
    desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only()
}
/// Pops and handles every available descriptor chain on `queue`.
///
/// Returns `true` if anything was added to the used ring, i.e. the guest
/// should be signalled.
fn process_queue(&mut self, mem: &GuestMemory, queue: &mut Queue) -> bool {
    let mut signal_used = false;
    while let Some(desc) = queue.pop(mem) {
        if Frontend::validate_desc(&desc) {
            match (
                Reader::new(mem.clone(), desc.clone()),
                Writer::new(mem.clone(), desc.clone()),
            ) {
                (Ok(mut reader), Ok(mut writer)) => {
                    // `None` means the response is fenced and will be
                    // returned later by fence_poll().
                    if let Some(ret_desc) =
                        self.process_descriptor(mem, desc.index, &mut reader, &mut writer)
                    {
                        queue.add_used(&mem, ret_desc.index, ret_desc.len);
                        signal_used = true;
                    }
                }
                (_, Err(e)) | (Err(e), _) => {
                    // Malformed chain: return it with zero bytes written.
                    debug!("invalid descriptor: {}", e);
                    queue.add_used(&mem, desc.index, 0);
                    signal_used = true;
                }
            }
        } else {
            // Too short or write-only; log a best-effort guess at the command
            // type and return the chain unused.
            let likely_type = mem.read_obj_from_addr(desc.addr).unwrap_or(Le32::from(0));
            debug!(
                "queue bad descriptor index = {} len = {} write = {} type = {}",
                desc.index,
                desc.len,
                desc.is_write_only(),
                virtio_gpu_cmd_str(likely_type.to_native())
            );
            queue.add_used(&mem, desc.index, 0);
            signal_used = true;
        }
    }
    signal_used
}
/// Decodes one command from `reader`, executes it, and encodes its response
/// into `writer`.
///
/// Returns `Some` when the descriptor can be returned to the guest
/// immediately, or `None` when the (already encoded) response is parked on
/// `fence_descriptors` until its fence completes.
fn process_descriptor(
    &mut self,
    mem: &GuestMemory,
    desc_index: u16,
    reader: &mut Reader,
    writer: &mut Writer,
) -> Option<ReturnDescriptor> {
    let mut resp = Err(GpuResponse::ErrUnspec);
    let mut gpu_cmd = None;
    let mut len = 0;
    match GpuCommand::decode(reader) {
        Ok(cmd) => {
            resp = self.process_gpu_command(mem, cmd, reader);
            // Keep the command around for fence handling below.
            gpu_cmd = Some(cmd);
        }
        Err(e) => debug!("descriptor decode error: {}", e),
    }
    let mut gpu_response = match resp {
        Ok(gpu_response) => gpu_response,
        Err(gpu_response) => {
            debug!("{:?} -> {:?}", gpu_cmd, gpu_response);
            gpu_response
        }
    };
    if writer.available_bytes() != 0 {
        let mut fence_id = 0;
        let mut ctx_id = 0;
        let mut flags = 0;
        if let Some(cmd) = gpu_cmd {
            let ctrl_hdr = cmd.ctrl_hdr();
            if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                fence_id = ctrl_hdr.fence_id.to_native();
                ctx_id = ctrl_hdr.ctx_id.to_native();
                flags = VIRTIO_GPU_FLAG_FENCE;
                // A failed fence creation replaces the command's response.
                gpu_response = match self.backend.create_fence(ctx_id, fence_id as u32) {
                    Ok(_) => gpu_response,
                    Err(fence_resp) => {
                        warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                        fence_resp
                    }
                };
            }
        }
        // Prepare the response now, even if it is going to wait until
        // fence is complete.
        match gpu_response.encode(flags, fence_id, ctx_id, writer) {
            Ok(l) => len = l,
            Err(e) => debug!("ctrl queue response encode error: {}", e),
        }
        if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
            self.fence_descriptors.push(FenceDescriptor {
                fence_id: fence_id as u32,
                index: desc_index,
                len,
            });
            return None;
        }
        // No fence, respond now.
    }
    Some(ReturnDescriptor {
        index: desc_index,
        len,
    })
}
/// Pops the next completed cursor-queue descriptor, if any.
fn return_cursor(&mut self) -> Option<ReturnDescriptor> {
    self.return_cursor_descriptors.pop_front()
}
/// Pops the next completed control-queue descriptor, if any.
fn return_ctrl(&mut self) -> Option<ReturnDescriptor> {
    self.return_ctrl_descriptors.pop_front()
}
/// Moves the descriptors of all completed fences onto the control-queue
/// return list.
fn fence_poll(&mut self) {
    // Latest fence id the backend reports as complete; every fence with an
    // id at or below it is done.
    let fence_id = self.backend.fence_poll();
    let return_descs = &mut self.return_ctrl_descriptors;
    self.fence_descriptors.retain(|f_desc| {
        if f_desc.fence_id > fence_id {
            true
        } else {
            return_descs.push_back(ReturnDescriptor {
                index: f_desc.index,
                len: f_desc.len,
            });
            false
        }
    })
}
}
/// Owns the device's event loop, servicing queues, display events, fences,
/// and resource bridge requests on a dedicated thread.
struct Worker {
    interrupt: Interrupt,
    // Written when the display requests the VM to shut down.
    exit_evt: Event,
    mem: GuestMemory,
    // Control queue and its kick event.
    ctrl_queue: Queue,
    ctrl_evt: Event,
    // Cursor queue and its kick event.
    cursor_queue: Queue,
    cursor_evt: Event,
    resource_bridges: Vec<ResourceResponseSocket>,
    // Signalled by the device (see Gpu::drop) to stop this worker.
    kill_evt: Event,
    state: Frontend,
}
impl Worker {
/// The worker thread's event loop; returns only on kill event or wait error.
fn run(&mut self) {
    #[derive(PollToken)]
    enum Token {
        CtrlQueue,
        CursorQueue,
        Display,
        InterruptResample,
        Kill,
        ResourceBridge { index: usize },
    }
    let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
        (&self.ctrl_evt, Token::CtrlQueue),
        (&self.cursor_evt, Token::CursorQueue),
        (&*self.state.display().borrow(), Token::Display),
        (self.interrupt.get_resample_evt(), Token::InterruptResample),
        (&self.kill_evt, Token::Kill),
    ]) {
        Ok(pc) => pc,
        Err(e) => {
            error!("failed creating WaitContext: {}", e);
            return;
        }
    };
    // Bridge registration failures are logged but non-fatal.
    for (index, bridge) in self.resource_bridges.iter().enumerate() {
        if let Err(e) = wait_ctx.add(bridge, Token::ResourceBridge { index }) {
            error!("failed to add resource bridge to WaitContext: {}", e);
        }
    }
    // TODO(davidriley): The entire main loop processing is somewhat racey and incorrect with
    // respect to cursor vs control queue processing. As both currently and originally
    // written, while the control queue is only processed/read from after the cursor queue
    // is finished, the entire queue will be processed at that time. The end effect of this
    // raciness is that control queue descriptors that are issued after cursor descriptors
    // might be handled first instead of the other way around. In practice, the cursor queue
    // isn't used so this isn't a huge issue.
    // Declare this outside the loop so we don't keep allocating and freeing the vector.
    let mut process_resource_bridge = Vec::with_capacity(self.resource_bridges.len());
    'wait: loop {
        // If there are outstanding fences, wake up early to poll them.
        let duration = if !self.state.fence_descriptors.is_empty() {
            Duration::from_millis(FENCE_POLL_MS)
        } else {
            // Effectively "wait forever" when no fences are pending.
            Duration::new(i64::MAX as u64, 0)
        };
        let events = match wait_ctx.wait_timeout(duration) {
            Ok(v) => v,
            Err(e) => {
                error!("failed polling for events: {}", e);
                break;
            }
        };
        let mut signal_used_cursor = false;
        let mut signal_used_ctrl = false;
        let mut ctrl_available = false;
        // Clear the old values and re-initialize with false.
        process_resource_bridge.clear();
        process_resource_bridge.resize(self.resource_bridges.len(), false);
        // This display isn't typically used when the virt-wl device is available and it can
        // lead to hung fds (crbug.com/1027379). Disable if it's hung.
        for event in events.iter().filter(|e| e.is_hungup) {
            if let Token::Display = event.token {
                error!("default display hang-up detected");
                let _ = wait_ctx.delete(&*self.state.display().borrow());
            }
        }
        for event in events.iter().filter(|e| e.is_readable) {
            match event.token {
                Token::CtrlQueue => {
                    let _ = self.ctrl_evt.read();
                    // Set flag that control queue is available to be read, but defer reading
                    // until rest of the events are processed.
                    ctrl_available = true;
                }
                Token::CursorQueue => {
                    let _ = self.cursor_evt.read();
                    if self.state.process_queue(&self.mem, &mut self.cursor_queue) {
                        signal_used_cursor = true;
                    }
                }
                Token::Display => {
                    let close_requested = self.state.process_display();
                    if close_requested {
                        let _ = self.exit_evt.write(1);
                    }
                }
                Token::ResourceBridge { index } => {
                    process_resource_bridge[index] = true;
                }
                Token::InterruptResample => {
                    self.interrupt.interrupt_resample();
                }
                Token::Kill => {
                    break 'wait;
                }
            }
        }
        // All cursor commands go first because they have higher priority.
        while let Some(desc) = self.state.return_cursor() {
            self.cursor_queue.add_used(&self.mem, desc.index, desc.len);
            signal_used_cursor = true;
        }
        if ctrl_available && self.state.process_queue(&self.mem, &mut self.ctrl_queue) {
            signal_used_ctrl = true;
        }
        // Retire completed fences before draining the return list so their
        // descriptors go out in the same pass.
        self.state.fence_poll();
        while let Some(desc) = self.state.return_ctrl() {
            self.ctrl_queue.add_used(&self.mem, desc.index, desc.len);
            signal_used_ctrl = true;
        }
        // Process the entire control queue before the resource bridge in case a resource is
        // created or destroyed by the control queue. Processing the resource bridge first may
        // lead to a race condition.
        // TODO(davidriley): This is still inherently racey if both the control queue request
        // and the resource bridge request come in at the same time after the control queue is
        // processed above and before the corresponding bridge is processed below.
        for (bridge, &should_process) in
            self.resource_bridges.iter().zip(&process_resource_bridge)
        {
            if should_process {
                self.state.process_resource_bridge(bridge);
            }
        }
        if signal_used_ctrl {
            self.interrupt.signal_used_queue(self.ctrl_queue.vector);
        }
        if signal_used_cursor {
            self.interrupt.signal_used_queue(self.cursor_queue.vector);
        }
    }
}
}
/// Indicates a backend that should be tried for the gpu to use for display.
///
/// Several instances of this enum are used in an ordered list to give the gpu device many backends
/// to use as fallbacks in case some do not work.
#[derive(Clone)]
pub enum DisplayBackend {
    /// Use the wayland backend with the given socket path if given.
    Wayland(Option<PathBuf>),
    /// Open a connection to the X server at the given display if given.
    X(Option<String>),
    /// Emulate a display without actually displaying it.
    Stub,
}
impl DisplayBackend {
    /// Attempts to open a connection to this display backend.
    fn build(&self) -> std::result::Result<GpuDisplay, GpuDisplayError> {
        match self {
            DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
            DisplayBackend::X(display) => GpuDisplay::open_x(display.as_ref()),
            DisplayBackend::Stub => GpuDisplay::open_stub(),
        }
    }
}
/// The virtio-gpu device.
pub struct Gpu {
    exit_evt: Event,
    // Taken by activate() and moved into the worker.
    gpu_device_socket: Option<VmMemoryControlRequestSocket>,
    resource_bridges: Vec<ResourceResponseSocket>,
    event_devices: Vec<EventDevice>,
    // Used by Drop to stop the worker; set during activate().
    kill_evt: Option<Event>,
    // True while a display event is pending for the guest config space.
    config_event: bool,
    worker_thread: Option<thread::JoinHandle<()>>,
    num_scanouts: NonZeroU8,
    // Display candidates, tried in order by BackendKind::build.
    display_backends: Vec<DisplayBackend>,
    display_width: u32,
    display_height: u32,
    renderer_flags: RendererFlags,
    // BAR allocation recorded by get_device_bars(), consumed by activate().
    pci_bar: Option<Alloc>,
    map_request: Arc<Mutex<Option<ExternalMapping>>>,
    external_blob: bool,
    backend_kind: BackendKind,
    base_features: u64,
}
impl Gpu {
/// Constructs a new GPU device from the given configuration; the backend is
/// not built until `activate`.
pub fn new(
    exit_evt: Event,
    gpu_device_socket: Option<VmMemoryControlRequestSocket>,
    num_scanouts: NonZeroU8,
    resource_bridges: Vec<ResourceResponseSocket>,
    display_backends: Vec<DisplayBackend>,
    gpu_parameters: &GpuParameters,
    event_devices: Vec<EventDevice>,
    map_request: Arc<Mutex<Option<ExternalMapping>>>,
    external_blob: bool,
    base_features: u64,
) -> Gpu {
    let renderer_flags = RendererFlags::new()
        .use_egl(gpu_parameters.renderer_use_egl)
        .use_gles(gpu_parameters.renderer_use_gles)
        .use_glx(gpu_parameters.renderer_use_glx)
        .use_surfaceless(gpu_parameters.renderer_use_surfaceless);
    // When gfxstream is compiled in, shadow `renderer_flags` to layer on the
    // gfxstream-specific options.
    #[cfg(feature = "gfxstream")]
    let renderer_flags = renderer_flags
        .use_guest_angle(gpu_parameters.gfxstream_use_guest_angle)
        .use_syncfd(gpu_parameters.gfxstream_use_syncfd)
        .support_vulkan(gpu_parameters.gfxstream_support_vulkan);
    let backend_kind = match gpu_parameters.mode {
        GpuMode::Mode2D => BackendKind::Virtio2D,
        GpuMode::Mode3D => BackendKind::Virtio3D,
        #[cfg(feature = "gfxstream")]
        GpuMode::ModeGfxStream => BackendKind::VirtioGfxStream,
    };
    Gpu {
        exit_evt,
        gpu_device_socket,
        num_scanouts,
        resource_bridges,
        event_devices,
        config_event: false,
        kill_evt: None,
        worker_thread: None,
        display_backends,
        display_width: gpu_parameters.display_width,
        display_height: gpu_parameters.display_height,
        renderer_flags,
        pci_bar: None,
        map_request,
        external_blob,
        backend_kind,
        base_features,
    }
}
/// Builds the current virtio config space contents for the guest.
fn get_config(&self) -> virtio_gpu_config {
    // Surface a pending display-changed event, if any.
    let events_read = if self.config_event {
        VIRTIO_GPU_EVENT_DISPLAY
    } else {
        0
    };
    virtio_gpu_config {
        events_read: Le32::from(events_read),
        events_clear: Le32::from(0),
        num_scanouts: Le32::from(self.num_scanouts.get() as u32),
        num_capsets: Le32::from(self.backend_kind.capsets()),
    }
}
}
impl Drop for Gpu {
    /// Signals the worker to stop and waits for it to exit.
    fn drop(&mut self) {
        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
        // Wait for the worker to observe the kill event and return.
        if let Some(worker_thread) = self.worker_thread.take() {
            let _ = worker_thread.join();
        }
    }
}
impl VirtioDevice for Gpu {
/// Returns the raw descriptors that must remain open for this device.
fn keep_rds(&self) -> Vec<RawDescriptor> {
    let mut keep_rds = Vec::new();
    // TODO(davidriley): Remove once virgl has another path to include
    // debugging logs.
    if cfg!(debug_assertions) {
        keep_rds.push(libc::STDOUT_FILENO);
        keep_rds.push(libc::STDERR_FILENO);
    }
    if let Some(ref gpu_device_socket) = self.gpu_device_socket {
        keep_rds.push(gpu_device_socket.as_raw_descriptor());
    }
    keep_rds.push(self.exit_evt.as_raw_descriptor());
    for bridge in &self.resource_bridges {
        keep_rds.push(bridge.as_raw_descriptor());
    }
    keep_rds
}
/// Reports this device as a virtio GPU.
fn device_type(&self) -> u32 {
    TYPE_GPU
}
/// Maximum sizes of the control and cursor queues, in that order.
fn queue_max_sizes(&self) -> &[u16] {
    QUEUE_SIZES
}
/// Advertised feature bits: the transport's base set plus the backend's.
fn features(&self) -> u64 {
    self.base_features | self.backend_kind.features()
}
/// No feature-dependent behavior; the acknowledged bits are ignored.
fn ack_features(&mut self, value: u64) {
    let _ = value;
}
/// Copies the requested window of the config space into `data`.
fn read_config(&self, offset: u64, data: &mut [u8]) {
    copy_config(data, 0, self.get_config().as_slice(), offset);
}
/// Handles a guest write to the config space; clearing the display event bit
/// acknowledges a pending display event.
fn write_config(&mut self, offset: u64, data: &[u8]) {
    let mut cfg = self.get_config();
    copy_config(cfg.as_mut_slice(), offset, data, 0);
    if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
        self.config_event = false;
    }
}
/// Starts the worker thread once the guest driver activates the device.
fn activate(
    &mut self,
    mem: GuestMemory,
    interrupt: Interrupt,
    mut queues: Vec<Queue>,
    mut queue_evts: Vec<Event>,
) {
    // Expect exactly one control queue and one cursor queue.
    if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
        return;
    }
    let exit_evt = match self.exit_evt.try_clone() {
        Ok(e) => e,
        Err(e) => {
            error!("error cloning exit event: {}", e);
            return;
        }
    };
    // Paired events: the device keeps one end to signal shutdown, the worker
    // waits on the other.
    let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
        Ok(v) => v,
        Err(e) => {
            error!("error creating kill Event pair: {}", e);
            return;
        }
    };
    self.kill_evt = Some(self_kill_evt);
    let resource_bridges = mem::replace(&mut self.resource_bridges, Vec::new());
    let backend_kind = self.backend_kind.clone();
    let ctrl_queue = queues.remove(0);
    let ctrl_evt = queue_evts.remove(0);
    let cursor_queue = queues.remove(0);
    let cursor_evt = queue_evts.remove(0);
    let display_backends = self.display_backends.clone();
    let display_width = self.display_width;
    let display_height = self.display_height;
    let renderer_flags = self.renderer_flags;
    let event_devices = self.event_devices.split_off(0);
    let map_request = Arc::clone(&self.map_request);
    let external_blob = self.external_blob;
    // The worker only starts if both the VM memory socket and the PCI BAR
    // allocation (recorded by get_device_bars) are present; both are consumed.
    if let (Some(gpu_device_socket), Some(pci_bar)) =
        (self.gpu_device_socket.take(), self.pci_bar.take())
    {
        let worker_result =
            thread::Builder::new()
                .name("virtio_gpu".to_string())
                .spawn(move || {
                    // The backend is built on the worker thread; if it fails,
                    // the thread exits without a running worker.
                    let backend = match backend_kind.build(
                        &display_backends,
                        display_width,
                        display_height,
                        renderer_flags,
                        event_devices,
                        gpu_device_socket,
                        pci_bar,
                        map_request,
                        external_blob,
                    ) {
                        Some(backend) => backend,
                        None => return,
                    };
                    Worker {
                        interrupt,
                        exit_evt,
                        mem,
                        ctrl_queue,
                        ctrl_evt,
                        cursor_queue,
                        cursor_evt,
                        resource_bridges,
                        kill_evt,
                        state: Frontend::new(backend),
                    }
                    .run()
                });
        match worker_result {
            Err(e) => {
                error!("failed to spawn virtio_gpu worker: {}", e);
                return;
            }
            Ok(join_handle) => {
                self.worker_thread = Some(join_handle);
            }
        }
    }
}
// Require 1 BAR for mapping 3D buffers
fn get_device_bars(&mut self, address: PciAddress) -> Vec<PciBarConfiguration> {
    // Record the allocation so activate() can hand it to the backend.
    self.pci_bar = Some(Alloc::PciBar {
        bus: address.bus,
        dev: address.dev,
        func: address.func,
        bar: GPU_BAR_NUM,
    });
    vec![PciBarConfiguration::new(
        GPU_BAR_NUM as usize,
        GPU_BAR_SIZE,
        PciBarRegionType::Memory64BitRegion,
        PciBarPrefetchable::NotPrefetchable,
    )]
}
/// Advertises the host-visible shared memory region used for blob mappings.
fn get_device_caps(&self) -> Vec<Box<dyn PciCapability>> {
    vec![Box::new(VirtioPciShmCap::new(
        PciCapabilityType::SharedMemoryConfig,
        GPU_BAR_NUM,
        GPU_BAR_OFFSET,
        GPU_BAR_SIZE,
        VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
    ))]
}
}
virtio: gpu: forward use_external_blob flag to renderer
Fixes crosvm's failure to forward the "use_external_blob"
state flag to the host renderer (virglrenderer).
BUG=b:174794821
TEST=Start a crostini VM and run `DISPLAY=:0 glxgears`, verify graphical
output.
Change-Id: I1ef616079664224e7741cea00b9125c5061ea69d
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/2574669
Tested-by: Ryan Neph <7a140012b58874b966883fce003938f5de9942f2@google.com>
Tested-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com>
Reviewed-by: Gurchetan Singh <99434bba058e25b6203b8ae055a4f697dad0ef64@chromium.org>
Commit-Queue: Ryan Neph <7a140012b58874b966883fce003938f5de9942f2@google.com>
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
mod protocol;
mod virtio_2d_backend;
mod virtio_3d_backend;
mod virtio_backend;
mod virtio_gfxstream_backend;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::i64;
use std::io::Read;
use std::mem::{self, size_of};
use std::num::NonZeroU8;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use data_model::*;
use base::{
debug, error, warn, AsRawDescriptor, Event, ExternalMapping, PollToken, RawDescriptor,
WaitContext,
};
use sync::Mutex;
use vm_memory::{GuestAddress, GuestMemory};
use gpu_buffer::Format;
pub use gpu_display::EventDevice;
use gpu_display::*;
use gpu_renderer::RendererFlags;
use msg_socket::{MsgReceiver, MsgSender};
use resources::Alloc;
use super::{
copy_config, resource_bridge::*, DescriptorChain, Interrupt, Queue, Reader, VirtioDevice,
Writer, TYPE_GPU,
};
use super::{PciCapabilityType, VirtioPciShmCap};
use self::protocol::*;
use self::virtio_2d_backend::Virtio2DBackend;
use self::virtio_3d_backend::Virtio3DBackend;
#[cfg(feature = "gfxstream")]
use self::virtio_gfxstream_backend::VirtioGfxStreamBackend;
use crate::pci::{
PciAddress, PciBarConfiguration, PciBarPrefetchable, PciBarRegionType, PciCapability,
};
use vm_control::VmMemoryControlRequestSocket;
/// Default guest display width, in pixels.
pub const DEFAULT_DISPLAY_WIDTH: u32 = 1280;
/// Default guest display height, in pixels.
pub const DEFAULT_DISPLAY_HEIGHT: u32 = 1024;
/// Rendering mode requested for the GPU device.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum GpuMode {
    /// Display only, no accelerated rendering.
    Mode2D,
    /// Accelerated rendering via the 3D backend.
    Mode3D,
    /// Accelerated rendering via the gfxstream backend.
    #[cfg(feature = "gfxstream")]
    ModeGfxStream,
}
/// User-configurable parameters for the virtio-gpu device.
#[derive(Debug)]
pub struct GpuParameters {
    pub display_width: u32,
    pub display_height: u32,
    // Renderer surface/context options.
    pub renderer_use_egl: bool,
    pub renderer_use_gles: bool,
    pub renderer_use_glx: bool,
    pub renderer_use_surfaceless: bool,
    // gfxstream-only renderer options.
    #[cfg(feature = "gfxstream")]
    pub gfxstream_use_guest_angle: bool,
    #[cfg(feature = "gfxstream")]
    pub gfxstream_use_syncfd: bool,
    #[cfg(feature = "gfxstream")]
    pub gfxstream_support_vulkan: bool,
    pub mode: GpuMode,
    // Optional shader cache location and size.
    pub cache_path: Option<String>,
    pub cache_size: Option<String>,
}
// First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
// there to be fewer of.
const QUEUE_SIZES: &[u16] = &[256, 16];
// How often the worker wakes to poll for completed fences while any are outstanding.
const FENCE_POLL_MS: u64 = 1;
// PCI BAR (and its offset/size) used for host-visible buffer mappings.
const GPU_BAR_NUM: u8 = 4;
const GPU_BAR_OFFSET: u64 = 0;
const GPU_BAR_SIZE: u64 = 1 << 33;
impl Default for GpuParameters {
    /// Defaults: 1280x1024 display, 3D mode, EGL/GLES surfaceless rendering,
    /// no shader cache.
    fn default() -> Self {
        GpuParameters {
            display_width: DEFAULT_DISPLAY_WIDTH,
            display_height: DEFAULT_DISPLAY_HEIGHT,
            renderer_use_egl: true,
            renderer_use_gles: true,
            renderer_use_glx: false,
            renderer_use_surfaceless: true,
            #[cfg(feature = "gfxstream")]
            gfxstream_use_guest_angle: false,
            #[cfg(feature = "gfxstream")]
            gfxstream_use_syncfd: true,
            #[cfg(feature = "gfxstream")]
            gfxstream_support_vulkan: true,
            mode: GpuMode::Mode3D,
            cache_path: None,
            cache_size: None,
        }
    }
}
/// Scanout geometry and layout supplied by the SET_SCANOUT_BLOB command.
#[derive(Copy, Clone, Debug)]
pub struct VirtioScanoutBlobData {
    pub width: u32,
    pub height: u32,
    pub drm_format: Format,
    // Per-plane byte strides and offsets into the blob resource.
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}
/// A virtio-gpu backend state tracker which supports display and potentially accelerated rendering.
///
/// Commands from the virtio-gpu protocol can be submitted here using the methods, and they will be
/// realized on the hardware. Most methods return a `VirtioGpuResult` that indicate the success,
/// failure, or requested data for the given command.
trait Backend {
/// Returns the number of capsets provided by the Backend.
fn capsets() -> u32
where
Self: Sized;
/// Returns the bitset of virtio features provided by the Backend in addition to the base set
/// of device features.
fn features() -> u64
where
Self: Sized;
/// Constructs a backend.
fn build(
display: GpuDisplay,
display_width: u32,
display_height: u32,
renderer_flags: RendererFlags,
event_devices: Vec<EventDevice>,
gpu_device_socket: VmMemoryControlRequestSocket,
pci_bar: Alloc,
map_request: Arc<Mutex<Option<ExternalMapping>>>,
external_blob: bool,
) -> Option<Box<dyn Backend>>
where
Self: Sized;
fn display(&self) -> &Rc<RefCell<GpuDisplay>>;
/// Processes the internal `display` events and returns `true` if the main display was closed.
fn process_display(&mut self) -> bool;
/// Creates a fence with the given id that can be used to determine when the previous command
/// completed.
fn create_fence(&mut self, ctx_id: u32, fence_id: u32) -> VirtioGpuResult;
fn export_fence(&mut self, _fence_id: u32) -> ResourceResponse {
ResourceResponse::Invalid
}
/// Returns the id of the latest fence to complete.
fn fence_poll(&mut self) -> u32;
/// For accelerated rendering capable backends, switch to the default rendering context.
fn force_ctx_0(&mut self) {}
/// Attaches the given input device to the given surface of the display (to allow for input
/// from an X11 window for example).
fn import_event_device(&mut self, event_device: EventDevice, scanout: u32) -> VirtioGpuResult;
/// If supported, export the resource with the given id to a file.
fn export_resource(&mut self, id: u32) -> ResourceResponse;
/// Gets the list of supported display resolutions as a slice of `(width, height)` tuples.
fn display_info(&self) -> [(u32, u32); 1];
/// Creates a 2D resource with the given properties and associates it with the given id.
fn create_resource_2d(
&mut self,
id: u32,
width: u32,
height: u32,
format: u32,
) -> VirtioGpuResult;
/// Removes the guest's reference count for the given resource id.
fn unref_resource(&mut self, id: u32) -> VirtioGpuResult;
/// Sets the given resource id as the source of scanout to the display, with optional blob data.
fn set_scanout(
&mut self,
_scanout_id: u32,
resource_id: u32,
scanout_data: Option<VirtioScanoutBlobData>,
) -> VirtioGpuResult;
/// Flushes the given rectangle of pixels of the given resource to the display.
fn flush_resource(
&mut self,
id: u32,
x: u32,
y: u32,
width: u32,
height: u32,
) -> VirtioGpuResult;
/// Copies the given rectangle of pixels of the given resource's backing memory to the host side
/// resource.
fn transfer_to_resource_2d(
&mut self,
id: u32,
x: u32,
y: u32,
width: u32,
height: u32,
src_offset: u64,
mem: &GuestMemory,
) -> VirtioGpuResult;
/// Attaches backing memory to the given resource, represented by a `Vec` of `(address, size)`
/// tuples in the guest's physical address space.
fn attach_backing(
&mut self,
id: u32,
mem: &GuestMemory,
vecs: Vec<(GuestAddress, usize)>,
) -> VirtioGpuResult;
/// Detaches any backing memory from the given resource, if there is any.
fn detach_backing(&mut self, id: u32) -> VirtioGpuResult;
/// If supported, assigns a UUID to the given resource. Unsupported by default
/// (returns `ErrUnspec`).
fn resource_assign_uuid(&mut self, _id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Updates the cursor's memory to the given id, and sets its position to the given coordinates.
fn update_cursor(&mut self, id: u32, x: u32, y: u32) -> VirtioGpuResult;
/// Moves the cursor's position to the given coordinates.
fn move_cursor(&mut self, x: u32, y: u32) -> VirtioGpuResult;
/// Gets the renderer's capset information associated with `index`.
fn get_capset_info(&self, index: u32) -> VirtioGpuResult;
/// Gets the capset of `version` associated with `id`.
fn get_capset(&self, id: u32, version: u32) -> VirtioGpuResult;
/// Creates a fresh renderer context with the given `id`. Unsupported by default.
fn create_renderer_context(&mut self, _id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Destroys the renderer context associated with `id`. Unsupported by default.
fn destroy_renderer_context(&mut self, _id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Attaches the indicated resource to the given context. Unsupported by default.
fn context_attach_resource(&mut self, _ctx_id: u32, _res_id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Detaches the indicated resource from the given context. Unsupported by default.
fn context_detach_resource(&mut self, _ctx_id: u32, _res_id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Creates a 3D resource with the given properties and associates it with the given id.
/// Unsupported by default.
fn resource_create_3d(
&mut self,
_id: u32,
_target: u32,
_format: u32,
_bind: u32,
_width: u32,
_height: u32,
_depth: u32,
_array_size: u32,
_last_level: u32,
_nr_samples: u32,
_flags: u32,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Copies the given 3D rectangle of pixels of the given resource's backing memory to the host
/// side resource. Unsupported by default.
fn transfer_to_resource_3d(
&mut self,
_ctx_id: u32,
_res_id: u32,
_x: u32,
_y: u32,
_z: u32,
_width: u32,
_height: u32,
_depth: u32,
_level: u32,
_stride: u32,
_layer_stride: u32,
_offset: u64,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Copies the given rectangle of pixels from the resource to the given resource's backing
/// memory. Unsupported by default.
fn transfer_from_resource_3d(
&mut self,
_ctx_id: u32,
_res_id: u32,
_x: u32,
_y: u32,
_z: u32,
_width: u32,
_height: u32,
_depth: u32,
_level: u32,
_stride: u32,
_layer_stride: u32,
_offset: u64,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// Submits a command buffer to the given rendering context. Unsupported by default.
fn submit_command(&mut self, _ctx_id: u32, _commands: &mut [u8]) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// If supported, creates a blob resource with the given properties and guest backing.
/// Unsupported by default (returns `ErrUnspec`).
fn resource_create_blob(
&mut self,
_resource_id: u32,
_ctx_id: u32,
_blob_mem: u32,
_blob_flags: u32,
_blob_id: u64,
_size: u64,
_vecs: Vec<(GuestAddress, usize)>,
_mem: &GuestMemory,
) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// If supported, maps the given blob resource at `offset`. Unsupported by default.
fn resource_map_blob(&mut self, _resource_id: u32, _offset: u64) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
/// If supported, unmaps the given blob resource. Unsupported by default.
fn resource_unmap_blob(&mut self, _resource_id: u32) -> VirtioGpuResult {
Err(GpuResponse::ErrUnspec)
}
}
/// Selects which `Backend` implementation the GPU device is built with.
#[derive(Clone)]
enum BackendKind {
Virtio2D,
Virtio3D,
// Only available when compiled with the "gfxstream" feature.
#[cfg(feature = "gfxstream")]
VirtioGfxStream,
}
impl BackendKind {
/// Returns the number of capsets provided by the Backend.
fn capsets(&self) -> u32 {
match self {
BackendKind::Virtio2D => Virtio2DBackend::capsets(),
BackendKind::Virtio3D => Virtio3DBackend::capsets(),
#[cfg(feature = "gfxstream")]
BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::capsets(),
}
}
/// Returns the bitset of virtio features provided by the Backend in addition to the base set
/// of device features.
fn features(&self) -> u64 {
match self {
BackendKind::Virtio2D => Virtio2DBackend::features(),
BackendKind::Virtio3D => Virtio3DBackend::features(),
#[cfg(feature = "gfxstream")]
BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::features(),
}
}
/// Initializes the backend: opens the first display in `possible_displays` that works, then
/// builds the selected backend with it. Returns `None` if no display could be opened or the
/// backend itself failed to build.
fn build(
&self,
possible_displays: &[DisplayBackend],
display_width: u32,
display_height: u32,
renderer_flags: RendererFlags,
event_devices: Vec<EventDevice>,
gpu_device_socket: VmMemoryControlRequestSocket,
pci_bar: Alloc,
map_request: Arc<Mutex<Option<ExternalMapping>>>,
external_blob: bool,
) -> Option<Box<dyn Backend>> {
// Try each candidate display in order and keep the first one that opens.
let mut display_opt = None;
for display in possible_displays {
match display.build() {
Ok(c) => {
display_opt = Some(c);
break;
}
Err(e) => error!("failed to open display: {}", e),
};
}
let display = match display_opt {
Some(d) => d,
None => {
error!("failed to open any displays");
return None;
}
};
match self {
BackendKind::Virtio2D => Virtio2DBackend::build(
display,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
),
BackendKind::Virtio3D => Virtio3DBackend::build(
display,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
),
#[cfg(feature = "gfxstream")]
BackendKind::VirtioGfxStream => VirtioGfxStreamBackend::build(
display,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
),
}
}
}
/// A descriptor that is ready to be returned to its queue as used, along with the number of
/// bytes written into it.
struct ReturnDescriptor {
index: u16,
len: u32,
}
/// A descriptor whose response is withheld until the fence with `fence_id` completes.
struct FenceDescriptor {
fence_id: u32,
index: u16,
len: u32,
}
/// Decodes virtio-gpu commands from the queues and dispatches them to the chosen `Backend`.
struct Frontend {
// Completed ctrl-queue descriptors waiting to be added back as used.
return_ctrl_descriptors: VecDeque<ReturnDescriptor>,
// Completed cursor-queue descriptors waiting to be added back as used.
return_cursor_descriptors: VecDeque<ReturnDescriptor>,
// Descriptors deferred until their fences complete (drained by `fence_poll`).
fence_descriptors: Vec<FenceDescriptor>,
backend: Box<dyn Backend>,
}
impl Frontend {
fn new(backend: Box<dyn Backend>) -> Frontend {
Frontend {
return_ctrl_descriptors: Default::default(),
return_cursor_descriptors: Default::default(),
fence_descriptors: Default::default(),
backend,
}
}
/// Returns the display used by the backend.
fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
self.backend.display()
}
/// Processes pending display events; returns true if the display requested close.
fn process_display(&mut self) -> bool {
self.backend.process_display()
}
/// Receives one request from `resource_bridge` and sends back the backend's response.
/// Receive errors are logged and the request is dropped without a reply.
fn process_resource_bridge(&mut self, resource_bridge: &ResourceResponseSocket) {
let response = match resource_bridge.recv() {
Ok(ResourceRequest::GetBuffer { id }) => self.backend.export_resource(id),
Ok(ResourceRequest::GetFence { seqno }) => {
// The seqno originated from self.backend, so
// it should fit in a u32.
match u32::try_from(seqno) {
Ok(fence_id) => self.backend.export_fence(fence_id),
Err(_) => ResourceResponse::Invalid,
}
}
Err(e) => {
error!("error receiving resource bridge request: {}", e);
return;
}
};
if let Err(e) = resource_bridge.send(&response) {
error!("error sending resource bridge request: {}", e);
}
}
fn process_gpu_command(
&mut self,
mem: &GuestMemory,
cmd: GpuCommand,
reader: &mut Reader,
) -> VirtioGpuResult {
self.backend.force_ctx_0();
match cmd {
GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
self.backend.display_info().to_vec(),
)),
GpuCommand::ResourceCreate2d(info) => self.backend.create_resource_2d(
info.resource_id.to_native(),
info.width.to_native(),
info.height.to_native(),
info.format.to_native(),
),
GpuCommand::ResourceUnref(info) => {
self.backend.unref_resource(info.resource_id.to_native())
}
GpuCommand::SetScanout(info) => self.backend.set_scanout(
info.scanout_id.to_native(),
info.resource_id.to_native(),
None,
),
GpuCommand::ResourceFlush(info) => self.backend.flush_resource(
info.resource_id.to_native(),
info.r.x.to_native(),
info.r.y.to_native(),
info.r.width.to_native(),
info.r.height.to_native(),
),
GpuCommand::TransferToHost2d(info) => self.backend.transfer_to_resource_2d(
info.resource_id.to_native(),
info.r.x.to_native(),
info.r.y.to_native(),
info.r.width.to_native(),
info.r.height.to_native(),
info.offset.to_native(),
mem,
),
GpuCommand::ResourceAttachBacking(info) => {
let available_bytes = reader.available_bytes();
if available_bytes != 0 {
let entry_count = info.nr_entries.to_native() as usize;
let mut vecs = Vec::with_capacity(entry_count);
for _ in 0..entry_count {
match reader.read_obj::<virtio_gpu_mem_entry>() {
Ok(entry) => {
let addr = GuestAddress(entry.addr.to_native());
let len = entry.length.to_native() as usize;
vecs.push((addr, len))
}
Err(_) => return Err(GpuResponse::ErrUnspec),
}
}
self.backend
.attach_backing(info.resource_id.to_native(), mem, vecs)
} else {
error!("missing data for command {:?}", cmd);
Err(GpuResponse::ErrUnspec)
}
}
GpuCommand::ResourceDetachBacking(info) => {
self.backend.detach_backing(info.resource_id.to_native())
}
GpuCommand::UpdateCursor(info) => self.backend.update_cursor(
info.resource_id.to_native(),
info.pos.x.into(),
info.pos.y.into(),
),
GpuCommand::MoveCursor(info) => self
.backend
.move_cursor(info.pos.x.into(), info.pos.y.into()),
GpuCommand::ResourceAssignUuid(info) => {
let resource_id = info.resource_id.to_native();
self.backend.resource_assign_uuid(resource_id)
}
GpuCommand::GetCapsetInfo(info) => {
self.backend.get_capset_info(info.capset_index.to_native())
}
GpuCommand::GetCapset(info) => self
.backend
.get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
GpuCommand::CtxCreate(info) => self
.backend
.create_renderer_context(info.hdr.ctx_id.to_native()),
GpuCommand::CtxDestroy(info) => self
.backend
.destroy_renderer_context(info.hdr.ctx_id.to_native()),
GpuCommand::CtxAttachResource(info) => self
.backend
.context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
GpuCommand::CtxDetachResource(info) => self
.backend
.context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
GpuCommand::ResourceCreate3d(info) => {
let id = info.resource_id.to_native();
let target = info.target.to_native();
let format = info.format.to_native();
let bind = info.bind.to_native();
let width = info.width.to_native();
let height = info.height.to_native();
let depth = info.depth.to_native();
let array_size = info.array_size.to_native();
let last_level = info.last_level.to_native();
let nr_samples = info.nr_samples.to_native();
let flags = info.flags.to_native();
self.backend.resource_create_3d(
id, target, format, bind, width, height, depth, array_size, last_level,
nr_samples, flags,
)
}
GpuCommand::TransferToHost3d(info) => {
let ctx_id = info.hdr.ctx_id.to_native();
let res_id = info.resource_id.to_native();
let x = info.box_.x.to_native();
let y = info.box_.y.to_native();
let z = info.box_.z.to_native();
let width = info.box_.w.to_native();
let height = info.box_.h.to_native();
let depth = info.box_.d.to_native();
let level = info.level.to_native();
let stride = info.stride.to_native();
let layer_stride = info.layer_stride.to_native();
let offset = info.offset.to_native();
self.backend.transfer_to_resource_3d(
ctx_id,
res_id,
x,
y,
z,
width,
height,
depth,
level,
stride,
layer_stride,
offset,
)
}
GpuCommand::TransferFromHost3d(info) => {
let ctx_id = info.hdr.ctx_id.to_native();
let res_id = info.resource_id.to_native();
let x = info.box_.x.to_native();
let y = info.box_.y.to_native();
let z = info.box_.z.to_native();
let width = info.box_.w.to_native();
let height = info.box_.h.to_native();
let depth = info.box_.d.to_native();
let level = info.level.to_native();
let stride = info.stride.to_native();
let layer_stride = info.layer_stride.to_native();
let offset = info.offset.to_native();
self.backend.transfer_from_resource_3d(
ctx_id,
res_id,
x,
y,
z,
width,
height,
depth,
level,
stride,
layer_stride,
offset,
)
}
GpuCommand::CmdSubmit3d(info) => {
if reader.available_bytes() != 0 {
let cmd_size = info.size.to_native() as usize;
let mut cmd_buf = vec![0; cmd_size];
if reader.read_exact(&mut cmd_buf[..]).is_ok() {
self.backend
.submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
} else {
Err(GpuResponse::ErrInvalidParameter)
}
} else {
// Silently accept empty command buffers to allow for
// benchmarking.
Ok(GpuResponse::OkNoData)
}
}
GpuCommand::ResourceCreateBlob(info) => {
let resource_id = info.resource_id.to_native();
let ctx_id = info.hdr.ctx_id.to_native();
let blob_mem = info.blob_mem.to_native();
let blob_flags = info.blob_flags.to_native();
let blob_id = info.blob_id.to_native();
let size = info.size.to_native();
let entry_count = info.nr_entries.to_native();
if entry_count > VIRTIO_GPU_MAX_IOVEC_ENTRIES
|| (reader.available_bytes() == 0 && entry_count > 0)
{
return Err(GpuResponse::ErrUnspec);
}
let mut vecs = Vec::with_capacity(entry_count as usize);
for _ in 0..entry_count {
match reader.read_obj::<virtio_gpu_mem_entry>() {
Ok(entry) => {
let addr = GuestAddress(entry.addr.to_native());
let len = entry.length.to_native() as usize;
vecs.push((addr, len))
}
Err(_) => return Err(GpuResponse::ErrUnspec),
}
}
self.backend.resource_create_blob(
resource_id,
ctx_id,
blob_mem,
blob_flags,
blob_id,
size,
vecs,
mem,
)
}
GpuCommand::SetScanoutBlob(info) => {
let scanout_id = info.scanout_id.to_native();
let resource_id = info.resource_id.to_native();
let virtio_gpu_format = info.format.to_native();
let width = info.width.to_native();
let height = info.width.to_native();
let mut strides: [u32; 4] = [0; 4];
let mut offsets: [u32; 4] = [0; 4];
// As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
// the following may have to change too.
let drm_format = match virtio_gpu_format {
VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => Format::new(b'X', b'R', b'2', b'4'),
VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => Format::new(b'A', b'R', b'2', b'4'),
_ => {
error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
return Err(GpuResponse::ErrUnspec);
}
};
for plane_index in 0..PLANE_INFO_MAX_COUNT {
offsets[plane_index] = info.offsets[plane_index].to_native();
strides[plane_index] = info.strides[plane_index].to_native();
}
let scanout = VirtioScanoutBlobData {
width,
height,
drm_format,
strides,
offsets,
};
self.backend
.set_scanout(scanout_id, resource_id, Some(scanout))
}
GpuCommand::ResourceMapBlob(info) => {
let resource_id = info.resource_id.to_native();
let offset = info.offset.to_native();
self.backend.resource_map_blob(resource_id, offset)
}
GpuCommand::ResourceUnmapBlob(info) => {
let resource_id = info.resource_id.to_native();
self.backend.resource_unmap_blob(resource_id)
}
}
}
/// Returns true if `desc` is large enough to hold a control header and is device-readable.
fn validate_desc(desc: &DescriptorChain) -> bool {
desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only()
}
/// Drains `queue`, processing each descriptor in turn. Returns true if any descriptor was
/// added back as used, i.e. the guest should be signaled.
fn process_queue(&mut self, mem: &GuestMemory, queue: &mut Queue) -> bool {
let mut signal_used = false;
while let Some(desc) = queue.pop(mem) {
if Frontend::validate_desc(&desc) {
match (
Reader::new(mem.clone(), desc.clone()),
Writer::new(mem.clone(), desc.clone()),
) {
(Ok(mut reader), Ok(mut writer)) => {
// `None` means the response is fenced and will be returned later.
if let Some(ret_desc) =
self.process_descriptor(mem, desc.index, &mut reader, &mut writer)
{
queue.add_used(&mem, ret_desc.index, ret_desc.len);
signal_used = true;
}
}
(_, Err(e)) | (Err(e), _) => {
debug!("invalid descriptor: {}", e);
queue.add_used(&mem, desc.index, 0);
signal_used = true;
}
}
} else {
// Return malformed descriptors with zero length, logging the likely command type.
let likely_type = mem.read_obj_from_addr(desc.addr).unwrap_or(Le32::from(0));
debug!(
"queue bad descriptor index = {} len = {} write = {} type = {}",
desc.index,
desc.len,
desc.is_write_only(),
virtio_gpu_cmd_str(likely_type.to_native())
);
queue.add_used(&mem, desc.index, 0);
signal_used = true;
}
}
signal_used
}
/// Decodes and runs the command in one descriptor, then encodes the response into `writer`.
/// Returns the descriptor to be returned as used now, or `None` when the command carries a
/// fence flag, in which case the descriptor is held in `fence_descriptors` until the fence
/// completes (see `fence_poll`).
fn process_descriptor(
&mut self,
mem: &GuestMemory,
desc_index: u16,
reader: &mut Reader,
writer: &mut Writer,
) -> Option<ReturnDescriptor> {
let mut resp = Err(GpuResponse::ErrUnspec);
let mut gpu_cmd = None;
let mut len = 0;
match GpuCommand::decode(reader) {
Ok(cmd) => {
resp = self.process_gpu_command(mem, cmd, reader);
gpu_cmd = Some(cmd);
}
Err(e) => debug!("descriptor decode error: {}", e),
}
let mut gpu_response = match resp {
Ok(gpu_response) => gpu_response,
Err(gpu_response) => {
debug!("{:?} -> {:?}", gpu_cmd, gpu_response);
gpu_response
}
};
if writer.available_bytes() != 0 {
let mut fence_id = 0;
let mut ctx_id = 0;
let mut flags = 0;
if let Some(cmd) = gpu_cmd {
let ctrl_hdr = cmd.ctrl_hdr();
if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
fence_id = ctrl_hdr.fence_id.to_native();
ctx_id = ctrl_hdr.ctx_id.to_native();
flags = VIRTIO_GPU_FLAG_FENCE;
// A failed fence creation replaces the command's response.
gpu_response = match self.backend.create_fence(ctx_id, fence_id as u32) {
Ok(_) => gpu_response,
Err(fence_resp) => {
warn!("create_fence {} -> {:?}", fence_id, fence_resp);
fence_resp
}
};
}
}
// Prepare the response now, even if it is going to wait until
// fence is complete.
match gpu_response.encode(flags, fence_id, ctx_id, writer) {
Ok(l) => len = l,
Err(e) => debug!("ctrl queue response encode error: {}", e),
}
if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
self.fence_descriptors.push(FenceDescriptor {
fence_id: fence_id as u32,
index: desc_index,
len,
});
return None;
}
// No fence, respond now.
}
Some(ReturnDescriptor {
index: desc_index,
len,
})
}
/// Pops the next completed cursor-queue descriptor, if any.
fn return_cursor(&mut self) -> Option<ReturnDescriptor> {
self.return_cursor_descriptors.pop_front()
}
/// Pops the next completed ctrl-queue descriptor, if any.
fn return_ctrl(&mut self) -> Option<ReturnDescriptor> {
self.return_ctrl_descriptors.pop_front()
}
/// Polls the backend for the latest completed fence and moves every deferred descriptor whose
/// fence id is at or below it onto the ctrl return queue.
fn fence_poll(&mut self) {
let fence_id = self.backend.fence_poll();
let return_descs = &mut self.return_ctrl_descriptors;
self.fence_descriptors.retain(|f_desc| {
// Keep descriptors whose fence has not yet completed.
if f_desc.fence_id > fence_id {
true
} else {
return_descs.push_back(ReturnDescriptor {
index: f_desc.index,
len: f_desc.len,
});
false
}
})
}
}
/// State owned by the virtio-gpu worker thread: the two virtio queues, their events, the
/// resource bridges, and the command-processing `Frontend`.
struct Worker {
interrupt: Interrupt,
// Written when the display requests close, to ask the VM to exit.
exit_evt: Event,
mem: GuestMemory,
ctrl_queue: Queue,
ctrl_evt: Event,
cursor_queue: Queue,
cursor_evt: Event,
resource_bridges: Vec<ResourceResponseSocket>,
kill_evt: Event,
state: Frontend,
}
impl Worker {
/// Runs the worker event loop, servicing the ctrl/cursor queues, display events, resource
/// bridges, interrupt resampling, and fence completion until the kill event fires or waiting
/// fails.
fn run(&mut self) {
#[derive(PollToken)]
enum Token {
CtrlQueue,
CursorQueue,
Display,
InterruptResample,
Kill,
ResourceBridge { index: usize },
}
let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
(&self.ctrl_evt, Token::CtrlQueue),
(&self.cursor_evt, Token::CursorQueue),
(&*self.state.display().borrow(), Token::Display),
(self.interrupt.get_resample_evt(), Token::InterruptResample),
(&self.kill_evt, Token::Kill),
]) {
Ok(pc) => pc,
Err(e) => {
error!("failed creating WaitContext: {}", e);
return;
}
};
for (index, bridge) in self.resource_bridges.iter().enumerate() {
if let Err(e) = wait_ctx.add(bridge, Token::ResourceBridge { index }) {
error!("failed to add resource bridge to WaitContext: {}", e);
}
}
// TODO(davidriley): The entire main loop processing is somewhat racey and incorrect with
// respect to cursor vs control queue processing. As both currently and originally
// written, while the control queue is only processed/read from after the the cursor queue
// is finished, the entire queue will be processed at that time. The end effect of this
// racyiness is that control queue descriptors that are issued after cursors descriptors
// might be handled first instead of the other way around. In practice, the cursor queue
// isn't used so this isn't a huge issue.
// Declare this outside the loop so we don't keep allocating and freeing the vector.
let mut process_resource_bridge = Vec::with_capacity(self.resource_bridges.len());
'wait: loop {
// If there are outstanding fences, wake up early to poll them.
let duration = if !self.state.fence_descriptors.is_empty() {
Duration::from_millis(FENCE_POLL_MS)
} else {
Duration::new(i64::MAX as u64, 0)
};
let events = match wait_ctx.wait_timeout(duration) {
Ok(v) => v,
Err(e) => {
error!("failed polling for events: {}", e);
break;
}
};
let mut signal_used_cursor = false;
let mut signal_used_ctrl = false;
let mut ctrl_available = false;
// Clear the old values and re-initialize with false.
process_resource_bridge.clear();
process_resource_bridge.resize(self.resource_bridges.len(), false);
// This display isn't typically used when the virt-wl device is available and it can
// lead to hung fds (crbug.com/1027379). Disable if it's hung.
for event in events.iter().filter(|e| e.is_hungup) {
if let Token::Display = event.token {
error!("default display hang-up detected");
let _ = wait_ctx.delete(&*self.state.display().borrow());
}
}
for event in events.iter().filter(|e| e.is_readable) {
match event.token {
Token::CtrlQueue => {
let _ = self.ctrl_evt.read();
// Set flag that control queue is available to be read, but defer reading
// until rest of the events are processed.
ctrl_available = true;
}
Token::CursorQueue => {
let _ = self.cursor_evt.read();
if self.state.process_queue(&self.mem, &mut self.cursor_queue) {
signal_used_cursor = true;
}
}
Token::Display => {
let close_requested = self.state.process_display();
if close_requested {
let _ = self.exit_evt.write(1);
}
}
Token::ResourceBridge { index } => {
process_resource_bridge[index] = true;
}
Token::InterruptResample => {
self.interrupt.interrupt_resample();
}
Token::Kill => {
break 'wait;
}
}
}
// All cursor commands go first because they have higher priority.
while let Some(desc) = self.state.return_cursor() {
self.cursor_queue.add_used(&self.mem, desc.index, desc.len);
signal_used_cursor = true;
}
if ctrl_available && self.state.process_queue(&self.mem, &mut self.ctrl_queue) {
signal_used_ctrl = true;
}
self.state.fence_poll();
while let Some(desc) = self.state.return_ctrl() {
self.ctrl_queue.add_used(&self.mem, desc.index, desc.len);
signal_used_ctrl = true;
}
// Process the entire control queue before the resource bridge in case a resource is
// created or destroyed by the control queue. Processing the resource bridge first may
// lead to a race condition.
// TODO(davidriley): This is still inherently racey if both the control queue request
// and the resource bridge request come in at the same time after the control queue is
// processed above and before the corresponding bridge is processed below.
for (bridge, &should_process) in
self.resource_bridges.iter().zip(&process_resource_bridge)
{
if should_process {
self.state.process_resource_bridge(bridge);
}
}
if signal_used_ctrl {
self.interrupt.signal_used_queue(self.ctrl_queue.vector);
}
if signal_used_cursor {
self.interrupt.signal_used_queue(self.cursor_queue.vector);
}
}
}
}
/// Indicates a backend that should be tried for the gpu to use for display.
///
/// Several instances of this enum are used in an ordered list to give the gpu device many backends
/// to use as fallbacks in case some do not work.
#[derive(Clone)]
pub enum DisplayBackend {
/// Use the wayland backend, connecting to the socket at the given path if one is given.
Wayland(Option<PathBuf>),
/// Open a connection to the X server at the given display if one is given.
X(Option<String>),
/// Emulate a display without actually displaying it.
Stub,
}
impl DisplayBackend {
/// Attempts to open this kind of display, returning the connection or the open error.
fn build(&self) -> std::result::Result<GpuDisplay, GpuDisplayError> {
match self {
DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
DisplayBackend::X(display) => GpuDisplay::open_x(display.as_ref()),
DisplayBackend::Stub => GpuDisplay::open_stub(),
}
}
}
/// The virtio-gpu device. Holds configuration until `activate`, which hands most of it to the
/// worker thread.
pub struct Gpu {
exit_evt: Event,
gpu_device_socket: Option<VmMemoryControlRequestSocket>,
resource_bridges: Vec<ResourceResponseSocket>,
event_devices: Vec<EventDevice>,
// Signals the worker thread to stop; written on drop.
kill_evt: Option<Event>,
// When true, VIRTIO_GPU_EVENT_DISPLAY is reported in `events_read` of the device config.
config_event: bool,
worker_thread: Option<thread::JoinHandle<()>>,
num_scanouts: NonZeroU8,
display_backends: Vec<DisplayBackend>,
display_width: u32,
display_height: u32,
renderer_flags: RendererFlags,
// PCI BAR allocation, set by `get_device_bars`.
pci_bar: Option<Alloc>,
map_request: Arc<Mutex<Option<ExternalMapping>>>,
external_blob: bool,
backend_kind: BackendKind,
base_features: u64,
}
impl Gpu {
/// Creates a new virtio-gpu device, deriving renderer flags and the backend kind from
/// `gpu_parameters`. The worker is not started until `activate`.
pub fn new(
exit_evt: Event,
gpu_device_socket: Option<VmMemoryControlRequestSocket>,
num_scanouts: NonZeroU8,
resource_bridges: Vec<ResourceResponseSocket>,
display_backends: Vec<DisplayBackend>,
gpu_parameters: &GpuParameters,
event_devices: Vec<EventDevice>,
map_request: Arc<Mutex<Option<ExternalMapping>>>,
external_blob: bool,
base_features: u64,
) -> Gpu {
let renderer_flags = RendererFlags::new()
.use_egl(gpu_parameters.renderer_use_egl)
.use_gles(gpu_parameters.renderer_use_gles)
.use_glx(gpu_parameters.renderer_use_glx)
.use_surfaceless(gpu_parameters.renderer_use_surfaceless)
.use_external_blob(external_blob);
// gfxstream builds layer additional flags on top of the base set.
#[cfg(feature = "gfxstream")]
let renderer_flags = renderer_flags
.use_guest_angle(gpu_parameters.gfxstream_use_guest_angle)
.use_syncfd(gpu_parameters.gfxstream_use_syncfd)
.support_vulkan(gpu_parameters.gfxstream_support_vulkan);
let backend_kind = match gpu_parameters.mode {
GpuMode::Mode2D => BackendKind::Virtio2D,
GpuMode::Mode3D => BackendKind::Virtio3D,
#[cfg(feature = "gfxstream")]
GpuMode::ModeGfxStream => BackendKind::VirtioGfxStream,
};
Gpu {
exit_evt,
gpu_device_socket,
num_scanouts,
resource_bridges,
event_devices,
config_event: false,
kill_evt: None,
worker_thread: None,
display_backends,
display_width: gpu_parameters.display_width,
display_height: gpu_parameters.display_height,
renderer_flags,
pci_bar: None,
map_request,
external_blob,
backend_kind,
base_features,
}
}
/// Builds the current contents of the virtio-gpu config space.
fn get_config(&self) -> virtio_gpu_config {
let mut events_read = 0;
if self.config_event {
events_read |= VIRTIO_GPU_EVENT_DISPLAY;
}
virtio_gpu_config {
events_read: Le32::from(events_read),
events_clear: Le32::from(0),
num_scanouts: Le32::from(self.num_scanouts.get() as u32),
num_capsets: Le32::from(self.backend_kind.capsets()),
}
}
}
impl Drop for Gpu {
// Signal the worker thread to exit and wait for it to finish.
fn drop(&mut self) {
if let Some(kill_evt) = self.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
if let Some(worker_thread) = self.worker_thread.take() {
let _ = worker_thread.join();
}
}
}
impl VirtioDevice for Gpu {
// Descriptors the device needs to keep open after device setup.
fn keep_rds(&self) -> Vec<RawDescriptor> {
let mut keep_rds = Vec::new();
// TODO(davidriley): Remove once virgl has another path to include
// debugging logs.
if cfg!(debug_assertions) {
keep_rds.push(libc::STDOUT_FILENO);
keep_rds.push(libc::STDERR_FILENO);
}
if let Some(ref gpu_device_socket) = self.gpu_device_socket {
keep_rds.push(gpu_device_socket.as_raw_descriptor());
}
keep_rds.push(self.exit_evt.as_raw_descriptor());
for bridge in &self.resource_bridges {
keep_rds.push(bridge.as_raw_descriptor());
}
keep_rds
}
fn device_type(&self) -> u32 {
TYPE_GPU
}
fn queue_max_sizes(&self) -> &[u16] {
QUEUE_SIZES
}
// Base features plus the backend-specific feature bits.
fn features(&self) -> u64 {
self.base_features | self.backend_kind.features()
}
fn ack_features(&mut self, value: u64) {
let _ = value;
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
copy_config(data, 0, self.get_config().as_slice(), offset);
}
// The driver acknowledges pending display events by writing VIRTIO_GPU_EVENT_DISPLAY
// into `events_clear`.
fn write_config(&mut self, offset: u64, data: &[u8]) {
let mut cfg = self.get_config();
copy_config(cfg.as_mut_slice(), offset, data, 0);
if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
self.config_event = false;
}
}
// Spawns the worker thread that builds the backend and services the queues. Requires both
// the gpu device socket and the PCI BAR (from `get_device_bars`) to have been set.
fn activate(
&mut self,
mem: GuestMemory,
interrupt: Interrupt,
mut queues: Vec<Queue>,
mut queue_evts: Vec<Event>,
) {
if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
return;
}
let exit_evt = match self.exit_evt.try_clone() {
Ok(e) => e,
Err(e) => {
error!("error cloning exit event: {}", e);
return;
}
};
let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
Ok(v) => v,
Err(e) => {
error!("error creating kill Event pair: {}", e);
return;
}
};
self.kill_evt = Some(self_kill_evt);
let resource_bridges = mem::replace(&mut self.resource_bridges, Vec::new());
let backend_kind = self.backend_kind.clone();
// Queue order: ctrl queue first, cursor queue second.
let ctrl_queue = queues.remove(0);
let ctrl_evt = queue_evts.remove(0);
let cursor_queue = queues.remove(0);
let cursor_evt = queue_evts.remove(0);
let display_backends = self.display_backends.clone();
let display_width = self.display_width;
let display_height = self.display_height;
let renderer_flags = self.renderer_flags;
let event_devices = self.event_devices.split_off(0);
let map_request = Arc::clone(&self.map_request);
let external_blob = self.external_blob;
if let (Some(gpu_device_socket), Some(pci_bar)) =
(self.gpu_device_socket.take(), self.pci_bar.take())
{
let worker_result =
thread::Builder::new()
.name("virtio_gpu".to_string())
.spawn(move || {
let backend = match backend_kind.build(
&display_backends,
display_width,
display_height,
renderer_flags,
event_devices,
gpu_device_socket,
pci_bar,
map_request,
external_blob,
) {
Some(backend) => backend,
None => return,
};
Worker {
interrupt,
exit_evt,
mem,
ctrl_queue,
ctrl_evt,
cursor_queue,
cursor_evt,
resource_bridges,
kill_evt,
state: Frontend::new(backend),
}
.run()
});
match worker_result {
Err(e) => {
error!("failed to spawn virtio_gpu worker: {}", e);
return;
}
Ok(join_handle) => {
self.worker_thread = Some(join_handle);
}
}
}
}
// Require 1 BAR for mapping 3D buffers
fn get_device_bars(&mut self, address: PciAddress) -> Vec<PciBarConfiguration> {
self.pci_bar = Some(Alloc::PciBar {
bus: address.bus,
dev: address.dev,
func: address.func,
bar: GPU_BAR_NUM,
});
vec![PciBarConfiguration::new(
GPU_BAR_NUM as usize,
GPU_BAR_SIZE,
PciBarRegionType::Memory64BitRegion,
PciBarPrefetchable::NotPrefetchable,
)]
}
fn get_device_caps(&self) -> Vec<Box<dyn PciCapability>> {
vec![Box::new(VirtioPciShmCap::new(
PciCapabilityType::SharedMemoryConfig,
GPU_BAR_NUM,
GPU_BAR_OFFSET,
GPU_BAR_SIZE,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
))]
}
}
|
//! Glyph caching
use { freetype, graphics, Texture, TextureSettings };
use std::collections::HashMap;
use graphics::types::Scalar;
extern crate fnv;
use self::fnv::FnvHasher;
use std::hash::BuildHasherDefault;
use std::path::Path;
use error::Error;
pub use graphics::types::FontSize;
/// The type alias for font characters.
pub type Character<'a> = graphics::character::Character<'a, Texture>;
/// A struct used for caching rendered font.
pub struct GlyphCache<'a> {
/// The font face.
pub face: freetype::Face<'a>,
// Maps (font size, character) to (bitmap offset, advance, texture).
data: HashMap<(FontSize, char),
([Scalar; 2], [Scalar; 2], Texture),
BuildHasherDefault<FnvHasher>>,
}
impl<'a> GlyphCache<'a> {
/// Constructor for a GlyphCache.
pub fn new(font: &Path) -> Result<GlyphCache<'static>, Error> {
let freetype = match freetype::Library::init() {
Ok(freetype) => freetype,
Err(why) => return Err(Error::FreetypeError(why)),
};
let face = match freetype.new_face(font, 0) {
Ok(face) => face,
Err(why) => return Err(Error::FreetypeError(why)),
};
Ok(GlyphCache {
face: face,
data: HashMap::with_hasher(BuildHasherDefault::<FnvHasher>::default()),
})
}
/// Creates a GlyphCache for a font stored in memory.
pub fn from_bytes(font: &'a [u8]) -> Result<GlyphCache<'a>, Error> {
let freetype = match freetype::Library::init() {
Ok(freetype) => freetype,
Err(why) => return Err(Error::FreetypeError(why))
};
let face = match freetype.new_memory_face(font, 0) {
Ok(face) => face,
Err(why) => return Err(Error::FreetypeError(why))
};
Ok(GlyphCache {
face: face,
data: HashMap::with_hasher(BuildHasherDefault::<FnvHasher>::default())
})
}
/// Get a `Character` from cache, or load it if not there.
fn get(&mut self, size: FontSize, ch: char)
-> &([Scalar; 2], [Scalar; 2], Texture) {
// Create a `Character` from a given `FontSize` and `char`.
fn create_character(face: &freetype::Face, size: FontSize, ch: char)
-> ([Scalar; 2], [Scalar; 2], Texture) {
face.set_pixel_sizes(0, size).unwrap();
face.load_char(ch as usize, freetype::face::DEFAULT).unwrap();
let glyph = face.glyph().get_glyph().unwrap();
let bitmap_glyph = glyph.to_bitmap(freetype::render_mode::RenderMode::Normal, None)
.unwrap();
let bitmap = bitmap_glyph.bitmap();
let texture = Texture::from_memory_alpha(bitmap.buffer(),
bitmap.width() as u32,
bitmap.rows() as u32,
&TextureSettings::new()).unwrap();
(
[bitmap_glyph.left() as f64, bitmap_glyph.top() as f64],
[(glyph.advance_x() >> 16) as f64, (glyph.advance_y() >> 16) as f64],
texture,
)
}
let face = &self.face;// necessary to borrow-check
self.data.entry((size, ch))
.or_insert_with(|| create_character(face, size, ch) )
}
/// Load all characters in the `chars` iterator for `size`
pub fn preload_chars<I>(
&mut self,
size: FontSize,
chars: I
)
where
I: Iterator<Item = char>
{
for ch in chars {
self.get(size, ch);
}
}
/// Load all the printable ASCII characters for `size`. Includes space.
pub fn preload_printable_ascii(&mut self, size: FontSize) {
// [0x20, 0x7F) contains all printable ASCII characters ([' ', '~'])
self.preload_chars(size, (0x20u8 .. 0x7F).map(|ch| ch as char));
}
/// Return `ch` for `size` if it's already cached. Don't load.
/// See the `preload_*` functions.
pub fn opt_character(&self, size: FontSize, ch: char) -> Option<Character> {
self.data.get(&(size, ch)).map(|&(offset, size, ref texture)| {
Character {
offset: offset,
size: size,
texture: texture
}
})
}
}
impl<'b> graphics::character::CharacterCache for GlyphCache<'b> {
type Texture = Texture;
/// Looks up (loading and caching if needed) the character for `size` and `ch`.
fn character<'a>(&'a mut self, size: FontSize, ch: char) -> Character<'a> {
let entry = self.get(size, ch);
Character {
offset: entry.0,
size: entry.1,
texture: &entry.2,
}
}
}
Rewrite GlyphCache constructors to use functional style
Saves a few lines.
Cannot use `try!`, since `Error` doesn't implement `From<freetype::error::Error>`
(and implementing it would take more lines than it would save).
//! Glyph caching
use { freetype, graphics, Texture, TextureSettings };
use std::collections::HashMap;
use graphics::types::Scalar;
extern crate fnv;
use self::fnv::FnvHasher;
use std::hash::BuildHasherDefault;
use std::path::Path;
use error::Error;
pub use graphics::types::FontSize;
/// The type alias for font characters.
pub type Character<'a> = graphics::character::Character<'a, Texture>;
/// A struct used for caching rendered font.
///
/// Wraps a freetype face together with a memoization map from
/// `(FontSize, char)` to the rendered glyph data, so each glyph is
/// rasterized at most once per font size.
pub struct GlyphCache<'a> {
    /// The font face.
    pub face: freetype::Face<'a>,
    // Maps from fontsize and character to offset, size and texture.
    // FNV hashes these small fixed-size keys faster than the default
    // SipHash-based hasher.
    data: HashMap<(FontSize, char),
                  ([Scalar; 2], [Scalar; 2], Texture),
                  BuildHasherDefault<FnvHasher>>,
}
impl<'a> GlyphCache<'a> {
    /// Constructor for a GlyphCache.
    ///
    /// Initializes a freetype library, loads face index 0 from the file
    /// at `font`, and wraps it with an empty FNV-hashed cache. Any
    /// freetype failure is wrapped in `Error::FreetypeError`.
    pub fn new(font: &Path) -> Result<GlyphCache<'static>, Error> {
        let fnv = BuildHasherDefault::<FnvHasher>::default();
        freetype::Library::init()
            .and_then(|freetype| freetype.new_face(font, 0) )
            .map_err( Error::FreetypeError )
            .map(|face| GlyphCache {
                face: face,
                data: HashMap::with_hasher(fnv),
            } )
    }

    /// Creates a GlyphCache for a font stored in memory.
    ///
    /// Same as `new`, but the face borrows the caller's byte buffer for
    /// the lifetime `'a` of the cache instead of reading from a file.
    pub fn from_bytes(font: &'a [u8]) -> Result<GlyphCache<'a>, Error> {
        let fnv = BuildHasherDefault::<FnvHasher>::default();
        freetype::Library::init()
            .and_then(|freetype| freetype.new_memory_face(font, 0) )
            .map_err( Error::FreetypeError )
            .map(|face| GlyphCache {
                face: face,
                data: HashMap::with_hasher(fnv),
            } )
    }

    /// Get a `Character` from cache, or load it if not there.
    ///
    /// On a miss the glyph is rasterized with freetype and memoized under
    /// `(size, ch)`. Panics (via `unwrap`) if freetype fails.
    fn get(&mut self, size: FontSize, ch: char)
           -> &([Scalar; 2], [Scalar; 2], Texture) {
        // Create a `Character` from a given `FontSize` and `char`.
        fn create_character(face: &freetype::Face, size: FontSize, ch: char)
                            -> ([Scalar; 2], [Scalar; 2], Texture) {
            face.set_pixel_sizes(0, size).unwrap();
            face.load_char(ch as usize, freetype::face::DEFAULT).unwrap();
            let glyph = face.glyph().get_glyph().unwrap();
            let bitmap_glyph = glyph.to_bitmap(freetype::render_mode::RenderMode::Normal, None)
                .unwrap();
            let bitmap = bitmap_glyph.bitmap();
            // Upload the rendered coverage bitmap as an alpha-only texture.
            let texture = Texture::from_memory_alpha(bitmap.buffer(),
                                                     bitmap.width() as u32,
                                                     bitmap.rows() as u32,
                                                     &TextureSettings::new()).unwrap();
            (
                [bitmap_glyph.left() as f64, bitmap_glyph.top() as f64],
                // The advance looks like 16.16 fixed point, hence `>> 16`
                // -- TODO confirm against the freetype docs.
                [(glyph.advance_x() >> 16) as f64, (glyph.advance_y() >> 16) as f64],
                texture,
            )
        }
        let face = &self.face; // borrow only the field, not all of `self`,
                               // so the closure below passes the borrow check
        self.data.entry((size, ch))
            .or_insert_with(|| create_character(face, size, ch) )
    }

    /// Load all characters in the `chars` iterator for `size`
    pub fn preload_chars<I>(
        &mut self,
        size: FontSize,
        chars: I
    )
        where
            I: Iterator<Item = char>
    {
        for ch in chars {
            self.get(size, ch);
        }
    }

    /// Load all the printable ASCII characters for `size`. Includes space.
    pub fn preload_printable_ascii(&mut self, size: FontSize) {
        // [0x20, 0x7F) contains all printable ASCII characters ([' ', '~'])
        self.preload_chars(size, (0x20u8 .. 0x7F).map(|ch| ch as char));
    }

    /// Return `ch` for `size` if it's already cached. Don't load.
    /// See the `preload_*` functions.
    pub fn opt_character(&self, size: FontSize, ch: char) -> Option<Character> {
        self.data.get(&(size, ch)).map(|&(offset, size, ref texture)| {
            Character {
                offset: offset,
                size: size,
                texture: texture
            }
        })
    }
}
impl<'b> graphics::character::CharacterCache for GlyphCache<'b> {
    type Texture = Texture;

    /// Fetch the cached `Character` for `ch` at `size`, loading it on
    /// demand via `GlyphCache::get`.
    fn character<'a>(&'a mut self, size: FontSize, ch: char) -> Character<'a> {
        let &(offset, advance, ref texture) = self.get(size, ch);
        Character {
            offset: offset,
            size: advance,
            texture: texture,
        }
    }
}
|
use std::path::PathBuf;
use super::Algorithm;
use std::fmt::Write;
/// Generate a `pub fn hash(path: &PathBuf) -> String` that streams a
/// file through a hashing context.
///
/// `$ctx` is an expression producing the fresh hash state, `$update` is
/// invoked as `$update(&mut ctx, &buffer, read)` for every chunk read,
/// and `$convert(ctx)` turns the finished state into the result `String`.
///
/// NOTE(review): the generated function expects `File` and the `Read`
/// trait to be in scope at the expansion site, and `unwrap`s on I/O
/// errors (panics on unreadable files).
macro_rules! hash_func {
    ($ctx:expr, $update:expr, $convert:expr) => {
        pub fn hash(path: &PathBuf) -> String {
            let mut file = File::open(path).unwrap();
            // 4 KiB chunks: large files are hashed without loading them
            // whole into memory.
            let mut buffer = vec![0; 4096];
            let mut ctx = $ctx;
            loop {
                let read = file.read(&mut buffer[..]).unwrap();
                if read == 0 {
                    break;
                }
                $update(&mut ctx, &buffer, read);
            }
            $convert(ctx)
        }
    }
}
mod md5;
mod xor8;
mod crc8;
mod crc16;
mod blake;
mod blake2;
mod crc32_64;
mod sha3256_3512;
mod sha1_2256_2512;
mod md6128_256_512;
/// Hash the specified file using the specified hashing algorithm.
pub fn hash_file(path: &PathBuf, algo: Algorithm) -> String {
    match algo {
        // MD family
        Algorithm::MD5 => md5::hash(path),
        Algorithm::MD6128 => md6128_256_512::md6128::hash(path),
        Algorithm::MD6256 => md6128_256_512::md6256::hash(path),
        Algorithm::MD6512 => md6128_256_512::md6512::hash(path),
        // SHA family
        Algorithm::SHA1 => sha1_2256_2512::sha1::hash(path),
        Algorithm::SHA2256 => sha1_2256_2512::sha2256::hash(path),
        Algorithm::SHA2512 => sha1_2256_2512::sha2512::hash(path),
        Algorithm::SHA3256 => sha3256_3512::sha3256::hash(path),
        Algorithm::SHA3512 => sha3256_3512::sha3512::hash(path),
        // BLAKE family
        Algorithm::BLAKE => blake::hash(path),
        Algorithm::BLAKE2 => blake2::hash(path),
        // Plain checksums
        Algorithm::CRC8 => crc8::hash(path),
        Algorithm::CRC16 => crc16::hash(path),
        Algorithm::CRC32 => crc32_64::crc32::hash(path),
        Algorithm::CRC64 => crc32_64::crc64::hash(path),
        Algorithm::XOR8 => xor8::hash(path),
    }
}
/// Create a hash string out of its raw bytes.
///
/// Each byte is rendered as two uppercase hex digits.
///
/// # Examples
///
/// ```
/// assert_eq!(checksums::hash_string(&[0x99, 0xAA, 0xBB, 0xCC]), "99AABBCC".to_string());
/// assert_eq!(checksums::hash_string(&[0x09, 0x0A]), "090A".to_string());
/// ```
pub fn hash_string(bytes: &[u8]) -> String {
    // Two hex digits per byte, so the exact capacity is known up front.
    let mut out = String::with_capacity(bytes.len() * 2);
    bytes.iter().for_each(|b| write!(out, "{:02X}", b).unwrap());
    out
}
Reordering: define the `hash_func!` macro before the `use` and `mod` items, so it is already in scope when the submodules that expand it are compiled.
/// Generate a `pub fn hash(path: &PathBuf) -> String` that streams a
/// file through a hashing context.
///
/// Expansion contract: `$ctx` creates the hash state, `$update` is
/// called as `$update(&mut ctx, &buffer, read)` per chunk, and
/// `$convert(ctx)` produces the final `String`.
///
/// NOTE(review): expansion sites must have `File` and the `Read` trait
/// in scope; all I/O errors are `unwrap`ped, so unreadable files panic.
macro_rules! hash_func {
    ($ctx:expr, $update:expr, $convert:expr) => {
        pub fn hash(path: &PathBuf) -> String {
            let mut file = File::open(path).unwrap();
            // Stream in 4 KiB chunks to bound memory use on large files.
            let mut buffer = vec![0; 4096];
            let mut ctx = $ctx;
            loop {
                let read = file.read(&mut buffer[..]).unwrap();
                if read == 0 {
                    break;
                }
                $update(&mut ctx, &buffer, read);
            }
            $convert(ctx)
        }
    }
}
use std::path::PathBuf;
use super::Algorithm;
use std::fmt::Write;
mod md5;
mod xor8;
mod crc8;
mod crc16;
mod blake;
mod blake2;
mod crc32_64;
mod sha3256_3512;
mod sha1_2256_2512;
mod md6128_256_512;
/// Hash the specified file using the specified hashing algorithm.
///
/// Dispatches to the per-algorithm submodule; each backend reads the
/// file at `path` and returns its digest as a `String`.
///
/// NOTE(review): the macro-generated backends `unwrap` on I/O errors,
/// so this panics on unreadable files -- confirm for the hand-written
/// backends too.
pub fn hash_file(path: &PathBuf, algo: Algorithm) -> String {
    match algo {
        Algorithm::SHA1 => sha1_2256_2512::sha1::hash(path),
        Algorithm::SHA2256 => sha1_2256_2512::sha2256::hash(path),
        Algorithm::SHA2512 => sha1_2256_2512::sha2512::hash(path),
        Algorithm::SHA3256 => sha3256_3512::sha3256::hash(path),
        Algorithm::SHA3512 => sha3256_3512::sha3512::hash(path),
        Algorithm::BLAKE => blake::hash(path),
        Algorithm::BLAKE2 => blake2::hash(path),
        Algorithm::CRC64 => crc32_64::crc64::hash(path),
        Algorithm::CRC32 => crc32_64::crc32::hash(path),
        Algorithm::CRC16 => crc16::hash(path),
        Algorithm::CRC8 => crc8::hash(path),
        Algorithm::MD5 => md5::hash(path),
        Algorithm::MD6128 => md6128_256_512::md6128::hash(path),
        Algorithm::MD6256 => md6128_256_512::md6256::hash(path),
        Algorithm::MD6512 => md6128_256_512::md6512::hash(path),
        Algorithm::XOR8 => xor8::hash(path),
    }
}
/// Create a hash string out of its raw bytes.
///
/// Formats every byte as two uppercase hexadecimal digits.
///
/// # Examples
///
/// ```
/// assert_eq!(checksums::hash_string(&[0x99, 0xAA, 0xBB, 0xCC]), "99AABBCC".to_string());
/// assert_eq!(checksums::hash_string(&[0x09, 0x0A]), "090A".to_string());
/// ```
pub fn hash_string(bytes: &[u8]) -> String {
    // Fold into a pre-sized String: two hex digits per input byte.
    bytes.iter().fold(String::with_capacity(bytes.len() * 2), |mut acc, b| {
        write!(acc, "{:02X}", b).unwrap();
        acc
    })
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.